hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cd41dc6c15abc396f48b5d70714d787d543ad746 | 3,910 | py | Python | options/train_options.py | icon-lab/ResViT | 8f22273a37b3c6b221034eb344b50c52a67df943 | [
"MIT"
] | 16 | 2021-11-11T15:31:09.000Z | 2022-03-18T09:08:19.000Z | options/train_options.py | 2805413893/ResViT | 699e7e900078263f6033751862146105fbceb73e | [
"MIT"
] | 5 | 2022-01-24T11:22:52.000Z | 2022-03-27T07:01:02.000Z | options/train_options.py | 2805413893/ResViT | 699e7e900078263f6033751862146105fbceb73e | [
"MIT"
] | 3 | 2021-12-28T12:02:15.000Z | 2022-02-10T09:22:44.000Z | from .base_options import BaseOptions
class TrainOptions(BaseOptions):
    """Training-time command line options.

    Extends BaseOptions with flags controlling display/save frequencies,
    optimizer settings, loss weights and learning-rate scheduling.
    """

    def initialize(self):
        BaseOptions.initialize(self)
        # Visualization / logging frequencies.
        self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
        self.parser.add_argument('--display_single_pane_ncols', type=int, default=0, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
        self.parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
        self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        # Checkpointing.
        self.parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
        self.parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
        self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        self.parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # Optimizer / learning-rate schedule.
        self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
        self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        # Fix: help text was a copy-paste of --lr's description.
        self.parser.add_argument('--trans_lr_coef', type=float, default=1, help='coefficient scaling the learning rate for the transformer part')
        self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
        # Loss weights.
        self.parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
        self.parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
        self.parser.add_argument('--lambda_f', type=float, default=0.9, help='momentum term for f')
        self.parser.add_argument('--lambda_identity', type=float, default=0,
                                 help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss.'
                                      'For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
        self.parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
        self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        self.parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau')
        self.parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
        self.parser.add_argument('--lambda_vgg', type=float, default=1.0, help='weight for vgg loss')
        self.parser.add_argument('--vgg_layer', type=float, default=2, help='layer of vgg for perc loss')
        self.parser.add_argument('--lambda_adv', type=float, default=1.0, help='weight for adversarial loss')
        # Fix: set the training flag once, after all arguments are registered
        # (it previously sat between two add_argument calls).
        self.isTrain = True
af39f7f05a9caec87aa6561c64dc817819b45efd | 44 | py | Python | bsose/_version.py | gmacgilchrist/bsose | b4e8549f6e94537ded0909d268ffac1bcaf3e7af | [
"MIT"
] | null | null | null | bsose/_version.py | gmacgilchrist/bsose | b4e8549f6e94537ded0909d268ffac1bcaf3e7af | [
"MIT"
] | 3 | 2021-07-28T21:40:53.000Z | 2021-12-12T18:58:52.000Z | bsose/_version.py | gmacgilchrist/bsose | b4e8549f6e94537ded0909d268ffac1bcaf3e7af | [
"MIT"
] | null | null | null | __version__ = "0.1.dev13+g965e313.d20211108" | 44 | 44 | 0.795455 |
f80acde1739d498e5767d124fee05f4c3b519aef | 2,211 | py | Python | backend/damp_fog_30007/urls.py | crowdbotics-apps/damp-fog-30007 | 69fec9467e05d1a7d909c41b7e3f8f35cd9558d7 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/damp_fog_30007/urls.py | crowdbotics-apps/damp-fog-30007 | 69fec9467e05d1a7d909c41b7e3f8f35cd9558d7 | [
"FTL",
"AML",
"RSA-MD"
] | 33 | 2021-08-21T11:42:48.000Z | 2022-03-20T14:11:59.000Z | backend/damp_fog_30007/urls.py | crowdbotics-apps/damp-fog-30007 | 69fec9467e05d1a7d909c41b7e3f8f35cd9558d7 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """damp_fog_30007 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# URL routes for the Damp Fog project. Order matters: the SPA catch-all at
# the bottom must stay last or it will shadow every other route.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "Damp Fog"
admin.site.site_title = "Damp Fog Admin Portal"
admin.site.index_title = "Damp Fog Admin"
# swagger
api_info = openapi.Info(
    title="Damp Fog API",
    default_version="v1",
    description="API documentation for Damp Fog App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
# Swagger UI is only reachable by authenticated users (see permission_classes).
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
# Serve the single-page app for the root path and any otherwise-unmatched URL.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| 35.095238 | 87 | 0.710086 |
c1ac17aa853b8853f8273853a534d4f93c89aaac | 34,245 | py | Python | slak-matrix-migration/slak-matrix-migration/migrate.py | sapianco/slack-matrix-migration-orig | 26a8b80b0764c5b132308c6f683ff7e251304edd | [
"Apache-2.0"
] | null | null | null | slak-matrix-migration/slak-matrix-migration/migrate.py | sapianco/slack-matrix-migration-orig | 26a8b80b0764c5b132308c6f683ff7e251304edd | [
"Apache-2.0"
] | 1 | 2021-12-21T13:57:17.000Z | 2021-12-21T13:57:17.000Z | slak-matrix-migration/slak-matrix-migration/migrate.py | sapianco/slack-matrix-migration-orig | 26a8b80b0764c5b132308c6f683ff7e251304edd | [
"Apache-2.0"
] | null | null | null | #!/bin/python3
# -*- coding: utf-8 -*-
# Copyright 2019, 2020 Awesome Technologies Innovationslabor GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import json
import logging
import os
import re
import secrets
import string
import sys
import time
import traceback
import zipfile
import requests
import slackdown
import yaml
from alive_progress import alive_bar
from dotenv import load_dotenv
from emoji import emojize
from files import process_attachments, process_files
from utils import send_event, invite_user
# Read optional admin credentials / log level from the environment (.env).
load_dotenv()
LOG_LEVEL = os.environ.get('LOG_LEVEL', "INFO").upper()
ADMIN_USER_MATRIX = os.environ.get('ADMIN_USER_MATRIX')
ADMIN_PASS_MATRIX = os.environ.get('ADMIN_PASS_MATRIX')
# NOTE(review): LOG_LEVEL is read twice; the second read is redundant.
LOG_LEVEL = os.environ.get('LOG_LEVEL', "INFO").upper()
logging.basicConfig(level=LOG_LEVEL)
log = logging.getLogger('SLACK.MIGRATE')
# Mirror all migration log output into log/migration.log (truncated per run).
log_filename = "log/migration.log"
os.makedirs(os.path.dirname(log_filename), exist_ok=True)
fileHandler = logging.FileHandler(log_filename, mode="w", encoding=None, delay=False)
log.addHandler(fileHandler)
# consoleHandler = logging.StreamHandler()
# consoleHandler.setFormatter(logFormatter)
# log.addHandler(consoleHandler)
# JSON files expected inside the Slack export zip.
channelTypes = ["dms.json", "groups.json", "mpims.json", "channels.json", "users.json"]
# Lookup tables filled during migration:
userLUT = {}    # slack user id -> matrix user id
nameLUT = {}    # matrix user id -> display name
roomLUT = {}    # slack channel id -> matrix room id
roomLUT2 = {}   # slack channel id -> slack channel name
dmLUT = {}      # slack DM channel id -> matrix room id
eventLUT = {}   # slack message key (user+ts) -> matrix event id
threadLUT = {}  # thread-parent key -> reply fallback metadata
replyLUT = {}   # thread-reply key -> parent message key
later = []      # messages postponed because their thread parent was unknown
read_luts = False
if not os.path.isfile("conf/config.yaml"):
    log.info("Config file does not exist.")
    sys.exit(1)
f = open("conf/config.yaml", "r")
config_yaml = yaml.load(f.read(), Loader=yaml.FullLoader)
# load luts from previous run
if os.path.isfile("run/luts.yaml"):
    f = open("run/luts.yaml", "r")
    luts = yaml.load(f.read(), Loader=yaml.FullLoader)
    userLUT = luts["userLUT"]
    nameLUT = luts["nameLUT"]
    roomLUT = luts["roomLUT"]
    roomLUT2 = luts["roomLUT2"]
    dmLUT = luts["dmLUT"]
    read_luts = True
def test_config(yaml):
    """Validate required keys in the global ``config_yaml`` and return it.

    The ``yaml`` parameter is unused but kept so existing callers
    (``test_config(yaml)`` in ``main``) continue to work. Exits the
    process when a required key is missing or empty.
    """
    if not config_yaml["zipfile"]:
        log.info("No zipfile defined in config")
        sys.exit(1)
    if not config_yaml["homeserver"]:
        log.info("No homeserver defined in config")
        sys.exit(1)
    if not config_yaml["as_token"]:
        log.info("No Application Service token defined in config")
        sys.exit(1)
    # The original bound these to unused locals; keep the subscript access so
    # a config missing either key still fails fast with a KeyError here.
    config_yaml["dry-run"]
    config_yaml["skip-archived"]
    return config_yaml
def loadZip(config):
    """Open the Slack export zip and return handles to the known JSON files.

    Returns a dict mapping each entry of ``channelTypes`` present in the
    archive to an open (binary) file object; missing entries are skipped.
    """
    zipName = config["zipfile"]
    log.info("Opening zipfile: " + zipName)
    archive = zipfile.ZipFile(zipName, 'r')
    jsonFiles = {}
    for channelType in channelTypes:
        try:
            jsonFiles[channelType] = archive.open(channelType)
            log.info("Found " + channelType + " in archive. Adding.")
        # Fix: ZipFile.open raises KeyError for a missing member; the
        # original bare ``except:`` also swallowed unrelated errors
        # (including KeyboardInterrupt).
        except KeyError:
            log.info("Warning: Couldn't find " + channelType + " in archive. Skipping.")
    return jsonFiles
def loadZipFolder(config, folder):
    """Return the names of all files stored under *folder* in the export zip.

    Matches on the first path component, so nested files are included;
    directory entries are skipped.
    """
    with zipfile.ZipFile(config["zipfile"], 'r') as zf:
        return [
            info.filename
            for info in zf.infolist()
            if not info.is_dir()
            and info.filename.split("/", maxsplit=1)[0] == folder
        ]
# TODO: user alive-progress
# using bubble bar and notes spinner
# with alive_bar(200, bar = 'bubbles', spinner = 'pointer') as bar:
# for i in range(200):
# sleep(0.03)
# bar() # call after consuming one ite
# update_progress() : Displays or updates a console progress bar
## Accepts a float between 0 and 1. Any int will be converted to a float.
## A value under 0 represents a 'halt'.
## A value at 1 or bigger represents 100%
def update_progress(progress):
    """Draw/refresh a 40-character text progress bar on stdout.

    *progress* is a float in [0, 1]; ints are converted, values below 0
    render a halt notice, values >= 1 render as done, and non-numeric
    input is treated as 0 with an error note.
    """
    BAR_WIDTH = 40  # total number of bar characters
    note = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress, note = 0, "error: progress var must be float\r\n"
    if progress < 0:
        progress, note = 0, "Halt...\r\n"
    if progress >= 1:
        progress, note = 1, "Done...\r\n"
    filled = int(round(BAR_WIDTH * progress))
    bar = "#" * filled + "-" * (BAR_WIDTH - filled)
    sys.stdout.write("\rPercent: [{0}] {1:3.2f}% {2}".format(bar, progress * 100, note))
    sys.stdout.flush()
def login(server_location):
    """Log the admin user in to the homeserver and obtain an access token.

    Credentials come from ADMIN_USER_MATRIX / ADMIN_PASS_MATRIX when set,
    otherwise the operator is prompted interactively. Returns a tuple
    ``(admin_user, access_token)`` on success or ``False`` on HTTP failure;
    exits the process on blank credentials.
    """
    try:
        default_user = getpass.getuser()
    except Exception:
        default_user = None
    if not ADMIN_USER_MATRIX:
        if default_user:
            admin_user = input("Admin user localpart [%s]: " % (default_user,))
            if not admin_user:
                admin_user = default_user
        else:
            admin_user = input("Admin user localpart: ")
    else:
        admin_user = ADMIN_USER_MATRIX
    if not admin_user:
        log.info("Invalid user name")
        sys.exit(1)
    if not ADMIN_PASS_MATRIX:
        admin_password = getpass.getpass("Password: ")
    else:
        admin_password = ADMIN_PASS_MATRIX
    if not admin_password:
        log.info("Password cannot be blank.")
        sys.exit(1)
    url = "%s/_matrix/client/r0/login" % (server_location,)
    data = {
        "type": "m.login.password",
        "user": admin_user,
        "password": admin_password,
    }
    # Get the access token
    r = requests.post(url, json=data, verify=False)
    if r.status_code != 200:
        log.info("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:
                log.info(r.json()["error"])
            except Exception:
                pass
        return False
    access_token = r.json()["access_token"]
    return admin_user, access_token
def getMaxUploadSize(config, access_token):
    """Query the homeserver media config for the maximum upload size (bytes).

    NOTE(review): on a non-200 response this only logs and still falls
    through to ``r.json()["m.upload.size"]``, which will then raise —
    consider returning early on error.
    """
    # get maxUploadSize from Homeserver
    url = "%s/_matrix/media/r0/config?access_token=%s" % (config_yaml["homeserver"],access_token,)
    r = requests.get(url, verify=False)
    if r.status_code != 200:
        log.info("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:
                log.info(r.json()["error"])
            except Exception:
                pass
    maxUploadSize = r.json()["m.upload.size"]
    return maxUploadSize
def register_user(
    user,
    password,
    displayname,
    server_location,
    access_token,
    admin=False,
    user_type=None,
):
    """Create or update a Matrix user via the Synapse admin API (v2 PUT).

    The display name gets the configured ``name-suffix`` appended.
    Returns the HTTP response on success (200/201), ``False`` otherwise.
    ``user_type`` is accepted but currently unused.
    """
    url = "%s/_synapse/admin/v2/users/@%s:%s" % (server_location, user, config_yaml['domain'])
    headers = {'Authorization': ' '.join(['Bearer', access_token])}
    data = {
        "password": password,
        "displayname": "".join([displayname, config_yaml["name-suffix"]]),
        "admin": admin,
    }
    try:
        r = requests.put(url, json=data, headers=headers, verify=False)
    except requests.exceptions.RequestException as e:
        # catastrophic error. bail.
        log.error(
            "Logging an uncaught exception {}".format(e),
            exc_info=(traceback)
        )
        # log.debug("error creating room {}".format(body))
        return False
    else:
        if r.status_code != 200 and r.status_code != 201:
            log.info("ERROR! Received %d %s" % (r.status_code, r.reason))
            if 400 <= r.status_code < 500:
                try:
                    log.info(r.json()["error"])
                except Exception:
                    pass
            return False
        else:
            return r
def register_room(
    name,
    creator,
    topic,
    invitees,
    preset,
    server_location,
    as_token,
):
    """Create a Matrix room on behalf of *creator* via the AS token.

    ``preset`` selects the room visibility (e.g. ``public_chat``,
    ``private_chat``, ``trusted_private_chat`` — the latter marks the
    room as a direct chat). The configured ``room-suffix`` is appended to
    the room name. Returns the HTTP response on success, ``False`` on error.
    """
    # register room
    log.debug("register room {}".format(
        (
            name,
            creator,
            topic,
            invitees,
            preset,
        )
    )
    )
    url = "%s/_matrix/client/r0/createRoom?user_id=%s" % (server_location,creator,)
    body = {
        "preset": preset,
        "name": "".join([name, config_yaml["room-suffix"]]),
        "topic": topic,
        "creation_content": {
            # Whether the room may be federated to other homeservers.
            "m.federate": config_yaml["federate-rooms"]
        },
        "invite": invitees,
        "is_direct": True if preset == "trusted_private_chat" else False,
    }
    #_log.info("Sending registration request...")
    try:
        r = requests.post(url, headers={'Authorization': 'Bearer ' + as_token}, json=body, verify=False, timeout=300 )
    # except requests.exceptions.Timeout:
    #     # Maybe set up for a retry, or continue in a retry loop
    # except requests.exceptions.TooManyRedirects:
    #     # Tell the user their URL was bad and try a different one
    except requests.exceptions.RequestException as e:
        # catastrophic error. bail.
        log.error(
            "Logging an uncaught exception {}".format(e),
            exc_info=(traceback)
        )
        # log.debug("error creating room {}".format(body))
        return False
    if r.status_code != 200:
        log.error("ERROR! Received %d %s" % (r.status_code, r.reason))
        if 400 <= r.status_code < 500:
            try:
                log.error(r.json()["error"])
            except Exception:
                pass
        return False
    return r
def invite_users(
    invitees,
    creator,
    roomId,
    config,
):
    """Invite every Matrix user id in *invitees* to *roomId*.

    ``creator`` is accepted for interface compatibility but not used here.
    """
    for invitee in invitees:
        invite_user(roomId, invitee, config)
def autojoin_users(
    invitees,
    roomId,
    config,
):
    """Force-join each user in *invitees* into *roomId* using the AS token.

    NOTE(review): a network exception returns ``False`` immediately and
    skips the remaining invitees; non-200 responses are only logged.
    """
    for user in invitees:
        #POST /_matrix/client/r0/rooms/{roomId}/join
        url = "%s/_matrix/client/r0/rooms/%s/join?user_id=%s" % (config["homeserver"],roomId,user,)
        #_log.info("Sending registration request...")
        try:
            r = requests.post(url, headers={'Authorization': 'Bearer ' + config["as_token"]}, verify=False)
        except requests.exceptions.RequestException as e:
            log.error(
                "Logging an uncaught exception {}".format(e),
                exc_info=(traceback)
            )
            # log.debug("error creating room {}".format(body))
            return False
        else:
            if r.status_code != 200:
                log.error("ERROR! Received %d %s" % (r.status_code, r.reason))
                if 400 <= r.status_code < 500:
                    try:
                        log.info(r.json()["error"])
                    except Exception:
                        pass
def migrate_users(userFile, config, access_token):
    """Register a Matrix account for every human Slack user in *userFile*.

    Bots and Slackbot are skipped. Fills the module-level ``userLUT``
    (slack id -> matrix id) and ``nameLUT`` (matrix id -> display name)
    and returns a list of per-user detail dicts (including the generated
    random password).
    """
    log = logging.getLogger('SLACK.MIGRATE.USER')
    userlist = []
    userData = json.load(userFile)
    with alive_bar(len(userData), bar = 'bubbles', spinner = 'waves2') as bar:
        for user in userData:
            if user["is_bot"] == True:
                bar()
                continue
            # ignore slackbot
            if user["id"] == "USLACKBOT":
                bar()
                continue
            # homeserver URL is "https://host[/...]" -> take the host part.
            _servername = config["homeserver"].split('/')[2]
            _matrix_user = user["name"]
            _matrix_id = '@' + user["name"] + ':' + _servername
            # check if display name is set
            if "real_name" in user["profile"]:
                _real_name = user["profile"]["real_name"]
            else:
                _real_name = ""
            # check if email is set
            if "email" in user["profile"]:
                _email = user["profile"]["email"]
            else:
                _email = ""
            # generate password
            _alphabet = string.ascii_letters + string.digits
            _password = ''.join(secrets.choice(_alphabet) for i in range(20)) # for a 20-character password
            userDetails = {
                "slack_id": user["id"],
                "slack_team_id": user["team_id"],
                "slack_name": user["name"],
                "slack_real_name": _real_name,
                "slack_email": _email,
                "matrix_id": _matrix_id,
                "matrix_user": _matrix_user,
                "matrix_password": _password,
            }
            log.info("Registering Slack user " + userDetails["slack_id"] + " -> " + userDetails["matrix_id"])
            if not config["dry-run"]:
                res = register_user(userDetails["matrix_user"], userDetails["matrix_password"], userDetails["slack_real_name"], config["homeserver"], access_token)
                if res == False:
                    log.error("ERROR while registering user '" + userDetails["matrix_id"] + "'")
                    continue
                # TODO force password change at next login
                # https://github.com/euank/synapse-password-reset
            userLUT[userDetails["slack_id"]] = userDetails["matrix_id"]
            nameLUT[userDetails["matrix_id"]] = userDetails["slack_real_name"]
            userlist.append(userDetails)
            # time.sleep(1)
            bar()
    return userlist
def migrate_rooms(roomFile, config, admin_user):
    """Create a Matrix room for every Slack channel/group in *roomFile*.

    Archived channels are skipped when ``skip-archived`` is set. Fills
    ``roomLUT`` (slack id -> matrix room id) and ``roomLUT2`` (slack id ->
    slack name) and returns a list of room-detail dicts.
    """
    log = logging.getLogger('SLACK.MIGRATE.ROOMS')
    roomlist = []
    # channels
    channelData = json.load(roomFile)
    with alive_bar(len(channelData), bar = 'hollow', spinner = 'waves2') as bar:
        for channel in channelData:
            if config["skip-archived"]:
                if channel["is_archived"] == True:
                    bar()
                    continue
            if config_yaml["create-as-admin"]:
                _mxCreator = "".join(["@", admin_user, ":", config_yaml["domain"]])
            else:
                # if user is not in LUT (maybe its a shared channel), default to admin_user
                if channel["creator"] in userLUT:
                    _mxCreator = userLUT[channel["creator"]]
                else:
                    _mxCreator = "".join(["@", admin_user, ":", config_yaml["domain"]])
            # Full invite list: either everyone migrated or just the members.
            _invitees = []
            if config_yaml["invite-all"]:
                for user in nameLUT.keys():
                    if user != _mxCreator:
                        _invitees.append(user)
            else:
                for user in channel["members"]:
                    if user != channel["creator"]:
                        if user in userLUT: # ignore dropped users like bots
                            _invitees.append(userLUT[user])
            # Minimal list (members only) used for the createRoom call itself.
            minimal_invites = []
            for user in channel["members"]:
                if user != channel["creator"]:
                    if user in userLUT: # ignore dropped users like bots
                        minimal_invites.append(userLUT[user])
            roomDetails = {
                "slack_id": channel["id"],
                "slack_name": channel["name"],
                "slack_members": channel["members"],
                "slack_topic": channel["topic"],
                "slack_purpose": channel["purpose"],
                "slack_created": channel["created"],
                "slack_creator": channel["creator"],
                "matrix_id": '',
                "matrix_creator": _mxCreator,
                "matrix_topic": channel["topic"]["value"],
            }
            room_preset = "private_chat" if config_yaml["import-as-private"] else "public_chat"
            if not config["dry-run"]:
                res = register_room(roomDetails["slack_name"], roomDetails["matrix_creator"], roomDetails["matrix_topic"], minimal_invites, room_preset, config["homeserver"], config["as_token"])
                if res == False:
                    log.info("ERROR while registering room '" + roomDetails["slack_name"] + "'")
                    continue
                else:
                    _content = json.loads(res.content)
                    roomDetails["matrix_id"] = _content["room_id"]
                log.info("Registered Slack channel " + roomDetails["slack_name"] + " -> " + roomDetails["matrix_id"])
                #invite all members
                if config_yaml["invite-all"]:
                    invite_users(_invitees, roomDetails["matrix_creator"], roomDetails["matrix_id"], config)
                #autojoin all members
                autojoin_users(_invitees, roomDetails["matrix_id"], config)
                roomLUT[roomDetails["slack_id"]] = roomDetails["matrix_id"]
                roomLUT2[roomDetails["slack_id"]] = roomDetails["slack_name"]
                roomlist.append(roomDetails)
            #time.sleep(1)
            bar()
    return roomlist
def migrate_dms(roomFile, config):
    """Create trusted-private Matrix rooms for Slack direct-message channels.

    Slackbot DMs and (optionally) archived channels are skipped. Fills the
    module-level ``dmLUT`` (slack DM id -> matrix room id) and returns a
    list of room-detail dicts.
    """
    log = logging.getLogger('SLACK.MIGRATE.DMS')
    roomlist = []
    # channels
    channelData = json.load(roomFile)
    with alive_bar(len(channelData), bar = 'squares', spinner = 'waves2') as bar:
        for channel in channelData:
            if config["skip-archived"]:
                if channel["is_archived"] == True:
                    bar()
                    continue
            # skip dms with slackbot
            if channel["user"] == "USLACKBOT":
                # Fix: tick the progress bar for skipped entries too (the
                # original left the bar short by one per Slackbot DM).
                bar()
                continue
            _mxCreator = userLUT[channel["user"]]
            _invitees = []
            for user in channel["members"]:
                if user != channel["user"]:
                    _invitees.append(userLUT[user])
            roomDetails = {
                "slack_id": channel["id"],
                "slack_members": channel["members"],
                "slack_created": channel["created"],
                "slack_creator": channel["user"],
                "matrix_id": '',
                "matrix_creator": _mxCreator,
            }
            if not config["dry-run"]:
                res = register_room('', roomDetails["matrix_creator"], '', _invitees, "trusted_private_chat", config["homeserver"], config["as_token"])
                if res == False:
                    # Fix: DM channels have no "slack_name" key — the original
                    # raised KeyError here instead of logging the failure.
                    log.info("ERROR while registering DM room '" + roomDetails["slack_id"] + "'")
                    continue
                else:
                    _content = json.loads(res.content)
                    roomDetails["matrix_id"] = _content["room_id"]
                log.info("Registered Slack DM channel " + roomDetails["slack_id"] + " -> " + roomDetails["matrix_id"])
                #autojoin all members
                autojoin_users(_invitees, roomDetails["matrix_id"], config)
                dmLUT[roomDetails["slack_id"]] = roomDetails["matrix_id"]
                roomlist.append(roomDetails)
            bar()
    return roomlist
def send_reaction(config, roomId, eventId, reactionKey, userId, txnId):
    """Send an ``m.reaction`` annotation for *eventId* as *userId*."""
    relation = {
        "event_id": eventId,
        "key": reactionKey,
        "rel_type": "m.annotation",
    }
    return send_event(config, {"m.relates_to": relation}, roomId, userId, "m.reaction", txnId)
def replace_mention(matchobj):
    """Regex callback: turn a Slack ``<@UXXXX>`` mention into a matrix.to link.

    Mentions of users that were not migrated (e.g. bots) are dropped.
    """
    slack_id = matchobj.group(0)[2:-1]
    if slack_id not in userLUT:
        return ''
    mxid = userLUT[slack_id]
    display = nameLUT[mxid]
    return "<a href='https://matrix.to/#/" + mxid + "'>" + display + "</a>"
def getFallbackHtml(roomId, replyEvent):
    """Build the rich-reply HTML fallback quoting *replyEvent*.

    Fix: fall back to the plain-text body when the quoted event has no
    HTML-formatted body. The original checked ``body`` instead of
    ``formatted_body``, which discarded the HTML whenever the plain body
    was empty and used an empty/missing formatted body otherwise.
    """
    originalBody = replyEvent["body"]
    originalHtml = replyEvent["formatted_body"]
    if not originalHtml:
        originalHtml = originalBody
    return '<mx-reply><blockquote><a href="https://matrix.to/#/' + roomId + '/' + replyEvent["event_id"] + '">In reply to</a><a href="https://matrix.to/#/' + replyEvent["sender"] + '">' + replyEvent["sender"] + '</a><br />' + originalHtml + '</blockquote></mx-reply>'
def getFallbackText(replyEvent):
    """Build the plain-text quoted fallback for a rich reply."""
    quoted = replyEvent["body"].replace("\n", "\n> ")
    return '> <' + replyEvent["sender"] + '> ' + quoted
def parse_and_send_message(config, message, matrix_room, txnId, is_later, log):
    """Convert one Slack message into a Matrix event and send it.

    Handles mentions, files, attachments, thread/reply relations and
    reactions. Returns the next transaction id. Replies whose thread
    parent has not been seen yet are queued in the module-level ``later``
    list when ``is_later`` is False, to be re-processed in a second pass.
    """
    content = {}
    is_thread = False
    is_reply = False
    if message["type"] == "message":
        if "subtype" in message:
            # Skip administrative notices with no useful Matrix equivalent.
            if (message["subtype"] == "bot_message" or
                    message["subtype"] == "bot_remove" or
                    message["subtype"] == "channel_name" or
                    message["subtype"] == "channel_join" or
                    message["subtype"] == "channel_purpose" or
                    message["subtype"] == "group_name" or
                    message["subtype"] == "group_join" or
                    message["subtype"] == "group_purpose"):
                return txnId
            if message["subtype"] == "file_comment":
                # TODO migrate file_comments
                return txnId
        # ignore hidden messages
        if "hidden" in message:
            if message["hidden"] == True:
                return txnId
        if "user" in message: #TODO what messages have no user?
            if not message["user"] in userLUT:
                # ignore messages from bots
                return txnId
        else:
            log.info("Message without user")
            log.info(message)
        # list of subtypes
        '''
        bot_message A message was posted by an app or integration
        me_message A /me message was sent
        message_changed A message was changed
        message_deleted A message was deleted
        channel_join A member joined a channel
        channel_leave A member left a channel
        channel_topic A channel topic was updated
        channel_purpose A channel purpose was updated
        channel_name A channel was renamed
        channel_archive A channel was archived
        channel_unarchive A channel was unarchived
        group_join A member joined a group
        group_leave A member left a group
        group_topic A group topic was updated
        group_purpose A group purpose was updated
        group_name A group was renamed
        group_archive A group was archived
        group_unarchive A group was unarchived
        file_share A file was shared into a channel
        file_reply A reply was added to a file
        file_mention A file was mentioned in a channel
        pinned_item An item was pinned in a channel
        unpinned_item An item was unpinned from a channel
        '''
        body = message["text"]
        # TODO do not migrate empty messages?
        #if body == "":
        #
        #    return txnId
        # replace mentions
        body = body.replace("<!channel>", "@room");
        body = body.replace("<!here>", "@room");
        body = body.replace("<!everyone>", "@room");
        body = re.sub('<@[A-Z0-9]+>', replace_mention, body)
        if "files" in message:
            if "subtype" in message:
                log.info(message["subtype"])
                if message["subtype"] == "file_comment" or message["subtype"] == "thread_broadcast":
                    #TODO treat as reply
                    log.info("")
                else:
                    txnId = process_files(message["files"], matrix_room, userLUT[message["user"]], body, txnId, config)
            else:
                txnId = process_files(message["files"], matrix_room, userLUT[message["user"]], body, txnId, config)
        if "attachments" in message:
            if message["user"] in userLUT: # ignore attachments from bots
                txnId = process_attachments(message["attachments"], matrix_room, userLUT[message["user"]], body, txnId, config)
                for attachment in message["attachments"]:
                    if "is_share" in attachment and attachment["is_share"]:
                        # Quote the shared message's footer and text into the body.
                        if body:
                            body += "\n"
                        attachment_footer = "no footer"
                        if "footer" in attachment:
                            attachment_footer = attachment["footer"]
                        attachment_text = "no text"
                        if "text" in attachment:
                            attachment_text = attachment["text"]
                        body += "".join(["> _Shared (", attachment_footer, "):_ ", attachment_text, "\n"])
        if "replies" in message: # this is the parent of a thread
            is_thread = True
            previous_message = None
            # Map each reply to the message it should relate to (parent or,
            # with "threads-reply-to-previous", the previous reply).
            for reply in message["replies"]:
                if "user" in message and "ts" in message:
                    first_message = message["user"]+message["ts"]
                    current_message = reply["user"]+reply["ts"]
                    if not previous_message:
                        previous_message = first_message
                    replyLUT[current_message] = previous_message
                    if config_yaml["threads-reply-to-previous"]:
                        previous_message = current_message
        # replys / threading
        if "thread_ts" in message and "parent_user_id" in message and not "replies" in message: # this message is a reply to another message
            is_reply = True
            if not message["user"]+message["ts"] in replyLUT:
                # seems like we don't know the thread yet, save event for later
                if not is_later:
                    later.append(message)
                return txnId
            slack_event_id = replyLUT[message["user"]+message["ts"]]
            matrix_event_id = eventLUT[slack_event_id]
        # TODO pinned / stared items?
        # replace emojis
        body = emojize(body, use_aliases=True)
        # TODO some URLs with special characters (e.g. _ ) are parsed wrong
        formatted_body = slackdown.render(body)
        if not is_reply:
            content = {
                "body": body,
                "msgtype": "m.text",
                "format": "org.matrix.custom.html",
                "formatted_body": formatted_body,
            }
        else:
            # Rich reply: prepend the quoted-fallback representations.
            replyEvent = threadLUT[message["parent_user_id"]+message["thread_ts"]]
            fallbackHtml = getFallbackHtml(matrix_room, replyEvent);
            fallbackText = getFallbackText(replyEvent);
            body = fallbackText + "\n\n" + body
            formatted_body = fallbackHtml + formatted_body
            content = {
                "m.relates_to": {
                    "m.in_reply_to": {
                        "event_id": matrix_event_id,
                    },
                },
                "msgtype": "m.text",
                "body": body,
                "format": "org.matrix.custom.html",
                "formatted_body": formatted_body,
            }
        # send message
        # Slack ts is seconds with fractional part; convert to milliseconds.
        ts = message["ts"].replace(".", "")[:-3]
        res = send_event(config, content, matrix_room, userLUT[message["user"]], "m.room.message", txnId, ts)
        # save event id
        if res == False:
            log.info("ERROR while sending event '" + message["user"] + " " + message["ts"] + "'")
            log.error("ERROR body {}".format(body))
            log.error("ERROR formatted_body {}".format(formatted_body))
        else:
            _content = json.loads(res.content)
            # use "user" combined with "ts" as id like Slack does as "client_msg_id" is not always set
            if "user" in message and "ts" in message:
                eventLUT[message["user"]+message["ts"]] = _content["event_id"]
            txnId = txnId + 1
            if is_thread:
                threadLUT[message["user"]+message["ts"]] = {"body": body, "formatted_body": formatted_body, "sender": userLUT[message["user"]], "event_id": _content["event_id"]}
            # handle reactions
            if "reactions" in message:
                roomId = matrix_room
                eventId = eventLUT[message["user"]+message["ts"]]
                for reaction in message["reactions"]:
                    for user in reaction["users"]:
                        #log.info("Send reaction in room " + roomId)
                        send_reaction(config, roomId, eventId, emojize(reaction["name"], use_aliases=True), userLUT[user], txnId)
                        txnId = txnId + 1
    else:
        log.info("Ignoring message type " + message["type"])
    return txnId
def migrate_messages(fileList, matrix_room, config, tick, log):
    """Replay all Slack messages from *fileList* into *matrix_room*.

    *tick* is the per-file progress increment for the manual progress bar.
    Messages postponed by parse_and_send_message (unknown thread parent)
    are retried in a second pass, then the ``later`` queue is cleared.

    NOTE(review): both ``except`` clauses are broad; if the very first
    file fails to load, ``messageData`` is unbound and the following loop
    raises NameError.
    """
    log.debug('start migration of messages for matrix room: {}'.format(matrix_room))
    global later
    archive = zipfile.ZipFile(config["zipfile"], 'r')
    txnId = 1
    progress = 0
    with alive_bar(bar = 'checks', spinner = 'waves2', manual=True) as bar:
        for file in fileList:
            log.debug("prcessing file {}".format(file))
            try:
                fileData = archive.open(file)
                messageData = json.load(fileData)
            except:
                log.info("Warning: Couldn't load data from file " + file + " in archive. Skipping this file.")
            for message in messageData:
                try:
                    txnId = parse_and_send_message(config, message, matrix_room, txnId, False, log)
                except:
                    log.error(
                        "Warning: Couldn't send message: {} to matrix_room {} id:{}".format(message, matrix_room, txnId)
                    )
            progress = progress + tick
            # update_progress(progress)
            bar(progress)
    # process postponed messages
    for message in later:
        txnId = parse_and_send_message(config, message, matrix_room, txnId, True, log)
    # clean up postponed messages
    later = []
def kick_imported_users(server_location, admin_user, access_token, tick):
    """Kick every migrated user out of every migrated (non-DM) room.

    Iterates the module-level roomLUT (Slack room -> Matrix room id) and
    nameLUT (Matrix user ids), issuing one kick request per (room, user)
    pair and advancing the progress bar by `tick` per room.
    Returns False on a network-level failure, otherwise None.
    `admin_user` is currently unused by the body.
    """
    # NOTE(review): `log`, `roomLUT`, `nameLUT`, `requests`, `alive_bar` and
    # `traceback` are module-level names defined outside this excerpt.
    headers = {'Authorization': ' '.join(['Bearer', access_token])}
    progress = 0
    with alive_bar(spinner = 'triangles', manual=True) as bar:
        for room in roomLUT.values():
            url = "%s/_matrix/client/r0/rooms/%s/kick" % (server_location, room)
            for name in nameLUT.keys():
                data = {"user_id": name}
                try:
                    r = requests.post(url, json=data, headers=headers, verify=False)
                except requests.exceptions.RequestException as e:
                    # catastrophic error. bail.
                    log.error(
                        "Logging an uncaught exception {}".format(e),
                        exc_info=(traceback)
                    )
                    # log.debug("error creating room {}".format(body))
                    return False
                else:
                    if r.status_code != 200 and r.status_code != 201:
                        log.info("ERROR! Received %d %s" % (r.status_code, r.reason))
                        if 400 <= r.status_code < 500:
                            try:
                                log.info(r.json()["error"])
                            except Exception:
                                pass
            # one tick per fully processed room
            progress = progress + tick
            #update_progress(progress)
            bar(progress)
def main():
    """Drive the full Slack-to-Matrix migration.

    Logs in as the admin user, creates users/channels/groups/DMs from the
    Slack export, persists the lookup tables to run/luts.yaml, replays all
    messages, and optionally kicks the imported users afterwards. Relies on
    module-level LUTs (userLUT, nameLUT, roomLUT, roomLUT2, dmLUT), `later`,
    `read_luts` and `config_yaml` defined outside this excerpt.
    """
    logging.captureWarnings(True)
    log = logging.getLogger('SLACK.MIGRATE.MAIN')
    config = test_config(yaml)
    jsonFiles = loadZip(config)
    # login with admin user to gain access token
    admin_user, access_token = login(config["homeserver"])
    maxUploadSize = getMaxUploadSize(config, access_token)
    config["maxUploadSize"] = maxUploadSize
    config["admin_user"] = admin_user
    log.info("maxUploadSize {}".format(maxUploadSize))
    # NOTE(review): getMaxUploadSize is called *before* this failed-login
    # check, i.e. with access_token == False on failure.
    if access_token == False:
        log.info("ERROR! Admin user could not be logged in.")
        exit(1)
    # create users in matrix and match them to slack users
    if "users.json" in jsonFiles and not userLUT:
        log.info("Creating Users")
        userlist = migrate_users(jsonFiles["users.json"], config, access_token)
    # create rooms and match to channels
    # Slack channels
    if "channels.json" in jsonFiles and not roomLUT:
        log.info("Creating channels")
        roomlist_channels = migrate_rooms(jsonFiles["channels.json"], config, admin_user)
    # Slack groups
    if "groups.json" in jsonFiles and not roomLUT:
        log.info("Creating groups")
        roomlist_groups = migrate_rooms(jsonFiles["groups.json"], config, admin_user)
    # create DMs
    if "dms.json" in jsonFiles and not dmLUT:
        log.info("Creating DMS")
        roomlist_dms = migrate_dms(jsonFiles["dms.json"], config, admin_user)
    # write LUTs to file to be able to load from later if something goes wrong
    # NOTE(review): `userlist` is unbound here if users.json was absent while
    # read_luts is false.
    if not read_luts:
        data = dict(
            userLUT = userLUT,
            nameLUT = nameLUT,
            roomLUT = roomLUT,
            roomLUT2 = roomLUT2,
            dmLUT = dmLUT,
            users = userlist,
        )
        with open('run/luts.yaml', 'w') as outfile:
            yaml.dump(data, outfile, default_flow_style=False)
    # send events to rooms
    log.info("Migrating messages to rooms. This may take a while...")
    for slack_room, matrix_room in roomLUT.items():
        log = logging.getLogger('SLACK.MIGRATE.MESSAGES.{}'.format(roomLUT2[slack_room]))
        log.info("Migrating messages for room: " + roomLUT2[slack_room])
        fileList = sorted(loadZipFolder(config, roomLUT2[slack_room]))
        if fileList:
            tick = 1/len(fileList)
            migrate_messages(fileList, matrix_room, config, tick, log)
            # clean up postponed messages
            # NOTE(review): this binds a *local* `later`; the module-level
            # `later` is already cleared inside migrate_messages().
            later = []
    # send events to dms
    log.info("Migrating messages to DMs. This may take a while...")
    for slack_room, matrix_room in dmLUT.items():
        fileList = sorted(loadZipFolder(config, slack_room))
        if fileList:
            tick = 1/len(fileList)
            migrate_messages(fileList, matrix_room, config, tick, log)
            # clean up postponed messages
            later = []
    # kick imported users from non-dm rooms
    if config_yaml["kick-imported-users"]:
        log.info("Kicking imported users from rooms. This may take a while...")
        tick = 1/len(roomLUT)
        kick_imported_users(config["homeserver"], admin_user, access_token, tick)
# Run the migration when executed as a script.
if __name__ == "__main__":
    main()
| 35.971639 | 267 | 0.569105 |
9f9d27691f3e9870e23d0cc1b52403185cadeaba | 3,388 | py | Python | setup.py | PyUtilib/PyUtilib | d99406f2af1fb62268c34453a2fbe6bd4a7348f0 | [
"BSD-3-Clause"
] | 24 | 2016-04-02T10:00:02.000Z | 2021-03-02T16:40:18.000Z | setup.py | PyUtilib/PyUtilib | d99406f2af1fb62268c34453a2fbe6bd4a7348f0 | [
"BSD-3-Clause"
] | 105 | 2015-10-29T03:29:58.000Z | 2021-12-30T22:00:45.000Z | setup.py | PyUtilib/PyUtilib | d99406f2af1fb62268c34453a2fbe6bd4a7348f0 | [
"BSD-3-Clause"
] | 22 | 2016-01-21T15:35:25.000Z | 2021-05-15T20:17:44.000Z | # _________________________________________________________________________
#
# PyUtilib: A Python utility library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# _________________________________________________________________________
"""
Setup for PyUtilib package
"""
import os
from setuptools import setup, find_packages
def read(*rnames):
    """Return the text of a file relative to this script, minus leading badges.

    Lines are consumed until either a line containing 'COIN-OR' or the first
    non-blank line that is not a markdown badge ('[![') is reached; that line
    plus the remainder of the file is returned.
    """
    path = os.path.join(os.path.dirname(__file__), *rnames)
    with open(path) as handle:
        line = handle.readline()
        # Keep reading while the current line is neither the COIN-OR badge
        # nor real (non-blank, non-badge) content.
        while 'COIN-OR' not in line and not (line.strip() and '[![' not in line):
            line = handle.readline()
        return line + handle.read()
requires=[ 'nose', 'six' ]
setup(name="PyUtilib",
version='6.0.1.dev0',
maintainer='William E. Hart',
maintainer_email='wehart@sandia.gov',
url = 'https://github.com/PyUtilib/pyutilib',
license = 'BSD',
platforms = ["any"],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
description = 'PyUtilib: A collection of Python utilities',
long_description = read('README.md'),
long_description_content_type='text/markdown',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: Jython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'],
packages=find_packages(),
keywords=['utility'],
install_requires=requires,
entry_points="""
[nose.plugins.0.10]
nose.testdata = pyutilib.th.nose_testdata:TestData
nose.forcedgc = pyutilib.th.nose_gc:ForcedGC
nose.timeout = pyutilib.th.nose_timeout:TestTimeout
[console_scripts]
test.pyutilib = pyutilib.dev.runtests:runPyUtilibTests
lbin = pyutilib.dev.lbin:main
lpython = pyutilib.dev.lpython:main
pypi_downloads = pyutilib.dev.pypi_downloads:main
replaceCopyright = pyutilib.dev.replaceCopyright:main
checkCopyright = pyutilib.dev.checkCopyright:main
pyutilib_test_driver = pyutilib.autotest.driver:main
dispatch_srvr=pyutilib.pyro.dispatch_srvr:main
"""
)
| 39.858824 | 76 | 0.645514 |
cce3ac8194087917625821b84302b29b6337c46a | 419 | py | Python | web/src/schemas/database.py | disteLLL/saltyboy | d1c777c31d82f3b6e2126d8170446d9b028c37ab | [
"MIT"
] | null | null | null | web/src/schemas/database.py | disteLLL/saltyboy | d1c777c31d82f3b6e2126d8170446d9b028c37ab | [
"MIT"
] | null | null | null | web/src/schemas/database.py | disteLLL/saltyboy | d1c777c31d82f3b6e2126d8170446d9b028c37ab | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Dict
from dataclasses_jsonschema import JsonSchemaMixin
@dataclass
class DatabaseStatsBreakdown(JsonSchemaMixin):
    """Database stats broken down by tier"""
    # per-tier counts, keyed by tier name
    breakdown: Dict[str, int]
    # sum of the counts across all tiers
    total: int
@dataclass
class DatabaseStatsSchema(JsonSchemaMixin):
    """All database stats"""
    # match-count statistics broken down by tier
    matches: DatabaseStatsBreakdown
    # fighter-count statistics broken down by tier
    fighters: DatabaseStatsBreakdown
| 22.052632 | 50 | 0.78043 |
acd45a3b3b8102583502e830e5fb0b561dc0ad00 | 33 | py | Python | vnpy_uf/gateway/__init__.py | noranhe/vnpy_ufx | f1073ee7eaa3ad54688e9914ba7400fa0740de8b | [
"MIT"
] | null | null | null | vnpy_uf/gateway/__init__.py | noranhe/vnpy_ufx | f1073ee7eaa3ad54688e9914ba7400fa0740de8b | [
"MIT"
] | null | null | null | vnpy_uf/gateway/__init__.py | noranhe/vnpy_ufx | f1073ee7eaa3ad54688e9914ba7400fa0740de8b | [
"MIT"
] | 2 | 2021-12-09T08:29:47.000Z | 2022-03-30T02:48:05.000Z | from .uf_gateway import UfGateway | 33 | 33 | 0.878788 |
afde46c967149b3df1c8af1a38f438e66d659643 | 96 | py | Python | venv/lib/python3.8/site-packages/numpy/ma/tests/test_subclassing.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/numpy/ma/tests/test_subclassing.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/numpy/ma/tests/test_subclassing.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/43/c6/e2/4a12ac069cc9a585b45a973e38be6fb20b591e7c0d02cafd16e181e67e | 96 | 96 | 0.895833 |
44f247eff2c9daa7d69aa66ab04d10f87f7a431f | 865 | py | Python | extract_region.py | sontung/photorealistic-blocksworld | 38fd21a1ed756a752c4b0be56e25ca820be0ef17 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-05-19T10:58:54.000Z | 2021-05-19T10:58:54.000Z | extract_region.py | sontung/photorealistic-blocksworld | 38fd21a1ed756a752c4b0be56e25ca820be0ef17 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | extract_region.py | sontung/photorealistic-blocksworld | 38fd21a1ed756a752c4b0be56e25ca820be0ef17 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2020-07-30T09:45:09.000Z | 2020-07-30T09:45:09.000Z | #!/usr/bin/env python3
"Extract the regions from a scene json file"
import numpy as np
import json
import imageio
import os
def main(scenefile):
    """Crop every object's bounding box out of the scene image.

    Reads the scene description from *scenefile* (a .json file), loads the
    matching rendered PNG from ../images/ relative to the scene file, and
    writes one cropped PNG per object named <image>_<index>.png.
    """
    with open(scenefile, 'r') as handle:
        scene = json.load(handle)
    directory, filename = os.path.split(scenefile)
    stem, extension = os.path.splitext(filename)
    assert extension == ".json"
    image_stem = os.path.join("{}/../images/{}".format(directory, stem))
    image = imageio.imread(image_stem + ".png")
    for index, obj in enumerate(scene["objects"]):
        # bbox is (x1, y1, x2, y2); coordinates may be floats, so truncate.
        x1, y1, x2, y2 = tuple(obj["bbox"])
        crop = image[int(y1):int(y2), int(x1):int(x2), :]
        imageio.imwrite("{}_{}.png".format(image_stem, index), crop)
if __name__ == '__main__':
    import sys
    # Forward the command-line argument (the scene .json path) to main().
    main(*sys.argv[1:])
| 24.027778 | 70 | 0.6 |
eb9caaf92a7754dd7c22440ad54ab00eda3f75ef | 599 | pyde | Python | sketches/readLines/readLines.pyde | kantel/processingpy | 74aae222e46f68d1c8f06307aaede3cdae65c8ec | [
"MIT"
] | 4 | 2018-06-03T02:11:46.000Z | 2021-08-18T19:55:15.000Z | sketches/readLines/readLines.pyde | kantel/processingpy | 74aae222e46f68d1c8f06307aaede3cdae65c8ec | [
"MIT"
] | null | null | null | sketches/readLines/readLines.pyde | kantel/processingpy | 74aae222e46f68d1c8f06307aaede3cdae65c8ec | [
"MIT"
] | 3 | 2019-12-23T19:12:51.000Z | 2021-04-30T14:00:31.000Z | # Font-Test auf UTF-8; aus der Dokumentation:
# Starting with Processing release 0134,
# all files loaded and saved by the Processing API
# use UTF-8 encoding.
# Module-level font handle; assigned a real PFont inside setup() only locally
# (see NOTE below), so this stays None at module scope.
font = None
def setup():
    """Processing setup: create the canvas, load the font, render one frame."""
    size(500, 500)
    # fontList = PFont.list()
    # printArray(fontList)
    # NOTE(review): this binds a setup()-local `font`; the module-level `font`
    # above remains None. Harmless here since draw() never reads it.
    font = createFont("Palatino-Roman", 32)
    textFont(font)
    noLoop()
def draw():
    """Render a heading, then each line of boxer.txt in a wrapped text box."""
    background(30)
    textSize(32)
    u = 50
    text("Seltsame Zeichen", 20, u)
    u = 80
    textSize(24)
    # loadStrings reads the file as UTF-8 (Processing >= 0134, see header).
    lines = loadStrings("boxer.txt")
    for line in lines:
        print(line)
        # wrapped text box (x, y, width, height), advancing 80 px per line
        text(line, 20, u, 460, 500)
        u += 80
| 21.392857 | 50 | 0.607679 |
f7d7f8f74b7b93c56d06ef5cf5bb112089c1b9f3 | 7,972 | py | Python | tests/test_binning.py | NicolasHug/pygbm | 7891113aa074a0c33705b2d454b6609b3544eaf2 | [
"MIT"
] | 2 | 2019-07-12T23:57:16.000Z | 2020-03-13T20:49:46.000Z | tests/test_binning.py | NicolasHug/pygbm | 7891113aa074a0c33705b2d454b6609b3544eaf2 | [
"MIT"
] | null | null | null | tests/test_binning.py | NicolasHug/pygbm | 7891113aa074a0c33705b2d454b6609b3544eaf2 | [
"MIT"
] | null | null | null | import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import pytest
from pygbm.binning import BinMapper, _find_binning_thresholds, _map_to_bins
# Shared 1e6 x 2 fixture: feature 0 ~ N(0, 1), feature 1 ~ N(10, 0.01);
# cast to float32, the dtype the binning code is exercised with below.
DATA = np.random.RandomState(42).normal(
    loc=[0, 10], scale=[1, 0.01], size=(int(1e6), 2)
).astype(np.float32)
def test_find_binning_thresholds_regular_data():
    """Evenly spaced data yields evenly spaced bin thresholds."""
    data = np.linspace(0, 10, 1001).reshape(-1, 1)
    bin_thresholds = _find_binning_thresholds(data, max_bins=10)
    assert_allclose(bin_thresholds[0], [1, 2, 3, 4, 5, 6, 7, 8, 9])
    bin_thresholds = _find_binning_thresholds(data, max_bins=5)
    assert_allclose(bin_thresholds[0], [2, 4, 6, 8])
def test_find_binning_thresholds_small_regular_data():
    """With max_bins >= n_samples, thresholds land midway between samples."""
    data = np.linspace(0, 10, 11).reshape(-1, 1)
    bin_thresholds = _find_binning_thresholds(data, max_bins=5)
    assert_allclose(bin_thresholds[0], [2, 4, 6, 8])
    bin_thresholds = _find_binning_thresholds(data, max_bins=10)
    assert_allclose(bin_thresholds[0], [1, 2, 3, 4, 5, 6, 7, 8, 9])
    bin_thresholds = _find_binning_thresholds(data, max_bins=11)
    assert_allclose(bin_thresholds[0], np.arange(10) + .5)
    bin_thresholds = _find_binning_thresholds(data, max_bins=255)
    assert_allclose(bin_thresholds[0], np.arange(10) + .5)
def test_find_binning_thresholds_random_data():
    """Default settings: one array of 255 thresholds per feature, spot-checked
    against the known quantiles of the Gaussian fixture."""
    bin_thresholds = _find_binning_thresholds(DATA, random_state=0)
    assert len(bin_thresholds) == 2
    for i in range(len(bin_thresholds)):
        assert bin_thresholds[i].shape == (255,)  # 256 - 1
        assert bin_thresholds[i].dtype == DATA.dtype
    assert_allclose(bin_thresholds[0][[64, 128, 192]],
                    np.array([-0.7, 0.0, 0.7]), atol=1e-1)
    assert_allclose(bin_thresholds[1][[64, 128, 192]],
                    np.array([9.99, 10.00, 10.01]), atol=1e-2)
def test_find_binning_thresholds_low_n_bins():
    """max_bins=128 yields 127 thresholds per feature."""
    bin_thresholds = _find_binning_thresholds(DATA, max_bins=128,
                                              random_state=0)
    assert len(bin_thresholds) == 2
    for i in range(len(bin_thresholds)):
        assert bin_thresholds[i].shape == (127,)  # 128 - 1
        assert bin_thresholds[i].dtype == DATA.dtype
def test_find_binning_thresholds_invalid_n_bins():
    """Requesting more bins than fit in the binned dtype must raise."""
    with pytest.raises(ValueError):
        _find_binning_thresholds(DATA, max_bins=1024)
@pytest.mark.parametrize('n_bins', [16, 128, 256])
def test_map_to_bins(n_bins):
    """_map_to_bins returns F-contiguous uint8 codes; per-feature minima map
    to bin 0 and maxima to bin n_bins - 1."""
    bin_thresholds = _find_binning_thresholds(DATA, max_bins=n_bins,
                                              random_state=0)
    binned = _map_to_bins(DATA, bin_thresholds)
    assert binned.shape == DATA.shape
    assert binned.dtype == np.uint8
    assert binned.flags.f_contiguous
    min_indices = DATA.argmin(axis=0)
    max_indices = DATA.argmax(axis=0)
    for feature_idx, min_idx in enumerate(min_indices):
        assert binned[min_idx, feature_idx] == 0
    for feature_idx, max_idx in enumerate(max_indices):
        assert binned[max_idx, feature_idx] == n_bins - 1
@pytest.mark.parametrize("n_bins", [5, 10, 42])
def test_bin_mapper_random_data(n_bins):
    """BinMapper.fit/transform produces bins with roughly equal counts
    (within 5% of n_samples // n_bins) on the random fixture."""
    n_samples, n_features = DATA.shape
    expected_count_per_bin = n_samples // n_bins
    tol = int(0.05 * expected_count_per_bin)
    mapper = BinMapper(max_bins=n_bins, random_state=42).fit(DATA)
    binned = mapper.transform(DATA)
    assert binned.shape == (n_samples, n_features)
    assert binned.dtype == np.uint8
    assert_array_equal(binned.min(axis=0), np.array([0, 0]))
    assert_array_equal(binned.max(axis=0), np.array([n_bins - 1, n_bins - 1]))
    assert len(mapper.bin_thresholds_) == n_features
    for i in range(len(mapper.bin_thresholds_)):
        assert mapper.bin_thresholds_[i].shape == (n_bins - 1,)
        assert mapper.bin_thresholds_[i].dtype == DATA.dtype
    assert np.all(mapper.n_bins_per_feature_ == n_bins)
    # Check that the binned data is approximately balanced across bins.
    for feature_idx in range(n_features):
        for bin_idx in range(n_bins):
            count = (binned[:, feature_idx] == bin_idx).sum()
            assert abs(count - expected_count_per_bin) < tol
@pytest.mark.parametrize("n_samples, n_bins", [
    (5, 5),
    (5, 10),
    (5, 11),
    (42, 255)
])
def test_bin_mapper_small_random_data(n_samples, n_bins):
    """With n_samples <= n_bins, each value's bin is simply its rank."""
    data = np.random.RandomState(42).normal(size=n_samples).reshape(-1, 1)
    assert len(np.unique(data)) == n_samples
    mapper = BinMapper(max_bins=n_bins, random_state=42)
    binned = mapper.fit_transform(data)
    assert binned.shape == data.shape
    assert binned.dtype == np.uint8
    assert_array_equal(binned.ravel()[np.argsort(data.ravel())],
                       np.arange(n_samples))
@pytest.mark.parametrize("n_bins, n_distinct, multiplier", [
    (5, 5, 1),
    (5, 5, 3),
    (255, 12, 42),
])
def test_bin_mapper_identity_repeated_values(n_bins, n_distinct, multiplier):
    """Repeated integer values 0..n_distinct-1 bin onto themselves."""
    data = np.array(list(range(n_distinct)) * multiplier).reshape(-1, 1)
    binned = BinMapper(max_bins=n_bins).fit_transform(data)
    assert_array_equal(data, binned)
@pytest.mark.parametrize('n_distinct', [2, 7, 42])
def test_bin_mapper_repeated_values_invariance(n_distinct):
    """Binning of data with few distinct values does not depend on the
    max_bins budget, as long as the budget covers the distinct values."""
    rng = np.random.RandomState(42)
    distinct_values = rng.normal(size=n_distinct)
    assert len(np.unique(distinct_values)) == n_distinct
    repeated_indices = rng.randint(low=0, high=n_distinct, size=1000)
    data = distinct_values[repeated_indices]
    rng.shuffle(data)
    assert_array_equal(np.unique(data), np.sort(distinct_values))
    data = data.reshape(-1, 1)
    mapper_1 = BinMapper(max_bins=n_distinct)
    binned_1 = mapper_1.fit_transform(data)
    assert_array_equal(np.unique(binned_1[:, 0]), np.arange(n_distinct))
    # Adding more bins to the mapper yields the same results (same thresholds)
    mapper_2 = BinMapper(max_bins=min(256, n_distinct * 3))
    binned_2 = mapper_2.fit_transform(data)
    assert_allclose(mapper_1.bin_thresholds_[0], mapper_2.bin_thresholds_[0])
    assert_array_equal(binned_1, binned_2)
@pytest.mark.parametrize("n_bins, scale, offset", [
    (3, 2, -1),
    (42, 1, 0),
    (256, 0.3, 42),
])
def test_bin_mapper_identity_small(n_bins, scale, offset):
    """Affinely transformed 0..n_bins-1 data still bins to 0..n_bins-1."""
    data = np.arange(n_bins).reshape(-1, 1) * scale + offset
    binned = BinMapper(max_bins=n_bins).fit_transform(data)
    assert_array_equal(binned, np.arange(n_bins).reshape(-1, 1))
@pytest.mark.parametrize('n_bins_small, n_bins_large', [
    (2, 2),
    (3, 3),
    (4, 4),
    (42, 42),
    (256, 256),
    (5, 17),
    (42, 256),
])
def test_bin_mapper_idempotence(n_bins_small, n_bins_large):
    """Re-binning already-binned data with >= as many bins is a no-op."""
    assert n_bins_large >= n_bins_small
    data = np.random.RandomState(42).normal(size=30000).reshape(-1, 1)
    mapper_small = BinMapper(max_bins=n_bins_small)
    mapper_large = BinMapper(max_bins=n_bins_large)
    binned_small = mapper_small.fit_transform(data)
    binned_large = mapper_large.fit_transform(binned_small)
    assert_array_equal(binned_small, binned_large)
@pytest.mark.parametrize('max_bins', [10, 100, 256])
@pytest.mark.parametrize('diff', [-5, 0, 5])
def test_n_bins_per_feature(max_bins, diff):
    """Check that n_bins_per_feature_ is n_unique_values when
    n_unique_values <= max_bins, else max_bins.
    """
    n_unique_values = max_bins + diff
    X = list(range(n_unique_values)) * 2
    X = np.array(X).reshape(-1, 1)
    mapper = BinMapper(max_bins=max_bins).fit(X)
    assert np.all(mapper.n_bins_per_feature_ == min(max_bins, n_unique_values))
def test_subsample():
    """Subsampling must change the computed thresholds for every feature."""
    # Make sure bin thresholds are different when applying subsampling
    mapper_no_subsample = BinMapper(subsample=None, random_state=0).fit(DATA)
    mapper_subsample = BinMapper(subsample=256, random_state=0).fit(DATA)
    for feature in range(DATA.shape[1]):
        with pytest.raises(AssertionError):
            np.testing.assert_array_almost_equal(
                mapper_no_subsample.bin_thresholds_[feature],
                mapper_subsample.bin_thresholds_[feature],
                decimal=3
            )
| 36.072398 | 79 | 0.692549 |
00c64840e24b69d569122218d780b59810e266ad | 10,516 | py | Python | examples/research_projects/wav2vec2/run_asr.py | Sara-X/transformers | 6773fb5dccf88a2d6d250da2cdaaa1fa78a4f5c3 | [
"Apache-2.0"
] | 1 | 2020-08-09T15:59:41.000Z | 2020-08-09T15:59:41.000Z | examples/research_projects/wav2vec2/run_asr.py | Sara-X/transformers | 6773fb5dccf88a2d6d250da2cdaaa1fa78a4f5c3 | [
"Apache-2.0"
] | 25 | 2021-04-20T14:29:15.000Z | 2021-04-20T14:30:01.000Z | examples/research_projects/wav2vec2/run_asr.py | Sara-X/transformers | 6773fb5dccf88a2d6d250da2cdaaa1fa78a4f5c3 | [
"Apache-2.0"
] | 1 | 2021-04-20T14:09:17.000Z | 2021-04-20T14:09:17.000Z | #!/usr/bin/env python3
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torch.nn as nn
from packaging import version
import soundfile as sf
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
Wav2Vec2ForCTC,
Wav2Vec2Processor,
is_apex_available,
)
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # Required: hub identifier or local path of the checkpoint to fine-tune.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # Optional override of the default download cache location.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # When True (default), main() calls model.freeze_feature_extractor().
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """
    # Hub identifier of the dataset loaded via datasets.load_dataset().
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    # Forwarded as num_proc to datasets.map() during preprocessing in main().
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for proccessing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Labels are padded with the tokenizer (target processor), not the
        # feature extractor.
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                max_length=self.max_length_labels,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
class CTCTrainer(Trainer):
    """Trainer subclass that reduces the CTC loss according to the model
    config (``ctc_loss_reduction``) before backpropagation, including under
    ``nn.DataParallel`` (n_gpu > 1) where the per-replica losses are stacked.
    """
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        Subclass and override to inject custom behavior.
        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
        Return:
            :obj:`torch.Tensor`: The tensor with training loss on this batch.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            # Under nn.DataParallel the real model is wrapped as model.module.
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                # Normalize the summed loss by the number of non-padding
                # (label >= 0) target tokens; padding is marked with -100.
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                # BUGFIX: this branch previously read model.config, which on a
                # DataParallel wrapper raises AttributeError instead of the
                # intended ValueError; the config lives on model.module.
                raise ValueError(f"{model.module.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        # Backward pass through whichever mixed-precision/engine path is active.
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def main():
    """Fine-tune a wav2vec2 CTC model on a `datasets` speech corpus and
    evaluate with word error rate (WER)."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    model = Wav2Vec2ForCTC.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    processor = Wav2Vec2Processor.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    train_dataset = datasets.load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name
    )
    # NOTE(review): the evaluation split name is hard-coded to "validation".
    val_dataset = datasets.load_dataset(data_args.dataset_name, data_args.dataset_config_name, split="validation")
    wer_metric = datasets.load_metric("wer")
    def map_to_array(batch):
        # Decode the audio file referenced by each example into raw samples.
        speech_array, sampling_rate = sf.read(batch["file"])
        batch["speech"] = speech_array
        batch["sampling_rate"] = sampling_rate
        return batch
    train_dataset = train_dataset.map(map_to_array, remove_columns=["file"])
    val_dataset = val_dataset.map(map_to_array, remove_columns=["file"])
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values
        with processor.as_target_processor():
            batch["labels"] = processor(batch["text"]).input_ids
        return batch
    train_dataset = train_dataset.map(
        prepare_dataset,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    val_dataset = val_dataset.map(
        prepare_dataset,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
    def compute_metrics(pred):
        # Greedy CTC decoding: argmax over the vocabulary at each frame.
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        # -100 marks label padding (set by the collator); map it to id 0
        # before decoding.
        pred.label_ids[pred.label_ids == -100] = 0
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        tokenizer=processor.feature_extractor,
    )
    trainer.train()
# Script entry point.
if __name__ == "__main__":
    main()
| 37.29078 | 145 | 0.662895 |
44e336cd23811825cfa1cc68aea466c63154bed9 | 34,609 | py | Python | tensorflow/python/ops/gen_sparse_ops.py | shishaochen/TensorFlow-0.8-Win | 63221dfc4f1a1d064308e632ba12e6a54afe1fd8 | [
"Apache-2.0"
] | 1 | 2017-09-14T23:59:05.000Z | 2017-09-14T23:59:05.000Z | tensorflow/python/ops/gen_sparse_ops.py | shishaochen/TensorFlow-0.8-Win | 63221dfc4f1a1d064308e632ba12e6a54afe1fd8 | [
"Apache-2.0"
] | 1 | 2016-10-19T02:43:04.000Z | 2016-10-31T14:53:06.000Z | tensorflow/python/ops/gen_sparse_ops.py | shishaochen/TensorFlow-0.8-Win | 63221dfc4f1a1d064308e632ba12e6a54afe1fd8 | [
"Apache-2.0"
] | 8 | 2016-10-23T00:50:02.000Z | 2019-04-21T11:11:57.000Z | """Python wrappers around Brain.
This file is MACHINE GENERATED! Do not edit.
"""
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import op_def_library
def _deserialize_many_sparse(serialized_sparse, dtype, name=None):
  r"""Deserialize and concatenate `SparseTensors` from a serialized minibatch.

  The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
  `N` is the minibatch size and the rows correspond to packed outputs of
  `SerializeSparse`.  The ranks of the original `SparseTensor` objects
  must all match.  When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects
  (they have been concatenated along a new row dimension).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions.  Its first shape value is `N`, the minibatch
  size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order.  If this is not the case, after this
  step run `SparseReorder` to restore index ordering.

  For example, if the serialized input is a `[2 x 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

  Args:
    serialized_sparse: A `Tensor` of type `string`.
      2-D, The `N` serialized `SparseTensor` objects.
      Must have 3 columns.
    dtype: A `tf.DType`. The `dtype` of the serialized `SparseTensor` objects.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sparse_indices, sparse_values, sparse_shape).
    sparse_indices: A `Tensor` of type `int64`.
    sparse_values: A `Tensor` of type `dtype`.
    sparse_shape: A `Tensor` of type `int64`.
  """
  # Machine-generated dispatch into the registered "DeserializeManySparse" op.
  return _op_def_lib.apply_op("DeserializeManySparse",
                              serialized_sparse=serialized_sparse,
                              dtype=dtype, name=name)
def _serialize_many_sparse(sparse_indices, sparse_values, sparse_shape,
                           name=None):
  r"""Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.

  The input `SparseTensor` must have rank `R > 1`; its first dimension is the
  minibatch dimension and its elements must be sorted in increasing order of
  that dimension. Each row of the result holds a serialized `SparseTensor` of
  rank `R-1`. The minibatch size `N` is taken from `sparse_shape[0]`.

  Args:
    sparse_indices: An `int64` `Tensor`. 2-D `indices` of the minibatch
      `SparseTensor`.
    sparse_values: A `Tensor`. 1-D `values` of the minibatch `SparseTensor`.
    sparse_shape: An `int64` `Tensor`. 1-D `shape` of the minibatch
      `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SerializeManySparse", name=name,
                              sparse_indices=sparse_indices,
                              sparse_values=sparse_values,
                              sparse_shape=sparse_shape)
def _serialize_sparse(sparse_indices, sparse_values, sparse_shape, name=None):
  r"""Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.

  Args:
    sparse_indices: An `int64` `Tensor`. 2-D `indices` of the `SparseTensor`.
    sparse_values: A `Tensor`. 1-D `values` of the `SparseTensor`.
    sparse_shape: An `int64` `Tensor`. 1-D `shape` of the `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `string`.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SerializeSparse", name=name,
                              sparse_indices=sparse_indices,
                              sparse_values=sparse_values,
                              sparse_shape=sparse_shape)
def _sparse_add(a_indices, a_values, a_shape, b_indices, b_values, b_shape,
                thresh, name=None):
  r"""Adds two `SparseTensor` objects to produce another `SparseTensor`.

  Input indices are assumed to be in standard lexicographic order; if they are
  not, run `SparseReorder` before this step. By default, a pair of values that
  sums to zero still occupies an output slot with a stored zero. Passing a
  `thresh` drops entries whose sum magnitude is strictly smaller than it; with
  `thresh == 0` (the default) everything is kept, so real thresholding only
  happens for a positive value. In the shapes below, `nnz` is the count after
  taking `thresh` into account.

  Args:
    a_indices: An `int64` `Tensor`. 2-D indices of the first `SparseTensor`,
      a `[nnz, ndims]` matrix.
    a_values: A numeric `Tensor` (float/int/complex/quantized/half). 1-D
      values of the first `SparseTensor`, a `[nnz]` vector.
    a_shape: An `int64` `Tensor`. 1-D shape of the first `SparseTensor`, a
      `[ndims]` vector.
    b_indices: An `int64` `Tensor`. 2-D indices of the second `SparseTensor`.
    b_values: A `Tensor` of the same type as `a_values`. 1-D values of the
      second `SparseTensor`.
    b_shape: An `int64` `Tensor`. 1-D shape of the second `SparseTensor`.
    thresh: A real scalar `Tensor`. 0-D magnitude threshold that determines
      whether an output value/index pair takes space.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(sum_indices, sum_values, sum_shape)` with
    types (`int64`, same as `a_values`, `int64`).
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseAdd", name=name,
                              a_indices=a_indices, a_values=a_values,
                              a_shape=a_shape, b_indices=b_indices,
                              b_values=b_values, b_shape=b_shape,
                              thresh=thresh)
def _sparse_add_grad(backprop_val_grad, a_indices, b_indices, sum_indices,
                     name=None):
  r"""The gradient operator for the SparseAdd op.

  `SparseAdd` computes `A + B` with `A`, `B`, and the sum all represented as
  `SparseTensor` objects. Given the upstream gradient with respect to the
  non-empty values of the sum, this op produces the gradients with respect to
  the non-empty values of `A` and of `B`.

  Args:
    backprop_val_grad: A numeric `Tensor`. 1-D with shape `[nnz(sum)]`; the
      gradient with respect to the non-empty values of the sum.
    a_indices: An `int64` `Tensor`. 2-D indices of `A`, size `[nnz(A), ndims]`.
    b_indices: An `int64` `Tensor`. 2-D indices of `B`, size `[nnz(B), ndims]`.
    sum_indices: An `int64` `Tensor`. 2-D indices of the sum, size
      `[nnz(sum), ndims]`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(a_val_grad, b_val_grad)`, each of the same
    type as `backprop_val_grad`: 1-D gradients w.r.t. the non-empty values of
    `A` (shape `[nnz(A)]`) and `B` (shape `[nnz(B)]`).
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseAddGrad", name=name,
                              backprop_val_grad=backprop_val_grad,
                              a_indices=a_indices, b_indices=b_indices,
                              sum_indices=sum_indices)
def _sparse_concat(indices, values, shapes, concat_dim, name=None):
  r"""Concatenates a list of `SparseTensor` along the specified dimension.

  Concatenation is with respect to the dense versions of the inputs; each
  input is assumed to be a `SparseTensor` whose elements are ordered along
  increasing dimension number. All input shapes must match except along the
  concat dimension, and the `indices`, `values`, and `shapes` lists must have
  the same length. The output shape equals the inputs' except along the
  concat dimension, where it is the sum of the inputs' sizes, and the output
  elements are re-sorted to preserve ordering along increasing dimension
  number. The op runs in `O(M log M)` time, `M` being the total number of
  non-empty values across all inputs, because of the internal sort required
  to concatenate along an arbitrary dimension.

  For example, with `concat_dim = 1`, concatenating `[2, 3]` and `[2, 4]`
  inputs yields a `[2, 7]` output — graphically:

      [    a] concat [  d e  ] = [    a   d e  ]
      [b c  ]        [       ]   [b c          ]

  Args:
    indices: A list of at least 2 `int64` `Tensor` objects. 2-D indices of
      each input `SparseTensor`.
    values: A list of `Tensor` objects of one common type, as long as
      `indices`. 1-D non-empty values of each `SparseTensor`.
    shapes: A list of `int64` `Tensor` objects, as long as `indices`. 1-D
      shapes of each `SparseTensor`.
    concat_dim: An `int` that is `>= 0`. Dimension to concatenate along.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(output_indices, output_values,
    output_shape)`: the `int64` 2-D indices, the values (same type as
    `values`), and the `int64` 1-D shape of the concatenated `SparseTensor`.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseConcat", name=name,
                              indices=indices, values=values,
                              shapes=shapes, concat_dim=concat_dim)
def sparse_dense_cwise_div(sp_indices, sp_values, sp_shape, dense, name=None):
  r"""Component-wise divides a SparseTensor by a dense Tensor.

  *Limitation*: this op only broadcasts the dense side to the sparse side,
  not the other direction.

  Args:
    sp_indices: An `int64` `Tensor`. 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, possibly not in canonical ordering.
    sp_values: A numeric `Tensor`. 1-D; the `N` non-empty values
      corresponding to `sp_indices`.
    sp_shape: An `int64` `Tensor`. 1-D shape of the input SparseTensor.
    dense: A `Tensor` of the same type as `sp_values`. `R`-D dense operand.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `sp_values`: the 1-D `N` values that are
    operated on.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseDenseCwiseDiv", name=name,
                              sp_indices=sp_indices, sp_values=sp_values,
                              sp_shape=sp_shape, dense=dense)
def sparse_dense_cwise_mul(sp_indices, sp_values, sp_shape, dense, name=None):
  r"""Component-wise multiplies a SparseTensor by a dense Tensor.

  *Limitation*: this op only broadcasts the dense side to the sparse side,
  not the other direction.

  Args:
    sp_indices: An `int64` `Tensor`. 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, possibly not in canonical ordering.
    sp_values: A numeric `Tensor`. 1-D; the `N` non-empty values
      corresponding to `sp_indices`.
    sp_shape: An `int64` `Tensor`. 1-D shape of the input SparseTensor.
    dense: A `Tensor` of the same type as `sp_values`. `R`-D dense operand.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `sp_values`: the 1-D `N` values that are
    operated on.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseDenseCwiseMul", name=name,
                              sp_indices=sp_indices, sp_values=sp_values,
                              sp_shape=sp_shape, dense=dense)
def sparse_reduce_sum(input_indices, input_values, input_shape,
                      reduction_axes, keep_dims=None, name=None):
  r"""Computes the sum of elements across dimensions of a SparseTensor.

  Sparse counterpart to `tf.reduce_sum()`; note that it returns a dense
  `Tensor` rather than a sparse one. The input is reduced along the
  dimensions listed in `reduction_axes`. Unless `keep_dims` is true, the rank
  drops by 1 for each entry in `reduction_axes`; with `keep_dims` true, the
  reduced dimensions are retained with length 1. If `reduction_axes` is
  empty, all dimensions are reduced and a single-element tensor is returned.

  Args:
    input_indices: An `int64` `Tensor`. 2-D `N x R` matrix of indices of
      non-empty values in a SparseTensor, possibly not in canonical ordering.
    input_values: A numeric `Tensor`. 1-D; the `N` non-empty values
      corresponding to `input_indices`.
    input_shape: An `int64` `Tensor`. 1-D shape of the input SparseTensor.
    reduction_axes: An `int32` `Tensor`. 1-D length-`K` vector of reduction
      axes.
    keep_dims: An optional `bool`, defaulting to `False`. If true, retain
      reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `input_values`: the `R-K`-D reduced
    tensor.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseReduceSum", name=name,
                              input_indices=input_indices,
                              input_values=input_values,
                              input_shape=input_shape,
                              reduction_axes=reduction_axes,
                              keep_dims=keep_dims)
def _sparse_reorder(input_indices, input_values, input_shape, name=None):
  r"""Reorders a SparseTensor into the canonical, row-major ordering.

  By convention all sparse ops preserve canonical ordering along increasing
  dimension number; ordering is only ever violated by manual manipulation of
  the indices and values vectors. Reordering never changes the shape. For a
  tensor of rank `R` with `N` non-empty values, `input_indices` has shape
  `[N, R]`, `input_values` has length `N`, and `input_shape` has length `R`.

  Args:
    input_indices: An `int64` `Tensor`. 2-D `N x R` matrix of indices of
      non-empty values, possibly not in canonical ordering.
    input_values: A `Tensor`. 1-D; the `N` non-empty values corresponding to
      `input_indices`.
    input_shape: An `int64` `Tensor`. 1-D shape of the input SparseTensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(output_indices, output_values)`: the same
    `int64` indices in canonical row-major ordering, and the matching 1-D
    values (same type as `input_values`).
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseReorder", name=name,
                              input_indices=input_indices,
                              input_values=input_values,
                              input_shape=input_shape)
def _sparse_split(split_dim, indices, values, shape, num_split, name=None):
  r"""Split a `SparseTensor` into `num_split` tensors along one dimension.

  If `shape[split_dim]` is not an integer multiple of `num_split`, the first
  `shape[split_dim] % num_split` slices each get one extra dimension. For
  example, with `split_dim = 1` and `num_split = 2`, a `[2, 7]` input

      [    a   d e  ]
      [b c          ]

  is split into a `[2, 4]` tensor holding `a`, `b`, `c` and a `[2, 3]`
  tensor holding `d`, `e`.

  Args:
    split_dim: An `int64` `Tensor`. 0-D dimension to split along; must be in
      the range `[0, rank(shape))`.
    indices: An `int64` `Tensor`. 2-D indices of the sparse tensor.
    values: A `Tensor`. 1-D values of the sparse tensor.
    shape: An `int64` `Tensor`. 1-D shape of the sparse tensor.
    num_split: An `int` that is `>= 1`. The number of ways to split.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects `(output_indices, output_values,
    output_shape)`: lists of `num_split` tensors holding, respectively, the
    `int64` indices, the values (same type as `values`), and the `int64`
    shapes of the output sparse tensors.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseSplit", name=name,
                              split_dim=split_dim, indices=indices,
                              values=values, shape=shape,
                              num_split=num_split)
def _sparse_tensor_dense_add(a_indices, a_values, a_shape, b, name=None):
  r"""Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.

  This op does not require `a_indices` to be sorted in standard
  lexicographic order.

  Args:
    a_indices: An `int32` or `int64` `Tensor`. 2-D indices of the
      `SparseTensor`, with shape `[nnz, ndims]`.
    a_values: A numeric `Tensor`. 1-D values of the `SparseTensor`, with
      shape `[nnz]`.
    a_shape: A `Tensor` of the same type as `a_indices`. 1-D shape of the
      `SparseTensor`, with shape `[ndims]`.
    b: A `Tensor` of the same type as `a_values`. `ndims`-D dense tensor with
      shape `a_shape`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a_values`.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseTensorDenseAdd", name=name,
                              a_indices=a_indices, a_values=a_values,
                              a_shape=a_shape, b=b)
def _sparse_tensor_dense_mat_mul(a_indices, a_values, a_shape, b,
                                 adjoint_a=None, adjoint_b=None, name=None):
  r"""Multiply SparseTensor (of rank 2) "A" by dense matrix "B".

  No validity checking is performed on the indices of A; for optimal
  behavior, A should be sorted in lexicographically increasing order when
  `adjoint_a` is false (use `SparseReorder` if unsure), or in order of
  increasing dimension 1 ("column major") when `adjoint_a` is true.

  Args:
    a_indices: An `int64` `Tensor`. 2-D indices of the `SparseTensor`, a
      `[nnz, 2]` matrix.
    a_values: A `Tensor`. 1-D values of the `SparseTensor`, a `[nnz]` vector.
    a_shape: An `int64` `Tensor`. 1-D shape of the `SparseTensor`, a `[2]`
      vector.
    b: A `Tensor` of the same type as `a_values`. 2-D dense matrix.
    adjoint_a: An optional `bool`, defaulting to `False`. Use the adjoint of
      A in the multiply: transpose(conj(A)) if A is complex, else
      transpose(A).
    adjoint_b: An optional `bool`, defaulting to `False`. Use the adjoint of
      B in the multiply: transpose(conj(B)) if B is complex, else
      transpose(B).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a_values`.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseTensorDenseMatMul", name=name,
                              a_indices=a_indices, a_values=a_values,
                              a_shape=a_shape, b=b,
                              adjoint_a=adjoint_a, adjoint_b=adjoint_b)
def _sparse_to_dense(sparse_indices, output_shape, sparse_values,
                     default_value, validate_indices=None, name=None):
  r"""Converts a sparse representation into a dense tensor.

  Builds an array `dense` with shape `output_shape` such that

  ```prettyprint
  # If sparse_indices is scalar
  dense[i] = (i == sparse_indices ? sparse_values : default_value)
  # If sparse_indices is a vector, then for each i
  dense[sparse_indices[i]] = sparse_values[i]
  # If sparse_indices is an n by d matrix, then for each i in [0, n)
  dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
  ```

  Every other position in `dense` holds `default_value`. If `sparse_values`
  is a scalar, all sparse indices receive that single value. Indices should
  be sorted in lexicographic order and contain no repeats; when
  `validate_indices` is true these properties are checked during execution.

  Args:
    sparse_indices: An `int32` or `int64` `Tensor`. 0-D, 1-D, or 2-D;
      `sparse_indices[i]` is the complete index where `sparse_values[i]` is
      placed.
    output_shape: A `Tensor` of the same type as `sparse_indices`. 1-D shape
      of the dense output tensor.
    sparse_values: A `Tensor`. 1-D values for each row of `sparse_indices`,
      or a scalar applied to all sparse indices.
    default_value: A `Tensor` of the same type as `sparse_values`. Scalar
      value for indices not listed in `sparse_indices`.
    validate_indices: An optional `bool`, defaulting to `True`. If true,
      indices are checked for sortedness and uniqueness.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `sparse_values`: the dense output tensor
    of shape `output_shape`.
  """
  # Generated wrapper: dispatch directly to the registered op.
  return _op_def_lib.apply_op("SparseToDense", name=name,
                              sparse_indices=sparse_indices,
                              output_shape=output_shape,
                              sparse_values=sparse_values,
                              default_value=default_value,
                              validate_indices=validate_indices)
def _InitOpDefLibrary():
  """Parse the embedded ascii OpList, register it, and return an OpDefLibrary.

  The wrapper functions in this module route their calls through the
  returned library object.
  """
  parsed_op_list = op_def_pb2.OpList()
  # The op definitions are stored as protobuf text on the function object.
  text_format.Merge(_InitOpDefLibrary.op_list_ascii, parsed_op_list)
  op_def_registry.register_op_list(parsed_op_list)
  library = op_def_library.OpDefLibrary()
  library.add_op_list(parsed_op_list)
  return library
_InitOpDefLibrary.op_list_ascii = """op {
name: "DeserializeManySparse"
input_arg {
name: "serialized_sparse"
type: DT_STRING
}
output_arg {
name: "sparse_indices"
type: DT_INT64
}
output_arg {
name: "sparse_values"
type_attr: "dtype"
}
output_arg {
name: "sparse_shape"
type: DT_INT64
}
attr {
name: "dtype"
type: "type"
}
}
op {
name: "SerializeManySparse"
input_arg {
name: "sparse_indices"
type: DT_INT64
}
input_arg {
name: "sparse_values"
type_attr: "T"
}
input_arg {
name: "sparse_shape"
type: DT_INT64
}
output_arg {
name: "serialized_sparse"
type: DT_STRING
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SerializeSparse"
input_arg {
name: "sparse_indices"
type: DT_INT64
}
input_arg {
name: "sparse_values"
type_attr: "T"
}
input_arg {
name: "sparse_shape"
type: DT_INT64
}
output_arg {
name: "serialized_sparse"
type: DT_STRING
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SparseAdd"
input_arg {
name: "a_indices"
type: DT_INT64
}
input_arg {
name: "a_values"
type_attr: "T"
}
input_arg {
name: "a_shape"
type: DT_INT64
}
input_arg {
name: "b_indices"
type: DT_INT64
}
input_arg {
name: "b_values"
type_attr: "T"
}
input_arg {
name: "b_shape"
type: DT_INT64
}
input_arg {
name: "thresh"
type_attr: "Treal"
}
output_arg {
name: "sum_indices"
type: DT_INT64
}
output_arg {
name: "sum_values"
type_attr: "T"
}
output_arg {
name: "sum_shape"
type: DT_INT64
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Treal"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT32
type: DT_INT64
type: DT_UINT8
type: DT_INT16
type: DT_INT8
type: DT_UINT16
type: DT_HALF
}
}
}
}
op {
name: "SparseAddGrad"
input_arg {
name: "backprop_val_grad"
type_attr: "T"
}
input_arg {
name: "a_indices"
type: DT_INT64
}
input_arg {
name: "b_indices"
type: DT_INT64
}
input_arg {
name: "sum_indices"
type: DT_INT64
}
output_arg {
name: "a_val_grad"
type_attr: "T"
}
output_arg {
name: "b_val_grad"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseConcat"
input_arg {
name: "indices"
type: DT_INT64
number_attr: "N"
}
input_arg {
name: "values"
type_attr: "T"
number_attr: "N"
}
input_arg {
name: "shapes"
type: DT_INT64
number_attr: "N"
}
output_arg {
name: "output_indices"
type: DT_INT64
}
output_arg {
name: "output_values"
type_attr: "T"
}
output_arg {
name: "output_shape"
type: DT_INT64
}
attr {
name: "concat_dim"
type: "int"
has_minimum: true
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 2
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SparseDenseCwiseDiv"
input_arg {
name: "sp_indices"
type: DT_INT64
}
input_arg {
name: "sp_values"
type_attr: "T"
}
input_arg {
name: "sp_shape"
type: DT_INT64
}
input_arg {
name: "dense"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseDenseCwiseMul"
input_arg {
name: "sp_indices"
type: DT_INT64
}
input_arg {
name: "sp_values"
type_attr: "T"
}
input_arg {
name: "sp_shape"
type: DT_INT64
}
input_arg {
name: "dense"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseReduceSum"
input_arg {
name: "input_indices"
type: DT_INT64
}
input_arg {
name: "input_values"
type_attr: "T"
}
input_arg {
name: "input_shape"
type: DT_INT64
}
input_arg {
name: "reduction_axes"
type: DT_INT32
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "keep_dims"
type: "bool"
default_value {
b: false
}
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "SparseReorder"
input_arg {
name: "input_indices"
type: DT_INT64
}
input_arg {
name: "input_values"
type_attr: "T"
}
input_arg {
name: "input_shape"
type: DT_INT64
}
output_arg {
name: "output_indices"
type: DT_INT64
}
output_arg {
name: "output_values"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SparseSplit"
input_arg {
name: "split_dim"
type: DT_INT64
}
input_arg {
name: "indices"
type: DT_INT64
}
input_arg {
name: "values"
type_attr: "T"
}
input_arg {
name: "shape"
type: DT_INT64
}
output_arg {
name: "output_indices"
type: DT_INT64
number_attr: "num_split"
}
output_arg {
name: "output_values"
type_attr: "T"
number_attr: "num_split"
}
output_arg {
name: "output_shape"
type: DT_INT64
number_attr: "num_split"
}
attr {
name: "num_split"
type: "int"
has_minimum: true
minimum: 1
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SparseTensorDenseAdd"
input_arg {
name: "a_indices"
type_attr: "Tindices"
}
input_arg {
name: "a_values"
type_attr: "T"
}
input_arg {
name: "a_shape"
type_attr: "Tindices"
}
input_arg {
name: "b"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "SparseTensorDenseMatMul"
input_arg {
name: "a_indices"
type: DT_INT64
}
input_arg {
name: "a_values"
type_attr: "T"
}
input_arg {
name: "a_shape"
type: DT_INT64
}
input_arg {
name: "b"
type_attr: "T"
}
output_arg {
name: "product"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "adjoint_a"
type: "bool"
default_value {
b: false
}
}
attr {
name: "adjoint_b"
type: "bool"
default_value {
b: false
}
}
}
op {
name: "SparseToDense"
input_arg {
name: "sparse_indices"
type_attr: "Tindices"
}
input_arg {
name: "output_shape"
type_attr: "Tindices"
}
input_arg {
name: "sparse_values"
type_attr: "T"
}
input_arg {
name: "default_value"
type_attr: "T"
}
output_arg {
name: "dense"
type_attr: "T"
}
attr {
name: "validate_indices"
type: "bool"
default_value {
b: true
}
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
"""
# Module-level singleton used by every generated op wrapper above.
_op_def_lib = _InitOpDefLibrary()
| 29.379457 | 210 | 0.628247 |
ae20f7a42f03b4e1d3a639e4df2b7844e14f92cc | 3,777 | py | Python | src/configurationparser.py | sajtizsolt/dumas | 4b7e307535bcc93a75784449bc44055d6dd0730b | [
"MIT"
] | 3 | 2021-08-17T08:14:40.000Z | 2021-09-05T10:21:11.000Z | src/configurationparser.py | sajtizsolt/dumas | 4b7e307535bcc93a75784449bc44055d6dd0730b | [
"MIT"
] | null | null | null | src/configurationparser.py | sajtizsolt/dumas | 4b7e307535bcc93a75784449bc44055d6dd0730b | [
"MIT"
] | null | null | null | from argumentparser import ArgumentParser
from configuration import Configuration
from util import print_exception_and_exit, print_message_and_exit
import json, os
CONFIGURATION_JSON = """
{{
"bot": {{
"app": {},
"token": "{}"
}},
"channel": {{
"sources": {},
"target": {}
}},
"message": {{
"farewell": "{}",
"frequency": {},
"length": {},
"limit": {},
"warning": "{}",
"welcome": "{}"
}}
}}"""
class ConfigurationParser:
    """Reads, validates and writes the JSON configuration consumed by the bot.

    All methods are static; errors in the configuration terminate the process
    via the `util` exit helpers rather than raising to the caller.
    """

    MISSING_KEY_MESSAGE = """
    Configuration file does not contain the following key: {}
    """

    @staticmethod
    def get_value_if_key_exists(dictionary, key_with_dots):
        """Resolve a dotted key path (e.g. 'bot.token') inside a nested dict.

        Exits the process with MISSING_KEY_MESSAGE if any segment is absent;
        otherwise returns the value found at the end of the path.
        """
        try:
            keys = key_with_dots.split('.')
            for key in keys:
                if key in dictionary:
                    dictionary = dictionary[key]
                else:
                    raise KeyError()
            return dictionary
        except KeyError:
            print_message_and_exit(ConfigurationParser.MISSING_KEY_MESSAGE.format(key_with_dots))

    @staticmethod
    def verify_configuration_file(configuration_file_path):
        """Parse the file at the given path, checking every required key.

        Returns a fully-populated Configuration on success; exits the process
        if the path is invalid, the JSON is malformed, or a key is missing.
        """
        try:
            if not os.path.isfile(configuration_file_path):
                raise ValueError(ArgumentParser.INVALID_CONFIGURATION_FILE_PATH)
            # `with` guarantees the file handle is closed even when json.load
            # raises; the previous explicit close() was skipped on parse errors.
            with open(configuration_file_path, encoding="utf-8") as configuration_file:
                data = json.load(configuration_file)
            return Configuration(
                bot_app = ConfigurationParser.get_value_if_key_exists(data, 'bot.app'),
                bot_token = ConfigurationParser.get_value_if_key_exists(data, 'bot.token'),
                channel_sources = ConfigurationParser.get_value_if_key_exists(data, 'channel.sources'),
                channel_target = ConfigurationParser.get_value_if_key_exists(data, 'channel.target'),
                message_farewell = ConfigurationParser.get_value_if_key_exists(data, 'message.farewell'),
                message_frequency = ConfigurationParser.get_value_if_key_exists(data, 'message.frequency'),
                message_length = ConfigurationParser.get_value_if_key_exists(data, 'message.length'),
                message_limit = ConfigurationParser.get_value_if_key_exists(data, 'message.limit'),
                message_warning = ConfigurationParser.get_value_if_key_exists(data, 'message.warning'),
                message_welcome = ConfigurationParser.get_value_if_key_exists(data, 'message.welcome'),
            )
        except ValueError:
            # json.JSONDecodeError subclasses ValueError, so malformed JSON
            # lands here as well as an invalid path.
            print_exception_and_exit()

    @staticmethod
    def get_json(configuration):
        """Render `configuration` into the CONFIGURATION_JSON template.

        The bot token is redacted so the output is safe to display or log.
        """
        return CONFIGURATION_JSON.format(
            configuration.bot_app,
            "Secret token!",
            configuration.channel_sources,
            configuration.channel_target,
            configuration.message_farewell,
            configuration.message_frequency,
            configuration.message_length,
            configuration.message_limit,
            configuration.message_warning,
            configuration.message_welcome
        )

    @staticmethod
    def read_configuration(configuration_file_path):
        """Load a Configuration from the JSON file at the given path.

        Unlike verify_configuration_file, keys are accessed directly, so a
        missing key raises KeyError instead of exiting gracefully.
        """
        with open(configuration_file_path, encoding="utf-8") as configuration_file:
            data = json.load(configuration_file)
        return Configuration(
            bot_app = data['bot']['app'],
            bot_token = data['bot']['token'],
            channel_sources = data['channel']['sources'],
            channel_target = data['channel']['target'],
            message_farewell = data['message']['farewell'],
            message_frequency = data['message']['frequency'],
            message_length = data['message']['length'],
            message_limit = data['message']['limit'],
            message_warning = data['message']['warning'],
            message_welcome = data['message']['welcome']
        )

    @staticmethod
    def write_configuration_to_file(path_to_write, configuration):
        """Serialize `configuration` (token redacted) to the given path."""
        with open(path_to_write, 'w', encoding="utf-8") as configuration_file:
            configuration_file.write(ConfigurationParser.get_json(configuration))
| 35.299065 | 99 | 0.703998 |
4cb746e2c1ba1454d68056183ad7eca597aea5b8 | 1,729 | py | Python | python/batch-predictor/merlinpyspark/sink.py | ashwinath/merlin | 087a7fa6fb21e4c771d64418bd58873175226ca1 | [
"Apache-2.0"
] | 97 | 2020-10-15T08:03:56.000Z | 2022-03-31T22:30:59.000Z | python/batch-predictor/merlinpyspark/sink.py | ibnummuhammad/merlin | acf10a350bcacfdfe67f7020d535467b71ff1d89 | [
"Apache-2.0"
] | 91 | 2020-10-26T03:15:27.000Z | 2022-03-31T10:19:55.000Z | python/batch-predictor/merlinpyspark/sink.py | ibnummuhammad/merlin | acf10a350bcacfdfe67f7020d535467b71ff1d89 | [
"Apache-2.0"
] | 26 | 2020-10-21T03:53:36.000Z | 2022-03-16T06:43:15.000Z | # Copyright 2020 The Merlin Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from pyspark.sql import DataFrame
from merlinpyspark.config import SinkConfig, BigQuerySinkConfig
def create_sink(sink_config: SinkConfig) -> "Sink":
    """Factory: build the Sink implementation that matches *sink_config*.

    Raises:
        ValueError: if the sink type is unknown, or the config object's
            concrete class does not match its declared type.
    """
    sink_type = sink_config.sink_type()
    if sink_type != BigQuerySinkConfig.TYPE:
        raise ValueError(f"sink type is not implemented: {sink_type}")
    if not isinstance(sink_config, BigQuerySinkConfig):
        raise ValueError("sink_config is not BigQuerySink")
    return BigQuerySink(sink_config)
class Sink(ABC):
    """Abstract destination for prediction results."""

    @abstractmethod
    def save(self, df):
        """Persist the given DataFrame to this sink's destination."""
class BigQuerySink(Sink):
    """Sink that writes a Spark DataFrame to a BigQuery table."""

    # Spark data-source format name for the BigQuery connector.
    WRITE_FORMAT = "bigquery"
    # Connector option keys.
    OPTION_TABLE = "table"
    OPTION_STAGING_BUCKET = "temporaryGcsBucket"

    def __init__(self, config: BigQuerySinkConfig):
        self._config = config

    def save(self, df: DataFrame):
        # Data is staged in the GCS bucket before loading into BigQuery;
        # any extra connector options from the config are applied last so
        # they can override the explicit ones above.
        df.write \
            .mode(self._config.save_mode()) \
            .format(self.WRITE_FORMAT) \
            .option(self.OPTION_TABLE, self._config.table()) \
            .option(self.OPTION_STAGING_BUCKET, self._config.staging_bucket()) \
            .options(**self._config.options()) \
            .save()
| 32.018519 | 80 | 0.70214 |
b1f86d5a5a59afce7357a0d6a9963ff8115fec02 | 859 | py | Python | cassiopeia/core/gameapi.py | BubblegumDiscord/LoLTrivia | 70070dc2e8c59d7210d8b30ad2525bafaee4bba7 | [
"MIT"
] | null | null | null | cassiopeia/core/gameapi.py | BubblegumDiscord/LoLTrivia | 70070dc2e8c59d7210d8b30ad2525bafaee4bba7 | [
"MIT"
] | null | null | null | cassiopeia/core/gameapi.py | BubblegumDiscord/LoLTrivia | 70070dc2e8c59d7210d8b30ad2525bafaee4bba7 | [
"MIT"
] | 1 | 2018-05-06T15:49:33.000Z | 2018-05-06T15:49:33.000Z | import cassiopeia.dto.gameapi
import cassiopeia.core.requests
import cassiopeia.type.core.game
def get_recent_games(summoner):
    """
    Gets the most recent games a summoner played

    Args:
        summoner (Summoner): the summoner to get recent games for

    Returns:
        list<Game>: the summoner's recent games
    """
    games = cassiopeia.dto.gameapi.get_recent_games(summoner.id)

    # Load required data if loading policy is eager
    # (pre-fetching referenced summoners/spells avoids lazy loads later).
    if cassiopeia.core.requests.load_policy is cassiopeia.type.core.common.LoadPolicy.eager:
        summoner_ids = games.summoner_ids
        cassiopeia.riotapi.get_summoners_by_id(list(summoner_ids)) if summoner_ids else None
        cassiopeia.riotapi.get_summoner_spells() if games.summoner_spell_ids else None

    # NOTE(review): `games.summonerId` (camelCase) differs from the
    # `games.summoner_ids` property used above — presumably the raw DTO
    # field holding the owning summoner's id; confirm it exists on the DTO.
    return [cassiopeia.type.core.game.Game(game, games.summonerId) for game in games.games]
c4725fcf76ac15e29c7c6cd26e0d3fd2ac0b0451 | 1,289 | py | Python | condor/catalog/views.py | icky-baker/condor | a0719eb4bfade0acd08134e1cca0d8217f5bd981 | [
"MIT"
] | null | null | null | condor/catalog/views.py | icky-baker/condor | a0719eb4bfade0acd08134e1cca0d8217f5bd981 | [
"MIT"
] | 1 | 2020-12-20T11:10:36.000Z | 2020-12-20T12:48:28.000Z | condor/catalog/views.py | icky-baker/condor | a0719eb4bfade0acd08134e1cca0d8217f5bd981 | [
"MIT"
] | null | null | null | # import socket
import os
from catalog.models import Author, Book, BookInstance, Genre
from django.shortcuts import render
from django.views import generic
from django.views.generic import ListView
def index(request):
    """View function for home page of site."""

    # Generate counts of some of the main objects
    num_books = Book.objects.all().count()
    num_instances = BookInstance.objects.count()
    # Copies currently available (status 'a').
    num_instances_available = BookInstance.objects.filter(status__exact="a").count()
    num_authors = Author.objects.count()

    context = {
        # "hostname": socket.gethostname(),
        # NOTE(review): shells out to `ip addr show enp0s3` and scrapes the
        # first IPv4 address — brittle; assumes that interface exists and
        # has an "inet" entry. Confirm on deployment hosts.
        "hostname": os.popen("ip addr show enp0s3").read().split("inet ")[1].split("/")[0],
        "num_books": num_books,
        "num_instances": num_instances,
        "num_instances_available": num_instances_available,
        "num_authors": num_authors,
    }

    # Render the HTML template index.html with the data in the context variable
    return render(request, "index.html", context=context)
class BookListView(generic.ListView):
    """Paginated list of all books."""
    model = Book
    paginate_by = 3
class BookDetailView(generic.DetailView):
    """Detail page for a single book."""
    model = Book
class AuthorListView(generic.ListView):
    """Paginated list of all authors."""
    model = Author
    paginate_by = 5
class AuthorDetailView(generic.DetailView):
    """Detail page for a single author."""
    model = Author
| 25.78 | 91 | 0.704422 |
dfd82fcf7dbd301a65e122727823da220adf8ea2 | 3,793 | py | Python | app/models.py | elkwal/One-Minute-Pitch | 4b8df025beb7c8d5d395a8afe2f56f53401d9004 | [
"Unlicense"
] | null | null | null | app/models.py | elkwal/One-Minute-Pitch | 4b8df025beb7c8d5d395a8afe2f56f53401d9004 | [
"Unlicense"
] | null | null | null | app/models.py | elkwal/One-Minute-Pitch | 4b8df025beb7c8d5d395a8afe2f56f53401d9004 | [
"Unlicense"
] | null | null | null |
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin, db.Model):
    """An application user account (Flask-Login compatible)."""
    __tablename__ = 'users'

    id = db.Column(db.Integer,primary_key = True)
    username = db.Column(db.String(255),index = True)
    email = db.Column(db.String(255),unique = True,index = True)
    role_id = db.Column(db.Integer,db.ForeignKey('roles.id'))
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String())
    # NOTE(review): password_hash appears unused — the werkzeug hash is
    # stored in pass_secure (see the password setter below); confirm.
    password_hash = db.Column(db.String(255))
    pass_secure = db.Column(db.String(255))
    pitch = db.relationship('Pitch',backref = 'users',lazy="dynamic")

    def save_comment(self):
        # NOTE(review): misnamed — this saves the *user* record itself,
        # not a comment (likely copy-pasted from the Comment model).
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_comments(cls,id):
        # NOTE(review): queries comments by pitch id; duplicates
        # Comment.get_comments and arguably belongs on that model.
        comments = Comment.query.filter_by(pitch_id=id).all()
        return comments

    @property
    def password(self):
        """Write-only: reading the plaintext password is forbidden."""
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Store only a salted hash, never the plaintext.
        self.pass_secure = generate_password_hash(password)

    def verify_password(self,password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.pass_secure, password)

    def __repr__(self):
        return f'User {self.username}'
class Pitch (db.Model):
    '''
    Pitch class to define Pitch Objects
    '''
    __tablename__ = 'pitch'

    id = db.Column(db.Integer,primary_key = True)
    pitch = db.Column(db.String)
    # NOTE(review): plain Integer, not a ForeignKey into pitch_categories —
    # confirm whether a FK constraint was intended.
    category_id = db.Column(db.Integer)
    user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
    comments = db.relationship('Comment',backref = 'pitch',lazy="dynamic")

    def save_pitch(self):
        '''
        Function that saves pitches
        '''
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_all_pitches(cls):
        '''
        Function that queries the databse and returns all the pitches
        '''
        return Pitch.query.all()

    @classmethod
    def get_pitches_by_category(cls,cat_id):
        '''
        Function that queries the databse and returns pitches based on the
        category passed to it
        '''
        return Pitch.query.filter_by(category_id= cat_id)
class Comment(db.Model):
    """A user comment attached to a pitch."""
    __tablename__ = 'comments'

    id = db.Column(db.Integer, primary_key=True)
    comment = db.Column(db.String)      # comment body text
    pitch_id = db.Column(db.Integer, db.ForeignKey('pitch.id'))
    username = db.Column(db.String)     # author's display name
    votes = db.Column(db.Integer)

    def save_comment(self):
        '''
        Persist this comment to the database.
        '''
        db.session.add(self)
        db.session.commit()

    @classmethod
    def clear_comments(cls):
        '''
        Delete every stored comment.

        Bug fix: this previously referenced a nonexistent
        ``Comment.all_comments`` attribute and always raised
        ``AttributeError``; comments live in the database, so delete
        them there instead.
        '''
        cls.query.delete()
        db.session.commit()

    @classmethod
    def get_comments(cls, id):
        '''
        Return all comments belonging to the pitch with the given id.
        '''
        return cls.query.filter_by(pitch_id=id).all()
class Role(db.Model):
    """A user role referenced by ``User.role_id``."""
    __tablename__ = 'roles'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    users = db.relationship('User', backref='role', lazy="dynamic")

    def __repr__(self):
        # Bug fix: previously returned f'User {self.name}' — copy-pasted
        # from the User model; a Role should identify itself as a Role.
        return f'Role {self.name}'
class PitchCategory(db.Model):
    '''
    Function that defines different categories of pitches
    '''
    __tablename__ ='pitch_categories'

    id = db.Column(db.Integer, primary_key=True)
    name_of_category = db.Column(db.String(255))
    category_description = db.Column(db.String(255))

    @classmethod
    def get_categories(cls):
        '''
        This function fetches all the categories from the database
        '''
        categories = PitchCategory.query.all()
        return categories
| 26.158621 | 74 | 0.657 |
0b4c28dd21d3d4c1bcc033fc1ccfea15934ea8f3 | 14,413 | py | Python | Welcomer 6.0/rockutils.py | TheRockettek/Welcomer | 60706b4d6eec7d4f2500b3acc37530e42d846532 | [
"MIT"
] | 12 | 2019-09-10T21:31:51.000Z | 2022-01-21T14:31:05.000Z | Welcomer 6.0/rockutils.py | TheRockettek/Welcomer | 60706b4d6eec7d4f2500b3acc37530e42d846532 | [
"MIT"
] | null | null | null | Welcomer 6.0/rockutils.py | TheRockettek/Welcomer | 60706b4d6eec7d4f2500b3acc37530e42d846532 | [
"MIT"
] | 1 | 2021-09-17T09:03:54.000Z | 2021-09-17T09:03:54.000Z | import asyncio
import aiohttp
import base64
import discord
import gettext
import os
import re
import math
import string
import time
import random
import unicodedata
import rethinkdb as r
from discord.ext import commands
from urllib.parse import urlparse
from datetime import datetime
if "nt" in os.name:
try:
from colorama import init
init()
except:
pass
try:
import ujson as json
except:
print("Could not import ujson, defaulting to built-in json library")
import json
class RockUtils():
def __init__(self):
    # Width bookkeeping for aligned log prefixes (currently unused — see
    # the commented-out code in prefix_print).
    self.long_prefix = 0
    # language code -> installed gettext translation catalogue
    self.langs = {}
class InvalidFileIO(Exception):
    """File I/O error type; not raised anywhere in the visible code."""
    pass
colour_prefix = {
"default": "39",
"black": "30",
"red": "31",
"green": "32",
"yellow": "33",
"blue": "34",
"magenta": "35",
"cyan": "36",
"light grey": "37",
"dark grey": "90",
"light red": "91",
"light green": "92",
"light yellow": "93",
"light blue": "94",
"light magenta": "95",
"light cyan": "96",
"white": "97"
}
def markmessage(self, string, guild):
    # converts special message mentions such as
    # channel mentions #
    # emojis <a?:...:0>
    # to normalised messages
    #
    # Replaces ":name:" emoji tokens and "#channel-name" references with
    # the guild's real emoji / channel mention strings, when they exist;
    # unknown names are left untouched.
    found_channels = set(re.findall("\#([a-zA-Z-]+)",string))
    found_emotes = set(re.findall(":(\S+):", string))
    for emote in found_emotes:
        _emote = discord.utils.get(guild.emojis, name=emote)
        if _emote:
            string = string.replace(f":{emote}:", str(_emote))
    for channel in found_channels:
        _channel = discord.utils.get(guild.channels, name=channel)
        if _channel:
            string = string.replace(f"#{channel}",_channel.mention)
    return string
def strip_emotes(self, message, emojis):
    """Replace ``:name:`` placeholders in *message* with the matching
    emoji's mention string.

    Args:
        message: text possibly containing ``:name:`` tokens.
        emojis: iterable of emoji objects exposing ``.name`` and ``__str__``.

    Returns:
        The message with every known ``:name:`` token substituted.
    """
    # Bug fix: `_server_emojis` was a *list* being indexed by name
    # (TypeError on any emoji), and the replacement looked the name up in
    # `_message_emojis` (a list) instead of the name->emoji mapping.
    _server_emojis = {emoji.name: str(emoji) for emoji in emojis}
    for token in re.findall(":.*?:", message):
        name = token[1:-1]
        if name in _server_emojis:
            message = message.replace(token, _server_emojis[name])
    return message
def getprefix(self, intager):
    """Return the English ordinal suffix ("st"/"nd"/"rd"/"th") for a number."""
    digits = str(intager)
    # 11, 12 and 13 are irregular: they always take "th".
    if digits[-2:] in ("11", "12", "13"):
        return "th"
    return {"1": "st", "2": "nd", "3": "rd"}.get(digits[-1], "th")
def retrieve_time_emoji(self):
    """Return the Unicode clock-face emoji closest to the current time."""
    # Quantise the current time into 15-minute buckets and fold into 24
    # half-hour steps; the clock faces live at U+1F550..U+1F567, with the
    # half-past faces starting 12 code points after the whole-hour ones.
    a = int((time.time()/900-3)/2%24)
    return chr(128336+a//2+a%2*12)
def text_format(self, message, formatting, encapsulation=["{","}"]):
    """Substitute delimited placeholders in *message* with values from
    *formatting*.

    Args:
        message: template string containing e.g. ``{key}`` tokens.
        formatting: mapping of placeholder name -> replacement value.
        encapsulation: two-item [open, close] delimiter pair.

    Returns:
        The formatted string.
    """
    # Bug fix: the replacement referenced the undefined name `_valie`
    # (NameError on every call); it should be the loop's `_value`.
    # NOTE(review): the mutable default list is never mutated here, so it
    # is left as-is to keep the signature stable.
    for _key, _value in formatting.items():
        message = message.replace(encapsulation[0] + str(_key) + encapsulation[1], str(_value))
    return message
def parse_unix(self, unix):
    """Split a duration in seconds into (days, hours, minutes, seconds).

    Days, hours and minutes are floored; the leftover seconds component
    is rounded up.
    """
    days, unix = divmod(unix, 86400)
    hours, unix = divmod(unix, 3600)
    minutes, seconds = divmod(unix, 60)
    return int(days), int(hours), int(minutes), math.ceil(seconds)
async def incr_db(self, connection, db, table, key, count=1, keyname="count"):
    """Atomically increment field *keyname* of document *key* in RethinkDB."""
    return await r.db(db).table(table).get(key).update({keyname: r.row[keyname]+count}).run(connection)
def incr_db_noasync(self, connection, db, table, key, count=1, keyname="count"):
    """Synchronous variant of incr_db: increment *keyname* on document *key*."""
    return r.db(db).table(table).get(key).update({keyname: r.row[keyname]+count}).run(connection)
def produce_timestamp(self, seconds, include_days=True, include_seconds=True):
    """Format a duration as e.g. "1 D - 2 H - 3 M - 4 S".

    Args:
        seconds: duration in seconds.
        include_days: when False (or there are no whole days) the days
            are folded into the hour count instead of shown separately.
        include_seconds: append the seconds component when True.
    """
    _d, _h, _m, _s = self.parse_unix(seconds)
    message = ""
    if include_days and _d > 0:
        message += f"{_d} D - "
    else:
        # Bug fix: this read `h += _d*24` (NameError, `h` undefined);
        # fold the whole days into the hour count.
        _h += _d * 24
    message += f"{_h} H - {_m} M"
    if include_seconds:
        message += f" - {_s} S"
    return message
def since_seconds_str(self, seconds, allow_secs=False, include_ago=True):
    """Render a duration in seconds as English text, e.g.
    "1 hour and 5 minutes ago".

    Args:
        seconds: elapsed duration in seconds.
        allow_secs: include the seconds component.
        include_ago: append the word "ago".
    """
    # Bug fix: this called self.parseUnix, which does not exist — the
    # method is named parse_unix (AttributeError on every call).
    _d, _h, _m, _s = self.parse_unix(seconds)
    message = ""
    if _d > 0:
        message += f"{str(_d)} day{'s' if _d > 1 else ''} "
    if _h > 0:
        # NOTE(review): `_m < 0` can never be true (parse_unix never
        # returns negatives); possibly `_m <= 0` was intended — the
        # original behaviour is preserved as-is.
        if _m < 0:
            message += "and "
        elif len(message) > 1:
            message += ", "
        message += f"{str(_h)} hour{'s' if _h > 1 else ''} "
    if _m > 0:
        if _h > 0 or _d > 0:
            message += "and "
        message += f"{str(_m)} minute{'s' if _m > 1 else ''} "
    if allow_secs:
        if _h > 0 or _d > 0 or _m > 0:
            message += "and "
        message += f"{str(_s)} second{'s' if _s > 1 else ''} "
    if include_ago:
        message += "ago"
    return message
def since_unix_str(self, unix, lang=["second","minute","hour","day","and","ago"], allow_secs=False, include_ago=True):
    """Render the time elapsed since unix timestamp *unix* as text.

    Args:
        unix: a past unix timestamp (epoch seconds).
        lang: localised words [second, minute, hour, day, "and", "ago"].
            The mutable default is never mutated, so sharing is safe.
        allow_secs: include the seconds component.
        include_ago: append the "ago" word.
    """
    # Bug fix: this called self.parseUnix, which does not exist — the
    # method is named parse_unix (AttributeError on every call).
    _d, _h, _m, _s = self.parse_unix(time.time() - unix)
    message = ""
    if _d > 0:
        message += f"{str(_d)} {lang[3]}{'s' if _d > 1 else ''} "
    if _h > 0:
        # NOTE(review): `_m < 0` can never be true; preserved as-is.
        if _m < 0:
            message += f"{lang[4]} "
        elif len(message) > 1:
            message += ", "
        message += f"{str(_h)} {lang[2]}{'s' if _h > 1 else ''} "
    if _m > 0:
        if _h > 0 or _d > 0:
            message += f"{lang[4]} "
        message += f"{str(_m)} {lang[1]}{'s' if _m > 1 else ''} "
    if allow_secs:
        if _h > 0 or _d > 0 or _m > 0:
            message += f"{lang[4]} "
        message += f"{str(_s)} {lang[0]}{'s' if _s > 1 else ''} "
    if include_ago:
        message += lang[5]
    return message
def retrieve_gmt(self, format="%d %b %Y %H:%M:%S"):
    """Return the current UTC time rendered with *format* (strftime syntax)."""
    now_utc = time.gmtime(time.time())
    return time.strftime(format, now_utc)
def get_selection(self, text, fallback):
    """Interpret *text* as a yes/no answer (case-insensitive).

    Returns True for affirmative words, False for negative ones, and the
    opposite of *fallback* (i.e. a toggle) for anything unrecognised.
    """
    normalized = text.lower()
    truthy = {'yes', 'y', 'true', 't', 'enable', 'on', 'active', 'activate'}
    falsy = {'no', 'n', 'false', 'f', 'disable', 'off', 'inactive', 'deactivate'}
    if normalized in truthy:
        return True
    if normalized in falsy:
        return False
    return not fallback
def prefix_print(self, text, prefix="Welcomer", text_colour="default", prefix_colour="light blue"):
    """Print *text* preceded by a coloured "[prefix]" tag using ANSI codes."""
    # Unknown colour names fall back to white text / default prefix colour.
    text_colour = self.colour_prefix.get(text_colour.lower(), "97")
    prefix_colour = self.colour_prefix.get(prefix_colour.lower(), "39")
    pre = f"[\033[{prefix_colour}m{prefix.rstrip()}\033[0m]"
    # if len(pre) > self.long_prefix:
    #     self.long_prefix = len(pre)
    # pre = f"{' '*(self.long_prefix-len(pre))}{pre}"
    print(f"{pre}\033[{text_colour}m {text}\033[0m")
def merge_embeded_lists(self, _dict):
    """Flatten a dict whose values are lists into one combined list,
    preserving the dict's insertion order."""
    return [item for cluster in _dict.values() for item in cluster]
def normalize(self, string, normal_format="NFKC", encode_format="ascii"):
    """Normalise *string* (NFKC by default) and drop characters that do
    not survive *encode_format* (non-ASCII by default).

    Returns the input unchanged if normalisation fails (e.g. a
    non-string argument or an unknown normalisation form/codec).
    """
    try:
        return unicodedata.normalize(normal_format, string).encode(encode_format, "ignore").decode()
    except (TypeError, ValueError, LookupError):
        # Narrowed from a bare `except`: only the failures this pipeline
        # can actually raise, so unrelated bugs are no longer swallowed.
        return string
async def send_webhook(self, url, text, **kwargs):
    """Send *text* (plus any discord.py webhook kwargs, e.g. embeds)
    to a Discord webhook URL using a throwaway aiohttp session."""
    async with aiohttp.ClientSession() as session:
        webhook = discord.Webhook.from_url(url, adapter=discord.AsyncWebhookAdapter(session))
        await webhook.send(content=text, **kwargs)
def save_json(self, filename, data):
    """Atomically write *data* as JSON to *filename*.

    The data is first written to a temporary sibling file and re-parsed
    as a sanity check; only a verified file is moved into place.

    Returns:
        True on success, False if the written JSON failed to round-trip.
    """
    path, ext = os.path.splitext(filename)
    tmp_file = f"{path}-{random.randint(1000, 9999)}.tmp"
    self._save_json(tmp_file, data)
    try:
        self._read_json(tmp_file)
    except json.decoder.JSONDecodeError:
        # Bug fix: the stale temp file used to be left behind on failure.
        os.remove(tmp_file)
        return False
    os.replace(tmp_file, filename)
    return True
def load_json(self, filename):
    """Read and return the parsed JSON content of *filename*."""
    return self._read_json(filename)
def is_valid_json(self, filename):
    """Return True iff *filename* exists and contains parseable JSON."""
    try:
        self._read_json(filename)
        return True
    except FileNotFoundError:
        return False
    except json.decoder.JSONDecodeError:
        return False
def _read_json(self, filename):
    """Load and return the JSON content of *filename* (UTF-8)."""
    # Context manager guarantees the handle is closed even if parsing fails.
    with open(filename, encoding='utf-8', mode="r") as f:
        return json.load(f)
def _save_json(self, filename, data):
    """Serialise *data* as JSON to *filename* (UTF-8) and return *data*."""
    # Context manager guarantees the buffer is flushed and the handle
    # closed even if serialisation raises.
    with open(filename, encoding='utf-8', mode="w") as f:
        json.dump(data, f)
    return data
def add_lang(self, language):
    """Load, install and cache the gettext catalogue for *language*.

    Returns:
        True on success, False if the catalogue could not be loaded.
    """
    try:
        self.prefix_print(f"Loading language: {language}", prefix="gettext")
        l = gettext.translation('base', 'locale', languages=[language])
        l.install()
        self.langs[language] = l
        return True
    except Exception as e:
        self.prefix_print(f"Failed to load language: {e}", prefix="gettext", prefix_colour="light red")
        return False
def regex_text(self, string, dlist, return_string=False):
    """Check whether any entry of *dlist* (e.g. a blocked-domain list)
    appears in *string*, after normalising, stripping whitespace and
    stripping non-alphanumerics.

    Args:
        string: text to scan.
        dlist: iterable of substrings to look for; each is also checked
            in a bare (alphanumeric-only) form.
        return_string: when True return (matched, matched_entry).

    Returns:
        bool, or (bool, str) when return_string is True.
    """
    # Bug fix: this called the module-level singleton `rockutils` from
    # inside its own class — use self so the method works on any instance
    # (and before/without the module-level instance existing).
    normalized_name = self.normalize(string.lower())
    stripped_name = re.sub(r'\s+', '', normalized_name)
    bare_name = re.sub(r"[^0-9a-zA-Z]+", '', stripped_name)
    rlist = []
    for dom in dlist:
        rlist.append(dom)
        rlist.append(re.sub(r"[^0-9a-zA-Z]+", '', dom))
    for dom in rlist:
        if dom in stripped_name or dom in normalized_name or dom in string or dom in bare_name:
            if return_string:
                return True, dom
            else:
                return True
    if return_string:
        return False, ""
    else:
        return False
def _(self, text, lang=None):
    """Translate *text* via the cached gettext catalogue for *lang*.

    *lang* may be a language code, a guild-config dict (the code is
    pulled from ['g']['b']['l']), or a commands.Context.
    """
    # NOTE(review): the Context branch is a no-op — the language is never
    # extracted from ctx.userinfo; confirm whether this was left unfinished.
    if type(lang) == commands.Context and hasattr(lang, "userinfo"):
        pass
    if type(lang) == dict:
        try:
            lang = lang['g']['b']['l']
        except:
            pass
    # NOTE(review): `if not lang` attempts to load a catalogue for a
    # falsy/None language; presumably `if lang not in self.langs` was
    # intended — verify before changing, behaviour preserved here.
    if not lang:
        self.add_lang(lang)
    if not lang in self.langs:
        return text
    else:
        return self.langs[lang].gettext(text)
def randstr():
    """Return a lowercase, padding-free base64 token derived from the
    current timestamp (time-based, not cryptographically random)."""
    raw = str(time.time() * 100000).encode("ascii")
    return base64.b64encode(raw).decode().rstrip("=").lower()
rockutils = RockUtils()
class GameRetriever():
def __init__(self):
    # url-signature -> {"handler": response, "time": expiry} cache.
    self.http_cache = {}
    # Optional protocol support: Source A2S and Minecraft pings degrade
    # gracefully when the libraries are not installed.
    # NOTE(review): these imports are function-local, so `valve` and
    # `MinecraftServer` are NOT bound for the a2s()/minecraft() methods
    # below — those will NameError at call time; confirm and hoist.
    try:
        import valve.source.a2s
        self.has_a2s = True
    except:
        rockutils.prefix_print("Could not import valve.source.a2s, retrieving valve server data will not be possible", prefix_colour="light red", text_colour="red")
        self.has_a2s = False
    try:
        from mcstatus import MinecraftServer
        self.has_mc = True
    except:
        rockutils.prefix_print("Could not import mcstatus, retrieving minecraft data will not be possible", prefix_colour="light red", text_colour="red")
        self.has_mc = False
async def retrieve_url(self, url, cache_time=30):
    """Fetch *url*, caching the response handler for *cache_time* seconds.

    Expired cache entries are evicted on every call; a cache hit returns
    the previously stored response object.
    """
    import hashlib  # bug fix: hashlib was never imported at module level

    _time = time.time()
    # Bug fix: the original iterated `.keys()` while unpacking two values
    # (ValueError) and deleted entries mid-iteration; snapshot the
    # expired keys first, then evict.
    expired = [key for key, entry in self.http_cache.items() if _time >= entry['time']]
    for key in expired:
        del self.http_cache[key]

    _url_signature = hashlib.sha256(url.encode("utf8")).hexdigest()[-16:]
    cached = self.http_cache.get(_url_signature)
    if cached and "handler" in cached:
        return cached['handler']

    # NOTE(review): the ClientSession closes when this block exits, so the
    # cached/returned response can no longer read its body afterwards —
    # callers appear to `await response.text()` later; confirm and
    # consider caching the payload instead of the response object.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            self.http_cache[_url_signature] = {
                "handler": response,
                "time": _time + cache_time
            }
            return self.http_cache[_url_signature]['handler']
async def youtube(self, channel_id):
    """Scrape a YouTube channel page for its subscriber count."""
    # NOTE(review): this block appears broken as written — `retrieve_url`
    # is an instance coroutine (needs `self.` and `await`), not an async
    # context manager, and `h` below is undefined (presumably the fetched
    # page body). Confirm intended behaviour before relying on it.
    async with retrieve_url(f"https://www.youtube.com/channel/{channel_id}") as response:
        _sub_regex = re.findall('[0-9,]+ subscribers',h)
        if len(_sub_regex) > 0:
            # Strip the trailing " subscribers" suffix (12 characters).
            return True, _sub_regex[0][:-12]
        else:
            return False, None
async def twitter(self, display_name):
    """Fetch follower info for a Twitter handle via the syndication API.

    Returns (True, info_dict) on success, else (False, {} or empty list).
    """
    # NOTE(review): `retrieve_url` is an instance coroutine — the missing
    # `self.` and the `async with` usage will fail; see retrieve_url.
    async with retrieve_url(f"https://cdn.syndication.twimg.com/widgets/followbutton/info.json?screen_names={display_name}") as response:
        try:
            _json_data = await response.json()
        except:
            return False, {}
        if len(_json_data) > 0:
            return True, _json_data[0]
        else:
            return False, _json_data
async def steam_group(self, group_id):
    """Scrape a Steam community group page for member/online counts."""
    # NOTE(review): the regexes below run against `b`, which is undefined
    # (the page body is bound to `h`) — NameError at call time; also the
    # `retrieve_url` usage has the same issues as in youtube(). Confirm.
    async with retrieve_url(f"https://steamcommunity.com/groups/{group_id}") as response:
        h = await response.text()
        data = {}
        info = re.findall('count ">[0-9,]+</div>', b)
        if len(info) == 0:
            # Group not found / page layout changed: return zeroed stats.
            data['ingame'] = 0
            data['online'] = 0
            data['members'] = 0
            data['display'] = "-"
            return False, data
        else:
            # Slice off the surrounding HTML from each captured fragment.
            data['ingame'] = info[0][8:-6]
            data['online'] = info[1][8:-6]
            data['members'] = re.findall('count">[0-9,]+</span>', b)[0][7:-7]
            data['display'] = re.findall('grouppage_header_abbrev" >.+</span>', b)[0][26:-7]
            return True, data
async def factorio(self, game_id):
    """Fetch details for a public Factorio multiplayer game."""
    # NOTE(review): `j.get(...)` references an undefined name — it should
    # presumably be `_json_data.get(...)`; and `retrieve_url` usage has
    # the same issues as in youtube(). Confirm before relying on this.
    async with retrieve_url(f"https://multiplayer.factorio.com/get-game/details/{game_id}") as response:
        try:
            _json_data = await response.json()
        except:
            return False, {}
        if "no game" in j.get("message"):
            return False, _json_data
        return True, _json_data
async def a2s(self, ip):
    """Query a Source-engine server ("host:port") via the A2S protocol.

    Returns (True, info) on success, (True, None) on timeout, and
    (False, None) when unsupported or the address is malformed.
    """
    if not self.has_a2s:
        return False, None
    _split = ip.split(":")
    if len(_split) == 2:
        # NOTE(review): `valve` was imported function-locally in __init__
        # and is not in scope here (NameError); also ServerQuerier is
        # given a [host, "port"] list — presumably (host, int(port)) was
        # intended. Confirm.
        server = valve.source.a2s.ServerQuerier(_split,timeout=1)
        try:
            return True, server.info().values
        except valve.source.NoResponseError:
            return True, None
    else:
        return False, None
async def minecraft(self, ip):
    """Ping a Minecraft server and return its status.

    Returns (True, status) on success, else (False, None).
    """
    if not self.has_mc:
        return False, None
    try:
        # NOTE(review): MinecraftServer was imported function-locally in
        # __init__ and is not in scope here — the bare except below would
        # silently swallow the resulting NameError. Confirm and hoist.
        server = MinecraftServer.lookup(ip)
        return True, server.status()
    except:
        return False, None
gameretriever = GameRetriever() | 33.992925 | 168 | 0.544439 |
3e90887b07e76dbff061b069257f72c3719f5510 | 8,102 | py | Python | rdflib/plugins/sparql/parserutils.py | ROZBEH/rdflib | 5b9da927714a92a8888407f42b46249002964e8e | [
"BSD-3-Clause"
] | 1 | 2021-01-29T07:05:22.000Z | 2021-01-29T07:05:22.000Z | rdflib/plugins/sparql/parserutils.py | ROZBEH/rdflib | 5b9da927714a92a8888407f42b46249002964e8e | [
"BSD-3-Clause"
] | 14 | 2021-07-12T19:07:42.000Z | 2022-01-31T19:10:37.000Z | rdflib/plugins/sparql/parserutils.py | ROZBEH/rdflib | 5b9da927714a92a8888407f42b46249002964e8e | [
"BSD-3-Clause"
] | 1 | 2021-11-26T10:04:33.000Z | 2021-11-26T10:04:33.000Z | from types import MethodType
from collections import OrderedDict
from pyparsing import TokenConverter, ParseResults, originalTextFor
from rdflib import BNode, Variable
DEBUG = True
DEBUG = False
if DEBUG:
import traceback
"""
NOTE: PyParsing setResultName/__call__ provides a very similar solution to this
I didn't realise at the time of writing and I will remove a
lot of this code at some point
Utility classes for creating an abstract-syntax tree out with pyparsing actions
Lets you label and group parts of parser production rules
For example:
# [5] BaseDecl ::= 'BASE' IRIREF
BaseDecl = Comp('Base', Keyword('BASE') + Param('iri',IRIREF))
After parsing, this gives you back an CompValue object,
which is a dict/object with the paramters specified.
So you can access the parameters are attributes or as keys:
baseDecl.iri
Comp lets you set an evalFn that is bound to the eval method of
the resulting CompValue
"""
# This is an alternative
# Comp('Sum')( Param('x')(Number) + '+' + Param('y')(Number) )
def value(ctx, val, variables=False, errors=False):
    """
    utility function for evaluating something...

    Variables will be looked up in the context
    Normally, non-bound vars is an error,
    set variables=True to return unbound vars

    Normally, an error raises the error,
    set errors=True to return error
    """

    if isinstance(val, Expr):
        return val.eval(ctx)  # recurse?
    elif isinstance(val, CompValue):
        # A plain CompValue (no evalfn attached) cannot be evaluated.
        raise Exception("What do I do with this CompValue? %s" % val)

    elif isinstance(val, list):
        # Evaluate element-wise.
        return [value(ctx, x, variables, errors) for x in val]

    elif isinstance(val, (BNode, Variable)):
        r = ctx.get(val)
        if isinstance(r, SPARQLError) and not errors:
            raise r
        if r is not None:
            return r

        # not bound
        if variables:
            return val
        else:
            raise NotBoundError

    elif isinstance(val, ParseResults) and len(val) == 1:
        # Unwrap single-token parse results and evaluate the content.
        return value(ctx, val[0], variables, errors)

    else:
        # Plain Python value / literal: return as-is.
        return val
class ParamValue(object):
    """
    The result of parsing a Param
    This just keeps the name/value
    All cleverness is in the CompValue
    """

    def __init__(self, name, tokenList, isList):
        self.isList = isList
        self.name = name
        # Unwrap single-element token lists down to the bare value.
        if isinstance(tokenList, (list, ParseResults)) and len(tokenList) == 1:
            tokenList = tokenList[0]

        self.tokenList = tokenList

    def __str__(self):
        return "Param(%s, %s)" % (self.name, self.tokenList)
class Param(TokenConverter):
    """
    A pyparsing token for labelling a part of the parse-tree
    if isList is true repeat occurrences of ParamList have
    their values merged in a list
    """

    def __init__(self, name, expr, isList=False):
        self.name = name
        self.isList = isList
        TokenConverter.__init__(self, expr)
        self.addParseAction(self.postParse2)

    def postParse2(self, tokenList):
        # Wrap the matched tokens so Comp.postParse can collect them by name.
        return ParamValue(self.name, tokenList, self.isList)
class ParamList(Param):
    """
    A shortcut for a Param with isList=True
    """

    def __init__(self, name, expr):
        Param.__init__(self, name, expr, True)
class plist(list):
    """this is just a list, but we want our own type to check for"""
    # Used by Comp.postParse to accumulate repeated ParamList values.
    pass
class CompValue(OrderedDict):
    """
    The result of parsing a Comp
    Any included Params are avaiable as Dict keys
    or as attributes
    """

    def __init__(self, name, **values):
        OrderedDict.__init__(self)
        self.name = name
        self.update(values)

    def clone(self):
        return CompValue(self.name, **self)

    def __str__(self):
        return self.name + "_" + OrderedDict.__str__(self)

    def __repr__(self):
        return self.name + "_" + dict.__repr__(self)

    def _value(self, val, variables=False, errors=False):
        # NOTE(review): `self.ctx` is set externally (see Expr.eval, which
        # assigns it around evaluation); outside evaluation it is expected
        # to be None and values are returned raw.
        if self.ctx is not None:
            return value(self.ctx, val, variables)
        else:
            return val

    def __getitem__(self, a):
        return self._value(OrderedDict.__getitem__(self, a))

    def get(self, a, variables=False, errors=False):
        # Missing keys default to the key itself (then get evaluated).
        return self._value(OrderedDict.get(self, a, a), variables, errors)

    def __getattr__(self, a):
        # Hack hack: OrderedDict relies on this
        if a in ("_OrderedDict__root", "_OrderedDict__end"):
            raise AttributeError()
        try:
            return self[a]
        except KeyError:
            # raise AttributeError('no such attribute '+a)
            return None
class Expr(CompValue):
    """
    A CompValue that is evaluatable
    """

    def __init__(self, name, evalfn=None, **values):
        super(Expr, self).__init__(name, **values)

        self._evalfn = None
        if evalfn:
            # Bind the eval function as a method on this instance.
            self._evalfn = MethodType(evalfn, self)

    def eval(self, ctx={}):
        # NOTE(review): mutable default argument — harmless here as long
        # as no evalfn mutates the context dict; verify.
        try:
            # ctx is exposed on self so CompValue._value can resolve
            # variables during evaluation; cleared again in finally.
            self.ctx = ctx
            return self._evalfn(ctx)
        except SPARQLError as e:
            # Errors are returned (not raised) so callers can propagate
            # them as SPARQL error values.
            return e
        finally:
            self.ctx = None
class Comp(TokenConverter):
    """
    A pyparsing token for grouping together things with a label
    Any sub-tokens that are not Params will be ignored.

    Returns CompValue / Expr objects - depending on whether evalFn is set.
    """

    def __init__(self, name, expr):
        self.expr = expr
        TokenConverter.__init__(self, expr)
        self.name = name
        self.evalfn = None

    def postParse(self, instring, loc, tokenList):
        if self.evalfn:
            # An eval function was attached: produce an evaluatable Expr.
            res = Expr(self.name)
            res._evalfn = MethodType(self.evalfn, res)
        else:
            res = CompValue(self.name)
            if self.name == "ServiceGraphPattern":
                # Then this must be a service graph pattern and have
                # already matched.
                # lets assume there is one, for now, then test for two later.
                sgp = originalTextFor(self.expr)
                service_string = sgp.searchString(instring)[0][0]
                res["service_string"] = service_string

        # Collect only the labelled (Param) children; ParamList values
        # accumulate into a plist, plain Params overwrite.
        for t in tokenList:
            if isinstance(t, ParamValue):
                if t.isList:
                    if t.name not in res:
                        res[t.name] = plist()
                    res[t.name].append(t.tokenList)
                else:
                    res[t.name] = t.tokenList
                # res.append(t.tokenList)
            # if isinstance(t,CompValue):
            #     res.update(t)
        return res

    def setEvalFn(self, evalfn):
        self.evalfn = evalfn
        return self
def prettify_parsetree(t, indent="", depth=0):
    """Render a pyparsing/CompValue parse tree as an indented text dump."""
    out = []
    if isinstance(t, ParseResults):
        # Positional children first, then named results.
        for e in t.asList():
            out.append(prettify_parsetree(e, indent, depth + 1))
        for k, v in sorted(t.items()):
            out.append("%s%s- %s:\n" % (indent, " " * depth, k))
            out.append(prettify_parsetree(v, indent, depth + 1))
    elif isinstance(t, CompValue):
        out.append("%s%s> %s:\n" % (indent, " " * depth, t.name))
        for k, v in t.items():
            out.append("%s%s- %s:\n" % (indent, " " * (depth + 1), k))
            out.append(prettify_parsetree(v, indent, depth + 2))
    elif isinstance(t, dict):
        for k, v in t.items():
            out.append("%s%s- %s:\n" % (indent, " " * (depth + 1), k))
            out.append(prettify_parsetree(v, indent, depth + 2))
    elif isinstance(t, list):
        for e in t:
            out.append(prettify_parsetree(e, indent, depth + 1))
    else:
        # Leaf value.
        out.append("%s%s- %r\n" % (indent, " " * depth, t))
    return "".join(out)
if __name__ == "__main__":
    # Tiny self-test: parse "<int>+<int>" from argv and evaluate the sum.
    from pyparsing import Word, nums
    import sys

    Number = Word(nums)
    Number.setParseAction(lambda x: int(x[0]))
    Plus = Comp("plus", Param("a", Number) + "+" + Param("b", Number))
    Plus.setEvalFn(lambda self, ctx: self.a + self.b)

    r = Plus.parseString(sys.argv[1])
    print(r)
    print(r[0].eval({}))
# hurrah for circular imports
from rdflib.plugins.sparql.sparql import SPARQLError, NotBoundError
| 27.651877 | 79 | 0.603308 |
8c95dd3d4c7037c5d0adc7221ac9e209f1bd8184 | 2,660 | py | Python | gae/client/auth_helper.py | jlapenna/bikebuds | 6e2b54fa2e4fa03e5ff250ca779c269ccc49a2d8 | [
"Apache-2.0"
] | 9 | 2018-11-17T00:53:47.000Z | 2021-03-16T05:18:01.000Z | gae/client/auth_helper.py | jlapenna/bikebuds | 6e2b54fa2e4fa03e5ff250ca779c269ccc49a2d8 | [
"Apache-2.0"
] | 8 | 2018-11-28T17:19:07.000Z | 2022-02-26T17:46:09.000Z | gae/client/auth_helper.py | jlapenna/bikebuds | 6e2b54fa2e4fa03e5ff250ca779c269ccc49a2d8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from google.oauth2.credentials import Credentials
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from shared.config import config
import bikebuds_api
def load_credentials():
    """Load saved OAuth credentials from .credentials.json and refresh them.

    Returns:
        Refreshed Credentials, or None if the file is missing or the
        stored credentials are invalid.
    """
    try:
        # Bug fix: the file handle was opened inline and never closed.
        with open('.credentials.json', 'r') as credentials_file:
            creds = json.load(credentials_file)
        flow_creds = Credentials(**creds)
    except FileNotFoundError:
        flow_creds = None

    if not (flow_creds and flow_creds.valid):
        print('Unable to load credentials!')
        return None

    flow_creds.refresh(Request())
    return flow_creds
def load_configuration(flow_creds):
    """Build a bikebuds API client Configuration authorised with *flow_creds*."""
    configuration = bikebuds_api.Configuration()
    configuration.host = config.api_url

    # Configure API key authorization: api_key
    configuration.api_key['key'] = config.gcp_server_creds['api_key']
    # Uncomment below to setup prefix (e.g. Bearer) for API key, if needed
    # configuration.api_key_prefix['key'] = 'Bearer'

    # Configure OAuth2 access token for authorization: firebase
    configuration.access_token = flow_creds.id_token
    return configuration
def run_flow():
    """Run the OAuth installed-app flow in a local browser and return the
    resulting user credentials."""
    oauth_flow = InstalledAppFlow.from_client_secrets_file(
        os.path.join(config.base_path, 'service_keys/gcp-server-oauth.json'),
        scopes=[
            'openid',
            'https://www.googleapis.com/auth/userinfo.email',
            'https://www.googleapis.com/auth/userinfo.profile',
        ],
    )
    # port=0 picks a free localhost port for the redirect.
    return oauth_flow.run_local_server(host='localhost', port=0, open_browser=True)
def fetch_credentials():
    """Run the OAuth flow and persist the resulting credentials to
    .credentials.json.

    NOTE(review): invalid credentials only print a warning and are still
    written out — behaviour preserved; confirm whether an early return
    was intended.
    """
    flow_creds = run_flow()
    if not flow_creds.valid:
        print('Unable to complete creds flow!')

    creds = {
        'client_id': flow_creds.client_id,
        'client_secret': flow_creds.client_secret,
        'id_token': flow_creds.id_token,
        'refresh_token': flow_creds.refresh_token,
        'scopes': flow_creds.scopes,
        'token': flow_creds.token,
        'token_uri': flow_creds.token_uri,
    }
    # Bug fix: the output handle was opened inline and never closed, so
    # buffered data was only flushed at interpreter exit / GC.
    with open('.credentials.json', 'w') as credentials_file:
        json.dump(creds, credentials_file)
d634869be72900103bd989a5214307001fd27b8c | 2,009 | py | Python | setup.py | mshvartsman/hydra | 65057c36c05187fdadd07db4f4b4e086a22fc030 | [
"MIT"
] | 1 | 2020-09-25T07:12:14.000Z | 2020-09-25T07:12:14.000Z | setup.py | mshvartsman/hydra | 65057c36c05187fdadd07db4f4b4e086a22fc030 | [
"MIT"
] | 7 | 2021-06-28T20:30:25.000Z | 2022-02-27T10:27:47.000Z | setup.py | mshvartsman/hydra | 65057c36c05187fdadd07db4f4b4e086a22fc030 | [
"MIT"
] | 1 | 2020-10-10T21:40:08.000Z | 2020-10-10T21:40:08.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# type: ignore
import pathlib
import pkg_resources
from setuptools import find_packages, setup
from build_helpers.build_helpers import (
ANTLRCommand,
BuildPyCommand,
CleanCommand,
Develop,
SDistCommand,
find_version,
)
# Parse the pinned requirements file into install_requires entries.
with pathlib.Path("requirements/requirements.txt").open() as requirements_txt:
    install_requires = [
        str(requirement)
        for requirement in pkg_resources.parse_requirements(requirements_txt)
    ]

# The PyPI long description comes straight from the README.
with open("README.md", "r") as fh:
    LONG_DESC = fh.read()
setup(
    # Custom build steps (ANTLR grammar generation etc.) hook into the
    # standard setuptools commands here.
    cmdclass={
        "antlr": ANTLRCommand,
        "clean": CleanCommand,
        "sdist": SDistCommand,
        "build_py": BuildPyCommand,
        "develop": Develop,
    },
    name="hydra-core",
    version=find_version("hydra", "__init__.py"),
    author="Omry Yadan",
    author_email="omry@fb.com",
    description="A framework for elegantly configuring complex applications",
    long_description=LONG_DESC,
    long_description_content_type="text/markdown",
    url="https://github.com/facebookresearch/hydra",
    keywords="command-line configuration yaml tab-completion",
    packages=find_packages(include=["hydra"]),
    include_package_data=True,
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS",
        "Operating System :: Microsoft :: Windows",
    ],
    install_requires=install_requires,
    entry_points={"pytest11": ["hydra_pytest = hydra.extra.pytest_plugin"]},
    # Install development dependencies with
    # pip install -r requirements/dev.txt -e .
)
8df49e4312cc1fe9502e3c18ba45f03084c93e1a | 39 | py | Python | frontend/infra/provision/route53.py | jim-hill-r/BlueSteel | fa3ac479096679146b99bf5b12792fad25d9d0f1 | [
"MIT"
] | 2 | 2020-04-03T12:19:00.000Z | 2020-04-03T13:32:11.000Z | frontend/infra/provision/route53.py | jim-hill-r/BlueSteel | fa3ac479096679146b99bf5b12792fad25d9d0f1 | [
"MIT"
] | 25 | 2020-04-23T23:17:20.000Z | 2020-05-29T21:00:44.000Z | frontend/infra/provision/route53.py | jim-hill-r/BlueSteel | fa3ac479096679146b99bf5b12792fad25d9d0f1 | [
"MIT"
] | null | null | null | #TODO: Create automation around route53 | 39 | 39 | 0.846154 |
0d1de869f6ef91791a235cfe545b3b3a9b734e72 | 14,300 | py | Python | tensorflow/contrib/model_pruning/examples/cifar10/cifar10_pruning.py | ryorda/tensorflow-viennacl | 054b515feec0a3fca4cfb1f29adbf423c9027c3a | [
"Apache-2.0"
] | 22 | 2018-01-13T14:52:47.000Z | 2018-07-05T01:00:28.000Z | tensorflow/contrib/model_pruning/examples/cifar10/cifar10_pruning.py | ryorda/tensorflow-viennacl | 054b515feec0a3fca4cfb1f29adbf423c9027c3a | [
"Apache-2.0"
] | 3 | 2018-05-09T11:31:58.000Z | 2021-01-27T12:26:21.000Z | tensorflow/contrib/model_pruning/examples/cifar10/cifar10_pruning.py | ryorda/tensorflow-viennacl | 054b515feec0a3fca4cfb1f29adbf423c9027c3a | [
"Apache-2.0"
] | 13 | 2018-02-22T21:04:13.000Z | 2020-11-17T11:38:36.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network with additional variables to support pruning.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.model_pruning.examples.cifar10 import cifar10_input
from tensorflow.contrib.model_pruning.python import pruning
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
BATCH_SIZE = 128  # examples per training/eval batch
DATA_DIR = '/tmp/cifar10_data'  # where the CIFAR-10 binaries are downloaded/extracted
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999  # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0  # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1  # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x):
  """Attach a histogram and a sparsity summary to an activation tensor.

  The histogram shows the distribution of activations; the scalar summary
  tracks the fraction of zero activations.

  Args:
    x: Tensor whose activations should be summarized.

  Returns:
    nothing
  """
  # Strip any 'tower_<i>/' prefix so multi-GPU towers share one summary name.
  base_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
  tf.summary.histogram('%s/activations' % base_name, x)
  tf.summary.scalar('%s/sparsity' % base_name, tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
  """Create (or fetch) a float32 variable pinned to host (CPU) memory.

  Pinning parameters to the CPU lets multiple GPU towers share them.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor placed on /cpu:0
  """
  with tf.device('/cpu:0'):
    return tf.get_variable(
        name, shape, initializer=initializer, dtype=tf.float32)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  # NOTE: wd=0.0 is not None, so it still adds a (zero-valued) weight-decay op
  # to the 'losses' collection; only wd=None skips it entirely.
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    # Collected terms are summed with the cross entropy in loss() below.
    tf.add_to_collection('losses', weight_decay)
  return var
def distorted_inputs():
  """Construct distorted (augmented) input batches for CIFAR training.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not DATA_DIR:
    raise ValueError('Please supply a data_dir')
  binaries_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
  return cifar10_input.distorted_inputs(
      data_dir=binaries_dir, batch_size=BATCH_SIZE)
def inputs(eval_data):
  """Construct undistorted input batches for CIFAR evaluation.

  Args:
    eval_data: bool, indicating if one should use the train or eval data set.

  Returns:
    images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.

  Raises:
    ValueError: If no data_dir
  """
  if not DATA_DIR:
    raise ValueError('Please supply a data_dir')
  binaries_dir = os.path.join(DATA_DIR, 'cifar-10-batches-bin')
  return cifar10_input.inputs(
      eval_data=eval_data, data_dir=binaries_dir, batch_size=BATCH_SIZE)
def inference(images):
  """Build the CIFAR-10 model: conv1-pool-norm, conv2-norm-pool, two FC
  layers, then an unscaled linear (softmax) output layer.

  Args:
    images: Images returned from distorted_inputs() or inputs().

  Returns:
    Logits. 2D tensor of [BATCH_SIZE, NUM_CLASSES] (unscaled; no softmax).
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  # While instantiating conv and local layers, we add mask and threshold
  # variables to the layer by calling the pruning.apply_mask() function.
  # Note that the masks are applied only to the weight tensors
  # conv1: 5x5/64 conv over RGB input; weights are masked for pruning.
  with tf.variable_scope('conv1') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 3, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(
        images, pruning.apply_mask(kernel, scope), [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv1)
  # pool1: 3x3 max pool, stride 2 (overlapping pooling).
  pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                         padding='SAME', name='pool1')
  # norm1: local response normalization (pool before norm here).
  norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm1')
  # conv2: 5x5/64 conv; note norm comes BEFORE pool in this branch.
  with tf.variable_scope('conv2') as scope:
    kernel = _variable_with_weight_decay('weights',
                                         shape=[5, 5, 64, 64],
                                         stddev=5e-2,
                                         wd=0.0)
    conv = tf.nn.conv2d(
        norm1, pruning.apply_mask(kernel, scope), [1, 1, 1, 1], padding='SAME')
    biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
    pre_activation = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(pre_activation, name=scope.name)
    _activation_summary(conv2)
  # norm2
  norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                    name='norm2')
  # pool2
  pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1], padding='SAME', name='pool2')
  # local3: first fully connected layer (flattened pool2 -> 384 units).
  with tf.variable_scope('local3') as scope:
    # Move everything into depth so we can perform a single matrix multiply.
    reshape = tf.reshape(pool2, [BATCH_SIZE, -1])
    dim = reshape.get_shape()[1].value
    weights = _variable_with_weight_decay('weights', shape=[dim, 384],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
    local3 = tf.nn.relu(
        tf.matmul(reshape, pruning.apply_mask(weights, scope)) + biases,
        name=scope.name)
    _activation_summary(local3)
  # local4: second fully connected layer (384 -> 192 units).
  with tf.variable_scope('local4') as scope:
    weights = _variable_with_weight_decay('weights', shape=[384, 192],
                                          stddev=0.04, wd=0.004)
    biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
    local4 = tf.nn.relu(
        tf.matmul(local3, pruning.apply_mask(weights, scope)) + biases,
        name=scope.name)
    _activation_summary(local4)
  # linear layer(WX + b),
  # We don't apply softmax here because
  # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits
  # and performs the softmax internally for efficiency.
  with tf.variable_scope('softmax_linear') as scope:
    weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
                                          stddev=1/192.0, wd=0.0)
    biases = _variable_on_cpu('biases', [NUM_CLASSES],
                              tf.constant_initializer(0.0))
    softmax_linear = tf.add(
        tf.matmul(local4, pruning.apply_mask(weights, scope)),
        biases,
        name=scope.name)
    _activation_summary(softmax_linear)
  return softmax_linear
def loss(logits, labels):
  """Build the total loss: cross entropy plus all L2 weight-decay terms.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
      of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  per_example_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='cross_entropy_per_example')
  mean_xent = tf.reduce_mean(per_example_xent, name='cross_entropy')
  tf.add_to_collection('losses', mean_xent)
  # The 'losses' collection already holds the weight-decay terms added by
  # _variable_with_weight_decay, so summing it yields the total loss.
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().

  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])
  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name. That way TensorBoard shows both curves.
    tf.summary.scalar(l.op.name + ' (raw)', l)
    tf.summary.scalar(l.op.name, loss_averages.average(l))
  return loss_averages_op
def train(total_loss, global_step):
  """Train CIFAR-10 model.

  Create an optimizer and apply to all trainable variables. Add moving
  average for all trainable variables.

  Args:
    total_loss: Total loss from loss().
    global_step: Integer Variable counting the number of training steps
      processed.

  Returns:
    train_op: op for training.
  """
  # Variables that affect learning rate.
  num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / BATCH_SIZE
  decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
  # Decay the learning rate exponentially based on the number of steps.
  lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
                                  global_step,
                                  decay_steps,
                                  LEARNING_RATE_DECAY_FACTOR,
                                  staircase=True)
  tf.summary.scalar('learning_rate', lr)
  # Generate moving averages of all losses and associated summaries.
  loss_averages_op = _add_loss_summaries(total_loss)
  # Compute gradients. The control dependency forces the loss-averaging op
  # to run every step the gradients are computed.
  with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
  # Apply gradients (also increments global_step).
  apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
  # Add histograms for trainable variables.
  for var in tf.trainable_variables():
    tf.summary.histogram(var.op.name, var)
  # Add histograms for gradients.
  for grad, var in grads:
    if grad is not None:
      tf.summary.histogram(var.op.name + '/gradients', grad)
  # Track the moving averages of all trainable variables.
  variable_averages = tf.train.ExponentialMovingAverage(
      MOVING_AVERAGE_DECAY, global_step)
  variables_averages_op = variable_averages.apply(tf.trainable_variables())
  # The returned op is a no-op whose control deps run one full train step:
  # gradient application plus the EMA update.
  with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
    train_op = tf.no_op(name='train')
  return train_op
def maybe_download_and_extract():
  """Download and extract the CIFAR-10 tarball from Alex Krizhevsky's site.

  The archive is fetched into DATA_DIR only when not already present, then
  extracted in place. Download progress is reported on stdout.
  """
  dest_directory = DATA_DIR
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):

    def _progress(count, block_size, total_size):
      # urlretrieve reporthook: count blocks of block_size bytes transferred.
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
          float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()

    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  # Use a context manager so the archive handle is always closed; the
  # original code leaked the open TarFile object.
  with tarfile.open(filepath, 'r:gz') as tar:
    tar.extractall(dest_directory)
| 36.111111 | 81 | 0.691958 |
2c790e60119a2f55cc998a4bb02e21df48e1967f | 778 | py | Python | alien_project/settings.py | yiyidhuang/PythonCrashCrouse2nd | 3512f9ab8fcf32c6145604a37e2a62feddf174d1 | [
"MIT"
] | null | null | null | alien_project/settings.py | yiyidhuang/PythonCrashCrouse2nd | 3512f9ab8fcf32c6145604a37e2a62feddf174d1 | [
"MIT"
] | null | null | null | alien_project/settings.py | yiyidhuang/PythonCrashCrouse2nd | 3512f9ab8fcf32c6145604a37e2a62feddf174d1 | [
"MIT"
] | null | null | null | class Settings:
"""Class that stores all settings in the game alien invasion"""
    def __init__(self):
        """Initialize the setting of the game"""
        # Set the screen
        self.screen_width = 1200  # window width in pixels
        self.screen_height = 800  # window height in pixels
        self.bg_color = (230, 230, 230)  # light-grey background (RGB)
        # Setup ship
        self.ship_speed = 1.5  # horizontal pixels moved per update tick
        self.ship_limit = 3  # ships the player starts with (lives)
        # Setup bullet
        self.bullet_speed = 1.5  # vertical pixels moved per update tick
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = (60, 60, 60)  # dark-grey bullets (RGB)
        self.bullets_allowed = 3  # max bullets on screen at once
        # Setup alien
        self.alien_speed = 1.0
        self.fleet_drop_speed = 10  # pixels the fleet drops when it hits an edge
        # fleet_direction, 1 means to move to the right, and -1 means to the move to the left.
        self.fleet_direction = 1
| 28.814815 | 94 | 0.588689 |
04d72465250fbe52ca8383678df3302bd89cceb4 | 546 | py | Python | samples/KleinWebAppSample/templates.py | spotware/cTraderFixPy | d146aabc4306c81a65e9257c1984a5e8120d66c5 | [
"MIT"
] | 15 | 2021-11-22T14:48:05.000Z | 2022-03-29T12:11:48.000Z | samples/KleinWebAppSample/templates.py | azurite-r/cTraderFixPy | 4c7e32ca7fbffbfe5f007f3926f48f7ac8e0fbe4 | [
"MIT"
] | 2 | 2021-11-21T18:31:40.000Z | 2022-03-30T04:54:24.000Z | samples/KleinWebAppSample/templates.py | azurite-r/cTraderFixPy | 4c7e32ca7fbffbfe5f007f3926f48f7ac8e0fbe4 | [
"MIT"
] | 2 | 2021-12-22T19:48:01.000Z | 2022-01-17T00:12:53.000Z | from twisted.web.template import Element, renderer, XMLFile
from twisted.python.filepath import FilePath
class AddAccountsElement(Element):
    """Twisted template element for the add-accounts page.

    Markup is loaded from ./markup/add_accounts.xml; the ``addAccountLink``
    slot in that markup is filled with the link passed to the constructor.
    """
    loader = XMLFile(FilePath('./markup/add_accounts.xml'))

    def __init__(self, addAccountLink):
        # URL injected into the template's addAccountLink slot.
        self.addAccountLink = addAccountLink
        super().__init__()

    @renderer
    def addAccountButton(self, request, tag):
        """Renderer for the add-account button: fill the link slot."""
        tag.fillSlots(addAccountLink=self.addAccountLink)
        return tag
class ClientAreaElement(Element):
    """Twisted template element for the client-area page; markup loaded
    from ./markup/client_area.xml (no dynamic slots)."""
    loader = XMLFile(FilePath('./markup/client_area.xml'))
| 30.333333 | 59 | 0.730769 |
545a5109332538595d79019977ebc32cec3e5e89 | 577 | py | Python | plotly/validators/scatterternary/line/_smoothing.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/scatterternary/line/_smoothing.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/scatterternary/line/_smoothing.py | Vesauza/plotly.py | e53e626d59495d440341751f60aeff73ff365c28 | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class SmoothingValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``scatterternary.line.smoothing`` property:
    a number constrained to the range [0, 1.3], edit type 'plot'."""

    def __init__(
        self,
        plotly_name='smoothing',
        parent_name='scatterternary.line',
        **kwargs
    ):
        # kwargs.pop(...) lets callers override the defaults while still
        # forwarding any remaining keyword arguments to the base validator.
        super(SmoothingValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'plot'),
            max=kwargs.pop('max', 1.3),
            min=kwargs.pop('min', 0),
            role=kwargs.pop('role', 'style'),
            **kwargs
        )
| 27.47619 | 71 | 0.587522 |
1abffe9afade5d7829a9788a77d9ddeb56e4e52e | 456 | py | Python | nickstagram/posts/urls.py | NickKirilov/Nickstagram | 872025b082b01d8e607e675b8138b9dffa2e2e85 | [
"Unlicense"
] | 1 | 2022-03-19T17:24:54.000Z | 2022-03-19T17:24:54.000Z | nickstagram/posts/urls.py | NickKirilov/Nickstagram | 872025b082b01d8e607e675b8138b9dffa2e2e85 | [
"Unlicense"
] | null | null | null | nickstagram/posts/urls.py | NickKirilov/Nickstagram | 872025b082b01d8e607e675b8138b9dffa2e2e85 | [
"Unlicense"
] | null | null | null | from django.urls import path
from nickstagram.posts.views import CreatePostView, PostDetailsView, EditPostView, DeletePostView
urlpatterns = [
path('create/', CreatePostView.as_view(), name='create post page'),
path('details/<int:pk>/', PostDetailsView.as_view(), name='post details page'),
path('edit/<int:pk>/', EditPostView.as_view(), name='post edit page'),
path('delete/<int:pk>', DeletePostView.as_view(), name='post delete page'),
] | 50.666667 | 97 | 0.721491 |
ca21d6e4d020687eef9b77e6b0cb63866a8a27e8 | 3,684 | py | Python | torchelie/nn/adain.py | maxferrari/Torchelie | d133f227bebc3c4cbbb6167bd1fae815d2b5fa81 | [
"MIT"
] | null | null | null | torchelie/nn/adain.py | maxferrari/Torchelie | d133f227bebc3c4cbbb6167bd1fae815d2b5fa81 | [
"MIT"
] | null | null | null | torchelie/nn/adain.py | maxferrari/Torchelie | d133f227bebc3c4cbbb6167bd1fae815d2b5fa81 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torchelie.utils as tu
from typing import Optional
class AdaIN2d(nn.Module):
    """
    Adaptive Instance Normalization from *Arbitrary Style Transfer in
    Real-time with Adaptive Instance Normalization* (Huang et al, 2017).

    A conditioning vector is projected to a per-channel scale and bias;
    the input is instance-normalized (per sample, per channel over H, W)
    and re-modulated with them.

    Args:
        channels (int): number of input channels
        cond_channels (int): number of conditioning channels from which bias
            and scale will be derived
    """
    weight: torch.Tensor
    bias: torch.Tensor

    def __init__(self, channels: int, cond_channels: int) -> None:
        super(AdaIN2d, self).__init__()
        self.make_weight = nn.Linear(cond_channels, channels)
        self.make_bias = nn.Linear(cond_channels, channels)
        # Placeholder buffers; condition() overwrites them with the
        # per-batch scale and bias.
        self.register_buffer('weight', torch.zeros(0))
        self.register_buffer('bias', torch.zeros(0))

    def forward(self,
                x: torch.Tensor,
                z: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Forward pass.

        Args:
            x (4D tensor): input tensor
            z (2D tensor, optional): conditioning vector. If not present,
                :code:`condition(z)` must be called first

        Returns:
            x, renormalized
        """
        if z is not None:
            self.condition(z)

        mean = x.mean(dim=(2, 3), keepdim=True)
        std = torch.sqrt(x.var(dim=(2, 3), keepdim=True) + 1e-8)

        # Fold the normalization into a single affine transform of x.
        scale = self.weight / (std + 1e-5)
        shift = self.bias - mean * scale
        return scale * x + shift

    def condition(self, z: torch.Tensor) -> None:
        """
        Conditions the layer before the forward pass if z will not be present
        when calling forward.

        Args:
            z (2D tensor, optional): conditioning vector
        """
        raw_scale = self.make_weight(z)
        raw_shift = self.make_bias(z)
        # Broadcast (N, C) -> (N, C, 1, 1); +1 centers the scale around 1.
        self.weight = raw_scale[:, :, None, None] + 1
        self.bias = raw_shift[:, :, None, None]
class FiLM2d(nn.Module):
    """
    Feature-wise Linear Modulation from
    https://distill.pub/2018/feature-wise-transformations/

    The difference with AdaIN is that FiLM does not use the input's mean and
    std in its calculations: it applies a purely conditional affine transform.

    Args:
        channels (int): number of input channels
        cond_channels (int): number of conditioning channels from which bias
            and scale will be derived
    """
    weight: Optional[torch.Tensor]
    bias: Optional[torch.Tensor]

    def __init__(self, channels: int, cond_channels: int) -> None:
        super(FiLM2d, self).__init__()
        self.make_weight = nn.Linear(cond_channels, channels)
        # tu.normal_init: project helper; presumably initializes the linear
        # layer with std 0.01 so modulation starts near identity — confirm.
        tu.normal_init(self.make_weight, 0.01)
        self.make_bias = nn.Linear(cond_channels, channels)
        tu.normal_init(self.make_bias, 0.01)
        # Filled in by condition(); plain tensors, not buffers (cf. AdaIN2d).
        self.weight = None
        self.bias = None

    def forward(self, x, z: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Forward pass

        Args:
            x (4D tensor): input tensor
            z (2D tensor, optional): conditioning vector. If not present,
                :code:`condition(z)` must be called first

        Returns:
            x, conditioned
        """
        if z is not None:
            self.condition(z)
        w = self.weight
        b = self.bias
        # Fails if neither a z argument nor a prior condition() call set them.
        assert w is not None and b is not None
        return w * x + b

    def condition(self, z: torch.Tensor) -> None:
        """
        Conditions the layer before the forward pass if z will not be present
        when calling forward

        Args:
            z (2D tensor, optional): conditioning vector
        """
        # Broadcast (N, C) -> (N, C, 1, 1); add_(1) centers the scale at 1.
        self.weight = self.make_weight(z)[:, :, None, None].add_(1)
        self.bias = self.make_bias(z)[:, :, None, None]
14ade0389fada302c1600dd7f85563bea2fc3bdb | 9,527 | py | Python | ghostnet.py | QFaceblue/Driving-Behavior-Recognition | 98c8fab51c7074852598ea9119f472ed7b1bda13 | [
"Apache-2.0"
] | 1 | 2022-03-13T14:37:17.000Z | 2022-03-13T14:37:17.000Z | ghostnet.py | QFaceblue/Driving-Behavior-Recognition | 98c8fab51c7074852598ea9119f472ed7b1bda13 | [
"Apache-2.0"
] | null | null | null | ghostnet.py | QFaceblue/Driving-Behavior-Recognition | 98c8fab51c7074852598ea9119f472ed7b1bda13 | [
"Apache-2.0"
] | null | null | null | # 2020.06.09-Changed for building GhostNet
# Huawei Technologies Co., Ltd. <foss@huawei.com>
"""
Creates a GhostNet Model as defined in:
GhostNet: More Features from Cheap Operations By Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu.
https://arxiv.org/abs/1911.11907
Modified from https://github.com/d-li14/mobilenetv3.pytorch and https://github.com/rwightman/pytorch-image-models
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
__all__ = ['ghost_net']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def hard_sigmoid(x, inplace: bool = False):
    """Piecewise-linear sigmoid approximation: clamp((x + 3) / 6, 0, 1).

    Args:
        x: input tensor.
        inplace: if True, mutate (and return) ``x`` itself.
    """
    if not inplace:
        return F.relu6(x + 3.) / 6.
    # In-place variant: shift, clamp and scale x without allocating.
    return x.add_(3.).clamp_(0., 6.).div_(6.)
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation block: global average pool, bottleneck MLP
    (1x1 convs), then a per-channel gate applied to the input.

    Args:
        in_chs: number of input channels.
        se_ratio: reduction ratio for the bottleneck width.
        reduced_base_chs: optional base channel count for the bottleneck
            (defaults to ``in_chs``).
        act_layer: activation used inside the bottleneck.
        gate_fn: gating nonlinearity applied to the excitation.
        divisor: bottleneck width is rounded to a multiple of this.
    """

    def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
                 act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
        super(SqueezeExcite, self).__init__()
        self.gate_fn = gate_fn
        base = reduced_base_chs or in_chs
        bottleneck = _make_divisible(base * se_ratio, divisor)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_reduce = nn.Conv2d(in_chs, bottleneck, 1, bias=True)
        self.act1 = act_layer(inplace=True)
        self.conv_expand = nn.Conv2d(bottleneck, in_chs, 1, bias=True)

    def forward(self, x):
        squeezed = self.avg_pool(x)
        excitation = self.conv_reduce(squeezed)
        excitation = self.act1(excitation)
        excitation = self.conv_expand(excitation)
        # Gate each channel of the input by its (0, 1) excitation score.
        return x * self.gate_fn(excitation)
class ConvBnAct(nn.Module):
    """Convolution + BatchNorm + activation with 'same'-style padding.

    Args:
        in_chs: input channels.
        out_chs: output channels.
        kernel_size: square kernel size.
        stride: convolution stride.
        act_layer: activation module class (default ReLU).
    """

    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, act_layer=nn.ReLU):
        super(ConvBnAct, self).__init__()
        # kernel_size // 2 padding preserves spatial size for odd kernels.
        self.conv = nn.Conv2d(in_chs, out_chs, kernel_size, stride,
                              kernel_size // 2, bias=False)
        self.bn1 = nn.BatchNorm2d(out_chs)
        self.act1 = act_layer(inplace=True)

    def forward(self, x):
        return self.act1(self.bn1(self.conv(x)))
class GhostModule(nn.Module):
    """Ghost convolution (GhostNet, Han et al., 2020): a small dense conv
    produces ``ceil(oup / ratio)`` intrinsic maps, a cheap depthwise conv
    derives the remaining "ghost" maps, and the two are concatenated and
    trimmed to ``oup`` channels.
    """

    def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True):
        super(GhostModule, self).__init__()
        self.oup = oup
        intrinsic = math.ceil(oup / ratio)
        ghost = intrinsic * (ratio - 1)

        self.primary_conv = nn.Sequential(
            nn.Conv2d(inp, intrinsic, kernel_size, stride,
                      kernel_size // 2, bias=False),
            nn.BatchNorm2d(intrinsic),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )

        # Depthwise conv over the intrinsic maps yields the ghost maps.
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(intrinsic, ghost, dw_size, 1, dw_size // 2,
                      groups=intrinsic, bias=False),
            nn.BatchNorm2d(ghost),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )

    def forward(self, x):
        primary = self.primary_conv(x)
        cheap = self.cheap_operation(primary)
        combined = torch.cat([primary, cheap], dim=1)
        # intrinsic + ghost can exceed oup (odd oup); drop the excess.
        return combined[:, :self.oup, :, :]
class GhostBottleneck(nn.Module):
    """Ghost bottleneck w/ optional SE.

    Structure: GhostModule expansion -> (depthwise conv when stride > 1) ->
    (optional SqueezeExcite) -> GhostModule projection, plus a residual
    shortcut (identity when shape is preserved, conv path otherwise).
    """

    def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3,
                 stride=1, act_layer=nn.ReLU, se_ratio=0.):
        super(GhostBottleneck, self).__init__()
        has_se = se_ratio is not None and se_ratio > 0.
        self.stride = stride

        # Point-wise expansion
        self.ghost1 = GhostModule(in_chs, mid_chs, relu=True)

        # Depth-wise convolution (only created for strided blocks)
        if self.stride > 1:
            self.conv_dw = nn.Conv2d(mid_chs, mid_chs, dw_kernel_size, stride=stride,
                                     padding=(dw_kernel_size - 1) // 2,
                                     groups=mid_chs, bias=False)
            self.bn_dw = nn.BatchNorm2d(mid_chs)

        # Squeeze-and-excitation
        if has_se:
            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio)
        else:
            self.se = None

        # Point-wise linear projection (no activation: linear bottleneck)
        self.ghost2 = GhostModule(mid_chs, out_chs, relu=False)

        # shortcut: identity when channels and resolution are unchanged,
        # otherwise depthwise + pointwise convs to match shape.
        if (in_chs == out_chs and self.stride == 1):
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_chs, in_chs, dw_kernel_size, stride=stride,
                          padding=(dw_kernel_size - 1) // 2, groups=in_chs, bias=False),
                nn.BatchNorm2d(in_chs),
                nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_chs),
            )

    def forward(self, x):
        residual = x

        # 1st ghost bottleneck (expansion)
        x = self.ghost1(x)

        # Depth-wise convolution
        if self.stride > 1:
            x = self.conv_dw(x)
            x = self.bn_dw(x)

        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)

        # 2nd ghost bottleneck (projection)
        x = self.ghost2(x)

        x += self.shortcut(residual)
        return x
class GhostNet(nn.Module):
    """GhostNet classifier built from a stem conv, a sequence of
    GhostBottleneck stages described by ``cfgs``, and a conv head +
    linear classifier.

    Args:
        cfgs: list of stages; each stage is a list of
            [kernel, expansion_size, out_channels, se_ratio, stride] rows.
        num_classes: size of the classifier output.
        width: channel-width multiplier (channels rounded to multiples of 4).
        dropout: dropout probability before the classifier (0 disables it).
    """

    def __init__(self, cfgs, num_classes=1000, width=1.0, dropout=0.2):
        super(GhostNet, self).__init__()
        # setting of inverted residual blocks
        self.cfgs = cfgs
        self.dropout = dropout

        # building first layer (stem: 3x3 stride-2 conv)
        output_channel = _make_divisible(16 * width, 4)
        self.conv_stem = nn.Conv2d(3, output_channel, 3, 2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(output_channel)
        self.act1 = nn.ReLU(inplace=True)
        input_channel = output_channel

        # building inverted residual blocks
        stages = []
        block = GhostBottleneck
        for cfg in self.cfgs:
            layers = []
            for k, exp_size, c, se_ratio, s in cfg:
                output_channel = _make_divisible(c * width, 4)
                hidden_channel = _make_divisible(exp_size * width, 4)
                layers.append(block(input_channel, hidden_channel, output_channel, k, s,
                                    se_ratio=se_ratio))
                input_channel = output_channel
            stages.append(nn.Sequential(*layers))

        # NOTE: exp_size deliberately carries over from the last cfg row to
        # size the final 1x1 ConvBnAct stage.
        output_channel = _make_divisible(exp_size * width, 4)
        stages.append(nn.Sequential(ConvBnAct(input_channel, output_channel, 1)))
        input_channel = output_channel

        self.blocks = nn.Sequential(*stages)

        # building last several layers (head: pool, 1x1 conv, classifier)
        output_channel = 1280
        # self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_head = nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=True)
        self.act2 = nn.ReLU(inplace=True)
        self.classifier = nn.Linear(output_channel, num_classes)

    def forward(self, x):
        x = self.conv_stem(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.blocks(x)
        x = self.global_pool(x)
        # 1x1 conv head runs on the pooled (N, C, 1, 1) features.
        x = self.conv_head(x)
        x = self.act2(x)
        x = x.view(x.size(0), -1)
        if self.dropout > 0.:
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.classifier(x)
        return x

    # # The original code had no weight initialization.
    # def _initialize_weights(self):
    #     for m in self.modules():
    #         if isinstance(m, nn.Conv2d):
    #             n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
    #             m.weight.data.normal_(0, math.sqrt(2. / n))
    #             if m.bias is not None:
    #                 m.bias.data.zero_()
    #         elif isinstance(m, nn.BatchNorm2d):
    #             m.weight.data.fill_(1)
    #             m.bias.data.zero_()
    #         elif isinstance(m, nn.Linear):
    #             m.weight.data.normal_(0, 0.01)
    #             m.bias.data.zero_()
def ghostnet(**kwargs):
    """
    Constructs a GhostNet model with the standard configuration from the
    paper. Keyword arguments (num_classes, width, dropout) are forwarded
    to GhostNet.
    """
    cfgs = [
        # k, t, c, SE, s
        # k = kernel size, t = expansion size, c = output channels,
        # SE = squeeze-excite ratio (0 disables), s = stride
        # stage1
        [[3, 16, 16, 0, 1]],
        # stage2
        [[3, 48, 24, 0, 2]],
        [[3, 72, 24, 0, 1]],
        # stage3
        [[5, 72, 40, 0.25, 2]],
        [[5, 120, 40, 0.25, 1]],
        # stage4
        [[3, 240, 80, 0, 2]],
        [[3, 200, 80, 0, 1],
         [3, 184, 80, 0, 1],
         [3, 184, 80, 0, 1],
         [3, 480, 112, 0.25, 1],
         [3, 672, 112, 0.25, 1]
         ],
        # stage5
        [[5, 672, 160, 0.25, 2]],
        [[5, 960, 160, 0, 1],
         [5, 960, 160, 0.25, 1],
         [5, 960, 160, 0, 1],
         [5, 960, 160, 0.25, 1]
         ]
    ]
    return GhostNet(cfgs, **kwargs)
if __name__ == '__main__':
    import torch

    # Load the pretrained 1.0x GhostNet from the authors' hub repo.
    model = torch.hub.load('huawei-noah/ghostnet', 'ghostnet_1x', pretrained=True)
    model.eval()
    # model = ghostnet(num_classes=10)
    # model.eval()
    # print(model)
    input = torch.randn(1, 3, 224, 224)
    # y = model(input)
    # print(y.shape)
    # thop: PyTorch op/FLOP counter
    from thop import profile

    # Report FLOPs and parameter counts for one 224x224 forward pass.
    flops, params = profile(model, inputs=[input])
    print(flops)
    print(params)
    print('Total params: %f M' % (sum(p.numel() for p in model.parameters())))
# learn() | 33.545775 | 115 | 0.573528 |
a442cc6fabda9c9574bc83d759619be72bd85f5a | 7,677 | py | Python | tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py | Anon-Artist/tfx | 2692c9ab437d76b5d9517996bfe2596862e0791d | [
"Apache-2.0"
] | 2 | 2021-05-10T21:39:48.000Z | 2021-11-17T11:24:29.000Z | tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py | Anon-Artist/tfx | 2692c9ab437d76b5d9517996bfe2596862e0791d | [
"Apache-2.0"
] | 1 | 2021-01-28T13:44:51.000Z | 2021-04-28T16:15:47.000Z | tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py | Anon-Artist/tfx | 2692c9ab437d76b5d9517996bfe2596862e0791d | [
"Apache-2.0"
] | 1 | 2021-01-28T13:41:51.000Z | 2021-01-28T13:41:51.000Z | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from typing import List, Text
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
from tfx.orchestration.airflow.airflow_dag_runner import AirflowPipelineConfig
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
# TODO(jyzhao): rename to chicago_taxi_airflow.
_pipeline_name = 'chicago_taxi_simple'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
                              'metadata.db')
# Pipeline arguments for Beam powered Components.
_beam_pipeline_args = [
    '--direct_running_mode=multi_processing',
    # 0 means auto-detect based on the number of CPUs available
    # during execution time.
    '--direct_num_workers=0',
]
# Airflow-specific configs; these will be passed directly to airflow.
# schedule_interval=None means the DAG runs only when triggered manually.
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2019, 1, 1),
}
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
                     module_file: Text, serving_model_dir: Text,
                     metadata_path: Text,
                     beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
  """Implements the chicago taxi pipeline with TFX.

  Args:
    pipeline_name: name of the pipeline, also used to group ML-metadata runs.
    pipeline_root: root directory under which pipeline artifacts are written.
    data_root: directory containing the input CSV data.
    module_file: Python module with the user functions for Transform/Trainer.
    serving_model_dir: directory where Pusher exports blessed models.
    metadata_path: path to the sqlite ML-metadata database file.
    beam_pipeline_args: arguments forwarded to the Beam-powered components.

  Returns:
    A configured TFX pipeline wiring all components together.
  """
  # Brings data into the pipeline or otherwise joins/converts training data.
  example_gen = CsvExampleGen(input_base=data_root)
  # Computes statistics over data for visualization and example validation.
  statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
  # Generates schema based on statistics files.
  schema_gen = SchemaGen(
      statistics=statistics_gen.outputs['statistics'],
      infer_feature_shape=False)
  # Performs anomaly detection based on statistics and data schema.
  example_validator = ExampleValidator(
      statistics=statistics_gen.outputs['statistics'],
      schema=schema_gen.outputs['schema'])
  # Performs transformations and feature engineering in training and serving.
  transform = Transform(
      examples=example_gen.outputs['examples'],
      schema=schema_gen.outputs['schema'],
      module_file=module_file)
  # Uses user-provided Python function that implements a model using TF-Learn.
  trainer = Trainer(
      module_file=module_file,
      transformed_examples=transform.outputs['transformed_examples'],
      schema=schema_gen.outputs['schema'],
      transform_graph=transform.outputs['transform_graph'],
      train_args=trainer_pb2.TrainArgs(num_steps=10000),
      eval_args=trainer_pb2.EvalArgs(num_steps=5000))
  # Get the latest blessed model for model validation.
  model_resolver = ResolverNode(
      instance_name='latest_blessed_model_resolver',
      resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
  # perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(signature_name='eval')],
      slicing_specs=[
          # The empty SlicingSpec evaluates over the whole dataset.
          tfma.SlicingSpec(),
          tfma.SlicingSpec(feature_keys=['trip_start_hour'])
      ],
      metrics_specs=[
          tfma.MetricsSpec(
              thresholds={
                  'accuracy':
                      tfma.config.MetricThreshold(
                          value_threshold=tfma.GenericValueThreshold(
                              lower_bound={'value': 0.6}),
                          # Tiny negative tolerance: the candidate must not be
                          # meaningfully worse than the baseline.
                          change_threshold=tfma.GenericChangeThreshold(
                              direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                              absolute={'value': -1e-10}))
              })
      ])
  evaluator = Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      # Change threshold will be ignored if there is no baseline (first run).
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],
      push_destination=pusher_pb2.PushDestination(
          filesystem=pusher_pb2.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  return pipeline.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      components=[
          example_gen, statistics_gen, schema_gen, example_validator, transform,
          trainer, model_resolver, evaluator, pusher
      ],
      enable_cache=True,
      metadata_connection_config=metadata.sqlite_metadata_connection_config(
          metadata_path),
      beam_pipeline_args=beam_pipeline_args)
# 'DAG' below needs to be kept for Airflow to detect the dag.
# This runs at import time: Airflow imports this module and picks up the
# module-level 'DAG' object produced by the AirflowDagRunner.
DAG = AirflowDagRunner(AirflowPipelineConfig(_airflow_config)).run(
    _create_pipeline(
        pipeline_name=_pipeline_name,
        pipeline_root=_pipeline_root,
        data_root=_data_root,
        module_file=_module_file,
        serving_model_dir=_serving_model_dir,
        metadata_path=_metadata_path,
        beam_pipeline_args=_beam_pipeline_args))
| 41.497297 | 80 | 0.736225 |
9ae3791e3126427f81b53f648526a53955c24d6e | 20,984 | py | Python | dev/breeze/src/airflow_breeze/commands/production_image_commands.py | MoBagel/airflow | 0905e386f17e34d96f6ee575404c62b13242c75d | [
"Apache-2.0"
] | null | null | null | dev/breeze/src/airflow_breeze/commands/production_image_commands.py | MoBagel/airflow | 0905e386f17e34d96f6ee575404c62b13242c75d | [
"Apache-2.0"
] | null | null | null | dev/breeze/src/airflow_breeze/commands/production_image_commands.py | MoBagel/airflow | 0905e386f17e34d96f6ee575404c62b13242c75d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import multiprocessing as mp
import os
import sys
from typing import List, Optional, Tuple
import click
from airflow_breeze.commands.main_command import main
from airflow_breeze.global_constants import ALLOWED_INSTALLATION_METHODS, DEFAULT_EXTRAS
from airflow_breeze.params.build_prod_params import BuildProdParams
from airflow_breeze.utils.common_options import (
option_additional_dev_apt_command,
option_additional_dev_apt_deps,
option_additional_dev_apt_env,
option_additional_extras,
option_additional_python_deps,
option_additional_runtime_apt_command,
option_additional_runtime_apt_deps,
option_additional_runtime_apt_env,
option_airflow_constraints_mode_prod,
option_airflow_constraints_reference_build,
option_answer,
option_debian_version,
option_dev_apt_command,
option_dev_apt_deps,
option_docker_cache,
option_dry_run,
option_empty_image,
option_github_repository,
option_github_token,
option_github_username,
option_image_name,
option_image_tag,
option_install_providers_from_sources,
option_parallelism,
option_platform,
option_prepare_buildx_cache,
option_pull_image,
option_push_image,
option_python,
option_python_versions,
option_run_in_parallel,
option_runtime_apt_command,
option_runtime_apt_deps,
option_tag_as_latest,
option_upgrade_to_newer_dependencies,
option_verbose,
option_verify_image,
option_wait_for_image,
)
from airflow_breeze.utils.console import get_console
from airflow_breeze.utils.custom_param_types import BetterChoice
from airflow_breeze.utils.docker_command_utils import (
build_cache,
perform_environment_checks,
prepare_docker_build_command,
prepare_docker_build_from_input,
)
from airflow_breeze.utils.image import run_pull_image, run_pull_in_parallel, tag_image_as_latest
from airflow_breeze.utils.parallel import check_async_run_results
from airflow_breeze.utils.path_utils import AIRFLOW_SOURCES_ROOT, DOCKER_CONTEXT_DIR
from airflow_breeze.utils.python_versions import get_python_version_list
from airflow_breeze.utils.registry import login_to_github_docker_registry
from airflow_breeze.utils.run_tests import verify_an_image
from airflow_breeze.utils.run_utils import filter_out_none, fix_group_permissions, run_command
# Grouping of the PROD-image commands shown in breeze's help output.
PRODUCTION_IMAGE_TOOLS_COMMANDS = {
    "name": "Production Image tools",
    "commands": [
        "build-prod-image",
        "pull-prod-image",
        "verify-prod-image",
    ],
}
# Per-command option layout used to render the help panels: each entry maps a
# command to a list of named option groups in display order.
PRODUCTION_IMAGE_TOOLS_PARAMETERS = {
    "breeze build-prod-image": [
        {
            "name": "Basic usage",
            "options": [
                "--python",
                "--install-airflow-version",
                "--upgrade-to-newer-dependencies",
                "--debian-version",
                "--image-tag",
                "--tag-as-latest",
                "--docker-cache",
            ],
        },
        {
            "name": "Building images in parallel",
            "options": [
                "--run-in-parallel",
                "--parallelism",
                "--python-versions",
            ],
        },
        {
            "name": "Options for customizing images",
            "options": [
                "--install-providers-from-sources",
                "--airflow-extras",
                "--airflow-constraints-mode",
                "--airflow-constraints-reference",
                "--additional-python-deps",
                "--additional-extras",
                "--additional-runtime-apt-deps",
                "--additional-runtime-apt-env",
                "--additional-runtime-apt-command",
                "--additional-dev-apt-deps",
                "--additional-dev-apt-env",
                "--additional-dev-apt-command",
                "--runtime-apt-deps",
                "--runtime-apt-command",
                "--dev-apt-deps",
                "--dev-apt-command",
            ],
        },
        {
            "name": "Customization options (for specific customization needs)",
            "options": [
                "--install-packages-from-context",
                "--airflow-is-in-context",
                "--cleanup-context",
                "--disable-mysql-client-installation",
                "--disable-mssql-client-installation",
                "--disable-postgres-client-installation",
                "--disable-airflow-repo-cache",
                "--install-airflow-reference",
                "--installation-method",
            ],
        },
        {
            "name": "Preparing cache and push (for maintainers and CI)",
            "options": [
                "--github-token",
                "--github-username",
                "--platform",
                "--login-to-github-registry",
                "--push-image",
                "--empty-image",
                "--prepare-buildx-cache",
            ],
        },
    ],
    "breeze pull-prod-image": [
        {
            "name": "Pull image flags",
            "options": [
                "--image-tag",
                "--python",
                "--github-token",
                "--verify-image",
                "--wait-for-image",
                "--tag-as-latest",
            ],
        },
        {
            "name": "Parallel running",
            "options": [
                "--run-in-parallel",
                "--parallelism",
                "--python-versions",
            ],
        },
    ],
    "breeze verify-prod-image": [
        {
            "name": "Verify image flags",
            "options": [
                "--image-name",
                "--python",
                "--image-tag",
                "--pull-image",
            ],
        }
    ],
}
def start_building(prod_image_params: BuildProdParams, dry_run: bool, verbose: bool):
    """Run the preparatory steps shared by every PROD image build.

    Optionally wipes docker-context-files, sanity-checks its contents against
    the requested installation mode, and logs in to the GitHub registry when
    the build will push or prepare a buildx cache.
    """
    if prod_image_params.cleanup_context:
        clean_docker_context_files(verbose=verbose, dry_run=dry_run)
    check_docker_context_files(prod_image_params.install_packages_from_context)
    registry_needed = prod_image_params.prepare_buildx_cache or prod_image_params.push_image
    if registry_needed:
        login_to_github_docker_registry(image_params=prod_image_params, dry_run=dry_run, verbose=verbose)
def run_build_in_parallel(
    image_params_list: List[BuildProdParams],
    python_version_list: List[str],
    parallelism: int,
    dry_run: bool,
    verbose: bool,
) -> None:
    """Build the given PROD images in parallel, one worker per image.

    :param image_params_list: build parameters, one entry per image to build
    :param python_version_list: Python versions (used only in the log message)
    :param parallelism: number of worker processes in the pool
    :param dry_run: do not execute "write" commands - just print what would happen
    :param verbose: print commands when running
    """
    get_console().print(
        f"\n[info]Building with parallelism = {parallelism} for the images: {python_version_list}:"
    )
    pool = mp.Pool(parallelism)
    try:
        results = [
            pool.apply_async(
                run_build_production_image,
                args=(
                    verbose,
                    dry_run,
                    image_param,
                    True,
                ),
            )
            for image_param in image_params_list
        ]
        check_async_run_results(results)
    finally:
        # Always reap the worker processes - the original only called close()
        # on the success path (and never join()), leaking workers when result
        # checking raised or exited.
        pool.close()
        pool.join()
@option_verbose
@option_dry_run
@option_answer
@main.command(name='build-prod-image')
@option_python
@option_run_in_parallel
@option_parallelism
@option_python_versions
@option_upgrade_to_newer_dependencies
@option_platform
@option_debian_version
@option_github_repository
@option_github_token
@option_github_username
@option_docker_cache
@option_image_tag
@option_prepare_buildx_cache
@option_push_image
@option_empty_image
@option_airflow_constraints_mode_prod
@click.option(
    '--installation-method',
    help="Install Airflow from: sources or PyPI.",
    type=BetterChoice(ALLOWED_INSTALLATION_METHODS),
)
@option_install_providers_from_sources
@click.option(
    '--airflow-is-in-context',
    help="If set Airflow is installed from docker-context-files only rather than from PyPI or sources.",
    is_flag=True,
)
@click.option(
    '--install-packages-from-context',
    help='Install wheels from local docker-context-files when building image.',
    is_flag=True,
)
@click.option(
    '--cleanup-context',
    help='Clean up docker context files before running build (cannot be used together'
    ' with --install-packages-from-context).',
    is_flag=True,
)
@click.option(
    '--airflow-extras',
    default=",".join(DEFAULT_EXTRAS),
    show_default=True,
    help="Extras to install by default.",
)
@click.option('--disable-mysql-client-installation', help="Do not install MySQL client.", is_flag=True)
@click.option('--disable-mssql-client-installation', help="Do not install MsSQl client.", is_flag=True)
@click.option('--disable-postgres-client-installation', help="Do not install Postgres client.", is_flag=True)
@click.option(
    '--disable-airflow-repo-cache',
    help="Disable cache from Airflow repository during building.",
    is_flag=True,
)
@click.option(
    '--install-airflow-reference',
    help="Install Airflow using GitHub tag or branch.",
)
@option_airflow_constraints_reference_build
@click.option('-V', '--install-airflow-version', help="Install version of Airflow from PyPI.")
@option_additional_extras
@option_additional_dev_apt_deps
@option_additional_runtime_apt_deps
@option_additional_python_deps
@option_additional_dev_apt_command
@option_additional_dev_apt_env
@option_additional_runtime_apt_env
@option_additional_runtime_apt_command
@option_dev_apt_command
@option_dev_apt_deps
@option_runtime_apt_command
@option_runtime_apt_deps
@option_tag_as_latest
def build_prod_image(
    verbose: bool,
    dry_run: bool,
    run_in_parallel: bool,
    parallelism: int,
    python_versions: str,
    answer: Optional[str],
    **kwargs,
):
    """
    Build Production image. Include building multiple images for all or selected Python versions sequentially.

    All remaining CLI options land in ``kwargs`` and are forwarded (with
    ``None`` values filtered out) to :class:`BuildProdParams`.
    """
    def run_build(prod_image_params: BuildProdParams) -> None:
        # Sequential build of a single image; exits the process on failure so
        # the CLI propagates the build's return code.
        return_code, info = run_build_production_image(
            verbose=verbose, dry_run=dry_run, prod_image_params=prod_image_params, parallel=False
        )
        if return_code != 0:
            get_console().print(f"[error]Error when building image! {info}")
            sys.exit(return_code)
    perform_environment_checks(verbose=verbose)
    parameters_passed = filter_out_none(**kwargs)
    fix_group_permissions(verbose=verbose)
    if run_in_parallel:
        python_version_list = get_python_version_list(python_versions)
        params_list: List[BuildProdParams] = []
        # One BuildProdParams per Python version; all share the other options.
        for python in python_version_list:
            params = BuildProdParams(**parameters_passed)
            params.python = python
            params.answer = answer
            params_list.append(params)
        # Pre-build steps (context cleanup/check, registry login) run once,
        # using the first params object.
        start_building(prod_image_params=params_list[0], dry_run=dry_run, verbose=verbose)
        run_build_in_parallel(
            image_params_list=params_list,
            python_version_list=python_version_list,
            parallelism=parallelism,
            dry_run=dry_run,
            verbose=verbose,
        )
    else:
        params = BuildProdParams(**parameters_passed)
        start_building(prod_image_params=params, dry_run=dry_run, verbose=verbose)
        run_build(prod_image_params=params)
@main.command(name='pull-prod-image')
@option_verbose
@option_dry_run
@option_python
@option_github_repository
@option_run_in_parallel
@option_parallelism
@option_python_versions
@option_github_token
@option_image_tag
@option_wait_for_image
@option_tag_as_latest
@option_verify_image
@click.argument('extra_pytest_args', nargs=-1, type=click.UNPROCESSED)
def pull_prod_image(
    verbose: bool,
    dry_run: bool,
    python: str,
    github_repository: str,
    run_in_parallel: bool,
    parallelism: int,
    python_versions: str,
    github_token: str,
    image_tag: Optional[str],
    wait_for_image: bool,
    tag_as_latest: bool,
    verify_image: bool,
    extra_pytest_args: Tuple,
):
    """Pull and optionally verify Production images - possibly in parallel for all Python versions."""
    perform_environment_checks(verbose=verbose)
    if run_in_parallel:
        python_version_list = get_python_version_list(python_versions)
        # One parameter object per Python version; the remaining options are
        # shared across all pulled images.
        prod_image_params_list = [
            BuildProdParams(
                image_tag=image_tag,
                python=python,
                github_repository=github_repository,
                github_token=github_token,
            )
            for python in python_version_list
        ]
        run_pull_in_parallel(
            dry_run=dry_run,
            parallelism=parallelism,
            image_params_list=prod_image_params_list,
            python_version_list=python_version_list,
            verbose=verbose,
            verify_image=verify_image,
            wait_for_image=wait_for_image,
            tag_as_latest=tag_as_latest,
            extra_pytest_args=extra_pytest_args if extra_pytest_args is not None else (),
        )
    else:
        image_params = BuildProdParams(
            image_tag=image_tag, python=python, github_repository=github_repository, github_token=github_token
        )
        return_code, info = run_pull_image(
            image_params=image_params,
            dry_run=dry_run,
            verbose=verbose,
            wait_for_image=wait_for_image,
            tag_as_latest=tag_as_latest,
            poll_time=10.0,  # seconds between retries while waiting for the image
            parallel=False,
        )
        # In the single-image case failures are reported here; the parallel
        # path handles result reporting inside run_pull_in_parallel.
        if return_code != 0:
            get_console().print(f"[error]There was an error when pulling PROD image: {info}[/]")
            sys.exit(return_code)
@main.command(
    name='verify-prod-image',
    context_settings=dict(
        ignore_unknown_options=True,
        allow_extra_args=True,
    ),
)
@option_verbose
@option_dry_run
@option_python
@option_github_repository
@option_image_tag
@option_image_name
@option_pull_image
@click.option(
    '--slim-image',
    help='The image to verify is slim and non-slim tests should be skipped.',
    is_flag=True,
)
@click.argument('extra_pytest_args', nargs=-1, type=click.UNPROCESSED)
def verify_prod_image(
    verbose: bool,
    dry_run: bool,
    python: str,
    github_repository: str,
    image_name: str,
    image_tag: str,
    pull_image: bool,
    slim_image: bool,
    extra_pytest_args: Tuple,
):
    """Verify Production image."""
    perform_environment_checks(verbose=verbose)
    if image_name is None:
        # No explicit name given - derive the canonical one from the
        # build parameters for this Python version / tag / repository.
        image_name = BuildProdParams(
            python=python, image_tag=image_tag, github_repository=github_repository
        ).airflow_image_name_with_tag
    if pull_image:
        run_command(["docker", "pull", image_name], verbose=verbose, dry_run=dry_run, check=True)
    get_console().print(f"[info]Verifying PROD image: {image_name}[/]")
    return_code, _ = verify_an_image(
        image_name=image_name,
        verbose=verbose,
        dry_run=dry_run,
        image_type='PROD',
        extra_pytest_args=extra_pytest_args,
        slim_image=slim_image,
    )
    sys.exit(return_code)
def clean_docker_context_files(verbose: bool, dry_run: bool):
    """
    Cleans up docker context files folder - leaving only .README.md there.

    Directories are skipped: ``Path.unlink`` raises ``IsADirectoryError`` for
    them (and that error is not covered by the FileNotFoundError suppression
    below), so the original implementation crashed on any sub-directory.

    :param verbose: print commands when running
    :param dry_run: do not execute "write" commands - just print what would happen
    """
    if verbose or dry_run:
        get_console().print("[info]Cleaning docker-context-files[/]")
    if dry_run:
        return
    # Suppress FileNotFoundError in case the folder (or a file deleted by a
    # concurrent run) disappears while we iterate.
    with contextlib.suppress(FileNotFoundError):
        for file_to_delete in DOCKER_CONTEXT_DIR.glob('**/*'):
            if file_to_delete.name == '.README.md' or file_to_delete.is_dir():
                continue
            file_to_delete.unlink()
def check_docker_context_files(install_packages_from_context: bool):
    """
    Quick check - if we want to install from docker-context-files we expect some packages there but if
    we don't - we don't expect them, and they might invalidate Docker cache.

    This method exits with an error if what we see is unexpected for given operation.

    :param install_packages_from_context: whether we want to install from docker-context-files
    """
    context_file = DOCKER_CONTEXT_DIR.glob('**/*')
    number_of_context_files = len(
        [context for context in context_file if context.is_file() and context.name != '.README.md']
    )
    if number_of_context_files == 0:
        if install_packages_from_context:
            get_console().print('[warning]\nERROR! You want to install packages from docker-context-files')
            get_console().print('[warning]\n but there are no packages to install in this folder.')
            sys.exit(1)
    else:
        if not install_packages_from_context:
            get_console().print(
                '[warning]\n ERROR! There are some extra files in docker-context-files except README.md'
            )
            get_console().print('[warning]\nAnd you did not choose --install-packages-from-context flag')
            get_console().print(
                '[warning]\nThis might result in unnecessary cache invalidation and long build times'
            )
            # The original built this message with a backslash line
            # continuation inside the string literal, which embedded the
            # source indentation into the user-facing text.
            get_console().print(
                '[warning]\nExiting now - please restart the command with --cleanup-context switch'
            )
            sys.exit(1)
def run_build_production_image(
    verbose: bool, dry_run: bool, prod_image_params: BuildProdParams, parallel: bool
) -> Tuple[int, str]:
    """
    Builds PROD image:
    * fixes group permissions for files (to improve caching when umask is 002)
    * converts all the parameters received via kwargs into BuildProdParams (including cache)
    * prints info about the image to build
    * removes docker-context-files if requested
    * performs quick check if the files are present in docker-context-files if expected
    * logs int to docker registry on CI if build cache is being executed
    * removes "tag" for previously build image so that inline cache uses only remote image
    * constructs docker-compose command to run based on parameters passed
    * run the build command
    * update cached information that the build completed and saves checksums of all files
      for quick future check if the build is needed
    :param verbose: print commands when running
    :param dry_run: do not execute "write" commands - just print what would happen
    :param prod_image_params: PROD image parameters
    :param parallel: whether this build runs as one of several parallel workers
        (disables the grouped output of run_command)
    :return: tuple of (return code, human readable info about the build)
    """
    # Multi-platform builds cannot be loaded into the local docker daemon, so
    # they only make sense when pushing or preparing a buildx cache.
    if (
        prod_image_params.is_multi_platform()
        and not prod_image_params.push_image
        and not prod_image_params.prepare_buildx_cache
    ):
        get_console().print(
            "\n[red]You cannot use multi-platform build without using --push-image flag"
            " or preparing buildx cache![/]\n"
        )
        return 1, "Error: building multi-platform image without --push-image."
    get_console().print(f"\n[info]Building PROD Image for Python {prod_image_params.python}\n")
    if prod_image_params.prepare_buildx_cache:
        build_command_result = build_cache(
            image_params=prod_image_params, dry_run=dry_run, verbose=verbose, parallel=parallel
        )
    else:
        if prod_image_params.empty_image:
            # Build a minimal placeholder image by piping "FROM scratch" as
            # the Dockerfile via stdin; requires BuildKit.
            env = os.environ.copy()
            env['DOCKER_BUILDKIT'] = "1"
            get_console().print(f"\n[info]Building empty PROD Image for Python {prod_image_params.python}\n")
            build_command_result = run_command(
                prepare_docker_build_from_input(image_params=prod_image_params),
                input="FROM scratch\n",
                verbose=verbose,
                dry_run=dry_run,
                cwd=AIRFLOW_SOURCES_ROOT,
                check=False,
                text=True,
                env=env,
            )
        else:
            # Regular build of the full PROD image.
            build_command_result = run_command(
                prepare_docker_build_command(
                    image_params=prod_image_params,
                    verbose=verbose,
                ),
                verbose=verbose,
                dry_run=dry_run,
                cwd=AIRFLOW_SOURCES_ROOT,
                check=False,
                text=True,
                enabled_output_group=not parallel,
            )
    if build_command_result.returncode == 0:
        if prod_image_params.tag_as_latest:
            # Tagging result replaces the build result so a failed tag is
            # reported as the overall failure.
            build_command_result = tag_image_as_latest(prod_image_params, dry_run, verbose)
    return build_command_result.returncode, f"Image build: {prod_image_params.python}"
| 35.208054 | 110 | 0.657978 |
86f14b68a94753b24dda4d7f4d8d991c6f745a45 | 4,203 | py | Python | saleor/dashboard/product/urls.py | fcopantoja/saleor-ecommerce | d8745de423e1afc7aa60a59dbe05fec43b67ba80 | [
"BSD-3-Clause"
] | null | null | null | saleor/dashboard/product/urls.py | fcopantoja/saleor-ecommerce | d8745de423e1afc7aa60a59dbe05fec43b67ba80 | [
"BSD-3-Clause"
] | 1 | 2022-02-13T22:52:38.000Z | 2022-02-13T22:52:38.000Z | saleor/dashboard/product/urls.py | fcopantoja/saleor-ecommerce | d8745de423e1afc7aa60a59dbe05fec43b67ba80 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
    # Product CRUD and listing.
    url(r'^$',
        views.product_list, name='product-list'),
    url(r'^(?P<pk>[0-9]+)/$',
        views.product_details, name='product-details'),
    url(r'^(?P<pk>[0-9]+)/publish/$',
        views.product_toggle_is_published, name='product-publish'),
    url(r'^add/select-type/$',
        views.product_select_type, name='product-add-select-type'),
    url(r'^add/(?P<type_pk>[0-9]+)/$',
        views.product_create, name='product-add'),
    url(r'^(?P<pk>[0-9]+)/update/$',
        views.product_edit, name='product-update'),
    url(r'^(?P<pk>[0-9]+)/delete/$',
        views.product_delete, name='product-delete'),
    url(r'^bulk-update/$',
        views.product_bulk_update, name='product-bulk-update'),
    url(r'^ajax/products/$',
        views.ajax_products_list, name='ajax-products'),
    # Product types.
    url(r'^types/$',
        views.product_type_list, name='product-type-list'),
    url(r'^types/add/$',
        views.product_type_create, name='product-type-add'),
    url(r'^types/(?P<pk>[0-9]+)/update/$',
        views.product_type_edit, name='product-type-update'),
    url(r'^types/(?P<pk>[0-9]+)/delete/$',
        views.product_type_delete, name='product-type-delete'),
    # Product variants (nested under a product).
    url(r'^(?P<product_pk>[0-9]+)/variants/(?P<variant_pk>[0-9]+)/$',
        views.variant_details, name='variant-details'),
    url(r'^(?P<product_pk>[0-9]+)/variants/add/$',
        views.variant_create, name='variant-add'),
    url(r'^(?P<product_pk>[0-9]+)/variants/(?P<variant_pk>[0-9]+)/update/$',
        views.variant_edit, name='variant-update'),
    url(r'^(?P<product_pk>[0-9]+)/variants/(?P<variant_pk>[0-9]+)/delete/$',
        views.variant_delete, name='variant-delete'),
    url(r'^(?P<product_pk>[0-9]+)/variants/(?P<variant_pk>[0-9]+)/images/$',
        views.variant_images, name='variant-images'),
    url(r'^ajax/variants/$',
        views.ajax_available_variants_list, name='ajax-available-variants'),
    url(r'^(?P<product_pk>[0-9]+)/recommendation/add/$',
        views.recommendation_create, name='recommendation-add'),
    # Product images.
    url(r'^(?P<product_pk>[0-9]+)/images/$',
        views.product_images, name='product-image-list'),
    url(r'^(?P<product_pk>[0-9]+)/images/add/$',
        views.product_image_create, name='product-image-add'),
    url(r'^(?P<product_pk>[0-9]+)/images/(?P<img_pk>[0-9]+)/$',
        views.product_image_edit, name='product-image-update'),
    url(r'^(?P<product_pk>[0-9]+)/images/(?P<img_pk>[0-9]+)/delete/$',
        views.product_image_delete, name='product-image-delete'),
    url(r'^(?P<product_pk>[0-9]+)/images/reorder/$',
        views.ajax_reorder_product_images, name='product-images-reorder'),
    url(r'^(?P<product_pk>[0-9]+)/images/upload/$',
        views.ajax_upload_image, name='product-images-upload'),
    # Product attributes and their choice values.
    # NOTE(review): these patterns lack the leading '^' anchor used everywhere
    # else - presumably intentional, but worth confirming.
    url(r'attributes/$',
        views.attribute_list, name='product-attributes'),
    url(r'attributes/(?P<pk>[0-9]+)/$',
        views.attribute_details, name='product-attribute-details'),
    url(r'attributes/add/$',
        views.attribute_create, name='product-attribute-add'),
    url(r'attributes/(?P<pk>[0-9]+)/update/$',
        views.attribute_edit, name='product-attribute-update'),
    url(r'attributes/(?P<pk>[0-9]+)/delete/$',
        views.attribute_delete, name='product-attribute-delete'),
    url(r'attributes/(?P<attribute_pk>[0-9]+)/value/add/$',
        views.attribute_choice_value_create,
        name='product-attribute-value-add'),
    url(r'attributes/(?P<attribute_pk>[0-9]+)/value/(?P<value_pk>[0-9]+)/update/$',  # noqa
        views.attribute_choice_value_edit,
        name='product-attribute-value-update'),
    url(r'attributes/(?P<attribute_pk>[0-9]+)/value/(?P<value_pk>[0-9]+)/delete/$',  # noqa
        views.attribute_choice_value_delete,
        name='product-attribute-value-delete'),
    url(r'attributes/(?P<attribute_pk>[0-9]+)/values/reorder/$',
        views.ajax_reorder_attribute_choice_values,
        name='product-attribute-values-reorder'),
    # Quotations.
    url(r'quotations/$', views.quotation_list,
        name='quotation-list'),
    url(r'quotations/(?P<pk>[0-9]+)/$',
        views.quotation_details, name='quotation-details'),
]
| 46.186813 | 91 | 0.627171 |
ccc2e54600aa7414465ca4249b5db8a63a0948af | 693 | py | Python | pygluu/kubernetes/gui/forms/license.py | WaqasAhmedLatif/cloud-native-edition | 1e6002f27ea971c153df59373e30d4506e9932dc | [
"Apache-2.0"
] | 23 | 2020-04-18T14:51:41.000Z | 2022-03-31T19:59:40.000Z | pygluu/kubernetes/gui/forms/license.py | WaqasAhmedLatif/cloud-native-edition | 1e6002f27ea971c153df59373e30d4506e9932dc | [
"Apache-2.0"
] | 236 | 2020-04-22T08:59:27.000Z | 2022-03-31T07:21:12.000Z | pygluu/kubernetes/gui/forms/license.py | WaqasAhmedLatif/cloud-native-edition | 1e6002f27ea971c153df59373e30d4506e9932dc | [
"Apache-2.0"
] | 23 | 2020-04-19T15:25:59.000Z | 2022-03-16T17:17:36.000Z | """
pygluu.kubernetes.gui.license
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains helpers to interact with user's input for gui license form
License terms and conditions for Gluu Cloud Native Edition:
https://www.apache.org/licenses/LICENSE-2.0
"""
from flask_wtf import FlaskForm
from wtforms import BooleanField
from wtforms.validators import DataRequired
class LicenseForm(FlaskForm):
    """
    License form to accept the Apache 2.0 license presented to the user.

    Fields:
        accept_gluu_license (BooleanField, required): checkbox that must be
            ticked for the form to validate.
    """
    # DataRequired rejects an unchecked box, so validation only passes once
    # the license has been accepted.
    accept_gluu_license = BooleanField(
        "I accept the Gluu license stated above",
        validators=[DataRequired(message="License has not been accepted")])
| 26.653846 | 79 | 0.708514 |
0fb550c2eb372d2375da5aa510687ddf63136aa0 | 1,398 | py | Python | examples/bayes_2afc.py | esc/Psignifit-3.x | 2a2a8a40f39d94f8fee99cb0ba4b9595ae216f88 | [
"MIT"
] | 6 | 2015-07-14T23:38:52.000Z | 2018-10-12T08:15:36.000Z | examples/bayes_2afc.py | esc/Psignifit-3.x | 2a2a8a40f39d94f8fee99cb0ba4b9595ae216f88 | [
"MIT"
] | null | null | null | examples/bayes_2afc.py | esc/Psignifit-3.x | 2a2a8a40f39d94f8fee99cb0ba4b9595ae216f88 | [
"MIT"
] | 5 | 2015-11-25T10:11:04.000Z | 2018-10-12T08:17:48.000Z | #!/usr/bin/env python
# This file illustrates the analysis of 2afc data using bayesian
# inference and markov chain monte carlo sampling.
#
# The analysis is explained in more detail in the "Quick start to psignifit"
# that can be found at http://psignifit.sourceforge.net/TUTORIAL.html
#
# NOTE: this is a Python 2 script (it uses xrange below).
from pypsignifit import *
from pylab import figure
# The data are from a 2afc task
nafc = 2
# Now we get the data: one (intensity, n_correct, n_trials) triple per block
stimulus_intensities = [0.0,2.0,4.0,6.0,8.0,10.0]
number_of_correct = [34,32,40,48,50,48]
number_of_trials = [50]*len(stimulus_intensities)
data = zip(stimulus_intensities,number_of_correct,number_of_trials)
# Select priors
# These priors are clearly informative priors. This means they impose considerable
# prior information on the inference. This is useful in this case to illustrate
# bayesian analysis in general. However, in many cases, other, less informative priors
# might be of interest.
priors = ( 'Gauss(0,5)', 'Gamma(1,3)', 'Beta(2,30)' )
# Perform the actual inference
mcmc = BayesInference ( data, priors=priors, nafc=nafc )
# Add some more chains for convergence diagnostic (two extra starting points)
mcmc.sample ( start = (0,1,0.01) )
mcmc.sample ( start = (6,11,0.3) )
# Generate convergence plots for all three parameters
for i in xrange ( 3 ):
    ConvergenceMCMC ( mcmc, i )
# Assess goodness of fit
GoodnessOfFit ( mcmc )
# See parameter plot
ParameterPlot ( mcmc )
# Show everything
show()
cd4f909ca4723c6d6109be81fb746149d501c1bc | 3,009 | py | Python | migrations/env.py | vissssa/ai_replay | f695a58cdd6cd68d163c575da986b4f2275217de | [
"MIT"
] | null | null | null | migrations/env.py | vissssa/ai_replay | f695a58cdd6cd68d163c575da986b4f2275217de | [
"MIT"
] | null | null | null | migrations/env.py | vissssa/ai_replay | f695a58cdd6cd68d163c575da986b4f2275217de | [
"MIT"
] | null | null | null | from __future__ import with_statement
import logging
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option(
'sqlalchemy.url', current_app.config.get(
'SQLALCHEMY_DATABASE_URI').replace('%', '%%'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the Alembic context with only a database URL (no Engine),
    so a DBAPI does not even need to be installed; context.execute()
    emits the SQL to the script output instead of a live connection.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                # Clearing the directives list in place suppresses the
                # generation of an empty revision file.
                directives[:] = []
                logger.info('No changes in schema detected.')
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool,
    )
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            process_revision_directives=process_revision_directives,
            **current_app.extensions['migrate'].configure_args,
            # NOTE(review): if configure_args already contains these keys this
            # call raises a duplicate-keyword TypeError - confirm it does not.
            compare_type=True,  # also detect column type changes
            compare_server_default=True  # also detect server default changes
        )
        with context.begin_transaction():
            context.run_migrations()
# Alembic imports this module directly, so the migration mode (offline SQL
# script vs. live connection) is chosen here at import time.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| 30.393939 | 77 | 0.708541 |
cf627c83cd5101fc2412aa57e6f8731dc4aa65e8 | 274 | py | Python | stream/elastic/elastic/__init__.py | aakloul/connectors | 171bdc3441b9196ee7aef3f1f9524d8594da6425 | [
"Apache-2.0"
] | null | null | null | stream/elastic/elastic/__init__.py | aakloul/connectors | 171bdc3441b9196ee7aef3f1f9524d8594da6425 | [
"Apache-2.0"
] | null | null | null | stream/elastic/elastic/__init__.py | aakloul/connectors | 171bdc3441b9196ee7aef3f1f9524d8594da6425 | [
"Apache-2.0"
] | null | null | null | import os
__version__ = "5.2.1"
# Name of this connector's logger.
LOGGER_NAME = "elastic"
# Matches date-math expressions of the form "{<anchor>{<format>|<offset>}}":
#   modulo - anchor/rounding part, must contain "now"
#   format - optional date format between the inner braces
#   offset - optional part after "|" inside the inner braces
# (presumably Elasticsearch-style index date math, e.g.
#  "{now/d{YYYY.MM.DD|+01:00}}" - confirm against consumers)
RE_DATEMATH = (
    r"\{(?P<modulo>.*now[^{]*)(?:\{(?P<format>[^|]*)(?:\|(?P<offset>[^}]+))?\})?\}"
)
# Default date format applied when a date-math expression supplies none.
DM_DEFAULT_FMT = "YYYY.MM.DD"
# Absolute path of the package's bundled "data" directory.
__DATA_DIR__: str = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
| 27.4 | 84 | 0.587591 |
f23a78d1140918a573a2b022ead4b2f0573b83f9 | 4,147 | py | Python | olive/util/config.py | Jsewill/Olive-blockchain | ba0169a56d7e67cefd95dc1f1f60e9a19d5cd2c5 | [
"Apache-2.0"
] | 10 | 2021-08-01T17:15:15.000Z | 2021-09-16T08:04:46.000Z | olive/util/config.py | Jsewill/Olive-blockchain | ba0169a56d7e67cefd95dc1f1f60e9a19d5cd2c5 | [
"Apache-2.0"
] | 8 | 2021-08-06T08:11:13.000Z | 2021-11-03T20:49:37.000Z | olive/util/config.py | Jsewill/Olive-blockchain | ba0169a56d7e67cefd95dc1f1f60e9a19d5cd2c5 | [
"Apache-2.0"
] | 7 | 2021-08-07T06:45:36.000Z | 2022-03-15T08:43:24.000Z | import argparse
import os
import shutil
import sys
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union
import pkg_resources
import yaml
from olive.util.path import mkdir
def initial_config_file(filename: Union[str, Path]) -> str:
    """Return the packaged default config text for *filename*.

    Reads the resource named ``initial-<filename>`` bundled next to this
    module and decodes it to text.
    """
    # FIX: the resource name previously did not interpolate the filename,
    # so every lookup targeted the same literal (nonexistent) resource.
    return pkg_resources.resource_string(__name__, f"initial-{filename}").decode()
def create_default_olive_config(root_path: Path, filenames=("config.yaml",)) -> None:
    """Materialize the packaged default config file(s) under ``<root_path>/config``.

    :param root_path: installation root containing the ``config`` directory
    :param filenames: names of config files to write from packaged defaults
    """
    # FIX: the default used to be a mutable list; a tuple avoids the shared
    # mutable-default pitfall and iterates identically.
    for filename in filenames:
        default_config_file_data = initial_config_file(filename)
        path = config_path_for_filename(root_path, filename)
        mkdir(path.parent)  # ensure the config directory exists first
        with open(path, "w") as f:
            f.write(default_config_file_data)
def config_path_for_filename(root_path: Path, filename: Union[str, Path]) -> Path:
    """Resolve *filename* to a concrete config path.

    Absolute filenames pass through untouched; relative ones are anchored
    under ``<root_path>/config``.
    """
    candidate = Path(filename)
    if not candidate.is_absolute():
        candidate = root_path / "config" / filename
    return candidate
def save_config(root_path: Path, filename: Union[str, Path], config_data: Any):
    """Persist *config_data* as YAML at the resolved config path.

    The data is first written to a pid-suffixed sibling file and then moved
    into place, so readers never observe a half-written config.
    """
    path = config_path_for_filename(root_path, filename)
    tmp_path = path.with_suffix("." + str(os.getpid()))
    with open(tmp_path, "w") as f:
        yaml.safe_dump(config_data, f)
    shutil.move(str(tmp_path), path)
def load_config(
    root_path: Path,
    filename: Union[str, Path],
    sub_config: Optional[str] = None,
    exit_on_error=True,
) -> Dict:
    """Load a YAML config file, optionally returning only one top-level key.

    :param root_path: installation root containing the ``config`` directory
    :param filename: config file name (or absolute path)
    :param sub_config: if given, return only this top-level section
    :param exit_on_error: when True a missing file terminates the process;
        otherwise a ``ValueError`` is raised
    """
    path = config_path_for_filename(root_path, filename)
    if not path.is_file():
        if not exit_on_error:
            raise ValueError("Config not found")
        print(f"can't find {path}")
        print("** please run `olive init` to migrate or create new config files **")
        # TODO: fix this hack
        sys.exit(-1)
    # FIX: close the file deterministically instead of leaking the handle
    # opened inline in the yaml.safe_load(...) call.
    with open(path, "r") as f:
        r = yaml.safe_load(f)
    if sub_config is not None:
        r = r.get(sub_config)
    return r
def load_config_cli(root_path: Path, filename: str, sub_config: Optional[str] = None) -> Dict:
    """
    Loads configuration from the specified filename, in the config directory,
    and then overrides any properties using the passed in command line arguments.
    Nested properties in the config file can be used in the command line with ".",
    for example --farmer_peer.host. Does not support lists.
    """
    flat = flatten_properties(load_config(root_path, filename, sub_config))
    parser = argparse.ArgumentParser()
    for name, default in flat.items():
        if type(default) is list:
            continue  # list-valued properties cannot be overridden on the CLI
        arg_type: Callable = str2bool if type(default) is bool else type(default)  # type: ignore
        parser.add_argument(f"--{name}", type=arg_type, dest=name)
    for name, value in vars(parser.parse_args()).items():
        if value is not None:
            flat[name] = value
    return unflatten_properties(flat)
def flatten_properties(config: Dict) -> Dict:
    """Flatten nested dicts into a single level with dot-joined keys."""
    flat: Dict = {}
    for key, value in config.items():
        if type(value) is not dict:
            flat[key] = value
            continue
        for sub_key, sub_value in flatten_properties(value).items():
            flat[key + "." + sub_key] = sub_value
    return flat
def unflatten_properties(config: Dict) -> Dict:
    """Rebuild a nested dict from dot-joined flat keys."""
    nested: Dict = {}
    for key, value in config.items():
        if "." not in key:
            nested[key] = value
        else:
            add_property(nested, key, value)
    return nested
def add_property(d: Dict, partial_key: str, value: Any):
    """Insert *value* into nested dict *d* at the dotted *partial_key* path."""
    head, tail = partial_key.split(".", maxsplit=1)
    node = d.setdefault(head, {})
    if "." in tail:
        add_property(node, tail, value)
    else:
        node[tail] = value
def str2bool(v: Union[str, bool]) -> bool:
    """Coerce an argparse string (or a bool) into a bool.

    Accepts common yes/no spellings case-insensitively; raises
    ``argparse.ArgumentTypeError`` for anything else.
    """
    # Source from https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    if isinstance(v, bool):
        return v
    # FIX: the original tuples also listed "True"/"False", which could never
    # match after .lower(); the lowercase spellings already cover them.
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    elif lowered in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("Boolean value expected.")
| 32.653543 | 99 | 0.657102 |
2cd788b063e9520a4e0256efcc1dd47544f968a3 | 9,282 | py | Python | Platforms/OnePlus6TPkg/PlatformCI/PlatformBuild.py | longjunyu2/mu_platform_oneplus6t | 1ab93dc8638ea0d316cd7323fd9026c52ae6ad56 | [
"BSD-2-Clause"
] | 2 | 2021-06-25T15:41:14.000Z | 2021-06-30T03:38:54.000Z | Platforms/OnePlus6TPkg/PlatformCI/PlatformBuild.py | longjunyu2/mu_platform_oneplus6t | 1ab93dc8638ea0d316cd7323fd9026c52ae6ad56 | [
"BSD-2-Clause"
] | null | null | null | Platforms/OnePlus6TPkg/PlatformCI/PlatformBuild.py | longjunyu2/mu_platform_oneplus6t | 1ab93dc8638ea0d316cd7323fd9026c52ae6ad56 | [
"BSD-2-Clause"
] | 1 | 2021-09-19T18:35:36.000Z | 2021-09-19T18:35:36.000Z | # @file
# Script to Build Mu OnePlus6T UEFI firmware
#
# Copyright (c) 2021, Junyu Long <ljy122@qq.com>.
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import logging
import io
from edk2toolext.environment import shell_environment
from edk2toolext.environment.uefi_build import UefiBuilder
from edk2toolext.invocables.edk2_platform_build import BuildSettingsManager
from edk2toolext.invocables.edk2_setup import SetupSettingsManager, RequiredSubmodule
from edk2toolext.invocables.edk2_update import UpdateSettingsManager
from edk2toolext.invocables.edk2_pr_eval import PrEvalSettingsManager
from edk2toollib.utility_functions import RunCmd
# ####################################################################################### #
# Common Configuration #
# ####################################################################################### #
class CommonPlatform():
    """Common settings for this platform; static data shared by the
    different stuart invocables (setup/update/build)."""
    PackagesSupported = ("OnePlus6TPkg",)
    # FIX: was ("AARCH64") -- a plain string, not a tuple -- so
    # set(GetArchitecturesSupported()) became a set of single characters and
    # SetArchitectures(["AARCH64"]) would reject the only supported arch.
    ArchSupported = ("AARCH64",)
    TargetsSupported = ("DEBUG", "RELEASE", "NOOPT")
    Scopes = ('oneplus6t', 'edk2-build')
    WorkspaceRoot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))  # MU_CHANGE - support new workspace
    PackagesPath = ("Platforms", "MU_BASECORE", "Common/MU", "Common/MU_TIANO", "Common/MU_OEM_SAMPLE", "Silicon/ARM/TIANO")  # MU_CHANGE add packages path

    @classmethod
    def GetDscName(cls, ArchCsv: str) -> str:
        """Return the platform DSC for the requested architectures.

        :param ArchCsv: csv string containing all architectures to build
            (unused: this platform has a single DSC)
        """
        return "OnePlus6TPkg.dsc"
# ####################################################################################### #
# Configuration for Update & Setup #
# ####################################################################################### #
class SettingsManager(UpdateSettingsManager, SetupSettingsManager, PrEvalSettingsManager):
    """Stuart setup/update/pr-eval settings for the OnePlus6T platform."""

    def GetPackagesSupported(self):
        """Iterable of edk2 packages (workspace-relative paths) this build supports."""
        return CommonPlatform.PackagesSupported

    def GetArchitecturesSupported(self):
        """Iterable of edk2 architectures supported by this build."""
        return CommonPlatform.ArchSupported

    def GetTargetsSupported(self):
        """Iterable of edk2 target tags supported by this build."""
        return CommonPlatform.TargetsSupported

    def GetRequiredSubmodules(self):
        """Return RequiredSubmodule objects parsed from .gitmodules.

        Rather than maintaining this list by hand for every new submodule,
        ask git for the configured submodule paths and register each one
        (recursively, since we cannot tell which need recursion).
        """
        submodules = []
        # intentionally avoid hard-coding entries here; parse .gitmodules
        output = io.StringIO()
        status = RunCmd("git", "config --file .gitmodules --get-regexp path",
                        workingdir=self.GetWorkspaceRoot(), outstream=output)
        # Expected output lines look like:
        #   submodule.<name>.path <path>
        if status == 0:
            for line in output.getvalue().splitlines():
                _, _, path = line.partition(" ")
                if path is not None and path not in [s.path for s in submodules]:
                    submodules.append(RequiredSubmodule(path, True))
        return submodules

    def SetArchitectures(self, list_of_requested_architectures):
        """Validate and remember the requested architecture list.

        Raises Exception when any requested architecture is unsupported.
        """
        unsupported = set(list_of_requested_architectures) - set(self.GetArchitecturesSupported())
        if(len(unsupported) > 0):
            errorString = ( "Unsupported Architecture Requested: " + " ".join(unsupported))
            logging.critical( errorString )
            raise Exception( errorString )
        self.ActualArchitectures = list_of_requested_architectures

    def GetWorkspaceRoot(self):
        """Absolute path of the workspace root."""
        return CommonPlatform.WorkspaceRoot

    def GetActiveScopes(self):
        """Tuple of scopes that should be active for this process."""
        return CommonPlatform.Scopes

    def FilterPackagesToTest(self, changedFilesList: list, potentialPackagesList: list) -> list:
        """Extra triggers for building this package that dependency analysis
        cannot detect from the changed-files list alone."""
        candidates = potentialPackagesList.copy()
        for changed in changedFilesList:
            # BaseTools changes can alter any build.
            # NOTE(review): os.path.splitext returns a (root, ext) tuple, so
            # this membership test is always True -- confirm whether
            # os.path.splitext(changed)[1] was intended.
            if "BaseTools" in changed:
                if os.path.splitext(changed) not in [".txt", ".md"]:
                    return candidates
            # Changes to the shared azure pipeline template affect the build.
            if "platform-build-run-steps.yml" in changed:
                return candidates
        return []

    def GetPlatformDscAndConfig(self) -> tuple:
        """Provide the platform DSC so Policy 4 can evaluate whether any of
        the changes will be built by it.

        Returns (<workspace relative path to dsc>, <dict of dsc key/values>).
        """
        dsc = CommonPlatform.GetDscName(",".join(self.ActualArchitectures))
        return (f"OnePlus6TPkg/{dsc}", {})

    def GetPackagesPath(self):  # MU_CHANGE - use packages path
        """Paths that should be mapped as edk2 PackagesPath."""
        return CommonPlatform.PackagesPath  # MU_CHANGE - use packages path
# ####################################################################################### #
# Actual Configuration for Platform Build #
# ####################################################################################### #
class PlatformBuilder( UefiBuilder, BuildSettingsManager):
    """Stuart build entry point for the OnePlus6T platform firmware."""

    def __init__(self):
        UefiBuilder.__init__(self)

    def AddCommandLineOptions(self, parserObj):
        """Add platform command line options to the argparser."""
        parserObj.add_argument('-a', "--arch", dest="build_arch", type=str, default="AARCH64",
                               help="Optional - CSV of architecture to build."
                                    "AARCH64 is the only valid option for this platform.")

    def RetrieveCommandLineOptions(self, args):
        """Propagate parsed command line options into the build variables."""
        arch_csv = " ".join(args.build_arch.upper().split(","))
        shell_environment.GetBuildVars().SetValue("TARGET_ARCH", arch_csv, "From CmdLine")
        dsc = CommonPlatform.GetDscName(args.build_arch)
        shell_environment.GetBuildVars().SetValue("ACTIVE_PLATFORM", f"OnePlus6TPkg/{dsc}", "From CmdLine")

    def GetWorkspaceRoot(self):
        """Absolute path of the workspace root."""
        return CommonPlatform.WorkspaceRoot

    def GetPackagesPath(self):
        """Workspace relative paths mapped as edk2 PackagesPath."""
        return CommonPlatform.PackagesPath  # MU_CHANGE - use packages path

    def GetActiveScopes(self):
        """Tuple of scopes that should be active for this process."""
        return CommonPlatform.Scopes

    def GetName(self):
        """Name of the product being built; used to name log files."""
        return "OnePlus6TPkg"

    def GetLoggingLevel(self, loggerType):
        """Logging level for a given logger type.

        base == lowest logging level supported; con == screen logging;
        txt == plain text file logging; md == markdown file logging.
        All types currently log at DEBUG.
        """
        return logging.DEBUG

    def SetPlatformEnv(self):
        logging.debug("PlatformBuilder SetPlatformEnv")
        self.env.SetValue("PRODUCT_NAME", "OnePlus6TPkg", "Platform Hardcoded")
        self.env.SetValue("MAKE_STARTUP_NSH", "FALSE", "Default to false")
        return 0

    def PlatformPreBuild(self):
        return 0

    def PlatformPostBuild(self):
        return 0

    def FlashRomImage(self):
        return 0
| 44.411483 | 154 | 0.625404 |
3db64023c11e59cfae6dd8a5d27325fc3d6836e7 | 430 | py | Python | xTool/utils/threads.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | 2 | 2020-09-02T13:46:06.000Z | 2020-10-11T16:11:02.000Z | xTool/utils/threads.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | null | null | null | xTool/utils/threads.py | luciferliu/xTools | 324ef1388be13ece0d952e3929eb685212d573f1 | [
"Apache-2.0"
] | 4 | 2018-10-15T07:08:34.000Z | 2019-11-26T01:52:47.000Z | # coding: utf-8
import threading
class defaultlocal(threading.local):
    """Thread-local storage whose fields start out with the given defaults.

    Because threading.local re-runs __init__ with the original arguments in
    every thread that first touches the instance, each thread sees the
    constructor keywords as its initial attribute values.

    >>> l = defaultlocal(foo=42)
    >>> def f(): print(l.foo)
    >>> t = threading.Thread(target=f)
    >>> t.start() ; t.join()
    42
    """

    def __init__(self, **defaults):
        super(defaultlocal, self).__init__()
        vars(self).update(defaults)
| 20.47619 | 74 | 0.595349 |
577429218ca8534d7ce95dcc7baac8200e53e976 | 5,015 | py | Python | tests/test_tokenizers.py | NC0DER/sumy | ee1bf6d4836c4065f27168ef5cc9137a91836615 | [
"Apache-2.0"
] | null | null | null | tests/test_tokenizers.py | NC0DER/sumy | ee1bf6d4836c4065f27168ef5cc9137a91836615 | [
"Apache-2.0"
] | null | null | null | tests/test_tokenizers.py | NC0DER/sumy | ee1bf6d4836c4065f27168ef5cc9137a91836615 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import pytest
from sumy.nlp.tokenizers import Tokenizer
def test_missing_language():
    # Asking for an unknown language must fail loudly, not fall back.
    with pytest.raises(LookupError):
        Tokenizer("klingon")
def test_ensure_czech_tokenizer_available():
    tokenizer = Tokenizer("czech")
    assert tokenizer.language == "czech"

    sentences = tokenizer.to_sentences("""
        Měl jsem sen, že toto je sen. Bylo to také zvláštní.
        Jakoby jsem plaval v moři rekurze.
    """)
    expected = (
        "Měl jsem sen, že toto je sen.",
        "Bylo to také zvláštní.",
        "Jakoby jsem plaval v moři rekurze.",
    )
    assert sentences == expected
def test_language_getter():
    # The tokenizer must remember the language it was built for.
    assert Tokenizer("english").language == "english"
@pytest.mark.parametrize("language, sentence, expected_words", [
    (
        "english",
        "I am a very nice sentence with comma, but..",
        ("I", "am", "a", "very", "nice", "sentence", "with", "comma", "but"),
    ),
    (
        "english",
        "I am doing sugar-free data-mining for Peter's study - vega punk.",
        ("I", "am", "doing", "sugar-free", "data-mining", "for", "Peter", "study", "vega", "punk"),
    ),
    (
        "japanese",
        "この文章を、正しくトークン化したい。",
        ("この", "文章", "を", "正しく", "トークン", "化", "し", "たい"),
    ),
    (
        "chinese",
        "好用的文档自动化摘要程序",
        ("好用", "的", "文档", "自动化", "摘要", "程序"),
    ),
    pytest.param(
        "korean",
        "대학에서 DB, 통계학, 이산수학 등을 배웠지만...",
        ("대학", "통계학", "이산", "이산수학", "수학", "등"),
        marks=pytest.mark.skipif(sys.version_info < (3,), reason="JPype1 from konlpy does not support Python 2 anymore")
    ),
    (
        "greek",
        "Ποιό είναι το κείμενο; Αυτό εδώ - και είναι έτοιμο! Τέλεια. Το στέλνω...",
        ('Ποιό', 'είναι', 'το', 'κείμενο', 'Αυτό', 'εδώ', 'και', 'είναι', 'έτοιμο', 'Τέλεια', 'Το', 'στέλνω'),
    ),
])
def test_tokenize_sentence_to_words(language, sentence, expected_words):
    tokenizer = Tokenizer(language)
    assert tokenizer.to_words(sentence) == expected_words
    assert tokenizer.language == language
def test_tokenize_sentences_with_abbreviations():
    # "e.g." must not be mistaken for a sentence boundary.
    tokenizer = Tokenizer("english")
    actual = tokenizer.to_sentences("There are people who are weird, e.g. normal people. These people know you.")
    expected = ("There are people who are weird, e.g. normal people.", "These people know you.",)
    assert actual == expected
def test_tokenize_paragraph():
    tokenizer = Tokenizer("english")
    sentences = tokenizer.to_sentences("""
        I am a very nice sentence with comma, but..
        This is next sentence. "I'm bored", said Pepek.
        Ou jee, duffman is here.
    """)
    expected = (
        "I am a very nice sentence with comma, but..",
        "This is next sentence.",
        '"I\'m bored", said Pepek.',
        "Ou jee, duffman is here.",
    )
    assert sentences == expected
def test_slovak_alias_into_czech_tokenizer():
    # Slovak is backed by the Czech tokenizer but keeps its own name.
    tokenizer = Tokenizer("slovak")
    assert tokenizer.language == "slovak"

    sentences = tokenizer.to_sentences("""
        Je to veľmi fajn. Bodaj by nie.
        Ale na druhej strane čo je to oproti inému?
        To nechám na čitateľa.
    """)
    expected = (
        "Je to veľmi fajn.",
        "Bodaj by nie.",
        "Ale na druhej strane čo je to oproti inému?",
        "To nechám na čitateľa.",
    )
    assert sentences == expected
def test_tokenize_japanese_paragraph():
    tokenizer = Tokenizer('japanese')
    paragraph = '1つ目の文章です。その次は何が来ますか? 「2つ目の文章」です。'
    expected = (
        '1つ目の文章です。',
        'その次は何が来ますか?',
        '「2つ目の文章」です。'
    )
    assert tokenizer.to_sentences(paragraph) == expected
def test_tokenize_chinese_paragraph():
    tokenizer = Tokenizer('chinese')
    paragraph = '我正在为这个软件添加中文支持。这个软件是用于文档摘要!这个软件支持网页和文本两种输入格式?'
    expected = (
        '我正在为这个软件添加中文支持。',
        '这个软件是用于文档摘要!',
        '这个软件支持网页和文本两种输入格式?'
    )
    assert tokenizer.to_sentences(paragraph) == expected
@pytest.mark.skipif(sys.version_info < (3,), reason="JPype1 from konlpy does not support Python 2 anymore")
def test_tokenize_korean_paragraph():
    tokenizer = Tokenizer('korean')
    paragraph = '회사 동료 분들과 다녀왔는데 분위기도 좋고 음식도 맛있었어요 다만, 강남 토끼정이 강남 쉑쉑버거 골목길로 쭉 올라가야 하는데 다들 쉑쉑버거의 유혹에 넘어갈 뻔 했답니다 강남역 맛집 토끼정의 외부 모습.'
    expected = (
        '회사 동료 분들과 다녀왔는데 분위기도 좋고 음식도 맛있었어요',
        '다만, 강남 토끼 정이 강남 쉑쉑 버거 골목길로 쭉 올라가야 하는데 다들 쉑쉑버거의 유혹에 넘어갈 뻔 했답니다',
        '강남 역 맛 집 토끼정의 외부 모습.'
    )
    assert tokenizer.to_sentences(paragraph) == expected
def test_tokenize_greek_paragraph():
    tokenizer = Tokenizer('greek')
    paragraph = 'Ποιό είναι το κείμενο; Αυτό εδώ - και είναι έτοιμο! Τέλεια. Το στέλνω...'
    expected = (
        'Ποιό είναι το κείμενο;',
        'Αυτό εδώ - και είναι έτοιμο!',
        'Τέλεια.',
        'Το στέλνω...'
    )
    assert tokenizer.to_sentences(paragraph) == expected
f5896b51376ca5aae91fca32278582c9dd37a682 | 1,808 | py | Python | test/python/test_yml.py | plandes/grsync | bdedb6f298ba444afe016d9a70f04d4772ff19e1 | [
"MIT"
] | 1 | 2019-06-23T15:34:05.000Z | 2019-06-23T15:34:05.000Z | test/python/test_yml.py | plandes/grsync | bdedb6f298ba444afe016d9a70f04d4772ff19e1 | [
"MIT"
] | null | null | null | test/python/test_yml.py | plandes/grsync | bdedb6f298ba444afe016d9a70f04d4772ff19e1 | [
"MIT"
] | 1 | 2020-09-07T09:20:11.000Z | 2020-09-07T09:20:11.000Z | import unittest
from pathlib import Path
from zensols.config import YamlConfig
from zensols.grsync import AppConfig
class TestConfig(unittest.TestCase):
def test_yaml(self):
correct = {'discover.bootstrap.inst_dir': '${HOME}/grsync',
'discover.bootstrap.python_dir': '${HOME}/opt/lib/python3',
'discover.bootstrap.wheel_dir': 'wheels',
'discover.empty_dirs': ['~/tmp'],
'discover.local.dist_dir': './dist',
'discover.local.wheels_dir': 'wheels',
'discover.objects.default': ['~/.profile',
'~/.bashrc',
'~/.Xdefaults',
'~/.xsession',
'~/.emacs',
'~/.emacs.d',
'~/code/home-dir',
'~/code/emacs'],
'discover.target':
[{'link': {'source': '~/.profile_${os}',
'target':
'~/code/home-dir/dot/os/${os}/profile'}}],
'discover.codedir': '~/code',
'discover.wheel.create': False}
config = YamlConfig('test-resources/yaml-test.yml', delimiter='^')
self.maxDiff = float('inf')
self.assertEqual(correct, config.options)
def test_set_dist(self):
config = AppConfig('test-resources/midsize-test.yml')
self.assertEqual(Path('./dist').absolute(), config.dist_dir)
config.dist_dir = Path('./anewdir')
self.assertEqual(Path('./anewdir').absolute(), config.dist_dir)
| 47.578947 | 78 | 0.450221 |
3aabb7c5b25e2872c474b85b9cdb8341a330ddf5 | 807 | py | Python | django_project/test_mode/manage.py | sinomiko/project | 00fadb0033645f103692f5b06c861939a9d4aa0e | [
"BSD-3-Clause"
] | 1 | 2018-12-30T14:07:42.000Z | 2018-12-30T14:07:42.000Z | django_project/test_mode/manage.py | sinomiko/project | 00fadb0033645f103692f5b06c861939a9d4aa0e | [
"BSD-3-Clause"
] | null | null | null | django_project/test_mode/manage.py | sinomiko/project | 00fadb0033645f103692f5b06c861939a9d4aa0e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_mode.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Distinguish "Django is missing" from any other failure inside
        # django.core.management so we don't mask unrelated import errors
        # (relevant mainly on Python 2, which lacks exception chaining).
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| 35.086957 | 77 | 0.643123 |
61d38c67bdb9d150de3ae523416ea5ca60c51801 | 543 | py | Python | utils/set_bot_commands.py | AleksZavg/Admin-telegram-bot | c671419ba9fd5e93df742ebe9443d72afa4c99aa | [
"MIT"
] | null | null | null | utils/set_bot_commands.py | AleksZavg/Admin-telegram-bot | c671419ba9fd5e93df742ebe9443d72afa4c99aa | [
"MIT"
] | null | null | null | utils/set_bot_commands.py | AleksZavg/Admin-telegram-bot | c671419ba9fd5e93df742ebe9443d72afa4c99aa | [
"MIT"
] | null | null | null | from aiogram import types
async def set_default_commands(dp):
    """Register the bot's default slash-command menu with Telegram."""
    commands = [
        types.BotCommand("start", "Запустить бота"),
        types.BotCommand("admin", "Панель управления администратора"),
        types.BotCommand("private", "Доступ к приватным возможностям"),
        types.BotCommand("info", "Информация о боте (от автора-разработчика)"),
        types.BotCommand("help", "Вывести справку"),
        types.BotCommand("cancel", "Остановить текущее действие"),
    ]
    await dp.bot.set_my_commands(commands)
| 36.2 | 83 | 0.631676 |
114f2c802089c09ce99502add30eabfb9f350571 | 19,474 | py | Python | modules/QQqt4/mycopymsglist.py | earlybackhome/You-cannot-guess | 8674b9c089321835205c75cbc2b36ca4fd9a5b80 | [
"MIT"
] | 21 | 2017-05-25T09:03:23.000Z | 2021-12-27T13:02:52.000Z | modules/QQqt4/mycopymsglist.py | earlybackhome/You-cannot-guess | 8674b9c089321835205c75cbc2b36ca4fd9a5b80 | [
"MIT"
] | null | null | null | modules/QQqt4/mycopymsglist.py | earlybackhome/You-cannot-guess | 8674b9c089321835205c75cbc2b36ca4fd9a5b80 | [
"MIT"
] | 4 | 2017-06-19T13:50:54.000Z | 2020-12-12T15:51:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-12-08 10:56:55
# @Author : He Liang (helianghit@foxmail.com)
# @Link : https://github.com/HeLiangHIT
import os, sys, time, re
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from math import *
from exptable import Window
import imghdr
from bestexplist import BestWindow
sys.path.append('..')
from DL import maxSim
from OCR import Image2txt
DEFAULT_HEAD = 'icons/qq.png'  # fallback avatar image path
DEFAULT_MSG = 'Hello is there anyone?'  # placeholder message text
DEFAULT_IMG = 'icons/img.png'  # fallback picture for image bubbles
def checkContainChinese(s):
    """Return True if *s* contains at least one CJK (Chinese) character."""
    # CJK Unified Ideographs occupy U+4E00..U+9FFF.
    # FIX: the original fell off the end and implicitly returned None for
    # non-Chinese input; any() makes the False case explicit (still falsy
    # for existing truthiness-based callers).
    return any(u'\u4e00' <= ch <= u'\u9fff' for ch in s)
def splitStringByLen(text, Len):
    """Hard-wrap *text* into lines of roughly *Len* character cells.

    Existing newlines are replaced by '.' first, so '\\n' only ever marks
    the wrap points this function inserts.  A CJK character counts as three
    cells, approximating its display width relative to Latin glyphs.
    """
    # (dead commented-out reload(sys)/setdefaultencoding hack removed)
    text = text.replace('\n', '.')
    wrapped, cells = '', 0
    for ch in text:
        wrapped += ch
        cells += 3 if checkContainChinese(ch) else 1
        if cells >= (Len - 1):  # line is full: break and restart the count
            wrapped += '\n'
            cells = 0
    return wrapped
class NoticeWindow(QWidget):
    """Frameless red banner used to flash a short notice over the chat list."""

    def __init__(self, txt, listView, ):
        super(NoticeWindow, self).__init__()
        self.resize(770, 60)
        self.setWindowFlags(Qt.FramelessWindowHint)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setStyleSheet("QScrollBar{width:0;height:0}")
        layout = QGridLayout()
        self.label = QLabel(txt)
        self.label.setFont(QFont("Microsoft Yahei",15,QFont.Bold))
        layout.addWidget(self.label)
        self.setLayout(layout)
        # Park the banner relative to the list view's current position.
        self.move(listView.x()+270, listView.y()+590)
        # NOTE(review): this second setStyleSheet replaces the QScrollBar
        # rule set above -- confirm whether both rules were meant to apply.
        self.setStyleSheet(
            "QLabel{background: red;color: white;border-width: 2px; border-style: solid;border-color: red;border-radius:2px}")
class BubbleText(QLabel):
    """Speech-bubble label for a single text message.

    Paints its own bubble background in paintEvent -- tail on the left for
    received messages (lr=True), on the right for sent ones (lr=False) --
    and pre-wraps the text with '\\n' so QLabel needs no word wrapping of
    its own.  (QTextEdit was rejected originally because its size and
    scrollbars were hard to control; line width is therefore limited by
    character count, with a CJK character counted as three Latin cells.)
    """
    border = 5    # gap between the painted bubble and the widget edge
    trigon = 20   # size of the triangular tail
    lineLen = 40  # character cells per wrapped line
    minH = 2 * trigon + 2 * border
    minW = 2 * trigon + 2 * border

    def __init__(self, listItem, listView, text=DEFAULT_MSG, lr=True):
        self.listItem = listItem
        self.listView = listView
        self.text = text
        wrapped = splitStringByLen(text, self.lineLen)  # wrap before display
        super(BubbleText, self).__init__(wrapped)
        self.setMinimumWidth(self.minW)
        self.setFont(QFont("Microsoft Yahei",15,QFont.Normal))
        self.setState(False)  # start with the "mouse outside" palette
        self.lr = lr  # True -> tail on the left, False -> tail on the right
        # Reserve content margins so the label text never overlaps the
        # bubble shape painted in paintEvent.
        if self.lr:
            self.setContentsMargins(self.trigon * sqrt(3) / 2 + 3, self.border + 3,
                                    self.border + 3, self.border + 3)
        else:
            self.setContentsMargins(self.border + 3, self.border + 3,
                                    self.trigon * sqrt(3) / 2 + 3, self.border + 3)
        self.setStyleSheet("color: #FFFFFF")

    def paintEvent(self, e):
        """Draw the bubble background, then let QLabel draw the text."""
        size = self.size()
        painter = QPainter()
        painter.begin(self)
        if self.lr:
            self.leftBubble(painter, size.width(), size.height())
        else:
            self.rightBubble(painter, size.width(), size.height())
        painter.end()
        super(BubbleText, self).paintEvent(e)

    def leftBubble(self, qp, w, h):
        """Paint a bubble whose tail points left, for a w x h widget."""
        qp.setPen(self.colorLeftE)    # outline
        qp.setBrush(self.colorLeftM)  # fill
        middle = h / 2
        shifty = self.trigon / 2
        shiftx = self.trigon * sqrt(3) / 2
        outline = QPolygonF()
        for x, y in ((0, middle),                        # tip of the tail
                     (shiftx, middle + shifty),
                     (shiftx, h - self.border),
                     (w - self.border, h - self.border),
                     (w - self.border, self.border),
                     (shiftx, self.border),
                     (shiftx, middle - shifty)):
            outline.append(QPointF(x, y))
        qp.drawPolygon(outline)

    def rightBubble(self, qp, w, h):
        """Paint a bubble whose tail points right, for a w x h widget."""
        qp.setPen(self.colorRightE)
        qp.setBrush(self.colorRightM)
        middle = h / 2
        shifty = self.trigon / 2
        shiftx = self.trigon * sqrt(3) / 2
        outline = QPolygonF()
        for x, y in ((w, middle),                        # tip of the tail
                     (w - shiftx, middle + shifty),
                     (w - shiftx, h - self.border),
                     (self.border, h - self.border),
                     (self.border, self.border),
                     (w - shiftx, self.border),
                     (w - shiftx, middle - shifty)):
            outline.append(QPointF(x, y))
        qp.drawPolygon(outline)

    def setState(self, mouse):
        """Switch palettes for hover (mouse=True) vs normal, then repaint."""
        if mouse:
            self.colorLeftM = QColor("#FCF8F8")
            self.colorLeftE = QColor("#FCF8F8")
            self.colorRightM = QColor("#0761F4")
            self.colorRightE = QColor("#0761F4")
        else:
            self.colorLeftM = QColor("#FFFFFF")
            self.colorLeftE = QColor("#FFFFFF")
            self.colorRightM = QColor("#3526F7")
            self.colorRightE = QColor("#3526F7")
        self.update()  # schedule a repaint so the palette change shows promptly

    def enterEvent(self, e):
        self.setState(True)

    def leaveEvent(self, e):
        self.setState(False)

    def contextMenuEvent(self, e):
        """Right-click menu: copy the message text or delete this bubble."""
        copyAction = QAction(QIcon('icons/copy.png'), u'复制', self)
        copyAction.triggered.connect(self.copyText)
        deleteAction = QAction(QIcon('icons/delete.png'), u'删除', self)
        deleteAction.triggered.connect(self.delTextItem)
        menu = QMenu()
        menu.addAction(copyAction)
        menu.addAction(deleteAction)
        menu.exec_(QCursor.pos())  # global cursor position needs no mapping
        e.accept()  # keep the event from reaching the parent widget

    def copyText(self, b):
        """Put the original (unwrapped) message text on the clipboard."""
        QApplication.clipboard().setText(self.text)

    def delTextItem(self, b):
        """Remove this message's row from the owning list widget."""
        row = self.listView.indexFromItem(self.listItem).row()
        self.listView.takeItem(row)
class LabelHead(QLabel):
    """Avatar label: shows a user's head picture scaled to the label size."""

    def __init__(self, addr=DEFAULT_HEAD):
        super(LabelHead, self).__init__()
        self.setScaledContents(True)  # stretch the pixmap to fill the label
        self.setReadOnly(True)
        self.setPicture(addr)

    def setReadOnly(self, b):
        # Stored flag only; QLabel itself has no read-only concept.
        self._readOnly = bool(b)

    def setPicture(self, addr):
        """Load the image at *addr*, display it, and remember the path."""
        self._picAddr = addr
        self.setPixmap(QPixmap(addr))
        return True

    def getPicture(self):
        """Return the path of the picture currently shown."""
        return self._picAddr
# ref : http://stackoverflow.com/questions/18047427/pyqt-sms-bubble-widget
class TextItem(QWidget):
    """Row widget pairing an avatar with a text bubble.

    lr=True lays out avatar-then-bubble (incoming, left aligned);
    lr=False lays out bubble-then-avatar (outgoing, right aligned).
    listItem/listView are forwarded so the bubble can delete its own row.
    """

    def __init__(self, listItem, listView, text=DEFAULT_MSG, lr=True, head=DEFAULT_HEAD):
        super(TextItem, self).__init__()
        row = QHBoxLayout()
        bubble = BubbleText(listItem, listView, text, lr)
        avatar = LabelHead(head)
        avatar.setFixedSize(50, 50)
        if lr is not True:
            # Outgoing: push everything right with an expanding spacer.
            row.addSpacerItem(QSpacerItem(1, 1, QSizePolicy.Expanding, QSizePolicy.Preferred))
            row.addWidget(bubble)
            row.addWidget(avatar)
        else:
            # Incoming: avatar first, spacer keeps the bubble left-aligned.
            row.addWidget(avatar)
            row.addWidget(bubble)
            row.addSpacerItem(QSpacerItem(1, 1, QSizePolicy.Expanding, QSizePolicy.Preferred))
        row.setContentsMargins(0, 0, 0, 0)
        self.setLayout(row)
        self.setContentsMargins(0, 0, 0, 0)
class BubbleImage(QLabel):
"""绘制背景气泡的控件"""
border = 5
trigon = 20
lineLen = 40 #每行的文字数量
minH = 2 * trigon + 2 * border
minW = 2 * trigon + 2 * border
def __init__(self,listItem,listView,path=os.getcwd(),img = DEFAULT_IMG,lr = True,maxWidth = 500):#图片显示的最长宽度
self.listItem = listItem
self.listView = listView
self.img = img
self.path = path
super(BubbleImage, self).__init__()
self.setMinimumWidth(self.minW)
# self.setStyleSheet("QLabel:hover{background-color:rgba(210,240,250,255);}")#鼠标滑过的颜色设置,这样自定义的paintEvent绘图区域就被看穿了
self.setState(False)#设置鼠标不进入状态,方便绘图区域的颜色更新
self.lr = lr #标志绘制左还是右
if self.lr:
self.setContentsMargins(self.trigon*sqrt(3)/2 + 3,self.border + 3,self.border + 3,self.border + 3)
else:
self.setContentsMargins(self.border + 3,self.border + 3,self.trigon*sqrt(3)/2 + 3,self.border + 3)
self.setScaledContents(True)
if not os.path.exists(img):
img = DEFAULT_IMG
pic = QPixmap(img)
self.wid = pic.size().width() if pic.size().width()<maxWidth else maxWidth
nPic = pic.scaledToWidth(self.wid) #高度自动计算,以便保持比例
self.setPixmap(nPic)
if img.endswith('gif'):
self.movie = QMovie(self)
self.movie.setFileName(img)
self.movie.setCacheMode(QMovie.CacheNone)
self.movie.frameChanged.connect(self.animate)
self.movie.start()
@pyqtSlot(int)
def animate(self,index):
pic = self.movie.currentPixmap()
nPic = pic.scaledToWidth(self.wid)
self.setPixmap(nPic)
def paintEvent(self, e):
size = self.size()
qp = QPainter()
qp.begin(self)
if self.lr:
self.leftBubble(qp,size.width(),size.height())
else:
self.rightBubble(qp,size.width(),size.height())
qp.end()
super(BubbleImage, self).paintEvent(e)
def leftBubble(self,qp, w, h):
qp.setPen(self.colorLeftE)#设置画笔颜色,绘制的矩形边缘颜色
qp.setBrush(self.colorLeftM)#设置红色的笔刷
middle = h/2
shifty = self.trigon/2
shiftx = self.trigon*sqrt(3)/2
pL = QPolygonF()
pL.append(QPointF(0,middle)) #起始点
pL.append(QPointF(shiftx, middle + shifty)) # 第二点
pL.append(QPointF(shiftx, h - self.border)) #第三点
pL.append(QPointF(w - self.border, h - self.border)) #第四点
pL.append(QPointF(w - self.border, self.border)) #第五点
pL.append(QPointF(shiftx, self.border)) #第六点
pL.append(QPointF(shiftx, middle - shifty)) #第七点
qp.drawPolygon(pL)
def rightBubble(self, qp, w, h):
qp.setPen(self.colorRightE)#设置画笔颜色,绘制的矩形边缘颜色
qp.setBrush(self.colorRightM)#设置红色的笔刷
middle = h/2
shifty = self.trigon/2
shiftx = self.trigon*sqrt(3)/2
pL = QPolygonF()
pL.append(QPointF(w,middle)) #起始点
pL.append(QPointF(w - shiftx, middle + shifty)) # 第二点
pL.append(QPointF(w - shiftx, h - self.border)) #第三点
pL.append(QPointF(self.border, h - self.border)) #第四点
pL.append(QPointF(self.border, self.border)) #第五点
pL.append(QPointF(w - shiftx, self.border)) #第六点
pL.append(QPointF(w - shiftx, middle - shifty)) #第七点
qp.drawPolygon(pL)
def setState(self,mouse):
'''鼠标进入和鼠标出时需要显示不一样的效果,主要就是更新颜色变量,然后调用update更新重绘'''
if mouse:#鼠标进入
self.colorLeftM = QColor("#FFFFFF")
self.colorLeftE = QColor("#FFFFFF")
self.colorRightM = QColor("#3728F1")
self.colorRightE = QColor("#3728F1")
else:
self.colorLeftM = QColor("#FFFFFF")
self.colorLeftE = QColor("#FFFFFF")
self.colorRightM = QColor("#2374FB")
self.colorRightE = QColor("#2374FB")
self.update() #更新界面,不用执行也可以更新,但是不实时
def enterEvent(self,e):
# print 'mouse entered'
self.setState(True)
    def leaveEvent(self,e):
        # print 'mouse leaved'
        # Restore the normal colour scheme.
        self.setState(False)
    def contextMenuEvent(self,e):
        '''Right-click menu offering "collect" (save the image) and "delete" actions.'''
        editUser = QAction(QIcon('icons/copy.png'),u'收藏',self)  # first argument may also be a QIcon
        editUser.triggered.connect(self.downloadImage)
        delUser = QAction(QIcon('icons/delete.png'),u'删除',self)
        delUser.triggered.connect(self.delTextItem)  # fired when the entry is chosen
        menu = QMenu()
        menu.addAction(editUser)
        menu.addAction(delUser)
        menu.exec_(QCursor.pos())  # global position is simpler; e.pos() would need mapping
        e.accept()  # stop the context-menu event propagating to the parent widget
def downloadImage(self,b):
# print 'msg copyed'
checked_info = open('../OCR/checked.info', 'a+')
checked_info.seek(0)
checked_filelist = checked_info.readlines()
download_path = '../OCR/img/download/'
imgtype = imghdr.what(self.img)
i = len(os.listdir(download_path)) + 1
img = download_path+'/'+str(i)+'.'+imgtype
if not os.path.exists(download_path):
os.makedirs(download_path)
with open(self.img, 'rb') as fp:
with open(img, 'wb') as f:
f.write(fp.read())
with open('../OCR/biaoqing.txt', 'a') as out:
if img in checked_filelist or (imgtype != 'jpg' and imgtype != 'jpeg'):
print('%s 已存在 或此图片不为JPG', img)
return
else :
checked_info.write(img + '\n')
try:
pic_ocr = Image2txt.picture_ocr(img)
txt = pic_ocr.get_crop_txt()
# print('pre: %s', txt)
txt = re.subn(r'[^\w\u4e00-\u9fa5]+','', txt)[0].strip()
# print("after: %s", txt)
write_string = txt + '#' + img+'\n'
out.write(write_string)
except AttributeError as e:
pass
if not txt:
print('ocr failed %s', '放弃')
checked_info.close()
    def delTextItem(self,b):
        # print 'msg deleted'
        # Remove this message's row from the owning QListWidget.
        self.listView.takeItem(self.listView.indexFromItem(self.listItem).row())
    def mouseDoubleClickEvent(self,e):
        # Double-click opens the original image in the system image viewer.
        from PIL import Image
        im = Image.open(self.img)
        im.show()
class ImageItem(QWidget):
    '''One image-message row: a BubbleImage plus the avatar.

    ``listItem`` and ``listView`` are forwarded to the bubble so the message
    can later delete itself from the list.  ``lr=True`` lays the bubble out
    on the left (incoming); otherwise it is right-aligned (outgoing).
    '''
    # NOTE(review): the ``path = os.getcwd()`` default is evaluated once, at
    # class-definition time, not per call — confirm that is intended.
    def __init__(self, listItem, listView, img = DEFAULT_MSG, lr=True, head = DEFAULT_HEAD, path = os.getcwd()):
        super(ImageItem,self).__init__()
        hbox = QHBoxLayout()
        img = BubbleImage(listItem,listView,path,img,lr)
        head = LabelHead(head)
        head.setFixedSize(50,50)
        if lr is not True:
            # outgoing: spacer pushes bubble+avatar to the right edge
            hbox.addSpacerItem(QSpacerItem(1,1,QSizePolicy.Expanding,QSizePolicy.Preferred))
            hbox.addWidget(img)
            hbox.addWidget(head)
        else:
            # incoming: avatar on the left, bubble next to it
            hbox.addWidget(head)
            hbox.addWidget(img)
            hbox.addSpacerItem(QSpacerItem(1,1,QSizePolicy.Expanding,QSizePolicy.Preferred))
        hbox.setContentsMargins(0,0,0,0)
        self.setLayout(hbox)
        self.setContentsMargins(0,0,0,0)
'''
设置样式参考: http://blog.csdn.net/taiyang1987912/article/details/40979309
"QListWidget{background-color:rgba(0,240,250,255);color:#19649F;border:0px solid gray;padding:0px -2px 5px 5px;}"
"QListWidget::item{width:94px;height:35px;border:0px solid gray;background-color:transparent;padding:-1px;color:#000000}"
"QListView::item:!enabled{background-image:url(:/handleMenu_clusters_error.png);background:#ceaf01;color:#FF0000}"
"QListWidget::item:hover{background-image:url(:/handleMenu_lixt_bg_hover);color:#FFFFFF;border-width:0;}"
"QListWidget::item:selected{background-image:url(:/handleMenu_lixt_bg_selected.png);}"
'''
class MsgList(QListWidget):
    """Message-list widget; supports appending text messages and image messages."""
    # NOTE(review): ``path = os.getcwd()`` below is evaluated once at class
    # definition time, not per instantiation.
    def __init__(self, path = os.getcwd(), txt_file='../OCR/biaoqing.txt', model_file = '../DL/fash250.model.bin'):
        self.links = maxSim.link_processor(txt_file, model_file)
        self.path = path
        super(MsgList, self).__init__()
        # Lock down all item styles so items render transparently.
        self.setStyleSheet(
            "QListWidget::item{border:0px solid gray;background-color:transparent;padding:0px;color:transparent}"
            "QListView::item:!enabled{background-color:transparent;color:transparent;border:0px solid gray;padding:0px 0px 0px 0px;}"
            "QListWidget::item:hover{background-color:transparent;color:transparent;border:0px solid gray;padding:0px 0px 0px 0px;}"
            "QListWidget::item:selected{background-color:transparent;color:transparent;border:0px solid gray;padding:0px 0px 0px 0px;}"
            "QScrollBar{width:0;height:0}")
        self.expcalling = False       # expression (emoji) picker window open?
        self.bestexpcalling = False   # best-match picker window open?
        # self.focusPolicy(Nofocus)
        self.setSelectionMode(False)
        self.setFocusPolicy(False)
        self.table = QTableWidget()
    def addTextMsg(self,sz = DEFAULT_MSG, lr = True, head = DEFAULT_HEAD):
        # Append a text bubble; lr=True means left-aligned (incoming).
        it = QListWidgetItem(self)
        wid = self.size().width()
        item = TextItem(it,self,sz,lr,head)  # the list and item are passed in so the widget can delete its own item
        # item.setEnabled(False)  # greys the widget out; must not make the item unselectable
        it.setSizeHint(item.sizeHint())
        it.setFlags(Qt.ItemIsEnabled)  # make the item non-selectable
        self.addItem(it)
        self.setItemWidget(it,item)
        self.setCurrentItem(it)
    def addImageMsg(self,img = DEFAULT_IMG, lr = True, head = DEFAULT_HEAD):
        # Append an image bubble; mirrors addTextMsg.
        it = QListWidgetItem(self)
        wid = self.size().width()
        item = ImageItem(it,self,img,lr,head, self.path)  # the list and item are passed in so the widget can delete its own item
        # item.setEnabled(False)  # greys the widget out; must not make the item unselectable
        it.setSizeHint(item.sizeHint())
        it.setFlags(Qt.ItemIsEnabled)  # make the item non-selectable
        self.addItem(it)
        self.setItemWidget(it,item)
        self.setCurrentItem(it)
    def addExpList(self):
        # Open the expression picker window, once.
        if self.expcalling == False:
            self.mywindow = Window(self, self.path)
            self.mywindow.show()
            self.expcalling = True
    # def tableCellClick(self, row, col):
    #     #self.addImageMsg(self.path+self.tableitem.img.piclist[col+row*self.tableitem.img.columncount], False)
    #     self.addImageMsg(self.path+self.tableitem.img.piclist[col+
    #                      row*self.tableitem.img.columncount], False)
    def mouseReleaseEvent(self, event):
        # Clicking the list closes any open picker windows.
        if self.expcalling == True:
            self.mywindow.close()
            self.expcalling = False
        if self.bestexpcalling == True:
            self.bestwindow.close()
            self.bestexpcalling = False
    def selectImage(self, input_txt):
        # code to analyse the txt
        # picture list
        # Find pictures whose OCR text best matches input_txt and show them
        # in a BestWindow; show a transient notice when nothing matches.
        maxSimLinks = self.links.maxSimTxt(input_txt)
        if maxSimLinks == None:
            self.notice = NoticeWindow('Sorry, no match was found.', self)
            self.notice.show()
            QTimer.singleShot(1000, self.notice.close)  # auto-dismiss after 1s
            return None
        mylist = [x.picture for x in maxSimLinks]
        self.mylist = mylist
        self.bestwindow = BestWindow(self, mylist, self.path)
        self.bestwindow.show()
        self.bestexpcalling = True
if __name__=='__main__':
    # Manual smoke test: show a list with a few text and image messages.
    app = QApplication(sys.argv)
    ml=MsgList()
    ml.setMinimumSize(500,500)
    ml.addTextMsg("Hello",True)
    ml.addTextMsg("World!",False)
    ml.addTextMsg(u"昨夜小楼又东风,春心泛秋意上心头,恰似故人远来载乡愁,今夜月稀掩朦胧,低声叹呢喃望星空,恰似回首终究一场梦,轻轻叹哀怨...",True)
    ml.addTextMsg(u"With a gentle look on her face, she paused and said,她脸上带着温柔的表情,稍稍停顿了一下,便开始讲话",False)
    ml.addImageMsg('ref/bq.gif',True)
    ml.addImageMsg('ref/mt.gif',False)
    ml.show()
    sys.exit(app.exec_())
| 36.743396 | 135 | 0.620571 |
a570df30920a23f214ce4d73fae8a936216fb3c9 | 9,249 | py | Python | result_one_test_fold_from_test_prediction_da_whole_image.py | ooooverflow/DigestPath2019 | db7b6a0a86bffbe8f44b5d6aa72b4c76e982c0b8 | [
"Apache-2.0"
] | 1 | 2021-04-22T02:38:38.000Z | 2021-04-22T02:38:38.000Z | result_one_test_fold_from_test_prediction_da_whole_image.py | ooooverflow/DigestPath2019 | db7b6a0a86bffbe8f44b5d6aa72b4c76e982c0b8 | [
"Apache-2.0"
] | 1 | 2020-02-18T12:09:10.000Z | 2021-04-24T16:28:53.000Z | result_one_test_fold_from_test_prediction_da_whole_image.py | ooooverflow/DigestPath2019 | db7b6a0a86bffbe8f44b5d6aa72b4c76e982c0b8 | [
"Apache-2.0"
] | null | null | null | from retinanet.dataset import Ring_Cell_all_dataset
import torch
from torch.utils.data import Dataset, DataLoader
import model_all_dataset_weight_loss as model
import os
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
import cv2
import shutil
# from lib.nms.pth_nms import pth_nms
from lib_new.nms.gpu_nms import gpu_nms
from lib_new.nms.nums_py import py_cpu_nms, py_cpu_nms_contain
from imgaug import augmenters as iaa
from metric import detection_metric, calculate_metric_final
import imgaug as ia
# Pin the process to GPU 1, build the 2-class detector and wrap it for
# (single-device) DataParallel inference on the GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
retinanet = model.resnet18(num_classes=2, pretrained=True)
retinanet = torch.nn.DataParallel(retinanet).cuda()
def nms(dets, thresh):
    """Run CPU non-maximum suppression on detections.

    ``dets`` is a tensor whose rows are boxes with a trailing score column
    (it is moved to CPU and converted to numpy before suppression — the
    exact column layout is defined by ``py_cpu_nms``).  Returns the indices
    of the boxes kept at IoU threshold ``thresh``.

    Fix: the original "docstring" was a malformed pair of adjacent string
    literals (a single-quoted string with a backslash continuation glued to
    an empty ``""``), which rendered as garbage; it is now a proper
    triple-quoted docstring.
    """
    dets = dets.cpu().detach().numpy()
    return py_cpu_nms(dets, thresh)
def nms_contain(dets, thresh):
    """Run CPU containment-aware non-maximum suppression on detections.

    Same contract as :func:`nms` but dispatching to ``py_cpu_nms_contain``,
    which also suppresses boxes contained in other boxes.

    Fix: replaced the original malformed string-literal pair with a real
    docstring (same defect as in ``nms``).
    """
    dets = dets.cpu().detach().numpy()
    return py_cpu_nms_contain(dets, thresh)
# this .py is used to generate the result or prediction of the model on test fold
# and combine this test fold to make new training data
#
# Test-time augmentation: for every variant in da_list the whole image is
# augmented, run through the detector, and the predicted boxes are mapped
# back to original coordinates with the inverse transform before being
# written to a per-variant CSV.
da_list = ['original', 'rotate90', 'rotate180', 'rotate270', 'fliplr', 'flipud']
model_path = 'ckpt_new/best_valid_recall_fold_2_all_dataset_weight_loss_1_resnet18_round4_using_best_valid_recall_0.4_ensemble(round0)_0.4_ensemble(round1)_0.4_ensemble(round2)_0.4_multi3(round3)_1e-4_no_pretrain_remove_some_da.pth'
test_fold = model_path.split('_fold_')[1][0]  # fold id is encoded in the checkpoint name
test_dataset = Ring_Cell_all_dataset('/data/sqy/code/miccai2019/train_test_4/test_{}.txt'.format(test_fold))
retinanet.module.load_state_dict(torch.load(model_path))
retinanet.eval()
image_size = 1024
stride_num = 1
score_threshold = 0.05
nms_threshold = 0.4
for da in da_list:
    # da = 'rotate270'
    result_dict = {}
    result_csv = './test_result_da/retinanet_resnet18_round4_fold_{}_weight_loss_1_on_test_data_best_valid_recall_{}_0.4(round0)_0.4(round1)_0.4(round2)_0.4_multi3(round3)_whole_image.csv'.format(test_fold, da)
    print(result_csv)
    if os.path.exists(result_csv):
        continue  # variant already processed
    # if os.path.isdir(vis_dir):
    #     shutil.rmtree(vis_dir)
    # os.mkdir(vis_dir)
    pred_boxes_total = []
    pred_scores_total = []
    gt_boxes_total = []
    font = cv2.FONT_HERSHEY_SIMPLEX
    for i, (image, bbox, image_, image_name) in enumerate(tqdm(test_dataset)):
        h, w = image.size()[1:]  # image is CxHxW (channels first)
        # stride_h = (h - image_size) / (stride_num - 1)
        # stride_w = (w - image_size) / (stride_num - 1)
        # Apply the forward augmentation.  imgaug works on HxWxC arrays,
        # hence the transpose round-trip.
        if da == 'rotate90':
            seq = iaa.Rot90(1)
            image = np.array(image).transpose((1, 2, 0))
            image = seq.augment_image(image).transpose((2, 0, 1))
            image = image - np.zeros_like(image)
            image = torch.Tensor(image)
        elif da == 'rotate180':
            seq = iaa.Rot90(2)
            image = np.array(image).transpose((1, 2, 0))
            image = seq.augment_image(image).transpose((2, 0, 1))
            image = image - np.zeros_like(image)
            image = torch.Tensor(image)
        elif da == 'rotate270':
            seq = iaa.Rot90(3)
            image = np.array(image).transpose((1, 2, 0))
            image = seq.augment_image(image).transpose((2, 0, 1))
            image = image - np.zeros_like(image)
            image = torch.Tensor(image)
        elif da == 'fliplr':
            seq = iaa.Fliplr(1)
            image = np.array(image).transpose((1, 2, 0))
            image = seq.augment_image(image).transpose((2, 0, 1))
            image = image - np.zeros_like(image)
            image = torch.Tensor(image)
        elif da == 'flipud':
            seq = iaa.Flipud(1)
            image = np.array(image).transpose((1, 2, 0))
            image = seq.augment_image(image).transpose((2, 0, 1))
            image = image - np.zeros_like(image)
            image = torch.Tensor(image)
        pred_boxes = []
        pred_scores = []
        result_dict[image_name] = []
        image_patch = image
        # predict
        scores_patch, labels_patch, boxes_patch = retinanet(image_patch.unsqueeze(0).cuda().float(), score_threshold=score_threshold)
        scores_patch = scores_patch.cpu().detach().numpy()  # size -> [num_box]
        # labels_patch = la bels_patch.cpu().detach().numpy() # size -> [num_box]
        boxes_patch = boxes_patch.cpu().detach().numpy()  # size -> [num_box, 4]
        # change bbox coordinates
        if boxes_patch.shape[0] != 0:
            # start_x = int(w_index * stride_w)
            # start_y = int(h_index * stride_h)
            # box_index = (boxes_patch[:, 0] > 2) & (boxes_patch[:, 1] > 2) & (boxes_patch[:, 2] < image_size - 3)\
            #     & (boxes_patch[:, 3] < image_size - 3) & (scores_patch > score_threshold)
            # boxes_patch = boxes_patch[box_index]
            # scores_patch = scores_patch[box_index]
            # boxes_patch[:, 0] = boxes_patch[:, 0] + start_x
            # boxes_patch[:, 1] = boxes_patch[:, 1] + start_y
            # boxes_patch[:, 2] = boxes_patch[:, 2] + start_x
            # boxes_patch[:, 3] = boxes_patch[:, 3] + start_y
            boxes_patch = boxes_patch.tolist()
            scores_patch = scores_patch.tolist()
            pred_boxes.extend(boxes_patch)
            pred_scores.extend(scores_patch)
        image = image_.permute(1, 2, 0).numpy()
        # for box in pred_boxes:
        #     image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 2)
        # nms
        if len(pred_boxes) != 0:
            pred_boxes = torch.Tensor(pred_boxes).unsqueeze(0)  # size -> [1, num_box, 4]
            pred_scores = torch.Tensor(pred_scores).unsqueeze(0).unsqueeze(-1)  # size -> [1, num_box, 1]
            pred_boxes_w = pred_boxes[0, :, 2] - pred_boxes[0, :, 0]
            pred_boxes_h = pred_boxes[0, :, 3] - pred_boxes[0, :, 1]
            # wh_idx = (pred_boxes_w > 10) & (pred_boxes_h > 10)
            # pred_boxes = pred_boxes[:, wh_idx, :]
            # pred_scores = pred_scores[:, wh_idx, :]
            anchors_nms_idx = nms(torch.cat([pred_boxes, pred_scores], dim=2)[0, :, :], nms_threshold)
            pred_boxes = pred_boxes[:, anchors_nms_idx, :]
            pred_scores = pred_scores[:, anchors_nms_idx, :]
            # anchors_nms_idx = nms_contain(torch.cat([pred_boxes, pred_scores], dim=2)[0, :, :], 0.8)
            #
            # pred_boxes = pred_boxes[0, anchors_nms_idx, :]
            # pred_scores = pred_scores[0, anchors_nms_idx, 0]
            pred_boxes = pred_boxes[0, :, :]
            pred_scores = pred_scores[0, :, 0]
            pred_boxes = pred_boxes.numpy().tolist()
            pred_scores = pred_scores.numpy().tolist()
            # Map boxes back to original coordinates with the inverse
            # transform (Rot90(k) is undone by Rot90(4-k); flips invert themselves).
            bbs = []
            for box in pred_boxes:
                bbs.append(ia.BoundingBox(x1=box[0], y1=box[1], x2=box[2], y2=box[3]))
            bbs = ia.BoundingBoxesOnImage(bbs, shape=(h, w, 3))
            if da == 'original':
                pred_boxes = []
                for box in bbs.bounding_boxes:
                    pred_boxes.append([box.x1, box.y1, box.x2, box.y2])
            elif da == 'rotate90':
                seq = iaa.Rot90(3)
                bbs_ = seq.augment_bounding_boxes([bbs])
                pred_boxes = []
                for box in bbs_[0].bounding_boxes:
                    pred_boxes.append([box.x1, box.y1, box.x2, box.y2])
            elif da == 'rotate180':
                seq = iaa.Rot90(2)
                bbs_ = seq.augment_bounding_boxes([bbs])
                pred_boxes = []
                for box in bbs_[0].bounding_boxes:
                    pred_boxes.append([box.x1, box.y1, box.x2, box.y2])
            elif da == 'rotate270':
                seq = iaa.Rot90(1)
                bbs_ = seq.augment_bounding_boxes([bbs])
                pred_boxes = []
                for box in bbs_[0].bounding_boxes:
                    pred_boxes.append([box.x1, box.y1, box.x2, box.y2])
            elif da == 'fliplr':
                seq = iaa.Fliplr(1)
                bbs_ = seq.augment_bounding_boxes([bbs])
                pred_boxes = []
                for box in bbs_[0].bounding_boxes:
                    pred_boxes.append([box.x1, box.y1, box.x2, box.y2])
            elif da == 'flipud':
                seq = iaa.Flipud(1)
                bbs_ = seq.augment_bounding_boxes([bbs])
                pred_boxes = []
                for box in bbs_[0].bounding_boxes:
                    pred_boxes.append([box.x1, box.y1, box.x2, box.y2])
            pred_boxes_total.append(pred_boxes)
            pred_scores_total.append(pred_scores)
            gt_boxes_total.append(bbox)
        else:
            pred_boxes_total.append([])
            pred_scores_total.append([])
            gt_boxes_total.append(bbox)
        # Record kept detections for the CSV.
        for j, box in enumerate(pred_boxes):
            if float(pred_scores[j]) >=score_threshold:
                # image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 2)
                # image = cv2.putText(image, str(float(pred_scores[j]))[:3], (int(box[0]) + 10, int(box[1]) + 20), font, 0.8, (0, 0, 0),
                #                     2)
                result_dict[image_name].append([box, pred_scores[j]])
        # for box in bbox:
        #     image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 2)
        # cv2.imwrite(os.path.join(vis_dir, 'train_{}_18_all_dataset_{}_latest.jpg'.format(i, score_threshold)), image)
    # Serialise: "image,x1 y1 x2 y2 score;...;\n" per line.
    result_str = ''
    for image_name in result_dict:
        result_str += image_name
        result_str += ','
        results = result_dict[image_name]
        for result in results:
            box, score = result
            for element in box:
                result_str += str(element)
                result_str += ' '
            result_str += str(score)
            result_str += ';'
        result_str += '\n'
    with open(result_csv, 'w') as f:
        f.write(result_str)
    # recall, precision, froc, FPs = calculate_metric_final(pred_boxes_total, gt_boxes_total, pred_scores_total, score_threshold=score_threshold)
    #
    # print('froc: {}, recall: {}, precision: {}, FPs: {}'.format(froc, recall[-1], precision[-1], FPs))
3c6a17279de709eaa7701fa9dfea4b8185075f3b | 4,075 | py | Python | scrape_20180304.py | ScrapeOpen/elezionistorico.interno.gov.it | ab0c627b644eb504baf3acc7732c586d0a6ec45c | [
"MIT"
] | null | null | null | scrape_20180304.py | ScrapeOpen/elezionistorico.interno.gov.it | ab0c627b644eb504baf3acc7732c586d0a6ec45c | [
"MIT"
] | 1 | 2018-06-02T22:04:46.000Z | 2019-02-22T21:49:23.000Z | scrape_20180304.py | ScrapeOpen/elezionistorico.interno.gov.it | ab0c627b644eb504baf3acc7732c586d0a6ec45c | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import glob
import os
import re
# Set dirs and paths
download_dir = '/Users/francesco/Downloads' # EDIT THIS
exec_file = '/Users/francesco/Downloads/geckodriver' # EDIT THIS
# Paste `Elezione` + `Data` URL
# One URL per election (Camera and Senato of 2018-03-04).
base_urls = ["http://elezionistorico.interno.gov.it/index.php?tpel=C&dtel=04/03/2018", "http://elezionistorico.interno.gov.it/index.php?tpel=S&dtel=04/03/2018"]
# Firefox settings: headless, with CSV downloads saved silently to download_dir.
options = Options()
options.set_headless(headless=True)
options.set_preference("browser.download.folderList", 2)
options.set_preference("browser.download.manager.showWhenStarting", False)
options.set_preference("browser.download.dir", download_dir)
options.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/csv")
driver = webdriver.Firefox(firefox_options=options, executable_path=exec_file)
# Xpaths into the sidebar: %s is the section (geographic level) index.
heading_xpath = "//*/div[@class='well sidebar-nav']/div[%s]/div[1]/div/a[1]"
section_xpath = "//*/div[@class='well sidebar-nav']/div[%s]/div[2]/div/div/ul/li/a"
csv_xpath = "//*/div[@class='well sidebar-nav']/div[%s]/div[1]/div/a[%s]"
def getSectionHeading ( level ):
    # Text of the heading link of sidebar section ``level`` on the current page.
    return driver.find_element_by_xpath(heading_xpath % (str(level),)).text
def getSectionTitles(level):
    """Return the ``title`` attribute of every link in sidebar section ``level``.

    Reads the page the module-level ``driver`` is currently on.
    """
    these_as = driver.find_elements_by_xpath(section_xpath % (str(level),))
    # comprehension replaces the manual accumulate-and-append loop
    return [a.get_attribute('title') for a in these_as]
def getSectionHrefs(level):
    """Return the ``href`` attribute of every link in sidebar section ``level``.

    Reads the page the module-level ``driver`` is currently on.
    """
    these_as = driver.find_elements_by_xpath(section_xpath % (str(level),))
    # comprehension replaces the manual accumulate-and-append loop
    return [a.get_attribute('href') for a in these_as]
def downloadCandidateCSV(level):
    """Scrape the per-candidate results table for every place at ``level``
    and write them as a CANDIDATI csv next to the most recent download.

    The output filename is derived from the newest *.csv in ``download_dir``
    (written moments earlier by ``downloadCSV``) by swapping LISTE/SCRUTINI
    for CANDIDATI.
    """
    list_of_files = glob.glob(download_dir + '/*.csv')
    latest_file = max(list_of_files, key=os.path.getctime)
    this_file = re.sub(r"/LISTE|/SCRUTINI", "/CANDIDATI", latest_file)
    # Collect titles/hrefs before navigating away from the current page.
    these_titles = getSectionTitles(level)
    these_hrefs = getSectionHrefs(level)
    # ``with`` guarantees the csv is closed even if Selenium raises mid-scrape
    # (the original left the handle open in that case).
    with open(this_file, "w") as out:
        out.write("Ente;Candidato;Voti;Perc;\n")
        # enumerate() replaces the original O(n) ``these_hrefs.index(...)``
        # lookup that ran once per table row (and broke on duplicate hrefs).
        for idx, this_href in enumerate(these_hrefs):
            driver.get(this_href)
            place = these_titles[idx]
            for this_tr in driver.find_elements_by_xpath("//*/tr[@class='leader']"):
                candidate = this_tr.find_element_by_xpath(".//td[@headers='hcandidato']").text
                votes = this_tr.find_element_by_xpath(".//td[contains(@headers, 'hvoti')]").text
                perc = this_tr.find_element_by_xpath(".//td[contains(@headers, 'hpercentuale')]").text
                out.write(place + ";" + candidate + ";" + votes + ";" + perc + ";\n")
def downloadCSV ( level ):
    # Click the two CSV export links (indices 2 and 3) of sidebar section
    # ``level``, then scrape the per-candidate table into a third file.
    driver.find_element_by_xpath(csv_xpath % (level, 2)).click()
    driver.find_element_by_xpath(csv_xpath % (level, 3)).click()
    downloadCandidateCSV(level)
def scrapeItalia():
    """Walk every election URL down through sidebar levels 3..6 and download
    the CSV exports (section 7) at the deepest level.

    Each nesting level fetches a page and expands the next geographic level
    of links; every visited URL is printed for progress tracking.
    """
    for url in base_urls:
        print(url)
        driver.get(url)
        level_3_hrefs = getSectionHrefs(3)
        for level_3_href in level_3_hrefs:
            print(level_3_href)
            driver.get(level_3_href)
            level_4_hrefs = getSectionHrefs(4)
            for level_4_href in level_4_hrefs:
                print(level_4_href)
                driver.get(level_4_href)
                level_5_hrefs = getSectionHrefs(5)
                for level_5_href in level_5_hrefs:
                    print(level_5_href)
                    driver.get(level_5_href)
                    level_6_hrefs = getSectionHrefs(6)
                    for level_6_href in level_6_hrefs:
                        # fix: the original printed level_5_href here, so the
                        # progress log never showed the URL actually fetched
                        print(level_6_href)
                        driver.get(level_6_href)
                        downloadCSV(7)
# Temp fix (Valle D'Aosta must be downloaded almost manually)
def scrapeValleDAosta ( ):
    # Valle d'Aosta has a shallower hierarchy: go straight from the Senato
    # URL (base_urls[1]) to the third level-3 entry, then download at level 5.
    url = base_urls[1]
    print(url)
    driver.get(url)
    level_3_hrefs = getSectionHrefs(3)
    level_3_href = level_3_hrefs[2]  # hard-coded index of the Valle d'Aosta entry
    print(level_3_href)
    driver.get(level_3_href)
    level_4_hrefs = getSectionHrefs(4)
    for level_4_href in level_4_hrefs:
        print(level_4_href)
        driver.get(level_4_href)
        downloadCSV(5)
# Entry point: the full-country scrape is commented out; only the Valle
# d'Aosta special case runs, then the browser is shut down.
# scrapeItalia()
scrapeValleDAosta()
driver.quit()
| 33.401639 | 160 | 0.706012 |
9581f0989b807c6fdd604c730c117440d97482fa | 1,269 | py | Python | tests/test_remove_comments.py | PoWWoP/wiki-dump-reader | a7c195f132753a1f411ba2615410910fbf8c6888 | [
"MIT"
] | 18 | 2019-03-05T13:09:07.000Z | 2022-01-27T20:45:11.000Z | tests/test_remove_comments.py | PoWWoP/wiki-dump-reader | a7c195f132753a1f411ba2615410910fbf8c6888 | [
"MIT"
] | 2 | 2019-03-21T17:59:38.000Z | 2019-09-20T22:16:11.000Z | tests/test_remove_comments.py | PoWWoP/wiki-dump-reader | a7c195f132753a1f411ba2615410910fbf8c6888 | [
"MIT"
] | 5 | 2019-10-06T13:47:33.000Z | 2022-02-25T15:11:04.000Z | import unittest
from wiki_dump_reader import Cleaner
class TestRemoveComments(unittest.TestCase):
    """Tests for Cleaner's (private) comment-removal step."""
    def setUp(self):
        self.maxDiff = None  # show full diffs for the long wiki-markup strings
        self.cleaner = Cleaner()
    def test_remove_comments_normal(self):
        # The input embeds an HTML comment (<!-- Bot generated title -->)
        # inside a refTag template; the expected output is identical text
        # with only that comment stripped.
        text = "[[面包]]是德国饮食的重要组成部分。德国出产近600种面包和1,200种糕点和[[圆面包]]。德国[[奶酪]]的生产数量占到全欧" \
               "洲的近三分之一{{refTag|name=IG|1=[https://books.google.com/books?id=sjW9adVFS2kC&pg=PA113 The " \
               "Complete Idiot's Guide to Cheeses of the World - Steve Ehlers, Jeanette Hurt<!-- Bot generated " \
               "title -->]. pp. 113-115.}}。2012年,超过99%在德国生产的肉类为猪肉、鸡肉和牛肉。香肠在德国极为普遍,生产种类" \
               "近1,500种,包括[[德国油煎香肠|油煎香肠]]、[[巴伐利亚白香肠|白香肠]]和[[德國咖哩香腸|咖喱香肠]]等"
        expected = "[[面包]]是德国饮食的重要组成部分。德国出产近600种面包和1,200种糕点和[[圆面包]]。德国[[奶酪]]的生产数量占到" \
                   "全欧洲的近三分之一{{refTag|name=IG|1=[https://books.google.com/books?id=sjW9adVFS2kC&pg=PA113" \
                   " The Complete Idiot's Guide to Cheeses of the World - Steve Ehlers, Jeanette Hurt]. pp. " \
                   "113-115.}}。2012年,超过99%在德国生产的肉类为猪肉、鸡肉和牛肉。香肠在德国极为普遍,生产种类近1,500种," \
                   "包括[[德国油煎香肠|油煎香肠]]、[[巴伐利亚白香肠|白香肠]]和[[德國咖哩香腸|咖喱香肠]]等"
        actual = self.cleaner._remove_comments(text)
        self.assertEqual(expected, actual)
| 52.875 | 114 | 0.627266 |
c69a6da9bb084c6703d0401bfd3982dac873703f | 45,949 | py | Python | django/db/models/fields/__init__.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 3 | 2016-07-08T23:49:32.000Z | 2018-04-15T22:55:01.000Z | django/db/models/fields/__init__.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 27 | 2017-02-05T15:57:04.000Z | 2018-04-15T22:57:26.000Z | django/db/models/fields/__init__.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | null | null | null | import datetime
import decimal
import re
import time
import math
from itertools import tee
import django.utils.copycompat as copy
from django.db import connection
from django.db.models.fields.subclassing import LegacyConnection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils import datetime_safe
class NOT_PROVIDED:
    # Sentinel used as Field's ``default`` default, so ``default=None`` can be
    # distinguished from "no default given" (see Field.has_default).
    pass
# The values to use for "blank" in SelectFields. Will be appended to the start of most "choices" lists.
# Each is a single (value, label) pair representing the empty selection.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
    # Only declared here; presumably raised by model-field lookups elsewhere
    # in the framework (no raise site is visible in this module chunk).
    pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
    """Base class for all field types"""
    __metaclass__ = LegacyConnection

    # Designates whether empty strings fundamentally are allowed at the
    # database level.
    empty_strings_allowed = True

    # These track each time a Field instance is created. Used to retain order.
    # The auto_creation_counter is used for fields that Django implicitly
    # creates, creation_counter is used for all user-specified fields.
    creation_counter = 0
    auto_creation_counter = -1

    default_validators = [] # Default set of validators
    # Lazily translated messages; merged per-MRO and with user overrides in __init__.
    default_error_messages = {
        'invalid_choice': _(u'Value %r is not a valid choice.'),
        'null': _(u'This field cannot be null.'),
        'blank': _(u'This field cannot be blank.'),
    }

    # Generic field type description, usually overridden by subclasses
    def _description(self):
        return _(u'Field of type: %(field_type)s') % {
            'field_type': self.__class__.__name__
        }
    description = property(_description)
    def __init__(self, verbose_name=None, name=None, primary_key=False,
            max_length=None, unique=False, blank=False, null=False,
            db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
            serialize=True, unique_for_date=None, unique_for_month=None,
            unique_for_year=None, choices=None, help_text='', db_column=None,
            db_tablespace=None, auto_created=False, validators=[],
            error_messages=None):
        # NOTE(review): ``validators=[]`` is a mutable default; it is safe here
        # only because the list is never mutated (it is copied via ``+`` below).
        self.name = name
        self.verbose_name = verbose_name
        self.primary_key = primary_key
        self.max_length, self._unique = max_length, unique
        self.blank, self.null = blank, null
        # Oracle treats the empty string ('') as null, so coerce the null
        # option whenever '' is a possible value.
        if self.empty_strings_allowed and connection.features.interprets_empty_strings_as_nulls:
            self.null = True
        self.rel = rel
        self.default = default
        self.editable = editable
        self.serialize = serialize
        self.unique_for_date, self.unique_for_month = unique_for_date, unique_for_month
        self.unique_for_year = unique_for_year
        self._choices = choices or []
        self.help_text = help_text
        self.db_column = db_column
        self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
        self.auto_created = auto_created

        # Set db_index to True if the field has a relationship and doesn't explicitly set db_index.
        self.db_index = db_index

        # Adjust the appropriate creation counter, and save our local copy.
        if auto_created:
            self.creation_counter = Field.auto_creation_counter
            Field.auto_creation_counter -= 1
        else:
            self.creation_counter = Field.creation_counter
            Field.creation_counter += 1

        self.validators = self.default_validators + validators

        # Collect error messages from the whole MRO (base classes first so
        # subclasses win), then apply per-instance overrides.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages
    def __cmp__(self, other):
        # This is needed because bisect does not take a comparison function.
        # Python 2 ordering: fields sort by declaration (creation) order.
        return cmp(self.creation_counter, other.creation_counter)
    def __deepcopy__(self, memodict):
        # We don't have to deepcopy very much here, since most things are not
        # intended to be altered after initial creation.  Only ``rel`` gets a
        # shallow copy of its own; everything else is shared.
        obj = copy.copy(self)
        if self.rel:
            obj.rel = copy.copy(self.rel)
        memodict[id(self)] = obj
        return obj
    def to_python(self, value):
        """
        Converts the input value into the expected Python data type, raising
        django.core.exceptions.ValidationError if the data can't be converted.
        Returns the converted value. Subclasses should override this.
        """
        # Base implementation: identity (no conversion).
        return value
    def run_validators(self, value):
        # Run every configured validator, collecting all failures into a
        # single ValidationError instead of stopping at the first one.
        if value in validators.EMPTY_VALUES:
            return

        errors = []
        for v in self.validators:
            try:
                v(value)
            except exceptions.ValidationError, e:
                # Prefer this field's own message when the validator supplies
                # a matching error code.
                if hasattr(e, 'code') and e.code in self.error_messages:
                    message = self.error_messages[e.code]
                    if e.params:
                        message = message % e.params
                    errors.append(message)
                else:
                    errors.extend(e.messages)
        if errors:
            raise exceptions.ValidationError(errors)
    def validate(self, value, model_instance):
        """
        Validates value and throws ValidationError. Subclasses should override
        this to provide validation logic.

        Checks, in order: choices membership, null, and blank.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return
        if self._choices and value:
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for options.
                    for optgroup_key, optgroup_value in option_value:
                        if value == optgroup_key:
                            return
                elif value == option_key:
                    return
            raise exceptions.ValidationError(self.error_messages['invalid_choice'] % value)

        if value is None and not self.null:
            raise exceptions.ValidationError(self.error_messages['null'])

        if not self.blank and value in validators.EMPTY_VALUES:
            raise exceptions.ValidationError(self.error_messages['blank'])
    def clean(self, value, model_instance):
        """
        Convert the value's type and run validation. Validation errors from to_python
        and validate are propagated. The correct value is returned if no error is
        raised.
        """
        value = self.to_python(value)
        self.validate(value, model_instance)
        self.run_validators(value)
        return value
    def db_type(self, connection):
        """
        Returns the database column data type for this field, for the provided
        connection.  Returns None when the backend has no mapping for this
        field's internal type.
        """
        # The default implementation of this method looks at the
        # backend-specific DATA_TYPES dictionary, looking up the field by its
        # "internal type".
        #
        # A Field class can implement the get_internal_type() method to specify
        # which *preexisting* Django Field class it's most similar to -- i.e.,
        # an XMLField is represented by a TEXT column type, which is the same
        # as the TextField Django field type, which means XMLField's
        # get_internal_type() returns 'TextField'.
        #
        # But the limitation of the get_internal_type() / data_types approach
        # is that it cannot handle database column types that aren't already
        # mapped to one of the built-in Django field types. In this case, you
        # can implement db_type() instead of get_internal_type() to specify
        # exactly which wacky database column type you want to use.
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return connection.creation.data_types[self.get_internal_type()] % data
        except KeyError:
            return None
    def related_db_type(self, connection):
        # This is the db_type used by a ForeignKey.  By default identical to
        # the field's own db_type; subclasses may override.
        return self.db_type(connection=connection)
    def unique(self):
        # Read-only property: primary keys are implicitly unique.
        return self._unique or self.primary_key
    unique = property(unique)
    def set_attributes_from_name(self, name):
        # Derive attname/column from the model attribute name; a missing
        # verbose_name defaults to the name with underscores as spaces.
        self.name = name
        self.attname, self.column = self.get_attname_column()
        if self.verbose_name is None and name:
            self.verbose_name = name.replace('_', ' ')
    def contribute_to_class(self, cls, name):
        # Hook called by the model metaclass: register this field on the model
        # and, when choices exist, add the get_FOO_display() accessor.
        self.set_attributes_from_name(name)
        self.model = cls
        cls._meta.add_field(self)
        if self.choices:
            setattr(cls, 'get_%s_display' % self.name, curry(cls._get_FIELD_display, field=self))
    def get_attname(self):
        # Attribute name on the model instance; same as the field name here
        # (ForeignKey-style "_id" suffixing happens in subclasses).
        return self.name
    def get_attname_column(self):
        # The DB column defaults to the attribute name unless db_column
        # explicitly overrides it.
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
    def get_internal_type(self):
        # Key used to look this field up in the backend's data_types mapping;
        # defaults to the concrete class name.
        return self.__class__.__name__
    def pre_save(self, model_instance, add):
        "Returns field's value just before saving."
        # ``add`` is True on initial insert; the base class ignores it.
        return getattr(model_instance, self.attname)
    def get_prep_value(self, value):
        "Perform preliminary non-db specific value checks and conversions."
        # Base implementation: identity; subclasses coerce types here.
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        """Returns field's value prepared for interacting with the database
        backend.

        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.  When ``prepared`` is False the generic
        ``get_prep_value`` conversion is applied first.
        """
        if not prepared:
            value = self.get_prep_value(value)
        return value
    def get_db_prep_save(self, value, connection):
        "Returns field's value prepared for saving into a database."
        # Saving always starts from an unprepared value.
        return self.get_db_prep_value(value, connection=connection, prepared=False)
    def get_prep_lookup(self, lookup_type, value):
        "Perform preliminary non-db specific lookup checks and conversions"
        # Objects that know how to prepare themselves (e.g. query expressions)
        # take precedence over per-lookup handling.
        if hasattr(value, 'prepare'):
            return value.prepare()
        if hasattr(value, '_prepare'):
            return value._prepare()

        # Pattern/date-part lookups pass through untouched; exact-style
        # lookups go through get_prep_value (element-wise for range/in).
        if lookup_type in (
            'regex', 'iregex', 'month', 'day', 'week_day', 'search',
            'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
            'endswith', 'iendswith', 'isnull'
        ):
            return value
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return self.get_prep_value(value)
        elif lookup_type in ('range', 'in'):
            return [self.get_prep_value(v) for v in value]
        elif lookup_type == 'year':
            try:
                return int(value)
            except ValueError:
                raise ValueError("The __year lookup type requires an integer argument")

        raise TypeError("Field has invalid lookup: %s" % lookup_type)
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        """Returns field's value prepared for database lookup.

        Always returns a list of SQL parameters (or a QueryWrapper for
        subquery-like values) matched against the placeholder(s) emitted for
        ``lookup_type``.
        """
        if not prepared:
            value = self.get_prep_lookup(lookup_type, value)
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)
        if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
            # If the value has a relabel_aliases method, it will need to
            # be invoked before the final SQL is evaluated
            if hasattr(value, 'relabel_aliases'):
                return value
            if hasattr(value, 'as_sql'):
                sql, params = value.as_sql()
            else:
                sql, params = value._as_sql(connection=connection)
            return QueryWrapper(('(%s)' % sql), params)
        if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day', 'search'):
            return [value]
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return [self.get_db_prep_value(value, connection=connection, prepared=prepared)]
        elif lookup_type in ('range', 'in'):
            return [self.get_db_prep_value(v, connection=connection, prepared=prepared) for v in value]
        elif lookup_type in ('contains', 'icontains'):
            # LIKE patterns: value is escaped, then wrapped in % wildcards.
            return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'iexact':
            return [connection.ops.prep_for_iexact_query(value)]
        elif lookup_type in ('startswith', 'istartswith'):
            return ["%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type in ('endswith', 'iendswith'):
            return ["%%%s" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'isnull':
            # IS NULL takes no parameters.
            return []
        elif lookup_type == 'year':
            # The backend turns a bare year into an inclusive range of bounds.
            if self.get_internal_type() == 'DateField':
                return connection.ops.year_lookup_bounds_for_date_field(value)
            else:
                return connection.ops.year_lookup_bounds(value)
    def has_default(self):
        "Returns a boolean of whether this field has a default value."
        return self.default is not NOT_PROVIDED
    def get_default(self):
        "Returns the default value for this field."
        if self.has_default():
            if callable(self.default):
                return self.default()
            return force_unicode(self.default, strings_only=True)
        # No explicit default: fall back to None for non-string fields, or for
        # nullable fields on backends that distinguish '' from NULL; otherwise ''.
        if not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls):
            return None
        return ""
    def get_validator_unique_lookup_type(self):
        """Lookup expression used when validating uniqueness of this field."""
        return '%s__exact' % self.name
    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
        """Returns choices with a default blank choices included, for use
        as SelectField choices for this field.

        With no explicit choices, builds (pk/related value, label) pairs from
        the related model's default manager, respecting limit_choices_to.
        """
        first_choice = include_blank and blank_choice or []
        if self.choices:
            return first_choice + list(self.choices)
        rel_model = self.rel.to
        if hasattr(self.rel, 'get_related_field'):
            lst = [(getattr(x, self.rel.get_related_field().attname), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
        else:
            lst = [(x._get_pk_val(), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
        return first_choice + lst
    def get_choices_default(self):
        """Choices with the default blank entry; see get_choices()."""
        return self.get_choices()
    def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
        "Returns flattened choices with a default blank choice included."
        first_choice = include_blank and blank_choice or []
        return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
    def value_to_string(self, obj):
        """
        Returns a string value of this field from the passed obj.
        This is used by the serialization framework.
        """
        return smart_unicode(self._get_val_from_obj(obj))
    def bind(self, fieldmapping, original, bound_field_class):
        """Wrap this field in *bound_field_class* for form/field binding."""
        return bound_field_class(self, fieldmapping, original)
    def _get_choices(self):
        """Return the choices, duplicating iterator input so it stays reusable."""
        # A generator/iterator (has .next on Python 2) would be exhausted after
        # one pass, so tee() it: hand out one copy and keep the other.
        if hasattr(self._choices, 'next'):
            choices, self._choices = tee(self._choices)
            return choices
        else:
            return self._choices
    choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
    def save_form_data(self, instance, data):
        """Store *data* (a cleaned form value) onto the model instance."""
        setattr(instance, self.name, data)
    def formfield(self, form_class=forms.CharField, **kwargs):
        "Returns a django.forms.Field instance for this database Field."
        defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
        if self.has_default():
            if callable(self.default):
                # Pass the callable itself so the form re-evaluates it per render.
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()
        if self.choices:
            # Fields with choices get special treatment.
            include_blank = self.blank or not (self.has_default() or 'initial' in kwargs)
            defaults['choices'] = self.get_choices(include_blank=include_blank)
            defaults['coerce'] = self.to_python
            if self.null:
                defaults['empty_value'] = None
            form_class = forms.TypedChoiceField
            # Many of the subclass-specific formfield arguments (min_value,
            # max_value) don't apply for choice fields, so be sure to only pass
            # the values that TypedChoiceField will understand.
            # NOTE(review): deleting from kwargs while iterating .keys() is safe
            # on Python 2 (keys() returns a list); would need list(kwargs) on Py3.
            for k in kwargs.keys():
                if k not in ('coerce', 'empty_value', 'choices', 'required',
                             'widget', 'label', 'initial', 'help_text',
                             'error_messages', 'show_hidden_initial'):
                    del kwargs[k]
        defaults.update(kwargs)
        return form_class(**defaults)
    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname)
class AutoField(Field):
    """Auto-incrementing integer primary key; at most one per model."""
    description = _("Integer")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be an integer.'),
    }
    def __init__(self, *args, **kwargs):
        assert kwargs.get('primary_key', False) is True, "%ss must have primary_key=True." % self.__class__.__name__
        kwargs['blank'] = True
        Field.__init__(self, *args, **kwargs)
    def get_internal_type(self):
        return "AutoField"
    def related_db_type(self, connection):
        """Column type used by foreign keys pointing at this field."""
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return connection.creation.data_types['RelatedAutoField'] % data
        except KeyError:
            # Backends without a special FK-to-auto type use a plain integer.
            return IntegerField().db_type(connection=connection)
    def to_python(self, value):
        if not (value is None or isinstance(value, (basestring, int, long))):
            raise exceptions.ValidationError(self.error_messages['invalid'])
        return value
    def validate(self, value, model_instance):
        # The database assigns the value, so there is nothing to validate.
        pass
    def get_prep_value(self, value):
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts AutoField into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_auto(value)
    def contribute_to_class(self, cls, name):
        assert not cls._meta.has_auto_field, "A model can't have more than one AutoField."
        super(AutoField, self).contribute_to_class(cls, name)
        cls._meta.has_auto_field = True
        cls._meta.auto_field = self
    def formfield(self, **kwargs):
        # Never rendered on forms; the database supplies the value.
        return None
class BooleanField(Field):
    """True/False field; defaults to False unless declared nullable."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be either True or False.'),
    }
    description = _("Boolean (Either True or False)")
    def __init__(self, *args, **kwargs):
        kwargs['blank'] = True
        if 'default' not in kwargs and not kwargs.get('null'):
            kwargs['default'] = False
        Field.__init__(self, *args, **kwargs)
    def get_internal_type(self):
        return "BooleanField"
    def to_python(self, value):
        if value in (True, False):
            # if value is 1 or 0 than it's equal to True or False, but we want
            # to return a true bool for semantic reasons.
            return bool(value)
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(self.error_messages['invalid'])
    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(BooleanField, self).get_prep_lookup(lookup_type, value)
    def get_prep_value(self, value):
        if value is None:
            return None
        return bool(value)
    def formfield(self, **kwargs):
        # Unlike most fields, BooleanField figures out include_blank from
        # self.null instead of self.blank.
        if self.choices:
            include_blank = self.null or not (self.has_default() or 'initial' in kwargs)
            defaults = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            defaults = {'form_class': forms.BooleanField}
        defaults.update(kwargs)
        return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
    """Fixed-max-length string field; enforces max_length via a validator."""
    description = _("String (up to %(max_length)s)")
    def __init__(self, *args, **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        self.validators.append(validators.MaxLengthValidator(self.max_length))
    def get_internal_type(self):
        return "CharField"
    def to_python(self, value):
        if isinstance(value, basestring) or value is None:
            return value
        return smart_unicode(value)
    def get_prep_value(self, value):
        return self.to_python(value)
    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        defaults = {'max_length': self.max_length}
        defaults.update(kwargs)
        return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
    """CharField constrained to a comma-separated list of integers."""
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")
    def formfield(self, **kwargs):
        defaults = {
            'error_messages': {
                'invalid': _(u'Enter only digits separated by commas.'),
            }
        }
        defaults.update(kwargs)
        return super(CommaSeparatedIntegerField, self).formfield(**defaults)
# Matches ISO-style dates 'YYYY-M-D'; used by DateField.to_python for strings.
ansi_date_re = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$')
class DateField(Field):
    """Date (no time) field with optional auto_now/auto_now_add behavior."""
    description = _("Date (without time)")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('Enter a valid date in YYYY-MM-DD format.'),
        'invalid_date': _('Invalid date: %s'),
    }
    def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        #HACKs : auto_now_add/auto_now should be done as a default or a pre_save.
        if auto_now or auto_now_add:
            kwargs['editable'] = False
            kwargs['blank'] = True
        Field.__init__(self, verbose_name, name, **kwargs)
    def get_internal_type(self):
        return "DateField"
    def to_python(self, value):
        """Convert datetimes, dates or 'YYYY-MM-DD' strings to datetime.date."""
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value.date()
        if isinstance(value, datetime.date):
            return value
        if not ansi_date_re.search(value):
            raise exceptions.ValidationError(self.error_messages['invalid'])
        # Now that we have the date string in YYYY-MM-DD format, check to make
        # sure it's a valid date.
        # We could use time.strptime here and catch errors, but datetime.date
        # produces much friendlier error messages.
        year, month, day = map(int, value.split('-'))
        try:
            return datetime.date(year, month, day)
        except ValueError, e:
            msg = self.error_messages['invalid_date'] % _(str(e))
            raise exceptions.ValidationError(msg)
    def pre_save(self, model_instance, add):
        # auto_now stamps on every save; auto_now_add only on first insert
        # when no value was supplied.
        old_value = getattr(model_instance, self.attname)
        if self.auto_now or (not old_value and self.auto_now_add and add):
            value = datetime.date.today()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateField, self).pre_save(model_instance, add)
    def contribute_to_class(self, cls, name):
        # Adds get_next_by_<name>/get_previous_by_<name> helpers on the model.
        super(DateField,self).contribute_to_class(cls, name)
        if not self.null:
            setattr(cls, 'get_next_by_%s' % self.name,
                curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True))
            setattr(cls, 'get_previous_by_%s' % self.name,
                curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False))
    def get_prep_lookup(self, lookup_type, value):
        # For "__month", "__day", and "__week_day" lookups, convert the value
        # to an int so the database backend always sees a consistent type.
        if lookup_type in ('month', 'day', 'week_day'):
            return int(value)
        return super(DateField, self).get_prep_lookup(lookup_type, value)
    def get_prep_value(self, value):
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_date(value)
    def value_to_string(self, obj):
        """Serialize as 'YYYY-MM-DD' (empty string for None)."""
        val = self._get_val_from_obj(obj)
        if val is None:
            data = ''
        else:
            data = datetime_safe.new_date(val).strftime("%Y-%m-%d")
        return data
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateField}
        defaults.update(kwargs)
        return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
    """Date-and-time field; parses 'YYYY-MM-DD HH:MM[:ss[.uuuuuu]]' strings."""
    default_error_messages = {
        'invalid': _(u'Enter a valid date/time in YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.'),
    }
    description = _("Date (with time)")
    def get_internal_type(self):
        return "DateTimeField"
    def to_python(self, value):
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            # Promote a bare date to midnight of that day.
            return datetime.datetime(value.year, value.month, value.day)
        # Attempt to parse a datetime:
        value = smart_str(value)
        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        try: # Seconds are optional, so try converting seconds first.
            return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6],
                                     **kwargs)
        except ValueError:
            try: # Try without seconds.
                return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M')[:5],
                                         **kwargs)
            except ValueError: # Try without hour/minutes/seconds.
                try:
                    return datetime.datetime(*time.strptime(value, '%Y-%m-%d')[:3],
                                             **kwargs)
                except ValueError:
                    raise exceptions.ValidationError(self.error_messages['invalid'])
    def pre_save(self, model_instance, add):
        # Same auto_now/auto_now_add behavior as DateField, but full datetime.
        old_value = getattr(model_instance, self.attname)
        if self.auto_now or (not old_value and self.auto_now_add and add):
            value = datetime.datetime.now()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(DateTimeField, self).pre_save(model_instance, add)
    def get_prep_value(self, value):
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts dates into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_datetime(value)
    def value_to_string(self, obj):
        """Serialize as 'YYYY-MM-DD HH:MM:SS' (empty string for None)."""
        val = self._get_val_from_obj(obj)
        if val is None:
            data = ''
        else:
            d = datetime_safe.new_datetime(val)
            data = d.strftime('%Y-%m-%d %H:%M:%S')
        return data
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.DateTimeField}
        defaults.update(kwargs)
        return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
    """Fixed-precision decimal field (decimal.Decimal in Python)."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be a decimal number.'),
    }
    description = _("Decimal number")
    def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs):
        self.max_digits, self.decimal_places = max_digits, decimal_places
        Field.__init__(self, verbose_name, name, **kwargs)
    def get_internal_type(self):
        return "DecimalField"
    def to_python(self, value):
        if value is None:
            return value
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(self.error_messages['invalid'])
    def _format(self, value):
        # Strings (and None) pass through; numbers get fixed-point formatting.
        if isinstance(value, basestring) or value is None:
            return value
        else:
            return self.format_number(value)
    def format_number(self, value):
        """
        Formats a number into a string with the requisite number of digits and
        decimal places.
        """
        # Method moved to django.db.backends.util.
        #
        # It is preserved because it is used by the oracle backend
        # (django.db.backends.oracle.query), and also for
        # backwards-compatibility with any external code which may have used
        # this method.
        from django.db.backends import util
        return util.format_number(value, self.max_digits, self.decimal_places)
    def get_db_prep_save(self, value, connection):
        return connection.ops.value_to_db_decimal(self.to_python(value),
                self.max_digits, self.decimal_places)
    def get_prep_value(self, value):
        return self.to_python(value)
    def formfield(self, **kwargs):
        defaults = {
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
            'form_class': forms.DecimalField,
        }
        defaults.update(kwargs)
        return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
    """CharField that validates its contents as an e-mail address."""
    default_validators = [validators.validate_email]
    description = _("E-mail address")
    def __init__(self, *args, **kwargs):
        # Default the column width to 75 characters unless the caller set one.
        kwargs.setdefault('max_length', 75)
        CharField.__init__(self, *args, **kwargs)
    def formfield(self, **kwargs):
        # As with CharField, this will cause email validation to be performed twice
        field_args = {'form_class': forms.EmailField}
        field_args.update(kwargs)
        return super(EmailField, self).formfield(**field_args)
class FilePathField(Field):
    """Stores a filesystem path chosen from files under a fixed directory."""
    description = _("File path")
    def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, **kwargs):
        # path: directory to list; match: regex filter; recursive: descend subdirs.
        self.path, self.match, self.recursive = path, match, recursive
        kwargs['max_length'] = kwargs.get('max_length', 100)
        Field.__init__(self, verbose_name, name, **kwargs)
    def formfield(self, **kwargs):
        defaults = {
            'path': self.path,
            'match': self.match,
            'recursive': self.recursive,
            'form_class': forms.FilePathField,
        }
        defaults.update(kwargs)
        return super(FilePathField, self).formfield(**defaults)
    def get_internal_type(self):
        return "FilePathField"
class FloatField(Field):
    """Floating-point number field (Python float)."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be a float."),
    }
    description = _("Floating point number")
    def get_prep_value(self, value):
        if value is None:
            return None
        return float(value)
    def get_internal_type(self):
        return "FloatField"
    def to_python(self, value):
        if value is None:
            return value
        try:
            return float(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(self.error_messages['invalid'])
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.FloatField}
        defaults.update(kwargs)
        return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
    """Standard integer field."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be an integer."),
    }
    description = _("Integer")
    def get_prep_value(self, value):
        if value is None:
            return None
        return int(value)
    def get_prep_lookup(self, lookup_type, value):
        # Round fractional bounds up so gte/lt comparisons against an integer
        # column keep their intended meaning (e.g. x >= 2.5 means x >= 3).
        if (lookup_type == 'gte' or lookup_type == 'lt') \
           and isinstance(value, float):
            value = math.ceil(value)
        return super(IntegerField, self).get_prep_lookup(lookup_type, value)
    def get_internal_type(self):
        return "IntegerField"
    def to_python(self, value):
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(self.error_messages['invalid'])
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IntegerField}
        defaults.update(kwargs)
        return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
    """64-bit integer field."""
    empty_strings_allowed = False
    description = _("Big (8 byte) integer")
    # Largest value representable in a signed 64-bit integer (2**63 - 1).
    MAX_BIGINT = 9223372036854775807
    def get_internal_type(self):
        return "BigIntegerField"
    def formfield(self, **kwargs):
        defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
                    'max_value': BigIntegerField.MAX_BIGINT}
        defaults.update(kwargs)
        return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
    """IPv4 address stored as text (max 15 chars: '255.255.255.255')."""
    empty_strings_allowed = False
    description = _("IP address")
    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = 15
        Field.__init__(self, *args, **kwargs)
    def get_internal_type(self):
        return "IPAddressField"
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.IPAddressField}
        defaults.update(kwargs)
        return super(IPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
    """Boolean field that also admits None (always null=True, blank=True)."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be either None, True or False."),
    }
    description = _("Boolean (Either True, False or None)")
    def __init__(self, *args, **kwargs):
        kwargs['null'] = True
        kwargs['blank'] = True
        Field.__init__(self, *args, **kwargs)
    def get_internal_type(self):
        return "NullBooleanField"
    def to_python(self, value):
        if value is None:
            return None
        if value in (True, False):
            return bool(value)
        if value in ('None',):
            return None
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(self.error_messages['invalid'])
    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(NullBooleanField, self).get_prep_lookup(lookup_type, value)
    def get_prep_value(self, value):
        if value is None:
            return None
        return bool(value)
    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.NullBooleanField,
            'required': not self.blank,
            'label': capfirst(self.verbose_name),
            'help_text': self.help_text}
        defaults.update(kwargs)
        return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
    """Integer field restricted to values >= 0 at the form level."""
    description = _("Integer")
    def related_db_type(self, connection):
        # Some backends require foreign keys to match the exact column type;
        # others accept a plain integer instead of the unsigned variant.
        if not connection.features.related_fields_match_type:
            return IntegerField().db_type(connection=connection)
        return super(PositiveIntegerField, self).related_db_type(
            connection=connection)
    def get_internal_type(self):
        return "PositiveIntegerField"
    def formfield(self, **kwargs):
        defaults = {'min_value': 0}
        defaults.update(kwargs)
        return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
    """Small integer field restricted to values >= 0 at the form level."""
    description = _("Integer")
    def related_db_type(self, connection):
        # See PositiveIntegerField.related_db_type: fall back to plain integer
        # on backends where FK column types need not match exactly.
        if not connection.features.related_fields_match_type:
            return IntegerField().db_type(connection=connection)
        return super(PositiveSmallIntegerField, self).related_db_type(
            connection=connection)
    def get_internal_type(self):
        return "PositiveSmallIntegerField"
    def formfield(self, **kwargs):
        defaults = {'min_value': 0}
        defaults.update(kwargs)
        return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
    """CharField for URL slugs: max_length defaults to 50, indexed by default."""
    description = _("String (up to %(max_length)s)")
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('max_length', 50)
        # Slugs are normally used in lookups, so index unless told otherwise.
        kwargs.setdefault('db_index', True)
        super(SlugField, self).__init__(*args, **kwargs)
    def get_internal_type(self):
        return "SlugField"
    def formfield(self, **kwargs):
        field_args = {'form_class': forms.SlugField}
        field_args.update(kwargs)
        return super(SlugField, self).formfield(**field_args)
class SmallIntegerField(IntegerField):
    """Integer field mapped to the backend's small-integer column type."""
    description = _("Integer")
    def get_internal_type(self):
        return "SmallIntegerField"
class TextField(Field):
    """Unbounded text field; rendered as a textarea on forms."""
    description = _("Text")
    def get_internal_type(self):
        return "TextField"
    def get_prep_value(self, value):
        if isinstance(value, basestring) or value is None:
            return value
        return smart_unicode(value)
    def formfield(self, **kwargs):
        defaults = {'widget': forms.Textarea}
        defaults.update(kwargs)
        return super(TextField, self).formfield(**defaults)
class TimeField(Field):
    """Time-of-day field; parses 'HH:MM[:ss[.uuuuuu]]' strings."""
    description = _("Time")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('Enter a valid time in HH:MM[:ss[.uuuuuu]] format.'),
    }
    def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            kwargs['editable'] = False
        Field.__init__(self, verbose_name, name, **kwargs)
    def get_internal_type(self):
        return "TimeField"
    def to_python(self, value):
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()
        # Attempt to parse a datetime:
        value = smart_str(value)
        # split usecs, because they are not recognized by strptime.
        if '.' in value:
            try:
                value, usecs = value.split('.')
                usecs = int(usecs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        else:
            usecs = 0
        kwargs = {'microsecond': usecs}
        try: # Seconds are optional, so try converting seconds first.
            return datetime.time(*time.strptime(value, '%H:%M:%S')[3:6],
                                 **kwargs)
        except ValueError:
            try: # Try without seconds.
                return datetime.time(*time.strptime(value, '%H:%M')[3:5],
                                     **kwargs)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
    def pre_save(self, model_instance, add):
        # auto_now stamps on every save; auto_now_add only on first insert
        # when no value was supplied.
        old_value = getattr(model_instance, self.attname)
        if self.auto_now or (not old_value and self.auto_now_add and add):
            value = datetime.datetime.now().time()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(TimeField, self).pre_save(model_instance, add)
    def get_prep_value(self, value):
        return self.to_python(value)
    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts times into the format expected by the backend
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_time(value)
    def value_to_string(self, obj):
        """Serialize as 'HH:MM:SS' (empty string for None)."""
        val = self._get_val_from_obj(obj)
        if val is None:
            data = ''
        else:
            data = val.strftime("%H:%M:%S")
        return data
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.TimeField}
        defaults.update(kwargs)
        return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
    """CharField validating a URL; optionally verifies the URL is reachable."""
    description = _("URL")
    def __init__(self, verbose_name=None, name=None, verify_exists=True, **kwargs):
        kwargs['max_length'] = kwargs.get('max_length', 200)
        CharField.__init__(self, verbose_name, name, **kwargs)
        # verify_exists=True makes the validator issue a network request.
        self.validators.append(validators.URLValidator(verify_exists=verify_exists))
    def formfield(self, **kwargs):
        # As with CharField, this will cause URL validation to be performed twice
        defaults = {
            'form_class': forms.URLField,
        }
        defaults.update(kwargs)
        return super(URLField, self).formfield(**defaults)
class XMLField(TextField):
    """TextField intended for XML content, with an optional schema path."""
    description = _("XML text")
    def __init__(self, verbose_name=None, name=None, schema_path=None, **kwargs):
        # schema_path is stored but not validated against here.
        self.schema_path = schema_path
        # Calls Field.__init__ directly (TextField defines no __init__ of its own).
        Field.__init__(self, verbose_name, name, **kwargs)
| 39.27265 | 167 | 0.615661 |
7e27bf1834f10ea5991650db004e2b807f23ce53 | 13,291 | py | Python | tools/math/svgmath/tables.py | heycam/svgwg | bde951fcca4c7a4c117921219b3fcaa2245aa9a1 | [
"RSA-MD"
] | 1 | 2018-12-15T03:34:53.000Z | 2018-12-15T03:34:53.000Z | tools/math/svgmath/tables.py | heycam/svgwg | bde951fcca4c7a4c117921219b3fcaa2245aa9a1 | [
"RSA-MD"
] | null | null | null | tools/math/svgmath/tables.py | heycam/svgwg | bde951fcca4c7a4c117921219b3fcaa2245aa9a1 | [
"RSA-MD"
] | null | null | null | """Table-related formatting functions.
This module contains functions called from measurers.py to format tables."""
import sys, mathnode
def getByIndexOrLast(lst, idx):
    """Return lst[idx], clamping to the final element when idx runs past the end."""
    return lst[idx] if idx < len(lst) else lst[-1]
class CellDescriptor:
    """Descriptor of a single cell in a table"""
    def __init__(self, content, halign, valign, colspan, rowspan):
        # Pure data record: the cell body plus its alignment and spans.
        self.content = content
        self.halign, self.valign = halign, valign
        self.colspan, self.rowspan = colspan, rowspan
class ColumnDescriptor:
    """Descriptor of a single column in a table"""
    def __init__(self):
        # Columns start out flexible; widths are resolved later
        # (see calculateColumnWidths).
        self.auto, self.fit = True, False
        self.width = 0
        self.spaceAfter = 0
        self.lineAfter = None
class RowDescriptor:
    """Descriptor of a single row in a table; contains cells"""
    def __init__(self, node, cells, rowalign, columnaligns, busycells):
        # busycells[i] > 0 means column i is still occupied by a cell spanning
        # down from an earlier row; such slots are filled with None here.
        self.alignToAxis = (rowalign == u"axis")
        self.height = 0
        self.depth = 0
        self.spaceAfter = 0
        self.lineAfter = None
        self.cells = []
        for c in cells:
            # Find the first free cell
            while len(busycells) > len(self.cells) and busycells[len(self.cells)] > 0:
                self.cells.append(None)
            # Per-cell <mtd> attributes override the row/column defaults.
            halign = getByIndexOrLast(columnaligns, len(self.cells))
            valign = rowalign
            colspan = 1
            rowspan = 1
            if c.elementName == u"mtd":
                halign = c.attributes.get(u"columnalign", halign)
                valign = c.attributes.get(u"rowalign", valign)
                colspan = node.parseInt(c.attributes.get(u"colspan", u"1"))
                rowspan = node.parseInt(c.attributes.get(u"rowspan", u"1"))
            # Grow the table's column list to cover this cell.
            while len(self.cells) >= len(node.columns):
                node.columns.append(ColumnDescriptor())
            self.cells.append(CellDescriptor(c, halign, valign, colspan, rowspan))
            # Colspan > 1 reserves the following slots with None placeholders.
            for i in range (1, colspan): self.cells.append(None)
        while len(self.cells) > len(node.columns):
            node.columns.append(ColumnDescriptor())
def arrangeCells(node):
    """Partition the children of a table node into rows and columns of cells.

    Populates node.rows and node.columns.  Cells spanning several rows are
    tracked in 'busycells' (a per-column countdown of rows still occupied),
    so later rows leave gaps for them; the table is finally padded with empty
    rows until no rowspan protrudes past the last row.
    """
    node.rows = []
    node.columns = []
    busycells = []
    # Read table-level alignment properties
    table_rowaligns = node.getListProperty(u"rowalign")
    table_columnaligns = node.getListProperty(u"columnalign")
    for ch in node.children:
        rowalign = getByIndexOrLast(table_rowaligns, len(node.rows))
        row_columnaligns = table_columnaligns
        if ch.elementName == u"mtr" or ch.elementName == "mlabeledtr":
            cells = ch.children
            rowalign = ch.attributes.get(u"rowalign", rowalign)
            if u"columnalign" in ch.attributes.keys():
                # BUG FIX: the per-row columnalign override was previously
                # assigned to a dead local ('columnaligns') and silently
                # discarded; it must replace row_columnaligns to take effect.
                row_columnaligns = node.getListProperty(u"columnalign", ch.attributes.get(u"columnalign"))
        else:
            # A child that is not a row element forms an implicit one-cell row.
            cells = [ch]
        row = RowDescriptor(node, cells, rowalign, row_columnaligns, busycells)
        node.rows.append(row)
        # busycells passes information about cells spanning multiple rows
        busycells = [max (0, n - 1) for n in busycells]
        while len(busycells) < len(row.cells): busycells.append(0)
        for i in range (len(row.cells)):
            cell = row.cells[i]
            if cell is None: continue
            if cell.rowspan > 1:
                for j in range(i, i + cell.colspan):
                    busycells[j] = cell.rowspan - 1
    # Pad the table with empty rows until no spanning cell protrudes.
    # (Guard against max() on an empty list when the table has no cells.)
    while len(busycells) > 0 and max(busycells) > 0:
        rowalign = getByIndexOrLast(table_rowaligns, len(node.rows))
        node.rows.append(RowDescriptor(node, [], rowalign, table_columnaligns, busycells))
        busycells = [max (0, n - 1) for n in busycells]
def arrangeLines(node):
    """Distribute row/column spacing, rule lines and frame settings on node.

    Fills rows[i].spaceAfter/lineAfter, columns[i].spaceAfter/lineAfter, and
    node.framespacings/framelines from the MathML style properties; a drawn
    line adds node.lineWidth to the adjacent spacing.
    """
    # Get spacings and line styles; expand to cover the table fully
    # NOTE(review): map() must yield a list for getByIndexOrLast's len();
    # true under Python 2, would need list(map(...)) on Python 3.
    spacings = map(node.parseLength, node.getListProperty(u"rowspacing"))
    lines = node.getListProperty(u"rowlines")
    for i in range(len(node.rows) - 1):
        node.rows[i].spaceAfter = getByIndexOrLast(spacings, i)
        line = getByIndexOrLast(lines, i)
        if line != u"none":
            node.rows[i].lineAfter = line
            node.rows[i].spaceAfter += node.lineWidth
    spacings = map(node.parseSpace, node.getListProperty(u"columnspacing"))
    lines = node.getListProperty(u"columnlines")
    for i in range(len(node.columns) - 1):
        node.columns[i].spaceAfter = getByIndexOrLast(spacings, i)
        line = getByIndexOrLast(lines, i)
        if line != u"none":
            node.columns[i].lineAfter = line
            node.columns[i].spaceAfter += node.lineWidth
    # Frame: index 0 is the horizontal setting, index 1 the vertical one.
    node.framespacings = [0, 0]
    node.framelines = [None, None]
    spacings = map(node.parseSpace, node.getListProperty(u"framespacing"))
    lines = node.getListProperty(u"frame")
    for i in range(2):
        line = getByIndexOrLast(lines, i)
        if line != u"none":
            node.framespacings[i] = getByIndexOrLast(spacings, i)
            node.framelines[i] = line
def calculateColumnWidths(node):
    """Determine the width of every column of the table.

    Order matters: fixed/percentage widths are applied first, then
    auto columns grow to fit single-column cells, then multi-column
    (colspan > 1) cells widen auto columns iteratively, and finally an
    explicit table width (if any) is distributed over sizable columns.
    """
    # Get total width
    fullwidthattr = node.attributes.get(u"width", u"auto")
    if fullwidthattr == u"auto":
        fullwidth = None
    else:
        fullwidth = node.parseLength(fullwidthattr)
        # Non-positive explicit widths are treated as 'auto'.
        if fullwidth <= 0: fullwidth = None

    # Fill fixed column widths
    columnwidths = node.getListProperty(u"columnwidth")
    for i in range(len(node.columns)):
        column = node.columns[i]
        attr = getByIndexOrLast(columnwidths, i)
        if attr in [u"auto", u"fit"]:
            # 'fit' columns absorb leftover space when the table has an
            # explicit width; both remain auto-sized for now.
            column.fit = (attr == u"fit")
        elif attr.endswith(u'%'):
            if fullwidth is None:
                node.error("Percents in column widths supported only in tables with explicit width; width of column %d treated as 'auto'" % (i+1))
            else:
                value = node.parseFloat(attr[:-1])
                if value and value > 0:
                    column.width = fullwidth * value / 100
                    column.auto = False
        else:
            column.width = node.parseSpace(attr)
            column.auto = False

    # Set initial auto widths for cells with colspan == 1
    for r in node.rows:
        for i in range(len(r.cells)):
            c = r.cells[i]
            if c is None or c.content is None or c.colspan > 1: continue
            column = node.columns[i]
            if column.auto: column.width = max(column.width, c.content.width)

    # Calculate auto widths for cells with colspan > 1
    # Each pass finds the spanning cell that needs the largest per-column
    # growth, applies it, and repeats until every spanning cell fits.
    while True:
        adjustedColumns = []
        adjustedWidth = 0

        for r in node.rows:
            for i in range(len(r.cells)):
                c = r.cells[i]
                if c is None or c.content is None or c.colspan == 1: continue

                columns = node.columns[i : i + c.colspan]
                autoColumns = [x for x in columns if x.auto]
                if len(autoColumns) == 0: continue # nothing to adjust
                fixedColumns = [x for x in columns if not x.auto]

                # Inter-column gaps inside the span count as fixed width.
                fixedWidth = sum([x.spaceAfter for x in columns[:-1]])
                if len(fixedColumns) > 0:
                    fixedWidth += sum ([x.width for x in fixedColumns])

                autoWidth = sum ([x.width for x in autoColumns])
                if c.content.width <= fixedWidth + autoWidth: continue # already fits

                requiredWidth = c.content.width - fixedWidth
                unitWidth = requiredWidth / len(autoColumns)

                # Columns already wider than the tentative unit width keep
                # their size; redistribute the remainder over the others.
                while True:
                    oversizedColumns = [x for x in autoColumns if x.width >= unitWidth]
                    if len(oversizedColumns) == 0: break

                    autoColumns = [x for x in autoColumns if x.width < unitWidth]
                    if len(autoColumns) == 0: break # weird rounding effects

                    requiredWidth -= sum ([x.width for x in oversizedColumns])
                    unitWidth = requiredWidth / len(autoColumns)
                if len(autoColumns) == 0: continue; # protection against arithmetic overflow

                # Store the maximum unit width
                if unitWidth > adjustedWidth:
                    adjustedWidth = unitWidth
                    adjustedColumns = autoColumns

        if len(adjustedColumns) == 0: break;
        for col in adjustedColumns: col.width = adjustedWidth

    if node.getProperty(u"equalcolumns") == u"true":
        # NOTE(review): max() raises on an empty sequence — this assumes at
        # least one auto column exists when 'equalcolumns' is set; confirm.
        globalWidth = max([col.width for col in node.columns if col.auto])
        for col in node.columns:
            if col.auto: col.width = globalWidth

    if fullwidth is not None:
        # Distribute the remaining (possibly negative) space over 'fit'
        # columns, falling back to auto columns.
        delta = fullwidth
        delta -= sum ([x.width for x in node.columns])
        delta -= sum ([x.spaceAfter for x in node.columns[:-1]])
        delta -= 2 * node.framespacings[0]
        if delta != 0:
            sizableColumns = [x for x in node.columns if x.fit]
            if len(sizableColumns) == 0:
                sizableColumns = [x for x in node.columns if x.auto]
            if len(sizableColumns) == 0:
                node.error("Overconstrained table layout: explicit table width specified, but no column has automatic width; table width attribute ignored")
            else:
                delta /= len(sizableColumns)
                for col in sizableColumns: col.width += delta
def calculateRowHeights(node):
    """Compute height and depth for every row of the table.

    Rows are first sized from their rowspan==1 cells (honouring each
    cell's vertical alignment), then grown iteratively so that cells
    spanning several rows fit, and finally equalized if the 'equalrows'
    property is set.
    """
    # Set initial row heights for cells with rowspan == 1
    commonAxis = node.axis()
    for r in node.rows:
        r.height = 0
        r.depth = 0
        for c in r.cells:
            if c is None or c.content is None or c.rowspan != 1: continue
            cellAxis = c.content.axis()
            c.vshift = 0

            if c.valign == u"baseline":
                # BUGFIX: this branch referenced 'cell.vshift', but the
                # loop variable is 'c' — it raised NameError for any
                # baseline-aligned cell in an axis-aligned row.
                if r.alignToAxis: c.vshift -= commonAxis
                if c.content.alignToAxis: c.vshift += cellAxis

            elif c.valign == u"axis":
                if not r.alignToAxis: c.vshift += commonAxis
                if not c.content.alignToAxis: c.vshift -= cellAxis

            else:
                # Center the cell box on the row box sized so far
                # (order-dependent: earlier cells influence later ones).
                c.vshift = (r.height - r.depth - c.content.height + c.content.depth) / 2

            r.height = max(r.height, c.content.height + c.vshift)
            r.depth = max(r.depth, c.content.depth - c.vshift)

    # Calculate heights for cells with rowspan > 1.
    # Each pass finds the spanning cell that requires the largest uniform
    # per-row size, applies it, and repeats until everything fits.
    while True:
        adjustedRows = []
        adjustedSize = 0
        for i in range(len(node.rows)):
            r = node.rows[i]
            for c in r.cells:
                if c is None or c.content is None or c.rowspan == 1: continue

                rows = node.rows[i : i + c.rowspan]
                requiredSize = c.content.height + c.content.depth
                # Inter-row gaps inside the span already contribute space.
                requiredSize -= sum([x.spaceAfter for x in rows[:-1]])
                fullSize = sum ([x.height + x.depth for x in rows])
                if fullSize >= requiredSize: continue

                unitSize = requiredSize / len(rows)
                # Rows already taller than the tentative unit size keep
                # their size; redistribute the remainder over the rest.
                while True:
                    oversizedRows = [x for x in rows if x.height + x.depth >= unitSize]
                    if len(oversizedRows) == 0: break

                    rows = [x for x in rows if x.height + x.depth < unitSize]
                    if len(rows) == 0: break # weird rounding effects

                    requiredSize -= sum ([x.height + x.depth for x in oversizedRows])
                    unitSize = requiredSize / len(rows)
                if len(rows) == 0: continue; # protection against arithmetic overflow

                # Remember the largest adjustment seen in this pass.
                if unitSize > adjustedSize:
                    adjustedSize = unitSize
                    adjustedRows = rows

        if len(adjustedRows) == 0: break;
        for r in adjustedRows:
            # Grow height and depth symmetrically up to the unit size.
            delta = (adjustedSize - r.height - r.depth) / 2
            r.height += delta; r.depth += delta

    if node.getProperty(u"equalrows") == u"true":
        maxvsize = max([r.height + r.depth for r in node.rows])
        for r in node.rows:
            delta = (maxvsize - r.height - r.depth) / 2
            r.height += delta; r.depth += delta
def getAlign(node):
    """Parse the table's 'align' property into (alignment type, row number).

    The optional second token of the property selects the row on which
    the table is aligned; it is 1-based, with negative values counting
    from the bottom (-1 is the last row). Out-of-range values are
    clamped and an error is reported on the node.
    """
    alignattr = node.getProperty(u"align").strip()
    if len(alignattr) == 0: alignattr = mathnode.globalDefaults[u"align"]

    splitalign = alignattr.split()
    alignType = splitalign[0]

    if len(splitalign) == 1:
        alignRow = None
    else:
        alignRow = node.parseInt(splitalign[1])
        # BUGFIX: the validation below used an undefined name
        # 'alignrownumber', so supplying any row number raised NameError
        # and the clamped value was never returned.
        if alignRow == 0:
            node.error("Alignment row number cannot be zero")
            alignRow = None
        elif alignRow > len(node.rows):
            node.error("Alignment row number cannot exceed row count")
            alignRow = len(node.rows)
        elif alignRow < - len(node.rows):
            node.error("Negative alignment row number cannot exceed row count")
            alignRow = 1
        elif alignRow < 0:
            # BUGFIX: convert a bottom-relative index to its 1-based row
            # number (-1 -> len(rows)); the original subtracted, yielding
            # an out-of-range value.
            alignRow = len(node.rows) + alignRow + 1

    return (alignType, alignRow)
04e426c62da194d8fb3e6f92257a5ab48db31355 | 1,136 | py | Python | src3/compute_ars.py | WiktorJ/node2vec | f2b003a4fb394106c4acc0df0e3de8095473dc0a | [
"MIT"
] | null | null | null | src3/compute_ars.py | WiktorJ/node2vec | f2b003a4fb394106c4acc0df0e3de8095473dc0a | [
"MIT"
] | null | null | null | src3/compute_ars.py | WiktorJ/node2vec | f2b003a4fb394106c4acc0df0e3de8095473dc0a | [
"MIT"
] | null | null | null | from sklearn import cluster
from distance import calc_ars, get_gmm_clusters, principal_angle_distance
import click
from plot_utils import get_nx_graph
from utils import get_as_numpy_array, map_embeddings_to_consecutive
@click.command()
@click.option("-e1", "--embeddings_1", type=str, required=True)
@click.option("-e2", "--embeddings_2", type=str, required=True)
@click.option("-g", "--graph_path", type=str)
@click.option("-c", "--clusters", type=int, required=True)
@click.option("-m", "--method", type=click.Choice(['kmeans', 'gmm']))
def ars(embeddings_1, embeddings_2, graph_path, clusters, method):
    """Cluster two embedding files and print the Adjusted Rand Score
    between the resulting label assignments."""
    emb1, emb2 = map_embeddings_to_consecutive([embeddings_1, embeddings_2])

    if method == 'kmeans':
        # Fixed random_state keeps the k-means labelling reproducible.
        labelings = []
        for embedding in (emb1, emb2):
            model = cluster.KMeans(n_clusters=clusters, random_state=0).fit(embedding)
            labelings.append(model.labels_)
        prediction1, prediction2 = labelings
    else:
        prediction1 = get_gmm_clusters(emb1, clusters)
        prediction2 = get_gmm_clusters(emb2, clusters)

    click.echo(f"Adjusted Rand Score: {calc_ars(prediction1, prediction2)}")


if __name__ == '__main__':
    ars()
| 36.645161 | 91 | 0.732394 |
69a80438a4f25e5333691d431ef2268c9d360201 | 3,677 | py | Python | argo/workflows/client/models/v1alpha1_node_synchronization_status.py | argentumcode/argo-client-python | 31c1519056379d3f046d4b522f37af87243fdbb4 | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1alpha1_node_synchronization_status.py | argentumcode/argo-client-python | 31c1519056379d3f046d4b522f37af87243fdbb4 | [
"Apache-2.0"
] | null | null | null | argo/workflows/client/models/v1alpha1_node_synchronization_status.py | argentumcode/argo-client-python | 31c1519056379d3f046d4b522f37af87243fdbb4 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v3.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class V1alpha1NodeSynchronizationStatus(object):
    """Synchronization status of a workflow node.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); it follows the generated model
    contract: ``openapi_types`` maps attribute name -> type and
    ``attribute_map`` maps attribute name -> JSON key.
    """

    openapi_types = {
        'waiting': 'str'
    }

    attribute_map = {
        'waiting': 'waiting'
    }

    def __init__(self, waiting=None, local_vars_configuration=None):  # noqa: E501
        """V1alpha1NodeSynchronizationStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._waiting = None
        self.discriminator = None

        if waiting is not None:
            self.waiting = waiting

    @property
    def waiting(self):
        """Gets the waiting of this V1alpha1NodeSynchronizationStatus.  # noqa: E501

        Waiting is the name of the lock that this node is waiting for  # noqa: E501

        :return: The waiting of this V1alpha1NodeSynchronizationStatus.  # noqa: E501
        :rtype: str
        """
        return self._waiting

    @waiting.setter
    def waiting(self, waiting):
        """Sets the waiting of this V1alpha1NodeSynchronizationStatus.

        Waiting is the name of the lock that this node is waiting for  # noqa: E501

        :param waiting: The waiting of this V1alpha1NodeSynchronizationStatus.  # noqa: E501
        :type: str
        """
        self._waiting = waiting

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1alpha1NodeSynchronizationStatus):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1alpha1NodeSynchronizationStatus):
            return True
        return self.to_dict() != other.to_dict()
| 29.894309 | 134 | 0.596954 |
adbccaafa10f602d409545c4b11772171eaf469a | 89 | py | Python | collective/talkflow/__init__.py | upiq/collective.talkflow | 18241b3750c1ceb55cc5b97cdd2cd272823534ce | [
"BSD-4-Clause-UC"
] | null | null | null | collective/talkflow/__init__.py | upiq/collective.talkflow | 18241b3750c1ceb55cc5b97cdd2cd272823534ce | [
"BSD-4-Clause-UC"
] | null | null | null | collective/talkflow/__init__.py | upiq/collective.talkflow | 18241b3750c1ceb55cc5b97cdd2cd272823534ce | [
"BSD-4-Clause-UC"
] | null | null | null | from zope.i18nmessageid import MessageFactory
# "_" is the conventional name for a Zope i18n message factory; it tags
# literal strings in this package with the 'collective.talkflow'
# translation domain.
_ = MessageFactory('collective.talkflow')
| 22.25 | 45 | 0.831461 |
0919c5a5d62f4bd16838619114b70dab2f7074b2 | 10,902 | py | Python | Scheduler+Agent/server.py | erichilarysmithsr/daytona | c5dc829066fb4cfdb5f3271bcbe68f2f4b265c41 | [
"Apache-2.0"
] | 85 | 2017-01-12T20:47:58.000Z | 2018-10-22T18:43:08.000Z | Scheduler+Agent/server.py | YahooArchive/daytona | be9d2cbea047cbbcb7a7c5f9e027dc1443b39a40 | [
"Apache-2.0"
] | 11 | 2017-04-11T01:21:35.000Z | 2018-01-05T18:27:04.000Z | Scheduler+Agent/server.py | YahooArchive/daytona | be9d2cbea047cbbcb7a7c5f9e027dc1443b39a40 | [
"Apache-2.0"
] | 18 | 2017-03-14T06:54:26.000Z | 2018-10-25T19:05:22.000Z | #!/usr/bin/env python
# -*- coding:cp949 -*-
import socket
import SocketServer
import os
import threading
import time
import common
import action
import config
import testobj
from logger import LOG
from action import activeTest
global serverInstance
global actc
class ActionCaller:
async_actions = []
lock = threading.Lock()
def __init__(self, lctx):
self.lctx = lctx
self.async_actions = []
self.conf = config.CFG("DaytonaHost", lctx)
self.conf.readCFG("config.ini")
def removeActionItem(self, actionID):
self.lock.acquire()
for i in self.async_actions:
(t,actionid,tst,ts) = i
if actionid == actionID :
self.lctx.debug("CALLBACK RCV: " + str(actionID))
self.async_actions.remove(i)
self.lock.release()
def execute(self, command, paramcsv, actionID):
# based on SYNCFLAG release from here
# send actionID for currently being executed action based on this we can stream resp
# keep exec details over time in a buffer with actionID mapped
# send actionID NULL and hold return till exec is complete
module = self.conf.actionMap[command.strip()].split(".")[0]
function = self.conf.actionMap[command.strip()].split(".")[1]
sync = self.conf.actionMap[command.strip()].split(".")[2]
t2 = testobj.testDefn()
if command == "DAYTONA_START_TEST":
testid = int(paramcsv.split(",")[0])
hosttype = paramcsv.split(",")[1]
current_test = action.get_test(testid)
if current_test:
t2 = current_test.tobj
m = __import__ (module)
f = getattr(m,function)
if sync == "T": # wait for func to complete and return the ret
self.lctx.debug("Executing SYNC ACTION for " + command.strip() + " : " + self.conf.actionMap[command.strip()] + ":" + str(actionID))
ret = f(self, self, command, paramcsv, actionID, sync)
self.lctx.debug("ACTION completed for " + command.strip() + " : " + self.conf.actionMap[command.strip()] + ":" + str(actionID))
if command == "DAYTONA_CLI":
return "actionID=" + str(actionID) + "%" + ret + "%" + "SYNC EXEC"
else:
return "actionID=" + str(actionID) + "," + ret + "," + "SYNC EXEC"
else:
self.lctx.debug("Executing ASYNC ACTION for " + command.strip() + " : " + self.conf.actionMap[command.strip()] + ":" + str(actionID))
t1 = common.FuncThread(f, True, self, command, paramcsv, actionID, sync)
if hosttype == "EXEC":
x = (t1, actionID, t2, time.time())
self.lock.acquire()
self.async_actions.append(x)
self.lctx.debug( "async_actions size :" + str(len(self.async_actions)))
self.lock.release()
t1.start()
self.lctx.debug( "Executing ACTION for " + command.strip() + " : " + self.conf.actionMap[command.strip()] + ":" + str(actionID))
return "actionID=" + str(actionID) + "," + "SUCCESS," + "ASYNC EXEC"
class serv:
lctx = None
actc = None
role = None
registered_hosts = None
def __init__(self):
serv.lctx = LOG.getLogger("listenerlog",serv.role)
action.lctx = LOG.getLogger("actionlog", serv.role)
# todo this integration has to be reviewed
actc = ActionCaller(LOG.getLogger("actionlog", serv.role))
serv.registered_hosts={}
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def __init__(self, request, client_address, server):
global actc
self.act = serv.actc
SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
return
def setup(self):
return SocketServer.BaseRequestHandler.setup(self)
def finish(self):
return SocketServer.BaseRequestHandler.finish(self)
def handle(self):
host = self.client_address[0]
data = self.request.recv(8192)
cur_thread = threading.current_thread()
ev = data.split(":")
serv.lctx.debug("Envelope contents : " + str(ev))
cmd = ev[1]
msgid = ev[2]
params = ev[3]
serv.lctx.info(cmd)
serv.lctx.debug(msgid)
serv.lctx.debug(params)
if cmd == "DAYTONA_HANDSHAKE":
# todo : maintain a list of daytona host that this server talks to only respond to the ones in the list
p = params.split(",")
if p[0] == "handshake1":
serv.registered_hosts[host] = host
addr = socket.gethostbyname(host)
serv.registered_hosts[addr] = addr
current_test = activeTest(0, None, None, None)
current_test.stathostip = p[4]
current_test.stathostport = self.server.server_address[1]
current_test.serverip = p[1]
current_test.testid = int(p[3])
current_test.serverport = int(p[2])
current_test.status = "SETUP"
test_logger = LOG.init_testlogger(current_test,"STAT")
if test_logger:
current_test.agent_log_file = test_logger.handlers[0].baseFilename
con = action.scheduler_handshake(current_test)
if con:
action.action_lock.acquire()
action.running_tests[int(p[3])] = current_test
action.action_lock.release()
response = "{}".format("SUCCESS")
test_logger.info("Handshake successfull with daytona host : " + current_test.serverip)
else:
response = "{}".format("ERROR")
test_logger.error("Handshake failed with daytona host : " + current_test.serverip)
self.request.sendall(response)
return
else:
response = "{}".format("SUCCESS")
self.request.sendall(response)
return
if host in serv.registered_hosts.keys() or cmd in ("DAYTONA_HEARTBEAT", "DAYTONA_CLI"):
if cmd == "DAYTONA_STREAM_END":
serv.lctx.debug("End stream...")
return
if cmd == "DAYTONA_STREAM_START":
filepath = params+"/execution.log"
d = os.path.dirname(filepath)
if not os.path.exists(d):
os.makedirs(d)
f = open(filepath,'wb')
serv.lctx.debug(filepath)
serv.lctx.debug("Receiving stream..." + filepath)
response = "{}".format("STREAMFILEREADY")
self.request.send(response)
l = self.request.recv(8192)
serv.lctx.debug(len(l))
while (l):
serv.lctx.debug("Receiving stream...")
f.write(l)
print l
serv.lctx.debug(l)
f.flush()
l = self.request.recv(8192)
if l == "DAYTONA_STREAM_END":
serv.lctx.debug("receiving term string : ")
break
f.close()
# response = "{}".format("SUCCESS")
# self.request.sendall(response)
serv.lctx.debug("Done receiving stream into : " + filepath)
return
if cmd == "DAYTONA_FILE_UPLOAD":
p = params.split(",")
serv.lctx.debug("SER SERVER : " + params)
fn = p[0].split("/")
fn.reverse()
loc = p[1].strip()
serv.lctx.debug("SER SERVER : " + loc)
filepath = loc+fn[0]
d = os.path.dirname(filepath)
if not os.path.exists(d):
os.makedirs(d)
serv.lctx.debug("Receiving..." + filepath)
response = "{}".format("FILEREADY")
self.request.send(response)
f = open(filepath, 'wb')
l = self.request.recv(8192)
serv.lctx.debug(len(l))
while l:
serv.lctx.debug("Receiving...")
f.write(l)
f.flush()
l = self.request.recv(8192)
serv.lctx.debug(len(l))
f.close()
serv.lctx.debug("Done receiving results : " + filepath)
return
if cmd == "DAYTONA_STOP_SERVER":
serverInstance.shutdown()
serverInstance.server_close()
response = "{}: {}".format(cur_thread.name, "Shutting down server")
self.request.sendall(response)
if len(self.act.async_actions) > 0 :
for pending in self.act.async_actions:
(t1, actionID, tst, ts) = pending
t1.stop()
serv.lctx.debug("DAYTONA_STOP_SERVER handler, Async action thread ended after stop : " + cur_thread.name)
return
# todo : Set server to shutdown state, reject all incomming reqs if this flag set wait for all threads
# to shutdown (with timeout) gracefully shutdown before timeout, or force close beyond timeout
# exResp = self.act.execute(cmd, params, msgid)
if serv.actc is None:
serv.actc = ActionCaller(LOG.getLogger("listenerlog", serv.role))
exResp = serv.actc.execute(cmd, params, msgid)
response = "{}: {}".format(cur_thread.name, exResp)
self.request.sendall(response)
if len(serv.actc.async_actions) > 0 :
serv.lctx.debug("Async action list : " + str(len(serv.actc.async_actions)))
for pending in self.act.async_actions:
(t1, actionID, tst, ts) = pending
t1.join()
serv.lctx.debug("Async action thread ended after join : " + t1.name)
else:
serv.lctx.error("Command recieved from unknown host before handshake")
serv.lctx.error(host)
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # ThreadingMixIn makes the TCP server handle each request on its own
    # thread; no behaviour beyond the base classes is needed.
    pass
| 41.930769 | 145 | 0.517336 |
0729a3d028c4f9e9fb45e2d00662d4f5087eb045 | 230 | py | Python | notebook/numpy_count_inf.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 174 | 2018-05-30T21:14:50.000Z | 2022-03-25T07:59:37.000Z | notebook/numpy_count_inf.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 5 | 2019-08-10T03:22:02.000Z | 2021-07-12T20:31:17.000Z | notebook/numpy_count_inf.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 53 | 2018-04-27T05:26:35.000Z | 2022-03-25T07:59:37.000Z | import numpy as np
# Demonstrate detecting infinities in a NumPy array; the comment after
# each print shows its expected output. np.isinf catches both signs,
# while == np.inf / == -np.inf match only one sign each.
a_inf = np.array([-np.inf, 0, np.inf])
print(a_inf)
# [-inf 0. inf]

print(np.isinf(a_inf))
# [ True False True]

print(a_inf == np.inf)
# [False False True]

print(a_inf == -np.inf)
# [ True False False]
| 15.333333 | 38 | 0.621739 |
bde33b667b47bf988ea6f574ff0a64b8ee592229 | 3,284 | py | Python | tests/pandas/df_merge_datetime.py | marcinz/legate.pandas | 94c21c436f59c06cfba454c6569e9f5d7109d839 | [
"Apache-2.0"
] | 67 | 2021-04-12T18:06:55.000Z | 2022-03-28T06:51:05.000Z | tests/pandas/df_merge_datetime.py | marcinz/legate.pandas | 94c21c436f59c06cfba454c6569e9f5d7109d839 | [
"Apache-2.0"
] | 2 | 2021-06-22T00:30:36.000Z | 2021-07-01T22:12:43.000Z | tests/pandas/df_merge_datetime.py | marcinz/legate.pandas | 94c21c436f59c06cfba454c6569e9f5d7109d839 | [
"Apache-2.0"
] | 6 | 2021-04-14T21:28:00.000Z | 2022-03-22T09:45:25.000Z | # Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import numpy as np
import pandas as pd
from numpy.random import randn
from legate import pandas as lp
# All join keys below are nanosecond-resolution datetime64 values.
key_dtype = np.dtype("datetime64[ns]")
def convert_to_datetime(arr):
    """Offset every element of *arr* (in days) from the fixed base date
    2020-03-09, returning a list of np.datetime64 values."""
    base = np.datetime64("2020-03-09")
    return [base + offset for offset in arr]
def to_pandas(ldf):
    """Materialize *ldf* (a legate dataframe) as a plain pandas DataFrame,
    converting it column by column via to_numpy()."""
    data = {name: ldf[name].to_numpy() for name, _dtype in ldf.dtypes.items()}
    return pd.DataFrame(data)
def sort_and_compare(df1, df2):
    """Return True when the two frames hold the same rows, ignoring row
    order (both are sorted on df1's column list before comparing)."""
    sort_cols = df1.columns.to_list()
    left = df1.sort_values(by=sort_cols, ignore_index=True)
    right = df2.sort_values(by=sort_cols, ignore_index=True)
    return left.equals(right)
# Driver: for several table sizes, build a 3:1 fan-out join on datetime
# keys and check that legate's broadcast and hash merges agree with
# pandas for both inner and left joins.
for n in [15, 30, 45]:
    c1 = np.array(randn(n) * 100.0, dtype=np.int64)
    c2 = np.array(randn(n // 3) * 100.0, dtype=np.int64)
    c3_l = np.array(randn(n) * 100.0, dtype=np.int64)
    c3_r = np.array(randn(n // 3) * 100.0, dtype=np.int64)

    # Right keys are unique; left keys repeat each right key 3 times
    # (in reverse order), so every left row matches -> inner join.
    key_right = list(range(n // 3))
    key_left = list(
        itertools.chain(*[[x] * 3 for x in range(n // 3 - 1, -1, -1)])
    )

    print("Type: inner, Size: %u, Key dtype: %s" % (n, str(key_dtype)))

    df1 = pd.DataFrame(
        {
            "c1": c1,
            "key": np.array(convert_to_datetime(key_left), dtype=key_dtype),
            "c3": c3_l,
        }
    )
    df2 = pd.DataFrame(
        {
            "c2": c2,
            "key": np.array(convert_to_datetime(key_right), dtype=key_dtype),
            "c3": c3_r,
        }
    )

    ldf1 = lp.DataFrame(df1)
    ldf2 = lp.DataFrame(df2)

    join_pandas = df1.merge(df2, on="key")
    join_legate = ldf1.merge(ldf2, on="key", method="broadcast")
    join_legate_hash = ldf1.merge(ldf2, on="key", method="hash")

    # Row order after a merge is implementation-defined; compare sorted.
    assert sort_and_compare(join_pandas, to_pandas(join_legate))
    assert sort_and_compare(join_pandas, to_pandas(join_legate_hash))

    # Shift left keys by one so some rows have no match -> exercises the
    # null-producing side of the left join.
    key_left = list(itertools.chain(*[[x] * 3 for x in range(n // 3, 0, -1)]))

    print("Type: left, Size: %u, Key dtype: %s" % (n, str(key_dtype)))

    df1 = pd.DataFrame(
        {
            "c1": c1,
            "key": np.array(convert_to_datetime(key_left), dtype=key_dtype),
            "c3": c3_l,
        }
    )
    df2 = pd.DataFrame(
        {
            "c2": c2,
            "key": np.array(convert_to_datetime(key_right), dtype=key_dtype),
            "c3": c3_r,
        }
    )

    ldf1 = lp.DataFrame(df1)
    ldf2 = lp.DataFrame(df2)

    join_pandas = df1.merge(df2, on="key", how="left")
    join_legate = ldf1.merge(ldf2, on="key", how="left", method="broadcast")
    join_legate_hash = ldf1.merge(ldf2, on="key", how="left", method="hash")

    assert sort_and_compare(join_pandas, to_pandas(join_legate))
    assert sort_and_compare(join_pandas, to_pandas(join_legate_hash))
6f94488a0fde48cca550440c116f5403e66da8ae | 812 | py | Python | docassemble_base/docassemble/base/pdfa.py | eeeschwartz/docassemble | d74473de274e94ce754d2ec4bba0ac786fb77293 | [
"MIT"
] | 1 | 2019-01-29T15:45:56.000Z | 2019-01-29T15:45:56.000Z | docassemble_base/docassemble/base/pdfa.py | toolness/docassemble | 8273ae257daa2e318ad58f36829d1c83880b0961 | [
"MIT"
] | 1 | 2019-04-04T11:03:13.000Z | 2019-04-04T11:03:13.000Z | docassemble_base/docassemble/base/pdfa.py | toolness/docassemble | 8273ae257daa2e318ad58f36829d1c83880b0961 | [
"MIT"
] | 2 | 2019-01-26T14:20:35.000Z | 2021-01-15T06:48:26.000Z | import tempfile
import subprocess
import shutil
from docassemble.base.error import DAError
from docassemble.base.logger import logmessage
def pdf_to_pdfa(filename):
logmessage("pdf_to_pdfa: running")
outfile = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
directory = tempfile.mkdtemp()
commands = ['gs', '-dPDFA', '-dBATCH', '-dNOPAUSE', '-sProcessColorModel=DeviceCMYK', '-sDEVICE=pdfwrite', '-sPDFACompatibilityPolicy=1', '-sOutputFile=' + outfile.name, filename]
try:
output = subprocess.check_output(commands, cwd=directory, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
output = err.output
raise DAError("pdf_to_pdfa: error running ghostscript. " + output)
logmessage(output)
shutil.move(outfile.name, filename)
| 42.736842 | 183 | 0.736453 |
3e84d7bce80223431d2bb90cf6654c4bf2c215fe | 27,803 | py | Python | contrib/runners/orquesta_runner/tests/unit/test_inquiries.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | null | null | null | contrib/runners/orquesta_runner/tests/unit/test_inquiries.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | 15 | 2021-02-11T22:58:54.000Z | 2021-08-06T18:03:47.000Z | contrib/runners/orquesta_runner/tests/unit/test_inquiries.py | kkkanil/st2 | 07cd195d7a6e177a37dd019e5c9ab8329259d0fa | [
"Apache-2.0"
] | 1 | 2021-07-10T15:02:29.000Z | 2021-07-10T15:02:29.000Z | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
from orquesta import statuses as wf_statuses
import st2tests
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
from tests.unit import base
from st2actions.workflows import workflows
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as action_constants
from st2common.models.api import inquiry as inqy_api_models
from st2common.models.db import liveaction as lv_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import liveaction as lv_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.services import action as action_service
from st2common.services import inquiry as inquiry_service
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import workflow as wf_ex_xport
from st2common.transport import publishers
from st2tests.mocks import liveaction as mock_lv_ac_xport
from st2tests.mocks import workflow as mock_wf_ex_xport
# Fixture packs registered for these tests: the orquesta test pack plus
# the core pack (both resolved relative to the st2tests fixtures root).
TEST_PACK = 'orquesta_tests'
TEST_PACK_PATH = st2tests.fixturesloader.get_fixtures_packs_base_path() + '/' + TEST_PACK

PACKS = [
    TEST_PACK_PATH,
    st2tests.fixturesloader.get_fixtures_packs_base_path() + '/core'
]
@mock.patch.object(
publishers.CUDPublisher,
'publish_update',
mock.MagicMock(return_value=None))
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
'publish_create',
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create))
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
'publish_state',
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state))
@mock.patch.object(
wf_ex_xport.WorkflowExecutionPublisher,
'publish_create',
mock.MagicMock(side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_create))
@mock.patch.object(
wf_ex_xport.WorkflowExecutionPublisher,
'publish_state',
mock.MagicMock(side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_state))
class OrquestaRunnerTest(st2tests.ExecutionDbTestCase):
@classmethod
def setUpClass(cls):
super(OrquestaRunnerTest, cls).setUpClass()
# Register runners.
runnersregistrar.register_runners()
# Register test pack(s).
actions_registrar = actionsregistrar.ActionsRegistrar(
use_pack_cache=False,
fail_on_failure=True
)
for pack in PACKS:
actions_registrar.register_from_pack(pack)
    def test_inquiry(self):
        """End-to-end run of the ask-approval workflow: it pauses on the
        inquiry (get_approval) task and resumes to completion once the
        inquiry is answered."""
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'ask-approval.yaml')
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
        lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)

        # Assert action execution is running.
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
        wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0]
        self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)

        # Assert start task is already completed.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'start'}
        t1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t1_ex_db.id))[0]
        t1_lv_ac_db = lv_db_access.LiveAction.get_by_id(t1_ac_ex_db.liveaction['id'])
        self.assertEqual(t1_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t1_ac_ex_db)
        t1_ex_db = wf_db_access.TaskExecution.get_by_id(t1_ex_db.id)
        self.assertEqual(t1_ex_db.status, wf_statuses.SUCCEEDED)

        # Assert the main workflow is still running.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

        # Assert get approval task is already pending.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'get_approval'}
        t2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_ex_db.id))[0]
        t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(t2_ac_ex_db.liveaction['id'])
        self.assertEqual(t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_PENDING)
        workflows.get_engine().process(t2_ac_ex_db)
        t2_ex_db = wf_db_access.TaskExecution.get_by_id(t2_ex_db.id)
        self.assertEqual(t2_ex_db.status, wf_statuses.PENDING)

        # Assert the main workflow is paused since it has no other active tasks.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.PAUSED)

        # Respond to the inquiry and check status.
        inquiry_api = inqy_api_models.InquiryAPI.from_model(t2_ac_ex_db)
        inquiry_response = {'approved': True}
        inquiry_service.respond(inquiry_api, inquiry_response)
        t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(t2_lv_ac_db.id))
        self.assertEqual(t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        t2_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(t2_ac_ex_db.id))
        self.assertEqual(t2_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t2_ac_ex_db)
        t2_ex_db = wf_db_access.TaskExecution.get_by_id(str(t2_ex_db.id))
        self.assertEqual(t2_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)

        # Assert the main workflow has resumed running after the inquiry response.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)

        # Assert the final task is completed.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'finish'}
        t3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t3_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t3_ex_db.id))[0]
        t3_lv_ac_db = lv_db_access.LiveAction.get_by_id(t3_ac_ex_db.liveaction['id'])
        self.assertEqual(t3_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t3_ac_ex_db)
        t3_ex_db = wf_db_access.TaskExecution.get_by_id(t3_ex_db.id)
        self.assertEqual(t3_ex_db.status, wf_statuses.SUCCEEDED)

        # Assert the main workflow is completed
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
    def test_consecutive_inquiries(self):
        """Run a workflow with two inquiries in sequence.

        Verifies the workflow pauses at each inquiry, resumes after each
        response, and finally succeeds once the last task completes.
        """
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'ask-consecutive-approvals.yaml')
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
        lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
        # Assert action execution is running.
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
        wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0]
        self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
        # Assert start task is already completed.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'start'}
        t1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t1_ex_db.id))[0]
        t1_lv_ac_db = lv_db_access.LiveAction.get_by_id(t1_ac_ex_db.liveaction['id'])
        self.assertEqual(t1_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t1_ac_ex_db)
        t1_ex_db = wf_db_access.TaskExecution.get_by_id(t1_ex_db.id)
        self.assertEqual(t1_ex_db.status, wf_statuses.SUCCEEDED)
        # Assert the main workflow is still running.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
        # Assert get approval task is already pending.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'get_approval'}
        t2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_ex_db.id))[0]
        t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(t2_ac_ex_db.liveaction['id'])
        self.assertEqual(t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_PENDING)
        workflows.get_engine().process(t2_ac_ex_db)
        t2_ex_db = wf_db_access.TaskExecution.get_by_id(t2_ex_db.id)
        self.assertEqual(t2_ex_db.status, wf_statuses.PENDING)
        # Assert the main workflow is paused since it has no other active tasks.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.PAUSED)
        # Respond to the first inquiry and check status.
        inquiry_api = inqy_api_models.InquiryAPI.from_model(t2_ac_ex_db)
        inquiry_response = {'approved': True}
        inquiry_service.respond(inquiry_api, inquiry_response)
        t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(t2_lv_ac_db.id))
        self.assertEqual(t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        t2_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(t2_ac_ex_db.id))
        self.assertEqual(t2_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t2_ac_ex_db)
        t2_ex_db = wf_db_access.TaskExecution.get_by_id(str(t2_ex_db.id))
        self.assertEqual(t2_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        # Assert the main workflow resumed running after the first response.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
        # Assert the get confirmation (second inquiry) task is pending.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'get_confirmation'}
        t3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t3_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t3_ex_db.id))[0]
        t3_lv_ac_db = lv_db_access.LiveAction.get_by_id(t3_ac_ex_db.liveaction['id'])
        self.assertEqual(t3_lv_ac_db.status, action_constants.LIVEACTION_STATUS_PENDING)
        workflows.get_engine().process(t3_ac_ex_db)
        t3_ex_db = wf_db_access.TaskExecution.get_by_id(t3_ex_db.id)
        self.assertEqual(t3_ex_db.status, wf_statuses.PENDING)
        # Assert the main workflow is paused again on the second inquiry.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.PAUSED)
        # Respond to the second inquiry and check status.
        inquiry_api = inqy_api_models.InquiryAPI.from_model(t3_ac_ex_db)
        inquiry_response = {'approved': True}
        inquiry_service.respond(inquiry_api, inquiry_response)
        t3_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(t3_lv_ac_db.id))
        self.assertEqual(t3_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        t3_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(t3_ac_ex_db.id))
        self.assertEqual(t3_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t3_ac_ex_db)
        t3_ex_db = wf_db_access.TaskExecution.get_by_id(str(t3_ex_db.id))
        self.assertEqual(t3_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        # Assert the main workflow resumed running after the second response.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
        # Assert the final task is completed.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'finish'}
        t4_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t4_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t4_ex_db.id))[0]
        t4_lv_ac_db = lv_db_access.LiveAction.get_by_id(t4_ac_ex_db.liveaction['id'])
        self.assertEqual(t4_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t4_ac_ex_db)
        t4_ex_db = wf_db_access.TaskExecution.get_by_id(t4_ex_db.id)
        self.assertEqual(t4_ex_db.status, wf_statuses.SUCCEEDED)
        # Assert the main workflow is completed.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
    def test_parallel_inquiries(self):
        """Run a workflow with two inquiries on parallel branches.

        Verifies the workflow transitions to pausing while one inquiry is
        still active, pauses once both are pending, and only resumes after
        both inquiries have been answered.
        """
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'ask-parallel-approvals.yaml')
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
        lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
        # Assert action execution is running.
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
        wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0]
        self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
        # Assert start task is already completed.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'start'}
        t1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t1_ex_db.id))[0]
        t1_lv_ac_db = lv_db_access.LiveAction.get_by_id(t1_ac_ex_db.liveaction['id'])
        self.assertEqual(t1_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t1_ac_ex_db)
        t1_ex_db = wf_db_access.TaskExecution.get_by_id(t1_ex_db.id)
        self.assertEqual(t1_ex_db.status, wf_statuses.SUCCEEDED)
        # Assert the main workflow is still running.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
        # Assert the first approval task is already pending.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'ask_jack'}
        t2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_ex_db.id))[0]
        t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(t2_ac_ex_db.liveaction['id'])
        self.assertEqual(t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_PENDING)
        workflows.get_engine().process(t2_ac_ex_db)
        t2_ex_db = wf_db_access.TaskExecution.get_by_id(t2_ex_db.id)
        self.assertEqual(t2_ex_db.status, wf_statuses.PENDING)
        # Assert the main workflow is pausing since the other branch still
        # has an active (not yet pending) task.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.PAUSING)
        # Assert the second approval task is already pending.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'ask_jill'}
        t3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t3_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t3_ex_db.id))[0]
        t3_lv_ac_db = lv_db_access.LiveAction.get_by_id(t3_ac_ex_db.liveaction['id'])
        self.assertEqual(t3_lv_ac_db.status, action_constants.LIVEACTION_STATUS_PENDING)
        workflows.get_engine().process(t3_ac_ex_db)
        t3_ex_db = wf_db_access.TaskExecution.get_by_id(t3_ex_db.id)
        self.assertEqual(t3_ex_db.status, wf_statuses.PENDING)
        # Assert the main workflow is paused since it has no other active tasks.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.PAUSED)
        # Respond to the first inquiry and check status.
        inquiry_api = inqy_api_models.InquiryAPI.from_model(t2_ac_ex_db)
        inquiry_response = {'approved': True}
        inquiry_service.respond(inquiry_api, inquiry_response)
        t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(t2_lv_ac_db.id))
        self.assertEqual(t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        t2_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(t2_ac_ex_db.id))
        self.assertEqual(t2_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t2_ac_ex_db)
        t2_ex_db = wf_db_access.TaskExecution.get_by_id(str(t2_ex_db.id))
        self.assertEqual(t2_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        # Assert the main workflow is paused because we are still waiting for
        # the other pending task and there are no other active tasks.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.PAUSED)
        # Respond to the second inquiry and check status.
        inquiry_api = inqy_api_models.InquiryAPI.from_model(t3_ac_ex_db)
        inquiry_response = {'approved': True}
        inquiry_service.respond(inquiry_api, inquiry_response)
        t3_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(t3_lv_ac_db.id))
        self.assertEqual(t3_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        t3_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(t3_ac_ex_db.id))
        self.assertEqual(t3_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t3_ac_ex_db)
        t3_ex_db = wf_db_access.TaskExecution.get_by_id(str(t3_ex_db.id))
        self.assertEqual(t3_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        # Assert the main workflow resumed running.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
        # Assert the final task is completed.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'finish'}
        t4_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t4_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t4_ex_db.id))[0]
        t4_lv_ac_db = lv_db_access.LiveAction.get_by_id(t4_ac_ex_db.liveaction['id'])
        self.assertEqual(t4_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t4_ac_ex_db)
        t4_ex_db = wf_db_access.TaskExecution.get_by_id(t4_ex_db.id)
        self.assertEqual(t4_ex_db.status, wf_statuses.SUCCEEDED)
        # Assert the main workflow is completed.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
    def test_nested_inquiry(self):
        """Run a workflow whose subworkflow contains an inquiry.

        Verifies that the pause cascades from the subworkflow up through the
        parent task to the main workflow, and that answering the inquiry
        resumes both levels until everything succeeds.
        """
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'ask-nested-approval.yaml')
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
        lv_ac_db, ac_ex_db = action_service.request(lv_ac_db)
        # Assert action execution is running.
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
        wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))[0]
        self.assertEqual(wf_ex_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
        # Assert start task is already completed.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'start'}
        t1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t1_ex_db.id))[0]
        t1_lv_ac_db = lv_db_access.LiveAction.get_by_id(t1_ac_ex_db.liveaction['id'])
        self.assertEqual(t1_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t1_ac_ex_db)
        t1_ex_db = wf_db_access.TaskExecution.get_by_id(t1_ex_db.id)
        self.assertEqual(t1_ex_db.status, wf_statuses.SUCCEEDED)
        # Assert the main workflow is still running.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
        # Assert the subworkflow is already started.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'get_approval'}
        t2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_ex_db.id))[0]
        t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(t2_ac_ex_db.liveaction['id'])
        self.assertEqual(t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_RUNNING)
        workflows.get_engine().process(t2_ac_ex_db)
        t2_ex_db = wf_db_access.TaskExecution.get_by_id(t2_ex_db.id)
        self.assertEqual(t2_ex_db.status, wf_statuses.RUNNING)
        t2_wf_ex_db = wf_db_access.WorkflowExecution.query(action_execution=str(t2_ac_ex_db.id))[0]
        self.assertEqual(t2_wf_ex_db.status, wf_statuses.RUNNING)
        # Process task1 of subworkflow.
        query_filters = {'workflow_execution': str(t2_wf_ex_db.id), 'task_id': 'start'}
        t2_t1_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t2_t1_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_t1_ex_db.id))[0]
        t2_t1_lv_ac_db = lv_db_access.LiveAction.get_by_id(t2_t1_ac_ex_db.liveaction['id'])
        self.assertEqual(t2_t1_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t2_t1_ac_ex_db)
        t2_t1_ex_db = wf_db_access.TaskExecution.get_by_id(t2_t1_ex_db.id)
        self.assertEqual(t2_t1_ex_db.status, wf_statuses.SUCCEEDED)
        t2_wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(t2_wf_ex_db.id))
        self.assertEqual(t2_wf_ex_db.status, wf_statuses.RUNNING)
        # Process inquiry task of subworkflow and assert the subworkflow is paused.
        query_filters = {'workflow_execution': str(t2_wf_ex_db.id), 'task_id': 'get_approval'}
        t2_t2_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t2_t2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_t2_ex_db.id))[0]
        t2_t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(t2_t2_ac_ex_db.liveaction['id'])
        self.assertEqual(t2_t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_PENDING)
        workflows.get_engine().process(t2_t2_ac_ex_db)
        t2_t2_ex_db = wf_db_access.TaskExecution.get_by_id(t2_t2_ex_db.id)
        self.assertEqual(t2_t2_ex_db.status, wf_statuses.PENDING)
        t2_wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(t2_wf_ex_db.id))
        self.assertEqual(t2_wf_ex_db.status, wf_statuses.PAUSED)
        # Process the corresponding task in parent workflow and assert the task is paused.
        t2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_ex_db.id))[0]
        t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(t2_ac_ex_db.liveaction['id'])
        self.assertEqual(t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_PAUSED)
        workflows.get_engine().process(t2_ac_ex_db)
        t2_ex_db = wf_db_access.TaskExecution.get_by_id(t2_ex_db.id)
        self.assertEqual(t2_ex_db.status, wf_statuses.PAUSED)
        # Assert the main workflow is paused.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.PAUSED)
        # Respond to the inquiry and check status.
        inquiry_api = inqy_api_models.InquiryAPI.from_model(t2_t2_ac_ex_db)
        inquiry_response = {'approved': True}
        inquiry_service.respond(inquiry_api, inquiry_response)
        t2_t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(t2_t2_lv_ac_db.id))
        self.assertEqual(t2_t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        t2_t2_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(t2_t2_ac_ex_db.id))
        self.assertEqual(t2_t2_ac_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t2_t2_ac_ex_db)
        t2_t2_ex_db = wf_db_access.TaskExecution.get_by_id(str(t2_t2_ex_db.id))
        self.assertEqual(t2_t2_ex_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        # Assert the main workflow is running again.
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
        # Complete the rest of the subworkflow.
        query_filters = {'workflow_execution': str(t2_wf_ex_db.id), 'task_id': 'finish'}
        t2_t3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t2_t3_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_t3_ex_db.id))[0]
        t2_t3_lv_ac_db = lv_db_access.LiveAction.get_by_id(t2_t3_ac_ex_db.liveaction['id'])
        self.assertEqual(t2_t3_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t2_t3_ac_ex_db)
        t2_t3_ex_db = wf_db_access.TaskExecution.get_by_id(t2_t3_ex_db.id)
        self.assertEqual(t2_t3_ex_db.status, wf_statuses.SUCCEEDED)
        t2_wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(str(t2_wf_ex_db.id))
        self.assertEqual(t2_wf_ex_db.status, wf_statuses.SUCCEEDED)
        t2_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t2_ex_db.id))[0]
        t2_lv_ac_db = lv_db_access.LiveAction.get_by_id(t2_ac_ex_db.liveaction['id'])
        self.assertEqual(t2_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t2_ac_ex_db)
        t2_ex_db = wf_db_access.TaskExecution.get_by_id(t2_ex_db.id)
        self.assertEqual(t2_ex_db.status, wf_statuses.SUCCEEDED)
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.RUNNING)
        # Complete the rest of the main workflow.
        query_filters = {'workflow_execution': str(wf_ex_db.id), 'task_id': 'finish'}
        t3_ex_db = wf_db_access.TaskExecution.query(**query_filters)[0]
        t3_ac_ex_db = ex_db_access.ActionExecution.query(task_execution=str(t3_ex_db.id))[0]
        t3_lv_ac_db = lv_db_access.LiveAction.get_by_id(t3_ac_ex_db.liveaction['id'])
        self.assertEqual(t3_lv_ac_db.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        workflows.get_engine().process(t3_ac_ex_db)
        t3_ex_db = wf_db_access.TaskExecution.get_by_id(t3_ex_db.id)
        self.assertEqual(t3_ex_db.status, wf_statuses.SUCCEEDED)
        wf_ex_db = wf_db_access.WorkflowExecution.get_by_id(wf_ex_db.id)
        self.assertEqual(wf_ex_db.status, wf_statuses.SUCCEEDED)
| 57.563147 | 99 | 0.752365 |
3d444d0b24113aa65f25436dcead76ced37094ea | 3,398 | py | Python | tenkit/lib/python/tenkit/vcf_utils.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | [
"MIT"
] | 1 | 2019-03-29T04:05:58.000Z | 2019-03-29T04:05:58.000Z | tenkit/lib/python/tenkit/vcf_utils.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | [
"MIT"
] | null | null | null | tenkit/lib/python/tenkit/vcf_utils.py | qiangli/cellranger | 046e24c3275cfbd4516a6ebc064594513a5c45b7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2014 10X Genomics, Inc. All rights reserved.
#
import log_subprocess
import json
import tenkit.bam as tk_bam
from tenkit.constants import FASTA_LOCATION
def count_vars_vcf(vcf_path):
    """ Counts the number of variants in a VCF file.

    Header lines (starting with '#') are skipped; every remaining line is
    counted as one variant record.
    """
    # Context manager guarantees the handle is released (the original
    # implementation never closed the file).
    with open(vcf_path) as in_file:
        return sum(1 for line in in_file if not line.startswith('#'))
def split_alt_alleles_vcf(vcf_path, out_path):
    """ Splits records with more than one ALT field into two
    (one output record per ALT allele, via vcflib's vcfbreakmulti).
    """
    # Close the output handle deterministically; the original leaked it,
    # which can leave buffered output unflushed until interpreter exit.
    out_file = open(out_path, 'w')
    try:
        log_subprocess.check_call(['vcfbreakmulti', vcf_path], stdout=out_file)
    finally:
        out_file.close()
def output_primitives_vcf(vcf_path, out_path):
    """ Decomposes all complex variants into SNP and indel primitives
    (via vcflib's vcfallelicprimitives).
    """
    # Close the output handle deterministically; the original leaked it.
    out_file = open(out_path, 'w')
    try:
        log_subprocess.check_call(['vcfallelicprimitives', vcf_path], stdout=out_file)
    finally:
        out_file.close()
def output_restrict_location_vcf(vcf_path, bed_path, genome, out_path):
    """ Outputs a vcf restricted to the locations specified in the given bed file
    """
    reference = FASTA_LOCATION + genome + '/' + genome + '.fa'
    cmd = ['vcfintersect', '-b', bed_path, '-r', reference, vcf_path]
    # Echo the exact command line for debugging/log visibility.
    print(' '.join(cmd))
    out_file = open(out_path, 'w')
    log_subprocess.check_call(cmd, stdout=out_file)
    out_file.close()
def output_intersect_vcfs(vcf1_path, vcf2_path, genome, out_path):
    """ Outputs a vcf which is the intersection of the two given vcfs.
    """
    reference = FASTA_LOCATION + genome + '/' + genome + '.fa'
    cmd = ['vcfintersect', vcf1_path, '-i', vcf2_path, '-r', reference]
    out_file = open(out_path, 'w')
    log_subprocess.check_call(cmd, stdout=out_file)
    out_file.close()
def output_setdiff_vcfs(vcf1_path, vcf2_path, genome, out_path):
    """ Outputs a VCF file which contains variants in the first but not the second VCF file.
    """
    reference = FASTA_LOCATION + genome + '/' + genome + '.fa'
    # The -v flag inverts the intersection, yielding the set difference.
    cmd = ['vcfintersect', vcf1_path, '-i', vcf2_path, '-v', '-r', reference]
    out_file = open(out_path, 'w')
    log_subprocess.check_call(cmd, stdout=out_file)
    out_file.close()
def output_as_tsv(vcf_path, out_path, output_gt_info=False):
    """ Outputs all of the information from the vcf file as one big tsv
    """
    cmd = ['vcf2tsv', vcf_path]
    if output_gt_info:
        # -g also emits per-sample genotype columns.
        cmd.append('-g')
    out_file = open(out_path, 'w')
    log_subprocess.check_call(cmd, stdout=out_file)
    out_file.close()
def output_variant_depths(vcf_path, bam_path, out_path):
    """ Outputs a JSON file with chrom and pos as keys and depths as values
    corresponding to the depths of the variants in the vcf file, for the
    sequencing run in the bam file.
    The bam file needs to be sorted and indexed.
    """
    bam_file = tk_bam.create_bam_infile(bam_path)
    depths = {}
    # 'with' blocks ensure both text handles are closed; the originals
    # leaked the input handle entirely.
    with open(vcf_path, 'r') as in_file:
        for line in in_file:
            if line.startswith('#'):
                continue
            info = line.split('\t')
            chrom = info[0]
            pos = int(info[1])
            chrom_depths = depths.setdefault(chrom, {})
            # fetch() yields one alignment per read overlapping [pos, pos+1);
            # counting them gives the read depth at the variant position.
            chrom_depths[pos] = sum(1 for _ in bam_file.fetch(chrom, pos, pos + 1))
    with open(out_path, 'w') as out_file:
        out_file.write(json.dumps(depths))
| 36.148936 | 114 | 0.668334 |
40600155417903532818ad3879313e3e5270a205 | 26,045 | py | Python | builds/runtimes/python-2.7.6/lib/python2.7/test/test_ftplib.py | Orange-OpenSource/php-buildpack | d9a9864192d3ac3906daa9386fee952bfe8a80f3 | [
"MIT"
] | 2 | 2015-07-06T17:17:00.000Z | 2016-05-05T18:35:40.000Z | builds/runtimes/python-2.7.6/lib/python2.7/test/test_ftplib.py | Orange-OpenSource/php-buildpack | d9a9864192d3ac3906daa9386fee952bfe8a80f3 | [
"MIT"
] | 1 | 2018-06-28T16:16:37.000Z | 2018-06-28T16:16:37.000Z | builds/runtimes/python-2.7.6/lib/python2.7/test/test_ftplib.py | Orange-OpenSource/php-buildpack | d9a9864192d3ac3906daa9386fee952bfe8a80f3 | [
"MIT"
] | 9 | 2015-03-13T18:27:27.000Z | 2018-12-03T15:38:51.000Z | """Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import StringIO
import errno
import os
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase
from test import test_support
from test.test_support import HOST, HOSTv6
threading = test_support.import_module('threading')
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
    """Server-side data channel (DTP) endpoint for the dummy FTP server.

    Accumulates everything received into ``baseclass.last_received_data``
    and pushes a 226 reply on the control channel when the transfer ends.
    """

    dtp_conn_closed = False

    def __init__(self, conn, baseclass):
        asynchat.async_chat.__init__(self, conn)
        self.baseclass = baseclass
        self.baseclass.last_received_data = ''

    def handle_read(self):
        # Record uploaded data on the control-channel handler so tests can
        # inspect what the client sent over the data connection.
        self.baseclass.last_received_data += self.recv(1024)

    def handle_close(self):
        # XXX: this method can be called many times in a row for a single
        # connection, including in clear-text (non-TLS) mode.
        # (behaviour witnessed with test_data_connection)
        if not self.dtp_conn_closed:
            self.baseclass.push('226 transfer complete')
            self.close()
            self.dtp_conn_closed = True

    def handle_error(self):
        # Propagate exceptions instead of asyncore's default logging, so
        # test failures surface immediately.
        raise
class DummyFTPHandler(asynchat.async_chat):
    """Control-channel handler implementing a minimal fake FTP server.

    Each ``cmd_<name>`` method handles the FTP command NAME; unknown
    commands get a 550 reply. Data transfers go through ``dtp_handler``.
    """

    dtp_handler = DummyDTPHandler

    def __init__(self, conn):
        asynchat.async_chat.__init__(self, conn)
        self.set_terminator("\r\n")
        self.in_buffer = []
        self.dtp = None
        self.last_received_cmd = None
        self.last_received_data = ''
        self.next_response = ''
        self.rest = None
        self.next_retr_data = RETR_DATA
        self.push('220 welcome')

    def collect_incoming_data(self, data):
        self.in_buffer.append(data)

    def found_terminator(self):
        # One complete command line received; dispatch to cmd_<verb>.
        line = ''.join(self.in_buffer)
        self.in_buffer = []
        if self.next_response:
            # Tests may pre-load a canned reply to send before dispatching.
            self.push(self.next_response)
            self.next_response = ''
        cmd = line.split(' ')[0].lower()
        self.last_received_cmd = cmd
        space = line.find(' ')
        if space != -1:
            arg = line[space + 1:]
        else:
            arg = ""
        if hasattr(self, 'cmd_' + cmd):
            method = getattr(self, 'cmd_' + cmd)
            method(arg)
        else:
            self.push('550 command "%s" not understood.' %cmd)

    def handle_error(self):
        raise

    def push(self, data):
        asynchat.async_chat.push(self, data + '\r\n')

    def cmd_port(self, arg):
        # PORT arg is h1,h2,h3,h4,p1,p2: four IP octets then the port as
        # two bytes (p1*256 + p2). Server connects back to the client.
        addr = map(int, arg.split(','))
        ip = '%d.%d.%d.%d' %tuple(addr[:4])
        port = (addr[4] * 256) + addr[5]
        s = socket.create_connection((ip, port), timeout=10)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_pasv(self, arg):
        # Passive mode: listen on an ephemeral port and advertise it in
        # the 227 reply using the same h1,h2,h3,h4,p1,p2 encoding.
        sock = socket.socket()
        sock.bind((self.socket.getsockname()[0], 0))
        sock.listen(5)
        sock.settimeout(10)
        ip, port = sock.getsockname()[:2]
        ip = ip.replace('.', ',')
        p1, p2 = divmod(port, 256)
        self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
        conn, addr = sock.accept()
        self.dtp = self.dtp_handler(conn, baseclass=self)

    def cmd_eprt(self, arg):
        # Extended PORT (RFC 2428): |af|ip|port| with arg[0] as delimiter.
        af, ip, port = arg.split(arg[0])[1:-1]
        port = int(port)
        s = socket.create_connection((ip, port), timeout=10)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')

    def cmd_epsv(self, arg):
        # Extended passive mode (RFC 2428) over IPv6.
        sock = socket.socket(socket.AF_INET6)
        sock.bind((self.socket.getsockname()[0], 0))
        sock.listen(5)
        sock.settimeout(10)
        port = sock.getsockname()[1]
        self.push('229 entering extended passive mode (|||%d|)' %port)
        conn, addr = sock.accept()
        self.dtp = self.dtp_handler(conn, baseclass=self)

    def cmd_echo(self, arg):
        # sends back the received string (used by the test suite)
        self.push(arg)

    def cmd_user(self, arg):
        self.push('331 username ok')

    def cmd_pass(self, arg):
        self.push('230 password ok')

    def cmd_acct(self, arg):
        self.push('230 acct ok')

    def cmd_rnfr(self, arg):
        self.push('350 rnfr ok')

    def cmd_rnto(self, arg):
        self.push('250 rnto ok')

    def cmd_dele(self, arg):
        self.push('250 dele ok')

    def cmd_cwd(self, arg):
        self.push('250 cwd ok')

    def cmd_size(self, arg):
        self.push('250 1000')

    def cmd_mkd(self, arg):
        self.push('257 "%s"' %arg)

    def cmd_rmd(self, arg):
        self.push('250 rmd ok')

    def cmd_pwd(self, arg):
        self.push('257 "pwd ok"')

    def cmd_type(self, arg):
        self.push('200 type ok')

    def cmd_quit(self, arg):
        self.push('221 quit ok')
        self.close()

    def cmd_stor(self, arg):
        self.push('125 stor ok')

    def cmd_rest(self, arg):
        # Remember the restart offset for the next RETR.
        self.rest = arg
        self.push('350 rest ok')

    def cmd_retr(self, arg):
        self.push('125 retr ok')
        if self.rest is not None:
            offset = int(self.rest)
        else:
            offset = 0
        self.dtp.push(self.next_retr_data[offset:])
        self.dtp.close_when_done()
        self.rest = None

    def cmd_list(self, arg):
        self.push('125 list ok')
        self.dtp.push(LIST_DATA)
        self.dtp.close_when_done()

    def cmd_nlst(self, arg):
        self.push('125 nlst ok')
        self.dtp.push(NLST_DATA)
        self.dtp.close_when_done()

    def cmd_setlongretr(self, arg):
        # For testing. Next RETR will return long line.
        self.next_retr_data = 'x' * int(arg)
        self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
    """Single-connection FTP server running the asyncore loop in a thread.

    Accepts exactly one client; the accepted connection is wrapped in
    ``handler`` and the listening socket is closed.
    """

    handler = DummyFTPHandler

    def __init__(self, address, af=socket.AF_INET):
        threading.Thread.__init__(self)
        asyncore.dispatcher.__init__(self)
        self.create_socket(af, socket.SOCK_STREAM)
        self.bind(address)
        self.listen(5)
        self.active = False
        self.active_lock = threading.Lock()
        self.host, self.port = self.socket.getsockname()[:2]

    def start(self):
        assert not self.active
        # Block until the loop thread has actually started.
        self.__flag = threading.Event()
        threading.Thread.start(self)
        self.__flag.wait()

    def run(self):
        self.active = True
        self.__flag.set()
        while self.active and asyncore.socket_map:
            # The lock lets other threads briefly pause the poll loop.
            self.active_lock.acquire()
            asyncore.loop(timeout=0.1, count=1)
            self.active_lock.release()
        asyncore.close_all(ignore_all=True)

    def stop(self):
        assert self.active
        self.active = False
        self.join()

    def handle_accept(self):
        conn, addr = self.accept()
        # Only one client per server instance: hand off and stop listening.
        self.handler = self.handler(conn)
        self.close()

    def handle_connect(self):
        self.close()
    handle_read = handle_connect

    def writable(self):
        return 0

    def handle_error(self):
        raise
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
    class SSLConnection(object, asyncore.dispatcher):
        """An asyncore.dispatcher subclass supporting TLS/SSL."""

        # True while the non-blocking TLS handshake is still in progress.
        _ssl_accepting = False
        # True while a graceful TLS shutdown (unwrap) is in progress.
        _ssl_closing = False

        def secure_connection(self):
            # Wrap the plain socket; the handshake is driven incrementally
            # from handle_read_event/handle_write_event.
            self.socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
                                          certfile=CERTFILE, server_side=True,
                                          do_handshake_on_connect=False,
                                          ssl_version=ssl.PROTOCOL_SSLv23)
            self._ssl_accepting = True

        def _do_ssl_handshake(self):
            try:
                self.socket.do_handshake()
            except ssl.SSLError, err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    # Handshake not finished; retry on the next I/O event.
                    return
                elif err.args[0] == ssl.SSL_ERROR_EOF:
                    return self.handle_close()
                raise
            except socket.error, err:
                if err.args[0] == errno.ECONNABORTED:
                    return self.handle_close()
            else:
                self._ssl_accepting = False

        def _do_ssl_shutdown(self):
            self._ssl_closing = True
            try:
                # unwrap() performs the TLS close_notify exchange and
                # returns the underlying plain socket.
                self.socket = self.socket.unwrap()
            except ssl.SSLError, err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return
            except socket.error, err:
                # Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
                # from OpenSSL's SSL_shutdown(), corresponding to a
                # closed socket condition. See also:
                # http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
                pass
            self._ssl_closing = False
            super(SSLConnection, self).close()

        def handle_read_event(self):
            # Route events to the handshake/shutdown state machines first.
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_read_event()

        def handle_write_event(self):
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_write_event()

        def send(self, data):
            try:
                return super(SSLConnection, self).send(data)
            except ssl.SSLError, err:
                # Treat transient/terminated-TLS conditions as "0 bytes sent".
                if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
                                   ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return 0
                raise

        def recv(self, buffer_size):
            try:
                return super(SSLConnection, self).recv(buffer_size)
            except ssl.SSLError, err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return ''
                if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
                    # Peer closed the TLS session; report EOF.
                    self.handle_close()
                    return ''
                raise

        def handle_error(self):
            raise

        def close(self):
            # If TLS is still active, do a graceful shutdown instead of a
            # hard socket close (close happens at the end of unwrap).
            if (isinstance(self.socket, ssl.SSLSocket) and
                    self.socket._sslobj is not None):
                self._do_ssl_shutdown()
    class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
        """A DummyDTPHandler subclass supporting TLS/SSL."""

        def __init__(self, conn, baseclass):
            DummyDTPHandler.__init__(self, conn, baseclass)
            # Only wrap the data channel if the client negotiated PROT P.
            if self.baseclass.secure_data_channel:
                self.secure_connection()
    class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
        """A DummyFTPHandler subclass supporting TLS/SSL."""

        dtp_handler = DummyTLS_DTPHandler

        def __init__(self, conn):
            DummyFTPHandler.__init__(self, conn)
            # PROT P flips this to True; the DTP handler checks it.
            self.secure_data_channel = False

        def cmd_auth(self, line):
            """Set up secure control channel."""
            self.push('234 AUTH TLS successful')
            self.secure_connection()

        def cmd_pbsz(self, line):
            """Negotiate size of buffer for secure data transfer.
            For TLS/SSL the only valid value for the parameter is '0'.
            Any other value is accepted but ignored.
            """
            self.push('200 PBSZ=0 successful.')

        def cmd_prot(self, line):
            """Setup un/secure data channel."""
            arg = line.upper()
            if arg == 'C':
                self.push('200 Protection set to Clear')
                self.secure_data_channel = False
            elif arg == 'P':
                self.push('200 Protection set to Private')
                self.secure_data_channel = True
            else:
                self.push("502 Unrecognized PROT type (use C or P).")
    class DummyTLS_FTPServer(DummyFTPServer):
        """A DummyFTPServer whose control/data channels speak TLS/SSL."""
        handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
    """Tests for ftplib.FTP against the in-process DummyFTPServer."""

    def setUp(self):
        # Fresh server + connected client for every test.
        self.server = DummyFTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP(timeout=10)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()

    def test_getwelcome(self):
        self.assertEqual(self.client.getwelcome(), '220 welcome')

    def test_sanitize(self):
        # Passwords must be masked in sanitized command echoes.
        self.assertEqual(self.client.sanitize('foo'), repr('foo'))
        self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
        self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))

    def test_exceptions(self):
        # Reply-code ranges map to specific exception classes.
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
        self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')

    def test_all_errors(self):
        exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
                      ftplib.error_proto, ftplib.Error, IOError, EOFError)
        for x in exceptions:
            try:
                raise x('exception not included in all_errors set')
            except ftplib.all_errors:
                pass

    def test_set_pasv(self):
        # passive mode is supposed to be enabled by default
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(True)
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(False)
        self.assertFalse(self.client.passiveserver)

    def test_voidcmd(self):
        self.client.voidcmd('echo 200')
        self.client.voidcmd('echo 299')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')

    def test_login(self):
        self.client.login()

    def test_acct(self):
        self.client.acct('passwd')

    def test_rename(self):
        self.client.rename('a', 'b')
        # A non-350 reply to RNFR must raise error_reply.
        self.server.handler.next_response = '200'
        self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')

    def test_delete(self):
        self.client.delete('foo')
        self.server.handler.next_response = '199'
        self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')

    def test_size(self):
        self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', received.append, rest=rest)
self.assertEqual(''.join(received), RETR_DATA[rest:],
msg='rest test case %d %d %d' % (rest,
len(''.join(received)),
len(RETR_DATA[rest:])))
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = StringIO.StringIO(RETR_DATA)
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = StringIO.StringIO(RETR_DATA)
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler.rest, str(r))
def test_storlines(self):
f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = StringIO.StringIO('x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
class TestIPv6Environment(TestCase):
    """FTP client tests over an IPv6 loopback connection.

    Verifies that the client switches to the extended EPRT/EPSV
    commands when the address family is AF_INET6.
    """
    def setUp(self):
        self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
        self.server.start()
        self.client = ftplib.FTP()
        self.client.connect(self.server.host, self.server.port)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_af(self):
        self.assertEqual(self.client.af, socket.AF_INET6)
    def test_makeport(self):
        # Active mode must use EPRT, not PORT, on IPv6.
        self.client.makeport()
        self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
    def test_makepasv(self):
        # Passive mode must use EPSV, not PASV, on IPv6.
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), 10)
        conn.close()
        self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
    def test_transfer(self):
        # Retrieval must work in both passive and active mode.
        def retr():
            received = []
            self.client.retrbinary('retr', received.append)
            self.assertEqual(''.join(received), RETR_DATA)
        self.client.set_pasv(True)
        retr()
        self.client.set_pasv(False)
        retr()
class TestTLS_FTPClassMixin(TestFTPClass):
    """Repeat TestFTPClass tests starting the TLS layer for both control
    and data connections first.

    Only setUp is overridden: the whole inherited test suite then runs
    over AUTH TLS + PROT P secured channels.  tearDown is inherited.
    """
    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=10)
        self.client.connect(self.server.host, self.server.port)
        # enable TLS
        self.client.auth()
        self.client.prot_p()
class TestTLS_FTPClass(TestCase):
    """Specific TLS_FTP class tests.

    Exercises the TLS negotiation verbs (AUTH, PROT P/C) directly,
    checking when the control and data sockets are SSL-wrapped.
    """
    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=10)
        self.client.connect(self.server.host, self.server.port)
    def tearDown(self):
        self.client.close()
        self.server.stop()
    def test_control_connection(self):
        # auth() upgrades the control socket to SSL.
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
    def test_data_connection(self):
        # clear text
        sock = self.client.transfercmd('list')
        self.assertNotIsInstance(sock, ssl.SSLSocket)
        sock.close()
        self.assertEqual(self.client.voidresp(), "226 transfer complete")
        # secured, after PROT P
        self.client.prot_p()
        sock = self.client.transfercmd('list')
        self.assertIsInstance(sock, ssl.SSLSocket)
        sock.close()
        self.assertEqual(self.client.voidresp(), "226 transfer complete")
        # PROT C is issued, the connection must be in cleartext again
        self.client.prot_c()
        sock = self.client.transfercmd('list')
        self.assertNotIsInstance(sock, ssl.SSLSocket)
        sock.close()
        self.assertEqual(self.client.voidresp(), "226 transfer complete")
    def test_login(self):
        # login() is supposed to implicitly secure the control connection
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.login()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        # make sure that AUTH TLS doesn't get issued again
        self.client.login()
    def test_auth_issued_twice(self):
        self.client.auth()
        self.assertRaises(ValueError, self.client.auth)
    def test_auth_ssl(self):
        # NOTE: PROTOCOL_SSLv3/PROTOCOL_TLSv1 are legacy constants removed
        # from modern ssl modules; this test is tied to the Python 2 era.
        try:
            self.client.ssl_version = ssl.PROTOCOL_SSLv3
            self.client.auth()
            self.assertRaises(ValueError, self.client.auth)
        finally:
            self.client.ssl_version = ssl.PROTOCOL_TLSv1
class TestTimeouts(TestCase):
    """Timeout-propagation tests for ftplib.FTP() / connect().

    A throwaway one-shot server thread accepts a single connection and
    sends a fake banner; the tests then assert which timeout value ends
    up on the client socket.  The Event-based 3-phase handshake below is
    order-sensitive -- do not reorder the set()/wait() calls.
    """
    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(10)
        self.port = test_support.bind_port(self.sock)
        threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
        # Wait for the server to be ready.
        self.evt.wait()
        self.evt.clear()
        # Patch the class default port so FTP(HOST) hits our server.
        ftplib.FTP.port = self.port
    def tearDown(self):
        self.evt.wait()
    def server(self, evt, serv):
        # This method sets the evt 3 times:
        # 1) when the connection is ready to be accepted.
        # 2) when it is safe for the caller to close the connection
        # 3) when we have closed the socket
        serv.listen(5)
        # (1) Signal the caller that we are ready to accept the connection.
        evt.set()
        try:
            conn, addr = serv.accept()
        except socket.timeout:
            pass
        else:
            conn.send("1 Hola mundo\n")
            # (2) Signal the caller that it is safe to close the socket.
            evt.set()
            conn.close()
        finally:
            serv.close()
            # (3) Signal the caller that we are done.
            evt.set()
    def testTimeoutDefault(self):
        # default -- use global socket timeout
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutNone(self):
        # no timeout -- do not use global socket timeout
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(ftp.sock.gettimeout() is None)
        self.evt.wait()
        ftp.close()
    def testTimeoutValue(self):
        # a value
        ftp = ftplib.FTP(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutConnect(self):
        # timeout given to connect() instead of the constructor.
        ftp = ftplib.FTP()
        ftp.connect(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutDifferentOrder(self):
        ftp = ftplib.FTP(timeout=30)
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
    def testTimeoutDirectAccess(self):
        # Setting the attribute directly before connect() must also work.
        ftp = ftplib.FTP()
        ftp.timeout = 30
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
def test_main():
    """Assemble and run the ftplib test suite.

    IPv6 tests are added only if an IPv6 dummy server can actually be
    bound; TLS tests only if the ssl module imported successfully.
    """
    suite = [TestFTPClass, TestTimeouts]
    if socket.has_ipv6:
        # Probe: binding may still fail even when has_ipv6 is true.
        try:
            DummyFTPServer((HOST, 0), af=socket.AF_INET6)
        except socket.error:
            pass
        else:
            suite.append(TestIPv6Environment)
    if ssl is not None:
        suite += [TestTLS_FTPClassMixin, TestTLS_FTPClass]
    thread_info = test_support.threading_setup()
    try:
        test_support.run_unittest(*suite)
    finally:
        # Always restore the threading state recorded above.
        test_support.threading_cleanup(*thread_info)
# Script entry point: run the whole suite when executed directly.
if __name__ == '__main__':
    test_main()
| 32.273854 | 85 | 0.59355 |
a2ce5d1362d3520f793c776dff7df34fe3e727d9 | 202 | py | Python | django_two/django_two/settings/production.py | itsnamgyu/django-two | 02e61dc20750d6bb000b58b62c1cd0a120a5a15d | [
"MIT"
] | 6 | 2018-01-19T13:20:12.000Z | 2019-02-02T12:44:41.000Z | django_two/django_two/settings/production.py | itsnamgyu/django-two | 02e61dc20750d6bb000b58b62c1cd0a120a5a15d | [
"MIT"
] | 14 | 2018-01-19T13:12:47.000Z | 2018-02-15T07:55:46.000Z | django_two/django_two/settings/production.py | itsnamgyu/django-two | 02e61dc20750d6bb000b58b62c1cd0a120a5a15d | [
"MIT"
] | null | null | null | STATIC_ROOT = '/var/www/django-two/static/'
# Production hardening: never run with DEBUG on in deployment.
DEBUG = False
# Hosts allowed to serve this site; hard-coded EC2 hostname --
# NOTE(review): update this when the instance changes.
ALLOWED_HOSTS = [
    "ec2-13-125-167-5.ap-northeast-2.compute.amazonaws.com"
]
# Side effect at import time: confirms which settings module was loaded.
print("Loaded settings for deployment using Apache and WSGI")
| 22.444444 | 61 | 0.732673 |
8c689bcd895255d8e0f32372f942f8f73618cbb2 | 15,311 | py | Python | demo_app/app/models.py | guocdfeifei/xadmin | 2413da625582a53e5453c08cabd1f07d3f48639e | [
"BSD-3-Clause"
] | null | null | null | demo_app/app/models.py | guocdfeifei/xadmin | 2413da625582a53e5453c08cabd1f07d3f48639e | [
"BSD-3-Clause"
] | null | null | null | demo_app/app/models.py | guocdfeifei/xadmin | 2413da625582a53e5453c08cabd1f07d3f48639e | [
"BSD-3-Clause"
] | 2 | 2018-12-07T03:01:44.000Z | 2020-08-25T08:49:10.000Z | from django.db import models
# from viewflow.models import Process
from django.contrib.auth.models import Group
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
import sys
# Python 2-only hack: reload(sys) restores the setdefaultencoding()
# function that site.py deletes, then forces UTF-8 as the default
# codec.  This is a no-op trap on Python 3 (reload/setdefaultencoding
# do not exist there).
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)
# Swappable user model reference for ForeignKey targets below.
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
# Choices for Host.status (stored as small int).
SERVER_STATUS = (
    (0, u"Normal"),
    (1, u"Down"),
    (2, u"No Connect"),
    (3, u"Error"),
)
# Choices for Host.service_type.  NOTE: the stored keys 'moniter' and
# 'storge' are misspelled but persisted in the DB -- do not "fix" them
# without a data migration.
SERVICE_TYPES = (
    ('moniter', u"Moniter"),
    ('lvs', u"LVS"),
    ('db', u"Database"),
    ('analysis', u"Analysis"),
    ('admin', u"Admin"),
    ('storge', u"Storge"),
    ('web', u"WEB"),
    ('email', u"Email"),
    ('mix', u"Mix"),
)
from django.core.files.storage import FileSystemStorage
from django.http import HttpResponse
class ImageStorage(FileSystemStorage):
    """FileSystemStorage that renames every upload to a timestamped name.

    Files are stored under their original directory with the original
    extension, but the base name is replaced by
    ``YYYYmmddHHMMSS_<rand>`` to avoid leaking user-supplied names.

    Fix: removed the redundant ``from django.conf import settings``
    that lived inside the class body -- ``settings`` is already
    imported at module level, and the class-body import only added a
    stray ``ImageStorage.settings`` attribute.
    """

    def __init__(self, location=settings.MEDIA_ROOT, base_url=settings.MEDIA_URL):
        """Initialize with MEDIA_ROOT/MEDIA_URL defaults."""
        super(ImageStorage, self).__init__(location, base_url)

    def _save(self, name, content):
        """Store *content* under a generated timestamped filename.

        ``name`` is the uploaded file's name; only its directory and
        extension are kept.
        """
        import os, time, random
        # Extension and directory of the incoming name are preserved.
        ext = os.path.splitext(name)[1]
        d = os.path.dirname(name)
        # Generated base name: timestamp plus a random suffix.
        # NOTE(review): only 101 suffixes per second -- two uploads in
        # the same second can collide; FileSystemStorage will then
        # deduplicate by altering the name.
        fn = time.strftime('%Y%m%d%H%M%S')
        fn = fn + '_%d' % random.randint(0, 100)
        name = os.path.join(d, fn + ext)
        return super(ImageStorage, self)._save(name, content)
@python_2_unicode_compatible
class IDC(models.Model):
    """An Internet Data Center (machine room) that hosts servers."""
    name = models.CharField(max_length=64)
    description = models.TextField()
    contact = models.CharField(max_length=32)
    telphone = models.CharField(max_length=32)  # NOTE: field name is a typo of "telephone" but is persisted
    address = models.CharField(max_length=128)
    customer_id = models.CharField(max_length=128)
    # Auth groups allowed to manage this IDC (many-to-many).
    groups = models.ManyToManyField(Group)  # many
    create_time = models.DateField(auto_now=True)
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = u"IDC"
        verbose_name_plural = verbose_name
@python_2_unicode_compatible
class Host(models.Model):
    """A physical/virtual server tracked in the CMDB, tied to an IDC."""
    idc = models.ForeignKey(IDC, on_delete=models.CASCADE)
    name = models.CharField(max_length=64)
    nagios_name = models.CharField(u"Nagios Host ID", max_length=64, blank=True, null=True)
    ip = models.GenericIPAddressField(blank=True, null=True)
    internal_ip = models.GenericIPAddressField(blank=True, null=True)
    user = models.CharField(max_length=64)
    # NOTE(review): stored in plain text -- a security liability.
    password = models.CharField(max_length=128)
    ssh_port = models.IntegerField(blank=True, null=True)
    status = models.SmallIntegerField(choices=SERVER_STATUS)
    brand = models.CharField(max_length=64, choices=[(i, i) for i in (u"DELL", u"HP", u"Other")])
    model = models.CharField(max_length=64)
    cpu = models.CharField(max_length=64)
    # Even core counts 2..28.
    core_num = models.SmallIntegerField(choices=[(i * 2, "%s Cores" % (i * 2)) for i in range(1, 15)])
    hard_disk = models.IntegerField()
    memory = models.IntegerField()
    system = models.CharField(u"System OS", max_length=32, choices=[(i, i) for i in (u"CentOS", u"FreeBSD", u"Ubuntu")])
    system_version = models.CharField(max_length=32)
    system_arch = models.CharField(max_length=32, choices=[(i, i) for i in (u"x86_64", u"i386")])
    create_time = models.DateField()
    guarantee_date = models.DateField()
    service_type = models.CharField(max_length=32, choices=SERVICE_TYPES)
    description = models.TextField()
    administrator = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="Admin")
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = u"Host"
        verbose_name_plural = verbose_name
@python_2_unicode_compatible
class MaintainLog(models.Model):
    """A maintenance-event record for a Host (who/when/what)."""
    host = models.ForeignKey(Host, on_delete=models.CASCADE)
    maintain_type = models.CharField(max_length=32)
    hard_type = models.CharField(max_length=16)
    time = models.DateTimeField()
    operator = models.CharField(max_length=16)
    note = models.TextField()
    def __str__(self):
        return '%s maintain-log [%s] %s %s' % (self.host.name, self.time.strftime('%Y-%m-%d %H:%M:%S'),
                                               self.maintain_type, self.hard_type)
    class Meta:
        verbose_name = u"Maintain Log"
        verbose_name_plural = verbose_name
@python_2_unicode_compatible
class HostGroup(models.Model):
    """A named grouping of Hosts (reverse accessor: Host.groups)."""
    name = models.CharField(max_length=32)
    description = models.TextField()
    hosts = models.ManyToManyField(
        Host, verbose_name=u'Hosts', blank=True, related_name='groups')
    class Meta:
        verbose_name = u"Host Group"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class AccessRecord(models.Model):
    """Daily aggregate of site traffic (users and page views)."""
    date = models.DateField()
    user_count = models.IntegerField()
    view_count = models.IntegerField()
    class Meta:
        verbose_name = u"Access Record"
        verbose_name_plural = verbose_name
    def __str__(self):
        return "%s Access Record" % self.date.strftime('%Y-%m-%d')
@python_2_unicode_compatible
class kmChoices(models.Model):
    """An exam subject choice, referenced by ccpa.kskm.

    NOTE: the lowercase class name predates PEP 8 naming and is part of
    the public schema (table/migration name) -- do not rename casually.
    """
    description = models.CharField(max_length=64)
    def __str__(self):
        return self.description
    class Meta:
        verbose_name = u"考试科目"
        verbose_name_plural = verbose_name
@python_2_unicode_compatible
class ccpa(models.Model):
    """Registration record for the CCPA exam program.

    ``train`` is the training organisation (a user account); the
    admission-ticket number ``card_no`` is derived, not stored.

    Fix: ``get_card_no`` was defined twice; the second definition
    (``return self.id``) shadowed the real accessor even though the
    ``card_no`` property had already captured the first one, so
    ``obj.get_card_no()`` and ``obj.card_no`` disagreed.  The shadowing
    duplicate is removed.
    """
    area = models.CharField(max_length=64, verbose_name=u'报名地区')
    train = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="培训机构名称")
    periods = models.CharField(max_length=64, verbose_name=u'期数', choices=[(i, i) for i in (u"一期", u"二期", u"三期")])
    name = models.CharField(max_length=64, verbose_name=u'姓名')
    pinyin = models.CharField(max_length=64, verbose_name=u'姓名拼音')
    sex = models.CharField(max_length=64, verbose_name=u'性别', choices=[(i, i) for i in (u"男", u"女")])
    guarantee_date = models.DateField(verbose_name=u'出生日期')
    nation = models.CharField(max_length=16, verbose_name=u'民族')
    edu = models.CharField(max_length=64, verbose_name=u'学历', choices=[(i, i) for i in (u"本科", u"专科", u"硕士")])
    poilt = models.CharField(max_length=64, verbose_name=u'政治面貌', choices=[(i, i) for i in (u"群众", u"党员")])
    icc = models.CharField(max_length=64, verbose_name=u'身份证号')
    phone = models.CharField(max_length=20, verbose_name=u'手机号')
    email = models.EmailField(error_messages={'invalid': '格式错了.'}, verbose_name=u'联系邮箱')
    school = models.CharField(max_length=64, verbose_name=u'学校')
    specialty = models.CharField(max_length=64, verbose_name=u'专业')
    work = models.CharField(max_length=64, verbose_name=u'工作单位', blank=True, null=True)
    job = models.CharField(max_length=64, verbose_name=u'职务', blank=True, null=True)
    address = models.CharField(max_length=128, verbose_name=u'联系地址')
    enaddress = models.CharField(max_length=128, verbose_name=u'英文地址')
    Postcodes = models.CharField(max_length=16, verbose_name=u'邮编')
    telephone = models.CharField(max_length=16, verbose_name=u'电话')
    type = models.CharField(max_length=64, verbose_name=u'报考类型', choices=[(i, i) for i in (u"CCPA初级", u"CCPA中级", u"CCPA高级")])
    kskm = models.ManyToManyField(kmChoices, verbose_name=u'考试科目')
    exam_date = models.DateField(verbose_name=u'考试时间')
    exam_address = models.CharField(max_length=128, verbose_name=u'考试地点')
    photo = models.ImageField(upload_to='upload/image/%Y/%m',storage=ImageStorage(), max_length=100, verbose_name=u'上传照片', null=True, blank=True, )
    status = models.CharField(max_length=64,verbose_name=u'报名状态', choices=[(i, i) for i in (u"草稿", u"通过")], default='草稿')
    create_date = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
    update_date = models.DateTimeField(verbose_name='最近修改时间', auto_now=True)

    def get_card_no(self):
        """Derive the admission-ticket number.

        Format: 3-digit training-org id + program digit '1' + 6-digit
        zero-padded record id.  Requires the instance to be saved
        (``self.id`` must exist).
        """
        school_no = str(self.train.id).zfill(3)
        card_no = school_no + '1' + str(self.id).zfill(6)
        return card_no
    get_card_no.short_description = "准考证号"
    card_no = property(get_card_no)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = u"CCPA项目"
        verbose_name_plural = verbose_name
@python_2_unicode_compatible
class xss(models.Model):
    """Registration record for the CCPA payroll-tax ("薪税师") program.

    Largely parallel to ``ccpa`` but without school/specialty fields and
    with an extra offline-class date; the derived ticket number uses
    program digit '2'.
    """
    model_name = 'xss'
    # UPLOAD_PATH_IMAGE = 'upload/image/'
    area = models.CharField(max_length=64, verbose_name=u'报名地区')
    train = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="培训机构名称")
    periods = models.CharField(max_length=64, verbose_name=u'期数', choices=[(i, i) for i in (u"一期", u"二期", u"三期")])
    name = models.CharField(max_length=64, verbose_name=u'姓名')
    pinyin = models.CharField(max_length=64, verbose_name=u'姓名拼音')
    sex = models.CharField(max_length=64, verbose_name=u'性别', choices=[(i, i) for i in (u"男", u"女")])
    guarantee_date = models.DateField(verbose_name=u'出生日期')
    nation = models.CharField(max_length=16, verbose_name=u'民族')
    edu = models.CharField(max_length=64, verbose_name=u'学历', choices=[(i, i) for i in (u"本科", u"专科", u"硕士")])
    poilt = models.CharField(max_length=64, verbose_name=u'政治面貌', choices=[(i, i) for i in (u"群众", u"党员")])
    icc = models.CharField(max_length=64, verbose_name=u'身份证号')
    phone = models.CharField(max_length=20, verbose_name=u'手机号')
    email = models.EmailField(error_messages={'invalid': '格式错了.'}, verbose_name=u'联系邮箱')
    # school = models.CharField(max_length=64, verbose_name=u'学校')
    # specialty = models.CharField(max_length=64, verbose_name=u'专业')
    work = models.CharField(max_length=64, verbose_name=u'工作单位', blank=True, null=True)
    job = models.CharField(max_length=64, verbose_name=u'职务', blank=True, null=True)
    address = models.CharField(max_length=128, verbose_name=u'联系地址')
    enaddress = models.CharField(max_length=128, verbose_name=u'英文地址')
    Postcodes = models.CharField(max_length=16, verbose_name=u'邮编')
    telephone = models.CharField(max_length=16, verbose_name=u'电话')
    type = models.CharField(max_length=64, verbose_name=u'报名类型', choices=[(i, i) for i in (u"CCPA《薪税师》一级", u"CCPA《薪税师》二级", u"CCPA《薪税师》三级")])
    # kskm = models.ManyToManyField(kmChoices, verbose_name=u'考试科目')
    below_date = models.DateField(verbose_name=u'线下授课时间')
    exam_date = models.DateField(verbose_name=u'考试时间')
    exam_address = models.CharField(max_length=128, verbose_name=u'考试地点')
    photo = models.ImageField(upload_to='upload/image/%Y/%m',storage=ImageStorage(), max_length=100, verbose_name=u'上传照片', null=True, blank=True, )
    status = models.CharField(max_length=64,verbose_name=u'报名状态', choices=[(i, i) for i in (u"草稿", u"通过")], default='草稿')
    create_date = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
    update_date = models.DateTimeField(verbose_name='最近修改时间', auto_now=True)
    # card_no = models.CharField(max_length=64, verbose_name=u'准考证号')
    def get_card_no(self):
        # Ticket number: 3-digit training-org id + '2' + 6-digit record id.
        school_no = str(self.train.id).zfill(3)
        # bmyearm = self.create_date.strftime("2%m")
        # print('bmyearm',bmyearm)
        card_no =school_no+ '2'+str(self.id).zfill(6)
        return card_no
    get_card_no.short_description = "准考证号"
    card_no = property(get_card_no)
    # card_no.setter(verbose_name='准考证号')
    def __str__(self):
        return self.name
    # def clean_fields(self, exclude=None):
    # class Meta:
    class Meta:
        verbose_name = u"薪税师项目"
        # model_name = 'xss'
        verbose_name_plural = verbose_name
    # swappable = 'AUTH_USER_MODEL'
    # permissions = (
    #     ("change_xss_status", "Can change the status of xss"),
    # )
@python_2_unicode_compatible
class fund(models.Model):
    """A health fund (insurer), referenced by customers and providers."""
    # UPLOAD_PATH_IMAGE = 'upload/image/'
    name = models.CharField(max_length=64)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class treatment_item(models.Model):
    """A billable treatment/service type, referenced by treatment.item."""
    # UPLOAD_PATH_IMAGE = 'upload/image/'
    name = models.CharField(max_length=64)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class customer(models.Model):
    """A clinic customer; health-fund membership is optional."""
    # UPLOAD_PATH_IMAGE = 'upload/image/'
    first_name = models.CharField(max_length=64)
    # train = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="培训机构名称")
    last_name = models.CharField(max_length=64)
    date_of_birth = models.DateField(verbose_name=u'birthday')
    contact_number = models.CharField(max_length=20)
    health_fund = models.ForeignKey(fund, on_delete=models.CASCADE,blank=True, null=True)
    health_fund_number = models.CharField(max_length=64,blank=True, null=True)
    def __str__(self):
        return self.first_name+' '+self.last_name
    def fullname(self):
        # Same value as __str__; kept as a named accessor for admin/display.
        return self.first_name+' '+self.last_name
    # def get_card_no(self):
    #     return self.id
    class Meta:
        verbose_name = u"customer"
        verbose_name_plural = verbose_name
@python_2_unicode_compatible
class provider(models.Model):
    """A treatment provider registered with a specific health fund."""
    # UPLOAD_PATH_IMAGE = 'upload/image/'
    first_name = models.CharField(max_length=64)
    # train = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="培训机构名称")
    last_name = models.CharField(max_length=64)
    # contact_number = models.CharField(max_length=20)
    health_fund = models.ForeignKey(fund, on_delete=models.CASCADE)
    health_fund_number = models.CharField(max_length=64,verbose_name=u'provider number')
    def __str__(self):
        return self.first_name+' '+self.last_name+' '+str(self.health_fund)
    def fullname(self):
        # Name without the fund suffix that __str__ appends.
        return self.first_name+' '+self.last_name
    # def get_card_no(self):
    #     return self.id
    class Meta:
        verbose_name = u"provider"
        verbose_name_plural = verbose_name
@python_2_unicode_compatible
class treatment(models.Model):
    """One treatment session: customer, provider, item, payment split."""
    # UPLOAD_PATH_IMAGE = 'upload/image/'
    cust = models.ForeignKey(customer, on_delete=models.CASCADE, verbose_name="customer")
    date = models.DateTimeField( auto_now_add=True)#,input_formats=['%d/%m/%Y %H:%M:%S','%d/%m/%Y %H:%M:%S',
    # train = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name="培训机构名称")
    item = models.ForeignKey(treatment_item, on_delete=models.CASCADE)
    prov = models.ForeignKey(provider, on_delete=models.CASCADE, verbose_name="provider")
    # contact_number = models.CharField(max_length=20)
    # Payment split: amount claimed via HICAPS vs paid in cash.
    hicaps = models.DecimalField(max_digits=10, decimal_places=2)
    cash = models.DecimalField(max_digits=10, decimal_places=2)
    minute = models.IntegerField()
    def __str__(self):
        return str(self.id)#+' '+self.last_name
    def cost(self):
        # Total charged (hicaps + cash), returned as a string for display.
        return str(self.hicaps+self.cash)
    class Meta:
        verbose_name = u"treatment"
        verbose_name_plural = verbose_name
# @python_2_unicode_compatible
# class xsstest(Process):
# # UPLOAD_PATH_IMAGE = 'upload/image/'
# area = models.CharField(max_length=64, verbose_name=u'报名地区')
# approved = models.BooleanField(default=False) | 38.087065 | 147 | 0.694141 |
7c92b325d5836f25e2791e663ceff9805ec905ae | 549 | py | Python | Labs/Term3/Lesson 3 - 16/python/CarND-GNB classifier-Driver Behaviours/prediction.py | mlandry1/CarND | bfa8a1af634017cc35eedff8974d299a58006554 | [
"MIT"
] | 1 | 2018-05-13T08:43:59.000Z | 2018-05-13T08:43:59.000Z | Labs/Term3/Lesson 3 - 16/python/CarND-GNB classifier-Driver Behaviours/prediction.py | mlandry1/CarND | bfa8a1af634017cc35eedff8974d299a58006554 | [
"MIT"
] | null | null | null | Labs/Term3/Lesson 3 - 16/python/CarND-GNB classifier-Driver Behaviours/prediction.py | mlandry1/CarND | bfa8a1af634017cc35eedff8974d299a58006554 | [
"MIT"
] | 3 | 2018-05-13T08:44:05.000Z | 2021-01-12T08:04:16.000Z | #!/usr/bin/env python
from classifier_solution import GNB
import json
def main():
    """Train a GNB classifier on train.json and report accuracy on test.json.

    Both files must contain 'states' (feature vectors) and 'labels'
    (ground-truth classes) keys.

    Fix: the original leaked both file handles via
    ``json.loads(open(...).read())``; they are now closed with ``with``.
    """
    gnb = GNB()
    with open('train.json') as fp:
        j = json.load(fp)
    print(j.keys())
    X = j['states']
    Y = j['labels']
    gnb.train(X, Y)
    with open('test.json') as fp:
        j = json.load(fp)
    X = j['states']
    Y = j['labels']
    # Count correct predictions over the test set.
    score = 0
    for coords, label in zip(X, Y):
        predicted = gnb.predict(coords)
        if predicted == label:
            score += 1
    fraction_correct = float(score) / len(X)
    print("You got {} percent correct".format(100 * fraction_correct))
if __name__ == "__main__":
main() | 18.931034 | 67 | 0.639344 |
2cd927ebd14e72fb4f8c4666833681f6402753a1 | 1,641 | py | Python | examples/undocumented/python/transfer_multitask_clustered_logistic_regression.py | gf712/shogun | ca2afb8f092288455701539aa58952dbf6743378 | [
"BSD-3-Clause"
] | 1 | 2020-05-12T10:37:30.000Z | 2020-05-12T10:37:30.000Z | examples/undocumented/python/transfer_multitask_clustered_logistic_regression.py | gf712/shogun | ca2afb8f092288455701539aa58952dbf6743378 | [
"BSD-3-Clause"
] | null | null | null | examples/undocumented/python/transfer_multitask_clustered_logistic_regression.py | gf712/shogun | ca2afb8f092288455701539aa58952dbf6743378 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from numpy import array,hstack,sin,cos
from numpy.random import seed, rand
from tools.load import LoadMatrix
# Load the shared example datasets shipped with shogun's examples tree.
# Paths are relative to the examples directory; running from elsewhere
# will raise on the missing files.
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
# Standard shogun-example convention: one parameter set per list entry.
parameter_list = [[traindat,testdat,label_traindat]]
def transfer_multitask_clustered_logistic_regression (fm_train=traindat,fm_test=testdat,label_train=label_traindat):
    """Run shogun's clustered multitask logistic regression on 3 tasks.

    The training matrix is tripled (raw, sin, cos) and split into three
    equal tasks; returns the regression outputs for task 0.

    Fix: the feature matrix was built from the module-level global
    ``traindat`` instead of the ``fm_train`` parameter, silently
    ignoring any caller-supplied training data.  It now uses the
    parameter (identical behaviour for the default call).
    """
    from shogun import BinaryLabels, Task, TaskGroup, MSG_DEBUG
    try:
        from shogun import MultitaskClusteredLogisticRegression
    except ImportError:
        # NOTE(review): example-script behaviour -- terminates the
        # process rather than raising, mirroring other shogun examples.
        print("MultitaskClusteredLogisticRegression not available")
        exit()
    import shogun as sg
    # Stack raw and trigonometric transforms of the TRAINING data.
    features = sg.create_features(hstack((fm_train, sin(fm_train), cos(fm_train))))
    labels = BinaryLabels(hstack((label_train, label_train, label_train)))
    # Partition the stacked vectors into three contiguous tasks.
    n_vectors = features.get_num_vectors()
    task_one = Task(0, n_vectors // 3)
    task_two = Task(n_vectors // 3, 2 * n_vectors // 3)
    task_three = Task(2 * n_vectors // 3, n_vectors)
    task_group = TaskGroup()
    task_group.append_task(task_one)
    task_group.append_task(task_two)
    task_group.append_task(task_three)
    mtlr = MultitaskClusteredLogisticRegression(1.0, 100.0, features, labels, task_group, 2)
    #mtlr.io.set_loglevel(MSG_DEBUG)
    mtlr.set_tolerance(1e-3)  # 1e-3 tolerance (the old comment said 1e-2)
    mtlr.set_max_iter(100)
    mtlr.train()
    mtlr.set_current_task(0)
    out = mtlr.apply_regression().get_labels()
    return out
# Run the example with the default dataset when executed directly.
if __name__=='__main__':
	print('TransferMultitaskClusteredLogisticRegression')
	transfer_multitask_clustered_logistic_regression(*parameter_list[0])
| 34.1875 | 116 | 0.800122 |
daac62d7f2d39941d2277bd4e0dd7ac3234b58b7 | 331 | py | Python | Geolocation/geolocation.py | SaidRem/weather_API | 9ce43ac58ca530c101c7f98a0ecc67f597be9f0d | [
"MIT"
] | null | null | null | Geolocation/geolocation.py | SaidRem/weather_API | 9ce43ac58ca530c101c7f98a0ecc67f597be9f0d | [
"MIT"
] | null | null | null | Geolocation/geolocation.py | SaidRem/weather_API | 9ce43ac58ca530c101c7f98a0ecc67f597be9f0d | [
"MIT"
] | null | null | null | import requests
from pprint import pprint
def geoloc():
    """Return geolocation info for the caller's public IP as a dict.

    Queries the freegeoip.app JSON endpoint (network access required).

    Fix: added an explicit timeout -- ``requests.get`` has no default
    timeout and would otherwise block indefinitely on a stalled server.
    """
    r = requests.get("http://freegeoip.app/json/", timeout=10).json()
    return r
def coordinates():
    """Return only the latitude/longitude pair from geoloc()."""
    info = geoloc()
    return {"latitude": info["latitude"], "longitude": info["longitude"]}
# Demo entry point: dump full geolocation info, then just the coordinates.
if __name__ == "__main__":
    pprint(geoloc())
    print(f'Your coordinates:\n{coordinates()}')
| 19.470588 | 65 | 0.65861 |
9ecd4503c9ee4738d828647f1aa2e88966f4840c | 11,549 | py | Python | google/ads/googleads/v6/services/services/custom_interest_service/transports/grpc.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/services/services/custom_interest_service/transports/grpc.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v6/services/services/custom_interest_service/transports/grpc.py | wxxlouisa/google-ads-python | f24137966f6bfcb765a9b1fae79f2d23041825fe | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v6.resources.types import custom_interest
from google.ads.googleads.v6.services.types import custom_interest_service
from .base import CustomInterestServiceTransport, DEFAULT_CLIENT_INFO
class CustomInterestServiceGrpcTransport(CustomInterestServiceTransport):
    """gRPC backend transport for CustomInterestService.
    Service to manage custom interests.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        # Channel setup follows one of three mutually exclusive paths:
        # an explicit pre-built channel, the deprecated mTLS-endpoint flow,
        # or a plain TLS channel built from ``host``/``credentials``.
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )
            # Append the default gRPC port when the endpoint carries none.
            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )
            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    # -1 removes the default gRPC message-size caps.
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Lazily-populated cache of RPC stub callables, keyed by method name.
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host, credentials=credentials, client_info=client_info,
        )
    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: credentials.Credentials = None,
        scopes: Optional[Sequence[str]] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            address (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def get_custom_interest(
        self,
    ) -> Callable[
        [custom_interest_service.GetCustomInterestRequest],
        custom_interest.CustomInterest,
    ]:
        r"""Return a callable for the get custom interest method over gRPC.
        Returns the requested custom interest in full detail.
        Returns:
            Callable[[~.GetCustomInterestRequest],
                ~.CustomInterest]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_custom_interest" not in self._stubs:
            self._stubs["get_custom_interest"] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v6.services.CustomInterestService/GetCustomInterest",
                request_serializer=custom_interest_service.GetCustomInterestRequest.serialize,
                response_deserializer=custom_interest.CustomInterest.deserialize,
            )
        return self._stubs["get_custom_interest"]
    @property
    def mutate_custom_interests(
        self,
    ) -> Callable[
        [custom_interest_service.MutateCustomInterestsRequest],
        custom_interest_service.MutateCustomInterestsResponse,
    ]:
        r"""Return a callable for the mutate custom interests method over gRPC.
        Creates or updates custom interests. Operation
        statuses are returned.
        Returns:
            Callable[[~.MutateCustomInterestsRequest],
                ~.MutateCustomInterestsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "mutate_custom_interests" not in self._stubs:
            self._stubs[
                "mutate_custom_interests"
            ] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v6.services.CustomInterestService/MutateCustomInterests",
                request_serializer=custom_interest_service.MutateCustomInterestsRequest.serialize,
                response_deserializer=custom_interest_service.MutateCustomInterestsResponse.deserialize,
            )
        return self._stubs["mutate_custom_interests"]
__all__ = ("CustomInterestServiceGrpcTransport",)
| 41.693141 | 104 | 0.628539 |
749286c25d4fff2a13aabc4d270b3c650766d68a | 690 | py | Python | ebl/dictionary/web/words.py | ElectronicBabylonianLiterature/dictionary | 5977a57314cf57f94f75cd12520f178b1d6a6555 | [
"MIT"
] | 4 | 2020-04-12T14:24:51.000Z | 2020-10-15T15:48:15.000Z | ebl/dictionary/web/words.py | ElectronicBabylonianLiterature/dictionary | 5977a57314cf57f94f75cd12520f178b1d6a6555 | [
"MIT"
] | 200 | 2019-12-04T09:53:20.000Z | 2022-03-30T20:11:31.000Z | ebl/dictionary/web/words.py | ElectronicBabylonianLiterature/dictionary | 5977a57314cf57f94f75cd12520f178b1d6a6555 | [
"MIT"
] | 1 | 2021-09-06T16:22:39.000Z | 2021-09-06T16:22:39.000Z | import falcon
from ebl.dictionary.application.word_schema import WordSchema
from ebl.marshmallowschema import validate
from ebl.users.web.require_scope import require_scope
class WordsResource:
    """Falcon resource exposing dictionary words for reading and updating."""

    def __init__(self, dictionary):
        self._dictionary = dictionary

    @falcon.before(require_scope, "read:words")
    def on_get(self, _req, resp, object_id):
        """Return the word identified by ``object_id`` as the response body."""
        resp.media = self._dictionary.find(object_id)

    @falcon.before(require_scope, "write:words")
    @validate(WordSchema())
    def on_post(self, req, resp, object_id):
        """Store the validated request body as the word ``object_id``."""
        updated_word = dict(req.media)
        updated_word["_id"] = object_id
        self._dictionary.update(updated_word, req.context.user)
        resp.status = falcon.HTTP_NO_CONTENT
| 31.363636 | 61 | 0.721739 |
1b9ef55734dca4d43ff1786870ea09eaec860fe0 | 14,465 | py | Python | python/ccxt/__init__.py | MoreChickenDelivered/ccxt | a8996798d08a1cedc30bfb3db72d14bc8919930f | [
"MIT"
] | null | null | null | python/ccxt/__init__.py | MoreChickenDelivered/ccxt | a8996798d08a1cedc30bfb3db72d14bc8919930f | [
"MIT"
] | null | null | null | python/ccxt/__init__.py | MoreChickenDelivered/ccxt | a8996798d08a1cedc30bfb3db72d14bc8919930f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library"""
# MIT License
# Copyright (c) 2017 Igor Kroitor
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------
__version__ = '1.15.45'
# ----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt._1broker import _1broker # noqa: F401
from ccxt._1btcxe import _1btcxe # noqa: F401
from ccxt.acx import acx # noqa: F401
from ccxt.allcoin import allcoin # noqa: F401
from ccxt.anxpro import anxpro # noqa: F401
from ccxt.anybits import anybits # noqa: F401
from ccxt.bibox import bibox # noqa: F401
from ccxt.binance import binance # noqa: F401
from ccxt.bit2c import bit2c # noqa: F401
from ccxt.bitbank import bitbank # noqa: F401
from ccxt.bitbay import bitbay # noqa: F401
from ccxt.bitfinex import bitfinex # noqa: F401
from ccxt.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.bitflyer import bitflyer # noqa: F401
from ccxt.bithumb import bithumb # noqa: F401
from ccxt.bitkk import bitkk # noqa: F401
from ccxt.bitlish import bitlish # noqa: F401
from ccxt.bitmarket import bitmarket # noqa: F401
from ccxt.bitmex import bitmex # noqa: F401
from ccxt.bitsane import bitsane # noqa: F401
from ccxt.bitso import bitso # noqa: F401
from ccxt.bitstamp import bitstamp # noqa: F401
from ccxt.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.bittrex import bittrex # noqa: F401
from ccxt.bitz import bitz # noqa: F401
from ccxt.bl3p import bl3p # noqa: F401
from ccxt.bleutrade import bleutrade # noqa: F401
from ccxt.braziliex import braziliex # noqa: F401
from ccxt.btcbox import btcbox # noqa: F401
from ccxt.btcchina import btcchina # noqa: F401
from ccxt.btcexchange import btcexchange # noqa: F401
from ccxt.btcmarkets import btcmarkets # noqa: F401
from ccxt.btctradeim import btctradeim # noqa: F401
from ccxt.btctradeua import btctradeua # noqa: F401
from ccxt.btcturk import btcturk # noqa: F401
from ccxt.btcx import btcx # noqa: F401
from ccxt.bxinth import bxinth # noqa: F401
from ccxt.ccex import ccex # noqa: F401
from ccxt.cex import cex # noqa: F401
from ccxt.chbtc import chbtc # noqa: F401
from ccxt.chilebit import chilebit # noqa: F401
from ccxt.cobinhood import cobinhood # noqa: F401
from ccxt.coinbase import coinbase # noqa: F401
from ccxt.coinbasepro import coinbasepro # noqa: F401
from ccxt.coincheck import coincheck # noqa: F401
from ccxt.coinegg import coinegg # noqa: F401
from ccxt.coinex import coinex # noqa: F401
from ccxt.coinexchange import coinexchange # noqa: F401
from ccxt.coinfalcon import coinfalcon # noqa: F401
from ccxt.coinfloor import coinfloor # noqa: F401
from ccxt.coingi import coingi # noqa: F401
from ccxt.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.coinmate import coinmate # noqa: F401
from ccxt.coinnest import coinnest # noqa: F401
from ccxt.coinone import coinone # noqa: F401
from ccxt.coinsecure import coinsecure # noqa: F401
from ccxt.coinspot import coinspot # noqa: F401
from ccxt.cointiger import cointiger # noqa: F401
from ccxt.coolcoin import coolcoin # noqa: F401
from ccxt.crypton import crypton # noqa: F401
from ccxt.cryptopia import cryptopia # noqa: F401
from ccxt.deribit import deribit # noqa: F401
from ccxt.dsx import dsx # noqa: F401
from ccxt.ethfinex import ethfinex # noqa: F401
from ccxt.exmo import exmo # noqa: F401
from ccxt.exx import exx # noqa: F401
from ccxt.fcoin import fcoin # noqa: F401
from ccxt.flowbtc import flowbtc # noqa: F401
from ccxt.foxbit import foxbit # noqa: F401
from ccxt.fybse import fybse # noqa: F401
from ccxt.fybsg import fybsg # noqa: F401
from ccxt.gatecoin import gatecoin # noqa: F401
from ccxt.gateio import gateio # noqa: F401
from ccxt.gdax import gdax # noqa: F401
from ccxt.gemini import gemini # noqa: F401
from ccxt.getbtc import getbtc # noqa: F401
from ccxt.hadax import hadax # noqa: F401
from ccxt.hitbtc import hitbtc # noqa: F401
from ccxt.hitbtc2 import hitbtc2 # noqa: F401
from ccxt.huobi import huobi # noqa: F401
from ccxt.huobicny import huobicny # noqa: F401
from ccxt.huobipro import huobipro # noqa: F401
from ccxt.ice3x import ice3x # noqa: F401
from ccxt.independentreserve import independentreserve # noqa: F401
from ccxt.indodax import indodax # noqa: F401
from ccxt.itbit import itbit # noqa: F401
from ccxt.jubi import jubi # noqa: F401
from ccxt.kraken import kraken # noqa: F401
from ccxt.kucoin import kucoin # noqa: F401
from ccxt.kuna import kuna # noqa: F401
from ccxt.lakebtc import lakebtc # noqa: F401
from ccxt.lbank import lbank # noqa: F401
from ccxt.liqui import liqui # noqa: F401
from ccxt.livecoin import livecoin # noqa: F401
from ccxt.luno import luno # noqa: F401
from ccxt.lykke import lykke # noqa: F401
from ccxt.mercado import mercado # noqa: F401
from ccxt.mixcoins import mixcoins # noqa: F401
from ccxt.negociecoins import negociecoins # noqa: F401
from ccxt.nova import nova # noqa: F401
from ccxt.okcoincny import okcoincny # noqa: F401
from ccxt.okcoinusd import okcoinusd # noqa: F401
from ccxt.okex import okex # noqa: F401
from ccxt.paymium import paymium # noqa: F401
from ccxt.poloniex import poloniex # noqa: F401
from ccxt.qryptos import qryptos # noqa: F401
from ccxt.quadrigacx import quadrigacx # noqa: F401
from ccxt.quoinex import quoinex # noqa: F401
from ccxt.southxchange import southxchange # noqa: F401
from ccxt.surbitcoin import surbitcoin # noqa: F401
from ccxt.therock import therock # noqa: F401
from ccxt.tidebit import tidebit # noqa: F401
from ccxt.tidex import tidex # noqa: F401
from ccxt.urdubit import urdubit # noqa: F401
from ccxt.vaultoro import vaultoro # noqa: F401
from ccxt.vbtc import vbtc # noqa: F401
from ccxt.virwox import virwox # noqa: F401
from ccxt.wex import wex # noqa: F401
from ccxt.xbtce import xbtce # noqa: F401
from ccxt.yobit import yobit # noqa: F401
from ccxt.yunbi import yunbi # noqa: F401
from ccxt.zaif import zaif # noqa: F401
from ccxt.zb import zb # noqa: F401
# Exchange ids exported by this package; must stay in sync with the
# exchange imports above (one entry per imported exchange class).
exchanges = [
    '_1broker',
    '_1btcxe',
    'acx',
    'allcoin',
    'anxpro',
    'anybits',
    'bibox',
    'binance',
    'bit2c',
    'bitbank',
    'bitbay',
    'bitfinex',
    'bitfinex2',
    'bitflyer',
    'bithumb',
    'bitkk',
    'bitlish',
    'bitmarket',
    'bitmex',
    'bitsane',
    'bitso',
    'bitstamp',
    'bitstamp1',
    'bittrex',
    'bitz',
    'bl3p',
    'bleutrade',
    'braziliex',
    'btcbox',
    'btcchina',
    'btcexchange',
    'btcmarkets',
    'btctradeim',
    'btctradeua',
    'btcturk',
    'btcx',
    'bxinth',
    'ccex',
    'cex',
    'chbtc',
    'chilebit',
    'cobinhood',
    'coinbase',
    'coinbasepro',
    'coincheck',
    'coinegg',
    'coinex',
    'coinexchange',
    'coinfalcon',
    'coinfloor',
    'coingi',
    'coinmarketcap',
    'coinmate',
    'coinnest',
    'coinone',
    'coinsecure',
    'coinspot',
    'cointiger',
    'coolcoin',
    'crypton',
    'cryptopia',
    'deribit',
    'dsx',
    'ethfinex',
    'exmo',
    'exx',
    'fcoin',
    'flowbtc',
    'foxbit',
    'fybse',
    'fybsg',
    'gatecoin',
    'gateio',
    'gdax',
    'gemini',
    'getbtc',
    'hadax',
    'hitbtc',
    'hitbtc2',
    'huobi',
    'huobicny',
    'huobipro',
    'ice3x',
    'independentreserve',
    'indodax',
    'itbit',
    'jubi',
    'kraken',
    'kucoin',
    'kuna',
    'lakebtc',
    'lbank',
    'liqui',
    'livecoin',
    'luno',
    'lykke',
    'mercado',
    'mixcoins',
    'negociecoins',
    'nova',
    'okcoincny',
    'okcoinusd',
    'okex',
    'paymium',
    'poloniex',
    'qryptos',
    'quadrigacx',
    'quoinex',
    'southxchange',
    'surbitcoin',
    'therock',
    'tidebit',
    'tidex',
    'urdubit',
    'vaultoro',
    'vbtc',
    'virwox',
    'wex',
    'xbtce',
    'yobit',
    'yunbi',
    'zaif',
    'zb',
]
# Names that are always exported in addition to the exchange ids.
base = [
    'Exchange',
    'exchanges',
    'decimal_to_precision',
]
# Public API: base symbols, every error class, and every exchange id.
__all__ = base + errors.__all__ + exchanges
| 45.344828 | 80 | 0.529623 |
aec100dc4b23fc25fcd53f10cf96523e6efce923 | 3,292 | py | Python | caffe2/python/modeling/compute_histogram_for_blobs_test.py | shigengtian/caffe2 | e19489d6acd17fea8ca98cd8e4b5b680e23a93c5 | [
"Apache-2.0"
] | 1 | 2018-03-26T13:25:03.000Z | 2018-03-26T13:25:03.000Z | caffe2/python/modeling/compute_histogram_for_blobs_test.py | shigengtian/caffe2 | e19489d6acd17fea8ca98cd8e4b5b680e23a93c5 | [
"Apache-2.0"
] | null | null | null | caffe2/python/modeling/compute_histogram_for_blobs_test.py | shigengtian/caffe2 | e19489d6acd17fea8ca98cd8e4b5b680e23a93c5 | [
"Apache-2.0"
] | 1 | 2018-12-20T09:14:48.000Z | 2018-12-20T09:14:48.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.compute_histogram_for_blobs import (
ComputeHistogramForBlobs
)
import numpy as np
class ComputeHistogramForBlobsTest(unittest.TestCase):
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
assert X.ndim == 2, ('this test assume 2d array, but X.ndim is {0}'.
format(X.ndim))
N, M = X.shape
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
cur_hist = hist.astype(np.float32) / (N * M)
acc_hist = cur_hist
return [cur_hist, acc_hist]
def test_compute_histogram_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
num_buckets = 20
lower_bound = 0.2
upper_bound = 0.8
accumulate = False
net_modifier = ComputeHistogramForBlobs(blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
num_buckets=num_buckets,
lower_bound=lower_bound,
upper_bound=upper_bound,
accumulate=accumulate)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_curr_normalized_hist = workspace.FetchBlob('fc1_w_curr_normalized_hist')
cur_hist, acc_hist = self.histogram(fc1_w,
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
self.assertEqual(fc1_w_curr_normalized_hist.size, num_buckets + 2)
self.assertAlmostEqual(np.linalg.norm(
fc1_w_curr_normalized_hist - cur_hist), 0.0, delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 12)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
| 40.641975 | 86 | 0.582625 |
123e54aa36a52e7895cb5f2875e25a785c5c8373 | 7,268 | py | Python | utils/metrics.py | aimagelab/MCMR | eb3556bffebc734c19e7f3e39dcf018ba28c63b3 | [
"MIT"
] | 11 | 2021-10-11T12:48:47.000Z | 2022-03-01T02:00:00.000Z | utils/metrics.py | aimagelab/MCMR | eb3556bffebc734c19e7f3e39dcf018ba28c63b3 | [
"MIT"
] | 1 | 2022-01-19T00:47:58.000Z | 2022-01-23T14:52:45.000Z | utils/metrics.py | aimagelab/MCMR | eb3556bffebc734c19e7f3e39dcf018ba28c63b3 | [
"MIT"
] | 1 | 2022-02-08T14:53:54.000Z | 2022-02-08T14:53:54.000Z | from math import exp
import numpy as np
import torch
import torch.nn.functional as F
from scipy import linalg
from torch.autograd import Variable
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size``, normalized to sum to 1."""
    center = window_size // 2
    weights = [exp(-((i - center) ** 2) / float(2 * sigma ** 2))
               for i in range(window_size)]
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, window_size, window_size) Gaussian window for depthwise conv2d."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float()
    expanded = kernel_2d.unsqueeze(0).unsqueeze(0).expand(
        channel, 1, window_size, window_size)
    return Variable(expanded.contiguous())
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """Structural-similarity module that caches its Gaussian window.

    The window is rebuilt (and moved to the input's device/dtype) whenever
    the incoming batch has a different channel count or tensor type than
    the cached one.
    """

    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        _, channel, _, _ = img1.size()
        cache_usable = (
            channel == self.channel
            and self.window.data.type() == img1.data.type()
        )
        if cache_usable:
            window = self.window
        else:
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            # Remember the rebuilt window for subsequent calls.
            self.window = window
            self.channel = channel
        return _ssim(
            img1, img2, window, self.window_size, channel, self.size_average
        )
def ssim(img1, img2, window_size=11, size_average=True):
    """Functional SSIM between two image batches; builds a fresh Gaussian
    window per call and matches it to the input's device/dtype."""
    _, channel, _, _ = img1.size()
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, size_average)
def get_IoU(mask_gt, mask_pred):
    """Mean intersection-over-union between two batches of masks.

    Both masks are binarized with ``> 0`` before the comparison; the mean
    is taken over the batch dimension.
    """
    batch = mask_gt.shape[0]
    gt = (mask_gt.view(batch, -1).detach().cpu().numpy() > 0).astype(np.float32)
    pred = (mask_pred.view(batch, -1).detach().cpu().numpy() > 0).astype(np.float32)

    overlap = gt * pred
    union = gt + pred - overlap
    # Small epsilon avoids division by zero for empty unions.
    per_sample_iou = overlap.sum(1) / (union.sum(1) + 1e-12)
    return per_sample_iou.mean()
def get_PCK(kp_gt, kp_pred, img_size, alpha=0.1):
    """Percentage of Correct Keypoints (PCK) at tolerance ``alpha``.

    A prediction counts as correct when both coordinate distances to the
    ground truth are within ``alpha * img_size`` per axis.  Keypoints are
    split by kp_gt[..., 2]: 0 -> "visible" (annotated), nonzero ->
    "occluded" (projected); the returned histogram merges both groups.

    NOTE(review): both inputs appear normalized to [-1, 1] and are mapped
    here to pixel coordinates via ``* 127.5 + 127.5``, which presumes a
    256x256 image -- confirm against callers.
    NOTE(review): index [1] is compared against the x margin and index [0]
    against the y margin, i.e. coords look like (row, col) -- confirm.
    NOTE(review): ``.numpy()`` on a CPU tensor shares memory, so the
    in-place scaling of kp_gt writes back into the caller's tensor --
    verify this side effect is intended.

    Returns:
        (mean PCK over all keypoints, per-keypoint PCK array).
    """
    batch_size, n_kpoints, _ = kp_pred.shape
    kp_gt = kp_gt.detach().cpu().numpy()
    # Only x/y are rescaled; column 2 is the visibility flag.
    kp_gt[:, :, :2] = (kp_gt[:, :, :2] * 127.5) + 127.5
    kp_pred = kp_pred.detach().cpu().numpy()
    kp_pred = (kp_pred * 127.5) + 127.5
    x_margin = alpha * img_size[0]
    y_margin = alpha * img_size[1]
    # NaN marks "keypoint not in this group" so nanmean ignores it below.
    visible_corrects = np.full(shape=(batch_size, n_kpoints), fill_value=np.nan)
    occluded_corrects = np.full(shape=(batch_size, n_kpoints), fill_value=np.nan)
    for b in range(batch_size):
        for n in range(n_kpoints):
            true_kpoint_idx = kp_gt[b][n]
            pred_kpoint_idx = kp_pred[b][n]
            x_dist = np.abs(true_kpoint_idx[1] - pred_kpoint_idx[1])
            y_dist = np.abs(true_kpoint_idx[0] - pred_kpoint_idx[0])
            is_correct = int(x_dist <= x_margin and y_dist <= y_margin)
            if true_kpoint_idx[2] == 0:  # kpoint coords really annotated on image
                visible_corrects[b][n] = is_correct
            else:  # kpoint coords inferred by keypoints projection on image
                occluded_corrects[b][n] = is_correct
    # Each keypoint belongs to exactly one group, so nansum merges them.
    overall_corrects = np.nansum(np.stack([visible_corrects, occluded_corrects]), axis=0)
    hist_overall = np.nanmean(overall_corrects, 0)
    hist_visible = np.nanmean(visible_corrects, 0)
    hist_occluded = np.nanmean(occluded_corrects, 0)
    # Keypoints never seen in the batch would yield NaN; treat as 0 accuracy.
    hist_overall[np.isnan(hist_overall)] = 0
    hist_visible[np.isnan(hist_visible)] = 0
    hist_occluded[np.isnan(hist_occluded)] = 0
    return np.mean(hist_overall), hist_overall
def get_L1(input_imgs, pred_imgs):
    """Mean absolute error between predicted and target image batches."""
    criterion = torch.nn.L1Loss()
    return criterion(pred_imgs, input_imgs)
def get_SSIM(input_imgs, pred_imgs):
    """Mean SSIM between two image batches (delegates to :func:`ssim`)."""
    score = ssim(input_imgs, pred_imgs)
    return score
def get_FID(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Frechet Inception Distance between two multivariate Gaussians.

    For X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2):
        d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2 * sqrt(C_1 * C_2))

    Args:
        mu1, mu2: activation means of the two sample sets.
        sigma1, sigma2: corresponding covariance matrices.
        eps: diagonal jitter applied when the matrix square root is
            numerically singular.

    Returns:
        The Frechet distance (a float).
    """
    mu1 = np.atleast_1d(mu1)
    mu2 = np.atleast_1d(mu2)
    sigma1 = np.atleast_2d(sigma1)
    sigma2 = np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # Matrix square root of the covariance product; can fail numerically.
    sqrt_prod, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(sqrt_prod).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        offset = np.eye(sigma1.shape[0]) * eps
        sqrt_prod = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))

    # Numerical error might give slight imaginary component
    if np.iscomplexobj(sqrt_prod):
        if not np.allclose(np.diagonal(sqrt_prod).imag, 0, atol=1e-3):
            m = np.max(np.abs(sqrt_prod.imag))
            raise ValueError('Imaginary component {}'.format(m))
        sqrt_prod = sqrt_prod.real

    trace_term = np.trace(sqrt_prod)
    return (mean_diff.dot(mean_diff) + np.trace(sigma1)
            + np.trace(sigma2) - 2 * trace_term)
def compute_mean_and_cov(feats_list: list):
    """Stack per-sample feature vectors and return their (mean, covariance)."""
    stacked = np.stack(feats_list, axis=0)
    return stacked.mean(axis=0), np.cov(stacked, rowvar=False)
def get_feat(net, img: torch.Tensor):
    """Run ``net`` on ``img``, take its last output, pool it to a 1x1
    spatial map if needed, and return it as a squeezed CPU numpy array."""
    features = net(img)[-1]
    if tuple(features.shape[2:4]) != (1, 1):
        features = F.adaptive_avg_pool2d(features, output_size=(1, 1))
    return features.squeeze().to('cpu').numpy()
| 34.283019 | 114 | 0.647771 |
ce7c0ca1962445b9d8a3deba6c033d8f15e46807 | 6,584 | py | Python | Phylogeny Scripts/Genbanker.py | chibbargroup/CentralRepository | 0f152cdc207565656e8f6f10622517c5422f18f6 | [
"MIT"
] | null | null | null | Phylogeny Scripts/Genbanker.py | chibbargroup/CentralRepository | 0f152cdc207565656e8f6f10622517c5422f18f6 | [
"MIT"
] | null | null | null | Phylogeny Scripts/Genbanker.py | chibbargroup/CentralRepository | 0f152cdc207565656e8f6f10622517c5422f18f6 | [
"MIT"
] | null | null | null | '''
Genbanker.py
Written by J. Hayes, last edit 1/10/17
Purpose: Reads through lists of accession numbers generated through similarity comparisons,
blast searches, and general NCBI searches and fetches the corresponding Genbank file
Input: Blast search (.csv; file must have 'blast' or 'Blast' in the file name)
Accession list exported from web database search
Similarity files (.csv format)
Any csv file with accession numbers in a column; column should be labelled as 'axcn' or
change line ~67 in General_CSV_Parser function as needed
Output: Saves the genbank files in the specified output directory as .gb files; format of name is
accession_number.gb
Usage: python Genbanker.py Input_File_Directory Save_Directory Retrieval_Type
*Note: Retrieval type can be any of the following: Protein, protein, prot, Nucleotide, nucleotide, nt
(Other options to specifiy protein or nucleotide can be added to the lists in lines ~127, 128 in Program Runner)
'''
from os.path import isfile, isdir, join
from os import mkdir, listdir
from Bio import Entrez
import pandas as pd
import sys
### Always tell NCBI your email; change when used by a different user
# NCBI Entrez requires a contact e-mail on every request so they can
# reach the submitter about problems or excessive usage.
Entrez.email = 'john.hayes@usask.ca'
def Blast_Parser(file):
    """Extract GenBank accession numbers from an NCBI BLAST hit-table CSV.

    The NCBI export format places 7 metadata rows before the column header
    row, hence ``header=7``.  A ``Subject_ID`` cell may contain several
    matches separated by ';', each of the form ``gi|<gi>|gb|<accession>|``;
    the token immediately following a ``gb`` field is the accession.

    Returns the accession numbers as a list, in file order.
    """
    hits = pd.read_csv(file, header=7)
    gb_accessions = []
    for cell in hits['Subject_ID']:
        for match in cell.split(';'):
            grab_next = False
            for token in match.split('|'):
                if token == 'gb':
                    grab_next = True
                elif grab_next:
                    gb_accessions.append(token)
                    grab_next = False
    return gb_accessions
#Read the accession numbers from a CSV file with a column labelled "Accession Number"; good
#for results from similarity searches or more general lists
def General_CSV_Parser(file):
    """Read accession numbers from a CSV column named 'acxn', dropping duplicates.

    First-occurrence order is preserved.  Change the column name below if the
    input file labels its accession column differently.
    """
    frame = pd.read_csv(file)
    # dict.fromkeys keeps insertion order, so this de-duplicates in place.
    return list(dict.fromkeys(frame['acxn']))
#Reads the accession numbers generated from an NCBI search; these search results are exported from the
#NCBI website by clicking Send:; then selecting file, and selecting format: Accession Number, which generates a .txt file
def NCBI_Search_Reader(file):
    """Read one accession number per line from an NCBI website export (.txt).

    The export is produced via Send to -> File -> Format: Accession Number.
    """
    with open(file, 'r') as handle:
        return [line.strip('\n') for line in handle]
#Takes two lists, a and b, and combines them such that there are no repeats between the two
#Combines list_b into list_a, and returns list_a with the added results
def List_Combiner(list_a, list_b):
    """Merge list_b into list_a in place, skipping items already present.

    Returns list_a (the same object, mutated).
    """
    # extend consumes the generator lazily, so membership is checked against
    # list_a as it grows -- exactly like appending one item at a time.
    list_a.extend(item for item in list_b if item not in list_a)
    return list_a
#Fetches the protein genbank file for each accession in the list passed to it (accession_list), and saves
#the genbank files as separate .gb files in the specified file file path (save_path)
def NCBI_Protein_Genbank_Grabber(accession_list, save_path):
    """Fetch the protein GenBank record for each accession into save_path.

    Each record is saved as <accession>.gb; records already on disk are skipped.

    BUG FIX: the original opened the output file *before* calling
    Entrez.efetch, so a failed fetch left behind an empty .gb file that was
    then skipped as "already fetched" on every subsequent run.  The record is
    now downloaded first and only written once it has been read successfully.
    """
    for accession in accession_list:
        file_name = accession + '.gb'
        if not isfile(join(save_path, file_name)):
            print("Fetching %s" % accession)
            handle = Entrez.efetch(db='protein', id=accession, rettype='gb', retmode='text')
            record = handle.read()
            with open(join(save_path, file_name), 'w') as f:
                f.write(record)
        else:
            print("The entry %s has already been fetched" % accession)
#Fetches the nucleotide genbank file for each accession in the list passed to it (accession_list), and saves
#the genbank files as separate .gb files in the specified file file path (save_path)
def NCBI_Nucleotide_Genbank_Grabber(accession_list, save_path):
    """Fetch the nucleotide GenBank record for each accession into save_path.

    Each record is saved as <accession>.gb; records already on disk are skipped.

    BUG FIX: the original opened the output file *before* calling
    Entrez.efetch, so a failed fetch left behind an empty .gb file that was
    then skipped as "already fetched" on every subsequent run.  The record is
    now downloaded first and only written once it has been read successfully.
    """
    for accession in accession_list:
        file_name = accession + '.gb'
        if not isfile(join(save_path, file_name)):
            print("Fetching %s" % accession)
            handle = Entrez.efetch(db='nucleotide', id=accession, rettype='gb', retmode='text')
            record = handle.read()
            with open(join(save_path, file_name), 'w') as f:
                f.write(record)
        else:
            print("The entry %s has already been fetched" % accession)
def Program_Runner(file_directory, save_directory, rettype):
    """Parse every accession source file in file_directory, then fetch GenBank records.

    file_directory: folder holding the inputs -- .csv (similarity lists, or
        BLAST exports whose names contain 'blast'/'Blast') and .txt (NCBI
        website accession exports).
    save_directory: folder the .gb files are written into (created if missing).
    rettype: which database to fetch from; any spelling in the `protein` or
        `nucleotide` lists below.

    BUG FIX: the nucleotide branch read ``elif rettype nucleotide:`` (missing
    ``in``), which was a SyntaxError; it is now ``elif rettype in nucleotide:``.
    """
    # Collect the input files; make the save directory if necessary.
    file_list = [f for f in listdir(file_directory) if isfile(join(file_directory, f))]
    if not isdir(save_directory):
        mkdir(save_directory)
    accession_list = []
    # Accepted spellings for the retrieval type; extend as needed.
    protein = ['Protein', 'protein', 'prot', 'Prot']
    nucleotide = ['Nucleotide', 'nucleotide', 'nt', 'NT', 'Nt']
    # Route each file to the right parser based on extension and file name.
    for file in file_list:
        file = join(file_directory, file)
        if file[-4:] == '.csv' and 'blast' not in file and "Blast" not in file:
            results = General_CSV_Parser(file)
            accession_list = List_Combiner(accession_list, results)
        elif file[-4:] == '.csv':
            results = Blast_Parser(file)
            accession_list = List_Combiner(accession_list, results)
        elif file[-4:] == '.txt':
            results = NCBI_Search_Reader(file)
            accession_list = List_Combiner(accession_list, results)
    # Grab the genbank file for each accession number, from the requested database.
    if rettype in protein:
        NCBI_Protein_Genbank_Grabber(accession_list, save_directory)
    elif rettype in nucleotide:
        NCBI_Nucleotide_Genbank_Grabber(accession_list, save_directory)
    else:
        print("Retrieval type not understood; use Protein or Nucleotide")
# Command-line entry point: expects exactly three arguments.
if len(sys.argv) != 4:
    print("I'm sorry, I don't understand; I only take 3 arguments")
    print("These arguments are: 1) File directory, 2) Save directory, 3) Retrieval type")
elif not isdir(sys.argv[1]):
    print("The file directory you gave me doesn't seem to exist...try calling me again with the correct directory")
else:
    Program_Runner(sys.argv[1], sys.argv[2], sys.argv[3])
| 41.408805 | 121 | 0.744532 |
3aecc73480c3f0cbc49bb7e74cde901c47e48b1d | 164 | py | Python | NetLSD/netlsd/__init__.py | Abdumaleek/infinity-mirror | b493c5602d9e4bcf374b748e9b80e7c85be54a88 | [
"MIT"
] | 49 | 2018-06-05T08:20:41.000Z | 2022-02-25T11:06:04.000Z | NetLSD/netlsd/__init__.py | Abdumaleek/infinity-mirror | b493c5602d9e4bcf374b748e9b80e7c85be54a88 | [
"MIT"
] | 2 | 2021-11-10T19:47:00.000Z | 2022-02-10T01:24:59.000Z | NetLSD/netlsd/__init__.py | Abdumaleek/infinity-mirror | b493c5602d9e4bcf374b748e9b80e7c85be54a88 | [
"MIT"
] | 4 | 2019-02-12T16:26:48.000Z | 2019-11-21T00:26:44.000Z | # -*- coding: utf-8 -*-
__author__ = 'Anton Tsitsulin'
__email__ = 'anton.tsitsulin@hpi.de'
__version__ = '0.0.1'
from .kernels import heat, wave, netlsd, compare | 23.428571 | 48 | 0.695122 |
4e9edeb8cfe54326f8431f051b52f19911496292 | 18,775 | bzl | Python | bazel/repository_locations.bzl | toliu/pixie | 2547cf9ccfece6079323194110cf92d1371a28be | [
"Apache-2.0"
] | null | null | null | bazel/repository_locations.bzl | toliu/pixie | 2547cf9ccfece6079323194110cf92d1371a28be | [
"Apache-2.0"
] | null | null | null | bazel/repository_locations.bzl | toliu/pixie | 2547cf9ccfece6079323194110cf92d1371a28be | [
"Apache-2.0"
] | null | null | null | # Copyright 2018- The Pixie Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Pin table for http_archive-style external dependencies: each entry maps a
# workspace repo name to its sha256 checksum, optional strip_prefix, and
# download URL(s).  Consumed by the fetch logic in repositories.bzl.
REPOSITORY_LOCATIONS = dict(
    bazel_gazelle = dict(
        sha256 = "de69a09dc70417580aabf20a28619bb3ef60d038470c7cf8442fafcf627c21cb",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.24.0/bazel-gazelle-v0.24.0.tar.gz",
            "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.24.0/bazel-gazelle-v0.24.0.tar.gz",
        ],
    ),
    io_bazel_rules_go = dict(
        # NOTE: Many BPF test programs are written in Go, to avoid accidentally breaking them.
        # Run the following command when upgrading Golang version:
        # scripts/sudo_bazel_run.sh //src/stirling/source_connectors/socket_tracer:http2_trace_bpf_test
        #
        sha256 = "8e968b5fcea1d2d64071872b12737bbb5514524ee5f0a4f54f5920266c261acb",
        urls = [
            "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.28.0/rules_go-v0.28.0.zip",
            "https://github.com/bazelbuild/rules_go/releases/download/v0.28.0/rules_go-v0.28.0.zip",
        ],
    ),
    io_bazel_rules_scala = dict(
        sha256 = "0485168f15607ca3eab999ed531bd25596cf4a43b295552c80032ba0e056cd1a",
        urls = [
            "https://github.com/bazelbuild/rules_scala/archive/9d0d4f99ff79d5d454180a1c799ff1af1d380ed2.tar.gz",
        ],
        strip_prefix = "rules_scala-9d0d4f99ff79d5d454180a1c799ff1af1d380ed2",
    ),
    io_bazel_rules_k8s = dict(
        sha256 = "a08850199d6900328ef899906717fb1dfcc6cde62701c63725748b2e6ca1d5d9",
        strip_prefix = "rules_k8s-d05cbea5c56738ef02c667c10951294928a1d64a",
        urls = [
            "https://github.com/bazelbuild/rules_k8s/archive/d05cbea5c56738ef02c667c10951294928a1d64a.tar.gz",
        ],
    ),
    com_github_apache_arrow = dict(
        sha256 = "487ae884d035d9c8bbc052199268e6259d22cf502ee976e02661ee3f8e9468c0",
        strip_prefix = "arrow-ecbb9de0b4c8739347f7ffa9e7aee7e46926bbab",
        urls = ["https://github.com/pixie-io/arrow/archive/ecbb9de0b4c8739347f7ffa9e7aee7e46926bbab.tar.gz"],
    ),
    com_github_bazelbuild_buildtools = dict(
        sha256 = "d368c47bbfc055010f118efb2962987475418737e901f7782d2a966d1dc80296",
        strip_prefix = "buildtools-4.2.5",
        urls = ["https://github.com/bazelbuild/buildtools/archive/refs/tags/4.2.5.tar.gz"],
    ),
    com_google_benchmark = dict(
        sha256 = "dccbdab796baa1043f04982147e67bb6e118fe610da2c65f88912d73987e700c",
        strip_prefix = "benchmark-1.5.2",
        urls = ["https://github.com/google/benchmark/archive/refs/tags/v1.5.2.tar.gz"],
    ),
    com_github_packetzero_dnsparser = dict(
        sha256 = "bdf6c7f56f33725c1c32e672a4779576fb639dd2df565115778eb6be48296431",
        strip_prefix = "dnsparser-77398ffc200765db1cea9000d9f550ea99a29f7b",
        urls = ["https://github.com/pixie-io/dnsparser/archive/77398ffc200765db1cea9000d9f550ea99a29f7b.tar.gz"],
    ),
    com_github_serge1_elfio = dict(
        sha256 = "386bbeaac176683a68ee1941ab5b12dc381b7d43ff300cccca060047c2c9b291",
        strip_prefix = "ELFIO-9a70dd299199477bf9f8319424922d0fa436c225",
        urls = ["https://github.com/pixie-io/ELFIO/archive/9a70dd299199477bf9f8319424922d0fa436c225.tar.gz"],
    ),
    bazel_skylib = dict(
        sha256 = "e5d90f0ec952883d56747b7604e2a15ee36e288bb556c3d0ed33e818a4d971f2",
        strip_prefix = "bazel-skylib-1.0.2",
        urls = ["https://github.com/bazelbuild/bazel-skylib/archive/refs/tags/1.0.2.tar.gz"],
    ),
    io_bazel_rules_docker = dict(
        sha256 = "59536e6ae64359b716ba9c46c39183403b01eabfbd57578e84398b4829ca499a",
        strip_prefix = "rules_docker-0.22.0",
        urls = ["https://github.com/bazelbuild/rules_docker/archive/refs/tags/v0.22.0.tar.gz"],
    ),
    com_google_googletest = dict(
        sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb",
        strip_prefix = "googletest-release-1.10.0",
        urls = ["https://github.com/google/googletest/archive/release-1.10.0.tar.gz"],
    ),
    com_github_grpc_grpc = dict(
        sha256 = "27dd2fc5c9809ddcde8eb6fa1fa278a3486566dfc28335fca13eb8df8bd3b958",
        strip_prefix = "grpc-1.35.0",
        urls = ["https://github.com/grpc/grpc/archive/refs/tags/v1.35.0.tar.gz"],
    ),
    com_google_boringssl = dict(
        # NOTE(review): strip_prefix names a 40-char commit hash ("...c62") but
        # the URL ref below has only 39 chars ("...c6") -- confirm which one the
        # recorded sha256 actually corresponds to before touching either field.
        sha256 = "781fa39693ec2984c71213cd633e9f6589eaaed75e3a9ac413237edec96fd3b9",
        strip_prefix = "boringssl-83da28a68f32023fd3b95a8ae94991a07b1f6c62",
        urls = ["https://github.com/google/boringssl/" +
                "archive/83da28a68f32023fd3b95a8ae94991a07b1f6c6.tar.gz"],
    ),
    com_github_gflags_gflags = dict(
        sha256 = "9e1a38e2dcbb20bb10891b5a171de2e5da70e0a50fff34dd4b0c2c6d75043909",
        strip_prefix = "gflags-524b83d0264cb9f1b2d134c564ef1aa23f207a41",
        urls = ["https://github.com/gflags/gflags/archive/524b83d0264cb9f1b2d134c564ef1aa23f207a41.tar.gz"],
    ),
    com_github_google_glog = dict(
        sha256 = "95dc9dd17aca4e12e2cb18087a5851001f997682f5f0d0c441a5be3b86f285bd",
        strip_prefix = "glog-bc1fada1cf63ad12aee26847ab9ed4c62cffdcf9",
        # We cannot use the last released version due to https://github.com/google/glog/pull/706
        # Once there is a release that includes that fix, we can switch to a released version.
        urls = ["https://github.com/google/glog/archive/bc1fada1cf63ad12aee26847ab9ed4c62cffdcf9.tar.gz"],
    ),
    com_github_rlyeh_sole = dict(
        sha256 = "ff82a1d6071cbc9c709864266210ddedecdb2b1e507ac5e7c4290ca6453e89b3",
        strip_prefix = "sole-1.0.2",
        urls = ["https://github.com/r-lyeh-archived/sole/archive/refs/tags/1.0.2.tar.gz"],
    ),
    com_google_absl = dict(
        sha256 = "dcf71b9cba8dc0ca9940c4b316a0c796be8fab42b070bb6b7cab62b48f0e66c4",
        strip_prefix = "abseil-cpp-20211102.0",
        urls = ["https://github.com/abseil/abseil-cpp/archive/refs/tags/20211102.0.tar.gz"],
    ),
    com_google_flatbuffers = dict(
        sha256 = "9ddb9031798f4f8754d00fca2f1a68ecf9d0f83dfac7239af1311e4fd9a565c4",
        strip_prefix = "flatbuffers-2.0.0",
        urls = ["https://github.com/google/flatbuffers/archive/refs/tags/v2.0.0.tar.gz"],
    ),
    com_google_double_conversion = dict(
        sha256 = "3dbcdf186ad092a8b71228a5962009b5c96abde9a315257a3452eb988414ea3b",
        strip_prefix = "double-conversion-3.2.0",
        urls = ["https://github.com/google/double-conversion/archive/refs/tags/v3.2.0.tar.gz"],
    ),
    com_google_protobuf = dict(
        sha256 = "bc3dbf1f09dba1b2eb3f2f70352ee97b9049066c9040ce0c9b67fb3294e91e4b",
        strip_prefix = "protobuf-3.15.5",
        urls = ["https://github.com/protocolbuffers/protobuf/archive/refs/tags/v3.15.5.tar.gz"],
    ),
    com_intel_tbb = dict(
        sha256 = "ebc4f6aa47972daed1f7bf71d100ae5bf6931c2e3144cf299c8cc7d041dca2f3",
        strip_prefix = "oneTBB-2020.3",
        urls = ["https://github.com/oneapi-src/oneTBB/archive/refs/tags/v2020.3.tar.gz"],
    ),
    com_github_libarchive_libarchive = dict(
        sha256 = "b60d58d12632ecf1e8fad7316dc82c6b9738a35625746b47ecdcaf4aed176176",
        strip_prefix = "libarchive-3.4.2",
        urls = ["https://github.com/libarchive/libarchive/releases/download/v3.4.2/libarchive-3.4.2.tar.gz"],
    ),
    com_google_farmhash = dict(
        sha256 = "09b5da9eaa7c7f4f073053c1c6c398e320ca917e74e8f366fd84679111e87216",
        strip_prefix = "farmhash-2f0e005b81e296fa6963e395626137cf729b710c",
        urls = ["https://github.com/google/farmhash/archive/2f0e005b81e296fa6963e395626137cf729b710c.tar.gz"],
    ),
    com_github_tencent_rapidjson = dict(
        sha256 = "4a34a0c21794f067afca6c9809971f0bd77a1d1834c4dc53bdd09e4ab4d05ce4",
        strip_prefix = "rapidjson-f56928de85d56add3ca6ae7cf7f119a42ee1585b",
        urls = ["https://github.com/Tencent/rapidjson/archive/f56928de85d56add3ca6ae7cf7f119a42ee1585b.tar.gz"],
    ),
    com_github_ariafallah_csv_parser = dict(
        sha256 = "c722047128c97b7a3f38d0c320888d905692945e4a96b6ebd6d208686764644a",
        strip_prefix = "csv-parser-e3c1207f4de50603a4946dc5daa0633ce31a9257",
        urls = ["https://github.com/AriaFallah/csv-parser/archive/e3c1207f4de50603a4946dc5daa0633ce31a9257.tar.gz"],
    ),
    rules_foreign_cc = dict(
        sha256 = "1df78c7d7eed2dc21b8b325a2853c31933a81e7b780f9a59a5d078be9008b13a",
        strip_prefix = "rules_foreign_cc-0.7.0",
        urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/refs/tags/0.7.0.tar.gz"],
    ),
    com_github_gperftools_gperftools = dict(
        sha256 = "ea566e528605befb830671e359118c2da718f721c27225cbbc93858c7520fee3",
        strip_prefix = "gperftools-2.9.1",
        urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-2.9.1/gperftools-2.9.1.tar.gz"],
    ),
    com_github_h2o_picohttpparser = dict(
        sha256 = "cb47971984d77dc81ed5684d51d668a7bc7804d3b7814a3072c2187dfa37a013",
        strip_prefix = "picohttpparser-1d2b8a184e7ebe6651c30dcede37ba1d89691351",
        urls = ["https://github.com/h2o/picohttpparser/archive/1d2b8a184e7ebe6651c30dcede37ba1d89691351.tar.gz"],
    ),
    distroless = dict(
        sha256 = "54273175a54eedc558b8188ca810b184b0784815d3af17cc5fd9c296be4c150e",
        strip_prefix = "distroless-18b2d2c5ebfa58fe3e0e4ee3ffe0e2651ec0f7f6",
        urls = ["https://github.com/GoogleContainerTools/distroless/" +
                "archive/18b2d2c5ebfa58fe3e0e4ee3ffe0e2651ec0f7f6.tar.gz"],
    ),
    com_github_nats_io_natsc = dict(
        sha256 = "c2b5a5e62dfbdcb110f00960c413ab6e8ef09dd71863c15c9f81aa598dcd339d",
        strip_prefix = "nats.c-2.6.0",
        urls = ["https://github.com/nats-io/nats.c/archive/refs/tags/v2.6.0.tar.gz"],
    ),
    com_github_libuv_libuv = dict(
        sha256 = "371e5419708f6aaeb8656671f89400b92a9bba6443369af1bb70bcd6e4b3c764",
        strip_prefix = "libuv-1.42.0",
        urls = ["https://github.com/libuv/libuv/archive/refs/tags/v1.42.0.tar.gz"],
    ),
    com_github_cameron314_concurrentqueue = dict(
        sha256 = "eb37336bf9ae59aca7b954db3350d9b30d1cab24b96c7676f36040aa76e915e8",
        strip_prefix = "concurrentqueue-1.0.3",
        urls = ["https://github.com/cameron314/concurrentqueue/archive/refs/tags/v1.0.3.tar.gz"],
    ),
    com_github_neargye_magic_enum = dict(
        sha256 = "4fe6627407a656d0d73879c0346b251ccdcfb718c37bef5410ba172c7c7d5f9a",
        strip_prefix = "magic_enum-0.7.0",
        urls = ["https://github.com/Neargye/magic_enum/archive/refs/tags/v0.7.0.tar.gz"],
    ),
    com_github_arun11299_cpp_jwt = dict(
        sha256 = "6dbf93969ec48d97ecb6c157014985846df8c01995a0011c21f4e2c146594922",
        strip_prefix = "cpp-jwt-1.1.1",
        urls = ["https://github.com/arun11299/cpp-jwt/archive/refs/tags/v1.1.1.tar.gz"],
    ),
    com_github_cyan4973_xxhash = dict(
        sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7",
        strip_prefix = "xxHash-0.7.3",
        urls = ["https://github.com/Cyan4973/xxHash/archive/refs/tags/v0.7.3.tar.gz"],
    ),
    com_github_nlohmann_json = dict(
        sha256 = "87b5884741427220d3a33df1363ae0e8b898099fbc59f1c451113f6732891014",
        urls = ["https://github.com/nlohmann/json/releases/download/v3.7.3/include.zip"],
    ),
    org_tensorflow = dict(
        sha256 = "f681331f8fc0800883761c7709d13cda11942d4ad5ff9f44ad855e9dc78387e0",
        strip_prefix = "tensorflow-2.4.1",
        urls = ["https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.4.1.tar.gz"],
    ),
    io_bazel_rules_closure = dict(
        sha256 = "9498e57368efb82b985db1ed426a767cbf1ba0398fd7aed632fc3908654e1b1e",
        strip_prefix = "rules_closure-0.12.0",
        urls = ["https://github.com/bazelbuild/rules_closure/archive/refs/tags/0.12.0.tar.gz"],
    ),
    # August 19, 2020.
    com_github_google_sentencepiece = dict(
        sha256 = "1c0bd83e03f71a10fc934b7ce996e327488b838587f03159fd392c77c7701389",
        strip_prefix = "sentencepiece-0.1.95",
        urls = ["https://github.com/google/sentencepiece/archive/refs/tags/v0.1.95.tar.gz"],
    ),
    rules_python = dict(
        urls = ["https://github.com/bazelbuild/rules_python/releases/download/0.2.0/rules_python-0.2.0.tar.gz"],
        sha256 = "778197e26c5fbeb07ac2a2c5ae405b30f6cb7ad1f5510ea6fdac03bded96cc6f",
    ),
    com_github_cmcqueen_aes_min = dict(
        sha256 = "dd82d23976695d857924780c262952cdb12ddbb56e6bdaf5a2270dccc851d279",
        strip_prefix = "aes-min-0.3.1",
        urls = ["https://github.com/cmcqueen/aes-min/releases/download/0.3.1/aes-min-0.3.1.tar.gz"],
    ),
    com_github_derrickburns_tdigest = dict(
        sha256 = "e420c7f9c73fe2af59ab69f302ea8279ec41ae3d241b749277761fdc2e8abfd7",
        strip_prefix = "tdigest-85e0f70092460e60236821db4c25143768d3da12",
        urls = ["https://github.com/pixie-io/tdigest/archive/85e0f70092460e60236821db4c25143768d3da12.tar.gz"],
    ),
    com_github_vinzenz_libpypa = dict(
        sha256 = "7ea0fac21dbf4e2496145c8d73b03250d3f31b46147a0abce174ea23dc1dd7ea",
        strip_prefix = "libpypa-32a0959ab43b1f31db89bc3e8d0133a515af945e",
        urls = ["https://github.com/pixie-io/libpypa/archive/32a0959ab43b1f31db89bc3e8d0133a515af945e.tar.gz"],
    ),
    com_github_thoughtspot_threadstacks = dict(
        sha256 = "e54d4c3cd5af3cc136cc952c1ef77cd90b41133cd61140d8488e14c6d6f795e9",
        strip_prefix = "threadstacks-94adbe26c4aaf9ca945fd7936670d40ec6f228fb",
        urls = ["https://github.com/pixie-io/threadstacks/archive/94adbe26c4aaf9ca945fd7936670d40ec6f228fb.tar.gz"],
    ),
    com_github_antlr_antlr4 = dict(
        urls = ["https://github.com/antlr/antlr4/archive/refs/tags/4.9.2.tar.gz"],
        strip_prefix = "antlr4-4.9.2",
        sha256 = "6c86ebe2f3583ac19b199e704bdff9d70379f12347f7f2f1efa38051cd9a18cf",
    ),
    com_github_antlr_grammars_v4 = dict(
        urls = ["https://github.com/antlr/grammars-v4/archive/e53d7a1228505bfc80d8637808ef60e7eea92cc2.tar.gz"],
        strip_prefix = "grammars-v4-e53d7a1228505bfc80d8637808ef60e7eea92cc2",
        sha256 = "9858e4a9944cac85830e6cf8edd9d567227af96d8b75f0b31accc525ec842c30",
    ),
    com_github_pgcodekeeper_pgcodekeeper = dict(
        urls = ["https://github.com/pgcodekeeper/pgcodekeeper/archive/refs/tags/v5.11.3.tar.gz"],
        strip_prefix = "pgcodekeeper-5.11.3",
        sha256 = "b80d88f447566733f887a2c21ad6072751297459e79fa5acfc99e5db3a9418a1",
    ),
    com_github_google_re2 = dict(
        urls = ["https://github.com/google/re2/archive/refs/tags/2021-08-01.tar.gz"],
        strip_prefix = "re2-2021-08-01",
        sha256 = "cd8c950b528f413e02c12970dce62a7b6f37733d7f68807e73a2d9bc9db79bc8",
    ),
    com_github_simdutf_simdutf = dict(
        urls = ["https://github.com/simdutf/simdutf/archive/refs/tags/v1.0.0.tar.gz"],
        strip_prefix = "simdutf-1.0.0",
        sha256 = "a91056e53e566070068203b77a4607fec41920b923712464cf54e12a760cd0a6",
    ),
    com_github_opentelemetry_proto = dict(
        urls = ["https://github.com/open-telemetry/opentelemetry-proto/archive/refs/tags/v0.10.0.tar.gz"],
        strip_prefix = "opentelemetry-proto-0.10.0",
        sha256 = "f1004a49f40d7acb43e86b1fd95f73e80c778acb163e309bba86f0cbd7fa8a71",
    ),
    com_github_jupp0r_prometheus_cpp = dict(
        urls = ["https://github.com/jupp0r/prometheus-cpp/archive/refs/tags/v0.13.0.tar.gz"],
        strip_prefix = "prometheus-cpp-0.13.0",
        sha256 = "5319b77d6dc73af34bc256e7b18a7e0da50c787ef6f9e32785d045428b6473cc",
    ),
    com_github_USCiLab_cereal = dict(
        urls = ["https://github.com/USCiLab/cereal/archive/refs/tags/v1.3.1.tar.gz"],
        strip_prefix = "cereal-1.3.1",
        sha256 = "65ea6ddda98f4274f5c10fb3e07b2269ccdd1e5cbb227be6a2fd78b8f382c976",
    ),
    rules_jvm_external = dict(
        urls = ["https://github.com/bazelbuild/rules_jvm_external/archive/refs/tags/4.2.tar.gz"],
        sha256 = "2cd77de091e5376afaf9cc391c15f093ebd0105192373b334f0a855d89092ad5",
        strip_prefix = "rules_jvm_external-4.2",
    ),
)
# To modify one of the forked repos below:
# 1. Make the changes to the repo and push the changes to the `pixie` on github.
# 2. Update the commit below to point to the commit hash of the new `pixie` branch.
#
# To use a local repo for local development, change `remote` to a file path.
# ex: remote = "/home/user/src/pixie-io/bcc"
# Then change the local repo, commit the change, and replace `commit` with your new commit.
# See LOCAL_REPOSITORY_LOCATIONS for an alternative approach.
# Pixie-maintained forks, fetched as git repositories pinned to an exact
# commit (see the modification instructions in the comment block above).
GIT_REPOSITORY_LOCATIONS = dict(
    com_github_iovisor_bcc = dict(
        remote = "https://github.com/pixie-io/bcc.git",
        commit = "f55c4deb76aaf483a50fcf7dff30e9076ef42b99",
        shallow_since = "1643059595 -0800",
    ),
    com_github_iovisor_bpftrace = dict(
        remote = "https://github.com/pixie-io/bpftrace.git",
        commit = "4dba68c62465ac35ff52c47eec376d1bfc035368",
        shallow_since = "1637018996 -0800",
    ),
    com_github_apangin_jattach = dict(
        remote = "https://github.com/pixie-io/jattach.git",
        commit = "fa36a4fa141b4e9486b9126640d54a94c1d36fce",
        shallow_since = "1638898188 -0800",
    ),
)
# To use a local repo for local development, update the path to point to your local repo.
# ex: path = "/home/user/pixie-io/bcc"
# then uncomment the lines with `_local_repo(name_of_repo_you_care_about, ...)` in `repositories.bzl` and
# comment out the corresponding lines with `_git_repo(name_of_repo_you_care_about, ...)`.
# Note that if you do this, you have to handle the building of these repos' artifacts yourself.
# See `bazel/external/local_dev` for more info about the right cmake commands for building these repos yourself.
# WARNING: doing this has some downsides, so don't do it for production builds. For instance,
# cflags and other settings set by bazel (eg -O3) won't be used, since you have to do the building manually.
# Local checkout paths used only for development builds (see the comment block
# above for how to switch repositories.bzl over to these).
LOCAL_REPOSITORY_LOCATIONS = dict(
    com_github_iovisor_bcc = dict(
        path = "/home/user/pixie-io/bcc",
    ),
    com_github_iovisor_bpftrace = dict(
        path = "/home/user/pixie-io/bpftrace",
    ),
)
| 54.106628 | 132 | 0.717603 |
65a465cfec24f498e82ffbfa0ca0cb46e648c2f6 | 467 | py | Python | src/sentry/web/frontend/debug/debug_note_email.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | src/sentry/web/frontend/debug/debug_note_email.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/frontend/debug/debug_note_email.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from sentry.models import Activity
from .mail import ActivityMailDebugView, get_random, make_message
class DebugNoteEmailView(ActivityMailDebugView):
    """Debug mail view that renders a fake "note" activity with random text."""

    def get_activity(self, request, event):
        rng = get_random(request)
        # Draw a word count from a long-tailed distribution, but never below 2.
        word_count = max(2, int(rng.weibullvariate(12, 0.4)))
        activity = {
            "type": Activity.NOTE,
            "user": request.user,
            "data": {"text": make_message(rng, word_count)},
        }
        return activity
| 29.1875 | 96 | 0.672377 |
f7cc26e803be8a25bf0c6da550b983ec00c7ca18 | 1,395 | py | Python | serpent/analytics_client.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
] | 6,762 | 2017-09-17T20:28:40.000Z | 2022-03-31T12:35:47.000Z | serpent/analytics_client.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
] | 159 | 2017-09-19T21:54:58.000Z | 2021-03-26T18:15:58.000Z | serpent/analytics_client.py | nanpuhaha/SerpentAI | 6af1105fc0a970227a0d7c11e6a0da1bd0bacec6 | [
"MIT"
] | 880 | 2017-09-23T01:16:50.000Z | 2022-03-27T18:58:30.000Z | from redis import StrictRedis
from datetime import datetime
from pprint import pprint
from serpent.config import config
import json
class AnalyticsClientError(Exception):
    """Raised for analytics client misuse (e.g. a missing project key).

    BUG FIX: this previously derived from ``BaseException``, which generic
    ``except Exception`` handlers do not catch; ``BaseException`` is reserved
    for exit-style signals (SystemExit, KeyboardInterrupt).  Existing handlers
    that catch ``AnalyticsClientError`` directly are unaffected.
    """
class AnalyticsClient:
    """Buffers analytics events for one project onto a Redis list."""

    def __init__(self, project_key=None):
        if project_key is None:
            raise AnalyticsClientError("'project_key' kwarg is expected...")

        self.project_key = project_key
        self.redis_client = StrictRedis(**config["redis"])

        analytics_config = config["analytics"]
        self.broadcast = analytics_config.get("broadcast", False)
        self.debug = analytics_config.get("debug", False)
        self.event_whitelist = analytics_config.get("event_whitelist")

    @property
    def redis_key(self):
        # Name of the Redis list this project's events are pushed onto.
        return f"SERPENT:{self.project_key}:EVENTS"

    def track(self, event_key=None, data=None, timestamp=None, is_persistable=True):
        # Drop events outside the whitelist (a None whitelist allows everything).
        if self.event_whitelist is not None and event_key not in self.event_whitelist:
            return

        event = {
            "project_key": self.project_key,
            "event_key": event_key,
            "data": data,
            "timestamp": timestamp if timestamp is not None else datetime.utcnow().isoformat(),
            "is_persistable": is_persistable,
        }

        if self.debug:
            pprint(event)
        if self.broadcast:
            self.redis_client.lpush(self.redis_key, json.dumps(event))
7e4c7eaf745a4d9438664d046efbb37a85962a0d | 7,697 | py | Python | codetest/_codetest.py | anuragpatil94/codetest | 03768957f98fb9fb4bc5a19a3c0c875e3c33e0f6 | [
"MIT"
] | null | null | null | codetest/_codetest.py | anuragpatil94/codetest | 03768957f98fb9fb4bc5a19a3c0c875e3c33e0f6 | [
"MIT"
] | null | null | null | codetest/_codetest.py | anuragpatil94/codetest | 03768957f98fb9fb4bc5a19a3c0c875e3c33e0f6 | [
"MIT"
] | 1 | 2021-12-07T01:29:32.000Z | 2021-12-07T01:29:32.000Z | from timeit import default_timer as timer
if __package__:
from ._utils import _LinkedList, _BinaryTree, _ListNode, _BinaryTreeNode
else:
from _utils import _LinkedList, _BinaryTree, _ListNode, _BinaryTreeNode
####################################################
# I/O OBJECT
####################################################
class _IOObject:
def __init__(self, value, type=None, default=None, options={}):
self.value = value
self.type = type
self.default = default
self.options = options
def __repr__(self):
return str(
{
"value": self.value,
"type": self.type,
"default": self.default,
"options": self.options,
}
)
####################################################
# TYPE
####################################################
class _Type:
def __init__(self):
pass
def getDefaultTypeAsClass(self, typeAsString):
defaultTypes = {
"int": int,
"float": float,
"list": list,
"tuple": tuple,
"dict": dict,
"set": set,
"bool": bool,
"str": str,
}
return defaultTypes.get(typeAsString, None)
def getCustomTypeAsClass(self, typeAsString):
customTypes = {
"linkedlist": _LinkedList,
"binarytree": _BinaryTree,
}
return customTypes.get(typeAsString, None)
def getTypeAsString(self, data) -> str:
if isinstance(data, int):
return "int"
elif isinstance(data, str):
return "str"
elif isinstance(data, float):
return "float"
elif isinstance(data, list):
return "list"
elif isinstance(data, tuple):
return "tuple"
elif isinstance(data, dict):
return "dict"
elif isinstance(data, set):
return "set"
elif isinstance(data, bool):
return "bool"
elif isinstance(data, _ListNode):
return "linkedlist"
elif isinstance(data, _BinaryTreeNode):
return "binarytree"
def getConversionType(self, data, conversionTypeString=None):
actualDataTypeString = self.getTypeAsString(data)
if self.getCustomTypeAsClass(actualDataTypeString) or not conversionTypeString:
return (
actualDataTypeString,
self.getCustomTypeAsClass(actualDataTypeString)
or self.getDefaultTypeAsClass(actualDataTypeString),
)
if conversionTypeString:
return (
conversionTypeString,
self.getCustomTypeAsClass(conversionTypeString)
or self.getDefaultTypeAsClass(conversionTypeString),
)
class _CodeTest:
    """Runs a list of test specs (dicts) against a user-supplied Problem class.

    Each spec may contain:
      "function": method name on Problem to call (defaults to "main"),
      "params":   {"input": [...], "output": [...]} where each entry is a dict
                  with at least a "value" key and optional "type"/"default" keys.
    """

    def __init__(self, tests: list) -> None:
        self.tests = tests

    def run(self, Problem: object):
        # For each test get input and outputs
        for index, test in enumerate(self.tests):
            # function to test
            function = test["function"] if "function" in test else "main"
            # get input and output params and create list of _IOObject
            inputParams = None
            outputParams = None
            if "params" in test:
                params = test["params"]
                inputParams = (
                    self._containerize(params["input"]) if "input" in params else None
                )
                outputParams = (
                    self._containerize(params["output"]) if "output" in params else None
                )
            # Run a test on the function
            sTest = _SingleTest(Problem, function, index, inputParams, outputParams)
            sTest.run()

    def _containerize(self, ios: list) -> list:
        """Creates a list of IOObject containing Input or Output data.

        NOTE(review): the io dicts are mutated in place ("value"/"type"/
        "default" are popped out), and whatever remains becomes the options
        dict -- callers should not reuse the spec dicts afterwards.
        """
        Type = _Type()
        arr = []
        try:
            for io in ios:
                data = io.pop("value")
                convTypeStr = io.pop("type") if "type" in io else None
                default = io.pop("default") if "default" in io else None
                options = io
                # A None value is stored as-is with no type conversion.
                if data is None:
                    arr.append(_IOObject(data, None, default, options))
                    continue
                convTypeStr, convTypeCls = Type.getConversionType(data, convTypeStr)
                try:
                    data = convTypeCls(data)
                except TypeError as te:
                    raise TypeError(
                        "data `{}` cannot be converted to type:`{}`".format(
                            str(data), convTypeCls
                        )
                    )
                obj = _IOObject(data, convTypeStr, default, options)
                arr.append(obj)
            return arr
        except Exception as e:
            # NOTE(review): any failure (including the re-raised TypeError
            # above) is printed and swallowed here, and the method then
            # returns None -- confirm this best-effort behavior is intended.
            print(e)

    def visualize(self):
        # Placeholder: not implemented.
        pass
class _SingleTest:
    """Executes one test case: calls `fn` on an instance of `cls` with the
    given inputs, optionally type-converts the result, and prints a formatted
    report when the computed output differs from the expected one.
    """

    def __init__(
        self,
        cls: object,
        fn: str,
        testIndex: int,
        input: [_IOObject],
        output: [_IOObject],
    ):
        self.cls = cls
        self.fn = fn
        self.testIndex = testIndex
        self.input = input
        self.output = output

    def _getInputArray(self):
        # Placeholder: not implemented.
        pass

    def _getOutputArray(self):
        # Placeholder: not implemented.
        pass

    def _getErrorMessage(self, expectedOutput, computedOutput, time):
        # Any output here will be in the std type format which can be easily converted to string
        strExpectedOp = str(expectedOutput)
        strComputedOp = str(computedOutput)
        minHorizontalLen = 60
        heading = "[TEST {}]".format(str(self.testIndex)).center(minHorizontalLen, "-")
        txt = """{}\nExpected Output: {}\nComputed Output: {}\n{}\n{}
        """.format(
            heading,
            strExpectedOp,
            strComputedOp,
            str(("[Time: " + str(round(time * 1000, 3))) + "ms]").rjust(
                minHorizontalLen
            ),
            "".center(minHorizontalLen, "-"),
        )
        return txt

    def run(self):
        # get input list
        inputParams = []
        if self.input is not None:
            for input in self.input:
                inputParams.append(input.value)
        # get output (only the first expected-output object is used)
        expectedOpObj = None
        if self.output is not None:
            expectedOpObj = self.output[0]
        # run test
        try:
            DynamicClass = self.cls()
            fnToExecute = getattr(DynamicClass, self.fn)
        except:
            # NOTE(review): this bare except only prints; if lookup failed,
            # fnToExecute is unbound and the call below raises NameError.
            print("Cannot find method ", self.fn)
        start = timer()
        computedOp = fnToExecute(*inputParams)
        end = timer()
        totaltime = end - start
        try:
            # type cast
            if computedOp is not None and expectedOpObj is not None:
                # Find if output needs to be converted
                convTypeStr, convTypeCls = _Type().getConversionType(
                    computedOp, expectedOpObj.type
                )
                computedOp = convTypeCls(computedOp)
        except TypeError as te:
            # NOTE(review): convTypeCls may be unbound here if
            # getConversionType itself raised the TypeError.
            raise TypeError(
                "data `{}` cannot be converted to type:`{}`".format(
                    str(computedOp), convTypeCls
                )
            )
        expectedOp = None
        if expectedOpObj is not None:
            expectedOp = expectedOpObj.value
        try:
            # NOTE(review): assert statements are stripped under `python -O`,
            # which would silently disable this comparison.
            assert computedOp == expectedOp
        except Exception as e:
            print(self._getErrorMessage(expectedOp, computedOp, totaltime))

    def _execute(self, *args):
        # Placeholder: not implemented.
        pass
| 31.03629 | 96 | 0.516825 |
9eb65b71624210fafca63027ff073335b56d0cdf | 15,231 | py | Python | src/read_data.py | BII-wushuang/Lie-Group-Motion-Prediction | 2ce6ca6d798de99d1d0777c418cdc79596a4471d | [
"MIT"
] | 79 | 2019-07-05T04:16:04.000Z | 2022-02-24T06:07:09.000Z | src/read_data.py | BII-wushuang/Lie-Group-Motion-Prediction | 2ce6ca6d798de99d1d0777c418cdc79596a4471d | [
"MIT"
] | 8 | 2019-07-30T08:59:30.000Z | 2022-02-28T08:55:39.000Z | src/read_data.py | BII-wushuang/Lie-Group-Motion-Prediction | 2ce6ca6d798de99d1d0777c418cdc79596a4471d | [
"MIT"
] | 19 | 2019-07-11T12:18:30.000Z | 2022-01-13T13:53:28.000Z | import numpy as np
import tensorflow as tf
import scipy.io as sio
import general_utils as data_utils
import copy
def read_data(config, training=False):
if config.dataset == 'Human':
train_set, test_set, x_test, y_test, dec_in_test, config = read_human(config, training)
elif config.dataset == 'Mouse':
train_set, test_set, x_test, y_test, dec_in_test, config = read_mouse(config, training)
elif config.dataset == 'Fish':
train_set, test_set, x_test, y_test, dec_in_test, config = read_fish(config, training)
return [train_set, test_set, x_test, y_test, dec_in_test, config]
def read_human(config, training):
seq_length_in = config.input_window_size
if training:
seq_length_out = config.output_window_size
else:
seq_length_out = config.test_output_window
if training:
print("Reading {0} data for training: Input Sequence Length = {1}, Output Sequence Length = {2}.".format(
config.dataset, seq_length_in, seq_length_out))
else:
print("Reading {0} data for testing: Input Sequence Length = {1}, Output Sequence Length = {2}.".format(
config.dataset, seq_length_in, seq_length_out))
if config.filename == 'all':
actions = ['discussion', 'greeting', 'posing', 'walkingdog', 'directions', 'eating', 'phoning','purchases', 'sitting',
'sittingdown', 'smoking', 'takingphoto', 'waiting', 'walking', 'walkingtogether']
# actions = ['walking', 'eating', 'smoking', 'discussion', 'directions', 'greeting', 'phoning', 'posing',
# 'purchases', 'sitting', 'sittingdown', 'takingphoto', 'waiting', 'walkingdog', 'walkingtogether']
else:
actions = [config.filename]
train_set = {}
complete_train = []
for subj in [1, 6, 7, 8, 9, 11]:
for action in actions:
for subact in [1, 2]:
if config.datatype == 'lie':
filename = '{0}/S{1}_{2}_{3}_lie.mat'.format('./data/h3.6m/Train/train_lie', subj, action, subact)
train_set[(subj, action, subact)] = sio.loadmat(filename)['lie_parameters']
if config.datatype == 'xyz':
filename = '{0}/S{1}_{2}_{3}_xyz.mat'.format('./data/h3.6m/Train/train_xyz', subj, action, subact)
train_set[(subj, action, subact)] = sio.loadmat(filename)['joint_xyz']
train_set[(subj, action, subact)] = train_set[(subj, action, subact)].reshape(
train_set[(subj, action, subact)].shape[0], -1)
if len(complete_train) == 0:
complete_train = copy.deepcopy(train_set[(subj, action, subact)])
else:
complete_train = np.append(complete_train, train_set[(subj, action, subact)], axis=0)
test_set = {}
complete_test = []
for subj in [5]:
for action in actions:
for subact in [1, 2]:
if config.datatype == 'lie':
filename = '{0}/S{1}_{2}_{3}_lie.mat'.format('./data/h3.6m/Test/test_lie', subj, action, subact)
test_set[(subj, action, subact)] = sio.loadmat(filename)['lie_parameters']
if config.datatype == 'xyz':
filename = '{0}/S{1}_{2}_{3}_xyz.mat'.format('./data/h3.6m/Test/test_xyz', subj, action, subact)
test_set[(subj, action, subact)] = sio.loadmat(filename)['joint_xyz']
test_set[(subj, action, subact)] = test_set[(subj, action, subact)].reshape(
test_set[(subj, action, subact)].shape[0], -1)
if len(complete_test) == 0:
complete_test = copy.deepcopy(test_set[(subj, action, subact)])
else:
complete_test = np.append(complete_test, test_set[(subj, action, subact)], axis=0)
if config.datatype == 'lie':
# Compute normalization stats
data_mean, data_std, dim_to_ignore, dim_to_use = data_utils.normalization_stats(complete_train)
# The global translation and rotation are not considered since we perform procrustes alignment
# dim_to_ignore = [0,1,2,3,4,5] + dim_to_ignore
# dim_to_use = dim_to_use[6:]
config.data_mean = data_mean
config.data_std = data_std
config.dim_to_ignore = dim_to_ignore
config.dim_to_use = dim_to_use
# Normalize: subtract mean, divide by std
train_set = data_utils.normalize_data(train_set, data_mean, data_std, dim_to_use)
test_set = data_utils.normalize_data(test_set, data_mean, data_std, dim_to_use)
expmapInd = np.split(np.arange(4, 100) - 1, 32)
weights = np.zeros([len(config.dim_to_use)])
for j in range(len(config.dim_to_use)):
for i in range(len(expmapInd)):
if config.dim_to_use[j] in expmapInd[i]:
weights[j] = i + 1
break
weights = list(map(int, weights))
chain = [[0], [132.95, 442.89, 454.21, 162.77, 75], [132.95, 442.89, 454.21, 162.77, 75],
[132.95, 253.38, 257.08, 121.13, 115], [0, 151.03, 278.88, 251.73, 100, 0, 0, 0],
[0, 151.03, 278.88, 251.73, 100, 0, 0, 0]]
for x in chain:
s = sum(x)
if s == 0:
continue
for i in range(len(x)):
x[i] = (i+1)*sum(x[i:])/s
chain = [item for sublist in chain for item in sublist]
config.weights = []
for i in range(len(weights)):
config.weights.append(chain[weights[i]])
config.input_size = train_set[list(train_set.keys())[0]].shape[1]
x_test = {}
y_test = {}
dec_in_test = {}
for action in actions:
encoder_inputs, decoder_inputs, decoder_outputs = get_batch_srnn(config, test_set, action, seq_length_out)
x_test[action] = encoder_inputs
y_test[action] = decoder_outputs
dec_in_test[action] = np.zeros(decoder_inputs.shape)
dec_in_test[action][:, 0, :] = decoder_inputs[:, 0, :]
print("Done reading data.")
return [train_set, test_set, x_test, y_test, dec_in_test, config]
def read_animals(config, training, train_subjects, test_subjects, train_path, x_test_path, y_test_path):
seq_length_in = config.input_window_size
if training:
seq_length_out = config.output_window_size
else:
seq_length_out = config.test_output_window
# Read a base file to obtain bone lengths
if config.datatype == 'lie':
filename = y_test_path + 'test_0' + '_lie.mat'
rawdata = sio.loadmat(filename)
matdict_key = list(rawdata.keys())[3]
rawdata = rawdata[matdict_key]
njoints = rawdata.shape[1]
bone = np.zeros([njoints, 3])
# Bone lengths
for i in range(njoints):
bone[i, 0] = round(rawdata[0, i, 3], 2)
elif config.datatype == 'xyz':
filename = y_test_path + 'test_0' + '.mat'
rawdata = sio.loadmat(filename)
matdict_key = list(rawdata.keys())[3]
rawdata = rawdata[matdict_key]
njoints = rawdata.shape[1]
bone = np.zeros([njoints, 3])
# Bone lengths
for i in range(1,njoints):
bone[i, 0] = round(np.linalg.norm(rawdata[0, i, :] - rawdata[0, i - 1, :]), 2)
bone_params = tf.convert_to_tensor(bone)
bone_params = tf.cast(bone_params, tf.float32)
config.bone = bone
config.bone_params = bone_params
config.output_window_size = np.min([config.output_window_size, rawdata.shape[0]])
config.test_output_window = np.min([config.test_output_window, rawdata.shape[0]])
seq_length_out = np.min([seq_length_out, rawdata.shape[0]])
if training:
print("Reading {0} data for training: Input Sequence Length = {1}, Output Sequence Length = {2}.".format(config.dataset, seq_length_in, seq_length_out))
else:
print("Reading {0} data for testing: Input Sequence Length = {1}, Output Sequence Length = {2}.".format(config.dataset, seq_length_in, seq_length_out))
# Read and prepare training data
train_set = {}
for id in train_subjects:
if config.datatype == 'lie':
filename = train_path + id + '_lie.mat'
rawdata = sio.loadmat(filename)
matdict_key = list(rawdata.keys())[3]
rawdata = rawdata[matdict_key]
data = rawdata[:, :-1, :3].reshape(rawdata.shape[0], -1)
train_set[id] = data
elif config.datatype == 'xyz':
filename = train_path + id + '_xyz.mat'
rawdata = sio.loadmat(filename)
matdict_key = list(rawdata.keys())[3]
rawdata = rawdata[matdict_key]
data = rawdata.reshape(rawdata.shape[0], -1)
train_set[id] = data
test_set = {}
for id in test_subjects:
if config.datatype == 'lie':
filename = train_path + id + '_lie.mat'
rawdata = sio.loadmat(filename)
matdict_key = list(rawdata.keys())[3]
rawdata = rawdata[matdict_key]
data = rawdata[:, :-1, :3].reshape(rawdata.shape[0], -1)
test_set[id] = data
elif config.datatype == 'xyz':
filename = train_path + id + '_xyz.mat'
rawdata = sio.loadmat(filename)
matdict_key = list(rawdata.keys())[3]
rawdata = rawdata[matdict_key]
data = rawdata.reshape(rawdata.shape[0], -1)
test_set[id] = data
# Read and prepare test data
x_test = []
y_test = []
for i in range(8):
if config.datatype == 'lie':
x_filename = x_test_path + 'test_' + str(i) + '_lie.mat'
y_filename = y_test_path + 'test_' + str(i) + '_lie.mat'
x_rawdata = sio.loadmat(x_filename)
matdict_key = list(x_rawdata.keys())[3]
x_rawdata = x_rawdata[matdict_key]
y_rawdata = sio.loadmat(y_filename)
matdict_key = list(y_rawdata.keys())[3]
y_rawdata = y_rawdata[matdict_key]
x_data = x_rawdata[:, :-1, :3].reshape(x_rawdata.shape[0], -1)
x_test.append(x_data)
y_data = y_rawdata[:, :-1, :3].reshape(y_rawdata.shape[0], -1)
y_test.append(y_data)
elif config.datatype == 'xyz':
x_filename = x_test_path + 'test_' + str(i) + '.mat'
y_filename = y_test_path + 'test_' + str(i) + '.mat'
x_rawdata = sio.loadmat(x_filename)
matdict_key = list(x_rawdata.keys())[3]
x_rawdata = x_rawdata[matdict_key]
y_rawdata = sio.loadmat(y_filename)
matdict_key = list(y_rawdata.keys())[3]
y_rawdata = y_rawdata[matdict_key]
x_data = x_rawdata.reshape(x_rawdata.shape[0], -1)
x_test.append(x_data)
y_data = y_rawdata.reshape(y_rawdata.shape[0], -1)
y_data = y_data[:seq_length_out, :]
y_test.append(y_data)
x_test = np.array(x_test)
y_test = np.array(y_test)
dec_in_test = np.concatenate(
(np.reshape(x_test[:, -1, :], [x_test.shape[0], 1, x_test.shape[2]]), y_test[:, 0:-1, :]), axis=1)
x_test = x_test[:, 0:-1, :]
x_test_dict = {}
y_test_dict = {}
dec_in_test_dict = {}
x_test_dict['default'] = x_test
y_test_dict['default'] = y_test
dec_in_test_dict['default'] = dec_in_test
print("Done reading data.")
config.input_size = x_test.shape[2]
return [train_set, test_set, x_test_dict, y_test_dict, dec_in_test_dict, config]
def read_mouse(config, training):
if config.datatype == 'lie':
train_path = './data/Mouse/Train/train_lie/'
x_test_path = './data/Mouse/Test/x_test_lie/'
y_test_path = './data/Mouse/Test/y_test_lie/'
elif config.datatype == 'xyz':
train_path = './data/Mouse/Train/train_xyz/'
x_test_path = './data/Mouse/Test/x_test_xyz/'
y_test_path = './data/Mouse/Test/y_test_xyz/'
train_subjects = ['S1', 'S3', 'S4']
test_subjects = ['S2']
train_set, test_set, x_test_dict, y_test_dict, dec_in_test_dict, config = read_animals(config, training, train_subjects, test_subjects, train_path, x_test_path, y_test_path)
return [train_set, test_set, x_test_dict, y_test_dict, dec_in_test_dict, config]
def read_fish(config, training):
if config.datatype == 'lie':
train_path = './data/Fish/Train/train_lie/'
x_test_path = './data/Fish/Test/x_test_lie/'
y_test_path = './data/Fish/Test/y_test_lie/'
elif config.datatype == 'xyz':
train_path = './data/Fish/Train/train_xyz/'
x_test_path = './data/Fish/Test/x_test_xyz/'
y_test_path = './data/Fish/Test/y_test_xyz/'
train_subjects = ['S1', 'S2', 'S3', 'S4', 'S5', 'S7', 'S8']
test_subjects = ['S6']
train_set, test_set, x_test_dict, y_test_dict, dec_in_test_dict, config = read_animals(config, training, train_subjects, test_subjects, train_path, x_test_path, y_test_path)
return [train_set, test_set, x_test_dict, y_test_dict, dec_in_test_dict, config]
def get_batch_srnn(config, data, action, target_seq_len):
# Obtain SRNN test sequences using the specified random seeds
frames = {}
frames[action] = find_indices_srnn( data, action )
batch_size = 8
subject = 5
source_seq_len = config.input_window_size
seeds = [(action, (i%2)+1, frames[action][i]) for i in range(batch_size)]
encoder_inputs = np.zeros((batch_size, source_seq_len-1, config.input_size), dtype=float )
decoder_inputs = np.zeros((batch_size, target_seq_len, config.input_size), dtype=float )
decoder_outputs = np.zeros((batch_size, target_seq_len, config.input_size), dtype=float )
for i in range(batch_size):
_, subsequence, idx = seeds[i]
idx = idx + 50
data_sel = data[(subject, action, subsequence)]
data_sel = data_sel[(idx-source_seq_len):(idx+target_seq_len), :]
encoder_inputs[i, :, :] = data_sel[0:source_seq_len-1, :] #x_test
decoder_inputs[i, :, :] = data_sel[source_seq_len-1:(source_seq_len+target_seq_len-1), :] #decoder_in_test
decoder_outputs[i, :, :] = data_sel[source_seq_len:, :] #y_test
return [encoder_inputs, decoder_inputs, decoder_outputs]
def find_indices_srnn(data, action):
"""
Obtain the same action indices as in SRNN using a fixed random seed
See https://github.com/asheshjain399/RNNexp/blob/master/structural_rnn/CRFProblems/H3.6m/processdata.py
"""
SEED = 1234567890
rng = np.random.RandomState(SEED)
subject = 5
subaction1 = 1
subaction2 = 2
T1 = data[(subject, action, subaction1)].shape[0]
T2 = data[(subject, action, subaction2)].shape[0]
prefix, suffix = 50, 100
idx = []
idx.append(rng.randint(16,T1-prefix-suffix))
idx.append(rng.randint(16,T2-prefix-suffix))
idx.append(rng.randint(16,T1-prefix-suffix))
idx.append(rng.randint(16,T2-prefix-suffix))
idx.append(rng.randint(16,T1-prefix-suffix))
idx.append(rng.randint(16,T2-prefix-suffix))
idx.append(rng.randint(16,T1-prefix-suffix))
idx.append(rng.randint(16,T2-prefix-suffix))
return idx
| 39.053846 | 177 | 0.614077 |
c2ab68ba5978aeb9f024a292527c58f092284131 | 4,928 | py | Python | setup.py | chaowentao/mmpose | b528c60ef4fab56d35d1ed7e187023794639be26 | [
"Apache-2.0"
] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | setup.py | chaowentao/mmpose | b528c60ef4fab56d35d1ed7e187023794639be26 | [
"Apache-2.0"
] | 27 | 2022-01-27T07:12:49.000Z | 2022-03-31T04:31:13.000Z | setup.py | chaowentao/mmpose | b528c60ef4fab56d35d1ed7e187023794639be26 | [
"Apache-2.0"
] | 53 | 2022-01-18T11:21:43.000Z | 2022-03-31T06:42:41.000Z | from setuptools import find_packages, setup
def readme():
    """Return the contents of README.md decoded as UTF-8."""
    with open('README.md', encoding='utf-8') as fh:
        return fh.read()
# Path to the module that defines __version__ / short_version; it is
# exec'd by get_version() below rather than imported.
version_file = 'mmpose/version.py'
def get_version():
    """Return the package version string read from ``version_file``.

    Executes the version module in this function's scope and pulls the
    resulting names back out of ``locals()``. For sdist/bdist_wheel
    builds the short version is returned instead of the full one.

    NOTE(review): relying on ``exec`` populating ``locals()`` inside a
    function is CPython-specific behavior — confirm before refactoring.
    """
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    import sys
    # return short version for sdist
    if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        return locals()['short_version']
    else:
        return locals()['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=True): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    import re
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: keep only the egg name.
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                # Direct git reference: keep the whole line as the package.
                info['package'] = line
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed entries for each non-empty, non-comment line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        # Re-assemble each entry as 'package[op version][;platform]'.
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
if __name__ == '__main__':
    # Package definition; dependency lists are read at build time from the
    # requirements/ files by parse_requirements() above.
    setup(
        name='mmpose',
        version=get_version(),
        description='OpenMMLab Pose Estimation Toolbox and Benchmark.',
        maintainer='MMPose Authors',
        maintainer_email='openmmlab@gmail.com',
        long_description=readme(),
        long_description_content_type='text/markdown',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        # Ship prebuilt shared libraries for the custom ops.
        package_data={'mmpose.ops': ['*/*.so']},
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
        ],
        url='https://github.com/open-mmlab/mmpose',
        license='Apache License 2.0',
        setup_requires=parse_requirements('requirements/build.txt'),
        tests_require=parse_requirements('requirements/tests.txt'),
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'runtime': parse_requirements('requirements/runtime.txt')
        },
        zip_safe=False)
8f38ab4df308814bf78bdf78a7729c10193bbd98 | 56 | py | Python | shand/__init__.py | ryneches/Shand | ec7f8c914d7099b96e755c75b5313cbae1cab3c5 | [
"BSD-3-Clause"
] | null | null | null | shand/__init__.py | ryneches/Shand | ec7f8c914d7099b96e755c75b5313cbae1cab3c5 | [
"BSD-3-Clause"
] | 2 | 2016-02-17T03:16:00.000Z | 2016-02-17T03:16:42.000Z | shand/__init__.py | ryneches/Shand | ec7f8c914d7099b96e755c75b5313cbae1cab3c5 | [
"BSD-3-Clause"
] | null | null | null | from shand import Problem
import stats
import quicktree
| 14 | 25 | 0.857143 |
462dd5be854bf4ff79dc7b002e2cb8a7e7c35a9c | 781 | py | Python | app/Alumno/models.py | diexmoh/professionalnotes | 87fcda7c142ce65ef4e7acacc6a0bad1e1bd98d0 | [
"MIT"
] | null | null | null | app/Alumno/models.py | diexmoh/professionalnotes | 87fcda7c142ce65ef4e7acacc6a0bad1e1bd98d0 | [
"MIT"
] | null | null | null | app/Alumno/models.py | diexmoh/professionalnotes | 87fcda7c142ce65ef4e7acacc6a0bad1e1bd98d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import smart_str, smart_unicode
# Create your models here.
class Alumno(models.Model):
    """Student (alumno) record with identity fields and advisory-session data."""
    # Caller-supplied primary key (no auto-increment).
    idAlumno = models.PositiveSmallIntegerField(primary_key = True, null = False, blank = False,)
    # Given name plus paternal and maternal surnames.
    nombre = models.CharField(max_length = 50, null = False, blank = False)
    apePat = models.CharField(max_length = 50, null = False, blank = False)
    apeMat = models.CharField(max_length = 50, null = False, blank = False)
    # Optional count of advisory sessions.
    asesorias = models.PositiveSmallIntegerField(null = True, blank = True)
    # Optional free-form description of the advisory sessions.
    asesoriaDesc = models.CharField(max_length = 255, null = True, blank = True,)

    def __str__(self):
        # NOTE(review): concatenates name + paternal surname with no
        # separating space — confirm whether that is intended.
        return smart_str(self.nombre) + smart_str(self.apePat)
| 41.105263 | 97 | 0.722151 |
260dcd8eaa6d5b412aeca338fe48a9f1aac17877 | 104 | py | Python | device_manager/serializers/device_encoder.py | VladX09/clickpecker-device-manager | 5636dd990ab0ddb359255998536ecd390394799f | [
"MIT"
] | null | null | null | device_manager/serializers/device_encoder.py | VladX09/clickpecker-device-manager | 5636dd990ab0ddb359255998536ecd390394799f | [
"MIT"
] | null | null | null | device_manager/serializers/device_encoder.py | VladX09/clickpecker-device-manager | 5636dd990ab0ddb359255998536ecd390394799f | [
"MIT"
] | null | null | null | import json
class DeviceEncoder(json.JSONEncoder):
    """JSON encoder for device objects that expose a ``to_dict()`` method.

    Objects providing ``to_dict()`` are serialized from that dict; anything
    else falls back to the base encoder, which raises the conventional
    ``TypeError`` (bug fix: the old code unconditionally called
    ``o.to_dict()`` and raised ``AttributeError`` for unsupported types).
    """
    def default(self, o):
        if hasattr(o, 'to_dict'):
            return o.to_dict()
        # Base implementation raises TypeError for unserializable objects.
        return super(DeviceEncoder, self).default(o)
| 17.333333 | 38 | 0.692308 |
0265178effa190f526b1f966acbb97d98520d266 | 5,434 | py | Python | tests/test_app_routers_accounts_POST.py | BoostryJP/ibet-Prime | 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | [
"Apache-2.0"
] | 2 | 2021-08-19T12:35:25.000Z | 2022-02-16T04:13:38.000Z | tests/test_app_routers_accounts_POST.py | BoostryJP/ibet-Prime | 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | [
"Apache-2.0"
] | 46 | 2021-09-02T03:22:05.000Z | 2022-03-31T09:20:00.000Z | tests/test_app_routers_accounts_POST.py | BoostryJP/ibet-Prime | 924e7f8da4f8feea0a572e8b5532e09bcdf2dc99 | [
"Apache-2.0"
] | 1 | 2021-11-17T23:18:27.000Z | 2021-11-17T23:18:27.000Z | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import base64
from unittest import mock
from config import EOA_PASSWORD_PATTERN_MSG
from app.model.db import Account, AccountRsaStatus
from app.utils.e2ee_utils import E2EEUtils
class TestAppRoutersAccountsPOST:
    """Tests for POST /accounts (issuer account creation)."""
    # target API endpoint
    apiurl = "/accounts"

    # Password covering every character class accepted by the policy.
    valid_password = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 \*\+\.\\\(\)\?\[\]\^\$\-\|!#%&\"',/:;<=>@_`{}~"
    # Contains a non-ASCII character, which the policy rejects.
    invalid_password = "password🚀"

    ###########################################################################
    # Normal Case
    ###########################################################################

    # <Normal_1>
    def test_normal_1(self, client, db):
        # No accounts exist before the request.
        accounts_before = db.query(Account).all()

        password = self.valid_password
        req_param = {
            "eoa_password": E2EEUtils.encrypt(password)
        }
        resp = client.post(self.apiurl, json=req_param)

        # assertion
        assert resp.status_code == 200
        assert resp.json()["issuer_address"] is not None
        assert resp.json()["rsa_public_key"] == ""
        assert resp.json()["rsa_status"] == AccountRsaStatus.UNSET.value
        assert resp.json()["is_deleted"] is False

        accounts_after = db.query(Account).all()

        assert 0 == len(accounts_before)
        assert 1 == len(accounts_after)

        # The stored account matches the response and has no RSA key yet.
        account_1 = accounts_after[0]
        assert account_1.issuer_address == resp.json()["issuer_address"]
        assert account_1.keyfile is not None
        assert E2EEUtils.decrypt(account_1.eoa_password) == password
        assert account_1.rsa_private_key is None
        assert account_1.rsa_public_key is None
        assert account_1.rsa_passphrase is None
        assert account_1.rsa_status == AccountRsaStatus.UNSET.value
        assert account_1.is_deleted is False

    # <Normal_2>
    # AWS KMS
    @mock.patch("app.routers.account.AWS_KMS_GENERATE_RANDOM_ENABLED", True)
    @mock.patch("boto3.client")
    def test_normal_2(self, boto3_mock, client, db):
        accounts_before = db.query(Account).all()

        password = self.valid_password
        req_param = {
            "eoa_password": E2EEUtils.encrypt(password)
        }

        # mock
        # Stub KMS client returning deterministic random bytes; also checks
        # the endpoint requests exactly 32 bytes.
        class KMSClientMock:
            def generate_random(self, NumberOfBytes):
                assert NumberOfBytes == 32
                return {
                    "Plaintext": b"12345678901234567890123456789012"
                }

        boto3_mock.side_effect = [
            KMSClientMock()
        ]

        resp = client.post(self.apiurl, json=req_param)

        # assertion
        assert resp.status_code == 200
        assert resp.json()["issuer_address"] is not None
        assert resp.json()["rsa_public_key"] == ""
        assert resp.json()["rsa_status"] == AccountRsaStatus.UNSET.value
        assert resp.json()["is_deleted"] is False

        accounts_after = db.query(Account).all()

        assert 0 == len(accounts_before)
        assert 1 == len(accounts_after)

        account_1 = accounts_after[0]
        assert account_1.issuer_address == resp.json()["issuer_address"]
        assert account_1.keyfile is not None
        assert E2EEUtils.decrypt(account_1.eoa_password) == password
        assert account_1.rsa_private_key is None
        assert account_1.rsa_public_key is None
        assert account_1.rsa_passphrase is None
        assert account_1.rsa_status == AccountRsaStatus.UNSET.value
        assert account_1.is_deleted is False

    ###########################################################################
    # Error Case
    ###########################################################################

    # <Error_1>
    # Password Policy Violation
    def test_error_1(self, client, db):
        # Base64-encoded but not E2EE-encrypted -> request validation error.
        req_param = {
            "eoa_password": base64.encodebytes("password".encode("utf-8")).decode()
        }
        resp = client.post(self.apiurl, json=req_param)

        # assertion
        assert resp.status_code == 422
        assert resp.json() == {
            "meta": {
                "code": 1,
                "title": "RequestValidationError"
            },
            "detail": [{
                "loc": ["body", "eoa_password"],
                "msg": "eoa_password is not a Base64-encoded encrypted data",
                "type": "value_error"
            }]
        }

    # <Error_2>
    # Invalid Password
    def test_error_2(self, client, db):
        # Properly encrypted but violates the password character policy.
        req_param = {
            "eoa_password": E2EEUtils.encrypt(self.invalid_password)
        }
        resp = client.post(self.apiurl, json=req_param)

        # assertion
        assert resp.status_code == 400
        assert resp.json() == {
            "meta": {
                "code": 1,
                "title": "InvalidParameterError"
            },
            "detail": EOA_PASSWORD_PATTERN_MSG
        }
| 33.337423 | 132 | 0.589253 |
507cce4f26b31c772fa520a3715f5aaca505b118 | 1,137 | py | Python | tests/test_audio_server.py | rotdrop/rhasspy-hermes | ab822ad954da6da90368b65d72ed7e53694f085f | [
"MIT"
] | 1 | 2020-07-11T19:25:32.000Z | 2020-07-11T19:25:32.000Z | tests/test_audio_server.py | rotdrop/rhasspy-hermes | ab822ad954da6da90368b65d72ed7e53694f085f | [
"MIT"
] | 15 | 2019-12-31T13:19:25.000Z | 2022-01-17T17:40:13.000Z | tests/test_audio_server.py | rotdrop/rhasspy-hermes | ab822ad954da6da90368b65d72ed7e53694f085f | [
"MIT"
] | 8 | 2019-12-31T10:40:53.000Z | 2020-12-04T18:48:08.000Z | """Tests for rhasspyhermes.audioserver"""
from rhasspyhermes.audioserver import AudioFrame, AudioPlayBytes, AudioPlayFinished
# Fixed identifiers used by the topic round-trip tests in this module.
site_id = "testSiteId"
request_id = "testRequestId"
def test_audio_frame():
    """AudioFrame topic round-trip: build a topic, then recognize and parse it."""
    topic = AudioFrame.topic(site_id=site_id)
    assert AudioFrame.is_topic(topic)
    assert AudioFrame.get_site_id(topic) == site_id
def test_audio_play_bytes():
    """AudioPlayBytes topic round-trip: site id and request id survive parsing."""
    topic = AudioPlayBytes.topic(site_id=site_id, request_id=request_id)
    assert AudioPlayBytes.is_topic(topic)
    assert AudioPlayBytes.get_site_id(topic) == site_id
    assert AudioPlayBytes.get_request_id(topic) == request_id
def test_audio_play_finished():
    """AudioPlayFinished topic round-trip: built topic parses back to the site id."""
    topic = AudioPlayFinished.topic(site_id=site_id)
    assert AudioPlayFinished.is_topic(topic)
    assert AudioPlayFinished.get_site_id(topic) == site_id
| 28.425 | 83 | 0.702726 |
6286acf23701f037558a7c25571efe6337246ec8 | 401 | py | Python | TIPE/rename images/sanstitre0.py | clementlagneau/TIPE-2018 | 5f6570377da8e18463d774a5872096991c9126fb | [
"MIT"
] | null | null | null | TIPE/rename images/sanstitre0.py | clementlagneau/TIPE-2018 | 5f6570377da8e18463d774a5872096991c9126fb | [
"MIT"
] | null | null | null | TIPE/rename images/sanstitre0.py | clementlagneau/TIPE-2018 | 5f6570377da8e18463d774a5872096991c9126fb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 18 10:50:21 2018
@author: Clement LAGNEAU
"""
indice = 0
import os
monRepertoire='D:/Cours/MP/TIPE/Banque_image/ville'
os.chdir(monRepertoire)
fichiers = [f for f in os.listdir(monRepertoire) if os.path.isfile(os.path.join(monRepertoire, f))]
for x in fichiers:
os.rename(x,monRepertoire+'/image'+str(indice)+'.jpg')
indice += 1
| 25.0625 | 100 | 0.670823 |
3a373e54a35dbc5c8b9e75b6cdf03ac5bebafefa | 56,247 | py | Python | costar_dataset/hypertree_pose_metrics_tf.py | priyankahubli/costar_dataset | 559ffb16e758817f40abd07c26a573eeb8db3e97 | [
"Apache-2.0"
] | 1 | 2018-12-03T05:39:59.000Z | 2018-12-03T05:39:59.000Z | costar_dataset/hypertree_pose_metrics_tf.py | priyankahubli/costar_dataset | 559ffb16e758817f40abd07c26a573eeb8db3e97 | [
"Apache-2.0"
] | 5 | 2018-11-15T22:51:48.000Z | 2019-07-12T19:57:37.000Z | costar_dataset/hypertree_pose_metrics_tf.py | priyankahubli/costar_dataset | 559ffb16e758817f40abd07c26a573eeb8db3e97 | [
"Apache-2.0"
] | 4 | 2018-11-13T16:05:49.000Z | 2021-01-18T21:00:25.000Z |
import os
import copy
import math
import numpy as np
from tqdm import tqdm
import keras
import tensorflow as tf
from tensorflow.python.platform import flags
from shapely.geometry import Polygon
from pyquaternion import Quaternion
import sklearn
import costar_dataset.hypertree_utilities
# class Vector:
# # http://www.mathopenref.com/coordpolygonarea.html
# # https://stackoverflow.com/a/45268241/99379
# def __init__(self, x, y):
# self.x = x
# self.y = y
# def __add__(self, v):
# if not isinstance(v, Vector):
# return NotImplemented
# return Vector(self.x + v.x, self.y + v.y)
# def __sub__(self, v):
# if not isinstance(v, Vector):
# return NotImplemented
# return Vector(self.x - v.x, self.y - v.y)
# def cross(self, v):
# if not isinstance(v, Vector):
# return NotImplemented
# return self.x*v.y - self.y*v.x
# class Line:
# # ax + by + c = 0
# def __init__(self, v1, v2):
# self.a = v2.y - v1.y
# self.b = v1.x - v2.x
# self.c = v2.cross(v1)
# def __call__(self, p):
# return self.a*p.x + self.b*p.y + self.c
# def intersection(self, other):
# # http://www.mathopenref.com/coordpolygonarea.html
# # https://stackoverflow.com/a/45268241/99379
# # See e.g. https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection#Using_homogeneous_coordinates
# if not isinstance(other, Line):
# return NotImplemented
# w = self.a*other.b - self.b*other.a
# return Vector(
# (self.b*other.c - self.c*other.b)/w,
# (self.c*other.a - self.a*other.c)/w
# )
# def rectangle_vertices(cx, cy, w, h, theta):
# # http://www.mathopenref.com/coordpolygonarea.html
# # https://stackoverflow.com/a/45268241/99379
# dx = w/2
# dy = h/2
# dxcos = dx*cos(theta)
# dxsin = dx*sin(theta)
# dycos = dy*cos(theta)
# dysin = dy*sin(theta)
# return (
# Vector(cx, cy) + Vector(-dxcos - -dysin, -dxsin + -dycos),
# Vector(cx, cy) + Vector( dxcos - -dysin, dxsin + -dycos),
# Vector(cx, cy) + Vector( dxcos - dysin, dxsin + dycos),
# Vector(cx, cy) + Vector(-dxcos - dysin, -dxsin + dycos)
# )
# def intersection_area(r1, r2):
# # http://www.mathopenref.com/coordpolygonarea.html
# # https://stackoverflow.com/a/45268241/99379
# # r1 and r2 are in (center, width, height, rotation) representation
# # First convert these into a sequence of vertices
# rect0 = rectangle_vertices(*r1)
# rect1 = rectangle_vertices(*r2)
# # Use the vertices of the first rectangle as
# # starting vertices of the intersection polygon.
# rect0 = rect0
# # Loop over the edges of the second rectangle
# for p, q in zip(rect1, rect1[1:] + rect1[:1]):
# if len(rect0) <= 2:
# break # No intersection
# line = Line(p, q)
# # Any point p with line(p) <= 0 is on the "inside" (or on the boundary),
# # any point p with line(p) > 0 is on the "outside".
# # Loop over the edges of the rect0 polygon,
# # and determine which part is inside and which is outside.
# new_intersection = []
# line_values = [line(t) for t in rect0]
# for s, t, s_value, t_value in zip(
# rect0, rect0[1:] + rect0[:1],
# line_values, line_values[1:] + line_values[:1]):
# if s_value <= 0:
# new_intersection.append(s)
# if s_value * t_value < 0:
# # Points are on opposite sides.
# # Add the intersection of the lines to new_intersection.
# intersection_point = line.intersection(Line(s, t))
# new_intersection.append(intersection_point)
# intersection = new_intersection
# # Calculate area
# if len(intersection) <= 2:
# return 0
# return 0.5 * sum(p.x*q.y - p.y*q.x for p, q in
# zip(intersection, intersection[1:] + intersection[:1]))
# intersection_area(r0y0, r0x0, r0y1, r0x1, r0y2, r0x2, r0y3, r0x3, r1y0, r1x0, r1y1, r1x1, r1y2, r1x2, r1y3, r1x3):
def rectangle_points(r0y0, r0x0, r0y1, r0x1, r0y2, r0x2, r0y3, r0x3):
    """Pack eight scalar rectangle coordinates into four (y, x) numpy points."""
    corners = [(r0y0, r0x0), (r0y1, r0x1), (r0y2, r0x2), (r0y3, r0x3)]
    return [np.array(yx) for yx in corners]
def rectangle_vectors(rp):
    """
    # Arguments
        rp: rectangle points [p0yx, p1yx, p2yx, p3yx]

    # Returns
        The four edge vectors taken around the rectangle, wrapping from
        the last point back to the first.
    """
    return [rp[(i + 1) % 4] - rp[i] for i in range(4)]
def rectangle_homogeneous_lines(rv):
    """Convert rectangle edge vectors into homogeneous line coefficients.

    # Arguments
        rv: rectangle vectors [v0yx, v1yx, v2yx, v3yx]

    # Returns
        [r0abc, r1abc, r2abc, r3abc]

    NOTE(review): ``K`` is not defined by the imports visible in this
    module (only ``import keras``) — confirm a ``from keras import
    backend as K`` exists, otherwise this raises NameError.
    NOTE(review): ``tf.cross`` requires 3-element vectors while the
    entries of ``rv`` appear to be 2-D (y, x) vectors, and ``dv[2]`` /
    ``dv[3]`` index past a 2-vector — confirm this function works at all.
    """
    # ax + by + c = 0
    dv = rv[0] - rv[1]
    # TODO(ahundt) make sure cross product doesn't need to be in xy order
    r0abc = K.concatenate([dv[0], dv[1], tf.cross(rv[0], rv[1])])
    dv = rv[1] - rv[2]
    r1abc = K.concatenate([dv[1], dv[2], tf.cross(rv[1], rv[2])])
    dv = rv[2] - rv[3]
    r2abc = K.concatenate([dv[2], dv[3], tf.cross(rv[2], rv[3])])
    dv = rv[3] - rv[0]
    r3abc = K.concatenate([dv[3], dv[0], tf.cross(rv[3], rv[0])])
    return [r0abc, r1abc, r2abc, r3abc]
def homogeneous_line_intersection(hl0abc, hl1abc):
    """ Given two homogenous lines return the intersection point in y,x coordinates

    Each line is (a, b, c) for a*x + b*y + c = 0; the intersection is the
    cross product of the two lines, dehomogenized by the w component.
    """
    a0, b0, c0 = hl0abc[:3]
    a1, b1, c1 = hl1abc[:3]
    # w is the homogeneous scale factor; zero would mean parallel lines
    denom = a0 * b1 - b0 * a1
    point_y = (c0 * a1 - a0 * c1) / denom
    point_x = (b0 * c1 - c0 * b1) / denom
    return [point_y, point_x]
def line_at_point(l_abc, p_yx):
    """ Evaluate the homogeneous line equation a*x + b*y + c at a point.

    # Arguments
    l_abc: a line in homogenous coodinates
    p_yx: a point with y, x coordinates
    # Returns
    signed value; <= 0 means the point is on or inside the line's boundary
    as used by the clipping code in this file.
    """
    a, b, c = l_abc[0], l_abc[1], l_abc[2]
    y, x = p_yx[0], p_yx[1]
    return a * x + b * y + c
def intersection_points(rl0, rp1):
    """ Evaluate rectangle lines at another rectangle's points

    Pairs line i with point i and evaluates a*x + b*y + c for each pair
    (the line_at_point formula, inlined here).
    """
    return [l[0] * p[1] + l[1] * p[0] + l[2] for l, p in zip(rl0, rp1)]
def rectangle_intersection_polygon(rp0, rl0, rp1, rl1):
    """ Given two homogenous line rectangles, it returns the points for the polygon representing their intersection.
    # Arguments
    rp0: rectangle 0 defined with points
    rl0: rectangle 0 defined with homogeneous lines
    rp1: rectangle 1 defined with points
    rl1: rectangle 1 defined with homogeneous lines
    # Returns
    Intersection polygon consisting of up to 8 points.

    NOTE(review): each outer-loop pass clips the ORIGINAL rectangle rp0
    against a single line of rl1 and overwrites `intersection`, so only the
    final line's clip result is returned. The commented-out reference
    implementation above this function clips the accumulated polygon on each
    pass (Sutherland-Hodgman style) -- this looks like a bug; verify before use.
    """
    # TODO(ahundt) this function is still set up for eager execution... figure it out as tf calls...
    # http://www.mathopenref.com/coordpolygonarea.html
    # https://stackoverflow.com/a/45268241/99379
    # Use the vertices of the first rectangle as
    # starting vertices of the intersection polygon.
    intersection = []
    for line1 in rl1:
        # signed value of line1 at every corner of rectangle 0
        line_values = [line_at_point(line1, t) for t in rp0]
        # Any point p with line(p) <= 0 is on the "inside" (or on the boundary),
        # any point p with line(p) > 0 is on the "outside".
        # Loop over the edges of the rect0 polygon,
        # and determine which part is inside and which is outside.
        new_intersection = []
        # points in rp0 rotated around by one
        rp0_rot = hypertree_utilities.rotate(rp0)
        line_values_rot = hypertree_utilities.rotate(line_values)
        for s, t, s_value, t_value, line0 in zip(
                rp0, rp0_rot, line_values, line_values_rot, rl0):
            if s_value <= 0:
                new_intersection.append(s)
            st_value = s_value * t_value
            # NOTE(review): intersection is computed with edge line0 from rl0
            # rather than the segment s-t directly -- confirm rl0's edge
            # ordering matches the (s, t) pairing produced by the zip above.
            intersection_point = homogeneous_line_intersection(line1, line0)
            if st_value < 0:
                # Points are on opposite sides.
                # Add the intersection of the lines to new_intersection.
                new_intersection.append(intersection_point)
        intersection = new_intersection
    return intersection
def polygon_area_four_points(rp):
    """ Shoelace-formula area of a quadrilateral.

    # Arguments
    rp: polygon defined by 4 points in y,x order
    # Returns
    signed area (sign depends on winding order of the points)
    """
    total = 0.0
    for i in range(4):
        p = rp[i]
        q = rp[(i + 1) % 4]
        # shoelace term: p_x * q_y - p_y * q_x (points are stored y, x)
        total += p[1] * q[0] - p[0] * q[1]
    return 0.5 * total
def polygon_area(poly):
    """ Shoelace-formula area of an arbitrary polygon of (y, x) points.

    Returns 0 for degenerate polygons with fewer than 3 points.
    """
    if len(poly) <= 2:
        return 0
    total = 0
    count = len(poly)
    for i, p in enumerate(poly):
        q = poly[(i + 1) % count]
        total += p[1] * q[0] - p[0] * q[1]
    return 0.5 * total
def rectangle_vertices(h, w, cy, cx, sin_theta=None, cos_theta=None, theta=None):
    """ Get the vertices from a parameterized bounding box.
    y, x ordering where 0,0 is the top left corner.
    This matches matrix indexing.

    # Arguments
    h: rectangle height
    w: rectangle width
    cy, cx: rectangle center in y, x order
    sin_theta, cos_theta: orientation as a (possibly unnormalized) vector;
        ignored when theta is given.
    theta: orientation angle in radians; overrides sin_theta/cos_theta.
    # Returns
    list of four 2-element numpy arrays in y, x order.

    # http://www.mathopenref.com/coordpolygonarea.html
    # https://stackoverflow.com/a/45268241/99379
    """
    if theta is not None:
        sin_theta = np.sin(theta)
        cos_theta = np.cos(theta)
    # normalizing because this may be using the output of the neural network,
    # so we turn it into an x y coordinate on the unit circle without changing
    # the vector.
    sin_theta, cos_theta = normalize_sin_theta_cos_theta(sin_theta, cos_theta)
    # half extents of the rectangle
    dx = w/2
    dy = h/2
    dxcos = dx * cos_theta
    dxsin = dx * sin_theta
    dycos = dy * cos_theta
    dysin = dy * sin_theta
    # NOTE(review): the double negatives below (e.g. "- -dysin") encode the
    # rotation of each half-extent corner offset -- verify the sign pattern
    # against a known rectangle; it is easy to get y/x ordering wrong here.
    return [
        np.array([cy, cx]) + np.array([-dxsin + -dycos, -dxcos - -dysin]),
        np.array([cy, cx]) + np.array([ dxsin + -dycos, dxcos - -dysin]),
        np.array([cy, cx]) + np.array([ dxsin + dycos, dxcos - dysin]),
        np.array([cy, cx]) + np.array([-dxsin + dycos, -dxcos - dysin])
    ]
def encode_sin2_cos2(sin2_cos2):
    """ Shift values from the (-1, 1) range into the (0, 1) range.

    The argument is expected to already hold
    np.array([np.sin(2 * theta), np.cos(2 * theta)]).
    To encode starting from theta itself use `encode_2theta()`.
    """
    return 0.5 + sin2_cos2 * 0.5
def encode_sin_cos(sin_cos):
    """ Shift values from the (-1, 1) range into the (0, 1) range.

    The argument is expected to already hold
    np.array([np.sin(theta), np.cos(theta)]).
    To encode starting from theta itself use `encode_theta()`.
    """
    return 0.5 + sin_cos * 0.5
def encode_2theta(theta):
    """ Encodes theta in radians to handle gripper symmetry in 0 to 1 domain
    # Returns
    [sin(2 * theta), cos(2 * theta)] / 2 + 0.5
    """
    # doubling theta makes orientations that differ by 180 degrees equal,
    # matching a two-plate gripper's symmetry
    doubled = 2.0 * theta
    raw = np.array([np.sin(doubled), np.cos(doubled)])
    return raw / 2.0 + 0.5
def encode_theta(theta):
    """ Encodes theta in radians to asymmetric grippers in 0 to 1 domain
    # Returns
    [sin(theta), cos(theta)] / 2 + 0.5
    """
    sin_cos = np.array([np.sin(theta), np.cos(theta)])
    # shift from (-1, 1) into the (0, 1) encoding range
    return sin_cos / 2.0 + 0.5
def denorm_sin2_cos2(norm_sin2_cos2):
    """ Undo normalization step of `encode_2theta_np()`

    Maps values from the (0, 1) range back to (-1, 1) by subtracting 0.5
    and doubling. No attempt is made to enforce
    sin ** 2 + cos ** 2 == 1, which matters when the values come from a
    neural network -- callers must renormalize themselves.

    # Arguments
    norm_sin2_cos2: normalized sin(2*theta) cos(2*theta)
    # Returns
    return actual sin(2*theta) cos(2*theta)
    """
    return 2.0 * (norm_sin2_cos2 - 0.5)
def denorm_sin_cos(norm_sin_cos):
    """ Undo normalization step of `encode_theta_np()`

    Maps values from the (0, 1) range back to (-1, 1) by subtracting 0.5
    and doubling. No attempt is made to enforce
    sin ** 2 + cos ** 2 == 1, which matters when the values come from a
    neural network -- callers must renormalize themselves.

    # Arguments
    norm_sin_cos: normalized sin(theta) cos(theta)
    # Returns
    return actual sin(theta) cos(theta)
    """
    return 2.0 * (norm_sin_cos - 0.5)
def decode_sin2_cos2(norm_sin2_cos2):
    """ Decodes the result of encode_2theta() back into an angle theta in radians.
    """
    # decode_sin_cos yields 2*theta here because the encoding doubled the angle
    double_theta = decode_sin_cos(norm_sin2_cos2)
    return double_theta * 0.5
def decode_sin_cos(norm_sin2_cos2):
    """ Decodes the result of encode_theta() back into an angle theta in radians.
    """
    # shift from the (0, 1) encoding back to (-1, 1);
    # the denorm step is shared between the sin_cos and sin2_cos2 encodings
    sin_t, cos_t = denorm_sin2_cos2(norm_sin2_cos2)
    # project onto the unit circle before extracting the angle,
    # since network outputs need not satisfy sin^2 + cos^2 == 1
    sin_t, cos_t = normalize_sin_theta_cos_theta(sin_t, cos_t)
    return np.arctan2(sin_t, cos_t)
def parse_rectangle_vertices(s2t_c2t_hw_cycx):
    """ Convert a dimensions, angle, grasp center, based rectangle to vertices.

    s2t_c2t_hw_cycx: [sin(2*theta), cos(2*theta), height, width, center y, center x]
    """
    # first two entries encode the orientation as sin(2*theta), cos(2*theta)
    angle = decode_sin2_cos2(s2t_c2t_hw_cycx[:2])
    height, width, center_y, center_x = s2t_c2t_hw_cycx[2:6]
    return rectangle_vertices(height, width, center_y, center_x, theta=angle)
def parse_rectangle_params(s2t_c2t_hw_cycx):
    """ Decode an encoded rectangle into (vertices, homogeneous edge lines). """
    vertices = parse_rectangle_vertices(s2t_c2t_hw_cycx)
    return vertices, rectangle_homogeneous_lines(vertices)
def intersection_over_union(true_rp, pred_rp, true_rl, pred_rl):
    """ Intersection over union of two oriented rectangles.

    Also known as the jaccard metric.

    # Arguments
    true_rp: oriented rectanle 0 points
    pred_rp: oriented rectangle 1 points
    true_rl: oriented rectangle 0 homogeneous lines
    pred_rl: oriented rectangle 1 homogeneous lines
    """
    area_true = polygon_area_four_points(true_rp)
    area_pred = polygon_area_four_points(pred_rp)
    overlap_polygon = rectangle_intersection_polygon(true_rp, true_rl, pred_rp, pred_rl)
    overlap_area = polygon_area(overlap_polygon)
    # union area = sum of both areas minus the doubly counted overlap
    return overlap_area / (area_true + area_pred - overlap_area)
def shapely_intersection_over_union(rect0_points, rect1_points, verbose=0):
    """ Find the intersection over union of two polygons using shapely
    """
    # buffer(0) cleans the polygons to eliminate any overlapping points
    # https://toblerity.org/shapely/manual.html
    poly0 = Polygon(rect0_points).buffer(0)
    poly1 = Polygon(rect1_points).buffer(0)
    if not (poly0.is_valid and poly1.is_valid):
        # TODO(ahundt) determine and fix the source of invalid polygons.
        print('Warning: shapely_intersection_over_union() encountered an '
              'invalid polygon. We will return an IOU of 0 so execution '
              'might continue, but this bug should be addressed. '
              'p0: ' + str(poly0) + ' p1: ' + str(poly1))
        return 0.0
    overlap = poly0.intersection(poly1).area
    iou = overlap / (poly0.area + poly1.area - overlap)
    if verbose > 0:
        print('iou: ' + str(iou))
    return iou
def normalize_sin_theta_cos_theta(sin_theta, cos_theta):
    """ Put sin(theta) cos(theta) on the unit circle.

    Output values will be in (-1, 1).
    Normalizes the prediction while keeping the vector direction the same.

    # Arguments
        sin_theta: sine component, any real number.
        cos_theta: cosine component, any real number.

    # Returns
        (sin_theta, cos_theta) rescaled to unit length. A zero-length input
        is returned as (0.0, 0.0), matching the zero-row behavior of
        sklearn.preprocessing.normalize which this implementation replaces.
    """
    # The previous implementation used sklearn.preprocessing.normalize with
    # dtype=np.float; the np.float alias was removed in NumPy 1.24, which made
    # this crash. A direct L2 normalization with plain float is equivalent
    # and drops the sklearn dependency.
    vec = np.array([sin_theta, cos_theta], dtype=float)
    norm = np.linalg.norm(vec)
    if norm > 0:
        vec = vec / norm
    return vec[0], vec[1]
def prediction_vector_has_grasp_success(y_pred):
    """ True when the encoded vector carries a leading grasp_success entry.

    A 7-element vector is [grasp_success, sin_2theta, cos_2theta, h, w, cy, cx];
    a 6-element vector omits grasp_success.
    """
    return y_pred.size == 7
def get_prediction_vector_rectangle_start_index(y_pred):
    """ Get the rectangle start index from an encoded prediction vector of length 6 or 7
    """
    # a 7 element vector leads with grasp_success, which shifts the
    # rectangle encoding over by one position
    return 1 if y_pred.size == 7 else 0
def decode_prediction_vector(y_true):
    """ Decode a prediction vector into sin(2 * theta), cos(2 * theta), and 4 vertices

    # Arguments
    y_true: encoded vector of length 6 or 7 (optionally leading with grasp_success)
    # Returns
    (sin_2theta, cos_2theta, rectangle_vertices)

    NOTE(review): the slice assignment below denormalizes the angle entries
    IN PLACE, so the caller's array is mutated by this call.
    """
    rect_index = get_prediction_vector_rectangle_start_index(y_true)
    end_angle_index = rect_index + 2
    # map the encoded (0, 1) angle entries back to the (-1, 1) range, in place
    y_true[rect_index: end_angle_index] = denorm_sin2_cos2(y_true[rect_index:end_angle_index])
    true_y_sin_theta, true_x_cos_theta = y_true[rect_index:end_angle_index]
    true_rp = parse_rectangle_vertices(y_true[rect_index:])
    return true_y_sin_theta, true_x_cos_theta, true_rp
def decode_prediction_vector_theta_center_polygon(y_true):
    """ Decode a prediction vector into theta and four rectangle vertices
    Only supports vector format that includes center information!

    # Returns
    (theta, center_yx, rectangle_vertices)

    NOTE(review): like decode_prediction_vector(), the slice assignment
    below denormalizes the angle entries IN PLACE, mutating the caller's array.
    """
    rect_index = get_prediction_vector_rectangle_start_index(y_true)
    end_angle_index = rect_index + 2
    # map the encoded (0, 1) angle entries back to the (-1, 1) range, in place
    y_true[rect_index: end_angle_index] = denorm_sin2_cos2(y_true[rect_index:end_angle_index])
    true_y_sin_theta, true_x_cos_theta = y_true[rect_index:end_angle_index]
    true_rp = parse_rectangle_vertices(y_true[rect_index:])
    true_y_sin_theta, true_x_cos_theta = normalize_sin_theta_cos_theta(true_y_sin_theta, true_x_cos_theta)
    # right now it is 2 theta, so halve the decoded angle to recover theta
    theta = np.arctan2(true_y_sin_theta, true_x_cos_theta) / 2.0
    # center should be last two entries y, x order
    center = y_true[-2:]
    return theta, center, true_rp
def angle_difference_less_than_threshold(
        true_y_sin_theta, true_x_cos_theta,
        pred_y_sin_theta, pred_x_cos_theta,
        angle_threshold=np.radians(60.0),
        verbose=0):
    """ Returns true if the angle difference is less than the threshold, false otherwise.

    Angle differences wrap around the circle, so the shortest angular
    difference may lie in either the +theta or -theta direction.
    Note that the angle threshold is set to 60 because we are working with 2*theta.
    TODO(ahundt) double check the implications of this.

    # Arguments
    angle_threshold: The maximum absolute angular difference permitted.
    """
    # put each (sin, cos) pair on the unit circle without changing direction
    true_y_sin_theta, true_x_cos_theta = normalize_sin_theta_cos_theta(true_y_sin_theta, true_x_cos_theta)
    true_angle = np.arctan2(true_y_sin_theta, true_x_cos_theta)
    pred_y_sin_theta, pred_x_cos_theta = normalize_sin_theta_cos_theta(pred_y_sin_theta, pred_x_cos_theta)
    pred_angle = np.arctan2(pred_y_sin_theta, pred_x_cos_theta)
    # wrap the raw difference into (-pi, pi] so we measure the short way around
    raw_difference = true_angle - pred_angle
    angle_difference = np.arctan2(np.sin(raw_difference), np.cos(raw_difference))
    is_within_angle_threshold = np.abs(angle_difference) <= angle_threshold
    if verbose > 0:
        print(' angle_difference_less_than_threshold(): ' +
              ' angle_difference: ' + str(int(np.degrees(angle_difference))) +
              ' threshold: ' + str(int(np.degrees(angle_threshold))) +
              ' is_within_angle_threshold: ' + str(is_within_angle_threshold) +
              ' true_angle: ' + str(np.degrees(true_angle)) +
              ' pred_angle: ' + str(np.degrees(pred_angle)) +
              ' units: degrees ')
    return is_within_angle_threshold
def jaccard_score(y_true, y_pred, angle_threshold=np.radians(60.0), iou_threshold=0.25, verbose=0):
    """ Scoring for regression
    Note that the angle threshold is set to 60 because we are working with 2*theta.
    TODO(ahundt) double check the implications of this.
    # Arguments
    Feature formats accepted:
    grasp_success_norm_sin2_cos2_hw_yx_7:
        [grasp_success, sin_2theta, cos2_theta, height, width, center_y, center_x]
        [ 0, 1, 2, 3, 4, 5, 6]
    norm_sin2_cos2_hw_yx_6:
        [sin_2theta, cos2_theta, height, width, center_y, center_x]
        [ 0, 1, 2, 3, 4, 5, 6]
    Not yet accepted:
    norm_sin2_cos2_hw_5
        [sin2_theta, cos_2theta, height, width, center_y, center_x]
        [ 0, 1, 2, 3, 4, 5]
    grasp_success_norm_sin2_cos2_hw_5
        [grasp_success, sin_2theta, cos2_theta, height, width]
        [ 0, 1, 2, 3, 4,]
    y_true: a numpy array of features
    y_pred: a numpy array of features
    angle_threshold: The maximum allowed difference in
        angles for a grasp to be considered successful.
        Default of 60 degrees is for 2 * theta, which is 30 degrees for theta.
    theta_multiplier: Either 1.0 or 2.0.
        If it is 1.0 theta angles are compared directly.
        If it is 2.0 (the default), angles that are off by 180 degrees
        are considered equal, which is the case for a gripper with two plates.
    # Returns
    1.0 when the prediction counts as a correct grasp, 0.0 otherwise.

    NOTE(review): decode_prediction_vector() mutates its argument in place,
    so y_true and y_pred arrays are modified on the successful-grasp path.
    """
    has_grasp_success = prediction_vector_has_grasp_success(y_pred)
    # print('0')
    # round grasp success to 0 or 1
    # note this is not valid and not used if
    # has grasp success is false.
    predicted_success = np.rint(y_pred[0])
    # print('1')
    if has_grasp_success and predicted_success != int(y_true[0]):
        # grasp success prediction doesn't match, return 0 score
        # print('2')
        return 0.0
    elif has_grasp_success and predicted_success == 0:
        # The success prediction correctly matches the ground truth,
        # plus both are False so this is a true negative.
        # Any true negative where failure to grasp is predicted correctly
        # gets credit regardless of box contents
        # print('3')
        return 1.0
    else:
        # We're looking at a successful grasp and we've correctly predicted grasp_success.
        # First check if the angles are close enough to matching the angle_threshold.
        # print('4')
        # denormalize the values from (0, 1) back to (-1, 1 range) and get the array entries
        true_y_sin_theta, true_x_cos_theta, true_rp = decode_prediction_vector(y_true)
        pred_y_sin_theta, pred_x_cos_theta, pred_rp = decode_prediction_vector(y_pred)
        # print('5')
        # if the angle difference isn't close enough to ground truth return 0.0
        if not angle_difference_less_than_threshold(
                true_y_sin_theta, true_x_cos_theta,
                pred_y_sin_theta, pred_x_cos_theta,
                angle_threshold,
                verbose=verbose):
            return 0.0
        # print('6')
        # We passed all the other checks so
        # let's find out if the grasp boxes match
        # via the jaccard distance.
        iou = shapely_intersection_over_union(true_rp, pred_rp)
        if verbose:
            print('iou: ' + str(iou))
        # print('8')
        if iou >= iou_threshold:
            # passed iou threshold
            return 1.0
        else:
            # didn't meet iou threshold
            return 0.0
def grasp_jaccard_batch(y_true, y_pred, verbose=0):
    """ Apply jaccard_score() to every row of a batch of encoded rectangles.

    # Arguments
    y_true: 2d numpy array, one encoded ground truth rectangle per row
    y_pred: 2d numpy array, one encoded predicted rectangle per row
    verbose: NOTE(review) currently has no effect -- the debug sampling
        code below overwrites it on every iteration (see the TODO).
    # Returns
    1d float32 numpy array of per-row 0.0/1.0 scores.
    """
    # print('y_true.shape: ' + str(y_true.shape))
    # print('y_pred.shape: ' + str(y_pred.shape))
    scores = []
    for i in range(y_true.shape[0]):
        # print(' i: ' + str(i))
        # TODO(ahundt) comment the next few lines when not debugging
        verbose = 0
        # roughly 1-in-10000 chance of printing a debug sample
        if np.random.randint(0, 10000) % 10000 == 0:
            verbose = 1
            print('')
            print('')
            print('hypertree_pose_metrics.py sample of ground_truth and prediction:')
        this_true = y_true[i, :]
        this_pred = y_pred[i, :]
        score = jaccard_score(this_true, this_pred, verbose=verbose)
        if verbose:
            print('s2t_c2t_hw_cycx_true: ' + str(this_true))
            print('s2t_c2t_hw_cycx_pred: ' + str(this_pred))
            print('score:' + str(score))
        scores += [score]
    scores = np.array(scores, dtype=np.float32)
    # print('scores.shape: ' + str(scores.shape))
    return scores
def grasp_jaccard(y_true, y_pred):
    """ Calculates the jaccard metric score in a manner compatible with tf and keras metrics.

    This is an IOU metric with angle difference and IOU score thresholds,
    wrapping the numpy implementation grasp_jaccard_batch in a tf op.

    Feature formats accepted as a 2d array containing a batch of data ordered as:
    [grasp_success, sin_2theta, cos_2theta, height, width, center_y, center_x]
    [ 0, 1, 2, 3, 4, 5, 6]
    [sin_2theta, cos_2theta, height, width, center_y, center_x]
    [ 0, 1, 2, 3, 4, 5]
    It is very important to be aware that sin(2*theta) and cos(2*theta) are expected,
    additionally all coordinates and height/width are normalized by the network's input dimensions.
    """
    return tf.py_func(
        func=grasp_jaccard_batch, inp=[y_true, y_pred],
        Tout=tf.float32, stateful=False)
def rotation_to_xyz_theta(rotation, verbose=0):
    """Convert a rotation to an angle theta
    From above, a rotation to the right should be a positive theta,
    and a rotation to the left negative theta. The initial pose is with the
    z axis pointing down, the y axis to the right and the x axis forward.
    This format does not allow for arbitrary rotation commands to be defined,
    and originates from the costar dataset.
    In the google brain dataset the gripper is only commanded to
    rotate around a single vertical axis,
    so you might clearly visualize it, this also happens to
    approximately match the vector defined by gravity.
    Furthermore, the original paper had the geometry of the
    arm joints on which params could easily be extracted,
    which is not available here. To resolve this discrepancy
    Here we assume that the gripper generally starts off at a
    quaternion orientation of approximately [qx=-1, qy=0, qz=0, qw=0].
    This is equivalent to the angle axis
    representation of [a=np.pi, x=-1, y=0, z=0],
    which I'll name default_rot.
    It is also important to note the ambiguity of the
    angular distance between any current pose
    and the end pose. This angular distance will
    always have a positive value so the network
    could not naturally discriminate between
    turning left and turning right.
    For this reason, we use the angular distance
    from default_rot to define the input angle parameter,
    and if the angle axis x axis component is > 0
    we will use theta for rotation,
    but if the angle axis x axis component is < 0
    we will use -theta.

    # Returns
    4-element numpy array [axis_x, axis_y, axis_z, signed_theta].
    """
    # pyquaternion is in xyzw format!
    # NOTE(review): pyquaternion's Quaternion constructor documents
    # (w, x, y, z) element ordering, which contradicts the comment above --
    # verify the ordering of the `rotation` data passed in here.
    aa = Quaternion(rotation)
    # angle in radians
    theta = aa.angle
    # flip the sign of theta based on the z component of the axis,
    # so left and right rotations are distinguishable (see docstring)
    if aa.axis[2] < 0:
        multiply = 1.0
    else:
        multiply = -1.0
    if verbose > 0:
        print("ANGLE_AXIS_MULTIPLY: ", aa.angle, np.array(aa.axis), multiply)
    theta *= multiply
    return np.concatenate([aa.axis, [theta]], axis=-1)
def normalize_axis(aaxyz, epsilon=1e-5, verbose=0):
    """ Normalize an axis in angle axis format data.

    If the axis is all zeros, epsilon is added to the final component so
    that normalization is always well defined.

    # Arguments
        aaxyz: axis components, indexable with at least one entry;
            mutated in place in the all-zero case (preserving the
            original behavior).
        epsilon: value added to the last component when the axis is all zero.
        verbose: > 0 prints the normalized axis.

    # Returns
        1d numpy array containing the unit-length axis.
    """
    if not np.any(aaxyz):
        # source: https://stackoverflow.com/a/23567941/99379
        # all values are zero, fix the missing axis so normalization is defined
        aaxyz[-1] += epsilon
    # The previous implementation used sklearn.preprocessing.normalize with
    # dtype=np.float; the np.float alias was removed in NumPy 1.24, which
    # made this crash. A direct L2 normalization is equivalent (the epsilon
    # above guarantees a nonzero norm) and drops the sklearn dependency.
    vec = np.array(aaxyz, dtype=float)
    aaxyz = vec / np.linalg.norm(vec)
    if verbose:
        print('normalize_axis: ' + str(aaxyz))
    return aaxyz
def encode_xyz_qxyzw_to_xyz_aaxyz_nsc(xyz_qxyzw, rescale_meters=4, rotation_weight=0.001, random_augmentation=None):
    """ Encode a translation + quaternion pose to an encoded xyz, axis, and an angle as sin(theta) cos(theta)

    rescale_meters: Divide the number of meters by this number so
        positions will be encoded between 0 and 1.
        For example if you want to be able to reach forward and back by 2 meters, divide by 4.
    rotation_weight: scale down rotation values by this factor to a smaller range
        so mse gives similar weight to both rotations and translations.
        Use 1.0 for no adjustment. Default of 0.001 makes 1 radian
        about equal weight to 1 millimeter.
    random_augmentation: default None means no data modification,
        otherwise a value between 0.0 and 1.0 for the probability
        of randomly modifying the data with a small translation and rotation.
        Enabling random_augmentation is not recommended.

    NOTE(review): augmentation fires when np.random.random() > random_augmentation,
    i.e. with probability 1 - random_augmentation -- this looks inverted
    relative to the docstring; confirm the intended semantics.
    NOTE(review): the translation noise (rand - 0.5) / 10 is applied in the
    normalized coordinate space, so in meters it scales with rescale_meters;
    the "0.5 cm" comments below may not match -- verify.
    """
    # shift the translation into the (0, 1) encoding range
    xyz = (xyz_qxyzw[:3] / rescale_meters) + 0.5
    length = len(xyz_qxyzw)
    if length == 7:
        # print('xyz: ' + str(xyz))
        rotation = Quaternion(xyz_qxyzw[3:])
        # pose augmentation with no feedback or correspondingly adjusted transform poses
        if random_augmentation is not None and np.random.random() > random_augmentation:
            # random rotation change
            # random = Quaternion.random()
            # # only take rotations less than 5 degrees
            # while random.angle > np.pi / 36.:
            #     # TODO(ahundt) make more efficient and re-enable
            #     random = Quaternion.random()
            # rotation = rotation * random
            # random translation change of up to 0.5 cm
            random = (np.random.random(3) - 0.5) / 10.
            xyz = xyz + random
        aaxyz_theta = rotation_to_xyz_theta(rotation)
        # encode the unit axis vector into the [0,1] range
        # rotation_weight makes it so mse applied to rotation values
        # is on a similar scale to the translation values.
        aaxyz = ((aaxyz_theta[:-1] / 2) * rotation_weight) + 0.5
        nsc = encode_theta(aaxyz_theta[-1])
        # print('nsc: ' + str(nsc))
        xyz_aaxyz_nsc = np.concatenate([xyz, aaxyz, nsc], axis=-1)
        return xyz_aaxyz_nsc
    elif length == 3:
        if random_augmentation is not None and np.random.random() > random_augmentation:
            # random translation change of up to 0.5 cm
            random = (np.random.random(3) - 0.5) / 10.
            xyz = xyz + random
        return xyz
    else:
        raise ValueError('encode_xyz_qxyzw_to_xyz_aaxyz_nsc: unsupported input data length of ' + str(length))
def batch_encode_xyz_qxyzw_to_xyz_aaxyz_nsc(batch_xyz_qxyzw, rescale_meters=4, rotation_weight=0.001, random_augmentation=None):
    """ Expects n by 7 batch with xyz_qxyzw

    rescale_meters: Divide the number of meters by this number so
        positions will be encoded between 0 and 1.
        For example if you want to be able to reach forward and back by 2 meters, divide by 4.
    rotation_weight: scale down rotation values by this factor to a smaller range
        so mse gives similar weight to both rotations and translations.
        Use 1.0 for no adjustment.
    random_augmentation: default None means no data modification,
        otherwise a value between 0.0 and 1.0 for the probability
        of randomly modifying the data with a small translation and rotation.
        Enabling random_augmentation is not recommended.
    """
    encoded_rows = [
        encode_xyz_qxyzw_to_xyz_aaxyz_nsc(
            pose,
            rescale_meters=rescale_meters,
            rotation_weight=rotation_weight,
            random_augmentation=random_augmentation)
        for pose in batch_xyz_qxyzw]
    return np.stack(encoded_rows, axis=0)
def decode_xyz_aaxyz_nsc_to_xyz_qxyzw(xyz_aaxyz_nsc, rescale_meters=4, rotation_weight=0.001):
    """ Decode an encoded xyz, axis, sin(theta) cos(theta) vector back to translation + quaternion.

    rescale_meters: Divide the number of meters by this number so
        positions will be encoded between 0 and 1.
        For example if you want to be able to reach forward and back by 2 meters, divide by 4.
    rotation_weight: scale down rotation values by this factor to a smaller range
        so mse gives similar weight to both rotations and translations.
        Use 1.0 for no adjustment.

    # Returns
    For length-8 input: concatenated [xyz, quaternion elements].
    For length-3 input: just the decoded xyz translation.

    NOTE(review): pyquaternion's q.elements are ordered (w, x, y, z) while
    the function name says qxyzw -- confirm consumers expect this ordering.
    """
    # undo the (0, 1) encoding of the translation
    xyz = (xyz_aaxyz_nsc[:3] - 0.5) * rescale_meters
    length = len(xyz_aaxyz_nsc)
    if length == 8:
        theta = decode_sin_cos(xyz_aaxyz_nsc[-2:])
        # decode ([0, 1] * rotation_weight) range to [-1, 1] range
        aaxyz = ((xyz_aaxyz_nsc[3:-2] - 0.5) * 2) / rotation_weight
        # aaxyz is axis component of angle axis format,
        # Note that rotation_weight is automatically removed by normalization step.
        aaxyz = normalize_axis(aaxyz)
        q = Quaternion(axis=aaxyz, angle=theta)
        xyz_qxyzw = np.concatenate([xyz, q.elements], axis=-1)
        return xyz_qxyzw
    elif length != 3:
        raise ValueError('decode_xyz_aaxyz_nsc_to_xyz_qxyzw: unsupported input data length of ' + str(length))
    # length == 3: translation-only encoding
    return xyz
def grasp_acc(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.01, max_rotation=0.261799):
    """ Calculate 3D grasp accuracy for a single result with grasp_accuracy_xyz_aaxyz_nsc encoding.

    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Limits default to 15 degrees and 1cm.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.

    max_translation: defaults to 0.01 meters, or 1cm,
        translations must be less than this distance away.
    max_rotation: defaults to 15 degrees in radians,
        rotations must be less than this angular distance away.
    """
    # wrap the numpy batch implementation in a tf op so it can be used
    # directly as a keras metric
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses shape information, so restore the batch dimension
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_5mm_7_5deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.005, max_rotation=0.1308995):
    """ Calculate 3D grasp accuracy for a single result with grasp_accuracy_xyz_aaxyz_nsc encoding.

    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Limits default to 7.5 degrees and 0.5cm.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.

    max_translation: defaults to 0.005 meters, which is 0.5cm,
        translations must be less than this distance away.
    max_rotation: defaults to 7.5 degrees, which is 0.1308995 radians,
        rotations must be less than this angular distance away.
    """
    # wrap the numpy batch implementation in a tf op (see grasp_acc)
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses shape information, so restore the batch dimension
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_1cm_15deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.01, max_rotation=0.261799):
    """ Calculate 3D grasp accuracy for a single result with grasp_accuracy_xyz_aaxyz_nsc encoding.

    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Limits default to 15 degrees and 1cm.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.

    max_translation: defaults to 0.01 meters, which is 1cm,
        translations must be less than this distance away.
    max_rotation: defaults to 15 degrees in radians,
        rotations must be less than this angular distance away.
    """
    # wrap the numpy batch implementation in a tf op (see grasp_acc)
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses shape information, so restore the batch dimension
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_2cm_30deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.02, max_rotation=0.523598):
    """ Calculate 3D grasp accuracy for a single result with grasp_accuracy_xyz_aaxyz_nsc encoding.

    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.

    max_translation: defaults to 0.02 meters, which is 2cm,
        translations must be less than this distance away.
    max_rotation: defaults to 30 degrees, which is 0.523598 radians,
        rotations must be less than this angular distance away.
    """
    # wrap the numpy batch implementation in a tf op (see grasp_acc)
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses shape information, so restore the batch dimension
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_4cm_60deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.04, max_rotation=1.047196):
    """ Calculate 3D grasp accuracy for a single result with grasp_accuracy_xyz_aaxyz_nsc encoding.

    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.

    max_translation: defaults to 0.04 meters, which is 4cm,
        translations must be less than this distance away.
    max_rotation: defaults to 60 degrees, which is 1.047196 radians,
        rotations must be less than this angular distance away.
    """
    # wrap the numpy batch implementation in a tf op (see grasp_acc)
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses shape information, so restore the batch dimension
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_8cm_120deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.08, max_rotation=2.094392):
    """ Calculate 3D grasp accuracy for a single result with grasp_accuracy_xyz_aaxyz_nsc encoding.

    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.

    max_translation: defaults to 0.08 meters, which is 8cm,
        translations must be less than this distance away.
    max_rotation: defaults to 120 degrees, which is 2.094392 radians,
        rotations must be less than this angular distance away.
    """
    # wrap the numpy batch implementation in a tf op (see grasp_acc)
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses shape information, so restore the batch dimension
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_16cm_240deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.16, max_rotation=4.188784):
    """ Calculate 3D grasp accuracy for a batch with grasp_accuracy_xyz_aaxyz_nsc encoding.
    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.
    max_translation: defaults to 0.16 meters, which is 16cm,
        translations must be less than this distance away.
    max_rotation: defaults to 240 degrees, which is 4.188784 radians,
        rotations must be less than this angular distance away.
    """
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses static shape information, so restore the batch dimension.
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_32cm_360deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.32, max_rotation=6.2832):
    """ Calculate 3D grasp accuracy for a batch with grasp_accuracy_xyz_aaxyz_nsc encoding.
    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.
    max_translation: defaults to 0.32 meters, which is 32cm,
        translations must be less than this distance away.
    max_rotation: defaults to 360 degrees, which is 6.2832 radians,
        so any rotation is accepted and only translation is scored.
    """
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses static shape information, so restore the batch dimension.
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_64cm_360deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.64, max_rotation=6.2832):
    """ Calculate 3D grasp accuracy for a batch with grasp_accuracy_xyz_aaxyz_nsc encoding.
    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.
    max_translation: defaults to 0.64 meters, which is 64cm,
        translations must be less than this distance away.
    max_rotation: defaults to 360 degrees, which is 6.2832 radians,
        so any rotation is accepted and only translation is scored.
    """
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses static shape information, so restore the batch dimension.
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_128cm_360deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=1.28, max_rotation=6.2832):
    """ Calculate 3D grasp accuracy for a batch with grasp_accuracy_xyz_aaxyz_nsc encoding.
    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.
    max_translation: defaults to 1.28 meters, which is 128cm,
        translations must be less than this distance away.
    max_rotation: defaults to 360 degrees, which is 6.2832 radians,
        so any rotation is accepted and only translation is scored.
    """
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses static shape information, so restore the batch dimension.
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_256cm_360deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=2.56, max_rotation=6.2832):
    """ Calculate 3D grasp accuracy for a batch with grasp_accuracy_xyz_aaxyz_nsc encoding.
    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.
    max_translation: defaults to 2.56 meters, which is 256cm,
        translations must be less than this distance away.
    max_rotation: defaults to 360 degrees, which is 6.2832 radians,
        so any rotation is accepted and only translation is scored.
    """
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses static shape information, so restore the batch dimension.
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def grasp_acc_512cm_360deg(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=5.12, max_rotation=6.2832):
    """ Calculate 3D grasp accuracy for a batch with grasp_accuracy_xyz_aaxyz_nsc encoding.
    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.
    max_translation: defaults to 5.12 meters, which is 512cm,
        translations must be less than this distance away.
    max_rotation: defaults to 360 degrees, which is 6.2832 radians,
        so any rotation is accepted and only translation is scored.
    """
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        grasp_accuracy_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation, max_rotation],
        [tf.float32], stateful=False,
        name='py_func/grasp_accuracy_xyz_aaxyz_nsc_batch')
    # py_func loses static shape information, so restore the batch dimension.
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def cart_error(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc):
    """ Per-example cartesian (translation) distance between encoded poses.

    Input format is batched xyz_aaxyz_nsc; returns a float32 tensor of
    euclidean distances between the true and predicted translations.
    """
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        absolute_cart_distance_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc],
        [tf.float32], stateful=False,
        name='py_func/absolute_cart_distance_xyz_aaxyz_nsc_batch')
    # py_func loses static shape information, so restore the batch dimension.
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def angle_error(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc):
    """ Per-example angular distance between encoded pose rotations.

    Input format is batched xyz_aaxyz_nsc; returns a float32 tensor of
    absolute rotation distances (radians) between true and predicted poses.
    """
    # TODO(ahundt) make a single, simple call for grasp_accuracy_xyz_aaxyz_nsc, no py_func etc
    [filter_result] = tf.py_func(
        absolute_angle_distance_xyz_aaxyz_nsc_batch,
        [y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc],
        [tf.float32], stateful=False,
        name='py_func/absolute_angle_distance_xyz_aaxyz_nsc_batch')
    # py_func loses static shape information, so restore the batch dimension.
    filter_result.set_shape(y_true_xyz_aaxyz_nsc.get_shape()[0])
    return filter_result
def absolute_angle_distance_xyz_aaxyz_nsc_single(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc):
    """ Absolute angular distance (radians) between two encoded poses.

    Input format is xyz_aaxyz_nsc (length 8), or aaxyz_nsc (length 5)
    for the rotation-only case.
    This version is for a single pair of numpy arrays of length 8.
    """
    length = len(y_true_xyz_aaxyz_nsc)
    if length == 5:
        # workaround rotation distance only,
        # just use [0.5, 0.5, 0.5] for translation component
        # so existing code can be utilized
        fake_translation = np.array([0.5, 0.5, 0.5])
        y_true_xyz_aaxyz_nsc = np.concatenate([fake_translation, y_true_xyz_aaxyz_nsc])
        y_pred_xyz_aaxyz_nsc = np.concatenate([fake_translation, y_pred_xyz_aaxyz_nsc])
    y_true_xyz_qxyzw = decode_xyz_aaxyz_nsc_to_xyz_qxyzw(y_true_xyz_aaxyz_nsc)
    y_pred_xyz_qxyzw = decode_xyz_aaxyz_nsc_to_xyz_qxyzw(y_pred_xyz_aaxyz_nsc)
    # Only the quaternion components are compared; translation is ignored here.
    y_true_q = Quaternion(y_true_xyz_qxyzw[3:])
    y_pred_q = Quaternion(y_pred_xyz_qxyzw[3:])
    return Quaternion.absolute_distance(y_true_q, y_pred_q)
def absolute_angle_distance_xyz_aaxyz_nsc_batch(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc):
    """ Absolute angular distance (radians) for each pair of encoded poses.

    Expects batches of data as nx8 arrays in xyz_aaxyz_nsc format.
    Eager execution / numpy version; returns a float32 numpy array.
    """
    distances = [
        absolute_angle_distance_xyz_aaxyz_nsc_single(true_row, pred_row)
        for true_row, pred_row in zip(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc)]
    return np.array(distances, np.float32)
def absolute_cart_distance_xyz_aaxyz_nsc_single(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc):
    """ Cartesian (translation) distance between two encoded poses.

    Input format is xyz_aaxyz_nsc.
    This version is for a single pair of numpy arrays of length 8.
    """
    true_translation = decode_xyz_aaxyz_nsc_to_xyz_qxyzw(y_true_xyz_aaxyz_nsc)[:3]
    pred_translation = decode_xyz_aaxyz_nsc_to_xyz_qxyzw(y_pred_xyz_aaxyz_nsc)[:3]
    # Euclidean norm of the translation difference.
    return np.linalg.norm(true_translation - pred_translation)
def absolute_cart_distance_xyz_aaxyz_nsc_batch(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc):
    """ Cartesian (translation) distance for each pair of encoded poses.

    Expects batches of data as nx8 arrays in xyz_aaxyz_nsc format.
    Eager execution / numpy version; returns a float32 numpy array.
    """
    distances = [
        absolute_cart_distance_xyz_aaxyz_nsc_single(true_row, pred_row)
        for true_row, pred_row in zip(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc)]
    return np.array(distances, np.float32)
def grasp_accuracy_xyz_aaxyz_nsc_single(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.01, max_rotation=0.261799):
    """ Calculate 3D grasp accuracy for a single 1D numpy array for the ground truth and predicted value.
    Return 1 if the prediction meets both the translation and rotation accuracy criteria, 0 otherwise.
    Supported formats are translation xyz with length 3,
    aaxyz_nsc which is an axis and normalized sin(theta) cos(theta) with length 5,
    or xyz_aaxyz_nsc which incorporates both of the above with length 8.
    max_translation: defaults to 0.01 meters, or 1cm,
        translations must be less than this distance away.
    max_rotation: defaults to 15 degrees in radians,
        rotations must be less than this angular distance away.
    """
    length = len(y_true_xyz_aaxyz_nsc)
    if length == 3 or length == 8:
        # translation distance
        translation = absolute_cart_distance_xyz_aaxyz_nsc_single(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc)
        if length == 3:
            # translation component only
            if translation < max_translation:
                return 1.
        # translation and rotation
        elif length == 8:
            # rotation distance
            angle_distance = absolute_angle_distance_xyz_aaxyz_nsc_single(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc)
            if angle_distance < max_rotation and translation < max_translation:
                return 1.
    elif length == 5:
        # rotation distance only, just use [0.5, 0.5, 0.5] for translation component so existing code can be utilized
        fake_translation = np.array([0.5, 0.5, 0.5])
        angle_distance = absolute_angle_distance_xyz_aaxyz_nsc_single(
            np.concatenate([fake_translation, y_true_xyz_aaxyz_nsc]),
            np.concatenate([fake_translation, y_pred_xyz_aaxyz_nsc]))
        if angle_distance < max_rotation:
            return 1.
    else:
        raise ValueError('grasp_accuracy_xyz_aaxyz_nsc_single: unsupported label value format of length ' + str(length))
    # Any accuracy-criteria failure above falls through to 0 (inaccurate grasp).
    return 0.
def grasp_accuracy_xyz_aaxyz_nsc_batch(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc, max_translation=0.01, max_rotation=0.261799):
    """ Per-example 3D grasp accuracy for a batch of encoded poses.

    Expects batches of data as nx8 arrays. Eager execution / numpy version.
    max_translation defaults to 0.01 meters, or 1cm.
    max_rotation defaults to 15 degrees in radians.
    Returns a float32 numpy array of 1.0/0.0 accuracy flags.
    """
    accuracy_flags = [
        grasp_accuracy_xyz_aaxyz_nsc_single(
            true_row, pred_row,
            max_translation=max_translation, max_rotation=max_rotation)
        for true_row, pred_row in zip(y_true_xyz_aaxyz_nsc, y_pred_xyz_aaxyz_nsc)]
    return np.array(accuracy_flags, np.float32)
| 41.146306 | 129 | 0.688446 |
db4a1fbfe14e0c4ace931edc0107c35d48b3002f | 4,390 | py | Python | pytools/codegen.py | alexfikl/pytools | 8ac46ed6564f8ef0dbf0306e348099ada8c9230a | [
"MIT"
] | null | null | null | pytools/codegen.py | alexfikl/pytools | 8ac46ed6564f8ef0dbf0306e348099ada8c9230a | [
"MIT"
] | null | null | null | pytools/codegen.py | alexfikl/pytools | 8ac46ed6564f8ef0dbf0306e348099ada8c9230a | [
"MIT"
] | null | null | null | __copyright__ = "Copyright (C) 2009-2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__doc__ = """
Tools for Source Code Generation
================================
.. autoclass:: CodeGenerator
.. autoclass:: Indentation
.. autofunction:: remove_common_indentation
"""
from typing import Any, List
# {{{ code generation
# loosely based on
# http://effbot.org/zone/python-code-generator.htm
class CodeGenerator(object):
    """Language-agnostic functionality for source code generation.

    .. automethod:: extend
    .. automethod:: get
    .. automethod:: add_to_preamble
    .. automethod:: __call__
    .. automethod:: indent
    .. automethod:: dedent
    """

    def __init__(self) -> None:
        self.preamble: List[str] = []
        self.code: List[str] = []
        self.level = 0
        self.indent_amount = 4

    def _prefix(self) -> str:
        # Leading whitespace for code emitted at the current nesting level.
        return " " * (self.indent_amount * self.level)

    def extend(self, sub_generator: "CodeGenerator") -> None:
        """Append *sub_generator*'s code, re-indented to the current level."""
        prefix = self._prefix()
        self.code.extend(prefix + line for line in sub_generator.code)

    def get(self) -> str:
        """Return all generated source (preamble first) as one string."""
        body = "\n".join(self.code)
        if not self.preamble:
            return body
        return "\n".join(self.preamble) + "\n" + body

    def add_to_preamble(self, s: str) -> None:
        """Add a line to the preamble, emitted before all generated code."""
        self.preamble.append(s)

    def __call__(self, s: str) -> None:
        """Add one or more lines of code at the current indentation level.

        Whitespace-only input becomes a single empty line; multi-line input
        is first stripped of its common indentation.
        """
        if not s.strip():
            self.code.append("")
            return
        if "\n" in s:
            s = remove_common_indentation(s)
        prefix = self._prefix()
        for line in s.split("\n"):
            self.code.append(prefix + line)

    def indent(self) -> None:
        """Increase the indentation level by one step."""
        self.level += 1

    def dedent(self) -> None:
        """Decrease the indentation level; error if already at zero."""
        if self.level == 0:
            raise RuntimeError("cannot decrease indentation level")
        self.level -= 1
class Indentation(object):
    """A context manager for indentation for use with :class:`CodeGenerator`.

    On entry the wrapped generator's indentation level is increased by one;
    on exit it is decreased again, even if the body raises.

    .. attribute:: generator
    .. automethod:: __enter__
    .. automethod:: __exit__
    """
    def __init__(self, generator: CodeGenerator):
        self.generator = generator
    def __enter__(self) -> None:
        self.generator.indent()
    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        self.generator.dedent()
# }}}
# {{{ remove common indentation
def remove_common_indentation(code: str, require_leading_newline: bool = True):
    r"""Remove leading indentation from one or more lines of code.

    Removes an amount of indentation equal to the indentation level of the first
    nonempty line in *code*.

    :param code: Input string.
    :param require_leading_newline: If *True*, only remove indentation if *code*
        starts with ``\n``.
    :raises ValueError: If a later line is indented less than the first
        nonempty line (inconsistent indentation).
    :returns: A copy of *code* stripped of leading common indentation, or an
        empty string if *code* contains only blank lines.
    """
    if "\n" not in code:
        return code

    if require_leading_newline and not code.startswith("\n"):
        return code

    lines = code.split("\n")

    # Drop leading and trailing blank lines. The "lines and" guards fix a
    # crash (IndexError) in the original when *code* consisted entirely of
    # blank lines, which also made the function fall through and return None.
    while lines and lines[0].strip() == "":
        lines.pop(0)
    while lines and lines[-1].strip() == "":
        lines.pop(-1)

    if not lines:
        # Nothing but blank lines: there is no indentation to remove.
        return ""

    # Measure the first nonempty line's indentation.
    base_indent = 0
    while lines[0][base_indent] in " \t":
        base_indent += 1

    # Every following line must be indented at least as much.
    for line in lines[1:]:
        if line[:base_indent].strip():
            raise ValueError("inconsistent indentation")

    return "\n".join(line[base_indent:] for line in lines)
# vim: foldmethod=marker
| 29.863946 | 80 | 0.650114 |
d583b0d3707d7f93c0abb7fb1d9175211e28781d | 5,938 | py | Python | lambda/py/app.py | Leopold-007/Google-Alexa-Smart_home_Skill | f48cf8760b82b3412220fe9a4d5994b4b415340f | [
"Apache-2.0"
] | 9 | 2018-11-24T18:08:26.000Z | 2021-03-18T18:51:10.000Z | lambda/py/app.py | Leopold-007/Google-Alexa-Smart_home_Skill | f48cf8760b82b3412220fe9a4d5994b4b415340f | [
"Apache-2.0"
] | 2 | 2018-11-28T17:14:26.000Z | 2019-06-06T21:19:02.000Z | lambda/py/app.py | Leopold-007/Google-Alexa-Smart_home_Skill | f48cf8760b82b3412220fe9a4d5994b4b415340f | [
"Apache-2.0"
] | 3 | 2018-11-28T17:11:29.000Z | 2022-01-30T20:11:40.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 Francesco Circhetta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unofficial Google Assistant skill for the Amazon Echo."""
import gettext
import logging
from functools import wraps
from typing import Callable
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk.standard import StandardSkillBuilder
from ask_sdk_core.utils import is_intent_name, is_request_type
from ask_sdk_model import Response
import assistant
import skill_helpers
import data
from device_helpers import register_device, RegistrationError
# Module-level logger for this skill.
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
# Skill builder with DynamoDB-backed persistent attributes (auto-creates table).
_sb = StandardSkillBuilder(table_name=data.DYNAMODB_TABLE, auto_create_table=True)
def preflight_check(f: Callable) -> Callable:
    """Decorator: ensure the Echo device is registered with the Google
    Assistant API before the wrapped request handler runs.

    If the request's deviceId differs from the one stored in persistent
    attributes, the device is (re-)registered and the new id persisted.
    On registration failure a localized error response is returned and the
    wrapped handler is never called.
    """
    @wraps(f)
    def decorated_function(handler_input: HandlerInput) -> Response:
        _logger.info('Pre-flight check')
        # Obtain credentials
        credentials = skill_helpers.get_credentials(handler_input)
        # Obtain the deviceId
        device_id = skill_helpers.get_device_id(handler_input)
        last_device_id = skill_helpers.get_persistent_attribute(handler_input, 'device_id')
        project_id = data.GOOGLE_ASSISTANT_API['project_id']
        model_id = data.GOOGLE_ASSISTANT_API['model_id']
        # Re-register if "device_id" is different from the last "device_id":
        if device_id != last_device_id:
            _logger.info('Trying to register device...')
            try:
                register_device(project_id, credentials, model_id, device_id)
            except RegistrationError as e:
                _logger.error('Error in device registration: %s', e)
                # "_" is the gettext translation callable installed into the
                # request attributes by the global request interceptor.
                _: Callable = handler_input.attributes_manager.request_attributes["_"]
                handler_input.response_builder.speak(_(data.ERROR_REGISTRATION))
                return handler_input.response_builder.response
            _logger.info('Device was registered successfully')
            skill_helpers.set_persistent_attribute(handler_input, 'device_id', device_id, save=True)
            _logger.info('New device_id was saved into persistent storage')
        return f(handler_input)
    return decorated_function
@_sb.request_handler(can_handle_func=is_request_type("LaunchRequest"))
@preflight_check
def launch_request_handler(handler_input: HandlerInput) -> Response:
    """Greet the user via the Assistant when the skill is launched."""
    _logger.info('LaunchRequest')
    translate: Callable = handler_input.attributes_manager.request_attributes["_"]
    return assistant.assist(handler_input, translate(data.HELLO))
@_sb.request_handler(can_handle_func=is_intent_name("SearchIntent"))
@preflight_check
def search_intent_handler(handler_input: HandlerInput) -> Response:
    """Forward the user's spoken query to the Google Assistant."""
    _logger.info('SearchIntent')
    slots = handler_input.request_envelope.request.intent.slots
    return assistant.assist(handler_input, slots['search'].value)
@_sb.request_handler(can_handle_func=is_request_type("SessionEndedRequest"))
def session_ended_request_handler(handler_input: HandlerInput) -> Response:
    """Log the end-of-session reason and return an empty response."""
    reason = handler_input.request_envelope.request.reason
    _logger.info('Session ended with reason: %s', reason)
    return handler_input.response_builder.response
@_sb.request_handler(can_handle_func=lambda i: True)
def unhandled_intent_handler(handler_input: HandlerInput) -> Response:
    """Catch-all handler: reply with the localized fallback message."""
    _logger.debug(handler_input.request_envelope.request)
    translate: Callable = handler_input.attributes_manager.request_attributes["_"]
    builder = handler_input.response_builder
    builder.speak(translate(data.FALLBACK))
    return builder.response
@_sb.exception_handler(can_handle_func=lambda i, e: True)
def all_exception_handler(handler_input: HandlerInput, exception: Exception) -> Response:
    """Catch-all exception handler: log the error, apologize to the user."""
    _logger.error(exception, exc_info=True)
    translate: Callable = handler_input.attributes_manager.request_attributes["_"]
    builder = handler_input.response_builder
    builder.speak(translate(data.ERROR_GENERIC))
    return builder.response
@_sb.global_request_interceptor()
def process(handler_input: HandlerInput) -> None:
    """Install the locale-specific translation function for this request.

    Reads the locale from the incoming request and stores the matching
    gettext translation callable under the ``"_"`` request attribute,
    falling back to the identity `gettext.gettext` when no locale is set.
    """
    locale = getattr(handler_input.request_envelope.request, 'locale', None)
    _logger.info("Locale is {}".format(locale))
    attributes = handler_input.attributes_manager.request_attributes
    if not locale:
        attributes["_"] = gettext.gettext
        return
    # Map language prefixes onto the locale file names shipped with the skill;
    # any other locale uses its own name directly.
    prefix_to_file = {"fr": "fr-FR", "it": "it-IT", "es": "es-ES"}
    locale_file_name = locale
    for prefix, file_name in prefix_to_file.items():
        if locale.startswith(prefix):
            locale_file_name = file_name
            break
    _logger.info("Loading locale file: {}".format(locale_file_name))
    i18n = gettext.translation('data', localedir='locales',
                               languages=[locale_file_name], fallback=True)
    attributes["_"] = i18n.gettext
_logger.info('Loading Alexa Assistant...')
# Handler name that is used on AWS lambda -- the Lambda configuration's
# entry point must reference this module-level name.
lambda_handler = _sb.lambda_handler()
| 39.065789 | 108 | 0.743516 |
2834c8f56de53380f17ddfd8db783058834ad664 | 719 | py | Python | contrib/meta_converter.py | sungwoncho/tru | d4013b6c604fe688dd3f8afb8e18cbbffa4112de | [
"Apache-2.0"
] | 2 | 2020-08-09T08:38:00.000Z | 2020-08-17T07:03:52.000Z | contrib/meta_converter.py | sungwoncho/tru | d4013b6c604fe688dd3f8afb8e18cbbffa4112de | [
"Apache-2.0"
] | null | null | null | contrib/meta_converter.py | sungwoncho/tru | d4013b6c604fe688dd3f8afb8e18cbbffa4112de | [
"Apache-2.0"
] | null | null | null | import os
import sys
code = {
"<": "60",
">": "62",
"(": "40",
")": "41",
"[": "91",
"]": "93",
}
cur = ""
if __name__ == '__main__':
abspath = os.path.abspath(sys.argv[1])
with open(abspath) as src:
content = src.read()
out = []
for char in content:
if char in code:
if cur != "":
out.append("(" + str(cur) + ")")
cur = ""
out.append("("+code[char]+")")
else:
cur = cur + str(char)
if cur != "":
out.append("(" + str(cur) + ")")
out.reverse()
res = ''.join(out)
print(res)
| 18.435897 | 52 | 0.350487 |
553ed6aa90cf56c57f50e52c7d368ff382f3db19 | 1,103 | py | Python | tests/test_reaction.py | ModelEngineering/Kinetics-Validator | 9350da492fd9c1482b50332f386632e6db0e7ed2 | [
"MIT"
] | null | null | null | tests/test_reaction.py | ModelEngineering/Kinetics-Validator | 9350da492fd9c1482b50332f386632e6db0e7ed2 | [
"MIT"
] | null | null | null | tests/test_reaction.py | ModelEngineering/Kinetics-Validator | 9350da492fd9c1482b50332f386632e6db0e7ed2 | [
"MIT"
] | null | null | null | """
Tests for Reactions
"""
from SBMLKinetics.common import constants as cn
from SBMLKinetics.common.simple_sbml import SimpleSBML
from SBMLKinetics.common.reaction import Reaction
from tests.common import helpers
import copy
import libsbml
import numpy as np
import unittest
IGNORE_TEST = True  # when True, tests guarded by this flag return early (skipped)
IS_PLOT = True  # presumably enables plot output in tests -- unused in this chunk
#############################
# Tests
#############################
class TestReaction(unittest.TestCase):
    """Unit tests for the Reaction wrapper built from the shared SBML model."""

    def setUp(self):
        self.simple = helpers.getSimple()
        self.reactions = self.simple.reactions
        self.reaction = self.reactions[2]

    def testConstructor(self):
        if IGNORE_TEST:
            return
        def check(items, expected_type):
            # Each collection must be non-empty and hold the expected type.
            self.assertGreater(len(items), 0)
            self.assertTrue(isinstance(items[0], expected_type))
        #
        check(self.reaction.reactants, libsbml.SpeciesReference)
        check(self.reaction.products, libsbml.SpeciesReference)

    def testRepr(self):
        # The string form should mention species, the arrow, and kinetics terms.
        text = str(self.reaction)
        for token in ["T2R", "->", "R", "k1c", "*"]:
            self.assertTrue(token in text)
if __name__ == '__main__':
    # Run this module's unittest suite when executed directly.
    unittest.main()
| 22.06 | 54 | 0.668178 |
ba924e36b5e70b7b75a253d7b0058b00c7fcdcd6 | 1,979 | py | Python | mars/tensor/special/hypergeometric_funcs.py | Alfa-Shashank/mars | a75499185d0e533dd532a34515adefb3065f33cc | [
"Apache-2.0"
] | null | null | null | mars/tensor/special/hypergeometric_funcs.py | Alfa-Shashank/mars | a75499185d0e533dd532a34515adefb3065f33cc | [
"Apache-2.0"
] | null | null | null | mars/tensor/special/hypergeometric_funcs.py | Alfa-Shashank/mars | a75499185d0e533dd532a34515adefb3065f33cc | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.special as spspecial
from ..arithmetic.utils import arithmetic_operand
from ..utils import infer_dtype, implement_scipy
from .core import TensorSpecialBinOp, _register_special_op
@_register_special_op
@arithmetic_operand(sparse_mode='binary_and')
class TensorHYP2F1(TensorSpecialBinOp):
    """Tensor operand wrapping :func:`scipy.special.hyp2f1`."""
    _func_name = 'hyp2f1'
@implement_scipy(spspecial.hyp2f1)
@infer_dtype(spspecial.hyp2f1)
def hyp2f1(a, b, c, z, **kwargs):
    # Docstring and dtype behavior come from scipy.special.hyp2f1 via the
    # decorators above.
    return TensorHYP2F1(**kwargs)(a, b, c, z)
@_register_special_op
@arithmetic_operand(sparse_mode='binary_and')
class TensorHYP1F1(TensorSpecialBinOp):
    """Tensor operand wrapping :func:`scipy.special.hyp1f1`."""
    _func_name = 'hyp1f1'
@implement_scipy(spspecial.hyp1f1)
@infer_dtype(spspecial.hyp1f1)
def hyp1f1(a, b, x, out=None, **kwargs):
    # Docstring and dtype behavior come from scipy.special.hyp1f1 via the
    # decorators above.
    return TensorHYP1F1(**kwargs)(a, b, x, out)
@_register_special_op
@arithmetic_operand(sparse_mode='binary_and')
class TensorHYPERU(TensorSpecialBinOp):
    """Tensor operand wrapping :func:`scipy.special.hyperu`."""
    _func_name = 'hyperu'
@implement_scipy(spspecial.hyperu)
@infer_dtype(spspecial.hyperu)
def hyperu(a, b, x, out=None, **kwargs):
    # Docstring and dtype behavior come from scipy.special.hyperu via the
    # decorators above.
    return TensorHYPERU(**kwargs)(a, b, x, out)
@_register_special_op
@arithmetic_operand(sparse_mode='binary_and')
class TensorHYP0F1(TensorSpecialBinOp):
    """Tensor operand wrapping :func:`scipy.special.hyp0f1`."""
    _func_name = 'hyp0f1'
@implement_scipy(spspecial.hyp0f1)
@infer_dtype(spspecial.hyp0f1)
def hyp0f1(v, z, out=None, **kwargs):
    # Docstring and dtype behavior come from scipy.special.hyp0f1 via the
    # decorators above.
    return TensorHYP0F1(**kwargs)(v, z, out)
| 27.486111 | 74 | 0.763012 |
01c6c0383d6b33719938a3e382c44a7283dad19f | 2,241 | py | Python | code/other-sims/case3_mp.py | filecoin-project/consensus | 8824ad5fb8948706995805692d594f6ccf199176 | [
"Apache-2.0",
"MIT"
] | 43 | 2019-02-14T21:02:53.000Z | 2021-12-10T22:53:02.000Z | code/other-sims/case3_mp.py | filecoin-project/consensus | 8824ad5fb8948706995805692d594f6ccf199176 | [
"Apache-2.0",
"MIT"
] | 41 | 2019-02-23T02:16:42.000Z | 2020-06-18T20:17:52.000Z | code/other-sims/case3_mp.py | filecoin-project/consensus | 8824ad5fb8948706995805692d594f6ccf199176 | [
"Apache-2.0",
"MIT"
] | 4 | 2019-03-27T09:15:53.000Z | 2022-03-25T07:54:18.000Z | import numpy as np
import time
from math import floor
import multiprocessing as mp
#sim=10000
ec =[]
num=1
e=5
print "e = ", e
Num_of_sim_per_proc = 100
start_time = time.time()
# This script simulates the worst length of n consecutive "small headstart" attacks (case 3)
def simu(sim):
    """Run *sim* selfish-mining simulations and return the worst (longest)
    total duration of ``num`` consecutive "small headstart" attacks (case 3).

    Reads the module globals ``e`` (election parameter) and uses hard-coded
    adversary/honest power na=33 / nh=67.
    """
    na = 33
    nh = 67
    ntot=na+nh
    height = 250 #the height is chosen to avoid infinite loop, in practice a selfish mining
    #attack will not last 250 except with negligible probabilities
    p=float(e)/float(1*ntot)
    #num corresponds to the number of iterations of the attack that the adversary can perform
    #(calculated previously)
    if e==1: num = 77
    if e==5: num = 43
    win_ec = 0
    longestfork =[]
    np.random.seed()#initialise random seed for different processors
    for i in range(sim):
        ch = np.random.binomial(nh, p, height)
        ca = np.random.binomial(na, p, height)
        # result of flipping a coin nha times, tested height times.
        j=0
        win =1
        sumh = ch[0]+ch[1] #this is the beginning weight of the honest chain
        suma = ch[0]+ca[0]+ca[1] #this is the beginning weight of the adversarial chain using headstart
        ind = 1
        while sumh>suma and ind<height: #keep selfish mining until adversarial weight is higher than honest
            sumh+=ch[ind]
            suma+=ca[ind]#at each round players extend their chain, separately
            ind+=1
            if ind == height: #reach the end of the attack without ever "catching up" -> losing
                win =0
                break
            j=1
        if ind <height: #selfish mining was successful (i.e. stopped)
            win = 1
            longestfork.append(ind) #this is the length of the selfish mining attack
        if win ==1:
            win_ec+=1
    #return float(win_ec)/float(sim)
    stop = int(floor(sim/num)*num) #need to stop before the end of the longest fork
    #if it is not a multiple of num
    groupedfork=[ sum(longestfork[x:x+num]) for x in range(0, stop, num)]# we group the num
    #successive attacks together and sum them up to get the length of num successive attacks
    return max(groupedfork)#we take the worst case
#we run the simulations in parallel, one simu() batch per CPU core:
pool = mp.Pool(mp.cpu_count())
print mp.cpu_count()
# Each worker runs Num_of_sim_per_proc simulations; map blocks until done.
results = pool.map(simu, [Num_of_sim_per_proc]*mp.cpu_count())
pool.close()
# Report the per-worker maxima and the overall worst case.
print results, max(results)
print("--- %s seconds ---" % (time.time() - start_time))
77fa8d438267ea2375a344a2638d16bae9573cf0 | 3,000 | py | Python | pynq/lib/pmod/tests/test_pmod_oled.py | michalkouril/PYNQ | c72febc2decc83816f40b91a7f60e11fe707c248 | [
"BSD-3-Clause"
] | 1,537 | 2016-09-26T22:51:50.000Z | 2022-03-31T13:33:54.000Z | pynq/lib/pmod/tests/test_pmod_oled.py | michalkouril/PYNQ | c72febc2decc83816f40b91a7f60e11fe707c248 | [
"BSD-3-Clause"
] | 414 | 2016-10-03T21:12:10.000Z | 2022-03-21T14:55:02.000Z | pynq/lib/pmod/tests/test_pmod_oled.py | michalkouril/PYNQ | c72febc2decc83816f40b91a7f60e11fe707c248 | [
"BSD-3-Clause"
] | 826 | 2016-09-23T22:29:43.000Z | 2022-03-29T11:02:09.000Z | # Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
from pynq import Overlay
from pynq.lib.pmod import Pmod_OLED
from pynq.lib.pmod import PMODA
from pynq.lib.pmod import PMODB
from pynq.tests.util import user_answer_yes
from pynq.tests.util import get_interface_id
__author__ = "Giuseppe Natale, Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
# flag0: the base overlay bitstream is available on this board.
try:
    _ = Overlay('base.bit', download=False)
    flag0 = True
except IOError:
    flag0 = False
# flag1: the user confirms a Pmod OLED is physically attached.
flag1 = user_answer_yes("\nPmod OLED attached to the board?")
if flag1:
    # NOTE(review): eval() turns the user-selected name ('PMODA'/'PMODB')
    # into the imported interface constant; input is restricted by
    # get_interface_id's options list, but eval is still worth flagging.
    oled_id = eval(get_interface_id('Pmod OLED', options=['PMODA', 'PMODB']))
# Tests below are skipped unless both conditions hold.
flag = flag0 and flag1
@pytest.mark.skipif(not flag,
                    reason="need OLED attached to the base overlay")
def test_write_string():
    """Test for the OLED Pmod.
    Writes on the OLED the string 'Welcome to PYNQ.' and asks the user to
    confirm if it is shown on the OLED. After that, it clears the screen.
    This test can be skipped.
    """
    # Program the FPGA with the base overlay before touching the Pmod.
    Overlay('base.bit').download()
    oled = Pmod_OLED(oled_id)
    # Draw a pair of horizontal rules above and below the message.
    oled.draw_line(0, 0, 255, 0)
    oled.draw_line(0, 2, 255, 2)
    oled.write('Welcome to PYNQ.', 0, 1)
    oled.draw_line(0, 20, 255, 20)
    oled.draw_line(0, 22, 255, 22)
    # Manual verification: the tester confirms what the display shows.
    assert user_answer_yes("\nWelcome message shown on the OLED?")
    oled.clear()
    assert user_answer_yes("OLED screen clear now?")
    del oled
22c8c94b5fd890494eab94d8b2f1f342d54fe956 | 106,038 | py | Python | sympy/solvers/tests/test_solveset.py | jsmolic/sympy | 4c8422b989a46baf7ad3b1d26c0e9dbb72e07aee | [
"BSD-3-Clause"
] | 1 | 2021-12-15T11:34:16.000Z | 2021-12-15T11:34:16.000Z | sympy/solvers/tests/test_solveset.py | jsmolic/sympy | 4c8422b989a46baf7ad3b1d26c0e9dbb72e07aee | [
"BSD-3-Clause"
] | null | null | null | sympy/solvers/tests/test_solveset.py | jsmolic/sympy | 4c8422b989a46baf7ad3b1d26c0e9dbb72e07aee | [
"BSD-3-Clause"
] | null | null | null | from sympy.core.containers import Tuple
from sympy.core.function import (Function, Lambda, nfloat, diff)
from sympy.core.mod import Mod
from sympy.core.numbers import (E, I, Rational, oo, pi)
from sympy.core.relational import (Eq, Gt,
Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, symbols)
from sympy.functions.elementary.complexes import (Abs, arg, im, re, sign)
from sympy.functions.elementary.exponential import (LambertW, exp, log)
from sympy.functions.elementary.hyperbolic import (HyperbolicFunction,
sinh, tanh, cosh, sech, coth)
from sympy.functions.elementary.miscellaneous import sqrt, Min, Max
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (
TrigonometricFunction, acos, acot, acsc, asec, asin, atan, atan2,
cos, cot, csc, sec, sin, tan)
from sympy.functions.special.error_functions import (erf, erfc,
erfcinv, erfinv)
from sympy.logic.boolalg import And
from sympy.matrices.dense import MutableDenseMatrix as Matrix
from sympy.matrices.immutable import ImmutableDenseMatrix
from sympy.polys.polytools import Poly
from sympy.polys.rootoftools import CRootOf
from sympy.sets.contains import Contains
from sympy.sets.conditionset import ConditionSet
from sympy.sets.fancysets import ImageSet
from sympy.sets.sets import (Complement, EmptySet, FiniteSet,
Intersection, Interval, Union, imageset, ProductSet)
from sympy.simplify import simplify
from sympy.tensor.indexed import Indexed
from sympy.utilities.iterables import numbered_symbols
from sympy.testing.pytest import (XFAIL, raises, skip, slow, SKIP, _both_exp_pow)
from sympy.testing.randtest import verify_numerically as tn
from sympy.physics.units import cm
from sympy.solvers import solve
from sympy.solvers.solveset import (
solveset_real, domain_check, solveset_complex, linear_eq_to_matrix,
linsolve, _is_function_class_equation, invert_real, invert_complex,
solveset, solve_decomposition, substitution, nonlinsolve, solvify,
_is_finite_with_finite_vars, _transolve, _is_exponential,
_solve_exponential, _is_logarithmic,
_solve_logarithm, _term_factors, _is_modular, NonlinearError)
from sympy.abc import (a, b, c, d, e, f, g, h, i, j, k, l, m, n, q, r,
t, w, x, y, z)
def dumeq(i, j):
    """Return True if *i* and *j* are equal up to dummy symbols.

    Lists and tuples are compared element-wise (recursively); scalars are
    compared with ``==`` first and fall back to ``dummy_eq`` so that
    expressions containing Dummy symbols still compare equal.
    """
    if type(i) in (list, tuple):
        # Compare pairwise; fresh names avoid shadowing the parameters
        # (the original rebound i and j inside the comprehension).
        return all(dumeq(a, b) for a, b in zip(i, j))
    return i == j or i.dummy_eq(j)
@_both_exp_pow
def test_invert_real():
    """Check invert_real on algebraic, exp/log, Abs and trig expressions."""
    x = Symbol('x', real=True)
    def ireal(x, s=S.Reals):
        # Helper: restrict a candidate solution set to the reals.
        return Intersection(s, x)
    # issue 14223
    assert invert_real(x, 0, x, Interval(1, 2)) == (x, S.EmptySet)
    assert invert_real(exp(x), z, x) == (x, ireal(FiniteSet(log(z))))
    y = Symbol('y', positive=True)
    n = Symbol('n', real=True)
    assert invert_real(x + 3, y, x) == (x, FiniteSet(y - 3))
    assert invert_real(x*3, y, x) == (x, FiniteSet(y / 3))
    assert invert_real(exp(x), y, x) == (x, FiniteSet(log(y)))
    assert invert_real(exp(3*x), y, x) == (x, FiniteSet(log(y) / 3))
    assert invert_real(exp(x + 3), y, x) == (x, FiniteSet(log(y) - 3))
    assert invert_real(exp(x) + 3, y, x) == (x, ireal(FiniteSet(log(y - 3))))
    assert invert_real(exp(x)*3, y, x) == (x, FiniteSet(log(y / 3)))
    assert invert_real(log(x), y, x) == (x, FiniteSet(exp(y)))
    assert invert_real(log(3*x), y, x) == (x, FiniteSet(exp(y) / 3))
    assert invert_real(log(x + 3), y, x) == (x, FiniteSet(exp(y) - 3))
    assert invert_real(Abs(x), y, x) == (x, FiniteSet(y, -y))
    assert invert_real(2**x, y, x) == (x, FiniteSet(log(y)/log(2)))
    assert invert_real(2**exp(x), y, x) == (x, ireal(FiniteSet(log(log(y)/log(2)))))
    assert invert_real(x**2, y, x) == (x, FiniteSet(sqrt(y), -sqrt(y)))
    assert invert_real(x**S.Half, y, x) == (x, FiniteSet(y**2))
    raises(ValueError, lambda: invert_real(x, x, x))
    # Symbolic (irrational) exponents cannot be inverted over a real symbol.
    raises(ValueError, lambda: invert_real(x**pi, y, x))
    raises(ValueError, lambda: invert_real(S.One, y, x))
    assert invert_real(x**31 + x, y, x) == (x**31 + x, FiniteSet(y))
    lhs = x**31 + x
    base_values = FiniteSet(y - 1, -y - 1)
    assert invert_real(Abs(x**31 + x + 1), y, x) == (lhs, base_values)
    # Trig inversions yield ImageSets over the integers.
    assert dumeq(invert_real(sin(x), y, x),
        (x, imageset(Lambda(n, n*pi + (-1)**n*asin(y)), S.Integers)))
    assert dumeq(invert_real(sin(exp(x)), y, x),
        (x, imageset(Lambda(n, log((-1)**n*asin(y) + n*pi)), S.Integers)))
    assert dumeq(invert_real(csc(x), y, x),
        (x, imageset(Lambda(n, n*pi + (-1)**n*acsc(y)), S.Integers)))
    assert dumeq(invert_real(csc(exp(x)), y, x),
        (x, imageset(Lambda(n, log((-1)**n*acsc(y) + n*pi)), S.Integers)))
    assert dumeq(invert_real(cos(x), y, x),
        (x, Union(imageset(Lambda(n, 2*n*pi + acos(y)), S.Integers), \
                imageset(Lambda(n, 2*n*pi - acos(y)), S.Integers))))
    assert dumeq(invert_real(cos(exp(x)), y, x),
        (x, Union(imageset(Lambda(n, log(2*n*pi + acos(y))), S.Integers), \
                imageset(Lambda(n, log(2*n*pi - acos(y))), S.Integers))))
    assert dumeq(invert_real(sec(x), y, x),
        (x, Union(imageset(Lambda(n, 2*n*pi + asec(y)), S.Integers), \
                imageset(Lambda(n, 2*n*pi - asec(y)), S.Integers))))
    assert dumeq(invert_real(sec(exp(x)), y, x),
        (x, Union(imageset(Lambda(n, log(2*n*pi + asec(y))), S.Integers), \
                imageset(Lambda(n, log(2*n*pi - asec(y))), S.Integers))))
    assert dumeq(invert_real(tan(x), y, x),
        (x, imageset(Lambda(n, n*pi + atan(y)), S.Integers)))
    assert dumeq(invert_real(tan(exp(x)), y, x),
        (x, imageset(Lambda(n, log(n*pi + atan(y))), S.Integers)))
    assert dumeq(invert_real(cot(x), y, x),
        (x, imageset(Lambda(n, n*pi + acot(y)), S.Integers)))
    assert dumeq(invert_real(cot(exp(x)), y, x),
        (x, imageset(Lambda(n, log(n*pi + acot(y))), S.Integers)))
    assert dumeq(invert_real(tan(tan(x)), y, x),
        (tan(x), imageset(Lambda(n, n*pi + atan(y)), S.Integers)))
    # With a positive symbol, x**pi becomes invertible.
    x = Symbol('x', positive=True)
    assert invert_real(x**pi, y, x) == (x, FiniteSet(y**(1/pi)))
def test_invert_complex():
    """Check invert_complex on linear, exp and log expressions."""
    assert invert_complex(x + 3, y, x) == (x, FiniteSet(y - 3))
    assert invert_complex(x*3, y, x) == (x, FiniteSet(y / 3))
    assert dumeq(invert_complex(exp(x), y, x),
        (x, imageset(Lambda(n, I*(2*pi*n + arg(y)) + log(Abs(y))), S.Integers)))
    assert invert_complex(log(x), y, x) == (x, FiniteSet(exp(y)))
    raises(ValueError, lambda: invert_real(1, y, x))
    raises(ValueError, lambda: invert_complex(x, x, x))
    raises(ValueError, lambda: invert_complex(x, x, 1))
    # https://github.com/skirpichev/omg/issues/16
    assert invert_complex(sinh(x), 0, x) != (x, FiniteSet(0))
def test_domain_check():
    """domain_check must reject points where the expression is undefined."""
    assert domain_check(1/(1 + (1/(x+1))**2), x, -1) is False
    assert domain_check(x**2, x, 0) is True
    assert domain_check(x, x, oo) is False
    assert domain_check(0, x, oo) is False
def test_issue_11536():
    """0**x equations: only 0**x = 1 has a (trivial) real solution."""
    assert solveset(0**x - 100, x, S.Reals) == S.EmptySet
    assert solveset(0**x - 1, x, S.Reals) == FiniteSet(0)
def test_issue_17479():
    """nonlinsolve on a gradient system must not raise (regression)."""
    from sympy.solvers.solveset import nonlinsolve
    f = (x**2 + y**2)**2 + (x**2 + z**2)**2 - 2*(2*x**2 + y**2 + z**2)
    fx = f.diff(x)
    fy = f.diff(y)
    fz = f.diff(z)
    sol = nonlinsolve([fx, fy, fz], [x, y, z])
    assert len(sol) >= 4 and len(sol) <= 20
    # nonlinsolve has been giving a varying number of solutions
    # (originally 18, then 20, now 19) due to various internal changes.
    # Unfortunately not all the solutions are actually valid and some are
    # redundant. Since the original issue was that an exception was raised,
    # this first test only checks that nonlinsolve returns a "plausible"
    # solution set. The next test checks the result for correctness.
@XFAIL
def test_issue_18449():
    """Every nonlinsolve solution of the gradient system should verify."""
    x, y, z = symbols("x, y, z")
    f = (x**2 + y**2)**2 + (x**2 + z**2)**2 - 2*(2*x**2 + y**2 + z**2)
    fx = diff(f, x)
    fy = diff(f, y)
    fz = diff(f, z)
    sol = nonlinsolve([fx, fy, fz], [x, y, z])
    for (xs, ys, zs) in sol:
        d = {x: xs, y: ys, z: zs}
        assert tuple(_.subs(d).simplify() for _ in (fx, fy, fz)) == (0, 0, 0)
    # After simplification and removal of duplicate elements, there should
    # only be 4 parametric solutions left:
    # simplifiedsolutions = FiniteSet((sqrt(1 - z**2), z, z),
    #                                 (-sqrt(1 - z**2), z, z),
    #                                 (sqrt(1 - z**2), -z, z),
    #                                 (-sqrt(1 - z**2), -z, z))
    # TODO: Is the above solution set definitely complete?
def test_is_function_class_equation():
    """_is_function_class_equation: detect pure trig/hyperbolic equations.

    True when the equation is built only from the given function class of
    the symbol (with linear arguments); False for mixed, nested or
    non-linear arguments.
    """
    from sympy.abc import x, a
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) - 1, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) + sin(x) - a, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       sin(x)*tan(x) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       sin(x)*tan(x + a) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       sin(x)*tan(x*a) + sin(x), x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       a*tan(x) - 1, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x)**2 + sin(x) - 1, x) is True
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x) + x, x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x**2), x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x**2) + sin(x), x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(x)**sin(x), x) is False
    assert _is_function_class_equation(TrigonometricFunction,
                                       tan(sin(x)) + sin(x), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) - 1, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) + sinh(x) - a, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       sinh(x)*tanh(x) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       sinh(x)*tanh(x + a) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       sinh(x)*tanh(x*a) + sinh(x), x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       a*tanh(x) - 1, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x)**2 + sinh(x) - 1, x) is True
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x) + x, x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x**2), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x**2) + sinh(x), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(x)**sinh(x), x) is False
    assert _is_function_class_equation(HyperbolicFunction,
                                       tanh(sinh(x)) + sinh(x), x) is False
def test_garbage_input():
    """Invalid arguments to solveset* must raise or yield empty sets."""
    raises(ValueError, lambda: solveset_real([y], y))
    x = Symbol('x', real=True)
    assert solveset_real(x, 1) == S.EmptySet
    assert solveset_real(x - 1, 1) == FiniteSet(x)
    assert solveset_real(x, pi) == S.EmptySet
    assert solveset_real(x, x**2) == S.EmptySet
    raises(ValueError, lambda: solveset_complex([x], x))
    assert solveset_complex(x, pi) == S.EmptySet
    raises(ValueError, lambda: solveset((x, y), x))
    raises(ValueError, lambda: solveset(x + 1, S.Reals))
    raises(ValueError, lambda: solveset(x + 1, x, 2))
def test_solve_mul():
    """Products: the solution set is the union over the factors."""
    assert solveset_real((a*x + b)*(exp(x) - 3), x) == \
        Union({log(3)}, Intersection({-b/a}, S.Reals))
    anz = Symbol('anz', nonzero=True)
    bb = Symbol('bb', real=True)
    assert solveset_real((anz*x + bb)*(exp(x) - 3), x) == \
        FiniteSet(-bb/anz, log(3))
    assert solveset_real((2*x + 8)*(8 + exp(x)), x) == FiniteSet(S(-4))
    assert solveset_real(x/log(x), x) == EmptySet()
def test_solve_invert():
    """Equations solved by inverting exp/log/power on one side."""
    assert solveset_real(exp(x) - 3, x) == FiniteSet(log(3))
    assert solveset_real(log(x) - 3, x) == FiniteSet(exp(3))
    assert solveset_real(3**(x + 2), x) == FiniteSet()
    assert solveset_real(3**(2 - x), x) == FiniteSet()
    assert solveset_real(y - b*exp(a/x), x) == Intersection(
        S.Reals, FiniteSet(a/log(y/b)))
    # issue 4504
    assert solveset_real(2**x - 10, x) == FiniteSet(1 + log(5)/log(2))
def test_errorinverses():
    """Equations involving erf/erfc and their inverses."""
    assert solveset_real(erf(x) - S.Half, x) == \
        FiniteSet(erfinv(S.Half))
    assert solveset_real(erfinv(x) - 2, x) == \
        FiniteSet(erf(2))
    assert solveset_real(erfc(x) - S.One, x) == \
        FiniteSet(erfcinv(S.One))
    assert solveset_real(erfcinv(x) - 2, x) == FiniteSet(erfc(2))
def test_solve_polynomial():
    """Real solutions of linear, quadratic, cubic and radical polynomials."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert solveset_real(3*x - 2, x) == FiniteSet(Rational(2, 3))
    assert solveset_real(x**2 - 1, x) == FiniteSet(-S.One, S.One)
    assert solveset_real(x - y**3, x) == FiniteSet(y ** 3)
    a11, a12, a21, a22, b1, b2 = symbols('a11, a12, a21, a22, b1, b2')
    assert solveset_real(x**3 - 15*x - 4, x) == FiniteSet(
        -2 + 3 ** S.Half,
        S(4),
        -2 - 3 ** S.Half)
    assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
    assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
    assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
    assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
    assert len(solveset_real(x**5 + x**3 + 1, x)) == 1
    assert len(solveset_real(-2*x**3 + 4*x**2 - 2*x + 6, x)) > 0
    assert solveset_real(x**6 + x**4 + I, x) is S.EmptySet
def test_return_root_of():
    """Quintics and higher degrees are returned as CRootOf objects."""
    f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == CRootOf
    # if one uses solve to get the roots of a polynomial that has a CRootOf
    # solution, make sure that the use of nfloat during the solve process
    # doesn't fail. Note: if you want numerical solutions to a polynomial
    # it is *much* faster to use nroots to get them than to solve the
    # equation only to get CRootOf solutions which are then numerically
    # evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
    # than [i.n() for i in solve(eq)] to get the numerical roots of eq.
    assert nfloat(list(solveset_complex(x**5 + 3*x**3 + 7, x))[0],
                  exponent=False) == CRootOf(x**5 + 3*x**3 + 7, 0).n()
    sol = list(solveset_complex(x**6 - 2*x + 2, x))
    assert all(isinstance(i, CRootOf) for i in sol) and len(sol) == 6
    f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == CRootOf
    s = x**5 + 4*x**3 + 3*x**2 + Rational(7, 4)
    assert solveset_complex(s, x) == \
        FiniteSet(*Poly(s*4, domain='ZZ').all_roots())
    # Refer issue #7876
    eq = x*(x - 1)**2*(x + 1)*(x**6 - x + 1)
    assert solveset_complex(eq, x) == \
        FiniteSet(-1, 0, 1, CRootOf(x**6 - x + 1, 0),
                  CRootOf(x**6 - x + 1, 1),
                  CRootOf(x**6 - x + 1, 2),
                  CRootOf(x**6 - x + 1, 3),
                  CRootOf(x**6 - x + 1, 4),
                  CRootOf(x**6 - x + 1, 5))
def test__has_rational_power():
    """_has_rational_power returns (found, lcm-of-denominators)."""
    from sympy.solvers.solveset import _has_rational_power
    assert _has_rational_power(sqrt(2), x)[0] is False
    assert _has_rational_power(x*sqrt(2), x)[0] is False
    assert _has_rational_power(x**2*sqrt(x), x) == (True, 2)
    assert _has_rational_power(sqrt(2)*x**Rational(1, 3), x) == (True, 3)
    assert _has_rational_power(sqrt(x)*x**Rational(1, 3), x) == (True, 6)
def test_solveset_sqrt_1():
    """Simple single-radical equations over the reals."""
    assert solveset_real(sqrt(5*x + 6) - 2 - x, x) == \
        FiniteSet(-S.One, S(2))
    assert solveset_real(sqrt(x - 1) - x + 7, x) == FiniteSet(10)
    assert solveset_real(sqrt(x - 2) - 5, x) == FiniteSet(27)
    assert solveset_real(sqrt(x) - 2 - 5, x) == FiniteSet(49)
    assert solveset_real(sqrt(x**3), x) == FiniteSet(0)
    assert solveset_real(sqrt(x - 1), x) == FiniteSet(1)
def test_solveset_sqrt_2():
    """Multi-radical equations; spurious roots must be discarded."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    # http://tutorial.math.lamar.edu/Classes/Alg/SolveRadicalEqns.aspx#Solve_Rad_Ex2_a
    assert solveset_real(sqrt(2*x - 1) - sqrt(x - 4) - 2, x) == \
        FiniteSet(S(5), S(13))
    assert solveset_real(sqrt(x + 7) + 2 - sqrt(3 - x), x) == \
        FiniteSet(-6)
    # http://www.purplemath.com/modules/solverad.htm
    assert solveset_real(sqrt(17*x - sqrt(x**2 - 5)) - 7, x) == \
        FiniteSet(3)
    eq = x + 1 - (x**4 + 4*x**3 - x)**Rational(1, 4)
    assert solveset_real(eq, x) == FiniteSet(Rational(-1, 2), Rational(-1, 3))
    eq = sqrt(2*x + 9) - sqrt(x + 1) - sqrt(x + 4)
    assert solveset_real(eq, x) == FiniteSet(0)
    eq = sqrt(x + 4) + sqrt(2*x - 1) - 3*sqrt(x - 1)
    assert solveset_real(eq, x) == FiniteSet(5)
    eq = sqrt(x)*sqrt(x - 7) - 12
    assert solveset_real(eq, x) == FiniteSet(16)
    eq = sqrt(x - 3) + sqrt(x) - 3
    assert solveset_real(eq, x) == FiniteSet(4)
    eq = sqrt(2*x**2 - 7) - (3 - x)
    assert solveset_real(eq, x) == FiniteSet(-S(8), S(2))
    # others
    eq = sqrt(9*x**2 + 4) - (3*x + 2)
    assert solveset_real(eq, x) == FiniteSet(0)
    assert solveset_real(sqrt(x - 3) - sqrt(x) - 3, x) == FiniteSet()
    eq = (2*x - 5)**Rational(1, 3) - 3
    assert solveset_real(eq, x) == FiniteSet(16)
    assert solveset_real(sqrt(x) + sqrt(sqrt(x)) - 4, x) == \
        FiniteSet((Rational(-1, 2) + sqrt(17)/2)**4)
    eq = sqrt(x) - sqrt(x - 1) + sqrt(sqrt(x))
    assert solveset_real(eq, x) == FiniteSet()
    eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
    ans = solveset_real(eq, x)
    ra = S('''-1484/375 - 4*(-1/2 + sqrt(3)*I/2)*(-12459439/52734375 +
        114*sqrt(12657)/78125)**(1/3) - 172564/(140625*(-1/2 +
        sqrt(3)*I/2)*(-12459439/52734375 + 114*sqrt(12657)/78125)**(1/3))''')
    rb = Rational(4, 5)
    # Compare numerically: ra is a messy radical expression.
    assert all(abs(eq.subs(x, i).n()) < 1e-10 for i in (ra, rb)) and \
        len(ans) == 2 and \
        {i.n(chop=True) for i in ans} == \
        {i.n(chop=True) for i in (ra, rb)}
    assert solveset_real(sqrt(x) + x**Rational(1, 3) +
                                 x**Rational(1, 4), x) == FiniteSet(0)
    assert solveset_real(x/sqrt(x**2 + 1), x) == FiniteSet(0)
    eq = (x - y**3)/((y**2)*sqrt(1 - y**2))
    assert solveset_real(eq, x) == FiniteSet(y**3)
    # issue 4497
    assert solveset_real(1/(5 + x)**Rational(1, 5) - 9, x) == \
        FiniteSet(Rational(-295244, 59049))
@XFAIL
def test_solve_sqrt_fail():
    """Known failure: checksol does not take the real cube root here."""
    # this only works if we check real_root(eq.subs(x, Rational(1, 3)))
    # but checksol doesn't work like that
    eq = (x**3 - 3*x**2)**Rational(1, 3) + 1 - x
    assert solveset_real(eq, x) == FiniteSet(Rational(1, 3))
@slow
def test_solve_sqrt_3():
    """Radical equations whose answers mix FiniteSets and ConditionSets."""
    R = Symbol('R')
    eq = sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1)
    sol = solveset_complex(eq, R)
    # Expected explicit roots (fset) and the unresolved candidates (cset).
    fset = [Rational(5, 3) + 4*sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3,
            -sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3 +
            40*re(1/((Rational(-1, 2) - sqrt(3)*I/2)*(Rational(251, 27) + sqrt(111)*I/9)**Rational(1, 3)))/9 +
            sqrt(30)*sin(atan(3*sqrt(111)/251)/3)/3 + Rational(5, 3) +
            I*(-sqrt(30)*cos(atan(3*sqrt(111)/251)/3)/3 -
               sqrt(10)*sin(atan(3*sqrt(111)/251)/3)/3 +
               40*im(1/((Rational(-1, 2) - sqrt(3)*I/2)*(Rational(251, 27) + sqrt(111)*I/9)**Rational(1, 3)))/9)]
    cset = [40*re(1/((Rational(-1, 2) + sqrt(3)*I/2)*(Rational(251, 27) + sqrt(111)*I/9)**Rational(1, 3)))/9 -
            sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3 - sqrt(30)*sin(atan(3*sqrt(111)/251)/3)/3 +
            Rational(5, 3) +
            I*(40*im(1/((Rational(-1, 2) + sqrt(3)*I/2)*(Rational(251, 27) + sqrt(111)*I/9)**Rational(1, 3)))/9 -
               sqrt(10)*sin(atan(3*sqrt(111)/251)/3)/3 +
               sqrt(30)*cos(atan(3*sqrt(111)/251)/3)/3)]
    assert sol._args[0] == FiniteSet(*fset)
    assert sol._args[1] == ConditionSet(
        R,
        Eq(sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1), 0),
        FiniteSet(*cset))
    # the number of real roots will depend on the value of m: for m=1 there are 4
    # and for m=-1 there are none.
    eq = -sqrt((m - q)**2 + (-m/(2*q) + S.Half)**2) + sqrt((-m**2/2 - sqrt(
        4*m**4 - 4*m**2 + 8*m + 1)/4 - Rational(1, 4))**2 + (m**2/2 - m - sqrt(
        4*m**4 - 4*m**2 + 8*m + 1)/4 - Rational(1, 4))**2)
    unsolved_object = ConditionSet(q, Eq(sqrt((m - q)**2 + (-m/(2*q) + S.Half)**2) -
        sqrt((-m**2/2 - sqrt(4*m**4 - 4*m**2 + 8*m + 1)/4 - Rational(1, 4))**2 + (m**2/2 - m -
        sqrt(4*m**4 - 4*m**2 + 8*m + 1)/4 - Rational(1, 4))**2), 0), S.Reals)
    assert solveset_real(eq, q) == unsolved_object
def test_solve_polynomial_symbolic_param():
    """Polynomial/rational equations with symbolic parameters."""
    assert solveset_complex((x**2 - 1)**2 - a, x) == \
        FiniteSet(sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
                  sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a)))
    # issue 4507
    assert solveset_complex(y - b/(1 + a*x), x) == \
        FiniteSet((b/y - 1)/a) - FiniteSet(-1/a)
    # issue 4508
    assert solveset_complex(y - b*x/(a + x), x) == \
        FiniteSet(-a*y/(y - b)) - FiniteSet(-a)
def test_solve_rational():
    """Rational-function equations over the reals."""
    assert solveset_real(1/x + 1, x) == FiniteSet(-S.One)
    assert solveset_real(1/exp(x) - 1, x) == FiniteSet(0)
    assert solveset_real(x*(1 - 5/x), x) == FiniteSet(5)
    assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
    assert solveset_real((x**2/(7 - x)).diff(x), x) == \
        FiniteSet(S.Zero, S(14))
def test_solveset_real_gen_is_pow():
    """A constant expression (sqrt(1) + 1) has no solutions."""
    assert solveset_real(sqrt(1) + 1, x) == EmptySet()
def test_no_sol():
    """Equations with provably empty real solution sets."""
    assert solveset(1 - oo*x) == EmptySet()
    assert solveset(oo*x, x) == EmptySet()
    assert solveset(oo*x - oo, x) == EmptySet()
    assert solveset_real(4, x) == EmptySet()
    assert solveset_real(exp(x), x) == EmptySet()
    assert solveset_real(x**2 + 1, x) == EmptySet()
    assert solveset_real(-3*a/sqrt(x), x) == EmptySet()
    assert solveset_real(1/x, x) == EmptySet()
    assert solveset_real(-(1 + x)/(2 + x)**2 + 1/(2 + x), x) == \
        EmptySet()
def test_sol_zero_real():
    """Identically-zero equations are satisfied on the whole domain."""
    assert solveset_real(0, x) == S.Reals
    assert solveset(0, x, Interval(1, 2)) == Interval(1, 2)
    assert solveset_real(-x**2 - 2*x + (x + 1)**2 - 1, x) == S.Reals
def test_no_sol_rational_extragenous():
    """Rational equations whose only candidates are extraneous."""
    assert solveset_real((x/(x + 1) + 3)**(-2), x) == EmptySet()
    assert solveset_real((x - 1)/(1 + 1/(x - 1)), x) == EmptySet()
def test_solve_polynomial_cv_1a():
    """
    Test for solving on equations that can be converted to
    a polynomial equation using the change of variable y -> x**Rational(p, q)
    """
    assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
    assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
    assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
    assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
    assert solveset_real(x*(x**(S.One / 3) - 3), x) == \
        FiniteSet(S.Zero, S(27))
def test_solveset_real_rational():
    """Test solveset_real for rational functions"""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert solveset_real((x - y**3) / ((y**2)*sqrt(1 - y**2)), x) \
        == FiniteSet(y**3)
    # issue 4486
    assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
def test_solveset_real_log():
    """log of a product: solve via the argument's zeros."""
    assert solveset_real(log((x-1)*(x+1)), x) == \
        FiniteSet(sqrt(2), -sqrt(2))
def test_poly_gens():
    """Exponential reducible to a polynomial in a power generator."""
    assert solveset_real(4**(2*(x**2) + 2*x) - 8, x) == \
        FiniteSet(Rational(-3, 2), S.Half)
def test_solve_abs():
    """Absolute-value equations, including symbolic right-hand sides."""
    n = Dummy('n')
    raises(ValueError, lambda: solveset(Abs(x) - 1, x))
    assert solveset(Abs(x) - n, x, S.Reals).dummy_eq(
        ConditionSet(x, Contains(n, Interval(0, oo)), {-n, n}))
    assert solveset_real(Abs(x) - 2, x) == FiniteSet(-2, 2)
    assert solveset_real(Abs(x) + 2, x) is S.EmptySet
    assert solveset_real(Abs(x + 3) - 2*Abs(x - 3), x) == \
        FiniteSet(1, 9)
    assert solveset_real(2*Abs(x) - Abs(x - 1), x) == \
        FiniteSet(-1, Rational(1, 3))
    sol = ConditionSet(
        x,
        And(
            Contains(b, Interval(0, oo)),
            Contains(a + b, Interval(0, oo)),
            Contains(a - b, Interval(0, oo))),
        FiniteSet(-a - b - 3, -a + b - 3, a - b - 3, a + b - 3))
    eq = Abs(Abs(x + 3) - a) - b
    assert invert_real(eq, 0, x)[1] == sol
    # Substituting a=3, b=1 each candidate should satisfy the equation.
    reps = {a: 3, b: 1}
    eqab = eq.subs(reps)
    for si in sol.subs(reps):
        assert not eqab.subs(x, si)
    assert dumeq(solveset(Eq(sin(Abs(x)), 1), x, domain=S.Reals), Union(
        Intersection(Interval(0, oo),
            ImageSet(Lambda(n, (-1)**n*pi/2 + n*pi), S.Integers)),
        Intersection(Interval(-oo, 0),
            ImageSet(Lambda(n, n*pi - (-1)**(-n)*pi/2), S.Integers))))
def test_issue_9824():
    """Perfect-square trig polynomials reduce to single ImageSets."""
    assert dumeq(solveset(sin(x)**2 - 2*sin(x) + 1, x), ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers))
    assert dumeq(solveset(cos(x)**2 - 2*cos(x) + 1, x), ImageSet(Lambda(n, 2*n*pi), S.Integers))
def test_issue_9565():
    """Abs inequality solved to a closed interval."""
    assert solveset_real(Abs((x - 1)/(x - 5)) <= Rational(1, 3), x) == Interval(-1, 2)
def test_issue_10069():
    """Strict Abs inequality excluding the pole at x=1."""
    eq = abs(1/(x - 1)) - 1 > 0
    assert solveset_real(eq, x) == Union(
        Interval.open(0, 1), Interval.open(1, 2))
def test_real_imag_splitting():
    """sqrt of sums/differences of squares with real symbols."""
    a, b = symbols('a b', real=True)
    assert solveset_real(sqrt(a**2 - b**2) - 3, a) == \
        FiniteSet(-sqrt(b**2 + 9), sqrt(b**2 + 9))
    assert solveset_real(sqrt(a**2 + b**2) - 3, a) != \
        S.EmptySet
def test_units():
    """Equations carrying physical units (cm) are handled."""
    assert solveset_real(1/x - 1/(2*cm), x) == FiniteSet(2*cm)
def test_solve_only_exp_1():
    """Pure-exponential equations over the reals."""
    y = Symbol('y', positive=True)
    assert solveset_real(exp(x) - y, x) == FiniteSet(log(y))
    assert solveset_real(exp(x) + exp(-x) - 4, x) == \
        FiniteSet(log(-sqrt(3) + 2), log(sqrt(3) + 2))
    assert solveset_real(exp(x) + exp(-x) - y, x) != S.EmptySet
def test_atan2():
    # The .inverse() method on atan2 works only if x.is_real is True and the
    # second argument is a real constant
    assert solveset_real(atan2(x, 2) - pi/3, x) == FiniteSet(2*sqrt(3))
def test_piecewise_solveset():
    """Piecewise equations and inequalities over the reals."""
    eq = Piecewise((x - 2, Gt(x, 2)), (2 - x, True)) - 3
    assert set(solveset_real(eq, x)) == set(FiniteSet(-1, 5))
    absxm3 = Piecewise(
        (x - 3, 0 <= x - 3),
        (3 - x, 0 > x - 3))
    y = Symbol('y', positive=True)
    assert solveset_real(absxm3 - y, x) == FiniteSet(-y + 3, y + 3)
    f = Piecewise(((x - 2)**2, x >= 0), (0, True))
    assert solveset(f, x, domain=S.Reals) == Union(FiniteSet(2), Interval(-oo, 0, True, True))
    assert solveset(
        Piecewise((x + 1, x > 0), (I, True)) - I, x, S.Reals
        ) == Interval(-oo, 0)
    assert solveset(Piecewise((x - 1, Ne(x, I)), (x, True)), x) == FiniteSet(1)
    # issue 19718
    g = Piecewise((1, x > 10), (0, True))
    assert solveset(g > 0, x, S.Reals) == Interval.open(10, oo)
    from sympy.logic.boolalg import BooleanTrue
    f = BooleanTrue()
    assert solveset(f, x, domain=Interval(-3, 10)) == Interval(-3, 10)
    # issue 20552
    f = Piecewise((0, Eq(x, 0)), (x**2/Abs(x), True))
    g = Piecewise((0, Eq(x, pi)), ((x - pi)/sin(x), True))
    assert solveset(f, x, domain=S.Reals) == FiniteSet(0)
    assert solveset(g) == FiniteSet(pi)
def test_solveset_complex_polynomial():
    """Complex roots of quadratics/cubics with symbolic coefficients."""
    assert solveset_complex(a*x**2 + b*x + c, x) == \
        FiniteSet(-b/(2*a) - sqrt(-4*a*c + b**2)/(2*a),
                  -b/(2*a) + sqrt(-4*a*c + b**2)/(2*a))
    assert solveset_complex(x - y**3, y) == FiniteSet(
        (-x**Rational(1, 3))/2 + I*sqrt(3)*x**Rational(1, 3)/2,
        x**Rational(1, 3),
        (-x**Rational(1, 3))/2 - I*sqrt(3)*x**Rational(1, 3)/2)
    assert solveset_complex(x + 1/x - 1, x) == \
        FiniteSet(S.Half + I*sqrt(3)/2, S.Half - I*sqrt(3)/2)
def test_sol_zero_complex():
    """The zero equation is satisfied on all of C."""
    assert solveset_complex(0, x) == S.Complexes
def test_solveset_complex_rational():
    """Rational-function equations over the complexes."""
    assert solveset_complex((x - 1)*(x - I)/(x - 3), x) == \
        FiniteSet(1, I)
    assert solveset_complex((x - y**3)/((y**2)*sqrt(1 - y**2)), x) == \
        FiniteSet(y**3)
    assert solveset_complex(-x**2 - I, x) == \
        FiniteSet(-sqrt(2)/2 + sqrt(2)*I/2, sqrt(2)/2 - sqrt(2)*I/2)
def test_solve_quintics():
    """Numeric verification of quintic roots (skipped: too slow)."""
    skip("This test is too slow")
    f = x**5 - 110*x**3 - 55*x**2 + 2310*x + 979
    s = solveset_complex(f, x)
    for root in s:
        res = f.subs(x, root.n()).n()
        assert tn(res, 0)
    f = x**5 + 15*x + 12
    s = solveset_complex(f, x)
    for root in s:
        res = f.subs(x, root.n()).n()
        assert tn(res, 0)
def test_solveset_complex_exp():
    """exp equations over C produce ImageSets of 2*pi*I translates."""
    from sympy.abc import x, n
    assert dumeq(solveset_complex(exp(x) - 1, x),
        imageset(Lambda(n, I*2*n*pi), S.Integers))
    assert dumeq(solveset_complex(exp(x) - I, x),
        imageset(Lambda(n, I*(2*n*pi + pi/2)), S.Integers))
    assert solveset_complex(1/exp(x), x) == S.EmptySet
    assert dumeq(solveset_complex(sinh(x).rewrite(exp), x),
        imageset(Lambda(n, n*pi*I), S.Integers))
def test_solveset_real_exp():
    """Exponentials with negative bases over the reals."""
    from sympy.abc import x, y
    assert solveset(Eq((-2)**x, 4), x, S.Reals) == FiniteSet(2)
    assert solveset(Eq(-2**x, 4), x, S.Reals) == S.EmptySet
    assert solveset(Eq((-3)**x, 27), x, S.Reals) == S.EmptySet
    assert solveset(Eq((-5)**(x+1), 625), x, S.Reals) == FiniteSet(3)
    assert solveset(Eq(2**(x-3), -16), x, S.Reals) == S.EmptySet
    assert solveset(Eq((-3)**(x - 3), -3**39), x, S.Reals) == FiniteSet(42)
    assert solveset(Eq(2**x, y), x, S.Reals) == Intersection(S.Reals, FiniteSet(log(y)/log(2)))
    assert invert_real((-2)**(2*x) - 16, 0, x) == (x, FiniteSet(2))
def test_solve_complex_log():
    """log equations over the complexes."""
    assert solveset_complex(log(x), x) == FiniteSet(1)
    assert solveset_complex(1 - log(a + 4*x**2), x) == \
        FiniteSet(-sqrt(-a + E)/2, sqrt(-a + E)/2)
def test_solve_complex_sqrt():
    """sqrt equations over the complexes, including complex constants."""
    assert solveset_complex(sqrt(5*x + 6) - 2 - x, x) == \
        FiniteSet(-S.One, S(2))
    assert solveset_complex(sqrt(5*x + 6) - (2 + 2*I) - x, x) == \
        FiniteSet(-S(2), 3 - 4*I)
    assert solveset_complex(4*x*(1 - a * sqrt(x)), x) == \
        FiniteSet(S.Zero, 1 / a ** 2)
def test_solveset_complex_tan():
    """tan rewritten via exp: poles are excluded from the ImageSet."""
    s = solveset_complex(tan(x).rewrite(exp), x)
    assert dumeq(s, imageset(Lambda(n, pi*n), S.Integers) - \
        imageset(Lambda(n, pi*n + pi/2), S.Integers))
@_both_exp_pow
def test_solve_trig():
    """Trigonometric equations solve to ImageSets/Unions over the integers.

    Expected values pin the exact structural output of _solve_trig1 (and, for
    one case, _solve_trig2), compared with ``dumeq`` since dummies differ.
    """
    from sympy.abc import n
    assert dumeq(solveset_real(sin(x), x),
        Union(imageset(Lambda(n, 2*pi*n), S.Integers),
              imageset(Lambda(n, 2*pi*n + pi), S.Integers)))
    assert dumeq(solveset_real(sin(x) - 1, x),
        imageset(Lambda(n, 2*pi*n + pi/2), S.Integers))
    assert dumeq(solveset_real(cos(x), x),
        Union(imageset(Lambda(n, 2*pi*n + pi/2), S.Integers),
              imageset(Lambda(n, 2*pi*n + pi*Rational(3, 2)), S.Integers)))
    assert dumeq(solveset_real(sin(x) + cos(x), x),
        Union(imageset(Lambda(n, 2*n*pi + pi*Rational(3, 4)), S.Integers),
              imageset(Lambda(n, 2*n*pi + pi*Rational(7, 4)), S.Integers)))
    # Pythagorean identity: equation is identically 1, never 0.
    assert solveset_real(sin(x)**2 + cos(x)**2, x) == S.EmptySet
    assert dumeq(solveset_complex(cos(x) - S.Half, x),
        Union(imageset(Lambda(n, 2*n*pi + pi*Rational(5, 3)), S.Integers),
              imageset(Lambda(n, 2*n*pi + pi/3), S.Integers)))
    assert dumeq(solveset(sin(y + a) - sin(y), a, domain=S.Reals),
        Union(ImageSet(Lambda(n, 2*n*pi), S.Integers),
        Intersection(ImageSet(Lambda(n, -I*(I*(
            2*n*pi + arg(-exp(-2*I*y))) +
            2*im(y))), S.Integers), S.Reals)))
    assert dumeq(solveset_real(sin(2*x)*cos(x) + cos(2*x)*sin(x)-1, x),
        ImageSet(Lambda(n, n*pi*Rational(2, 3) + pi/6), S.Integers))
    assert dumeq(solveset_real(2*tan(x)*sin(x) + 1, x), Union(
        ImageSet(Lambda(n, 2*n*pi + atan(sqrt(2)*sqrt(-1 + sqrt(17))/
            (1 - sqrt(17))) + pi), S.Integers),
        ImageSet(Lambda(n, 2*n*pi - atan(sqrt(2)*sqrt(-1 + sqrt(17))/
            (1 - sqrt(17))) + pi), S.Integers)))
    assert dumeq(solveset_real(cos(2*x)*cos(4*x) - 1, x),
        ImageSet(Lambda(n, n*pi), S.Integers))
    assert dumeq(solveset(sin(x/10) + Rational(3, 4)), Union(
        ImageSet(Lambda(n, 20*n*pi + 10*atan(3*sqrt(7)/7) + 10*pi), S.Integers),
        ImageSet(Lambda(n, 20*n*pi - 10*atan(3*sqrt(7)/7) + 20*pi), S.Integers)))
    assert dumeq(solveset(cos(x/15) + cos(x/5)), Union(
        ImageSet(Lambda(n, 30*n*pi + 15*pi/2), S.Integers),
        ImageSet(Lambda(n, 30*n*pi + 45*pi/2), S.Integers),
        ImageSet(Lambda(n, 30*n*pi + 75*pi/4), S.Integers),
        ImageSet(Lambda(n, 30*n*pi + 45*pi/4), S.Integers),
        ImageSet(Lambda(n, 30*n*pi + 105*pi/4), S.Integers),
        ImageSet(Lambda(n, 30*n*pi + 15*pi/4), S.Integers)))
    assert dumeq(solveset(sec(sqrt(2)*x/3) + 5), Union(
        ImageSet(Lambda(n, 3*sqrt(2)*(2*n*pi - pi + atan(2*sqrt(6)))/2), S.Integers),
        ImageSet(Lambda(n, 3*sqrt(2)*(2*n*pi - atan(2*sqrt(6)) + pi)/2), S.Integers)))
    assert dumeq(simplify(solveset(tan(pi*x) - cot(pi/2*x))), Union(
        ImageSet(Lambda(n, 4*n + 1), S.Integers),
        ImageSet(Lambda(n, 4*n + 3), S.Integers),
        ImageSet(Lambda(n, 4*n + Rational(7, 3)), S.Integers),
        ImageSet(Lambda(n, 4*n + Rational(5, 3)), S.Integers),
        ImageSet(Lambda(n, 4*n + Rational(11, 3)), S.Integers),
        ImageSet(Lambda(n, 4*n + Rational(1, 3)), S.Integers)))
    assert dumeq(solveset(cos(9*x)), Union(
        ImageSet(Lambda(n, 2*n*pi/9 + pi/18), S.Integers),
        ImageSet(Lambda(n, 2*n*pi/9 + pi/6), S.Integers)))
    assert dumeq(solveset(sin(8*x) + cot(12*x), x, S.Reals), Union(
        ImageSet(Lambda(n, n*pi/2 + pi/8), S.Integers),
        ImageSet(Lambda(n, n*pi/2 + 3*pi/8), S.Integers),
        ImageSet(Lambda(n, n*pi/2 + 5*pi/16), S.Integers),
        ImageSet(Lambda(n, n*pi/2 + 3*pi/16), S.Integers),
        ImageSet(Lambda(n, n*pi/2 + 7*pi/16), S.Integers),
        ImageSet(Lambda(n, n*pi/2 + pi/16), S.Integers)))
    # This is the only remaining solveset test that actually ends up being solved
    # by _solve_trig2(). All others are handled by the improved _solve_trig1.
    assert dumeq(solveset_real(2*cos(x)*cos(2*x) - 1, x),
        Union(ImageSet(Lambda(n, 2*n*pi + 2*atan(sqrt(-2*2**Rational(1, 3)*(67 +
            9*sqrt(57))**Rational(2, 3) + 8*2**Rational(2, 3) + 11*(67 +
            9*sqrt(57))**Rational(1, 3))/(3*(67 + 9*sqrt(57))**Rational(1, 6)))), S.Integers),
        ImageSet(Lambda(n, 2*n*pi - 2*atan(sqrt(-2*2**Rational(1, 3)*(67 +
            9*sqrt(57))**Rational(2, 3) + 8*2**Rational(2, 3) + 11*(67 +
            9*sqrt(57))**Rational(1, 3))/(3*(67 + 9*sqrt(57))**Rational(1, 6))) +
            2*pi), S.Integers)))
    # issue #16870
    assert dumeq(simplify(solveset(sin(x/180*pi) - S.Half, x, S.Reals)), Union(
        ImageSet(Lambda(n, 360*n + 150), S.Integers),
        ImageSet(Lambda(n, 360*n + 30), S.Integers)))
def test_solve_hyperbolic():
    """Hyperbolic equations: real roots as FiniteSets, complex roots as ImageSets."""
    # actual solver: _solve_trig1
    n = Dummy('n')
    assert solveset(sinh(x) + cosh(x), x) == S.EmptySet
    # Mixed trig/hyperbolic has no closed-form solver; expect a ConditionSet.
    assert solveset(sinh(x) + cos(x), x) == ConditionSet(x,
        Eq(cos(x) + sinh(x), 0), S.Complexes)
    assert solveset_real(sinh(x) + sech(x), x) == FiniteSet(
        log(sqrt(sqrt(5) - 2)))
    assert solveset_real(3*cosh(2*x) - 5, x) == FiniteSet(
        -log(3)/2, log(3)/2)
    assert solveset_real(sinh(x - 3) - 2, x) == FiniteSet(
        log((2 + sqrt(5))*exp(3)))
    assert solveset_real(cosh(2*x) + 2*sinh(x) - 5, x) == FiniteSet(
        log(-2 + sqrt(5)), log(1 + sqrt(2)))
    assert solveset_real((coth(x) + sinh(2*x))/cosh(x) - 3, x) == FiniteSet(
        log(S.Half + sqrt(5)/2), log(1 + sqrt(2)))
    assert solveset_real(cosh(x)*sinh(x) - 2, x) == FiniteSet(
        log(4 + sqrt(17))/2)
    assert solveset_real(sinh(x) + tanh(x) - 1, x) == FiniteSet(
        log(sqrt(2)/2 + sqrt(-S(1)/2 + sqrt(2))))
    assert dumeq(solveset_complex(sinh(x) - I/2, x), Union(
        ImageSet(Lambda(n, I*(2*n*pi + 5*pi/6)), S.Integers),
        ImageSet(Lambda(n, I*(2*n*pi + pi/6)), S.Integers)))
    assert dumeq(solveset_complex(sinh(x) + sech(x), x), Union(
        ImageSet(Lambda(n, 2*n*I*pi + log(sqrt(-2 + sqrt(5)))), S.Integers),
        ImageSet(Lambda(n, I*(2*n*pi + pi/2) + log(sqrt(2 + sqrt(5)))), S.Integers),
        ImageSet(Lambda(n, I*(2*n*pi + pi) + log(sqrt(-2 + sqrt(5)))), S.Integers),
        ImageSet(Lambda(n, I*(2*n*pi - pi/2) + log(sqrt(2 + sqrt(5)))), S.Integers)))
    assert dumeq(solveset(sinh(x/10) + Rational(3, 4)), Union(
        ImageSet(Lambda(n, 10*I*(2*n*pi + pi) + 10*log(2)), S.Integers),
        ImageSet(Lambda(n, 20*n*I*pi - 10*log(2)), S.Integers)))
    assert dumeq(solveset(cosh(x/15) + cosh(x/5)), Union(
        ImageSet(Lambda(n, 15*I*(2*n*pi + pi/2)), S.Integers),
        ImageSet(Lambda(n, 15*I*(2*n*pi - pi/2)), S.Integers),
        ImageSet(Lambda(n, 15*I*(2*n*pi - 3*pi/4)), S.Integers),
        ImageSet(Lambda(n, 15*I*(2*n*pi + 3*pi/4)), S.Integers),
        ImageSet(Lambda(n, 15*I*(2*n*pi - pi/4)), S.Integers),
        ImageSet(Lambda(n, 15*I*(2*n*pi + pi/4)), S.Integers)))
    assert dumeq(solveset(sech(sqrt(2)*x/3) + 5), Union(
        ImageSet(Lambda(n, 3*sqrt(2)*I*(2*n*pi - pi + atan(2*sqrt(6)))/2), S.Integers),
        ImageSet(Lambda(n, 3*sqrt(2)*I*(2*n*pi - atan(2*sqrt(6)) + pi)/2), S.Integers)))
    assert dumeq(solveset(tanh(pi*x) - coth(pi/2*x)), Union(
        ImageSet(Lambda(n, 2*I*(2*n*pi + pi/2)/pi), S.Integers),
        ImageSet(Lambda(n, 2*I*(2*n*pi - pi/2)/pi), S.Integers)))
    assert dumeq(solveset(cosh(9*x)), Union(
        ImageSet(Lambda(n, I*(2*n*pi + pi/2)/9), S.Integers),
        ImageSet(Lambda(n, I*(2*n*pi - pi/2)/9), S.Integers)))
    # issues #9606 / #9531:
    assert solveset(sinh(x), x, S.Reals) == FiniteSet(0)
    assert dumeq(solveset(sinh(x), x, S.Complexes), Union(
        ImageSet(Lambda(n, I*(2*n*pi + pi)), S.Integers),
        ImageSet(Lambda(n, 2*n*I*pi), S.Integers)))
    # issues #11218 / #18427
    assert dumeq(solveset(sin(pi*x), x, S.Reals), Union(
        ImageSet(Lambda(n, (2*n*pi + pi)/pi), S.Integers),
        ImageSet(Lambda(n, 2*n), S.Integers)))
    assert dumeq(solveset(sin(pi*x), x), Union(
        ImageSet(Lambda(n, (2*n*pi + pi)/pi), S.Integers),
        ImageSet(Lambda(n, 2*n), S.Integers)))
    # issue #17543
    assert dumeq(simplify(solveset(I*cot(8*x - 8*E), x)), Union(
        ImageSet(Lambda(n, n*pi/4 - 13*pi/16 + E), S.Integers),
        ImageSet(Lambda(n, n*pi/4 - 11*pi/16 + E), S.Integers)))
    # issues #18490 / #19489
    assert solveset(cosh(x) + cosh(3*x) - cosh(5*x), x, S.Reals
        ).dummy_eq(ConditionSet(x,
        Eq(cosh(x) + cosh(3*x) - cosh(5*x), 0), S.Reals))
    assert solveset(sinh(8*x) + coth(12*x)).dummy_eq(
        ConditionSet(x, Eq(sinh(8*x) + coth(12*x), 0), S.Complexes))
def test_solve_trig_hyp_symbolic():
    """Trig/hyperbolic equations with symbolic coefficients.

    Solutions come wrapped in a ConditionSet guarding against the coefficient
    being zero (which would make the equation degenerate).
    """
    # actual solver: _solve_trig1
    assert dumeq(solveset(sin(a*x), x), ConditionSet(x, Ne(a, 0), Union(
        ImageSet(Lambda(n, (2*n*pi + pi)/a), S.Integers),
        ImageSet(Lambda(n, 2*n*pi/a), S.Integers))))
    assert dumeq(solveset(cosh(x/a), x), ConditionSet(x, Ne(a, 0), Union(
        ImageSet(Lambda(n, I*a*(2*n*pi + pi/2)), S.Integers),
        ImageSet(Lambda(n, I*a*(2*n*pi - pi/2)), S.Integers))))
    assert dumeq(solveset(sin(2*sqrt(3)/3*a**2/(b*pi)*x)
        + cos(4*sqrt(3)/3*a**2/(b*pi)*x), x),
        ConditionSet(x, Ne(b, 0) & Ne(a**2, 0), Union(
        ImageSet(Lambda(n, sqrt(3)*pi*b*(2*n*pi + pi/2)/(2*a**2)), S.Integers),
        ImageSet(Lambda(n, sqrt(3)*pi*b*(2*n*pi - 5*pi/6)/(2*a**2)), S.Integers),
        ImageSet(Lambda(n, sqrt(3)*pi*b*(2*n*pi - pi/6)/(2*a**2)), S.Integers))))
    assert dumeq(simplify(solveset(cot((1 + I)*x) - cot((3 + 3*I)*x), x)), Union(
        ImageSet(Lambda(n, pi*(1 - I)*(4*n + 1)/4), S.Integers),
        ImageSet(Lambda(n, pi*(1 - I)*(4*n - 1)/4), S.Integers)))
    assert dumeq(solveset(cosh((a**2 + 1)*x) - 3, x),
        ConditionSet(x, Ne(a**2 + 1, 0), Union(
        ImageSet(Lambda(n, (2*n*I*pi + log(3 - 2*sqrt(2)))/(a**2 + 1)), S.Integers),
        ImageSet(Lambda(n, (2*n*I*pi + log(2*sqrt(2) + 3))/(a**2 + 1)), S.Integers))))
    # A real symbol makes ar**2 + 1 provably nonzero, so no ConditionSet guard.
    ar = Symbol('ar', real=True)
    assert solveset(cosh((ar**2 + 1)*x) - 2, x, S.Reals) == FiniteSet(
        log(sqrt(3) + 2)/(ar**2 + 1), log(2 - sqrt(3))/(ar**2 + 1))
def test_issue_9616():
    """sinh(x) + tanh(x) - 1 and its exp-rewritten form solve consistently.

    The rewritten form carries Complement sets that exclude the poles of
    tanh introduced by the exp representation.
    """
    assert dumeq(solveset(sinh(x) + tanh(x) - 1, x), Union(
        ImageSet(Lambda(n, 2*n*I*pi + log(sqrt(2)/2 + sqrt(-S.Half + sqrt(2)))), S.Integers),
        ImageSet(Lambda(n, I*(2*n*pi - atan(sqrt(2)*sqrt(S.Half + sqrt(2))) + pi)
            + log(sqrt(1 + sqrt(2)))), S.Integers),
        ImageSet(Lambda(n, I*(2*n*pi + pi) + log(-sqrt(2)/2 + sqrt(-S.Half + sqrt(2)))), S.Integers),
        ImageSet(Lambda(n, I*(2*n*pi - pi + atan(sqrt(2)*sqrt(S.Half + sqrt(2))))
            + log(sqrt(1 + sqrt(2)))), S.Integers)))
    f1 = (sinh(x)).rewrite(exp)
    f2 = (tanh(x)).rewrite(exp)
    assert dumeq(solveset(f1 + f2 - 1, x), Union(
        Complement(ImageSet(
            Lambda(n, I*(2*n*pi + pi) + log(-sqrt(2)/2 + sqrt(-S.Half + sqrt(2)))), S.Integers),
            ImageSet(Lambda(n, I*(2*n*pi + pi)/2), S.Integers)),
        Complement(ImageSet(Lambda(n, I*(2*n*pi - pi + atan(sqrt(2)*sqrt(S.Half + sqrt(2))))
            + log(sqrt(1 + sqrt(2)))), S.Integers),
            ImageSet(Lambda(n, I*(2*n*pi + pi)/2), S.Integers)),
        Complement(ImageSet(Lambda(n, I*(2*n*pi - atan(sqrt(2)*sqrt(S.Half + sqrt(2))) + pi)
            + log(sqrt(1 + sqrt(2)))), S.Integers),
            ImageSet(Lambda(n, I*(2*n*pi + pi)/2), S.Integers)),
        Complement(
            ImageSet(Lambda(n, 2*n*I*pi + log(sqrt(2)/2 + sqrt(-S.Half + sqrt(2)))), S.Integers),
            ImageSet(Lambda(n, I*(2*n*pi + pi)/2), S.Integers))))
def test_solve_invalid_sol():
    """Removable singularities must not be reported as solutions."""
    real_sols = solveset_real(sin(x)/x, x)
    assert 0 not in real_sols
    complex_sols = solveset_complex((exp(x) - 1)/x, x)
    assert 0 not in complex_sols
@XFAIL
def test_solve_trig_simplified():
    """Desired (but not yet produced) single-ImageSet forms of trig solutions.

    solveset currently returns these as unions of two period-2*pi ImageSets;
    this XFAIL records the simplified period-pi targets.
    """
    from sympy.abc import n
    assert dumeq(solveset_real(sin(x), x),
        imageset(Lambda(n, n*pi), S.Integers))
    assert dumeq(solveset_real(cos(x), x),
        imageset(Lambda(n, n*pi + pi/2), S.Integers))
    assert dumeq(solveset_real(cos(x) + sin(x), x),
        imageset(Lambda(n, n*pi - pi/4), S.Integers))
@XFAIL
def test_solve_lambert():
    """Equations whose solutions require the Lambert W function.

    XFAIL: solveset does not yet have a transcendental/Lambert solver;
    expected values mirror the old ``solve`` results.
    """
    assert solveset_real(x*exp(x) - 1, x) == FiniteSet(LambertW(1))
    assert solveset_real(exp(x) + x, x) == FiniteSet(-LambertW(1))
    assert solveset_real(x + 2**x, x) == \
        FiniteSet(-LambertW(log(2))/log(2))
    # issue 4739
    ans = solveset_real(3*x + 5 + 2**(-5*x + 3), x)
    assert ans == FiniteSet(Rational(-5, 3) +
                            LambertW(-10240*2**Rational(1, 3)*log(2)/3)/(5*log(2)))
    eq = 2*(3*x + 4)**5 - 6*7**(3*x + 9)
    result = solveset_real(eq, x)
    ans = FiniteSet((log(2401) +
                     5*LambertW(-log(7**(7*3**Rational(1, 5)/5))))/(3*log(7))/-1)
    assert result == ans
    # The expanded form should solve identically.
    assert solveset_real(eq.expand(), x) == result
    assert solveset_real(5*x - 1 + 3*exp(2 - 7*x), x) == \
        FiniteSet(Rational(1, 5) + LambertW(-21*exp(Rational(3, 5))/5)/7)
    assert solveset_real(2*x + 5 + log(3*x - 2), x) == \
        FiniteSet(Rational(2, 3) + LambertW(2*exp(Rational(-19, 3))/3)/2)
    assert solveset_real(3*x + log(4*x), x) == \
        FiniteSet(LambertW(Rational(3, 4))/3)
    assert solveset_real(x**x - 2) == FiniteSet(exp(LambertW(log(2))))
    a = Symbol('a')
    assert solveset_real(-a*x + 2*x*log(x), x) == FiniteSet(exp(a/2))
    a = Symbol('a', real=True)
    assert solveset_real(a/x + exp(x/2), x) == \
        FiniteSet(2*LambertW(-a/2))
    assert solveset_real((a/x + exp(x/2)).diff(x), x) == \
        FiniteSet(4*LambertW(sqrt(2)*sqrt(a)/4))
    # coverage test
    assert solveset_real(tanh(x + 3)*tanh(x - 3) - 1, x) == EmptySet()
    assert solveset_real((x**2 - 2*x + 1).subs(x, log(x) + 3*x), x) == \
        FiniteSet(LambertW(3*S.Exp1)/3)
    assert solveset_real((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1), x) == \
        FiniteSet(LambertW(3*exp(-sqrt(2)))/3, LambertW(3*exp(sqrt(2)))/3)
    assert solveset_real((x**2 - 2*x - 2).subs(x, log(x) + 3*x), x) == \
        FiniteSet(LambertW(3*exp(1 + sqrt(3)))/3, LambertW(3*exp(-sqrt(3) + 1))/3)
    assert solveset_real(x*log(x) + 3*x + 1, x) == \
        FiniteSet(exp(-3 + LambertW(-exp(3))))
    eq = (x*exp(x) - 3).subs(x, x*exp(x))
    assert solveset_real(eq, x) == \
        FiniteSet(LambertW(3*exp(-LambertW(3))))
    assert solveset_real(3*log(a**(3*x + 5)) + a**(3*x + 5), x) == \
        FiniteSet(-((log(a**5) + LambertW(Rational(1, 3)))/(3*log(a))))
    p = symbols('p', positive=True)
    assert solveset_real(3*log(p**(3*x + 5)) + p**(3*x + 5), x) == \
        FiniteSet(
        log((-3**Rational(1, 3) - 3**Rational(5, 6)*I)*LambertW(Rational(1, 3))**Rational(1, 3)/(2*p**Rational(5, 3)))/log(p),
        log((-3**Rational(1, 3) + 3**Rational(5, 6)*I)*LambertW(Rational(1, 3))**Rational(1, 3)/(2*p**Rational(5, 3)))/log(p),
        log((3*LambertW(Rational(1, 3))/p**5)**(1/(3*log(p)))),)  # checked numerically
    # check collection
    b = Symbol('b')
    eq = 3*log(a**(3*x + 5)) + b*log(a**(3*x + 5)) + a**(3*x + 5)
    assert solveset_real(eq, x) == FiniteSet(
        -((log(a**5) + LambertW(1/(b + 3)))/(3*log(a))))
    # issue 4271
    assert solveset_real((a/x + exp(x/2)).diff(x, 2), x) == FiniteSet(
        6*LambertW((-1)**Rational(1, 3)*a**Rational(1, 3)/3))
    assert solveset_real(x**3 - 3**x, x) == \
        FiniteSet(-3/log(3)*LambertW(-log(3)/3))
    assert solveset_real(3**cos(x) - cos(x)**3) == FiniteSet(
        acos(-3*LambertW(-log(3)/3)/log(3)))
    assert solveset_real(x**2 - 2**x, x) == \
        solveset_real(-x**2 + 2**x, x)
    assert solveset_real(3*log(x) - x*log(3)) == FiniteSet(
        -3*LambertW(-log(3)/3)/log(3),
        -3*LambertW(-log(3)/3, -1)/log(3))
    assert solveset_real(LambertW(2*x) - y) == FiniteSet(
        y*exp(y)/2)
@XFAIL
def test_other_lambert():
    """x**a == a**x has the trivial root a plus a LambertW-expressible root."""
    a = Rational(6, 5)
    expected = FiniteSet(a, -a*LambertW(-log(a)/a)/log(a))
    assert solveset_real(x**a - a**x, x) == expected
@_both_exp_pow
def test_solveset():
    """General solveset API: argument validation, trivial inputs, domains."""
    f = Function('f')
    # A symbol to solve for must be determinable.
    raises(ValueError, lambda: solveset(x + y))
    assert solveset(x, 1) == S.EmptySet
    # Solving for an applied function works like solving for a symbol.
    assert solveset(f(1)**2 + y + 1, f(1)
        ) == FiniteSet(-sqrt(-y - 1), sqrt(-y - 1))
    assert solveset(f(1)**2 - 1, f(1), S.Reals) == FiniteSet(-1, 1)
    assert solveset(f(1)**2 + 1, f(1)) == FiniteSet(-I, I)
    assert solveset(x - 1, 1) == FiniteSet(x)
    assert solveset(sin(x) - cos(x), sin(x)) == FiniteSet(cos(x))
    # Trivially true/false equations resolve to the whole domain / empty set.
    assert solveset(0, domain=S.Reals) == S.Reals
    assert solveset(1) == S.EmptySet
    assert solveset(True, domain=S.Reals) == S.Reals  # issue 10197
    assert solveset(False, domain=S.Reals) == S.EmptySet
    assert solveset(exp(x) - 1, domain=S.Reals) == FiniteSet(0)
    assert solveset(exp(x) - 1, x, S.Reals) == FiniteSet(0)
    assert solveset(Eq(exp(x), 1), x, S.Reals) == FiniteSet(0)
    assert solveset(exp(x) - 1, exp(x), S.Reals) == FiniteSet(1)
    A = Indexed('A', x)
    assert solveset(A - 1, A, S.Reals) == FiniteSet(1)
    # Inequalities solve to intervals over the reals.
    assert solveset(x - 1 >= 0, x, S.Reals) == Interval(1, oo)
    assert solveset(exp(x) - 1 >= 0, x, S.Reals) == Interval(0, oo)
    assert dumeq(solveset(exp(x) - 1, x), imageset(Lambda(n, 2*I*pi*n), S.Integers))
    assert dumeq(solveset(Eq(exp(x), 1), x), imageset(Lambda(n, 2*I*pi*n),
                                                      S.Integers))
    # issue 13825
    assert solveset(x**2 + f(0) + 1, x) == {-sqrt(-f(0) - 1), sqrt(-f(0) - 1)}
    # issue 19977
    assert solveset(atan(log(x)) > 0, x, domain=Interval.open(0, oo)) == Interval.open(1, oo)
def test__solveset_multi():
    """Internal multivariate solver _solveset_multi over per-symbol domains.

    Positive-dimensional solution sets come back as ImageSets over
    ProductSets; zero-dimensional ones as FiniteSets of tuples.
    """
    from sympy.solvers.solveset import _solveset_multi
    from sympy import Reals
    # Basic univariate case:
    from sympy.abc import x
    assert _solveset_multi([x**2-1], [x], [S.Reals]) == FiniteSet((1,), (-1,))
    # Linear systems of two equations
    from sympy.abc import x, y
    assert _solveset_multi([x+y, x+1], [x, y], [Reals, Reals]) == FiniteSet((-1, 1))
    # Symbol order determines tuple order in the result.
    assert _solveset_multi([x+y, x+1], [y, x], [Reals, Reals]) == FiniteSet((1, -1))
    assert _solveset_multi([x+y, x-y-1], [x, y], [Reals, Reals]) == FiniteSet((S(1)/2, -S(1)/2))
    assert _solveset_multi([x-1, y-2], [x, y], [Reals, Reals]) == FiniteSet((1, 2))
    # assert dumeq(_solveset_multi([x+y], [x, y], [Reals, Reals]), ImageSet(Lambda(x, (x, -x)), Reals))
    assert dumeq(_solveset_multi([x+y], [x, y], [Reals, Reals]), Union(
        ImageSet(Lambda(((x,),), (x, -x)), ProductSet(Reals)),
        ImageSet(Lambda(((y,),), (-y, y)), ProductSet(Reals))))
    assert _solveset_multi([x+y, x+y+1], [x, y], [Reals, Reals]) == S.EmptySet
    assert _solveset_multi([x+y, x-y, x-1], [x, y], [Reals, Reals]) == S.EmptySet
    assert _solveset_multi([x+y, x-y, x-1], [y, x], [Reals, Reals]) == S.EmptySet
    # Systems of three equations:
    from sympy.abc import x, y, z
    assert _solveset_multi([x+y+z-1, x+y-z-2, x-y-z-3], [x, y, z], [Reals,
        Reals, Reals]) == FiniteSet((2, -S.Half, -S.Half))
    # Nonlinear systems:
    from sympy.abc import r, theta, z, x, y
    assert _solveset_multi([x**2+y**2-2, x+y], [x, y], [Reals, Reals]) == FiniteSet((-1, 1), (1, -1))
    assert _solveset_multi([x**2-1, y], [x, y], [Reals, Reals]) == FiniteSet((1, 0), (-1, 0))
    #assert _solveset_multi([x**2-y**2], [x, y], [Reals, Reals]) == Union(
    #        ImageSet(Lambda(x, (x, -x)), Reals), ImageSet(Lambda(x, (x, x)), Reals))
    assert dumeq(_solveset_multi([x**2-y**2], [x, y], [Reals, Reals]), Union(
        ImageSet(Lambda(((x,),), (x, -Abs(x))), ProductSet(Reals)),
        ImageSet(Lambda(((x,),), (x, Abs(x))), ProductSet(Reals)),
        ImageSet(Lambda(((y,),), (-Abs(y), y)), ProductSet(Reals)),
        ImageSet(Lambda(((y,),), (Abs(y), y)), ProductSet(Reals))))
    # Restricted domains cut away solutions outside the intervals.
    assert _solveset_multi([r*cos(theta)-1, r*sin(theta)], [theta, r],
        [Interval(0, pi), Interval(-1, 1)]) == FiniteSet((0, 1), (pi, -1))
    assert _solveset_multi([r*cos(theta)-1, r*sin(theta)], [r, theta],
        [Interval(0, 1), Interval(0, pi)]) == FiniteSet((1, 0))
    #assert _solveset_multi([r*cos(theta)-r, r*sin(theta)], [r, theta],
    #        [Interval(0, 1), Interval(0, pi)]) == ?
    assert dumeq(_solveset_multi([r*cos(theta)-r, r*sin(theta)], [r, theta],
        [Interval(0, 1), Interval(0, pi)]), Union(
        ImageSet(Lambda(((r,),), (r, 0)), ImageSet(Lambda(r, (r,)), Interval(0, 1))),
        ImageSet(Lambda(((theta,),), (0, theta)), ImageSet(Lambda(theta, (theta,)), Interval(0, pi)))))
def test_conditionset():
    """Unsolvable equations/inequalities come back as ConditionSets.

    ``dummy_eq`` is used since the bound variable of a ConditionSet is a dummy.
    """
    # Identically-true equation: the solution is the whole domain.
    assert solveset(Eq(sin(x)**2 + cos(x)**2, 1), x, domain=S.Reals
        ) is S.Reals
    assert solveset(Eq(x**2 + x*sin(x), 1), x, domain=S.Reals
        ).dummy_eq(ConditionSet(x, Eq(x**2 + x*sin(x) - 1, 0), S.Reals))
    # -I*(exp(I*x) - exp(-I*x))/2 is sin(x) in exp form, so this IS solvable.
    assert dumeq(solveset(Eq(-I*(exp(I*x) - exp(-I*x))/2, 1), x
        ), imageset(Lambda(n, 2*n*pi + pi/2), S.Integers))
    assert solveset(x + sin(x) > 1, x, domain=S.Reals
        ).dummy_eq(ConditionSet(x, x + sin(x) > 1, S.Reals))
    assert solveset(Eq(sin(Abs(x)), x), x, domain=S.Reals
        ).dummy_eq(ConditionSet(x, Eq(-x + sin(Abs(x)), 0), S.Reals))
    assert solveset(y**x-z, x, S.Reals
        ).dummy_eq(ConditionSet(x, Eq(y**x - z, 0), S.Reals))
@XFAIL
def test_conditionset_equality():
    """Check equality of different representations of ConditionSet."""
    assert solveset(Eq(tan(x), y), x) == ConditionSet(x, Eq(tan(x), y), S.Complexes)
def test_solveset_domain():
    """Interval domains restrict which roots of a polynomial are returned."""
    # Each polynomial has roots outside the interval that must be dropped.
    assert solveset(x**4 - 16, x, Interval(0, 10)) == FiniteSet(2)
    assert solveset(x**2 - 1, x, Interval(0, oo)) == FiniteSet(1)
    assert solveset(x**2 - x - 6, x, Interval(0, oo)) == FiniteSet(3)
def test_improve_coverage():
    """Coverage for the internal _has_rational_power helper and fallback path."""
    from sympy.solvers.solveset import _has_rational_power
    # exp(x) + sin(x) has no closed-form real solution -> ConditionSet.
    solution = solveset(exp(x) + sin(x), x, S.Reals)
    unsolved_object = ConditionSet(x, Eq(exp(x) + sin(x), 0), S.Reals)
    assert solution.dummy_eq(unsolved_object)
    assert _has_rational_power(sin(x)*exp(x) + 1, x) == (False, S.One)
    assert _has_rational_power((sin(x)**2)*(exp(x) + 1)**3, x) == (False, S.One)
def test_issue_9522():
    """Equations that cancel to a contradiction have no real solutions.

    The candidate x = 2 for the first equation is rejected because it makes
    the denominator x**2 - 4 vanish.
    """
    lhs = 1/(x**2 - 4) + x
    rhs = 1/(x**2 - 4) + 2
    assert solveset(Eq(lhs, rhs), x, S.Reals) == EmptySet()
    assert solveset(Eq(1/x + x, 1/x), x, S.Reals) == EmptySet()
def test_solvify():
    """solvify returns a plain list of solutions (or raises when it cannot)."""
    assert solvify(x**2 + 10, x, S.Reals) == []
    assert solvify(x**3 + 1, x, S.Complexes) == [-1, S.Half - sqrt(3)*I/2,
                                                 S.Half + sqrt(3)*I/2]
    assert solvify(log(x), x, S.Reals) == [1]
    # Periodic solutions are reported within one fundamental period.
    assert solvify(cos(x), x, S.Reals) == [pi/2, pi*Rational(3, 2)]
    assert solvify(sin(x) + 1, x, S.Reals) == [pi*Rational(3, 2)]
    raises(NotImplementedError, lambda: solvify(sin(exp(x)), x, S.Complexes))
def test_abs_invert_solvify():
    # solvify signals an un-listable solution set by returning None.
    assert solvify(sin(Abs(x)), x, S.Reals) is None
def test_linear_eq_to_matrix():
    """linear_eq_to_matrix builds (A, b) from equations, validating inputs."""
    eqns1 = [2*x + y - 2*z - 3, x - y - z, x + y + 3*z - 12]
    eqns2 = [Eq(3*x + 2*y - z, 1), Eq(2*x - 2*y + 4*z, -2), -2*x + y - 2*z]
    A, B = linear_eq_to_matrix(eqns1, x, y, z)
    assert A == Matrix([[2, 1, -2], [1, -1, -1], [1, 1, 3]])
    assert B == Matrix([[3], [0], [12]])
    A, B = linear_eq_to_matrix(eqns2, x, y, z)
    assert A == Matrix([[3, 2, -1], [2, -2, 4], [-2, 1, -2]])
    assert B == Matrix([[1], [-2], [0]])
    # Pure symbolic coefficients
    eqns3 = [a*b*x + b*y + c*z - d, e*x + d*x + f*y + g*z - h, i*x + j*y + k*z - l]
    A, B = linear_eq_to_matrix(eqns3, x, y, z)
    assert A == Matrix([[a*b, b, c], [d + e, f, g], [i, j, k]])
    assert B == Matrix([[d], [h], [l]])
    # raise ValueError if
    # 1) no symbols are given
    raises(ValueError, lambda: linear_eq_to_matrix(eqns3))
    # 2) there are duplicates
    raises(ValueError, lambda: linear_eq_to_matrix(eqns3, [x, x, y]))
    # 3) there are non-symbols
    raises(ValueError, lambda: linear_eq_to_matrix(eqns3, [x, 1/a, y]))
    # 4) a nonlinear term is detected in the original expression
    raises(NonlinearError, lambda: linear_eq_to_matrix(Eq(1/x + x, 1/x), [x]))
    # A constant equation yields a zero row and the negated constant in b.
    assert linear_eq_to_matrix(1, x) == (Matrix([[0]]), Matrix([[-1]]))
    # issue 15195
    assert linear_eq_to_matrix(x + y*(z*(3*x + 2) + 3), x) == (
        Matrix([[3*y*z + 1]]), Matrix([[-y*(2*z + 3)]]))
    assert linear_eq_to_matrix(Matrix(
        [[a*x + b*y - 7], [5*x + 6*y - c]]), x, y) == (
        Matrix([[a, b], [5, 6]]), Matrix([[7], [c]]))
    # issue 15312
    assert linear_eq_to_matrix(Eq(x + 2, 1), x) == (
        Matrix([[1]]), Matrix([[-1]]))
def test_issue_16577():
    """Symbolically-scaled terms are expanded and collected per variable."""
    eq = Eq(a*(2*x + 3*y) + 4*y, 5)
    coeffs, rhs = linear_eq_to_matrix(eq, x, y)
    assert coeffs == Matrix([[2*a, 3*a + 4]])
    assert rhs == Matrix([[5]])
def test_linsolve():
    """linsolve: input forms, validation, symbolic coefficients, free variables."""
    x1, x2, x3, x4 = symbols('x1, x2, x3, x4')
    # Test for different input forms
    M = Matrix([[1, 2, 1, 1, 7], [1, 2, 2, -1, 12], [2, 4, 0, 6, 4]])
    system1 = A, B = M[:, :-1], M[:, -1]
    Eqns = [x1 + 2*x2 + x3 + x4 - 7, x1 + 2*x2 + 2*x3 - x4 - 12,
            2*x1 + 4*x2 + 6*x4 - 4]
    # Underdetermined system: free variables x2 and x4 appear in the answer.
    sol = FiniteSet((-2*x2 - 3*x4 + 2, x2, 2*x4 + 5, x4))
    assert linsolve(Eqns, (x1, x2, x3, x4)) == sol
    assert linsolve(Eqns, *(x1, x2, x3, x4)) == sol
    assert linsolve(system1, (x1, x2, x3, x4)) == sol
    assert linsolve(system1, *(x1, x2, x3, x4)) == sol
    # issue 9667 - symbols can be Dummy symbols
    x1, x2, x3, x4 = symbols('x:4', cls=Dummy)
    assert linsolve(system1, x1, x2, x3, x4) == FiniteSet(
        (-2*x2 - 3*x4 + 2, x2, 2*x4 + 5, x4))
    # raise ValueError for garbage value
    raises(ValueError, lambda: linsolve(Eqns))
    raises(ValueError, lambda: linsolve(x1))
    raises(ValueError, lambda: linsolve(x1, x2))
    raises(ValueError, lambda: linsolve((A,), x1, x2))
    raises(ValueError, lambda: linsolve(A, B, x1, x2))
    #raise ValueError if equations are non-linear in given variables
    raises(NonlinearError, lambda: linsolve([x + y - 1, x ** 2 + y - 3], [x, y]))
    raises(NonlinearError, lambda: linsolve([cos(x) + y, x + y], [x, y]))
    # Nonlinear only in symbols NOT being solved for is fine.
    assert linsolve([x + z - 1, x ** 2 + y - 3], [z, y]) == {(-x + 1, -x**2 + 3)}
    # Fully symbolic test
    A = Matrix([[a, b], [c, d]])
    B = Matrix([[e], [g]])
    system2 = (A, B)
    sol = FiniteSet(((-b*g + d*e)/(a*d - b*c), (a*g - c*e)/(a*d - b*c)))
    assert linsolve(system2, [x, y]) == sol
    # No solution
    A = Matrix([[1, 2, 3], [2, 4, 6], [3, 6, 9]])
    B = Matrix([0, 0, 1])
    assert linsolve((A, B), (x, y, z)) == EmptySet()
    # Issue #10056
    A, B, J1, J2 = symbols('A B J1 J2')
    Augmatrix = Matrix([
        [2*I*J1, 2*I*J2, -2/J1],
        [-2*I*J2, -2*I*J1, 2/J2],
        [0, 2, 2*I/(J1*J2)],
        [2, 0, 0],
    ])
    assert linsolve(Augmatrix, A, B) == FiniteSet((0, I/(J1*J2)))
    # Issue #10121 - Assignment of free variables
    Augmatrix = Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]])
    assert linsolve(Augmatrix, a, b, c, d, e) == FiniteSet((a, 0, c, 0, e))
    #raises(IndexError, lambda: linsolve(Augmatrix, a, b, c))
    # Auto-generated free symbols are named tau0, tau1, ... avoiding clashes.
    x0, x1, x2, _x0 = symbols('tau0 tau1 tau2 _tau0')
    assert linsolve(Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, _x0]])
        ) == FiniteSet((x0, 0, x1, _x0, x2))
    x0, x1, x2, _x0 = symbols('tau00 tau01 tau02 tau0')
    assert linsolve(Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, _x0]])
        ) == FiniteSet((x0, 0, x1, _x0, x2))
    x0, x1, x2, _x0 = symbols('tau00 tau01 tau02 tau1')
    assert linsolve(Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, _x0]])
        ) == FiniteSet((x0, 0, x1, _x0, x2))
    # symbols can be given as generators
    x0, x2, x4 = symbols('x0, x2, x4')
    assert linsolve(Augmatrix, numbered_symbols('x')
        ) == FiniteSet((x0, 0, x2, 0, x4))
    Augmatrix[-1, -1] = x0
    # use Dummy to avoid clash; the names may clash but the symbols
    # will not
    Augmatrix[-1, -1] = symbols('_x0')
    assert len(linsolve(
        Augmatrix, numbered_symbols('x', cls=Dummy)).free_symbols) == 4
    # Issue #12604
    f = Function('f')
    assert linsolve([f(x) - 5], f(x)) == FiniteSet((5,))
    # Issue #14860
    from sympy.physics.units import meter, newton, kilo
    kN = kilo*newton
    Eqns = [8*kN + x + y, 28*kN*meter + 3*x*meter]
    assert linsolve(Eqns, x, y) == {
        (kilo*newton*Rational(-28, 3), kN*Rational(4, 3))}
    # linsolve fully expands expressions, so removable singularities
    # and other nonlinearity does not raise an error
    assert linsolve([Eq(x, x + y)], [x, y]) == {(x, 0)}
    assert linsolve([Eq(1/x, 1/x + y)], [x, y]) == {(x, 0)}
    assert linsolve([Eq(y/x, y/x + y)], [x, y]) == {(x, 0)}
    assert linsolve([Eq(x*(x + 1), x**2 + y)], [x, y]) == {(y, y)}
    # corner cases
    #
    # XXX: The case below should give the same as for [0]
    # assert linsolve([], [x]) == {(x,)}
    assert linsolve([], [x]) == EmptySet()
    assert linsolve([0], [x]) == {(x,)}
    assert linsolve([x], [x, y]) == {(0, y)}
    assert linsolve([x, 0], [x, y]) == {(0, y)}
def test_linsolve_large_sparse():
    #
    # This is mainly a performance test
    #
    def _mk_eqs_sol(n):
        # Build 2n equations over 2n symbols: xi + yi = 0 and xi - yi = -1,
        # whose unique solution is xi = -1/2, yi = 1/2 for every pair.
        xs = symbols('x:{}'.format(n))
        ys = symbols('y:{}'.format(n))
        syms = xs + ys
        eqs = []
        sol = (-S.Half,) * n + (S.Half,) * n
        for xi, yi in zip(xs, ys):
            eqs.extend([xi + yi, xi - yi + 1])
        return eqs, syms, FiniteSet(sol)

    n = 500
    eqs, syms, sol = _mk_eqs_sol(n)
    assert linsolve(eqs, syms) == sol
def test_linsolve_immutable():
    """linsolve accepts ImmutableDenseMatrix input, with or without symbols."""
    coeffs = ImmutableDenseMatrix([[1, 1, 2], [0, 1, 2], [0, 0, 1]])
    rhs = ImmutableDenseMatrix([2, 1, -1])
    assert linsolve([coeffs, rhs], (x, y, z)) == FiniteSet((1, 3, -1))
    # An augmented matrix alone is also accepted; symbols are generated.
    augmented = ImmutableDenseMatrix([[1, 1, 7], [1, -1, 3]])
    assert linsolve(augmented) == FiniteSet((5, 2))
def test_solve_decomposition():
    """solve_decomposition handles composite functions by solving stage-wise."""
    n = Dummy('n')
    f1 = exp(3*x) - 6*exp(2*x) + 11*exp(x) - 6
    f2 = sin(x)**2 - 2*sin(x) + 1
    f3 = sin(x)**2 - sin(x)
    f4 = sin(x + 1)
    f5 = exp(x + 2) - 1
    f6 = 1/log(x)
    f7 = 1/x
    s1 = ImageSet(Lambda(n, 2*n*pi), S.Integers)
    s2 = ImageSet(Lambda(n, 2*n*pi + pi), S.Integers)
    s3 = ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers)
    s4 = ImageSet(Lambda(n, 2*n*pi - 1), S.Integers)
    s5 = ImageSet(Lambda(n, 2*n*pi - 1 + pi), S.Integers)
    # f1 is a cubic in exp(x) with roots 1, 2, 3 -> x = 0, log(2), log(3).
    assert solve_decomposition(f1, x, S.Reals) == FiniteSet(0, log(2), log(3))
    assert dumeq(solve_decomposition(f2, x, S.Reals), s3)
    assert dumeq(solve_decomposition(f3, x, S.Reals), Union(s1, s2, s3))
    assert dumeq(solve_decomposition(f4, x, S.Reals), Union(s4, s5))
    assert solve_decomposition(f5, x, S.Reals) == FiniteSet(-2)
    # 1/log(x) and 1/x can never be zero.
    assert solve_decomposition(f6, x, S.Reals) == S.EmptySet
    assert solve_decomposition(f7, x, S.Reals) == S.EmptySet
    # The only root x = 0 lies outside the domain [1, 2].
    assert solve_decomposition(x, x, Interval(1, 2)) == S.EmptySet
# nonlinsolve testcases
def test_nonlinsolve_basic():
    """Basic nonlinsolve behavior: trivial systems, free symbols, odd targets."""
    assert nonlinsolve([],[]) == S.EmptySet
    assert nonlinsolve([],[x, y]) == S.EmptySet

    system = [x, y - x - 5]
    # y is unconstrained by [x], so it stays free in the solution tuple.
    assert nonlinsolve([x],[x, y]) == FiniteSet((0, y))
    assert nonlinsolve(system, [y]) == FiniteSet((x + 5,))
    soln = (ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers),)
    assert dumeq(nonlinsolve([sin(x) - 1], [x]), FiniteSet(tuple(soln)))
    assert nonlinsolve([x**2 - 1], [x]) == FiniteSet((-1,), (1,))

    soln = FiniteSet((y, y))
    assert nonlinsolve([x - y, 0], x, y) == soln
    assert nonlinsolve([0, x - y], x, y) == soln
    assert nonlinsolve([x - y, x - y], x, y) == soln
    assert nonlinsolve([x, 0], x, y) == FiniteSet((0, y))
    f = Function('f')
    # Applied functions and Indexed objects can be solved for like symbols.
    assert nonlinsolve([f(x), 0], f(x), y) == FiniteSet((0, y))
    assert nonlinsolve([f(x), 0], f(x), f(y)) == FiniteSet((0, f(y)))
    A = Indexed('A', x)
    assert nonlinsolve([A, 0], A, y) == FiniteSet((0, y))
    # Solving for non-symbols: unattainable targets give EmptySet entries.
    assert nonlinsolve([x**2 -1], [sin(x)]) == FiniteSet((S.EmptySet,))
    assert nonlinsolve([x**2 -1], sin(x)) == FiniteSet((S.EmptySet,))
    assert nonlinsolve([x**2 -1], 1) == FiniteSet((x**2,))
    assert nonlinsolve([x**2 -1], x + y) == FiniteSet((S.EmptySet,))
def test_nonlinsolve_abs():
    """Abs(x) - y solves to the one-parameter family (x, Abs(x))."""
    expected = FiniteSet((x, Abs(x)))
    assert nonlinsolve([Abs(x) - y], x, y) == expected
def test_raise_exception_nonlinsolve():
    """Invalid input to nonlinsolve raises the documented exception types."""
    # empty symbol list
    raises(IndexError, lambda: nonlinsolve([x**2 -1], []))
    # no symbols supplied at all
    raises(ValueError, lambda: nonlinsolve([x**2 -1]))
    # Float coefficients are not supported by the polynomial machinery.
    raises(NotImplementedError, lambda: nonlinsolve([(x+y)**2 - 9, x**2 - y**2 - 0.75], (x, y)))
def test_trig_system():
    """Simple trigonometric systems solved by nonlinsolve."""
    # TODO: add more simple testcases when solveset returns
    # simplified soln for Trig eq
    # sin(x) = 1 forces cos(x) = 0, so requiring cos(x) = 1 is inconsistent.
    assert nonlinsolve([sin(x) - 1, cos(x) -1 ], x) == S.EmptySet
    soln1 = (ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers),)
    soln = FiniteSet(soln1)
    assert dumeq(nonlinsolve([sin(x) - 1, cos(x)], x), soln)
@XFAIL
def test_trig_system_fail():
    """Trig systems that the solveset trig solver cannot yet handle (XFAIL)."""
    # fails because solveset trig solver is not much smart.
    sys = [x + y - pi/2, sin(x) + sin(y) - 1]
    # solveset returns conditionset for sin(x) + sin(y) - 1
    # Fixed a misplaced parenthesis: the base set S.Integers belongs inside
    # the second ImageSet (an ImageSet without a base set cannot even be
    # constructed), mirroring the structure of soln_2 below.
    soln_1 = (ImageSet(Lambda(n, n*pi + pi/2), S.Integers),
              ImageSet(Lambda(n, n*pi), S.Integers))
    soln_1 = FiniteSet(soln_1)
    soln_2 = (ImageSet(Lambda(n, n*pi), S.Integers),
              ImageSet(Lambda(n, n*pi+ pi/2), S.Integers))
    soln_2 = FiniteSet(soln_2)
    soln = soln_1 + soln_2
    assert dumeq(nonlinsolve(sys, [x, y]), soln)

    # Add more cases from here
    # http://www.vitutor.com/geometry/trigonometry/equations_systems.html#uno
    sys = [sin(x) + sin(y) - (sqrt(3)+1)/2, sin(x) - sin(y) - (sqrt(3) - 1)/2]
    soln_x = Union(ImageSet(Lambda(n, 2*n*pi + pi/3), S.Integers),
                   ImageSet(Lambda(n, 2*n*pi + pi*Rational(2, 3)), S.Integers))
    soln_y = Union(ImageSet(Lambda(n, 2*n*pi + pi/6), S.Integers),
                   ImageSet(Lambda(n, 2*n*pi + pi*Rational(5, 6)), S.Integers))
    assert dumeq(nonlinsolve(sys, [x, y]), FiniteSet((soln_x, soln_y)))
def test_nonlinsolve_positive_dimensional():
    """Positive-dimensional systems: solutions parameterized by free symbols."""
    x, y, z, a, b, c, d = symbols('x, y, z, a, b, c, d', extended_real=True)
    assert nonlinsolve([x*y, x*y - x], [x, y]) == FiniteSet((0, y))
    system = [a**2 + a*c, a - b]
    assert nonlinsolve(system, [a, b]) == FiniteSet((0, 0), (-c, -c))
    # here (a= 0, b = 0) is independent soln so both is printed.
    # if symbols = [a, b, c] then only {a : -c ,b : -c}

    eq1 = a + b + c + d
    eq2 = a*b + b*c + c*d + d*a
    eq3 = a*b*c + b*c*d + c*d*a + d*a*b
    eq4 = a*b*c*d - 1
    system = [eq1, eq2, eq3, eq4]
    # d remains free (but nonzero, expressed via FiniteSet(d) - FiniteSet(0)).
    sol1 = (-1/d, -d, 1/d, FiniteSet(d) - FiniteSet(0))
    sol2 = (1/d, -d, -1/d, FiniteSet(d) - FiniteSet(0))
    soln = FiniteSet(sol1, sol2)
    assert nonlinsolve(system, [a, b, c, d]) == soln
def test_nonlinsolve_polysys():
    """Polynomial systems, including symbol-order sensitivity of results."""
    x, y, z = symbols('x, y, z', real=True)
    # The two equations force y = 2 and y = 0 simultaneously: inconsistent.
    assert nonlinsolve([x**2 + y - 2, x**2 + y], [x, y]) == S.EmptySet

    s = (-y + 2, y)
    assert nonlinsolve([(x + y)**2 - 4, x + y - 2], [x, y]) == FiniteSet(s)

    system = [x**2 - y**2]
    soln_real = FiniteSet((-y, y), (y, y))
    soln_complex = FiniteSet((-Abs(y), y), (Abs(y), y))
    soln =soln_real + soln_complex
    assert nonlinsolve(system, [x, y]) == soln

    system = [x**2 - y**2]
    soln_real= FiniteSet((y, -y), (y, y))
    soln_complex = FiniteSet((y, -Abs(y)), (y, Abs(y)))
    soln = soln_real + soln_complex
    assert nonlinsolve(system, [y, x]) == soln

    system = [x**2 + y - 3, x - y - 4]
    # Swapping the symbol order changes which symbol is eliminated first.
    assert nonlinsolve(system, (x, y)) != nonlinsolve(system, (y, x))
def test_nonlinsolve_using_substitution():
    """Systems solved by the substitution fallback solver."""
    x, y, z, n = symbols('x, y, z, n', real = True)
    system = [(x + y)*n - y**2 + 2]
    s_x = (n*y - y**2 + 2)/n
    soln = (-s_x, y)
    assert nonlinsolve(system, [x, y]) == FiniteSet(soln)

    system = [z**2*x**2 - z**2*y**2/exp(x)]
    # z = 0 gives one family; otherwise y = +-x*exp(x/2) (Abs for real branch).
    soln_real_1 = (y, x, 0)
    soln_real_2 = (-exp(x/2)*Abs(x), x, z)
    soln_real_3 = (exp(x/2)*Abs(x), x, z)
    soln_complex_1 = (-x*exp(x/2), x, z)
    soln_complex_2 = (x*exp(x/2), x, z)
    syms = [y, x, z]
    soln = FiniteSet(soln_real_1, soln_complex_1, soln_complex_2,\
                     soln_real_2, soln_real_3)
    assert nonlinsolve(system,syms) == soln
def test_nonlinsolve_complex():
    """nonlinsolve over the complexes: solutions expressed with ImageSets."""
    n = Dummy('n')
    assert dumeq(nonlinsolve([exp(x) - sin(y), 1/y - 3], [x, y]), {
        (ImageSet(Lambda(n, 2*n*I*pi + log(sin(Rational(1, 3)))), S.Integers), Rational(1, 3))})
    system = [exp(x) - sin(y), 1/exp(y) - 3]
    assert dumeq(nonlinsolve(system, [x, y]), {
        (ImageSet(Lambda(n, I*(2*n*pi + pi)
                  + log(sin(log(3)))), S.Integers), -log(3)),
        (ImageSet(Lambda(n, I*(2*n*pi + arg(sin(2*n*I*pi - log(3))))
                  + log(Abs(sin(2*n*I*pi - log(3))))), S.Integers),
         ImageSet(Lambda(n, 2*n*I*pi - log(3)), S.Integers))})
    system = [exp(x) - sin(y), y**2 - 4]
    assert dumeq(nonlinsolve(system, [x, y]), {
        (ImageSet(Lambda(n, I*(2*n*pi + pi) + log(sin(2))), S.Integers), -2),
        (ImageSet(Lambda(n, 2*n*I*pi + log(sin(2))), S.Integers), 2)})
@XFAIL
def test_solve_nonlinear_trans():
    """Transcendental systems expected to work once the transcendental
    equation solver lands (currently XFAIL).

    Bug fix: ``soln4`` was ``FiniteSet(2*LambertW(y/2), y)`` — a two-element
    set rather than a set containing one solution *tuple* — which contradicted
    ``soln1`` asserted for the identical ``nonlinsolve`` call two lines above.
    It is now a set of a single (x, y) tuple, consistent with soln1-soln3.
    """
    x, y, z = symbols('x, y, z', real=True)
    soln1 = FiniteSet((2*LambertW(y/2), y))
    soln2 = FiniteSet((-x*sqrt(exp(x)), y), (x*sqrt(exp(x)), y))
    soln3 = FiniteSet((x*exp(x/2), x))
    soln4 = FiniteSet((2*LambertW(y/2), y))
    assert nonlinsolve([x**2 - y**2/exp(x)], [x, y]) == soln1
    assert nonlinsolve([x**2 - y**2/exp(x)], [y, x]) == soln2
    assert nonlinsolve([x**2 - y**2/exp(x)], [y, x]) == soln3
    assert nonlinsolve([x**2 - y**2/exp(x)], [x, y]) == soln4
def test_issue_5132_1():
    """Issue 5132: nonlinsolve on a circle/line system and a mixed exp/trig system."""
    system = [sqrt(x**2 + y**2) - sqrt(10), x + y - 4]
    assert nonlinsolve(system, [x, y]) == FiniteSet((1, 3), (3, 1))
    n = Dummy('n')
    eqs = [exp(x)**2 - sin(y) + z**2, 1/exp(y) - 3]
    s_real_y = -log(3)
    s_real_z = sqrt(-exp(2*x) - sin(log(3)))
    soln_real = FiniteSet((s_real_y, s_real_z), (s_real_y, -s_real_z))
    lam = Lambda(n, 2*n*I*pi + -log(3))
    s_complex_y = ImageSet(lam, S.Integers)
    lam = Lambda(n, sqrt(-exp(2*x) + sin(2*n*I*pi + -log(3))))
    s_complex_z_1 = ImageSet(lam, S.Integers)
    lam = Lambda(n, -sqrt(-exp(2*x) + sin(2*n*I*pi + -log(3))))
    s_complex_z_2 = ImageSet(lam, S.Integers)
    soln_complex = FiniteSet(
        (s_complex_y, s_complex_z_1),
        (s_complex_y, s_complex_z_2)
    )
    soln = soln_real + soln_complex
    assert dumeq(nonlinsolve(eqs, [y, z]), soln)
def test_issue_5132_2():
    """Issue 5132: nonlinsolve solving for different symbol subsets of the same systems."""
    x, y = symbols('x, y', real=True)
    eqs = [exp(x)**2 - sin(y) + z**2, 1/exp(y) - 3]
    n = Dummy('n')
    soln_real = (log(-z**2 + sin(y))/2, z)
    lam = Lambda(n, I*(2*n*pi + arg(-z**2 + sin(y)))/2 + log(Abs(z**2 - sin(y)))/2)
    img = ImageSet(lam, S.Integers)
    # not sure about the complex soln. But it looks correct.
    soln_complex = (img, z)
    soln = FiniteSet(soln_real, soln_complex)
    assert dumeq(nonlinsolve(eqs, [x, z]), soln)
    # NOTE(review): r and t here come from module-level symbols, not the
    # real-valued x, y redefined above — verify that is intentional.
    system = [r - x**2 - y**2, tan(t) - y/x]
    s_x = sqrt(r/(tan(t)**2 + 1))
    s_y = sqrt(r/(tan(t)**2 + 1))*tan(t)
    soln = FiniteSet((s_x, s_y), (-s_x, -s_y))
    assert nonlinsolve(system, [x, y]) == soln
def test_issue_6752():
    """Issue 6752: a simple coupled quadratic/linear system has two solutions."""
    a, b = symbols('a, b', real=True)
    expected = FiniteSet((-1, -1), (0, 0))
    assert nonlinsolve([a**2 + a, a - b], [a, b]) == expected
@SKIP("slow")
def test_issue_5114_solveset():
    # slow testcase
    """Issue 5114: a large sparse linear system should have exactly one solution."""
    from sympy.abc import d, e, f, g, h, i, j, k, l, o, p, q, r
    # there is no 'a' in the equation set but this is how the
    # problem was originally posed
    syms = [a, b, c, f, h, k, n]
    eqs = [b + r/d - c/d,
           c*(1/d + 1/e + 1/g) - f/g - r/d,
           f*(1/g + 1/i + 1/j) - c/g - h/i,
           h*(1/i + 1/l + 1/m) - f/i - k/m,
           k*(1/m + 1/o + 1/p) - h/m - n/p,
           n*(1/p + 1/q) - k/p]
    assert len(nonlinsolve(eqs, syms)) == 1
@SKIP("Hangs")
def _test_issue_5335():
    # Not able to check zero dimensional system.
    # is_zero_dimensional Hangs
    """Issue 5335 (disabled): nonlinsolve on a chemistry-style rational system."""
    lam, a0, conc = symbols('lam a0 conc')
    eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
           a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
           x + y - conc]
    sym = [x, y, a0]
    # there are 4 solutions but only two are valid
    assert len(nonlinsolve(eqs, sym)) == 2
    # float
    eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
           a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
           x + y - conc]
    sym = [x, y, a0]
    assert len(nonlinsolve(eqs, sym)) == 2
def test_issue_2777():
    # the equations represent two circles
    """Issue 2777: intersection of two circles; a negative radius gives no solution."""
    x, y = symbols('x y', real=True)
    e1, e2 = sqrt(x**2 + y**2) - 10, sqrt(y**2 + (-x + 10)**2) - 3
    a, b = Rational(191, 20), 3*sqrt(391)/20
    ans = {(a, -b), (a, b)}
    assert nonlinsolve((e1, e2), (x, y)) == ans
    # dividing by (x - a) removes the solutions at x == a
    assert nonlinsolve((e1, e2/(x - a)), (x, y)) == S.EmptySet
    # make the 2nd circle's radius be -3
    e2 += 6
    assert nonlinsolve((e1, e2), (x, y)) == S.EmptySet
def test_issue_8828():
    """Issue 8828: two equivalent formulations of a tangent-circle problem agree."""
    x1 = 0
    y1 = -620
    r1 = 920
    x2 = 126
    y2 = 276
    x3 = 51
    y3 = 205
    r3 = 104
    v = [x, y, z]

    f1 = (x - x1)**2 + (y - y1)**2 - (r1 - z)**2
    f2 = (x2 - x)**2 + (y2 - y)**2 - z**2
    f3 = (x - x3)**2 + (y - y3)**2 - (r3 - z)**2
    F = [f1, f2, f3]

    g1 = sqrt((x - x1)**2 + (y - y1)**2) + z - r1
    g2 = f2
    g3 = sqrt((x - x3)**2 + (y - y3)**2) + z - r3
    G = [g1, g2, g3]

    # both soln same
    A = nonlinsolve(F, v)
    B = nonlinsolve(G, v)
    assert A == B
def test_nonlinsolve_conditionset():
    # when solveset failed to solve all the eq
    # return conditionset
    """nonlinsolve returns a ConditionSet when solveset cannot solve every equation."""
    f = Function('f')
    f1 = f(x) - pi/2
    f2 = f(y) - pi*Rational(3, 2)
    intermediate_system = Eq(2*f(x) - pi, 0) & Eq(2*f(y) - 3*pi, 0)
    symbols = Tuple(x, y)
    soln = ConditionSet(
        symbols,
        intermediate_system,
        S.Complexes**2)
    assert nonlinsolve([f1, f2], [x, y]) == soln
def test_substitution_basic():
    """Basic behavior of the low-level substitution() helper."""
    assert substitution([], [x, y]) == S.EmptySet
    assert substitution([], []) == S.EmptySet
    system = [2*x**2 + 3*y**2 - 30, 3*x**2 - 2*y**2 - 19]
    soln = FiniteSet((-3, -2), (-3, 2), (3, -2), (3, 2))
    assert substitution(system, [x, y]) == soln

    soln = FiniteSet((-1, 1))
    assert substitution([x + y], [x], [{y: 1}], [y], set(), [x, y]) == soln
    # an exclusion condition that is always satisfied empties the result
    assert substitution(
        [x + y], [x], [{y: 1}], [y],
        {x + 1}, [y, x]) == S.EmptySet
def test_issue_5132_substitution():
    """Issue 5132 systems solved directly through substitution()."""
    x, y, z, r, t = symbols('x, y, z, r, t', real=True)
    system = [r - x**2 - y**2, tan(t) - y/x]
    # x == 0 is excluded because of the y/x division
    s_x_1 = Complement(FiniteSet(-sqrt(r/(tan(t)**2 + 1))), FiniteSet(0))
    s_x_2 = Complement(FiniteSet(sqrt(r/(tan(t)**2 + 1))), FiniteSet(0))
    s_y = sqrt(r/(tan(t)**2 + 1))*tan(t)
    soln = FiniteSet((s_x_2, s_y)) + FiniteSet((s_x_1, -s_y))
    assert substitution(system, [x, y]) == soln

    n = Dummy('n')
    eqs = [exp(x)**2 - sin(y) + z**2, 1/exp(y) - 3]
    s_real_y = -log(3)
    s_real_z = sqrt(-exp(2*x) - sin(log(3)))
    soln_real = FiniteSet((s_real_y, s_real_z), (s_real_y, -s_real_z))
    lam = Lambda(n, 2*n*I*pi + -log(3))
    s_complex_y = ImageSet(lam, S.Integers)
    lam = Lambda(n, sqrt(-exp(2*x) + sin(2*n*I*pi + -log(3))))
    s_complex_z_1 = ImageSet(lam, S.Integers)
    lam = Lambda(n, -sqrt(-exp(2*x) + sin(2*n*I*pi + -log(3))))
    s_complex_z_2 = ImageSet(lam, S.Integers)
    soln_complex = FiniteSet(
        (s_complex_y, s_complex_z_1),
        (s_complex_y, s_complex_z_2))
    soln = soln_real + soln_complex
    assert dumeq(substitution(eqs, [y, z]), soln)
def test_raises_substitution():
    """substitution() rejects empty/invalid symbol arguments with the right errors."""
    raises(ValueError, lambda: substitution([x**2 - 1], []))
    raises(TypeError, lambda: substitution([x**2 - 1]))
    raises(ValueError, lambda: substitution([x**2 - 1], [sin(x)]))
    raises(TypeError, lambda: substitution([x**2 - 1], x))
    raises(TypeError, lambda: substitution([x**2 - 1], 1))
# end of tests for nonlinsolve
def test_issue_9556():
    """Issue 9556: strictly positive expressions have no real zeros."""
    b = Symbol('b', positive=True)
    for expr in (Abs(x) + 1, Abs(x) + b):
        assert solveset(expr, x, S.Reals) == EmptySet()
    # a positive symbol can never equal -1
    assert solveset(Eq(b, -1), b, S.Reals) == EmptySet()
def test_issue_9611():
    """Issue 9611: an identity equation is solved by the whole domain."""
    identity_x = Eq(x - x + a, a)
    identity_y = Eq(y - y + a, a)
    assert solveset(identity_x, x, S.Reals) == S.Reals
    assert solveset(identity_y, y) == S.Complexes
def test_issue_9557():
    """Issue 9557: real roots of x**2 + a are the real members of {±sqrt(-a)}."""
    candidates = FiniteSet(-sqrt(-a), sqrt(-a))
    assert solveset(x**2 + a, x, S.Reals) == Intersection(S.Reals, candidates)
def test_issue_9778():
    """Issue 9778: real roots of odd powers and fractional powers."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert solveset(x**3 + 1, x, S.Reals) == FiniteSet(-1)
    # x**(3/5) is non-negative for real x, so it can never equal -1
    assert solveset(x**Rational(3, 5) + 1, x, S.Reals) == S.EmptySet
    assert solveset(x**3 + y, x, S.Reals) == \
        FiniteSet(-Abs(y)**Rational(1, 3)*sign(y))
def test_issue_10214():
    """Issue 10214: rational powers with even denominators have no real root at -4."""
    assert solveset(x**Rational(3, 2) + 4, x, S.Reals) == S.EmptySet
    assert solveset(x**(Rational(-3, 2)) + 4, x, S.Reals) == S.EmptySet

    ans = FiniteSet(-2**Rational(2, 3))
    assert solveset(x**(S(3)) + 4, x, S.Reals) == ans
    assert (x**(S(3)) + 4).subs(x, list(ans)[0]) == 0  # substituting ans and verifying the result.
    assert (x**(S(3)) + 4).subs(x, -(-2)**Rational(2, 3)) == 0
def test_issue_9849():
    """Issue 9849: |sin(x)| + 1 >= 1 everywhere, so there are no real zeros."""
    expr = Abs(sin(x)) + 1
    assert solveset(expr, x, S.Reals) == S.EmptySet
def test_issue_9953():
    """Issue 9953: linsolve of an empty system is the empty set."""
    no_equations = []
    assert linsolve(no_equations, x) == S.EmptySet
def test_issue_9913():
    """Issue 9913: the single real root of 2*x + 1/(x - 10)**2 in closed form."""
    assert solveset(2*x + 1/(x - 10)**2, x, S.Reals) == \
        FiniteSet(-(3*sqrt(24081)/4 + Rational(4027, 4))**Rational(1, 3)/3 - 100/
                  (3*(3*sqrt(24081)/4 + Rational(4027, 4))**Rational(1, 3)) + Rational(20, 3))
def test_issue_10397():
    """Issue 10397: the only complex zero of sqrt(x) is x = 0."""
    sol = solveset(sqrt(x), x, S.Complexes)
    assert sol == FiniteSet(0)
def test_issue_14987():
    """Issue 14987: linear_eq_to_matrix must reject equations that are not truly linear."""
    raises(ValueError, lambda: linear_eq_to_matrix(
        [x**2], x))
    raises(ValueError, lambda: linear_eq_to_matrix(
        [x*(-3/x + 1) + 2*y - a], [x, y]))
    raises(ValueError, lambda: linear_eq_to_matrix(
        [(x**2 - 3*x)/(x - 3) - 3], x))
    raises(ValueError, lambda: linear_eq_to_matrix(
        [(x + 1)**3 - x**3 - 3*x**2 + 7], x))
    raises(ValueError, lambda: linear_eq_to_matrix(
        [x*(1/x + 1) + y], [x, y]))
    raises(ValueError, lambda: linear_eq_to_matrix(
        [(x + 1)*y], [x, y]))
    raises(ValueError, lambda: linear_eq_to_matrix(
        [Eq(1/x, 1/x + y)], [x, y]))
    raises(ValueError, lambda: linear_eq_to_matrix(
        [Eq(y/x, y/x + y)], [x, y]))
    raises(ValueError, lambda: linear_eq_to_matrix(
        [Eq(x*(x + 1), x**2 + y)], [x, y]))
def test_simplification():
    """Solutions are simplified only when the symbols' assumptions allow it."""
    eq = x + (a - b)/(-2*a + 2*b)
    assert solveset(eq, x) == FiniteSet(S.Half)
    # a == b would make the coefficient undefined, hence the Intersection guard
    assert solveset(eq, x, S.Reals) == Intersection({-((a - b)/(-2*a + 2*b))}, S.Reals)
    # So that ap - bn is not zero:
    ap = Symbol('ap', positive=True)
    bn = Symbol('bn', negative=True)
    eq = x + (ap - bn)/(-2*ap + 2*bn)
    assert solveset(eq, x) == FiniteSet(S.Half)
    assert solveset(eq, x, S.Reals) == FiniteSet(S.Half)
def test_issue_10555():
    """Issue 10555: unsolved equations in undefined functions give ConditionSets."""
    f = Function('f')
    g = Function('g')
    assert solveset(f(x) - pi/2, x, S.Reals).dummy_eq(
        ConditionSet(x, Eq(f(x) - pi/2, 0), S.Reals))
    assert solveset(f(g(x)) - pi/2, g(x), S.Reals).dummy_eq(
        ConditionSet(g(x), Eq(f(g(x)) - pi/2, 0), S.Reals))
def test_issue_8715():
    """Issue 8715: solving inequalities with removable singularities."""
    eq = x + 1/x > -2 + 1/x
    # x == 0 is excluded because 1/x is undefined there
    assert solveset(eq, x, S.Reals) == \
        (Interval.open(-2, oo) - FiniteSet(0))
    assert solveset(eq.subs(x, log(x)), x, S.Reals) == \
        Interval.open(exp(-2), oo) - FiniteSet(1)
def test_issue_11174():
    """Issue 11174: real solutions guarded by Intersection when validity is conditional."""
    eq = z**2 + exp(2*x) - sin(y)
    soln = Intersection(S.Reals, FiniteSet(log(-z**2 + sin(y))/2))
    assert solveset(eq, x, S.Reals) == soln

    eq = sqrt(r)*Abs(tan(t))/sqrt(tan(t)**2 + 1) + x*tan(t)
    s = -sqrt(r)*Abs(tan(t))/(sqrt(tan(t)**2 + 1)*tan(t))
    soln = Intersection(S.Reals, FiniteSet(s))
    assert solveset(eq, x, S.Reals) == soln
def test_issue_11534():
    # eq and eq2 should give the same solution as a Complement
    """Issue 11534: equivalent radical and squared forms yield the same Complement."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    eq = -y + x/sqrt(-x**2 + 1)
    eq2 = -y**2 + x**2/(-x**2 + 1)
    # x == +-1 is excluded: the denominator vanishes there
    soln = Complement(FiniteSet(-y/sqrt(y**2 + 1), y/sqrt(y**2 + 1)), FiniteSet(-1, 1))
    assert solveset(eq, x, S.Reals) == soln
    assert solveset(eq2, x, S.Reals) == soln
def test_issue_10477():
    """Issue 10477: rational inequality solved as a union of open intervals."""
    assert solveset((x**2 + 4*x - 3)/x < 2, x, S.Reals) == \
        Union(Interval.open(-oo, -3), Interval.open(0, 1))
def test_issue_10671():
    """Issue 10671: solveset restricted to an Interval domain."""
    assert solveset(sin(y), y, Interval(0, pi)) == FiniteSet(0, pi)
    i = Interval(1, 10)
    # d/dx (1/x) = -1/x**2 < 0 on all of (1, 10)
    assert solveset((1/x).diff(x) < 0, x, i) == i
def test_issue_11064():
    """Issue 11064: inequalities involving x + sqrt(x**2 - 5)."""
    eq = x + sqrt(x**2 - 5)
    assert solveset(eq > 0, x, S.Reals) == \
        Interval(sqrt(5), oo)
    assert solveset(eq < 0, x, S.Reals) == \
        Interval(-oo, -sqrt(5))
    assert solveset(eq > sqrt(5), x, S.Reals) == \
        Interval.Lopen(sqrt(5), oo)
def test_issue_12478():
    """Issue 12478: sqrt(x - 2) + 2 is never zero or negative on the reals."""
    eq = sqrt(x - 2) + 2
    soln = solveset_real(eq, x)
    assert soln is S.EmptySet
    assert solveset(eq < 0, x, S.Reals) is S.EmptySet
    # the expression is defined (and positive) exactly for x >= 2
    assert solveset(eq > 0, x, S.Reals) == Interval(2, oo)
def test_issue_12429():
    """Issue 12429: log(x)/x <= 0 exactly on the half-open interval (0, 1]."""
    expected = Interval.Lopen(0, 1)
    assert solveset(log(x)/x <= 0, x, S.Reals) == expected
def test_solveset_arg():
    """arg(expr) is zero exactly where expr is real and positive."""
    assert solveset(arg(x), x, S.Reals) == Interval.open(0, oo)
    # 4*x - 3 > 0  <=>  x > 3/4
    assert solveset(arg(4*x - 3), x) == Interval.open(Rational(3, 4), oo)
def test__is_finite_with_finite_vars():
    """_is_finite_with_finite_vars: finiteness of 1/x depends on whether x can be 0."""
    f = _is_finite_with_finite_vars
    # issue 12482
    assert all(f(1/x) is None for x in (
        Dummy(), Dummy(real=True), Dummy(complex=True)))
    assert f(1/Dummy(real=False)) is True  # b/c it's finite but not 0
def test_issue_13550():
    """Issue 13550: domain restriction keeps only the non-positive quadratic root."""
    sol = solveset(x**2 - 2*x - 15, symbol=x, domain=Interval(-oo, 0))
    assert sol == FiniteSet(-3)
def test_issue_13849():
    """Issue 13849: an inconsistent system in t has no solutions."""
    eqs = (t*(sqrt(5) + sqrt(2)) - sqrt(2), t)
    assert nonlinsolve(eqs, t) == EmptySet()
def test_issue_14223():
    """Issue 14223: Min/Abs equations solved after rewriting to Piecewise."""
    assert solveset((Abs(x + Min(x, 2)) - 2).rewrite(Piecewise), x,
                    S.Reals) == FiniteSet(-1, 1)
    assert solveset((Abs(x + Min(x, 2)) - 2).rewrite(Piecewise), x,
                    Interval(0, 2)) == FiniteSet(1)
def test_issue_10158():
    """Issue 10158: Max/Min/Abs handled over the reals, rejected over the complexes."""
    dom = S.Reals
    assert solveset(x*Max(x, 15) - 10, x, dom) == FiniteSet(Rational(2, 3))
    assert solveset(x*Min(x, 15) - 10, x, dom) == FiniteSet(-sqrt(10), sqrt(10))
    assert solveset(Max(Abs(x - 3) - 1, x + 2) - 3, x, dom) == FiniteSet(-1, 1)
    assert solveset(Abs(x - 1) - Abs(y), x, dom) == FiniteSet(-Abs(y) + 1, Abs(y) + 1)
    assert solveset(Abs(x + 4*Abs(x + 1)), x, dom) == FiniteSet(Rational(-4, 3), Rational(-4, 5))
    assert solveset(2*Abs(x + Abs(x + Max(3, x))) - 2, x, S.Reals) == FiniteSet(-1, -2)
    dom = S.Complexes
    # Max/Min/Abs are not meaningfully ordered over the complexes
    raises(ValueError, lambda: solveset(x*Max(x, 15) - 10, x, dom))
    raises(ValueError, lambda: solveset(x*Min(x, 15) - 10, x, dom))
    raises(ValueError, lambda: solveset(Max(Abs(x - 3) - 1, x + 2) - 3, x, dom))
    raises(ValueError, lambda: solveset(Abs(x - 1) - Abs(y), x, dom))
    raises(ValueError, lambda: solveset(Abs(x + 4*Abs(x + 1)), x, dom))
def test_issue_14300():
    """Issue 14300: exponential with a very large rate solved for x."""
    f = 1 - exp(-18000000*x) - y
    a1 = FiniteSet(-log(-y + 1)/18000000)
    assert solveset(f, x, S.Reals) == \
        Intersection(S.Reals, a1)
    assert dumeq(solveset(f, x),
                 ImageSet(Lambda(n, -I*(2*n*pi + arg(-y + 1))/18000000 -
                          log(Abs(y - 1))/18000000), S.Integers))
def test_issue_14454():
    """Issue 14454: invert_real rejects a CRootOf lhs but accepts one as rhs."""
    number = CRootOf(x**4 + x - 1, 2)
    raises(ValueError, lambda: invert_real(number, 0, x, S.Reals))
    assert invert_real(x**2, number, x, S.Reals)  # no error
def test_issue_17882():
    """Issue 17882: complex zeros of the radical expression are +-sqrt(3)."""
    expr = -8*x**2/(9*(x**2 - 1)**(S(4)/3)) + 4/(3*(x**2 - 1)**(S(1)/3))
    expected = FiniteSet(sqrt(3), -sqrt(3))
    assert solveset(expr, x, S.Complexes) == expected
def test_term_factors():
    """_term_factors yields the multiplicative factors of each additive term."""
    assert list(_term_factors(3**x - 2)) == [-2, 3**x]
    expr = 4**(x + 1) + 4**(x + 2) + 4**(x - 1) - 3**(x + 2) - 3**(x + 3)
    assert set(_term_factors(expr)) == {
        3**(x + 2), 4**(x + 2), 3**(x + 3), 4**(x - 1), -1, 4**(x + 1)}
#################### tests for transolve and its helpers ###############
def test_transolve():
    """_transolve handles simple exponential equations over the reals."""
    assert _transolve(3**x, x, S.Reals) == S.EmptySet
    sol = _transolve(3**x - 9**(x + 5), x, S.Reals)
    assert sol == FiniteSet(-10)
# exponential tests
def test_exponential_real():
    """Real solutions of a broad collection of exponential equations."""
    from sympy.abc import x, y, z

    e1 = 3**(2*x) - 2**(x + 3)
    e2 = 4**(5 - 9*x) - 8**(2 - x)
    e3 = 2**x + 4**x
    e4 = exp(log(5)*x) - 2**x
    e5 = exp(x/y)*exp(-z/y) - 2
    e6 = 5**(x/2) - 2**(x/3)
    e7 = 4**(x + 1) + 4**(x + 2) + 4**(x - 1) - 3**(x + 2) - 3**(x + 3)
    e8 = -9*exp(-2*x + 5) + 4*exp(3*x + 1)
    e9 = 2**x + 4**x + 8**x - 84
    e10 = 29*2**(x + 1)*615**(x) - 123*2726**(x)

    assert solveset(e1, x, S.Reals) == FiniteSet(
        -3*log(2)/(-2*log(3) + log(2)))
    assert solveset(e2, x, S.Reals) == FiniteSet(Rational(4, 15))
    # sum of two strictly positive terms: no real solution
    assert solveset(e3, x, S.Reals) == S.EmptySet
    assert solveset(e4, x, S.Reals) == FiniteSet(0)
    assert solveset(e5, x, S.Reals) == Intersection(
        S.Reals, FiniteSet(y*log(2*exp(z/y))))
    assert solveset(e6, x, S.Reals) == FiniteSet(0)
    assert solveset(e7, x, S.Reals) == FiniteSet(2)
    assert solveset(e8, x, S.Reals) == FiniteSet(-2*log(2)/5 + 2*log(3)/5 + Rational(4, 5))
    assert solveset(e9, x, S.Reals) == FiniteSet(2)
    assert solveset(e10, x, S.Reals) == FiniteSet((-log(29) - log(2) + log(123))/(-log(2726) + log(2) + log(615)))

    assert solveset_real(-9*exp(-2*x + 5) + 2**(x + 1), x) == FiniteSet(
        -((-5 - 2*log(3) + log(2))/(log(2) + 2)))
    assert solveset_real(4**(x/2) - 2**(x/3), x) == FiniteSet(0)
    b = sqrt(6)*sqrt(log(2))/sqrt(log(5))
    assert solveset_real(5**(x/2) - 2**(3/x), x) == FiniteSet(-b, b)

    # coverage test
    C1, C2 = symbols('C1 C2')
    f = Function('f')
    assert solveset_real(C1 + C2/x**2 - exp(-f(x)), f(x)) == Intersection(
        S.Reals, FiniteSet(-log(C1 + C2/x**2)))
    y = symbols('y', positive=True)
    assert solveset_real(x**2 - y**2/exp(x), y) == Intersection(
        S.Reals, FiniteSet(-sqrt(x**2*exp(x)), sqrt(x**2*exp(x))))
    p = Symbol('p', positive=True)
    assert solveset_real((1/p + 1)**(p + 1), p).dummy_eq(
        ConditionSet(x, Eq((1 + 1/x)**(x + 1), 0), S.Reals))
@XFAIL
def test_exponential_complex():
    """Complex solutions of exponential equations as ImageSets (currently XFAIL)."""
    from sympy.abc import x
    from sympy import Dummy
    n = Dummy('n')

    assert dumeq(solveset_complex(2**x + 4**x, x), imageset(
        Lambda(n, I*(2*n*pi + pi)/log(2)), S.Integers))
    assert solveset_complex(x**z*y**z - 2, z) == FiniteSet(
        log(2)/(log(x) + log(y)))
    assert dumeq(solveset_complex(4**(x/2) - 2**(x/3), x), imageset(
        Lambda(n, 3*n*I*pi/log(2)), S.Integers))
    assert dumeq(solveset(2**x + 32, x), imageset(
        Lambda(n, (I*(2*n*pi + pi) + 5*log(2))/log(2)), S.Integers))

    eq = (2**exp(y**2/x) + 2)/(x**2 + 15)
    a = sqrt(x)*sqrt(-log(log(2)) + log(log(2) + 2*n*I*pi))
    assert solveset_complex(eq, y) == FiniteSet(-a, a)

    union1 = imageset(Lambda(n, I*(2*n*pi - pi*Rational(2, 3))/log(2)), S.Integers)
    union2 = imageset(Lambda(n, I*(2*n*pi + pi*Rational(2, 3))/log(2)), S.Integers)
    assert dumeq(solveset(2**x + 4**x + 8**x, x), Union(union1, union2))

    eq = 4**(x + 1) + 4**(x + 2) + 4**(x - 1) - 3**(x + 2) - 3**(x + 3)
    res = solveset(eq, x)
    num = 2*n*I*pi - 4*log(2) + 2*log(3)
    den = -2*log(2) + log(3)
    ans = imageset(Lambda(n, num/den), S.Integers)
    assert dumeq(res, ans)
def test_expo_conditionset():
    """Unsolvable mixed exponential equations fall back to ConditionSets."""
    f1 = (exp(x) + 1)**x - 2
    f2 = (x + 2)**y*x - 3
    f3 = 2**x - exp(x) - 3
    f4 = log(x) - exp(x)
    f5 = 2**x + 3**x - 5**x

    assert solveset(f1, x, S.Reals).dummy_eq(ConditionSet(
        x, Eq((exp(x) + 1)**x - 2, 0), S.Reals))
    assert solveset(f2, x, S.Reals).dummy_eq(ConditionSet(
        x, Eq(x*(x + 2)**y - 3, 0), S.Reals))
    assert solveset(f3, x, S.Reals).dummy_eq(ConditionSet(
        x, Eq(2**x - exp(x) - 3, 0), S.Reals))
    assert solveset(f4, x, S.Reals).dummy_eq(ConditionSet(
        x, Eq(-exp(x) + log(x), 0), S.Reals))
    assert solveset(f5, x, S.Reals).dummy_eq(ConditionSet(
        x, Eq(2**x + 3**x - 5**x, 0), S.Reals))
def test_exponential_symbols():
    """Exponential equations with symbolic bases and exponents."""
    x, y, z = symbols('x y z', positive=True)
    xr, zr = symbols('xr, zr', real=True)

    assert solveset(z**x - y, x, S.Reals) == Intersection(
        S.Reals, FiniteSet(log(y)/log(z)))

    f1 = 2*x**w - 4*y**w
    f2 = (x/y)**w - 2
    sol1 = Intersection({log(2)/(log(x) - log(y))}, S.Reals)
    sol2 = Intersection({log(2)/log(x/y)}, S.Reals)
    assert solveset(f1, w, S.Reals) == sol1, solveset(f1, w, S.Reals)
    assert solveset(f2, w, S.Reals) == sol2, solveset(f2, w, S.Reals)

    assert solveset(x**x, x, Interval.Lopen(0, oo)).dummy_eq(
        ConditionSet(w, Eq(w**w, 0), Interval.open(0, oo)))
    assert solveset(x**y - 1, y, S.Reals) == FiniteSet(0)
    assert solveset(exp(x/y)*exp(-z/y) - 2, y, S.Reals) == \
        Complement(ConditionSet(y, Eq(im(x)/y, 0) & Eq(im(z)/y, 0),
        Complement(Intersection(FiniteSet((x - z)/log(2)), S.Reals), FiniteSet(0))), FiniteSet(0))
    # with real symbols no imaginary-part conditions are needed
    assert solveset(exp(xr/y)*exp(-zr/y) - 2, y, S.Reals) == \
        Complement(FiniteSet((xr - zr)/log(2)), FiniteSet(0))

    assert solveset(a**x - b**x, x).dummy_eq(ConditionSet(
        w, Ne(a, 0) & Ne(b, 0), FiniteSet(0)))
def test_ignore_assumptions():
    # make sure assumptions are ignored
    """solveset_complex gives the same answer regardless of symbol assumptions."""
    xpos = symbols('x', positive=True)
    x = symbols('x')
    assert solveset_complex(xpos**2 - 4, xpos
        ) == solveset_complex(x**2 - 4, x)
@XFAIL
def test_issue_10864():
    """Issue 10864 (XFAIL): x**(y*z) == x should yield x == 1 over the reals."""
    assert solveset(x**(y*z) - x, x, S.Reals) == FiniteSet(1)
@XFAIL
def test_solve_only_exp_2():
    """XFAIL: sqrt(exp(x)) + sqrt(exp(-x)) = 4 has two symmetric solutions."""
    assert solveset_real(sqrt(exp(x)) + sqrt(exp(-x)) - 4, x) == \
        FiniteSet(2*log(-sqrt(3) + 2), 2*log(sqrt(3) + 2))
def test_is_exponential():
    """_is_exponential recognizes equations exponential in the given symbol."""
    assert _is_exponential(y, x) is False
    assert _is_exponential(3**x - 2, x) is True
    assert _is_exponential(5**x - 7**(2 - x), x) is True
    assert _is_exponential(sin(2**x) - 4*x, x) is False
    assert _is_exponential(x**y - z, y) is True
    assert _is_exponential(x**y - z, x) is False
    assert _is_exponential(2**x + 4**x - 1, x) is True
    assert _is_exponential(x**(y*z) - x, x) is False
    # x appears in both base and exponent -> not purely exponential
    assert _is_exponential(x**(2*x) - 3**x, x) is False
    assert _is_exponential(x**y - y*z, y) is False
    assert _is_exponential(x**y - x*z, y) is True
def test_solve_exponential():
    """_solve_exponential on solvable cases and its ConditionSet fallback."""
    assert _solve_exponential(3**(2*x) - 2**(x + 3), 0, x, S.Reals) == \
        FiniteSet(-3*log(2)/(-2*log(3) + log(2)))
    assert _solve_exponential(2**y + 4**y, 1, y, S.Reals) == \
        FiniteSet(log(Rational(-1, 2) + sqrt(5)/2)/log(2))
    assert _solve_exponential(2**y + 4**y, 0, y, S.Reals) == \
        S.EmptySet
    assert _solve_exponential(2**x + 3**x - 5**x, 0, x, S.Reals) == \
        ConditionSet(x, Eq(2**x + 3**x - 5**x, 0), S.Reals)
# end of exponential tests
# logarithmic tests
def test_logarithmic():
    """Real solutions of logarithmic equations, with domain exclusions."""
    assert solveset_real(log(x - 3) + log(x + 3), x) == FiniteSet(
        -sqrt(10), sqrt(10))
    assert solveset_real(log(x + 1) - log(2*x - 1), x) == FiniteSet(2)
    assert solveset_real(log(x + 3) + log(1 + 3/x) - 3, x) == FiniteSet(
        -3 + sqrt(-12 + exp(3))*exp(Rational(3, 2))/2 + exp(3)/2,
        -sqrt(-12 + exp(3))*exp(Rational(3, 2))/2 - 3 + exp(3)/2)

    eq = z - log(x) + log(y/(x*(-1 + y**2/x**2)))
    # subtract the values where the log arguments become invalid
    assert solveset_real(eq, x) == \
        Intersection(S.Reals, FiniteSet(-sqrt(y**2 - y*exp(z)),
                                        sqrt(y**2 - y*exp(z)))) - \
        Intersection(S.Reals, FiniteSet(-sqrt(y**2), sqrt(y**2)))
    assert solveset_real(
        log(3*x) - log(-x + 1) - log(4*x + 1), x) == FiniteSet(Rational(-1, 2), S.Half)
    # log(x**y) == y*log(x) identically on the given domain
    assert solveset(log(x**y) - y*log(x), x, S.Reals) == S.Reals
@XFAIL
def test_uselogcombine_2():
    """XFAIL: log-combination cases that should be recognized as unsolvable."""
    eq = log(exp(2*x) + 1) + log(-tanh(x) + 1) - log(2)
    assert solveset_real(eq, x) == EmptySet()
    eq = log(8*x) - log(sqrt(x) + 1) - 2
    assert solveset_real(eq, x) == EmptySet()
def test_is_logarithmic():
    """_is_logarithmic recognizes equations logarithmic in the given symbol."""
    assert _is_logarithmic(y, x) is False
    assert _is_logarithmic(log(x), x) is True
    assert _is_logarithmic(log(x) - 3, x) is True
    assert _is_logarithmic(log(x)*log(y), x) is True
    # powers of logs are not handled by the logarithmic solver
    assert _is_logarithmic(log(x)**2, x) is False
    assert _is_logarithmic(log(x - 3) + log(x + 3), x) is True
    assert _is_logarithmic(log(x**y) - y*log(x), x) is True
    assert _is_logarithmic(sin(log(x)), x) is False
    assert _is_logarithmic(x + y, x) is False
    assert _is_logarithmic(log(3*x) - log(1 - x) + 4, x) is True
    assert _is_logarithmic(log(x) + log(y) + x, x) is False
    assert _is_logarithmic(log(log(x - 3)) + log(x - 3), x) is True
    assert _is_logarithmic(log(log(3) + x) + log(x), x) is True
    assert _is_logarithmic(log(x)*(y + 3) + log(x), y) is False
def test_solve_logarithm():
    """_solve_logarithm on an identity and on a product of logs."""
    y = Symbol('y')
    assert _solve_logarithm(log(x**y) - y*log(x), 0, x, S.Reals) == S.Reals
    y = Symbol('y', positive=True)
    assert _solve_logarithm(log(x)*log(y), 0, x, S.Reals) == FiniteSet(1)
# end of logarithmic tests
def test_linear_coeffs():
    """linear_coeffs extracts coefficients of the requested generators plus the constant."""
    from sympy.solvers.solveset import linear_coeffs
    assert linear_coeffs(0, x) == [0, 0]
    assert all(i is S.Zero for i in linear_coeffs(0, x))
    assert linear_coeffs(x + 2*y + 3, x, y) == [1, 2, 3]
    assert linear_coeffs(x + 2*y + 3, y, x) == [2, 1, 3]
    assert linear_coeffs(x + 2*x**2 + 3, x, x**2) == [1, 2, 3]
    # any term not linear in the given generators raises
    raises(ValueError, lambda:
        linear_coeffs(x + 2*x**2 + x**3, x, x**2))
    raises(ValueError, lambda:
        linear_coeffs(1/x*(x - 1) + 1/x, x))
    assert linear_coeffs(a*(x + y), x, y) == [a, a, 0]
    assert linear_coeffs(1.0, x, y) == [0, 0, 1.0]
# modular tests
def test_is_modular():
    """_is_modular recognizes equations with a top-level Mod in the given symbol."""
    assert _is_modular(y, x) is False
    assert _is_modular(Mod(x, 3) - 1, x) is True
    assert _is_modular(Mod(x**3 - 3*x**2 - x + 1, 3) - 1, x) is True
    assert _is_modular(Mod(exp(x + y), 3) - 2, x) is True
    assert _is_modular(Mod(exp(x + y), 3) - log(x), x) is True
    assert _is_modular(Mod(x, 3) - 1, y) is False
    # Mod must appear at the top level, not nested in another function/power
    assert _is_modular(Mod(x, 3)**2 - 5, x) is False
    assert _is_modular(Mod(x, 3)**2 - y, x) is False
    assert _is_modular(exp(Mod(x, 3)) - 1, x) is False
    assert _is_modular(Mod(3, y) - 1, y) is False
def test_invert_modular():
    """_invert_modular peels Mod off Add/Mul/Pow arguments where possible."""
    n = Dummy('n', integer=True)
    from sympy.solvers.solveset import _invert_modular as invert_modular

    # non invertible cases
    assert invert_modular(Mod(sin(x), 7), S(5), n, x) == (Mod(sin(x), 7), 5)
    assert invert_modular(Mod(exp(x), 7), S(5), n, x) == (Mod(exp(x), 7), 5)
    assert invert_modular(Mod(log(x), 7), S(5), n, x) == (Mod(log(x), 7), 5)
    # a is symbol
    assert dumeq(invert_modular(Mod(x, 7), S(5), n, x),
                 (x, ImageSet(Lambda(n, 7*n + 5), S.Integers)))
    # a.is_Add
    assert dumeq(invert_modular(Mod(x + 8, 7), S(5), n, x),
                 (x, ImageSet(Lambda(n, 7*n + 4), S.Integers)))
    assert invert_modular(Mod(x**2 + x, 7), S(5), n, x) == \
        (Mod(x**2 + x, 7), 5)
    # a.is_Mul
    assert dumeq(invert_modular(Mod(3*x, 7), S(5), n, x),
                 (x, ImageSet(Lambda(n, 7*n + 4), S.Integers)))
    assert invert_modular(Mod((x + 1)*(x + 2), 7), S(5), n, x) == \
        (Mod((x + 1)*(x + 2), 7), 5)
    # a.is_Pow
    assert invert_modular(Mod(x**4, 7), S(5), n, x) == \
        (x, EmptySet())
    assert dumeq(invert_modular(Mod(3**x, 4), S(3), n, x),
                 (x, ImageSet(Lambda(n, 2*n + 1), S.Naturals0)))
    assert dumeq(invert_modular(Mod(2**(x**2 + x + 1), 7), S(2), n, x),
                 (x**2 + x + 1, ImageSet(Lambda(n, 3*n + 1), S.Naturals0)))
    assert invert_modular(Mod(sin(x)**4, 7), S(5), n, x) == (x, EmptySet())
def test_solve_modular():
    """solveset of Mod equations over the integers: linear, power, and fallback cases."""
    n = Dummy('n', integer=True)
    # if rhs has symbol (need to be implemented in future).
    assert solveset(Mod(x, 4) - x, x, S.Integers
        ).dummy_eq(
            ConditionSet(x, Eq(-x + Mod(x, 4), 0),
            S.Integers))
    # when _invert_modular fails to invert
    assert solveset(3 - Mod(sin(x), 7), x, S.Integers
        ).dummy_eq(
            ConditionSet(x, Eq(Mod(sin(x), 7) - 3, 0), S.Integers))
    assert solveset(3 - Mod(log(x), 7), x, S.Integers
        ).dummy_eq(
            ConditionSet(x, Eq(Mod(log(x), 7) - 3, 0), S.Integers))
    assert solveset(3 - Mod(exp(x), 7), x, S.Integers
        ).dummy_eq(ConditionSet(x, Eq(Mod(exp(x), 7) - 3, 0),
        S.Integers))
    # EmptySet solution definitely
    assert solveset(7 - Mod(x, 5), x, S.Integers) == EmptySet()
    assert solveset(5 - Mod(x, 5), x, S.Integers) == EmptySet()
    # Negative m
    assert dumeq(solveset(2 + Mod(x, -3), x, S.Integers),
                 ImageSet(Lambda(n, -3*n - 2), S.Integers))
    assert solveset(4 + Mod(x, -3), x, S.Integers) == EmptySet()
    # linear expression in Mod
    assert dumeq(solveset(3 - Mod(x, 5), x, S.Integers),
                 ImageSet(Lambda(n, 5*n + 3), S.Integers))
    assert dumeq(solveset(3 - Mod(5*x - 8, 7), x, S.Integers),
                 ImageSet(Lambda(n, 7*n + 5), S.Integers))
    assert dumeq(solveset(3 - Mod(5*x, 7), x, S.Integers),
                 ImageSet(Lambda(n, 7*n + 2), S.Integers))
    # higher degree expression in Mod
    assert dumeq(solveset(Mod(x**2, 160) - 9, x, S.Integers),
                 Union(ImageSet(Lambda(n, 160*n + 3), S.Integers),
                 ImageSet(Lambda(n, 160*n + 13), S.Integers),
                 ImageSet(Lambda(n, 160*n + 67), S.Integers),
                 ImageSet(Lambda(n, 160*n + 77), S.Integers),
                 ImageSet(Lambda(n, 160*n + 83), S.Integers),
                 ImageSet(Lambda(n, 160*n + 93), S.Integers),
                 ImageSet(Lambda(n, 160*n + 147), S.Integers),
                 ImageSet(Lambda(n, 160*n + 157), S.Integers)))
    assert solveset(3 - Mod(x**4, 7), x, S.Integers) == EmptySet()
    assert dumeq(solveset(Mod(x**4, 17) - 13, x, S.Integers),
                 Union(ImageSet(Lambda(n, 17*n + 3), S.Integers),
                 ImageSet(Lambda(n, 17*n + 5), S.Integers),
                 ImageSet(Lambda(n, 17*n + 12), S.Integers),
                 ImageSet(Lambda(n, 17*n + 14), S.Integers)))
    # a.is_Pow tests
    assert dumeq(solveset(Mod(7**x, 41) - 15, x, S.Integers),
                 ImageSet(Lambda(n, 40*n + 3), S.Naturals0))
    assert dumeq(solveset(Mod(12**x, 21) - 18, x, S.Integers),
                 ImageSet(Lambda(n, 6*n + 2), S.Naturals0))
    assert dumeq(solveset(Mod(3**x, 4) - 3, x, S.Integers),
                 ImageSet(Lambda(n, 2*n + 1), S.Naturals0))
    assert dumeq(solveset(Mod(2**x, 7) - 2, x, S.Integers),
                 ImageSet(Lambda(n, 3*n + 1), S.Naturals0))
    assert dumeq(solveset(Mod(3**(3**x), 4) - 3, x, S.Integers),
                 Intersection(ImageSet(Lambda(n, Intersection({log(2*n + 1)/log(3)},
                 S.Integers)), S.Naturals0), S.Integers))
    # Implemented for m without primitive root
    assert solveset(Mod(x**3, 7) - 2, x, S.Integers) == EmptySet()
    assert dumeq(solveset(Mod(x**3, 8) - 1, x, S.Integers),
                 ImageSet(Lambda(n, 8*n + 1), S.Integers))
    assert dumeq(solveset(Mod(x**4, 9) - 4, x, S.Integers),
                 Union(ImageSet(Lambda(n, 9*n + 4), S.Integers),
                 ImageSet(Lambda(n, 9*n + 5), S.Integers)))
    # domain intersection
    assert dumeq(solveset(3 - Mod(5*x - 8, 7), x, S.Naturals0),
                 Intersection(ImageSet(Lambda(n, 7*n + 5), S.Integers), S.Naturals0))
    # Complex args
    assert solveset(Mod(x, 3) - I, x, S.Integers) == \
        EmptySet()
    assert solveset(Mod(I*x, 3) - 2, x, S.Integers
        ).dummy_eq(
            ConditionSet(x, Eq(Mod(I*x, 3) - 2, 0), S.Integers))
    assert solveset(Mod(I + x, 3) - 2, x, S.Integers
        ).dummy_eq(
            ConditionSet(x, Eq(Mod(x + I, 3) - 2, 0), S.Integers))

    # issue 17373 (https://github.com/sympy/sympy/issues/17373)
    assert dumeq(solveset(Mod(x**4, 14) - 11, x, S.Integers),
                 Union(ImageSet(Lambda(n, 14*n + 3), S.Integers),
                 ImageSet(Lambda(n, 14*n + 11), S.Integers)))
    assert dumeq(solveset(Mod(x**31, 74) - 43, x, S.Integers),
                 ImageSet(Lambda(n, 74*n + 31), S.Integers))

    # issue 13178
    n = symbols('n', integer=True)
    a = 742938285
    b = 1898888478
    m = 2**31 - 1
    c = 20170816
    assert dumeq(solveset(c - Mod(a**n*b, m), n, S.Integers),
                 ImageSet(Lambda(n, 2147483646*n + 100), S.Naturals0))
    assert dumeq(solveset(c - Mod(a**n*b, m), n, S.Naturals0),
                 Intersection(ImageSet(Lambda(n, 2147483646*n + 100), S.Naturals0),
                 S.Naturals0))
    assert dumeq(solveset(c - Mod(a**(2*n)*b, m), n, S.Integers),
                 Intersection(ImageSet(Lambda(n, 1073741823*n + 50), S.Naturals0),
                 S.Integers))
    assert solveset(c - Mod(a**(2*n + 7)*b, m), n, S.Integers) == EmptySet()
    assert dumeq(solveset(c - Mod(a**(n - 4)*b, m), n, S.Integers),
                 Intersection(ImageSet(Lambda(n, 2147483646*n + 104), S.Naturals0),
                 S.Integers))
# end of modular tests
def test_issue_17276():
    """Issue 17276: nonlinsolve handles radical right-hand sides."""
    eqs = [Eq(x, 5**(S(1)/5)), Eq(x*y, 25*sqrt(5))]
    expected = FiniteSet((5**(S(1)/5), 25*5**(S(3)/10)))
    assert nonlinsolve(eqs, x, y) == expected
def test_issue_10426():
    """Issue 10426: solve sin(x + a) == sin(x) for a with Dummy-insensitive comparison."""
    x = Dummy('x')
    a = Symbol('a')
    n = Dummy('n')
    # NOTE(review): both sides are normalized through dummy_eq so the
    # comparison ignores the particular Dummy instances — confirm intent.
    assert (solveset(sin(x + a) - sin(x), a)).dummy_eq(Dummy('x')) == (Union(
        ImageSet(Lambda(n, 2*n*pi), S.Integers),
        Intersection(S.Complexes, ImageSet(Lambda(n, -I*(I*(2*n*pi + arg(-exp(-2*I*x))) + 2*im(x))),
        S.Integers)))).dummy_eq(Dummy('x,n'))
def test_issue_18208():
    """Issue 18208: solve/linsolve/gauss_jordan/nonlinsolve agree on a big
    underdetermined linear system (two magic-square-style blocks)."""
    vars = symbols('x0:16') + symbols('y0:12')
    x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,\
        y0, y1, y2, y3, y4, y5, y6, y7, y8, y9, y10, y11 = vars

    eqs = [x0 + x1 + x2 + x3 - 51,
           x0 + x1 + x4 + x5 - 46,
           x2 + x3 + x6 + x7 - 39,
           x0 + x3 + x4 + x7 - 50,
           x1 + x2 + x5 + x6 - 35,
           x4 + x5 + x6 + x7 - 34,
           x4 + x5 + x8 + x9 - 46,
           x10 + x11 + x6 + x7 - 23,
           x11 + x4 + x7 + x8 - 25,
           x10 + x5 + x6 + x9 - 44,
           x10 + x11 + x8 + x9 - 35,
           x12 + x13 + x8 + x9 - 35,
           x10 + x11 + x14 + x15 - 29,
           x11 + x12 + x15 + x8 - 35,
           x10 + x13 + x14 + x9 - 29,
           x12 + x13 + x14 + x15 - 29,
           y0 + y1 + y2 + y3 - 55,
           y0 + y1 + y4 + y5 - 53,
           y2 + y3 + y6 + y7 - 56,
           y0 + y3 + y4 + y7 - 57,
           y1 + y2 + y5 + y6 - 52,
           y4 + y5 + y6 + y7 - 54,
           y4 + y5 + y8 + y9 - 48,
           y10 + y11 + y6 + y7 - 60,
           y11 + y4 + y7 + y8 - 51,
           y10 + y5 + y6 + y9 - 57,
           y10 + y11 + y8 + y9 - 54,
           x10 - 2,
           x11 - 5,
           x12 - 1,
           x13 - 6,
           x14 - 1,
           x15 - 21,
           y0 - 12,
           y1 - 20]

    # parametric solution in terms of the remaining free symbols
    expected = [38 - x3, x3 - 10, 23 - x3, x3, 12 - x7, x7 + 6, 16 - x7, x7,
                8, 20, 2, 5, 1, 6, 1, 21, 12, 20, -y11 + y9 + 2, y11 - y9 + 21,
                -y11 - y7 + y9 + 24, y11 + y7 - y9 - 3, 33 - y7, y7, 27 - y9, y9,
                27 - y11, y11]

    A, b = linear_eq_to_matrix(eqs, vars)

    # solve
    solve_expected = {v: eq for v, eq in zip(vars, expected) if v != eq}
    assert solve(eqs, vars) == solve_expected

    # linsolve
    linsolve_expected = FiniteSet(Tuple(*expected))
    assert linsolve(eqs, vars) == linsolve_expected
    assert linsolve((A, b), vars) == linsolve_expected

    # gauss_jordan_solve
    gj_solve, new_vars = A.gauss_jordan_solve(b)
    gj_solve = [i for i in gj_solve]
    tau0, tau1, tau2, tau3, tau4 = symbols([str(v) for v in new_vars])
    gj_expected = linsolve_expected.subs(zip([x3, x7, y7, y9, y11], new_vars))
    assert FiniteSet(Tuple(*gj_solve)) == gj_expected

    # nonlinsolve
    # The solution set of nonlinsolve is currently equivalent to linsolve and is
    # also correct. However, we would prefer to use the same symbols as parameters
    # for the solution to the underdetermined system in all cases if possible.
    # We want a solution that is not just equivalent but also given in the same form.
    # This test may be changed should nonlinsolve be modified in this way.
    nonlinsolve_expected = FiniteSet((38 - x3, x3 - 10, 23 - x3, x3, 12 - x7, x7 + 6,
                                      16 - x7, x7, 8, 20, 2, 5, 1, 6, 1, 21, 12, 20,
                                      -y5 + y7 - 1, y5 - y7 + 24, 21 - y5, y5, 33 - y7,
                                      y7, 27 - y9, y9, -y5 + y7 - y9 + 24, y5 - y7 + y9 + 3))
    assert nonlinsolve(eqs, vars) == nonlinsolve_expected
@XFAIL
def test_substitution_with_infeasible_solution():
a00, a01, a10, a11, l0, l1, l2, l3, m0, m1, m2, m3, m4, m5, m6, m7, c00, c01, c10, c11, p00, p01, p10, p11 = symbols(
'a00, a01, a10, a11, l0, l1, l2, l3, m0, m1, m2, m3, m4, m5, m6, m7, c00, c01, c10, c11, p00, p01, p10, p11'
)
solvefor = [p00, p01, p10, p11, c00, c01, c10, c11, m0, m1, m3, l0, l1, l2, l3]
system = [
-l0 * c00 - l1 * c01 + m0 + c00 + c01,
-l0 * c10 - l1 * c11 + m1,
-l2 * c00 - l3 * c01 + c00 + c01,
-l2 * c10 - l3 * c11 + m3,
-l0 * p00 - l2 * p10 + p00 + p10,
-l1 * p00 - l3 * p10 + p00 + p10,
-l0 * p01 - l2 * p11,
-l1 * p01 - l3 * p11,
-a00 + c00 * p00 + c10 * p01,
-a01 + c01 * p00 + c11 * p01,
-a10 + c00 * p10 + c10 * p11,
-a11 + c01 * p10 + c11 * p11,
-m0 * p00,
-m1 * p01,
-m2 * p10,
-m3 * p11,
-m4 * c00,
-m5 * c01,
-m6 * c10,
-m7 * c11,
m2,
m4,
m5,
m6,
m7
]
sol = FiniteSet(
(0, Complement(FiniteSet(p01), FiniteSet(0)), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, l2, l3),
(p00, Complement(FiniteSet(p01), FiniteSet(0)), 0, p11, 0, 0, 0, 0, 0, 0, 0, 1, 1, -p01/p11, -p01/p11),
(0, Complement(FiniteSet(p01), FiniteSet(0)), 0, p11, 0, 0, 0, 0, 0, 0, 0, 1, -l3*p11/p01, -p01/p11, l3),
(0, Complement(FiniteSet(p01), FiniteSet(0)), 0, p11, 0, 0, 0, 0, 0, 0, 0, -l2*p11/p01, -l3*p11/p01, l2, l3),
)
assert sol != nonlinsolve(system, solvefor)
| 40.303307 | 126 | 0.557253 |
d07d3ece37b8bdc7d5a8b94ec42d0087cf0152ab | 571 | py | Python | app/main/controller/auth_controller.py | leandroudala/StoreApi | fd4cda5b741badcb8d8b294436848c1504ad00ba | [
"MIT"
] | null | null | null | app/main/controller/auth_controller.py | leandroudala/StoreApi | fd4cda5b741badcb8d8b294436848c1504ad00ba | [
"MIT"
] | null | null | null | app/main/controller/auth_controller.py | leandroudala/StoreApi | fd4cda5b741badcb8d8b294436848c1504ad00ba | [
"MIT"
] | null | null | null | from flask import request
from flask_restplus import Resource
from app.main.service.auth_helper import Auth
from ..util.dto import AuthDto
api = AuthDto.api
user_auth = AuthDto.user_auth
@api.route('/login')
class UserLogin(Resource):
@api.doc('Log in user and get a token')
@api.expect(user_auth, validate=True)
def post(self):
return Auth.login_user(data=request.json)
@api.route('/logout')
class UserLogout(Resource):
@api.doc('Log out a user')
def post(self):
return Auth.logout_user(data=request.headers.get('Authorization'))
| 25.954545 | 74 | 0.723292 |
f846d5abe0b14a18263eefeef5f5f1439fc6f1f6 | 1,215 | py | Python | web/handlers/ProcessedVideoWS.py | AmarMaksumic/ML-CV | 730a6f543aa6d5b019f09e4481c3188964ecbe2c | [
"MIT"
] | null | null | null | web/handlers/ProcessedVideoWS.py | AmarMaksumic/ML-CV | 730a6f543aa6d5b019f09e4481c3188964ecbe2c | [
"MIT"
] | 5 | 2021-08-25T16:17:12.000Z | 2022-03-12T00:59:43.000Z | web/handlers/ProcessedVideoWS.py | AmarMaksumic/ML-CV | 730a6f543aa6d5b019f09e4481c3188964ecbe2c | [
"MIT"
] | null | null | null | from os.path import abspath, dirname, join
import uuid
from tornado.websocket import WebSocketHandler, WebSocketClosedError
import logging
logger = logging.getLogger(__name__)
class ProcessedVideoWS(WebSocketHandler):
"""
"""
watchers = set()
def open(self):
self.uid = str(uuid.uuid4())
logger.info("ProcessedVideoWS websocket opened %s" % self.uid)
self.write_message('connected')
ProcessedVideoWS.watchers.add(self)
def check_origin(self, origin):
"""
Allow CORS requests
"""
return True
"""
broadcast to clients, assumes its target data
"""
def on_message(self, message):
# logger.info('pushing image')
for waiter in ProcessedVideoWS.watchers:
if waiter == self:
continue
waiter.write_message(message, binary=True)
def send_msg(self, msg):
try:
self.write_message(msg, False)
except WebSocketClosedError:
logger.warn("websocket closed when sending message")
def on_close(self):
logger.info("ProcessedVideoWS websocket closed %s" % self.uid)
ProcessedVideoWS.watchers.remove(self)
| 27.613636 | 70 | 0.636214 |
21f3cafc3cd9dc33e4228cb175e4756af3530773 | 3,158 | py | Python | orlando/test_meu_ETL.py | orlandosaraivajr/pds-api-client | eae931d37ef3f58c77bcd99a474ffc3a11686b13 | [
"MIT"
] | 4 | 2022-02-04T00:18:20.000Z | 2022-02-12T03:26:49.000Z | orlando/test_meu_ETL.py | orlandosaraivajr/pds-api-client | eae931d37ef3f58c77bcd99a474ffc3a11686b13 | [
"MIT"
] | 1 | 2022-02-23T02:23:39.000Z | 2022-02-23T02:23:39.000Z | orlando/test_meu_ETL.py | orlandosaraivajr/pds-api-client | eae931d37ef3f58c77bcd99a474ffc3a11686b13 | [
"MIT"
] | 6 | 2022-02-04T03:44:19.000Z | 2022-02-23T22:01:33.000Z | from meu_ETL import Extract, Extract_1, Loader
from meu_ETL import Extract_2
from meu_ETL import MeuETL
from meu_ETL import Loader_2
import pytest
class TestExtract_1:
def test_metodo_get_lista_fiis(self):
objeto = Extract_1()
lista = objeto.get_lista_fiis()
assert isinstance(lista, list)
assert isinstance(lista[0], str)
assert len(lista) == 383
assert len(lista[0]) == 4
def test_metodo_get_cotacao_1(self):
objeto = Extract_1()
cotacao = objeto.get_cotacao('hglg11')
assert isinstance(cotacao, str)
def test_metodo_get_cotacao_2(self):
objeto = Extract_1()
cotacao = objeto.get_cotacao('hglg')
assert isinstance(cotacao, str)
assert cotacao == '0.0'
def test_str_repr(self):
objeto = Extract_1()
msg1 = 'Classe Extract que faz uso de planilha e site yahoo finance '
msg2 = 'Classe Extract => Planilha e YahooFinance (API)'
assert str(objeto) == msg1
assert repr(objeto) == msg2
class TestExtract_2:
def test_metodo_get_lista_fiis(self):
objeto = Extract_2()
lista = objeto.get_lista_fiis()
assert isinstance(lista, list)
assert isinstance(lista[0], str)
assert len(lista) == 383
assert len(lista[0]) == 4
def test_metodo_get_cotacao_1(self):
objeto = Extract_2()
cotacao = objeto.get_cotacao('hglg11')
assert isinstance(cotacao, str)
def test_metodo_get_cotacao_2(self):
objeto = Extract_2()
cotacao = objeto.get_cotacao('hglg')
assert isinstance(cotacao, str)
assert cotacao == '0'
def test_metodo__extract_to_list(self):
objeto = Extract_2()
objeto._extract_to_list()
assert isinstance(objeto.lista_fiis, list)
assert len(objeto.lista_fiis) > 200
def test_str_repr(self):
objeto = Extract_2()
msg1 = 'Classe Extract que faz uso da API mfinance '
msg2 = 'Classe Extract => API MFinance'
assert str(objeto) == msg1
assert repr(objeto) == msg2
class TestMeuETL:
def test_instanciar_meu_etl_caminho_feliz(self):
extrator = Extract_1()
loader = Loader_2()
etl = MeuETL(extrator, loader)
assert isinstance(etl, MeuETL)
assert isinstance(etl.extract, Extract)
assert isinstance(etl.load, Loader)
assert isinstance(etl.lista_fiis, list)
def test_extrator_incorreto(self):
extrator = str
loader = Loader_2()
with pytest.raises(TypeError) as error:
MeuETL(extrator, loader)
assert str(error.value) == 'Extrador incorreto'
def test_carregador_incorreto(self):
extrator = Extract_1()
loader = str
with pytest.raises(TypeError) as error:
MeuETL(extrator, loader)
assert str(error.value) == 'Carregador incorreto'
def test_str_repr(self):
extrator = Extract_1()
loader = Loader_2()
etl = MeuETL(extrator, loader)
msg1 = 'Classe Meu ETL'
assert str(etl) == msg1
assert repr(etl) == msg1
| 31.267327 | 77 | 0.633946 |
f42f84ce2264d12900c8630fde57d959b9f8aec8 | 63,446 | py | Python | napari/layers/base/base.py | jojoelfe/napari | b52a136dad392c091b0008c0b8d7fcc5ef460f66 | [
"BSD-3-Clause"
] | 7 | 2018-07-03T17:35:46.000Z | 2018-11-07T15:48:58.000Z | napari/layers/base/base.py | jojoelfe/napari | b52a136dad392c091b0008c0b8d7fcc5ef460f66 | [
"BSD-3-Clause"
] | 23 | 2018-06-03T17:17:03.000Z | 2019-01-23T18:45:05.000Z | napari/layers/base/base.py | jojoelfe/napari | b52a136dad392c091b0008c0b8d7fcc5ef460f66 | [
"BSD-3-Clause"
] | 4 | 2018-06-03T15:04:32.000Z | 2018-10-09T19:11:18.000Z | from __future__ import annotations
import itertools
import warnings
from abc import ABC, abstractmethod
from collections import defaultdict, namedtuple
from contextlib import contextmanager
from functools import cached_property
from typing import List, Optional, Tuple, Union
import magicgui as mgui
import numpy as np
from ...utils._dask_utils import configure_dask
from ...utils._magicgui import add_layer_to_viewer, get_layers
from ...utils.events import EmitterGroup, Event
from ...utils.events.event import WarningEmitter
from ...utils.geometry import (
find_front_back_face,
intersect_line_with_axis_aligned_bounding_box_3d,
)
from ...utils.key_bindings import KeymapProvider
from ...utils.mouse_bindings import MousemapProvider
from ...utils.naming import magic_name
from ...utils.status_messages import generate_layer_status
from ...utils.transforms import Affine, CompositeAffine, TransformChain
from ...utils.translations import trans
from .._source import current_source
from ..utils.interactivity_utils import drag_data_to_projected_distance
from ..utils.layer_utils import (
coerce_affine,
compute_multiscale_level_and_corners,
convert_to_uint8,
dims_displayed_world_to_layer,
get_extent_world,
)
from ..utils.plane import ClippingPlane, ClippingPlaneList
from ._base_constants import Blending
Extent = namedtuple('Extent', 'data world step')
def no_op(layer: Layer, event: Event) -> None:
"""
A convenient no-op event for the layer mouse binding.
This makes it easier to handle many cases by inserting this as
as place holder
Parameters
----------
layer : Layer
Current layer on which this will be bound as a callback
event : Event
event that triggered this mouse callback.
Returns
-------
None
"""
return None
@mgui.register_type(choices=get_layers, return_callback=add_layer_to_viewer)
class Layer(KeymapProvider, MousemapProvider, ABC):
"""Base layer class.
Parameters
----------
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
rotate : float, 3-tuple of float, or n-D array.
If a float convert into a 2D rotation matrix using that value as an
angle. If 3-tuple convert into a 3D rotation matrix, using a yaw,
pitch, roll convention. Otherwise assume an nD rotation. Angles are
assumed to be in degrees. They can be converted from radians with
np.degrees if needed.
shear : 1-D array or n-D array
Either a vector of upper triangular values, or an nD shear matrix with
ones along the main diagonal.
affine : n-D array or napari.utils.transforms.Affine
(N+1, N+1) affine transformation matrix in homogeneous coordinates.
The first (N, N) entries correspond to a linear transform and
the final column is a length N translation vector and a 1 or a napari
`Affine` transform object. Applied as an extra transform on top of the
provided scale, rotate, and shear values.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', 'translucent_no_depth', and 'additive'}.
visible : bool
Whether the layer visual is currently being displayed.
multiscale : bool
Whether the data is multiscale or not. Multiscale data is
represented by a list of data objects and should go from largest to
smallest.
Attributes
----------
name : str
Unique name of the layer.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
visible : bool
Whether the layer visual is currently being displayed.
blending : Blending
Determines how RGB and alpha values get mixed.
* ``Blending.OPAQUE``
Allows for only the top layer to be visible and corresponds to
``depth_test=True``, ``cull_face=False``, ``blend=False``.
* ``Blending.TRANSLUCENT``
Allows for multiple layers to be blended with different opacity and
corresponds to ``depth_test=True``, ``cull_face=False``,
``blend=True``, ``blend_func=('src_alpha', 'one_minus_src_alpha')``.
* ``Blending.TRANSLUCENT_NO_DEPTH``
Allows for multiple layers to be blended with different opacity, but
no depth testing is performed. Corresponds to ``depth_test=False``,
``cull_face=False``, ``blend=True``,
``blend_func=('src_alpha', 'one_minus_src_alpha')``.
* ``Blending.ADDITIVE``
Allows for multiple layers to be blended together with different
colors and opacity. Useful for creating overlays. It corresponds to
``depth_test=False``, ``cull_face=False``, ``blend=True``,
``blend_func=('src_alpha', 'one')``.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
rotate : float, 3-tuple of float, or n-D array.
If a float convert into a 2D rotation matrix using that value as an
angle. If 3-tuple convert into a 3D rotation matrix, using a yaw,
pitch, roll convention. Otherwise assume an nD rotation. Angles are
assumed to be in degrees. They can be converted from radians with
np.degrees if needed.
shear : 1-D array or n-D array
Either a vector of upper triangular values, or an nD shear matrix with
ones along the main diagonal.
affine : n-D array or napari.utils.transforms.Affine
(N+1, N+1) affine transformation matrix in homogeneous coordinates.
The first (N, N) entries correspond to a linear transform and
the final column is a length N translation vector and a 1 or a napari
`Affine` transform object. Applied as an extra transform on top of the
provided scale, rotate, and shear values.
multiscale : bool
Whether the data is multiscale or not. Multiscale data is
represented by a list of data objects and should go from largest to
smallest.
cache : bool
Whether slices of out-of-core datasets should be cached upon retrieval.
Currently, this only applies to dask arrays.
z_index : int
Depth of the layer visual relative to other visuals in the scenecanvas.
coordinates : tuple of float
Cursor position in data coordinates.
corner_pixels : array
Coordinates of the top-left and bottom-right canvas pixels in the data
coordinates of each layer. For multiscale data the coordinates are in
the space of the currently viewed data level, not the highest resolution
level.
position : tuple
Cursor position in world coordinates.
ndim : int
Dimensionality of the layer.
thumbnail : (N, M, 4) array
Array of thumbnail data for the layer.
status : str
Displayed in status bar bottom left.
help : str
Displayed in status bar bottom right.
interactive : bool
Determine if canvas pan/zoom interactivity is enabled.
cursor : str
String identifying which cursor displayed over canvas.
cursor_size : int | None
Size of cursor if custom. None yields default size
scale_factor : float
Conversion factor from canvas coordinates to image coordinates, which
depends on the current zoom level.
source : Source
source of the layer (such as a plugin or widget)
Notes
-----
Must define the following:
* `_extent_data`: property
* `data` property (setter & getter)
May define the following:
* `_set_view_slice()`: called to set currently viewed slice
* `_basename()`: base/default name of the layer
"""
def __init__(
self,
data,
ndim,
*,
name=None,
metadata=None,
scale=None,
translate=None,
rotate=None,
shear=None,
affine=None,
opacity=1,
blending='translucent',
visible=True,
multiscale=False,
cache=True, # this should move to future "data source" object.
experimental_clipping_planes=None,
):
super().__init__()
if name is None and data is not None:
name = magic_name(data)
self._source = current_source()
self.dask_optimized_slicing = configure_dask(data, cache)
self._metadata = dict(metadata or {})
self._opacity = opacity
self._blending = Blending(blending)
self._visible = visible
self._freeze = False
self._status = 'Ready'
self._help = ''
self._cursor = 'standard'
self._cursor_size = 1
self._interactive = True
self._value = None
self.scale_factor = 1
self.multiscale = multiscale
self._experimental_clipping_planes = ClippingPlaneList()
self._ndim = ndim
self._ndisplay = 2
self._dims_order = list(range(ndim))
# Create a transform chain consisting of four transforms:
# 1. `tile2data`: An initial transform only needed to display tiles
# of an image. It maps pixels of the tile into the coordinate space
# of the full resolution data and can usually be represented by a
# scale factor and a translation. A common use case is viewing part
# of lower resolution level of a multiscale image, another is using a
# downsampled version of an image when the full image size is larger
# than the maximum allowed texture size of your graphics card.
# 2. `data2physical`: The main transform mapping data to a world-like
# physical coordinate that may also encode acquisition parameters or
# sample spacing.
# 3. `physical2world`: An extra transform applied in world-coordinates that
# typically aligns this layer with another.
# 4. `world2grid`: An additional transform mapping world-coordinates
# into a grid for looking at layers side-by-side.
if scale is None:
scale = [1] * ndim
if translate is None:
translate = [0] * ndim
self._transforms = TransformChain(
[
Affine(np.ones(ndim), np.zeros(ndim), name='tile2data'),
CompositeAffine(
scale,
translate,
rotate=rotate,
shear=shear,
ndim=ndim,
name='data2physical',
),
coerce_affine(affine, ndim=ndim, name='physical2world'),
Affine(np.ones(ndim), np.zeros(ndim), name='world2grid'),
]
)
self._position = (0,) * ndim
self._dims_point = [0] * ndim
self.corner_pixels = np.zeros((2, ndim), dtype=int)
self._editable = True
self._array_like = False
self._thumbnail_shape = (32, 32, 4)
self._thumbnail = np.zeros(self._thumbnail_shape, dtype=np.uint8)
self._update_properties = True
self._name = ''
self.experimental_clipping_planes = experimental_clipping_planes
self.events = EmitterGroup(
source=self,
refresh=Event,
set_data=Event,
blending=Event,
opacity=Event,
visible=Event,
scale=Event,
translate=Event,
rotate=Event,
shear=Event,
affine=Event,
data=Event,
name=Event,
thumbnail=Event,
status=Event,
help=Event,
interactive=Event,
cursor=Event,
cursor_size=Event,
editable=Event,
loaded=Event,
_ndisplay=Event,
select=WarningEmitter(
trans._(
"'layer.events.select' is deprecated and will be removed in napari v0.4.9, use 'viewer.layers.selection.events.changed' instead, and inspect the 'added' attribute on the event.",
deferred=True,
),
type='select',
),
deselect=WarningEmitter(
trans._(
"'layer.events.deselect' is deprecated and will be removed in napari v0.4.9, use 'viewer.layers.selection.events.changed' instead, and inspect the 'removed' attribute on the event.",
deferred=True,
),
type='deselect',
),
)
self.name = name
def __str__(self):
"""Return self.name."""
return self.name
def __repr__(self):
cls = type(self)
return f"<{cls.__name__} layer {repr(self.name)} at {hex(id(self))}>"
def _mode_setter_helper(self, mode, Modeclass):
"""
Helper to manage callbacks in multiple layers
Parameters
----------
mode : Modeclass | str
New mode for the current layer.
Modeclass : Enum
Enum for the current class representing the modes it can takes,
this is usually specific on each subclass.
Returns
-------
tuple (new Mode, mode changed)
"""
mode = Modeclass(mode)
assert mode is not None
if not self.editable:
mode = Modeclass.PAN_ZOOM
if mode == self._mode:
return mode, False
if mode.value not in Modeclass.keys():
raise ValueError(
trans._(
"Mode not recognized: {mode}", deferred=True, mode=mode
)
)
old_mode = self._mode
self._mode = mode
for callback_list, mode_dict in [
(self.mouse_drag_callbacks, self._drag_modes),
(self.mouse_move_callbacks, self._move_modes),
(
self.mouse_double_click_callbacks,
getattr(
self, '_double_click_modes', defaultdict(lambda: no_op)
),
),
]:
if mode_dict[old_mode] in callback_list:
callback_list.remove(mode_dict[old_mode])
callback_list.append(mode_dict[mode])
self.cursor = self._cursor_modes[mode]
self.interactive = mode == Modeclass.PAN_ZOOM
return mode, True
@classmethod
def _basename(cls):
return f'{cls.__name__}'
@property
def name(self):
"""str: Unique name of the layer."""
return self._name
@name.setter
def name(self, name):
if name == self.name:
return
if not name:
name = self._basename()
self._name = str(name)
self.events.name()
@property
def metadata(self) -> dict:
"""Key/value map for user-stored data."""
return self._metadata
@metadata.setter
def metadata(self, value: dict) -> None:
self._metadata.clear()
self._metadata.update(value)
@property
def source(self):
return self._source
@property
def loaded(self) -> bool:
"""Return True if this layer is fully loaded in memory.
This base class says that layers are permanently in the loaded state.
Derived classes that do asynchronous loading can override this.
"""
return True
@property
def opacity(self):
"""float: Opacity value between 0.0 and 1.0."""
return self._opacity
@opacity.setter
def opacity(self, opacity):
if not 0.0 <= opacity <= 1.0:
raise ValueError(
trans._(
'opacity must be between 0.0 and 1.0; got {opacity}',
deferred=True,
opacity=opacity,
)
)
self._opacity = opacity
self._update_thumbnail()
self.events.opacity()
@property
def blending(self):
"""Blending mode: Determines how RGB and alpha values get mixed.
Blending.OPAQUE
Allows for only the top layer to be visible and corresponds to
depth_test=True, cull_face=False, blend=False.
Blending.TRANSLUCENT
Allows for multiple layers to be blended with different opacity
and corresponds to depth_test=True, cull_face=False,
blend=True, blend_func=('src_alpha', 'one_minus_src_alpha').
Blending.ADDITIVE
Allows for multiple layers to be blended together with
different colors and opacity. Useful for creating overlays. It
corresponds to depth_test=False, cull_face=False, blend=True,
blend_func=('src_alpha', 'one').
"""
return str(self._blending)
@blending.setter
def blending(self, blending):
self._blending = Blending(blending)
self.events.blending()
@property
def visible(self):
"""bool: Whether the visual is currently being displayed."""
return self._visible
@visible.setter
def visible(self, visibility):
self._visible = visibility
self.refresh()
self.events.visible()
self.editable = self._set_editable() if self.visible else False
@property
def editable(self):
"""bool: Whether the current layer data is editable from the viewer."""
return self._editable
@editable.setter
def editable(self, editable):
if self._editable == editable:
return
self._editable = editable
self._set_editable(editable=editable)
self.events.editable()
@property
def scale(self):
"""list: Anisotropy factors to scale data into world coordinates."""
return self._transforms['data2physical'].scale
@scale.setter
def scale(self, scale):
if scale is None:
scale = [1] * self.ndim
self._transforms['data2physical'].scale = np.array(scale)
self._update_dims()
self.events.scale()
@property
def translate(self):
"""list: Factors to shift the layer by in units of world coordinates."""
return self._transforms['data2physical'].translate
@translate.setter
def translate(self, translate):
self._transforms['data2physical'].translate = np.array(translate)
self._update_dims()
self.events.translate()
@property
def rotate(self):
"""array: Rotation matrix in world coordinates."""
return self._transforms['data2physical'].rotate
@rotate.setter
def rotate(self, rotate):
self._transforms['data2physical'].rotate = rotate
self._update_dims()
self.events.rotate()
@property
def shear(self):
"""array: Shear matrix in world coordinates."""
return self._transforms['data2physical'].shear
@shear.setter
def shear(self, shear):
self._transforms['data2physical'].shear = shear
self._update_dims()
self.events.shear()
@property
def affine(self):
"""napari.utils.transforms.Affine: Extra affine transform to go from physical to world coordinates."""
return self._transforms['physical2world']
@affine.setter
def affine(self, affine):
# Assignment by transform name is not supported by TransformChain and
# EventedList, so use the integer index instead. For more details, see:
# https://github.com/napari/napari/issues/3058
self._transforms[2] = coerce_affine(
affine, ndim=self.ndim, name='physical2world'
)
self._update_dims()
self.events.affine()
@property
def translate_grid(self):
warnings.warn(
trans._(
"translate_grid will become private in v0.4.14. See Layer.translate or Layer.data_to_world() instead.",
),
DeprecationWarning,
stacklevel=2,
)
return self._translate_grid
@translate_grid.setter
def translate_grid(self, translate_grid):
warnings.warn(
trans._(
"translate_grid will become private in v0.4.14. See Layer.translate or Layer.data_to_world() instead.",
),
DeprecationWarning,
stacklevel=2,
)
self._translate_grid = translate_grid
@property
def _translate_grid(self):
"""list: Factors to shift the layer by."""
return self._transforms['world2grid'].translate
@_translate_grid.setter
def _translate_grid(self, translate_grid):
if np.all(self._translate_grid == translate_grid):
return
self._transforms['world2grid'].translate = np.array(translate_grid)
self.events.translate()
@property
def _is_moving(self):
return self._private_is_moving
@_is_moving.setter
def _is_moving(self, value):
assert value in (True, False)
if value:
assert self._moving_coordinates is not None
self._private_is_moving = value
@property
def _dims_displayed(self):
"""To be removed displayed dimensions."""
# Ultimately we aim to remove all slicing information from the layer
# itself so that layers can be sliced in different ways for multiple
# canvas. See https://github.com/napari/napari/pull/1919#issuecomment-738585093
# for additional discussion.
return self._dims_order[-self._ndisplay :]
@property
def _dims_not_displayed(self):
"""To be removed not displayed dimensions."""
# Ultimately we aim to remove all slicing information from the layer
# itself so that layers can be sliced in different ways for multiple
# canvas. See https://github.com/napari/napari/pull/1919#issuecomment-738585093
# for additional discussion.
return self._dims_order[: -self._ndisplay]
@property
def _dims_displayed_order(self):
"""To be removed order of displayed dimensions."""
# Ultimately we aim to remove all slicing information from the layer
# itself so that layers can be sliced in different ways for multiple
# canvas. See https://github.com/napari/napari/pull/1919#issuecomment-738585093
# for additional discussion.
displayed = self._dims_displayed
# equivalent to: order = np.argsort(displayed)
order = sorted(range(len(displayed)), key=lambda x: displayed[x])
return tuple(order)
def _update_dims(self, event=None):
"""Update the dims model and clear the extent cache.
This function needs to be called whenever data or transform information
changes, and should be called before events get emitted.
"""
from ...components.dims import reorder_after_dim_reduction
ndim = self._get_ndim()
old_ndim = self._ndim
if old_ndim > ndim:
keep_axes = range(old_ndim - ndim, old_ndim)
self._transforms = self._transforms.set_slice(keep_axes)
self._dims_point = self._dims_point[-ndim:]
self._dims_order = list(
reorder_after_dim_reduction(self._dims_order[-ndim:])
)
self._position = self._position[-ndim:]
elif old_ndim < ndim:
new_axes = range(ndim - old_ndim)
self._transforms = self._transforms.expand_dims(new_axes)
self._dims_point = [0] * (ndim - old_ndim) + self._dims_point
self._dims_order = list(range(ndim - old_ndim)) + [
o + ndim - old_ndim for o in self._dims_order
]
self._position = (0,) * (ndim - old_ndim) + self._position
self._ndim = ndim
if 'extent' in self.__dict__:
del self.extent
self.refresh() # This call is need for invalidate cache of extent in LayerList. If you remove it pleas ad another workaround.
@property
@abstractmethod
def data(self):
# user writes own docstring
raise NotImplementedError()
@data.setter
@abstractmethod
def data(self, data):
raise NotImplementedError()
@property
@abstractmethod
def _extent_data(self) -> np.ndarray:
"""Extent of layer in data coordinates.
Returns
-------
extent_data : array, shape (2, D)
"""
raise NotImplementedError()
@property
def _extent_world(self) -> np.ndarray:
"""Range of layer in world coordinates.
Returns
-------
extent_world : array, shape (2, D)
"""
# Get full nD bounding box
return get_extent_world(
self._extent_data, self._data_to_world, self._array_like
)
@cached_property
def extent(self) -> Extent:
"""Extent of layer in data and world coordinates."""
extent_data = self._extent_data
data_to_world = self._data_to_world
extent_world = get_extent_world(
extent_data, data_to_world, self._array_like
)
return Extent(
data=extent_data,
world=extent_world,
step=abs(data_to_world.scale),
)
@property
def _slice_indices(self):
"""(D, ) array: Slice indices in data coordinates."""
if len(self._dims_not_displayed) == 0:
# all dims are displayed dimensions
return (slice(None),) * self.ndim
if self.ndim > self._ndisplay:
inv_transform = self._data_to_world.inverse
# Subspace spanned by non displayed dimensions
non_displayed_subspace = np.zeros(self.ndim)
for d in self._dims_not_displayed:
non_displayed_subspace[d] = 1
# Map subspace through inverse transform, ignoring translation
_inv_transform = Affine(
ndim=self.ndim,
linear_matrix=inv_transform.linear_matrix,
translate=None,
)
mapped_nd_subspace = _inv_transform(non_displayed_subspace)
# Look at displayed subspace
displayed_mapped_subspace = (
mapped_nd_subspace[d] for d in self._dims_displayed
)
# Check that displayed subspace is null
if any(abs(v) > 1e-8 for v in displayed_mapped_subspace):
warnings.warn(
trans._(
'Non-orthogonal slicing is being requested, but is not fully supported. Data is displayed without applying an out-of-slice rotation or shear component.',
deferred=True,
),
category=UserWarning,
)
slice_inv_transform = inv_transform.set_slice(self._dims_not_displayed)
world_pts = [self._dims_point[ax] for ax in self._dims_not_displayed]
data_pts = slice_inv_transform(world_pts)
if getattr(self, "_round_index", True):
# A round is taken to convert these values to slicing integers
data_pts = np.round(data_pts).astype(int)
indices = [slice(None)] * self.ndim
for i, ax in enumerate(self._dims_not_displayed):
indices[ax] = data_pts[i]
return tuple(indices)
@abstractmethod
def _get_ndim(self):
raise NotImplementedError()
def _set_editable(self, editable=None):
if editable is None:
self.editable = True
def _get_base_state(self):
"""Get dictionary of attributes on base layer.
Returns
-------
state : dict
Dictionary of attributes on base layer.
"""
base_dict = {
'name': self.name,
'metadata': self.metadata,
'scale': list(self.scale),
'translate': list(self.translate),
'rotate': [list(r) for r in self.rotate],
'shear': list(self.shear),
'affine': self.affine.affine_matrix,
'opacity': self.opacity,
'blending': self.blending,
'visible': self.visible,
'experimental_clipping_planes': [
plane.dict() for plane in self.experimental_clipping_planes
],
}
return base_dict
@abstractmethod
def _get_state(self):
raise NotImplementedError()
@property
def _type_string(self):
return self.__class__.__name__.lower()
def as_layer_data_tuple(self):
state = self._get_state()
state.pop('data', None)
return self.data, state, self._type_string
@property
def thumbnail(self):
"""array: Integer array of thumbnail for the layer"""
return self._thumbnail
@thumbnail.setter
def thumbnail(self, thumbnail):
if 0 in thumbnail.shape:
thumbnail = np.zeros(self._thumbnail_shape, dtype=np.uint8)
if thumbnail.dtype != np.uint8:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
thumbnail = convert_to_uint8(thumbnail)
padding_needed = np.subtract(self._thumbnail_shape, thumbnail.shape)
pad_amounts = [(p // 2, (p + 1) // 2) for p in padding_needed]
thumbnail = np.pad(thumbnail, pad_amounts, mode='constant')
# blend thumbnail with opaque black background
background = np.zeros(self._thumbnail_shape, dtype=np.uint8)
background[..., 3] = 255
f_dest = thumbnail[..., 3][..., None] / 255
f_source = 1 - f_dest
thumbnail = thumbnail * f_dest + background * f_source
self._thumbnail = thumbnail.astype(np.uint8)
self.events.thumbnail()
@property
def ndim(self):
"""int: Number of dimensions in the data."""
return self._ndim
@property
def help(self):
"""str: displayed in status bar bottom right."""
return self._help
@help.setter
def help(self, help):
if help == self.help:
return
self._help = help
self.events.help(help=help)
@property
def interactive(self):
"""bool: Determine if canvas pan/zoom interactivity is enabled."""
return self._interactive
@interactive.setter
def interactive(self, interactive):
if interactive == self._interactive:
return
self._interactive = interactive
self.events.interactive(interactive=interactive)
@property
def cursor(self):
"""str: String identifying cursor displayed over canvas."""
return self._cursor
@cursor.setter
def cursor(self, cursor):
if cursor == self.cursor:
return
self._cursor = cursor
self.events.cursor(cursor=cursor)
@property
def cursor_size(self):
"""int | None: Size of cursor if custom. None yields default size."""
return self._cursor_size
@cursor_size.setter
def cursor_size(self, cursor_size):
if cursor_size == self.cursor_size:
return
self._cursor_size = cursor_size
self.events.cursor_size(cursor_size=cursor_size)
@property
def experimental_clipping_planes(self):
return self._experimental_clipping_planes
@experimental_clipping_planes.setter
def experimental_clipping_planes(
self,
value: Union[
dict,
ClippingPlane,
List[Union[ClippingPlane, dict]],
ClippingPlaneList,
],
):
self._experimental_clipping_planes.clear()
if value is None:
return
if isinstance(value, (ClippingPlane, dict)):
value = [value]
for new_plane in value:
plane = ClippingPlane()
plane.update(new_plane)
self._experimental_clipping_planes.append(plane)
    def set_view_slice(self):
        """Slice the layer data for the current view.

        Wraps the subclass implementation in the dask-optimized slicing
        context manager.
        """
        with self.dask_optimized_slicing():
            self._set_view_slice()
    @abstractmethod
    def _set_view_slice(self):
        # Subclasses implement the actual slicing of their data.
        raise NotImplementedError()
    def _slice_dims(self, point=None, ndisplay=2, order=None):
        """Slice data with values from a global dims model.
        Note this will likely be moved off the base layer soon.
        Parameters
        ----------
        point : list
            Values of data to slice at in world coordinates.
        ndisplay : int
            Number of dimensions to be displayed.
        order : list of int
            Order of dimensions, where last `ndisplay` will be
            rendered in canvas.
        """
        # World dimensionality: from the layer when no point given, else
        # from the point itself.
        if point is None:
            ndim = self.ndim
        else:
            ndim = len(point)
        if order is None:
            order = list(range(ndim))
        # adjust the order of the global dims based on the number of
        # dimensions that a layer has - for example a global order of
        # [2, 1, 0, 3] -> [0, 1] for a layer that only has two dimensions
        # or -> [1, 0, 2] for a layer with three as that corresponds to
        # the relative order of the last two and three dimensions
        # respectively
        order = self._world_to_data_dims_displayed(order, ndim_world=ndim)
        if point is None:
            # No point supplied: slice at the origin, with full slices
            # (slice(None)) along the displayed dimensions.
            point = [0] * ndim
            nd = min(self.ndim, ndisplay)
            for i in order[-nd:]:
                point[i] = slice(None)
        else:
            point = list(point)
        # If no slice data has changed, then do nothing
        offset = ndim - self.ndim
        if (
            np.all(order == self._dims_order)
            and ndisplay == self._ndisplay
            and np.all(point[offset:] == self._dims_point)
        ):
            return
        self._dims_order = order
        if self._ndisplay != ndisplay:
            self._ndisplay = ndisplay
            self.events._ndisplay()
        # Update the point values, dropping leading world dims the layer
        # does not have.
        self._dims_point = point[offset:]
        self._update_dims()
        self._set_editable()
    @abstractmethod
    def _update_thumbnail(self):
        # Subclasses must regenerate the layer thumbnail here.
        raise NotImplementedError()
    @abstractmethod
    def _get_value(self, position):
        """Value of the data at a position in data coordinates.
        Must be implemented by subclasses; called by ``get_value``.
        Parameters
        ----------
        position : tuple
            Position in data coordinates.
        Returns
        -------
        value : tuple
            Value of the data.
        """
        raise NotImplementedError()
    def get_value(
        self,
        position,
        *,
        view_direction: Optional[np.ndarray] = None,
        dims_displayed: Optional[List[int]] = None,
        world=False,
    ):
        """Value of the data at a position.
        If the layer is not visible, return None.
        Parameters
        ----------
        position : tuple
            Position in either data or world coordinates.
        view_direction : Optional[np.ndarray]
            A unit vector giving the direction of the ray in nD world coordinates.
            The default value is None.
        dims_displayed : Optional[List[int]]
            A list of the dimensions currently being displayed in the viewer.
            The default value is None.
        world : bool
            If True the position is taken to be in world coordinates
            and converted into data coordinates. False by default.
        Returns
        -------
        value : tuple, None
            Value of the data. If the layer is not visible return None.
        """
        if self.visible:
            if world:
                ndim_world = len(position)
                if dims_displayed is not None:
                    # convert the dims_displayed to the layer dims. This accounts
                    # for differences in the number of dimensions in the world
                    # dims versus the layer and for transpose and rolls.
                    dims_displayed = dims_displayed_world_to_layer(
                        dims_displayed,
                        ndim_world=ndim_world,
                        ndim_layer=self.ndim,
                    )
                position = self.world_to_data(position)
            if (dims_displayed is not None) and (view_direction is not None):
                # 2D display (or 2D layer): direct point lookup.
                if len(dims_displayed) == 2 or self.ndim == 2:
                    value = self._get_value(position=tuple(position))
                elif len(dims_displayed) == 3:
                    # 3D display: cast a ray through the data bounding box
                    # and pick along it.
                    view_direction = self._world_to_data_ray(
                        list(view_direction)
                    )
                    start_point, end_point = self.get_ray_intersections(
                        position=position,
                        view_direction=view_direction,
                        dims_displayed=dims_displayed,
                        world=False,
                    )
                    value = self._get_value_3d(
                        start_point=start_point,
                        end_point=end_point,
                        dims_displayed=dims_displayed,
                    )
            else:
                value = self._get_value(position)
        else:
            value = None
        # This should be removed as soon as possible, it is still
        # used in Points and Shapes.
        self._value = value
        return value
def _get_value_3d(
self,
start_point: np.ndarray,
end_point: np.ndarray,
dims_displayed: List[int],
) -> Union[float, int]:
"""Get the layer data value along a ray
Parameters
----------
start_point : np.ndarray
The start position of the ray used to interrogate the data.
end_point : np.ndarray
The end position of the ray used to interrogate the data.
dims_displayed : List[int]
The indices of the dimensions currently displayed in the Viewer.
Returns
-------
value
The data value along the supplied ray.
"""
return None
    def projected_distance_from_mouse_drag(
        self,
        start_position: np.ndarray,
        end_position: np.ndarray,
        view_direction: np.ndarray,
        vector: np.ndarray,
        dims_displayed: Union[List, np.ndarray],
    ):
        """Calculate the length of the projection of a line between two mouse
        clicks onto a vector (or array of vectors) in data coordinates.
        Parameters
        ----------
        start_position : np.ndarray
            Starting point of the drag vector in world coordinates
            (converted to displayed data coordinates internally).
        end_position : np.ndarray
            End point of the drag vector in world coordinates
            (converted to displayed data coordinates internally).
        view_direction : np.ndarray
            Vector defining the plane normal of the plane onto which the drag
            vector is projected.
        vector : np.ndarray
            (3,) unit vector or (n, 3) array thereof on which to project the drag
            vector from start_event to end_event. This argument is defined in data
            coordinates.
        dims_displayed : Union[List, np.ndarray]
            (3,) list of currently displayed dimensions
        Returns
        -------
        projected_distance : (1, ) or (n, ) np.ndarray of float
        """
        # Convert the drag endpoints and the view normal into the displayed
        # data coordinate system before projecting.
        start_position = self._world_to_displayed_data(
            start_position, dims_displayed
        )
        end_position = self._world_to_displayed_data(
            end_position, dims_displayed
        )
        view_direction = self._world_to_displayed_data_ray(
            view_direction, dims_displayed
        )
        return drag_data_to_projected_distance(
            start_position, end_position, view_direction, vector
        )
@contextmanager
def block_update_properties(self):
previous = self._update_properties
self._update_properties = False
try:
yield
finally:
self._update_properties = previous
    def _set_highlight(self, force=False):
        """Render layer highlights when appropriate.
        No-op in the base class; subclasses with highlight state override.
        Parameters
        ----------
        force : bool
            Bool that forces a redraw to occur when `True`.
        """
        pass
    def refresh(self, event=None):
        """Refresh all layer data based on current view slice."""
        if self.visible:
            self.set_view_slice()
            # refresh is called in _update_dims, which invalidates the extent
            # cache; this event lets the LayerList invalidate its own
            # extent cache as well.
            self.events.set_data()
            self._update_thumbnail()
            self._set_highlight(force=True)
def world_to_data(self, position):
"""Convert from world coordinates to data coordinates.
Parameters
----------
position : tuple, list, 1D array
Position in world coordinates. If longer then the
number of dimensions of the layer, the later
dimensions will be used.
Returns
-------
tuple
Position in data coordinates.
"""
if len(position) >= self.ndim:
coords = list(position[-self.ndim :])
else:
coords = [0] * (self.ndim - len(position)) + list(position)
return tuple(self._transforms[1:].simplified.inverse(coords))
def data_to_world(self, position):
"""Convert from data coordinates to world coordinates.
Parameters
----------
position : tuple, list, 1D array
Position in data coordinates. If longer then the
number of dimensions of the layer, the later
dimensions will be used.
Returns
-------
tuple
Position in world coordinates.
"""
if len(position) >= self.ndim:
coords = list(position[-self.ndim :])
else:
coords = [0] * (self.ndim - len(position)) + list(position)
return tuple(self._transforms[1:].simplified(coords))
def _world_to_displayed_data(
self, position: np.ndarray, dims_displayed: np.ndarray
) -> tuple:
"""Convert world to data coordinates for displayed dimensions only.
Parameters
----------
position : tuple, list, 1D array
Position in world coordinates. If longer then the
number of dimensions of the layer, the later
dimensions will be used.
dims_displayed : list, 1D array
Indices of displayed dimensions of the data.
Returns
-------
tuple
Position in data coordinates for the displayed dimensions only
"""
position_nd = self.world_to_data(position)
position_ndisplay = np.asarray(position_nd)[dims_displayed]
return tuple(position_ndisplay)
    @property
    def _data_to_world(self) -> Affine:
        """The transform from data to world coordinates.
        This affine transform is composed from the affine property and the
        other transform properties in the following order:
        affine * (rotate * shear * scale + translate)
        """
        # Indices 1:3 skip the first transform, which is tile2data
        # (see the note in _update_draw).
        return self._transforms[1:3].simplified
def _world_to_data_ray(self, vector) -> tuple:
"""Convert a vector defining an orientation from world coordinates to data coordinates.
For example, this would be used to convert the view ray.
Parameters
----------
vector : tuple, list, 1D array
A vector in world coordinates.
Returns
-------
tuple
Vector in data coordinates.
"""
p1 = np.asarray(self.world_to_data(vector))
p0 = np.asarray(self.world_to_data(np.zeros_like(vector)))
normalized_vector = (p1 - p0) / np.linalg.norm(p1 - p0)
return tuple(normalized_vector)
    def _world_to_displayed_data_ray(
        self, vector_world, dims_displayed
    ) -> np.ndarray:
        """Convert an orientation from world to displayed data coordinates.
        For example, this would be used to convert the view ray.
        Parameters
        ----------
        vector_world : tuple, list, 1D array
            A vector in world coordinates.
        dims_displayed : list, 1D array
            Indices of the displayed dimensions of the data.
        Returns
        -------
        tuple
            Unit vector in data coordinates, displayed dimensions only.
        """
        vector_data_nd = np.asarray(self._world_to_data_ray(vector_world))
        vector_data_ndisplay = vector_data_nd[dims_displayed]
        # Re-normalize after dropping the non-displayed components
        # (in-place division on the freshly-created array).
        vector_data_ndisplay /= np.linalg.norm(vector_data_ndisplay)
        return vector_data_ndisplay
    def _world_to_data_dims_displayed(
        self, dims_displayed: List[int], ndim_world: int
    ) -> List[int]:
        """Convert indices of displayed dims from world to data coordinates.
        This accounts for differences in dimensionality between the world
        and the data coordinates. For example a world dims order of
        [2, 1, 0, 3] would be [0, 1] for a layer that only has two dimensions
        or [1, 0, 2] for a layer with three as that corresponds to the
        relative order of the last two and three dimensions respectively
        Parameters
        ----------
        dims_displayed : List[int]
            The world displayed dimensions.
        ndim_world : int
            The number of dimensions in the world coordinate system.
        Returns
        -------
        dims_displayed_data : List[int]
            The displayed dimensions in data coordinates.
        """
        # offset > 0: world has more dims than the layer; offset < 0: fewer.
        offset = ndim_world - self.ndim
        order = np.array(dims_displayed)
        if offset <= 0:
            # Prepend the layer dims missing from the world order, then
            # shift the remaining indices.
            return list(range(-offset)) + list(order - offset)
        else:
            # Drop world dims the layer does not have, then shift.
            return list(order[order >= offset] - offset)
    def _display_bounding_box(self, dims_displayed: np.ndarray):
        """An axis aligned (self._ndisplay, 2) bounding box around the data"""
        # Select displayed columns of the (2, ndim) extent and transpose to
        # (ndisplay, 2) == (axis, (min, max)).
        return self._extent_data[:, dims_displayed].T
def click_plane_from_click_data(
self,
click_position: np.ndarray,
view_direction: np.ndarray,
dims_displayed: List,
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate a (point, normal) plane parallel to the canvas in data
coordinates, centered on the centre of rotation of the camera.
Parameters
----------
click_position : np.ndarray
click position in world coordinates from mouse event.
view_direction : np.ndarray
view direction in world coordinates from mouse event.
dims_displayed : List
dimensions of the data array currently in view.
Returns
-------
click_plane : Tuple[np.ndarray, np.ndarray]
tuple of (plane_position, plane_normal) in data coordinates.
"""
click_position = np.asarray(click_position)
view_direction = np.asarray(view_direction)
plane_position = self.world_to_data(click_position)[dims_displayed]
plane_normal = self._world_to_data_ray(view_direction)[dims_displayed]
return plane_position, plane_normal
    def get_ray_intersections(
        self,
        position: List[float],
        view_direction: np.ndarray,
        dims_displayed: List[int],
        world: bool = True,
    ) -> Union[Tuple[np.ndarray, np.ndarray], Tuple[None, None]]:
        """Get the start and end point for the ray extending
        from a point through the data bounding box.
        Parameters
        ----------
        position
            the position of the point in nD coordinates. World vs. data
            is set by the world keyword argument.
        view_direction : np.ndarray
            a unit vector giving the direction of the ray in nD coordinates.
            World vs. data is set by the world keyword argument.
        dims_displayed
            a list of the dimensions currently being displayed in the viewer.
        world : bool
            True if the provided coordinates are in world coordinates.
            Default value is True.
        Returns
        -------
        start_point : np.ndarray
            The point on the axis-aligned data bounding box that the cursor click
            intersects with. This is the point closest to the camera.
            The point is the full nD coordinates of the layer data.
            If the click does not intersect the axis-aligned data bounding box,
            None is returned.
        end_point : np.ndarray
            The point on the axis-aligned data bounding box that the cursor click
            intersects with. This is the point farthest from the camera.
            The point is the full nD coordinates of the layer data.
            If the click does not intersect the axis-aligned data bounding box,
            None is returned.
        """
        # Ray casting is only defined for 3D rendering.
        if len(dims_displayed) != 3:
            return None, None
        # create the bounding box in data coordinates
        bounding_box = self._display_bounding_box(dims_displayed)
        start_point, end_point = self._get_ray_intersections(
            position=position,
            view_direction=view_direction,
            dims_displayed=dims_displayed,
            world=world,
            bounding_box=bounding_box,
        )
        return start_point, end_point
    def _get_offset_data_position(self, position: List[float]) -> List[float]:
        """Adjust position for offset between viewer and data coordinates."""
        # Identity in the base class; subclasses with a coordinate offset
        # override this.
        return position
    def _get_ray_intersections(
        self,
        position: List[float],
        view_direction: np.ndarray,
        dims_displayed: List[int],
        world: bool = True,
        bounding_box: Optional[np.ndarray] = None,
    ) -> Union[Tuple[np.ndarray, np.ndarray], Tuple[None, None]]:
        """Get the start and end point for the ray extending
        from a point through the data bounding box.
        Parameters
        ----------
        position
            the position of the point in nD coordinates. World vs. data
            is set by the world keyword argument.
        view_direction : np.ndarray
            a unit vector giving the direction of the ray in nD coordinates.
            World vs. data is set by the world keyword argument.
        dims_displayed
            a list of the dimensions currently being displayed in the viewer.
        world : bool
            True if the provided coordinates are in world coordinates.
            Default value is True.
        bounding_box : np.ndarray
            A (2, 3) bounding box around the data currently in view
        Returns
        -------
        start_point : np.ndarray
            The point on the axis-aligned data bounding box that the cursor click
            intersects with. This is the point closest to the camera.
            The point is the full nD coordinates of the layer data.
            If the click does not intersect the axis-aligned data bounding box,
            None is returned.
        end_point : np.ndarray
            The point on the axis-aligned data bounding box that the cursor click
            intersects with. This is the point farthest from the camera.
            The point is the full nD coordinates of the layer data.
            If the click does not intersect the axis-aligned data bounding box,
            None is returned."""
        # get the view direction and click position in data coords
        # for the displayed dimensions only
        if world is True:
            view_dir = self._world_to_displayed_data_ray(
                view_direction, dims_displayed
            )
            click_pos_data = self._world_to_displayed_data(
                position, dims_displayed
            )
        else:
            # adjust for any offset between viewer and data coordinates
            position = self._get_offset_data_position(position)
            view_dir = np.asarray(view_direction)[dims_displayed]
            click_pos_data = np.asarray(position)[dims_displayed]
        # Determine the front and back faces
        front_face_normal, back_face_normal = find_front_back_face(
            click_pos_data, bounding_box, view_dir
        )
        if front_face_normal is None and back_face_normal is None:
            # click does not intersect the data bounding box
            return None, None
        # Calculate ray-bounding box face intersections
        start_point_displayed_dimensions = (
            intersect_line_with_axis_aligned_bounding_box_3d(
                click_pos_data, view_dir, bounding_box, front_face_normal
            )
        )
        end_point_displayed_dimensions = (
            intersect_line_with_axis_aligned_bounding_box_3d(
                click_pos_data, view_dir, bounding_box, back_face_normal
            )
        )
        # add the coordinates for the axes not displayed
        # NOTE(review): np.asarray keeps the dtype of `position`; if it is
        # an int sequence the float intersections are truncated — confirm
        # callers always pass float positions.
        start_point = np.asarray(position)
        start_point[dims_displayed] = start_point_displayed_dimensions
        end_point = np.asarray(position)
        end_point[dims_displayed] = end_point_displayed_dimensions
        return start_point, end_point
@property
def _displayed_axes(self):
# assignment upfront to avoid repeated computation of properties
_dims_displayed = self._dims_displayed
_dims_displayed_order = self._dims_displayed_order
displayed_axes = [_dims_displayed[i] for i in _dims_displayed_order]
return displayed_axes
@property
def _corner_pixels_displayed(self):
displayed_axes = self._displayed_axes
corner_pixels_displayed = self.corner_pixels[:, displayed_axes]
return corner_pixels_displayed
    def _update_draw(
        self, scale_factor, corner_pixels_displayed, shape_threshold
    ):
        """Update canvas scale and corner values on draw.
        For layer multiscale determining if a new resolution level or tile is
        required.
        Parameters
        ----------
        scale_factor : float
            Scale factor going from canvas to world coordinates.
        corner_pixels_displayed : array, shape (2, 2)
            Coordinates of the top-left and bottom-right canvas pixels in
            world coordinates.
        shape_threshold : tuple
            Requested shape of field of view in data coordinates.
        """
        self.scale_factor = scale_factor
        displayed_axes = self._displayed_axes
        # must adjust displayed_axes according to _dims_order
        displayed_axes = np.asarray(
            [self._dims_order[d] for d in displayed_axes]
        )
        # we need to compute all four corners to compute a complete,
        # data-aligned bounding box, because top-left/bottom-right may not
        # remain top-left and bottom-right after transformations.
        all_corners = list(itertools.product(*corner_pixels_displayed.T))
        # Note that we ignore the first transform which is tile2data
        data_corners = (
            self._transforms[1:]
            .simplified.set_slice(displayed_axes)
            .inverse(all_corners)
        )
        # find the maximal data-axis-aligned bounding box containing all four
        # canvas corners
        data_bbox = np.stack(
            [np.min(data_corners, axis=0), np.max(data_corners, axis=0)]
        )
        # round and clip the bounding box values to the data extent
        data_bbox_int = np.stack(
            [np.floor(data_bbox[0]), np.ceil(data_bbox[1])]
        ).astype(int)
        displayed_extent = self.extent.data[:, displayed_axes]
        data_bbox_clipped = np.clip(
            data_bbox_int, displayed_extent[0], displayed_extent[1]
        )
        if self._ndisplay == 2 and self.multiscale:
            # Pick the multiscale level whose downsampled field of view fits
            # the requested shape, then store corners at that level.
            level, scaled_corners = compute_multiscale_level_and_corners(
                data_bbox_clipped,
                shape_threshold,
                self.downsample_factors[:, displayed_axes],
            )
            corners = np.zeros((2, self.ndim))
            corners[:, displayed_axes] = scaled_corners
            corners = corners.astype(int)
            display_shape = tuple(
                corners[1, displayed_axes] - corners[0, displayed_axes]
            )
            # A degenerate (zero-area) view needs no refresh.
            if any(s == 0 for s in display_shape):
                return
            # Only refresh when the level or the visible corners changed.
            if self.data_level != level or not np.all(
                self.corner_pixels == corners
            ):
                self._data_level = level
                self.corner_pixels = corners
                self.refresh()
        else:
            corners = np.zeros((2, self.ndim), dtype=int)
            corners[:, displayed_axes] = data_bbox_clipped
            self.corner_pixels = corners
    def get_status(
        self,
        position: np.ndarray,
        *,
        view_direction: Optional[np.ndarray] = None,
        dims_displayed: Optional[List[int]] = None,
        world=False,
    ):
        """
        Status message of the data at a coordinate position.
        Parameters
        ----------
        position : tuple
            Position in either data or world coordinates.
        view_direction : Optional[np.ndarray]
            A unit vector giving the direction of the ray in nD world coordinates.
            The default value is None.
        dims_displayed : Optional[List[int]]
            A list of the dimensions currently being displayed in the viewer.
            The default value is None.
        world : bool
            If True the position is taken to be in world coordinates
            and converted into data coordinates. False by default.
        Returns
        -------
        msg : string
            String containing a message that can be used as a status update.
        """
        # Look up the data value at the position, then format it into a
        # human-readable status message.
        value = self.get_value(
            position,
            view_direction=view_direction,
            dims_displayed=dims_displayed,
            world=world,
        )
        return generate_layer_status(self.name, position, value)
    def _get_tooltip_text(
        self,
        position,
        *,
        view_direction: Optional[np.ndarray] = None,
        dims_displayed: Optional[List[int]] = None,
        world: bool = False,
    ):
        """
        tooltip message of the data at a coordinate position.
        The base class provides no tooltip; subclasses override to supply one.
        Parameters
        ----------
        position : tuple
            Position in either data or world coordinates.
        view_direction : Optional[np.ndarray]
            A unit vector giving the direction of the ray in nD world coordinates.
            The default value is None.
        dims_displayed : Optional[List[int]]
            A list of the dimensions currently being displayed in the viewer.
            The default value is None.
        world : bool
            If True the position is taken to be in world coordinates
            and converted into data coordinates. False by default.
        Returns
        -------
        msg : string
            String containing a message that can be used as a tooltip.
        """
        return ""
    def save(self, path: str, plugin: Optional[str] = None) -> List[str]:
        """Save this layer to ``path`` with default (or specified) plugin.
        Parameters
        ----------
        path : str
            A filepath, directory, or URL to open. Extensions may be used to
            specify output format (provided a plugin is available for the
            requested format).
        plugin : str, optional
            Name of the plugin to use for saving. If ``None`` then all plugins
            corresponding to appropriate hook specification will be looped
            through to find the first one that can save the data.
        Returns
        -------
        list of str
            File paths of any files that were written.
        """
        # Local import — presumably to avoid a circular import at module
        # load time; confirm before moving to top level.
        from ...plugins.io import save_layers
        return save_layers(path, [self], plugin=plugin)
def _on_selection(self, selected: bool):
# This method is a temporary workaround to the fact that the Points
# layer needs to know when its selection state changes so that it can
# update the highlight state. This, along with the events.select and
# events.deselect emitters, (and the LayerList._on_selection_event
# method) can be removed once highlighting logic has been removed from
# the layer model.
if selected:
self.events.select()
else:
self.events.deselect()
    @classmethod
    def create(
        cls, data, meta: Optional[dict] = None, layer_type: Optional[str] = None
    ) -> Layer:
        """Create layer from `data` of type `layer_type`.
        Primarily intended for usage by reader plugin hooks and creating a
        layer from an unwrapped layer data tuple.
        Parameters
        ----------
        data : Any
            Data in a format that is valid for the corresponding `layer_type`.
        meta : dict, optional
            Dict of keyword arguments that will be passed to the corresponding
            layer constructor. If any keys in `meta` are not valid for the
            corresponding layer type, an exception will be raised.
        layer_type : str
            Type of layer to add. Must be the (case insensitive) name of a
            Layer subclass. If not provided, the layer is assumed to
            be "image", unless data.dtype is one of (np.int32, np.uint32,
            np.int64, np.uint64), in which case it is assumed to be "labels".
        Raises
        ------
        ValueError
            If ``layer_type`` is not one of the recognized layer types.
        TypeError
            If any keyword arguments in ``meta`` are unexpected for the
            corresponding `add_*` method for this layer_type.
        Examples
        --------
        A typical use case might be to unpack a tuple of layer data with a
        specified layer_type.
        >>> data = (
        ...     np.random.random((10, 2)) * 20,
        ...     {'face_color': 'blue'},
        ...     'points',
        ... )
        >>> Layer.create(*data)
        """
        from ... import layers
        from ..image._image_utils import guess_labels
        layer_type = (layer_type or '').lower()
        # assumes that big integer type arrays are likely labels.
        if not layer_type:
            layer_type = guess_labels(data)
        if layer_type not in layers.NAMES:
            raise ValueError(
                trans._(
                    "Unrecognized layer_type: '{layer_type}'. Must be one of: {layer_names}.",
                    deferred=True,
                    layer_type=layer_type,
                    layer_names=layers.NAMES,
                )
            )
        # Layer classes are named as the title-cased layer_type.
        Cls = getattr(layers, layer_type.title())
        try:
            return Cls(data, **(meta or {}))
        except Exception as exc:
            # Re-raise anything that is not a bad-kwarg TypeError.
            if 'unexpected keyword argument' not in str(exc):
                raise exc
            bad_key = str(exc).split('keyword argument ')[-1]
            raise TypeError(
                trans._(
                    "_add_layer_from_data received an unexpected keyword argument ({bad_key}) for layer type {layer_type}",
                    deferred=True,
                    bad_key=bad_key,
                    layer_type=layer_type,
                )
            ) from exc
| 35.543978 | 202 | 0.60557 |
9cc921c9cec5133b98a89ff6f73e767b697c0883 | 2,930 | py | Python | drivers/agilent_lightwave.py | jtambasco/photonic-coupling-drivers | 9f8e422b1b6e2e5ff783c9146130ed71ee01241d | [
"MIT"
] | 4 | 2018-11-15T06:58:40.000Z | 2021-06-10T12:02:14.000Z | drivers/agilent_lightwave.py | jtambasco/photonic-coupling-drivers | 9f8e422b1b6e2e5ff783c9146130ed71ee01241d | [
"MIT"
] | null | null | null | drivers/agilent_lightwave.py | jtambasco/photonic-coupling-drivers | 9f8e422b1b6e2e5ff783c9146130ed71ee01241d | [
"MIT"
] | 3 | 2019-05-24T16:26:10.000Z | 2021-06-07T16:03:46.000Z | from .agilent_lightwave_connection import AgilentLightWaveConnection
from .lasers.agilent_8164B_laser import LaserAgilent8164B
from .power_meters.agilent_8164B_power_meter import PowerMeterAgilent8164B
class AgilentLightWaveSystem(AgilentLightWaveConnection):
'''
Driver for the Agilent Lightwave.
Args:
gpib_num (int): The number of the GPIB bus
the power meter is sitting on.
gpib_dev_num (int): The device number that
the power meter is on the aforementioned bus.
power_meter_channel_num (int): Either `1` or `2`
depending on which power metre channel to use.
output_mode (str):
'HIGH' -> The High Power output is regulated.
'LOWS' -> The Low SSE output is regulated.
'BHR' -> Both outputs are active but only the
High Power output is Regulated.
'BLR' -> Both outputs are active but only the
Low SSE output is Regulated.
power_unit (str): Either \'W\' or \'dBm\' depending
on whether the power units should be displayed
in [W] or [dBm] on the Agielent 8164B\'s screen.
'''
def __init__(self, gpib_num, gpib_dev_num):
super().__init__(gpib_num, gpib_dev_num)
if not self.get_lock_status():
self.set_unlock()
def get_lock_status(self):
lock_status = bool(int(self._query('lock?')))
return lock_status
def set_lock(self, password='1234'):
assert len(password) == 4, 'Password should be 4 characters long.'
self._write('lock 1,%s' % password)
return self.get_lock_status()
def set_unlock(self, password='1234'):
assert len(password) == 4, 'Password should be 4 characters long.'
self._write('lock 0,%s' % password)
return self.get_lock_status()
def get_modules_installed(self):
return self._query('*OPT?').strip().replace(' ', '').split(',')
def get_latest_error(self):
return self._query('syst:err?')
def clear_error_list(self):
self._write('*CLS')
return self.get_latest_error()
def set_preset(self):
self._write('*RST')
class AgilentLightWave():
    '''Bundle of the system, laser and power-meter drivers of an
    Agilent 8164B lightwave mainframe.'''
    def __init__(self, gpib_num, gpib_dev_num, power_meter_channel_num='0',
                 laser_output_mode='high', power_unit='W'):
        # Preset the mainframe first, then attach the sub-instrument drivers.
        self.system = AgilentLightWaveSystem(gpib_num, gpib_dev_num)
        self.system.set_preset()
        self.laser = LaserAgilent8164B(
            gpib_num, gpib_dev_num, power_unit, laser_output_mode
        )
        self.power_meter = PowerMeterAgilent8164B(
            gpib_num, gpib_dev_num, power_meter_channel_num, power_unit
        )
| 39.594595 | 75 | 0.596246 |
bbd08f90345294a3b4b1d5e7bed3d35d73c3277a | 10,648 | py | Python | airflow/providers/amazon/aws/hooks/athena.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 8,092 | 2016-04-27T20:32:29.000Z | 2019-01-05T07:39:33.000Z | airflow/providers/amazon/aws/hooks/athena.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 2,961 | 2016-05-05T07:16:16.000Z | 2019-01-05T08:47:59.000Z | airflow/providers/amazon/aws/hooks/athena.py | npodewitz/airflow | 511ea702d5f732582d018dad79754b54d5e53f9d | [
"Apache-2.0"
] | 3,546 | 2016-05-04T20:33:16.000Z | 2019-01-05T05:14:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains AWS Athena hook.
.. spelling::
PageIterator
"""
import warnings
from time import sleep
from typing import Any, Dict, Optional
from botocore.paginate import PageIterator
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
class AthenaHook(AwsBaseHook):
"""
Interact with AWS Athena to run, poll queries and return query results
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
:class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
:param sleep_time: Time (in seconds) to wait between two consecutive calls to check query status on Athena
"""
INTERMEDIATE_STATES = (
'QUEUED',
'RUNNING',
)
FAILURE_STATES = (
'FAILED',
'CANCELLED',
)
SUCCESS_STATES = ('SUCCEEDED',)
TERMINAL_STATES = (
"SUCCEEDED",
"FAILED",
"CANCELLED",
)
    def __init__(self, *args: Any, sleep_time: int = 30, **kwargs: Any) -> None:
        # Fix the boto3 client type to 'athena'; everything else is
        # forwarded to AwsBaseHook.
        super().__init__(client_type='athena', *args, **kwargs)  # type: ignore
        self.sleep_time = sleep_time
    def run_query(
        self,
        query: str,
        query_context: Dict[str, str],
        result_configuration: Dict[str, Any],
        client_request_token: Optional[str] = None,
        workgroup: str = 'primary',
    ) -> str:
        """
        Run Presto query on athena with provided config and return submitted query_execution_id

        :param query: Presto query to run
        :param query_context: Context in which query need to be run
        :param result_configuration: Dict with path to store results in and config related to encryption
        :param client_request_token: Unique token created by user to avoid multiple executions of same query
        :param workgroup: Athena workgroup name, when not specified, will be 'primary'
        :return: str, id of the submitted query execution
        """
        params = {
            'QueryString': query,
            'QueryExecutionContext': query_context,
            'ResultConfiguration': result_configuration,
            'WorkGroup': workgroup,
        }
        # The token makes start_query_execution idempotent on the AWS side.
        if client_request_token:
            params['ClientRequestToken'] = client_request_token
        response = self.get_conn().start_query_execution(**params)
        query_execution_id = response['QueryExecutionId']
        return query_execution_id
def check_query_status(self, query_execution_id: str) -> Optional[str]:
"""
Fetch the status of submitted athena query. Returns None or one of valid query states.
:param query_execution_id: Id of submitted athena query
:return: str
"""
response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
state = None
try:
state = response['QueryExecution']['Status']['State']
except Exception as ex:
self.log.error('Exception while getting query state %s', ex)
finally:
# The error is being absorbed here and is being handled by the caller.
# The error is being absorbed to implement retries.
return state
def get_state_change_reason(self, query_execution_id: str) -> Optional[str]:
    """
    Fetch the reason for a state change (e.g. error message). Returns None or reason string.

    :param query_execution_id: Id of submitted athena query
    :return: str
    """
    response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
    reason = None
    try:
        reason = response['QueryExecution']['Status']['StateChangeReason']
    except Exception as ex:
        # The error is deliberately absorbed so the caller can implement
        # retries; None means the reason could not be determined.
        # BUG FIX: the original returned from inside a `finally` block, which
        # silently swallowed *every* exception (including the logging call's
        # own failures and BaseException) and is deprecated by PEP 765.
        self.log.error('Exception while getting query state change reason: %s', ex)
    return reason
def get_query_results(
    self, query_execution_id: str, next_token_id: Optional[str] = None, max_results: int = 1000
) -> Optional[dict]:
    """
    Fetch submitted athena query results.

    Returns None if the query is still running or failed/cancelled,
    otherwise the dict of query output.

    :param query_execution_id: Id of submitted athena query
    :param next_token_id: The token that specifies where to start pagination.
    :param max_results: The maximum number of results (rows) to return in this request.
    :return: dict
    """
    state = self.check_query_status(query_execution_id)
    # Guard clauses: results only exist for successfully finished queries.
    if state is None:
        self.log.error('Invalid Query state')
        return None
    if state in self.INTERMEDIATE_STATES or state in self.FAILURE_STATES:
        self.log.error('Query is in "%s" state. Cannot fetch results', state)
        return None
    fetch_args: Dict[str, Any] = {'QueryExecutionId': query_execution_id, 'MaxResults': max_results}
    if next_token_id:
        fetch_args['NextToken'] = next_token_id
    return self.get_conn().get_query_results(**fetch_args)
def get_query_results_paginator(
    self,
    query_execution_id: str,
    max_items: Optional[int] = None,
    page_size: Optional[int] = None,
    starting_token: Optional[str] = None,
) -> Optional['PageIterator']:
    """
    Fetch submitted athena query results as a paginator.

    Returns None if the query is still running or failed/cancelled,
    otherwise a paginator to iterate through pages of results. If you wish
    to get all results at once, call build_full_result() on the returned
    PageIterator.

    :param query_execution_id: Id of submitted athena query
    :param max_items: The total number of items to return.
    :param page_size: The size of each page.
    :param starting_token: A token to specify where to start paginating.
    :return: PageIterator
    """
    state = self.check_query_status(query_execution_id)
    # Guard clauses: results only exist for successfully finished queries.
    if state is None:
        self.log.error('Invalid Query state (null)')
        return None
    if state in self.INTERMEDIATE_STATES or state in self.FAILURE_STATES:
        self.log.error('Query is in "%s" state. Cannot fetch results', state)
        return None
    pagination_config = {
        'MaxItems': max_items,
        'PageSize': page_size,
        'StartingToken': starting_token,
    }
    return self.get_conn().get_paginator('get_query_results').paginate(
        QueryExecutionId=query_execution_id,
        PaginationConfig=pagination_config,
    )
def poll_query_status(self, query_execution_id: str, max_tries: Optional[int] = None) -> Optional[str]:
    """
    Poll the status of submitted athena query until query state reaches final state.
    Returns one of the final states

    :param query_execution_id: Id of submitted athena query
    :param max_tries: Number of times to poll for query state before function exits
    :return: str
    """
    try_number = 1
    final_query_state = None  # Query state when query reaches final state or max_tries reached
    while True:
        query_state = self.check_query_status(query_execution_id)
        if query_state is None:
            # None means the state could not be determined; treat as retryable.
            self.log.info('Trial %s: Invalid query state. Retrying again', try_number)
        elif query_state in self.TERMINAL_STATES:
            # BUG FIX: the message previously ended with a stray '}'
            # ('... Final state is %s}'), a leftover from an f-string edit.
            self.log.info(
                'Trial %s: Query execution completed. Final state is %s', try_number, query_state
            )
            final_query_state = query_state
            break
        else:
            self.log.info('Trial %s: Query is still in non-terminal state - %s', try_number, query_state)
        if max_tries and try_number >= max_tries:  # Break loop if max_tries reached
            final_query_state = query_state
            break
        try_number += 1
        sleep(self.sleep_time)
    return final_query_state
def get_output_location(self, query_execution_id: str) -> str:
    """
    Function to get the output location of the query results
    in s3 uri format.

    :param query_execution_id: Id of submitted athena query
    :return: str
    :raises ValueError: if ``query_execution_id`` is falsy, or if the API
        returned an empty/falsy response
    :raises KeyError: if the response carries no OutputLocation entry
    """
    # Guard clauses instead of nested if/else pyramids.
    if not query_execution_id:
        raise ValueError("Invalid Query execution id")
    response = self.get_conn().get_query_execution(QueryExecutionId=query_execution_id)
    if not response:
        # BUG FIX: this branch previously executed a bare `raise` with no
        # active exception, which crashed with an unrelated
        # RuntimeError("No active exception to re-raise").
        raise ValueError(
            'Invalid response returned for query_execution_id: {}'.format(query_execution_id)
        )
    try:
        return response['QueryExecution']['ResultConfiguration']['OutputLocation']
    except KeyError:
        self.log.error("Error retrieving OutputLocation")
        raise
def stop_query(self, query_execution_id: str) -> Dict:
    """
    Cancel the submitted athena query

    :param query_execution_id: Id of submitted athena query
    :return: dict
    """
    athena_client = self.get_conn()
    return athena_client.stop_query_execution(QueryExecutionId=query_execution_id)
class AWSAthenaHook(AthenaHook):
    """
    This hook is deprecated.
    Please use :class:`airflow.providers.amazon.aws.hooks.athena.AthenaHook`.
    """

    def __init__(self, *args, **kwargs):
        # Emit a DeprecationWarning attributed to the caller (stacklevel=2),
        # then delegate construction unchanged to AthenaHook.
        warnings.warn(
            "This hook is deprecated. Please use `airflow.providers.amazon.aws.hooks.athena.AthenaHook`.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
| 39.291513 | 110 | 0.646225 |
b22e37411a2c9eabf9ccee7f940bce5f0821f39a | 2,895 | py | Python | tests/model/test_page_xml_renderer.py | hnesk/ocrd-browser | c84e55cf58cab3821e04f8691958a44bca018b17 | [
"MIT"
] | null | null | null | tests/model/test_page_xml_renderer.py | hnesk/ocrd-browser | c84e55cf58cab3821e04f8691958a44bca018b17 | [
"MIT"
] | null | null | null | tests/model/test_page_xml_renderer.py | hnesk/ocrd-browser | c84e55cf58cab3821e04f8691958a44bca018b17 | [
"MIT"
] | null | null | null | from tests import TestCase
from ocrd_browser.model.page_xml_renderer import RegionFactory, Region
from ocrd_models.ocrd_page import CoordsType, SeparatorRegionType
class RegionFactoryTestCase(TestCase):
    """
    Tests for RegionFactory.create(): valid polygons build silently, broken
    coordinates are rejected with an ERROR log, and suspicious geometry
    (self-intersection, negative area, too few points) yields a Region whose
    ``warnings`` list describes the problem, alongside a WARNING log.
    """

    def setUp(self) -> None:
        # Identity transform: page coordinates map 1:1 onto image coordinates.
        identity = {
            'transform': [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
            'angle': 0,
            'features': ',normalized'
        }
        self.factory: RegionFactory = RegionFactory(identity, 'DUMMY_0001', None)

    def test_create(self):
        # A well-formed unit square: must produce a Region without any logging.
        ds = SeparatorRegionType(id='r6', Coords=CoordsType(points="0,0 0,1 1,1 1,0"))
        try:
            # assertNoLogs only exists from Python 3.10; emulate it by raising
            # a dummy ValueError inside the context so assertLogs does not
            # fail on the (expected) absence of captured records.
            with self.assertLogs('ocrd_browser.model.page_xml_renderer', level='WARNING') as log_watch:
                region = self.factory.create(ds)
                raise ValueError('Dummy instead of assertNoLogs')
        except ValueError:
            pass
        self.assertEqual(len(log_watch.output), 0, '{:d} Warning(s) logged "{:s}'.format(len(log_watch.output), '\n'.join(log_watch.output)))
        self.assertIsInstance(region, Region)
        self.assertGreater(region.poly.area, 0)

    def test_create_with_error(self):
        # All four points identical: the factory must log an ERROR and return None.
        ds = SeparatorRegionType(id='r6', Coords=CoordsType(points="1,1 1,1 1,1 1,1"))
        with self.assertLogs('ocrd_browser.model.page_xml_renderer', level='ERROR') as log_watch:
            region = self.factory.create(ds)
        self.assertRegex(log_watch.output[0], r'ERROR:ocrd_browser\.model\.page_xml_renderer\.RegionFactory:Page "DUMMY_0001" @ SeparatorRegion#r6 Too few points.+')
        self.assertIsNone(region)

    def test_create_with_warning(self):
        # A self-intersecting outline: a Region is still produced, but both the
        # log output and the region's own warnings mention the self-intersection.
        ds = SeparatorRegionType(id='r6', Coords=CoordsType(points="239,1303 508,1303 899,1302 1626,1307 2441,1307 2444,1319 2414,1322 1664,1319 619,1317 235,1317 237,1302 235,1302"))
        with self.assertLogs('ocrd_browser.model.page_xml_renderer', level='WARNING') as log_watch:
            region = self.factory.create(ds)
        self.assertIsNotNone(region)
        self.assertRegex(log_watch.output[0], r'WARNING:ocrd_browser\.model\.page_xml_renderer\.RegionFactory:Page "DUMMY_0001" @ SeparatorRegion#r6 Self-intersection.+')
        self.assertRegex(region.warnings[0], r'Self-intersection.+')

    def test_create_with_warning_negative(self):
        # Points in the negative quadrant: region is created with an
        # "is negative" warning attached.
        ds = SeparatorRegionType(id='r6', Coords=CoordsType(points="0,0 0,-1 -1,-1 -1,0"))
        with self.assertLogs('ocrd_browser.model.page_xml_renderer', level='WARNING'):
            region = self.factory.create(ds)
        self.assertRegex(region.warnings[0], r'is negative')

    def test_create_with_warning_too_few_points(self):
        # Only three points (not four identical ones, cf. test_create_with_error):
        # region is created but warns about the low point count.
        ds = SeparatorRegionType(id='r6', Coords=CoordsType(points="0,0 0,1 1,1"))
        with self.assertLogs('ocrd_browser.model.page_xml_renderer', level='WARNING'):
            region = self.factory.create(ds)
        self.assertRegex(region.warnings[0], r'has too few points')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.