"""
Pipeline imports
"""
from .base import Pipeline
from .extractor import Extractor
from .factory import PipelineFactory
from .hfmodel import HFModel
from .hfonnx import HFOnnx
from .hfpipeline import HFPipeline
from .hftrainer import HFTrainer
from .labels import Labels
from .mlonnx import MLOnnx
from .questions import Questions
from .segmentation import Segmentation
from .similarity import Similarity
from .summary import Summary
from .tabular import Tabular
from .tensors import Tensors
from .textractor import Textractor
from .tokenizer import Tokenizer
from .transcription import Transcription
from .translation import Translation
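# These re-exports let callers import pipeline classes from the package root,
# e.g. `from <package>.pipeline import Summary, Translation` (the actual
# package name depends on where this __init__.py lives).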
|
from __future__ import unicode_literals
import re
import json
from django.conf import settings
from django.core.files import File
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse, JsonResponse
from django.contrib import messages
from froide.helper.utils import render_400, render_403
from froide.helper.redaction import redact_file
from ..models import FoiRequest, FoiMessage, FoiAttachment
from ..auth import (can_read_foirequest, can_read_foirequest_authenticated,
can_write_foirequest)
X_ACCEL_REDIRECT_PREFIX = getattr(settings, 'X_ACCEL_REDIRECT_PREFIX', '')
@require_POST
def approve_attachment(request, slug, attachment):
foirequest = get_object_or_404(FoiRequest, slug=slug)
if not can_write_foirequest(foirequest, request):
return render_403(request)
att = get_object_or_404(FoiAttachment, id=int(attachment))
if not att.can_approve and not request.user.is_staff:
return render_403(request)
att.approve_and_save()
messages.add_message(request, messages.SUCCESS,
_('Attachment approved.'))
return redirect(att.get_anchor_url())
def auth_message_attachment(request, message_id, attachment_name):
'''
nginx auth view
'''
message = get_object_or_404(FoiMessage, id=int(message_id))
attachment = get_object_or_404(FoiAttachment, belongs_to=message,
name=attachment_name)
foirequest = message.request
if not can_read_foirequest(foirequest, request):
return render_403(request)
if not attachment.approved:
# allow only approved attachments to be read
# do not allow anonymous authentication here
allowed = can_read_foirequest_authenticated(
foirequest, request, allow_code=False
)
if not allowed:
return render_403(request)
response = HttpResponse()
response['Content-Type'] = ""
response['X-Accel-Redirect'] = X_ACCEL_REDIRECT_PREFIX + attachment.get_internal_url()
return response
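# The nginx side of this handoff (an illustrative config sketch; the location
# name and alias are assumptions, matching X_ACCEL_REDIRECT_PREFIX):
#
#   location /protected/ {
#       internal;               # only reachable via an X-Accel-Redirect header
#       alias /srv/foi/files/;  # where attachment.get_internal_url() points
#   }
#
# Django answers the auth subrequest with an empty response whose
# X-Accel-Redirect header tells nginx which internal file to serve.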
def redact_attachment(request, slug, attachment_id):
foirequest = get_object_or_404(FoiRequest, slug=slug)
if not can_write_foirequest(foirequest, request):
return render_403(request)
attachment = get_object_or_404(FoiAttachment, pk=int(attachment_id),
belongs_to__request=foirequest)
if not attachment.can_approve and not request.user.is_staff:
return render_403(request)
already = None
if attachment.redacted:
already = attachment.redacted
elif attachment.is_redacted:
already = attachment
if (already is not None and not already.can_approve and
not request.user.is_staff):
return render_403(request)
if request.method == 'POST':
# Python 2.7/3.5 requires str for json.loads
instructions = json.loads(request.body.decode('utf-8'))
path = redact_file(attachment.file.file, instructions)
if path is None:
return render_400(request)
name = attachment.name.rsplit('.', 1)[0]
name = re.sub(r'[^\w\.\-]', '', name)
pdf_file = File(open(path, 'rb'))
if already:
att = already
else:
att = FoiAttachment(
belongs_to=attachment.belongs_to,
name=_('%s_redacted.pdf') % name,
is_redacted=True,
filetype='application/pdf',
approved=True,
can_approve=True
)
att.file = pdf_file
att.size = pdf_file.size
att.approve_and_save()
if not attachment.is_redacted:
attachment.redacted = att
attachment.can_approve = False
attachment.approved = False
attachment.save()
return JsonResponse({'url': att.get_anchor_url()})
return render(request, 'foirequest/redact.html', {
'foirequest': foirequest,
'attachment': attachment
})
|
# -*- coding: utf-8 -*-
import pandas as pd
from bio2bel import make_downloader
from ..constants import REGULATORY_SITES_PATH, REGULATORY_SITES_URL
__all__ = [
'download_regulatory_sites'
]
download_regulatory_sites = make_downloader(REGULATORY_SITES_URL, REGULATORY_SITES_PATH)
def get_regulatory_sites_df(url=None, cache=True, force_download=False):
"""Gets the modifications site flat file
:param Optional[str] url: The URL (or file path) to download.
:param bool cache: If true, the data is downloaded to the file system, else it is loaded from the internet
:param bool force_download: If true, overwrites a previously cached file
:rtype: pandas.DataFrame
"""
if url is None and cache:
url = download_regulatory_sites(force_download=force_download)
return pd.read_csv(
url or REGULATORY_SITES_URL,
skiprows=2,
sep='\t'
)
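# Minimal usage sketch (assumes network access, or a previously cached copy):
#
#   df = get_regulatory_sites_df()             # download and cache on first use
#   df = get_regulatory_sites_df(cache=False)  # read straight from the URL
#   print(df.head())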
|
#!/usr/bin/env python
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
from geometry_msgs.msg import Twist
import sys, select, termios, tty
msg = """
Control Your Turtlebot!
---------------------------
Moving around:
u i o
j k l
m , .
q/z : increase/decrease max speeds by 10%
w/x : increase/decrease only linear speed by 10%
e/c : increase/decrease only angular speed by 10%
space key, k : force stop
anything else : stop smoothly
CTRL-C to quit
"""
moveBindings = {
'i':(1,0),
'o':(1,-1),
'j':(0,1),
'l':(0,-1),
'u':(1,1),
',':(-1,0),
'.':(-1,1),
'm':(-1,-1),
}
speedBindings={
'q':(1.1,1.1),
'z':(.9,.9),
'w':(1.1,1),
'x':(.9,1),
'e':(1,1.1),
'c':(1,.9),
}
def getKey():
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
speed = .2
turn = 1
def vels(speed,turn):
return "currently:\tspeed %s\tturn %s " % (speed,turn)
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('mobile_base')
pub = rospy.Publisher('~mobile_base_controller/cmd_vel', Twist, queue_size=5)
x = 0
th = 0
status = 0
count = 0
acc = 0.1
target_speed = 0
target_turn = 0
control_speed = 0
control_turn = 0
try:
print msg
print vels(speed,turn)
while(1):
key = getKey()
if key in moveBindings.keys():
x = moveBindings[key][0]
th = moveBindings[key][1]
count = 0
elif key in speedBindings.keys():
speed = speed * speedBindings[key][0]
turn = turn * speedBindings[key][1]
count = 0
print vels(speed,turn)
if (status == 14):
print msg
status = (status + 1) % 15
elif key == ' ' or key == 'k' :
x = 0
th = 0
control_speed = 0
control_turn = 0
else:
count = count + 1
if count > 4:
x = 0
th = 0
if (key == '\x03'):
break
target_speed = speed * x
target_turn = turn * th
# if target_speed > control_speed:
# control_speed = min( target_speed, control_speed + 0.02 )
# elif target_speed < control_speed:
# control_speed = max( target_speed, control_speed - 0.02 )
# else:
control_speed = target_speed
# if target_turn > control_turn:
# control_turn = min( target_turn, control_turn + 0.1 )
# elif target_turn < control_turn:
# control_turn = max( target_turn, control_turn - 0.1 )
# else:
control_turn = target_turn
twist = Twist()
twist.linear.x = control_speed; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = control_turn
pub.publish(twist)
#print("loop: {0}".format(count))
#print("target: vx: {0}, wz: {1}".format(target_speed, target_turn))
#print("publihsed: vx: {0}, wz: {1}".format(twist.linear.x, twist.angular.z))
    except Exception as e:
        print e
finally:
twist = Twist()
twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0
pub.publish(twist)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
|
"""
Telnet parser
created by judy 2019/11/21
"""
import json
import os
import traceback
from commonbaby.mslog import MsLogger, MsLogManager
from datacontract.iscandataset.iscantask import IscanTask
from idownclient.clientdatafeedback.scoutdatafeedback.portinfo import (
PortInfo, Telnet)
from .zgrab2parserbase import Zgrab2ParserBase
class Zgrab2ParserTelnet(Zgrab2ParserBase):
"""zgrab2 parser"""
# _logger: MsLogger = MsLogManager.get_logger("Zgrab2Parsertelnet")
def __init__(self):
# self._name = type(self).__name__
Zgrab2ParserBase.__init__(self)
def parse_banner_telnet(self, task: IscanTask, level: int, pinfo_dict, resultfi: str):
"""
Parse telnet banner information
"""
try:
if not os.path.isfile(resultfi):
self._logger.error(
f"Resultfi not exists:\ntaskid:{task.taskid}\nresultfi:{resultfi}"
)
return
            # it's one JSON object per line
linenum = 1
with open(resultfi, mode='r') as fs:
while True:
try:
line = fs.readline()
if line is None or line == '':
break
sj = json.loads(line)
if sj is None:
continue
ip = sj.get('ip')
if ip is None or pinfo_dict.get(ip) is None:
self._logger.error("Unexpect error, cant get ip info from zgrab2 result")
continue
portinfo = pinfo_dict.get(ip)
res = self._parse_telnet(sj, task, level, portinfo)
                        # if parsing succeeded, the telnet info has been extracted
                        # and there is no need to keep parsing the remaining lines
if res:
break
except Exception:
                        self._logger.error(
                            "Parse one telnet banner json line error:\ntaskid:{}\nbatchid:{}\nresultfi:{}\nlinenum:{}"
                            .format(task.taskid, task.batchid, resultfi, linenum))
finally:
linenum += 1
except Exception:
self._logger.error(
"Parse telnet banner error:\ntaskid:{}\nresultfi:{}"
.format(task.taskid, resultfi))
def _parse_telnet(self, sj: dict, task: IscanTask, level: int,
portinfo: PortInfo):
"""
        Parse the telnet banner and other related information,
        i.e. everything contained in the port result
:param sj:
:param task:
:param level:
:param portinfo:
:return:
"""
res = False
if not sj.__contains__("data") or not sj["data"].__contains__(
"telnet"):
return
try:
sjtelnet = sj['data']['telnet']
succ = sjtelnet["status"]
if succ != "success":
return
protocol = sjtelnet["protocol"]
if protocol != "telnet":
return
if portinfo.service != protocol:
portinfo.service = protocol
self._get_port_timestamp(sjtelnet, portinfo)
            # start building the telnet banner
mres = sjtelnet.get('result')
if mres is None:
return
tdata = Telnet()
tdata.banner = mres.get('banner')
portinfo.banner = tdata.build_banner()
res = True
portinfo.set_telnet(tdata)
        except Exception:
            self._logger.error(
                f"Parse banner protocol error, err:{traceback.format_exc()}")
return res
|
import os.path
from moler.config import load_config
from moler.device.device import DeviceFactory
def test_network_outage():
load_config(config=os.path.abspath('my_devices.yml'))
unix1 = DeviceFactory.get_device(name='MyMachine1')
unix2 = DeviceFactory.get_device(name='MyMachine2')
if __name__ == '__main__':
test_network_outage()
"""
copy this file into workshop1/network_outage.py
1. run it
2. check logs
3. add PATH to LOGGER configuration & check logs
   - if not given then logs are created in the current working directory
4. add RAW_LOG: True & check logs
5. add DEBUG_LEVEL: DEBUG & check logs
6. add DATE_FORMAT: "%Y-%m-%d %H:%M:%S" & check logs
"""
|
from veripb import InvalidProof
from veripb.rules import Rule, EmptyRule
from veripb.rules import ReversePolishNotation, IsContradiction
from veripb.rules_register import register_rule, dom_friendly_rules, rules_to_dict
from veripb.parser import OPBParser, MaybeWordParser, ParseContext
from veripb import verifier
from veripb.exceptions import ParseError
from veripb.timed_function import TimedFunction
from collections import deque
from veripb.autoproving import autoProof, TemporaryAttach
from veripb.optimized.constraints import maxId as getMaxConstraintId
constraintMaxId = getMaxConstraintId()
class SubContextInfo():
def __init__(self):
self.toDelete = []
self.toAdd = []
self.previousRules = None
self.callbacks = []
self.subgoals = dict()
def addToDelete(self, ineqs):
self.toDelete.extend(ineqs)
class SubContext():
@classmethod
def setup(cls, context):
try:
return context.subContexts
except AttributeError:
context.subContexts = cls(context)
return context.subContexts
def __init__(self, context):
self.infos = []
self.context = context
f = lambda ineqs, context: self.addToDelete(ineqs)
context.addIneqListener.append(f)
def __bool__(self):
return bool(self.infos)
def addToDelete(self, ineqs):
if len(self.infos) > 0:
self.infos[-1].addToDelete(ineqs)
def push(self):
newSubContext = SubContextInfo()
self.infos.append(newSubContext)
return self.infos[-1]
def pop(self, checkSubgoals = True):
oldContext = self.infos.pop()
for callback in oldContext.callbacks:
callback(self.context, oldContext)
if checkSubgoals and len(oldContext.subgoals) > 0:
for Id, subgoal in oldContext.subgoals.items():
raise InvalidProof("Open subgoal not proven: %s: %s"%(str(Id), subgoal.toString(self.context.ineqFactory)))
return oldContext
def getCurrent(self):
return self.infos[-1]
class EndOfProof(EmptyRule):
Ids = ["qed", "end"]
@classmethod
def parse(cls, line, context):
subContexts = SubContext.setup(context)
if not subContexts:
raise ParseError("No subcontext to end here.")
return cls(subContexts.getCurrent())
def __init__(self, subcontext):
self.subcontext = subcontext
def deleteConstraints(self):
return self.subcontext.toDelete
def compute(self, antecedents, context):
autoProof(context, antecedents, self.subcontext.subgoals)
subContexts = SubContext.setup(context)
        popped = subContexts.pop()
        assert(self.subcontext == popped)
return self.subcontext.toAdd
def antecedentIDs(self):
return "all"
def allowedRules(self, context, currentRules):
if self.subcontext.previousRules is not None:
return self.subcontext.previousRules
else:
return currentRules
def numConstraints(self):
return len(self.subcontext.toAdd)
class NegatedSubGoals:
def __init__(self, constraints):
self.constraints = constraints
self.isProven = False
def getAsLeftHand(self):
return self.constraints
def getAsRightHand(self):
return None
def toString(self, ineqFactory):
constraintsString = " ".join([ineqFactory.toString(constraint) for constraint in self.constraints])
return "not [%s]" % constraintsString
    def isProven(self):
        return False
class SubGoal:
def __init__(self, constraint):
self.constraint = constraint
self.isProven = False
def getAsLeftHand(self):
return [self.constraint.copy().negated()]
def getAsRightHand(self):
return self.constraint
def toString(self, ineqFactory):
return ineqFactory.toString(self.constraint)
class SubProof(EmptyRule):
Ids = ["proofgoal"]
# todo enforce only one def
@classmethod
def parse(cls, line, context):
subcontexts = SubContext.setup(context)
subContext = subcontexts.getCurrent()
with MaybeWordParser(line) as words:
myGoal = next(words)
if myGoal[0] != "#":
myGoal = int(myGoal)
if not subContext.subgoals:
raise ValueError("No proofgoals left to proof.")
if myGoal not in subContext.subgoals:
raise ValueError("Invalid proofgoal.")
return cls(subContext.subgoals, myGoal)
def __init__(self, subgoals, myGoal):
self.subRules = dom_friendly_rules() + [EndOfProof]
self.myGoal = myGoal
self.subgoals = subgoals
def compute(self, antecedents, context):
subContexts = SubContext.setup(context)
self.subContext = subContexts.push()
f = lambda context, subContext: self.check(context)
self.subContext.callbacks.append(f)
subgoal = self.subgoals[self.myGoal]
del self.subgoals[self.myGoal]
return subgoal.getAsLeftHand()
def antecedentIDs(self):
return []
def numConstraints(self):
return 1
def allowedRules(self, context, currentRules):
self.subContext.previousRules = currentRules
return rules_to_dict(self.subRules)
def check(self, context):
if not getattr(context, "containsContradiction", False):
raise InvalidProof("Sub proof did not show contradiction.")
context.containsContradiction = False
class MultiGoalRule(EmptyRule):
subRules = [EndOfProof, SubProof]
subProofBegin = "begin"
@classmethod
def parseHasExplicitSubproof(cls, words):
try:
nxt = next(words)
if nxt != cls.subProofBegin:
raise ValueError("Unexpected word, expected 'begin'")
return True
except StopIteration:
return False
def __init__(self, context):
self.subContexts = SubContext.setup(context)
self.subContext = self.subContexts.push()
self.displayGoals = context.verifierSettings.trace
self.constraints = []
self.ineqFactory = context.ineqFactory
self.nextId = 1
self.autoProoved = False
def addSubgoal(self, goal, Id = None):
if Id is None or Id == constraintMaxId:
# the goal does not relate to an existing constraint
Id = "#%i" % (self.nextId)
self.nextId += 1
assert(Id not in self.subContext.subgoals)
self.subContext.subgoals[Id] = goal
if self.displayGoals:
ineqStr = goal.toString(self.ineqFactory)
if isinstance(Id, int):
Id = "%03i" % (Id)
print(" proofgoal %s: %s"%(Id, ineqStr))
def addAvailable(self, ineq):
"""add constraint available in sub proof"""
self.constraints.append(ineq)
def autoProof(self, context, db):
self.autoProoved = True
if self.subContext.subgoals:
added = list()
with TemporaryAttach(context.propEngine) as temporary:
for c in self.constraints:
if c is not None:
temporary.attach(c)
autoProof(context, db, self.subContext.subgoals)
self.constraints = self.subContext.toAdd
self.subContexts.pop()
def addIntroduced(self, ineq):
"""add constraint introduced after all subgoals are proven"""
self.subContext.toAdd.append(ineq)
def compute(self, antecedents, context = None):
return self.constraints
def numConstraints(self):
return len(self.constraints)
def allowedRules(self, context, currentRules):
if not self.autoProoved:
self.subContext.previousRules = currentRules
return rules_to_dict(self.subRules)
else:
return currentRules
|
# -*- coding: utf-8 -*-
import os
import os.path
import sys
import time
import glob
import http.cookiejar
import tempfile
import lz4.block
import datetime
import configparser
try:
import json
except ImportError:
import simplejson as json
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
import sqlite3
# external dependencies
import keyring
import pyaes
from pbkdf2 import PBKDF2
__doc__ = 'Loads browser cookies into a cookiejar'
class BrowserCookieError(Exception):
pass
def create_local_copy(cookie_file):
    if os.path.exists(cookie_file):
        tmp_cookie_file = tempfile.NamedTemporaryFile(suffix='.sqlite').name
        with open(cookie_file, 'rb') as src, open(tmp_cookie_file, 'wb') as dst:
            dst.write(src.read())
        return tmp_cookie_file
    else:
        raise BrowserCookieError('Can not find cookie file at: ' + cookie_file)
def windows_group_policy_path():
from winreg import ConnectRegistry, HKEY_LOCAL_MACHINE, OpenKeyEx, QueryValueEx, REG_EXPAND_SZ, REG_SZ
try:
root = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
policy_key = OpenKeyEx(root, r"SOFTWARE\Policies\Google\Chrome")
user_data_dir, type_ = QueryValueEx(policy_key, "UserDataDir")
if type_ == REG_EXPAND_SZ:
user_data_dir = os.path.expandvars(user_data_dir)
elif type_ != REG_SZ:
return None
except OSError:
return None
return os.path.join(user_data_dir, "Default", "Cookies")
# Code adapted slightly from https://github.com/Arnie97/chrome-cookies
def crypt_unprotect_data(
cipher_text=b'', entropy=b'', reserved=None, prompt_struct=None
):
import ctypes
import ctypes.wintypes
class DataBlob(ctypes.Structure):
_fields_ = [
('cbData', ctypes.wintypes.DWORD),
('pbData', ctypes.POINTER(ctypes.c_char))
]
blob_in, blob_entropy, blob_out = map(
lambda x: DataBlob(len(x), ctypes.create_string_buffer(x)),
[cipher_text, entropy, b'']
)
desc = ctypes.c_wchar_p()
CRYPTPROTECT_UI_FORBIDDEN = 0x01
if not ctypes.windll.crypt32.CryptUnprotectData(
ctypes.byref(blob_in), ctypes.byref(
desc), ctypes.byref(blob_entropy),
reserved, prompt_struct, CRYPTPROTECT_UI_FORBIDDEN, ctypes.byref(
blob_out)
):
raise RuntimeError('Failed to decrypt the cipher text with DPAPI')
description = desc.value
buffer_out = ctypes.create_string_buffer(int(blob_out.cbData))
ctypes.memmove(buffer_out, blob_out.pbData, blob_out.cbData)
    # map() is lazy in Python 3, so iterate explicitly to actually free memory
    for ptr in (desc, blob_out.pbData):
        ctypes.windll.kernel32.LocalFree(ptr)
return description, buffer_out.value
class Chrome:
def __init__(self, cookie_file=None, domain_name=""):
self.salt = b'saltysalt'
self.iv = b' ' * 16
self.length = 16
self.domain_name = domain_name
if sys.platform == 'darwin':
my_pass = keyring.get_password('Chrome Safe Storage', 'Chrome').encode(
'utf8') # get key from keyring
iterations = 1003
self.key = PBKDF2(my_pass, self.salt,
iterations=iterations).read(self.length)
cookie_file = cookie_file \
or os.path.expanduser('~/Library/Application Support/Google/Chrome/Default/Cookies')
elif sys.platform.startswith('linux'):
my_pass = 'peanuts'.encode('utf8')
iterations = 1
self.key = PBKDF2(my_pass, self.salt,
iterations=iterations).read(self.length)
            # os.path.expanduser always returns a truthy string, so chained
            # `or`s never fall through; pick the first path that exists instead
            candidates = [os.path.expanduser(p) for p in (
                '~/.config/google-chrome/Default/Cookies',
                '~/.config/chromium/Default/Cookies',
                '~/.config/google-chrome-beta/Default/Cookies')]
            cookie_file = cookie_file or next(
                (p for p in candidates if os.path.exists(p)), candidates[0])
elif sys.platform == "win32":
cookie_file = cookie_file or windows_group_policy_path() \
or glob.glob(os.path.join(os.getenv('APPDATA', ''), '..\Local\\Google\\Chrome\\User Data\\Default\\Cookies')) \
or glob.glob(os.path.join(os.getenv('LOCALAPPDATA', ''), 'Google\\Chrome\\User Data\\Default\\Cookies')) \
or glob.glob(os.path.join(os.getenv('APPDATA', ''), 'Google\\Chrome\\User Data\\Default\\Cookies'))
else:
raise BrowserCookieError(
"OS not recognized. Works on Chrome for OSX, Windows, and Linux.")
if isinstance(cookie_file, list):
if not cookie_file:
raise BrowserCookieError('Failed to find Chrome cookie')
cookie_file = cookie_file[0]
self.tmp_cookie_file = create_local_copy(cookie_file)
def __del__(self):
if hasattr(self, 'tmp_cookie_file'):
os.remove(self.tmp_cookie_file)
def __str__(self):
return 'chrome'
def load(self):
"""Load sqlite cookies into a cookiejar
"""
con = sqlite3.connect(self.tmp_cookie_file)
cur = con.cursor()
try:
cur.execute('SELECT host_key, path, secure, expires_utc, name, value, encrypted_value '
'FROM cookies WHERE host_key like "%{}%";'.format(self.domain_name))
except sqlite3.OperationalError:
cur.execute('SELECT host_key, path, is_secure, expires_utc, name, value, encrypted_value '
'FROM cookies WHERE host_key like "%{}%";'.format(self.domain_name))
cj = http.cookiejar.CookieJar()
epoch_start = datetime.datetime(1601, 1, 1)
for item in cur.fetchall():
host, path, secure, expires, name = item[:5]
if item[3] != 0:
offset = min(int(item[3]), 265000000000000000)
delta = datetime.timedelta(microseconds=offset)
expires = epoch_start + delta
expires = expires.timestamp()
value = self._decrypt(item[5], item[6])
c = create_cookie(host, path, secure, expires, name, value)
cj.set_cookie(c)
con.close()
return cj
@staticmethod
def _decrypt_windows_chrome(value, encrypted_value):
if len(value) != 0:
return value
if encrypted_value == "":
return ""
_, data = crypt_unprotect_data(encrypted_value)
assert isinstance(data, bytes)
return data.decode()
def _decrypt(self, value, encrypted_value):
"""Decrypt encoded cookies
"""
if sys.platform == 'win32':
return self._decrypt_windows_chrome(value, encrypted_value)
if value or (encrypted_value[:3] != b'v10'):
return value
encrypted_value = encrypted_value[3:]
encrypted_value_half_len = int(len(encrypted_value) / 2)
cipher = pyaes.Decrypter(
pyaes.AESModeOfOperationCBC(self.key, self.iv))
decrypted = cipher.feed(encrypted_value[:encrypted_value_half_len])
decrypted += cipher.feed(encrypted_value[encrypted_value_half_len:])
decrypted += cipher.feed()
return decrypted.decode("utf-8")
class Firefox:
def __init__(self, cookie_file=None, domain_name=""):
self.tmp_cookie_file = None
cookie_file = cookie_file or self.find_cookie_file()
self.tmp_cookie_file = create_local_copy(cookie_file)
self.session_file = os.path.join(
os.path.dirname(cookie_file), 'sessionstore.js')
self.session_file_lz4 = os.path.join(os.path.dirname(
cookie_file), 'sessionstore-backups', 'recovery.jsonlz4')
self.domain_name = domain_name
def __del__(self):
if self.tmp_cookie_file:
os.remove(self.tmp_cookie_file)
def __str__(self):
return 'firefox'
def get_default_profile(self, profiles_ini_path, template_for_relative):
""" Given the path to firefox profiles.ini,
will return relative path to firefox default profile
"""
config = configparser.ConfigParser()
config.read(profiles_ini_path)
for section in config.sections():
try:
if config[section]['Default'] == '1' and config[section]['IsRelative'] == '1':
return template_for_relative.format(config[section]['Path'])
except KeyError:
continue
return None
def find_cookie_file(self):
if sys.platform == 'darwin':
profiles_ini_paths = glob.glob(os.path.expanduser(
'~/Library/Application Support/Firefox/profiles.ini'))
            profiles_ini_path = self.get_default_profile(profiles_ini_paths, os.path.expanduser(
                '~/Library/Application Support/Firefox/Profiles/{0}/cookies.sqlite'))
cookie_files = glob.glob(
os.path.expanduser('~/Library/Application Support/Firefox/Profiles/*default/cookies.sqlite')) \
or glob.glob(profiles_ini_path)
elif sys.platform.startswith('linux'):
profiles_ini_paths = glob.glob(
os.path.expanduser('~/.mozilla/firefox/profiles.ini'))
profiles_ini_path = self.get_default_profile(
profiles_ini_paths, os.path.expanduser('~/.mozilla/firefox/{0}/cookies.sqlite'))
cookie_files = glob.glob(os.path.expanduser('~/.mozilla/firefox/*default*/cookies.sqlite')) \
or glob.glob(profiles_ini_path)
elif sys.platform == 'win32':
profiles_ini_paths = glob.glob(os.path.join(os.environ.get('APPDATA', ''),
'Mozilla/Firefox/profiles.ini')) \
or glob.glob(os.path.join(os.environ.get('LOCALAPPDATA', ''),
'Mozilla/Firefox/profiles.ini'))
profiles_ini_path = self.get_default_profile(profiles_ini_paths, os.path.join(os.environ.get('APPDATA', ''),
"Mozilla/Firefox/{0}/cookies.sqlite"))
cookie_files = glob.glob(os.path.join(os.environ.get('PROGRAMFILES', ''),
'Mozilla Firefox/profile/cookies.sqlite')) \
or glob.glob(os.path.join(os.environ.get('PROGRAMFILES(X86)', ''),
'Mozilla Firefox/profile/cookies.sqlite')) \
or glob.glob(os.path.join(os.environ.get('APPDATA', ''),
'Mozilla/Firefox/Profiles/*default*/cookies.sqlite')) \
or glob.glob(os.path.join(os.environ.get('LOCALAPPDATA', ''),
'Mozilla/Firefox/Profiles/*default*/cookies.sqlite')) \
or glob.glob(profiles_ini_path)
else:
raise BrowserCookieError(
'Unsupported operating system: ' + sys.platform)
if cookie_files:
return cookie_files[0]
else:
raise BrowserCookieError('Failed to find Firefox cookie')
@staticmethod
def __create_session_cookie(cookie_json):
expires = str(int(time.time()) + 3600 * 24 * 7)
return create_cookie(cookie_json.get('host', ''), cookie_json.get('path', ''), False, expires,
cookie_json.get('name', ''), cookie_json.get('value', ''))
def __add_session_cookies(self, cj):
if not os.path.exists(self.session_file):
return
try:
json_data = json.loads(
open(self.session_file, 'rb').read().decode())
except ValueError as e:
print('Error parsing firefox session JSON:', str(e))
else:
for window in json_data.get('windows', []):
for cookie in window.get('cookies', []):
cj.set_cookie(Firefox.__create_session_cookie(cookie))
def __add_session_cookies_lz4(self, cj):
if not os.path.exists(self.session_file_lz4):
return
try:
file_obj = open(self.session_file_lz4, 'rb')
file_obj.read(8)
json_data = json.loads(lz4.block.decompress(file_obj.read()))
except ValueError as e:
print('Error parsing firefox session JSON LZ4:', str(e))
else:
for cookie in json_data.get('cookies', []):
cj.set_cookie(Firefox.__create_session_cookie(cookie))
def load(self):
con = sqlite3.connect(self.tmp_cookie_file)
cur = con.cursor()
cur.execute('select host, path, isSecure, expiry, name, value from moz_cookies '
'where host like "%{}%"'.format(self.domain_name))
cj = http.cookiejar.CookieJar()
for item in cur.fetchall():
c = create_cookie(*item)
cj.set_cookie(c)
con.close()
self.__add_session_cookies(cj)
self.__add_session_cookies_lz4(cj)
return cj
def create_cookie(host, path, secure, expires, name, value):
"""Shortcut function to create a cookie
"""
return http.cookiejar.Cookie(0, name, value, None, False, host, host.startswith('.'), host.startswith('.'), path,
True, secure, expires, False, None, None, {})
def chrome(cookie_file=None, domain_name=""):
"""Returns a cookiejar of the cookies used by Chrome. Optionally pass in a
domain name to only load cookies from the specified domain
"""
return Chrome(cookie_file, domain_name).load()
def firefox(cookie_file=None, domain_name=""):
"""Returns a cookiejar of the cookies and sessions used by Firefox. Optionally
pass in a domain name to only load cookies from the specified domain
"""
return Firefox(cookie_file, domain_name).load()
def load(domain_name=""):
"""Try to load cookies from all supported browsers and return combined cookiejar
Optionally pass in a domain name to only load cookies from the specified domain
"""
cj = http.cookiejar.CookieJar()
for cookie_fn in [chrome, firefox]:
try:
for cookie in cookie_fn(domain_name=domain_name):
cj.set_cookie(cookie)
except BrowserCookieError:
pass
return cj
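# Example usage (a minimal sketch; the `requests` dependency is an assumption):
#
#   import requests
#   cj = load(domain_name='example.com')
#   r = requests.get('https://example.com', cookies=cj)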
if __name__ == '__main__':
pass
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
class V1SecretReference(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'namespace': 'str'
}
attribute_map = {
'name': 'name',
'namespace': 'namespace'
}
def __init__(self, name=None, namespace=None): # noqa: E501
"""V1SecretReference - a model defined in Swagger""" # noqa: E501
self._name = None
self._namespace = None
self.discriminator = None
if name is not None:
self.name = name
if namespace is not None:
self.namespace = namespace
@property
def name(self):
"""Gets the name of this V1SecretReference. # noqa: E501
Name is unique within a namespace to reference a secret resource. # noqa: E501
:return: The name of this V1SecretReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1SecretReference.
Name is unique within a namespace to reference a secret resource. # noqa: E501
:param name: The name of this V1SecretReference. # noqa: E501
:type: str
"""
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1SecretReference. # noqa: E501
Namespace defines the space within which the secret name must be unique. # noqa: E501
:return: The namespace of this V1SecretReference. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1SecretReference.
Namespace defines the space within which the secret name must be unique. # noqa: E501
:param namespace: The namespace of this V1SecretReference. # noqa: E501
:type: str
"""
self._namespace = namespace
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in self.swagger_types.items():
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SecretReference):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
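# Illustrative round-trip (names are arbitrary):
#
#   ref = V1SecretReference(name='db-credentials', namespace='prod')
#   ref.to_dict()   # -> {'name': 'db-credentials', 'namespace': 'prod'}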
|
import os
from urllib import parse as urlparse
def fix_catalog_url(url):
"""
Replace .html with .xml extension
"""
    from posixpath import splitext, join  # URL paths use POSIX separators
u = urlparse.urlsplit(url)
name, ext = splitext(u.path)
if ext == ".html":
u = urlparse.urlsplit(url.replace(".html", ".xml"))
elif ext == '':
u = urlparse.urlsplit(join(url, "catalog.xml"))
return u.geturl()
def construct_url(url, href):
u = urlparse.urlsplit(url)
base_url = u.scheme + "://" + u.netloc
relative_path = urlparse.urljoin(base_url, os.path.split(u.path)[0])
if href[0] == "/":
# Absolute paths
cat = urlparse.urljoin(base_url, href)
elif href[0:4] == "http":
# Full HTTP links
cat = href
else:
# Relative paths.
cat = relative_path + "/" + href
return cat
def size_in_bytes(size, unit):
# Convert to bytes
if unit == "Kbytes":
size *= 1000.0
elif unit == "Mbytes":
size *= 1e+6
elif unit == "Gbytes":
size *= 1e+9
elif unit == "Tbytes":
size *= 1e+12
return int(size)
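# Quick sanity checks (illustrative):
#
#   fix_catalog_url("http://host/catalog.html")  # -> "http://host/catalog.xml"
#   fix_catalog_url("http://host/data")          # -> "http://host/data/catalog.xml"
#   size_in_bytes(1.5, "Mbytes")                 # -> 1500000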
|
import datetime
from backtrader_ib_api.finviz import get_stock_info, estimate_next_earnings_date
def test_stock_info():
stock_info = get_stock_info("AAPL")
print(stock_info)
def test_estimate_next_earnings_date():
next_earnings_date = estimate_next_earnings_date("AAPL")
print(next_earnings_date)
assert next_earnings_date
assert next_earnings_date > datetime.datetime.today()
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This file is copied from tensorflow/models https://github.com/tensorflow/models/tree/master/research/inception/inception/data
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
This script is called as
process_bounding_boxes.py <dir> [synsets-file]
Where <dir> is a directory containing the downloaded and unpacked bounding box
data. If [synsets-file] is supplied, then only the bounding boxes whose
synsets are contained within this file are returned. Note that the
[synsets-file] file contains synset ids, one per line.
The script dumps out a CSV text file in which each line contains an entry.
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
The entry can be read as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
The bounding box for <JPEG file name> contains two points (xmin, ymin) and
(xmax, ymax) specifying the lower-left corner and upper-right corner of a
bounding box in *relative* coordinates.
The user supplies a directory where the XML files reside. The directory
structure in the directory <dir> is assumed to look like this:
<dir>/nXXXXXXXX/nXXXXXXXX_YYYY.xml
Each XML file contains a bounding box annotation. The script:
(1) Parses the XML file and extracts the filename, label and bounding box info.
(2) The bounding box is specified in the XML files as integer (xmin, ymin) and
(xmax, ymax) *relative* to image size displayed to the human annotator. The
size of the image displayed to the human annotator is stored in the XML file
as integer (height, width).
Note that the displayed size will differ from the actual size of the image
downloaded from image-net.org. To make the bounding box annotation useable,
we convert bounding box to floating point numbers relative to displayed
height and width of the image.
Note that each XML file might contain N bounding box annotations.
Note that the points are all clamped at a range of [0.0, 1.0] because some
human annotations extend outside the range of the supplied image.
See details here: http://image-net.org/download-bboxes
(3) By default, the script outputs all valid bounding boxes. If a
[synsets-file] is supplied, only the subset of bounding boxes associated
with those synsets are outputted. Importantly, one can supply a list of
synsets in the ImageNet Challenge and output the list of bounding boxes
associated with the training images of the ILSVRC.
We use these bounding boxes to inform the random distortion of images
supplied to the network.
If you run this script successfully, you will see the following output
to stderr:
> Finished processing 544546 XML files.
> Skipped 0 XML files not in ImageNet Challenge.
> Skipped 0 bounding boxes not in ImageNet Challenge.
> Wrote 615299 bounding boxes from 544546 annotated images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import sys
import xml.etree.ElementTree as ET
class BoundingBox(object):
pass
def GetItem(name, root, index=0):
count = 0
for item in root.iter(name):
if count == index:
return item.text
count += 1
# Failed to find "index" occurrence of item.
return -1
def GetInt(name, root, index=0):
# In some XML annotation files, the point values are not integers, but floats.
# So we add a float function to avoid ValueError.
return int(float(GetItem(name, root, index)))
def FindNumberBoundingBoxes(root):
index = 0
while True:
if GetInt('xmin', root, index) == -1:
break
index += 1
return index
def ProcessXMLAnnotation(xml_file):
"""Process a single XML file containing a bounding box."""
# pylint: disable=broad-except
try:
tree = ET.parse(xml_file)
except Exception:
print('Failed to parse: ' + xml_file, file=sys.stderr)
return None
# pylint: enable=broad-except
root = tree.getroot()
num_boxes = FindNumberBoundingBoxes(root)
boxes = []
for index in range(num_boxes):
box = BoundingBox()
# Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index)
box.ymin = GetInt('ymin', root, index)
box.xmax = GetInt('xmax', root, index)
box.ymax = GetInt('ymax', root, index)
box.width = GetInt('width', root)
box.height = GetInt('height', root)
box.filename = GetItem('filename', root) + '.JPEG'
box.label = GetItem('name', root)
xmin = float(box.xmin) / float(box.width)
xmax = float(box.xmax) / float(box.width)
ymin = float(box.ymin) / float(box.height)
ymax = float(box.ymax) / float(box.height)
# Some images contain bounding box annotations that
# extend outside of the supplied image. See, e.g.
# n03127925/n03127925_147.xml
# Additionally, for some bounding boxes, the min > max
# or the box is entirely outside of the image.
min_x = min(xmin, xmax)
max_x = max(xmin, xmax)
box.xmin_scaled = min(max(min_x, 0.0), 1.0)
box.xmax_scaled = min(max(max_x, 0.0), 1.0)
min_y = min(ymin, ymax)
max_y = max(ymin, ymax)
box.ymin_scaled = min(max(min_y, 0.0), 1.0)
box.ymax_scaled = min(max(max_y, 0.0), 1.0)
boxes.append(box)
return boxes
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print('Invalid usage\n'
'usage: process_bounding_boxes.py <dir> [synsets-file]',
file=sys.stderr)
sys.exit(-1)
xml_files = glob.glob(sys.argv[1] + '/*/*.xml')
print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]),
file=sys.stderr)
if len(sys.argv) == 3:
labels = set([l.strip() for l in open(sys.argv[2]).readlines()])
print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]),
file=sys.stderr)
else:
labels = None
skipped_boxes = 0
skipped_files = 0
saved_boxes = 0
saved_files = 0
for file_index, one_file in enumerate(xml_files):
# Example: <...>/n06470073/n00141669_6790.xml
label = os.path.basename(os.path.dirname(one_file))
# Determine if the annotation is from an ImageNet Challenge label.
if labels is not None and label not in labels:
skipped_files += 1
continue
bboxes = ProcessXMLAnnotation(one_file)
assert bboxes is not None, 'No bounding boxes found in ' + one_file
found_box = False
for bbox in bboxes:
if labels is not None:
if bbox.label != label:
# Note: There is a slight bug in the bounding box annotation data.
# Many of the dog labels have the human label 'Scottish_deerhound'
# instead of the synset ID 'n02092002' in the bbox.label field. As a
# simple hack to overcome this issue, we only exclude bbox labels
# *which are synset ID's* that do not match original synset label for
# the XML file.
if bbox.label in labels:
skipped_boxes += 1
continue
# Guard against improperly specified boxes.
if (bbox.xmin_scaled >= bbox.xmax_scaled or
bbox.ymin_scaled >= bbox.ymax_scaled):
skipped_boxes += 1
continue
# Note bbox.filename occasionally contains '%s' in the name. This is
# data set noise that is fixed by just using the basename of the XML file.
image_filename = os.path.splitext(os.path.basename(one_file))[0]
print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' %
(image_filename,
bbox.xmin_scaled, bbox.ymin_scaled,
bbox.xmax_scaled, bbox.ymax_scaled))
saved_boxes += 1
found_box = True
if found_box:
saved_files += 1
else:
skipped_files += 1
if not file_index % 5000:
print('--> processed %d of %d XML files.' %
(file_index + 1, len(xml_files)),
file=sys.stderr)
print('--> skipped %d boxes and %d XML files.' %
(skipped_boxes, skipped_files), file=sys.stderr)
print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr)
print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files,
file=sys.stderr)
print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes,
file=sys.stderr)
print('Wrote %d bounding boxes from %d annotated images.' %
(saved_boxes, saved_files),
file=sys.stderr)
print('Finished.', file=sys.stderr)
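# Example invocation (paths and file names are illustrative):
#   ./process_bounding_boxes.py ./bbox_xml imagenet_synsets.txt > bboxes.csv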
|
import asyncio
import json
import os
import string
from statistics import mean
from typing import Any
from packaging import version as pyver
import pytz
from alerts.models import SEVERITY_CHOICES
from core.models import CoreSettings
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from logs.models import BaseAuditModel
from loguru import logger
from .utils import bytes2human
logger.configure(**settings.LOG_CONFIG)
CHECK_TYPE_CHOICES = [
("diskspace", "Disk Space Check"),
("ping", "Ping Check"),
("cpuload", "CPU Load Check"),
("memory", "Memory Check"),
("winsvc", "Service Check"),
("script", "Script Check"),
("eventlog", "Event Log Check"),
]
CHECK_STATUS_CHOICES = [
("passing", "Passing"),
("failing", "Failing"),
("pending", "Pending"),
]
EVT_LOG_NAME_CHOICES = [
("Application", "Application"),
("System", "System"),
("Security", "Security"),
]
EVT_LOG_TYPE_CHOICES = [
("INFO", "Information"),
("WARNING", "Warning"),
("ERROR", "Error"),
("AUDIT_SUCCESS", "Success Audit"),
("AUDIT_FAILURE", "Failure Audit"),
]
EVT_LOG_FAIL_WHEN_CHOICES = [
("contains", "Log contains"),
("not_contains", "Log does not contain"),
]
class Check(BaseAuditModel):
# common fields
agent = models.ForeignKey(
"agents.Agent",
related_name="agentchecks",
null=True,
blank=True,
on_delete=models.CASCADE,
)
policy = models.ForeignKey(
"automation.Policy",
related_name="policychecks",
null=True,
blank=True,
on_delete=models.CASCADE,
)
managed_by_policy = models.BooleanField(default=False)
overriden_by_policy = models.BooleanField(default=False)
parent_check = models.PositiveIntegerField(null=True, blank=True)
name = models.CharField(max_length=255, null=True, blank=True)
check_type = models.CharField(
max_length=50, choices=CHECK_TYPE_CHOICES, default="diskspace"
)
status = models.CharField(
max_length=100, choices=CHECK_STATUS_CHOICES, default="pending"
)
more_info = models.TextField(null=True, blank=True)
last_run = models.DateTimeField(null=True, blank=True)
email_alert = models.BooleanField(default=False)
text_alert = models.BooleanField(default=False)
dashboard_alert = models.BooleanField(default=False)
fails_b4_alert = models.PositiveIntegerField(default=1)
fail_count = models.PositiveIntegerField(default=0)
outage_history = models.JSONField(null=True, blank=True) # store
extra_details = models.JSONField(null=True, blank=True)
run_interval = models.PositiveIntegerField(blank=True, default=0)
# check specific fields
# for eventlog, script, ip, and service alert severity
alert_severity = models.CharField(
max_length=15,
choices=SEVERITY_CHOICES,
default="warning",
null=True,
blank=True,
)
# threshold percent for diskspace, cpuload or memory check
error_threshold = models.PositiveIntegerField(
validators=[MinValueValidator(0), MaxValueValidator(99)],
null=True,
blank=True,
default=0,
)
warning_threshold = models.PositiveIntegerField(
null=True,
blank=True,
validators=[MinValueValidator(0), MaxValueValidator(99)],
default=0,
)
# diskcheck i.e C:, D: etc
disk = models.CharField(max_length=2, null=True, blank=True)
# ping checks
ip = models.CharField(max_length=255, null=True, blank=True)
# script checks
script = models.ForeignKey(
"scripts.Script",
related_name="script",
on_delete=models.CASCADE,
null=True,
blank=True,
)
script_args = ArrayField(
models.CharField(max_length=255, null=True, blank=True),
null=True,
blank=True,
default=list,
)
info_return_codes = ArrayField(
models.PositiveIntegerField(),
null=True,
blank=True,
default=list,
)
warning_return_codes = ArrayField(
models.PositiveIntegerField(),
null=True,
blank=True,
default=list,
)
timeout = models.PositiveIntegerField(null=True, blank=True)
stdout = models.TextField(null=True, blank=True)
stderr = models.TextField(null=True, blank=True)
retcode = models.IntegerField(null=True, blank=True)
execution_time = models.CharField(max_length=100, null=True, blank=True)
# cpu and mem check history
history = ArrayField(
models.IntegerField(blank=True), null=True, blank=True, default=list
)
# win service checks
svc_name = models.CharField(max_length=255, null=True, blank=True)
svc_display_name = models.CharField(max_length=255, null=True, blank=True)
pass_if_start_pending = models.BooleanField(null=True, blank=True)
pass_if_svc_not_exist = models.BooleanField(default=False)
restart_if_stopped = models.BooleanField(null=True, blank=True)
svc_policy_mode = models.CharField(
max_length=20, null=True, blank=True
) # 'default' or 'manual', for editing policy check
# event log checks
log_name = models.CharField(
max_length=255, choices=EVT_LOG_NAME_CHOICES, null=True, blank=True
)
event_id = models.IntegerField(null=True, blank=True)
event_id_is_wildcard = models.BooleanField(default=False)
event_type = models.CharField(
max_length=255, choices=EVT_LOG_TYPE_CHOICES, null=True, blank=True
)
event_source = models.CharField(max_length=255, null=True, blank=True)
event_message = models.TextField(null=True, blank=True)
fail_when = models.CharField(
max_length=255, choices=EVT_LOG_FAIL_WHEN_CHOICES, null=True, blank=True
)
search_last_days = models.PositiveIntegerField(null=True, blank=True)
number_of_events_b4_alert = models.PositiveIntegerField(
null=True, blank=True, default=1
)
def __str__(self):
if self.agent:
return f"{self.agent.hostname} - {self.readable_desc}"
else:
return f"{self.policy.name} - {self.readable_desc}"
@property
def readable_desc(self):
if self.check_type == "diskspace":
text = ""
if self.warning_threshold:
text += f" Warning Threshold: {self.warning_threshold}%"
if self.error_threshold:
text += f" Error Threshold: {self.error_threshold}%"
return f"{self.get_check_type_display()}: Drive {self.disk} - {text}" # type: ignore
elif self.check_type == "ping":
return f"{self.get_check_type_display()}: {self.name}" # type: ignore
elif self.check_type == "cpuload" or self.check_type == "memory":
text = ""
if self.warning_threshold:
text += f" Warning Threshold: {self.warning_threshold}%"
if self.error_threshold:
text += f" Error Threshold: {self.error_threshold}%"
return f"{self.get_check_type_display()} - {text}" # type: ignore
elif self.check_type == "winsvc":
return f"{self.get_check_type_display()}: {self.svc_display_name}" # type: ignore
elif self.check_type == "eventlog":
return f"{self.get_check_type_display()}: {self.name}" # type: ignore
elif self.check_type == "script":
return f"{self.get_check_type_display()}: {self.script.name}" # type: ignore
else:
return "n/a"
@property
def history_info(self):
if self.check_type == "cpuload" or self.check_type == "memory":
return ", ".join(str(f"{x}%") for x in self.history[-6:])
@property
def last_run_as_timezone(self):
if self.last_run is not None and self.agent is not None:
return self.last_run.astimezone(
pytz.timezone(self.agent.timezone)
).strftime("%b-%d-%Y - %H:%M")
return self.last_run
@property
def non_editable_fields(self) -> list[str]:
return [
"check_type",
"status",
"more_info",
"last_run",
"fail_count",
"outage_history",
"extra_details",
"stdout",
"stderr",
"retcode",
"execution_time",
"history",
"readable_desc",
"history_info",
"parent_check",
"managed_by_policy",
"overriden_by_policy",
"created_by",
"created_time",
"modified_by",
"modified_time",
]
@property
def policy_fields_to_copy(self) -> list[str]:
return [
"warning_threshold",
"error_threshold",
"alert_severity",
"name",
"run_interval",
"disk",
"fails_b4_alert",
"ip",
"script",
"script_args",
"info_return_codes",
"warning_return_codes",
"timeout",
"svc_name",
"svc_display_name",
"svc_policy_mode",
"pass_if_start_pending",
"pass_if_svc_not_exist",
"restart_if_stopped",
"log_name",
"event_id",
"event_id_is_wildcard",
"event_type",
"event_source",
"event_message",
"fail_when",
"search_last_days",
"number_of_events_b4_alert",
"email_alert",
"text_alert",
"dashboard_alert",
]
def should_create_alert(self, alert_template=None):
return (
self.dashboard_alert
or self.email_alert
or self.text_alert
or (
alert_template
and (
alert_template.check_always_alert
or alert_template.check_always_email
or alert_template.check_always_text
)
)
)
def add_check_history(self, value: int, more_info: Any = None) -> None:
CheckHistory.objects.create(check_history=self, y=value, results=more_info)
def handle_checkv2(self, data):
from alerts.models import Alert
# cpuload or mem checks
if self.check_type == "cpuload" or self.check_type == "memory":
self.history.append(data["percent"])
if len(self.history) > 15:
self.history = self.history[-15:]
self.save(update_fields=["history"])
avg = int(mean(self.history))
if self.error_threshold and avg > self.error_threshold:
self.status = "failing"
self.alert_severity = "error"
elif self.warning_threshold and avg > self.warning_threshold:
self.status = "failing"
self.alert_severity = "warning"
else:
self.status = "passing"
# add check history
self.add_check_history(data["percent"])
# diskspace checks
elif self.check_type == "diskspace":
if data["exists"]:
percent_used = round(data["percent_used"])
total = bytes2human(data["total"])
free = bytes2human(data["free"])
if self.error_threshold and (100 - percent_used) < self.error_threshold:
self.status = "failing"
self.alert_severity = "error"
elif (
self.warning_threshold
and (100 - percent_used) < self.warning_threshold
):
self.status = "failing"
self.alert_severity = "warning"
else:
self.status = "passing"
self.more_info = f"Total: {total}B, Free: {free}B"
# add check history
self.add_check_history(100 - percent_used)
else:
self.status = "failing"
self.alert_severity = "error"
self.more_info = f"Disk {self.disk} does not exist"
self.save(update_fields=["more_info"])
# script checks
elif self.check_type == "script":
self.stdout = data["stdout"]
self.stderr = data["stderr"]
self.retcode = data["retcode"]
try:
# python agent
self.execution_time = "{:.4f}".format(data["stop"] - data["start"])
            except KeyError:
# golang agent
self.execution_time = "{:.4f}".format(data["runtime"])
if data["retcode"] in self.info_return_codes:
self.alert_severity = "info"
self.status = "failing"
elif data["retcode"] in self.warning_return_codes:
self.alert_severity = "warning"
self.status = "failing"
elif data["retcode"] != 0:
self.status = "failing"
self.alert_severity = "error"
else:
self.status = "passing"
self.save(
update_fields=[
"stdout",
"stderr",
"retcode",
"execution_time",
]
)
# add check history
self.add_check_history(
1 if self.status == "failing" else 0,
{
"retcode": data["retcode"],
"stdout": data["stdout"][:60],
"stderr": data["stderr"][:60],
"execution_time": self.execution_time,
},
)
# ping checks
elif self.check_type == "ping":
output = data["output"]
if pyver.parse(self.agent.version) <= pyver.parse("1.5.2"):
# DEPRECATED
success = ["Reply", "bytes", "time", "TTL"]
if data["has_stdout"]:
if all(x in output for x in success):
self.status = "passing"
else:
self.status = "failing"
elif data["has_stderr"]:
self.status = "failing"
else:
self.status = data["status"]
self.more_info = output
self.save(update_fields=["more_info"])
self.add_check_history(
1 if self.status == "failing" else 0, self.more_info[:60]
)
# windows service checks
elif self.check_type == "winsvc":
svc_stat = data["status"]
self.more_info = f"Status {svc_stat.upper()}"
if data["exists"]:
if svc_stat == "running":
self.status = "passing"
elif svc_stat == "start_pending" and self.pass_if_start_pending:
self.status = "passing"
else:
if self.agent and self.restart_if_stopped:
nats_data = {
"func": "winsvcaction",
"payload": {"name": self.svc_name, "action": "start"},
}
r = asyncio.run(self.agent.nats_cmd(nats_data, timeout=32))
if r == "timeout" or r == "natsdown":
self.status = "failing"
elif not r["success"] and r["errormsg"]:
self.status = "failing"
elif r["success"]:
self.status = "passing"
self.more_info = f"Status RUNNING"
else:
self.status = "failing"
else:
self.status = "failing"
else:
if self.pass_if_svc_not_exist:
self.status = "passing"
else:
self.status = "failing"
self.more_info = f"Service {self.svc_name} does not exist"
self.save(update_fields=["more_info"])
self.add_check_history(
1 if self.status == "failing" else 0, self.more_info[:60]
)
elif self.check_type == "eventlog":
log = []
is_wildcard = self.event_id_is_wildcard
eventType = self.event_type
eventID = self.event_id
source = self.event_source
message = self.event_message
r = data["log"]
for i in r:
if i["eventType"] == eventType:
if not is_wildcard and not int(i["eventID"]) == eventID:
continue
if not source and not message:
if is_wildcard:
log.append(i)
elif int(i["eventID"]) == eventID:
log.append(i)
continue
if source and message:
if is_wildcard:
if source in i["source"] and message in i["message"]:
log.append(i)
elif int(i["eventID"]) == eventID:
if source in i["source"] and message in i["message"]:
log.append(i)
continue
if source and source in i["source"]:
if is_wildcard:
log.append(i)
elif int(i["eventID"]) == eventID:
log.append(i)
if message and message in i["message"]:
if is_wildcard:
log.append(i)
elif int(i["eventID"]) == eventID:
log.append(i)
if self.fail_when == "contains":
if log and len(log) >= self.number_of_events_b4_alert:
self.status = "failing"
else:
self.status = "passing"
elif self.fail_when == "not_contains":
if log and len(log) >= self.number_of_events_b4_alert:
self.status = "passing"
else:
self.status = "failing"
self.extra_details = {"log": log}
self.save(update_fields=["extra_details"])
self.add_check_history(
1 if self.status == "failing" else 0,
"Events Found:" + str(len(self.extra_details["log"])),
)
# handle status
if self.status == "failing":
self.fail_count += 1
self.save(update_fields=["status", "fail_count", "alert_severity"])
if self.fail_count >= self.fails_b4_alert:
Alert.handle_alert_failure(self)
elif self.status == "passing":
self.fail_count = 0
self.save(update_fields=["status", "fail_count", "alert_severity"])
if Alert.objects.filter(assigned_check=self, resolved=False).exists():
Alert.handle_alert_resolve(self)
return self.status
@staticmethod
def serialize(check):
# serializes the check and returns json
from .serializers import CheckSerializer
return CheckSerializer(check).data
# for policy diskchecks
@staticmethod
def all_disks():
return [f"{i}:" for i in string.ascii_uppercase]
# for policy service checks
@staticmethod
def load_default_services():
with open(
os.path.join(settings.BASE_DIR, "services/default_services.json")
) as f:
default_services = json.load(f)
return default_services
def create_policy_check(self, agent=None, policy=None):
if (not agent and not policy) or (agent and policy):
return
check = Check.objects.create(
agent=agent,
policy=policy,
managed_by_policy=bool(agent),
parent_check=(self.pk if agent else None),
check_type=self.check_type,
script=self.script,
)
for field in self.policy_fields_to_copy:
setattr(check, field, getattr(self, field))
check.save()
def is_duplicate(self, check):
if self.check_type == "diskspace":
return self.disk == check.disk
elif self.check_type == "script":
return self.script == check.script
elif self.check_type == "ping":
return self.ip == check.ip
elif self.check_type == "cpuload":
return True
elif self.check_type == "memory":
return True
elif self.check_type == "winsvc":
return self.svc_name == check.svc_name
elif self.check_type == "eventlog":
return [self.log_name, self.event_id] == [check.log_name, check.event_id]
def send_email(self):
CORE = CoreSettings.objects.first()
body: str = ""
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self.agent.hostname} - {self} Failed"
else:
subject = f"{self} Failed"
if self.check_type == "diskspace":
text = ""
if self.warning_threshold:
text += f" Warning Threshold: {self.warning_threshold}%"
if self.error_threshold:
text += f" Error Threshold: {self.error_threshold}%"
try:
percent_used = [
d["percent"] for d in self.agent.disks if d["device"] == self.disk
][0]
percent_free = 100 - percent_used
body = subject + f" - Free: {percent_free}%, {text}"
            except Exception:
                # self.disk was not found in the agent's disk inventory
                body = subject + f" - Disk {self.disk} does not exist"
elif self.check_type == "script":
body = (
subject
+ f" - Return code: {self.retcode}\nStdout:{self.stdout}\nStderr: {self.stderr}"
)
elif self.check_type == "ping":
body = self.more_info
elif self.check_type == "cpuload" or self.check_type == "memory":
text = ""
if self.warning_threshold:
text += f" Warning Threshold: {self.warning_threshold}%"
if self.error_threshold:
text += f" Error Threshold: {self.error_threshold}%"
avg = int(mean(self.history))
if self.check_type == "cpuload":
body = subject + f" - Average CPU utilization: {avg}%, {text}"
elif self.check_type == "memory":
body = subject + f" - Average memory usage: {avg}%, {text}"
elif self.check_type == "winsvc":
body = subject + f" - Status: {self.more_info}"
elif self.check_type == "eventlog":
if self.event_source and self.event_message:
start = f"Event ID {self.event_id}, source {self.event_source}, containing string {self.event_message} "
elif self.event_source:
start = f"Event ID {self.event_id}, source {self.event_source} "
elif self.event_message:
start = (
f"Event ID {self.event_id}, containing string {self.event_message} "
)
else:
start = f"Event ID {self.event_id} "
body = start + f"was found in the {self.log_name} log\n\n"
for i in self.extra_details["log"]:
try:
if i["message"]:
body += f"{i['message']}\n"
                except Exception:  # tolerate events without a usable message
continue
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_sms(self):
CORE = CoreSettings.objects.first()
body: str = ""
if self.agent:
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Failed"
else:
subject = f"{self} Failed"
if self.check_type == "diskspace":
text = ""
if self.warning_threshold:
text += f" Warning Threshold: {self.warning_threshold}%"
if self.error_threshold:
text += f" Error Threshold: {self.error_threshold}%"
try:
percent_used = [
d["percent"] for d in self.agent.disks if d["device"] == self.disk
][0]
percent_free = 100 - percent_used
body = subject + f" - Free: {percent_free}%, {text}"
            except Exception:
                # self.disk was not found in the agent's disk inventory
                body = subject + f" - Disk {self.disk} does not exist"
elif self.check_type == "script":
body = subject + f" - Return code: {self.retcode}"
elif self.check_type == "ping":
body = subject
elif self.check_type == "cpuload" or self.check_type == "memory":
text = ""
if self.warning_threshold:
text += f" Warning Threshold: {self.warning_threshold}%"
if self.error_threshold:
text += f" Error Threshold: {self.error_threshold}%"
avg = int(mean(self.history))
if self.check_type == "cpuload":
body = subject + f" - Average CPU utilization: {avg}%, {text}"
elif self.check_type == "memory":
body = subject + f" - Average memory usage: {avg}%, {text}"
elif self.check_type == "winsvc":
body = subject + f" - Status: {self.more_info}"
elif self.check_type == "eventlog":
body = subject
CORE.send_sms(body, alert_template=self.agent.alert_template)
def send_resolved_email(self):
CORE = CoreSettings.objects.first()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
body = f"{self} is now back to normal"
CORE.send_mail(subject, body, alert_template=self.agent.alert_template)
def send_resolved_sms(self):
CORE = CoreSettings.objects.first()
subject = f"{self.agent.client.name}, {self.agent.site.name}, {self} Resolved"
CORE.send_sms(subject, alert_template=self.agent.alert_template)
class CheckHistory(models.Model):
check_history = models.ForeignKey(
Check,
related_name="check_history",
on_delete=models.CASCADE,
)
x = models.DateTimeField(auto_now_add=True)
y = models.PositiveIntegerField(null=True, blank=True, default=None)
results = models.JSONField(null=True, blank=True)
def __str__(self):
return self.check_history.readable_desc
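# Hedged sketch (not part of this file): Check.add_check_history, called by the
# check handlers above, is defined elsewhere in this codebase. Given the
# CheckHistory fields (x: auto-set timestamp, y: pass/fail value, results:
# captured output), it plausibly reduces to a helper like this hypothetical one:
def _example_add_check_history(check: Check, value: int, more_info=None) -> None:
    # y is 1 when the check failed and 0 when it passed; results stores the
    # (truncated) output captured at check time.
    CheckHistory.objects.create(check_history=check, y=value, results=more_info)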
|
from DataStructures.heaps import MinHeap, MaxHeap
class ArrayBasedQueue:
def __init__(self) -> None:
self.__container = []
self.__size = 0
def isEmpty(self) -> bool:
return len(self.__container) == 0
def enqueue(self, item) -> None:
self.__container.append(item)
self.__size += 1
    def dequeue(self) -> object:
        # list.pop(0) shifts every element and is O(n); fine for small queues
        if self.__size > 0:
            self.__size -= 1
            return self.__container.pop(0)
        return None  # explicit: dequeue on an empty queue yields None
class MinPriorityQueue(MinHeap):
def __init__(self, container=...) -> None:
super().__init__(container=container)
class MaxPriorityQueue(MaxHeap):
def __init__(self, container=...) -> None:
super().__init__(container=container)
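# The queue above pops from the front of a list, which is O(n). A minimal
# sketch of the same API backed by collections.deque, where both ends are O(1):
from collections import deque
class DequeQueue:
    def __init__(self) -> None:
        self.__container = deque()
    def isEmpty(self) -> bool:
        return len(self.__container) == 0
    def enqueue(self, item) -> None:
        self.__container.append(item)
    def dequeue(self) -> object:
        if self.__container:
            return self.__container.popleft()
        return None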
|
"""Public interface exposed by library.
This module contains all the interfaces that represents a generic Apple TV device and
all its features.
"""
from abc import ABC, abstractmethod
import asyncio
import hashlib
import inspect
import io
from ipaddress import IPv4Address
import re
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
MutableMapping,
NamedTuple,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from pyatv import const, convert, exceptions
from pyatv.const import (
DeviceModel,
FeatureName,
FeatureState,
InputAction,
OperatingSystem,
PairingRequirement,
Protocol,
)
from pyatv.support.device_info import lookup_version
from pyatv.support.http import ClientSessionManager
from pyatv.support.state_producer import StateProducer
__pdoc__ = {
"feature": False,
"DeviceInfo.OPERATING_SYSTEM": False,
"DeviceInfo.VERSION": False,
"DeviceInfo.BUILD_NUMBER": False,
"DeviceInfo.MODEL": False,
"DeviceInfo.MAC": False,
"DeviceInfo.RAW_MODEL": False,
}
_ALL_FEATURES = {} # type: Dict[int, Tuple[str, str]]
ReturnType = TypeVar("ReturnType", bound=Callable[..., Any])
class ArtworkInfo(NamedTuple):
"""Artwork information."""
bytes: bytes
mimetype: str
width: int
height: int
class FeatureInfo(NamedTuple):
"""Feature state and options."""
state: FeatureState
options: Optional[Dict[str, object]] = {}
def feature(index: int, name: str, doc: str) -> Callable[[ReturnType], ReturnType]:
"""Decorate functions and properties as a feature.
Note: This is an internal function.
"""
def _feat_decorator(func: ReturnType) -> ReturnType:
if index not in _ALL_FEATURES or _ALL_FEATURES[index][0] == name:
_ALL_FEATURES[index] = (name, doc)
setattr(func, "_feature_name", name)
return func
raise Exception(
f"Index {index} collides between {name} and {_ALL_FEATURES[index]}"
)
return _feat_decorator
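# Illustration of the registration side effect (kept as a comment so importing
# this module does not register a fake feature; index 99 is hypothetical):
#
#     class _Demo:
#         @feature(99, "Demo", "Demo feature.")
#         async def demo(self) -> None:
#             """Do nothing."""
#
#     assert _ALL_FEATURES[99] == ("Demo", "Demo feature.")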
def _get_first_sentence_in_pydoc(obj):
doc = obj.__doc__
index = doc.find(".")
if index == -1:
        # No period at all, so return the whole docstring
return doc
# Try to find the first complete sentence and respect
# abbreviations correctly
match = re.findall(r"(.*\.[^A-Z]*)\.(?: [A-Z].*|)", doc)
if len(match) == 1:
return match[0]
return doc[0:index]
def retrieve_commands(obj: object):
"""Retrieve all commands and help texts from an API object."""
commands = {} # type: Dict[str, str]
for func in obj.__dict__:
if not inspect.isfunction(obj.__dict__[func]) and not isinstance(
obj.__dict__[func], property
):
continue
if func.startswith("_") or func == "listener":
continue
commands[func] = _get_first_sentence_in_pydoc(obj.__dict__[func])
return commands
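# Example (sketch): retrieve_commands(RemoteControl) walks the class dict and
# returns a mapping like {"up": "Press key up", "down": "Press key down", ...},
# pairing each public method/property with the first sentence of its docstring.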
class BaseService(ABC):
"""Base class for protocol services."""
def __init__(
self,
identifier: Optional[str],
protocol: Protocol,
port: int,
properties: Optional[Mapping[str, str]],
credentials: Optional[str] = None,
password: Optional[str] = None,
) -> None:
"""Initialize a new BaseService."""
self._identifier = identifier
self._protocol = protocol
self._port = port
self._properties: MutableMapping[str, str] = dict(properties or {})
self.credentials: Optional[str] = credentials
self.password: Optional[str] = password
@property
def identifier(self) -> Optional[str]:
"""Return unique identifier associated with this service."""
return self._identifier
@property
def protocol(self) -> Protocol:
"""Return protocol type."""
return self._protocol
@property
def port(self) -> int:
"""Return service port number."""
return self._port
@property
@abstractmethod
def requires_password(self) -> bool:
"""Return if a password is required to access service."""
@property
@abstractmethod
def pairing(self) -> PairingRequirement:
"""Return if pairing is required by service."""
@property
def properties(self) -> Mapping[str, str]:
"""Return service Zeroconf properties."""
return self._properties
def merge(self, other) -> None:
"""Merge with other service of same type.
Merge will only include credentials, password and properties.
"""
self.credentials = other.credentials or self.credentials
self.password = other.password or self.password
self._properties.update(other.properties)
def __str__(self) -> str:
"""Return a string representation of this object."""
return (
f"Protocol: {convert.protocol_str(self.protocol)}, "
f"Port: {self.port}, "
f"Credentials: {self.credentials}, "
f"Requires Password: {self.requires_password}, "
f"Password: {self.password}, "
f"Pairing: {self.pairing.name}"
)
@abstractmethod
def __deepcopy__(self, memo) -> "BaseService":
"""Return deep-copy of instance."""
class PairingHandler(ABC):
"""Base class for API used to pair with an Apple TV."""
def __init__(
self, session_manager: ClientSessionManager, service: BaseService
) -> None:
"""Initialize a new instance of PairingHandler."""
self.session_manager = session_manager
self._service = service
@property
def service(self) -> BaseService:
"""Return service used for pairing."""
return self._service
async def close(self) -> None:
"""Call to free allocated resources after pairing."""
await self.session_manager.close()
@abstractmethod
def pin(self, pin) -> None:
"""Pin code used for pairing."""
raise exceptions.NotSupportedError()
@property
@abstractmethod
def device_provides_pin(self) -> bool:
"""Return True if remote device presents PIN code, else False."""
raise exceptions.NotSupportedError()
@property
@abstractmethod
def has_paired(self) -> bool:
"""If a successful pairing has been performed.
The value will be reset when stop() is called.
"""
raise exceptions.NotSupportedError()
@abstractmethod
async def begin(self) -> None:
"""Start pairing process."""
raise exceptions.NotSupportedError()
@abstractmethod
async def finish(self) -> None:
"""Stop pairing process."""
raise exceptions.NotSupportedError()
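# Typical pairing flow against a concrete PairingHandler (sketch; `handler` is
# a hypothetical instance, e.g. obtained via pyatv.pair):
#
#     await handler.begin()
#     if handler.device_provides_pin:
#         handler.pin(1234)  # PIN shown on the device screen
#     await handler.finish()
#     if handler.has_paired:
#         print("credentials:", handler.service.credentials)
#     await handler.close()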
class RemoteControl:
"""Base class for API used to control an Apple TV."""
@feature(51, "set_custom", "Custom cmds")
    async def set_custom(
        self, keyboard: str, action: InputAction, fn: int, devid: str
    ) -> bool:
        """Send a custom command."""
raise exceptions.NotSupportedError()
# pylint: disable=invalid-name
@feature(0, "Up", "Up button on remote.")
async def up(self, action: InputAction = InputAction.SingleTap) -> None:
"""Press key up."""
raise exceptions.NotSupportedError()
@feature(1, "Down", "Down button on remote.")
async def down(self, action: InputAction = InputAction.SingleTap) -> None:
"""Press key down."""
raise exceptions.NotSupportedError()
@feature(2, "Left", "Left button on remote.")
async def left(self, action: InputAction = InputAction.SingleTap) -> None:
"""Press key left."""
raise exceptions.NotSupportedError()
@feature(3, "Right", "Right button on remote.")
async def right(self, action: InputAction = InputAction.SingleTap) -> None:
"""Press key right."""
raise exceptions.NotSupportedError()
@feature(4, "Play", "Start playing media.")
async def play(self) -> None:
"""Press key play."""
raise exceptions.NotSupportedError()
@feature(5, "PlayPause", "Toggle between play/pause.")
async def play_pause(self) -> None:
"""Toggle between play and pause."""
raise exceptions.NotSupportedError()
@feature(6, "Pause", "Pause playing media.")
async def pause(self) -> None:
"""Press key play."""
raise exceptions.NotSupportedError()
@feature(7, "Stop", "Stop playing media.")
async def stop(self) -> None:
"""Press key stop."""
raise exceptions.NotSupportedError()
@feature(8, "Next", "Change to next item.")
async def next(self) -> None:
"""Press key next."""
raise exceptions.NotSupportedError()
@feature(9, "Previous", "Change to previous item.")
async def previous(self) -> None:
"""Press key previous."""
raise exceptions.NotSupportedError()
@feature(10, "Select", "Select current option.")
async def select(self, action: InputAction = InputAction.SingleTap) -> None:
"""Press key select."""
raise exceptions.NotSupportedError()
@feature(11, "Menu", "Go back to previous menu.")
async def menu(self, action: InputAction = InputAction.SingleTap) -> None:
"""Press key menu."""
raise exceptions.NotSupportedError()
@feature(12, "VolumeUp", "Increase volume (deprecated: use Audio.volume_up).")
async def volume_up(self) -> None:
"""Press key volume up.
**DEPRECATED: Use `pyatv.interface.Audio.volume_up` instead.**
"""
raise exceptions.NotSupportedError()
@feature(13, "VolumeDown", "Decrease volume (deprecated: use Audio.volume_down)..")
async def volume_down(self) -> None:
"""Press key volume down.
**DEPRECATED: Use `pyatv.interface.Audio.volume_down` instead.**
"""
raise exceptions.NotSupportedError()
@feature(14, "Home", "Home/TV button.")
async def home(self, action: InputAction = InputAction.SingleTap) -> None:
"""Press key home."""
raise exceptions.NotSupportedError()
@feature(
15, "HomeHold", "Long-press home button (deprecated: use RemoteControl.home)."
)
async def home_hold(self) -> None:
"""Hold key home."""
raise exceptions.NotSupportedError()
@feature(16, "TopMenu", "Go to main menu.")
async def top_menu(self) -> None:
"""Go to main menu (long press menu)."""
raise exceptions.NotSupportedError()
@feature(17, "Suspend", "Suspend device (deprecated; use Power.turn_off).")
async def suspend(self) -> None:
"""Suspend the device.
**DEPRECATED: Use `pyatv.interface.Power.turn_off` instead.**
"""
raise exceptions.NotSupportedError()
@feature(18, "WakeUp", "Wake up device (deprecated; use Power.turn_on).")
async def wakeup(self) -> None:
"""Wake up the device.
**DEPRECATED: Use `pyatv.interface.Power.turn_on` instead.**
"""
raise exceptions.NotSupportedError()
@feature(
36,
"SkipForward",
"Skip forward a time interval.",
)
async def skip_forward(self) -> None:
"""Skip forward a time interval.
Skip interval is typically 15-30s, but is decided by the app.
"""
raise exceptions.NotSupportedError()
@feature(37, "SkipBackward", "Skip backwards a time interval.")
async def skip_backward(self) -> None:
"""Skip backwards a time interval.
Skip interval is typically 15-30s, but is decided by the app.
"""
raise exceptions.NotSupportedError()
@feature(19, "SetPosition", "Seek to position.")
async def set_position(self, pos: int) -> None:
"""Seek in the current playing media."""
raise exceptions.NotSupportedError()
@feature(20, "SetShuffle", "Change shuffle state.")
async def set_shuffle(self, shuffle_state: const.ShuffleState) -> None:
"""Change shuffle mode to on or off."""
raise exceptions.NotSupportedError()
@feature(21, "SetRepeat", "Change repeat state.")
async def set_repeat(self, repeat_state: const.RepeatState) -> None:
"""Change repeat state."""
raise exceptions.NotSupportedError()
# TODO: Should be made into a dataclass when support for 3.6 is dropped
class Playing(ABC):
"""Base class for retrieving what is currently playing."""
_PROPERTIES = [
"media_type",
"device_state",
"title",
"artist",
"album",
"genre",
"total_time",
"position",
"shuffle",
"repeat",
"hash",
"series_name",
"season_number",
"episode_number",
"content_identifier",
]
def __init__( # pylint: disable=too-many-locals
self,
media_type: const.MediaType = const.MediaType.Unknown,
device_state: const.DeviceState = const.DeviceState.Idle,
title: Optional[str] = None,
artist: Optional[str] = None,
album: Optional[str] = None,
genre: Optional[str] = None,
total_time: Optional[int] = None,
position: Optional[int] = None,
shuffle: Optional[const.ShuffleState] = None,
repeat: Optional[const.RepeatState] = None,
hash: Optional[str] = None, # pylint: disable=redefined-builtin
series_name: Optional[str] = None,
season_number: Optional[int] = None,
episode_number: Optional[int] = None,
content_identifier: Optional[str] = None,
) -> None:
"""Initialize a new Playing instance."""
self._media_type = media_type
self._device_state = device_state
self._title = title
self._artist = artist
self._album = album
self._genre = genre
self._total_time = total_time
self._position = position
self._shuffle = shuffle
self._repeat = repeat
self._hash = hash
self._series_name = series_name
self._season_number = season_number
self._episode_number = episode_number
self._content_identifier = content_identifier
self._post_process()
def _post_process(self):
if self._position:
# Make sure position never is negative
self._position = max(self._position, 0)
            # If there's a total time, never exceed that
if self._total_time:
self._position = min(self._position, self._total_time)
def __str__(self) -> str: # pylint: disable=too-many-branches
"""Convert this playing object to a readable string."""
output = []
output.append(f" Media type: {convert.media_type_str(self.media_type)}")
output.append(f"Device state: {convert.device_state_str(self.device_state)}")
if self.title is not None:
output.append(f" Title: {self.title}")
if self.artist is not None:
output.append(f" Artist: {self.artist}")
if self.album is not None:
output.append(f" Album: {self.album}")
if self.genre is not None:
output.append(f" Genre: {self.genre}")
if self.series_name is not None:
output.append(f" Series Name: {self.series_name}")
if self.season_number is not None:
output.append(f" Season: {self.season_number}")
if self.episode_number is not None:
output.append(f" Episode: {self.episode_number}")
if self.content_identifier:
output.append(f" Identifier: {self.content_identifier}")
position = self.position
total_time = self.total_time
if position is not None and total_time is not None and total_time != 0:
output.append(
f" Position: {position}/{total_time}s "
f"({float(position) / float(total_time):.1%})"
)
elif position is not None and position != 0:
output.append(f" Position: {position}s")
        elif total_time is not None and total_time != 0:
output.append(f" Total time: {total_time}s")
if self.repeat is not None:
output.append(f" Repeat: {convert.repeat_str(self.repeat)}")
if self.shuffle is not None:
output.append(f" Shuffle: {convert.shuffle_str(self.shuffle)}")
return "\n".join(output)
def __eq__(self, other):
"""Compare if two objects are equal."""
if isinstance(other, Playing):
for prop in self._PROPERTIES:
if getattr(self, prop) != getattr(other, prop):
return False
return True
return False
@property
def hash(self) -> str:
"""Create a unique hash for what is currently playing.
The hash is based on title, artist, album and total time. It should
always be the same for the same content, but it is not guaranteed.
"""
if self._hash:
return self._hash
base = f"{self.title}{self.artist}{self.album}{self.total_time}"
return hashlib.sha256(base.encode("utf-8")).hexdigest()
@property
def media_type(self) -> const.MediaType:
"""Type of media is currently playing, e.g. video, music."""
return self._media_type
@property
def device_state(self) -> const.DeviceState:
"""Device state, e.g. playing or paused."""
return self._device_state
@property # type: ignore
@feature(22, "Title", "Title of playing media.")
def title(self) -> Optional[str]:
"""Title of the current media, e.g. movie or song name."""
return self._title
@property # type: ignore
@feature(23, "Artist", "Artist of playing song.")
def artist(self) -> Optional[str]:
"""Artist of the currently playing song."""
return self._artist
@property # type: ignore
@feature(24, "Album", "Album from playing artist.")
def album(self) -> Optional[str]:
"""Album of the currently playing song."""
return self._album
@property # type: ignore
@feature(25, "Genre", "Genre of playing song.")
def genre(self) -> Optional[str]:
"""Genre of the currently playing song."""
return self._genre
@property # type: ignore
@feature(26, "TotalTime", "Total length of playing media (seconds).")
def total_time(self) -> Optional[int]:
"""Total play time in seconds."""
return self._total_time
@property # type: ignore
@feature(27, "Position", "Current play time position.")
def position(self) -> Optional[int]:
"""Position in the playing media (seconds)."""
return self._position
@property # type: ignore
@feature(28, "Shuffle", "Shuffle state.")
def shuffle(self) -> Optional[const.ShuffleState]:
"""If shuffle is enabled or not."""
return self._shuffle
@property # type: ignore
@feature(29, "Repeat", "Repeat state.")
def repeat(self) -> Optional[const.RepeatState]:
"""Repeat mode."""
return self._repeat
@property # type: ignore
@feature(40, "SeriesName", "Title of TV series.")
def series_name(self) -> Optional[str]:
"""Title of TV series."""
return self._series_name
@property # type: ignore
@feature(41, "SeasonNumber", "Season number of TV series.")
def season_number(self) -> Optional[int]:
"""Season number of TV series."""
return self._season_number
@property # type: ignore
@feature(42, "EpisodeNumber", "Episode number of TV series.")
def episode_number(self) -> Optional[int]:
"""Episode number of TV series."""
return self._episode_number
@property # type: ignore
@feature(47, "ContentIdentifier", "Identifier for Content")
def content_identifier(self) -> Optional[str]:
"""Content identifier (app specific)."""
return self._content_identifier
class App:
"""Information about an app."""
def __init__(self, name: Optional[str], identifier: str) -> None:
"""Initialize a new App instance."""
self._name = name
self._identifier = identifier
@property
def name(self) -> Optional[str]:
"""User friendly name of app."""
return self._name
@property
def identifier(self) -> str:
"""Return a unique bundle id for the app."""
return self._identifier
def __str__(self) -> str:
"""Convert app info to readable string."""
return f"App: {self.name} ({self.identifier})"
def __eq__(self, other) -> bool:
"""Return self==other."""
if isinstance(other, App):
return self.name == other.name and self.identifier == other.identifier
return False
class Apps:
"""Base class for app handling."""
@feature(38, "AppList", "List of launchable apps.")
async def app_list(self) -> List[App]:
"""Fetch a list of apps that can be launched."""
raise exceptions.NotSupportedError()
@feature(39, "LaunchApp", "Launch an app.")
async def launch_app(self, bundle_id: str) -> None:
"""Launch an app based on bundle ID."""
raise exceptions.NotSupportedError()
class Metadata:
"""Base class for retrieving metadata from an Apple TV."""
@property
def device_id(self) -> Optional[str]:
"""Return a unique identifier for current device."""
raise exceptions.NotSupportedError()
@feature(30, "Artwork", "Playing media artwork.")
async def artwork(
self, width: Optional[int] = 512, height: Optional[int] = None
) -> Optional[ArtworkInfo]:
"""Return artwork for what is currently playing (or None).
The parameters "width" and "height" makes it possible to request artwork of a
specific size. This is just a request, the device might impose restrictions and
return artwork of a different size. Set both parameters to None to request
default size. Set one of them and let the other one be None to keep original
aspect ratio.
"""
raise exceptions.NotSupportedError()
@property
def artwork_id(self) -> str:
"""Return a unique identifier for current artwork."""
raise exceptions.NotSupportedError()
async def playing(self) -> Playing:
"""Return what is currently playing."""
raise exceptions.NotSupportedError()
@property # type: ignore
@feature(35, "App", "App playing media.")
def app(self) -> Optional[App]:
"""Return information about current app playing something.
Do note that this property returns which app is currently playing something and
not which app is currently active. If nothing is playing, the corresponding
feature will be unavailable.
"""
raise exceptions.NotSupportedError()
class PushListener(ABC):
"""Listener interface for push updates."""
@abstractmethod
def playstatus_update(self, updater, playstatus: Playing) -> None:
"""Inform about changes to what is currently playing."""
@abstractmethod
def playstatus_error(self, updater, exception: Exception) -> None:
"""Inform about an error when updating play status."""
class PushUpdater(ABC, StateProducer):
"""Base class for push/async updates from an Apple TV.
Listener interface: `pyatv.interface.PushListener`
"""
def __init__(self, loop: asyncio.AbstractEventLoop):
"""Initialize a new PushUpdater."""
super().__init__()
self.loop = loop
self._previous_state: Optional[Playing] = None
@property
@abstractmethod
def active(self) -> bool:
"""Return if push updater has been started."""
raise NotImplementedError
@feature(43, "PushUpdates", "Push updates are supported.")
@abstractmethod
def start(self, initial_delay: int = 0) -> None:
"""Begin to listen to updates.
If an error occurs, start must be called again.
"""
raise NotImplementedError
@abstractmethod
def stop(self) -> None:
"""No longer forward updates to listener."""
raise NotImplementedError
def post_update(self, playing: Playing) -> None:
"""Post an update to listener."""
if playing != self._previous_state:
self.loop.call_soon(self.listener.playstatus_update, self, playing)
self._previous_state = playing
class Stream: # pylint: disable=too-few-public-methods
"""Base class for stream functionality."""
def close(self) -> None: # pylint: disable=no-self-use
"""Close connection and release allocated resources."""
raise exceptions.NotSupportedError()
@feature(31, "PlayUrl", "Stream a URL on device.")
async def play_url(self, url: str, **kwargs) -> None:
"""Play media from an URL on the device."""
raise exceptions.NotSupportedError()
@feature(44, "StreamFile", "Stream local file to device.")
async def stream_file(self, file: Union[str, io.BufferedReader], **kwargs) -> None:
"""Stream local file to device.
INCUBATING METHOD - MIGHT CHANGE IN THE FUTURE!
"""
raise exceptions.NotSupportedError()
class DeviceListener(ABC):
"""Listener interface for generic device updates."""
@abstractmethod
def connection_lost(self, exception: Exception) -> None:
"""Device was unexpectedly disconnected."""
raise NotImplementedError()
@abstractmethod
def connection_closed(self) -> None:
"""Device connection was (intentionally) closed."""
raise NotImplementedError()
class PowerListener(ABC): # pylint: disable=too-few-public-methods
"""Listener interface for power updates."""
@abstractmethod
def powerstate_update(
self, old_state: const.PowerState, new_state: const.PowerState
):
"""Device power state was updated."""
raise NotImplementedError()
class Power(ABC, StateProducer):
"""Base class for retrieving power state from an Apple TV.
    Listener interface: `pyatv.interface.PowerListener`
"""
@property # type: ignore
@feature(32, "PowerState", "Current device power state.")
def power_state(self) -> const.PowerState:
"""Return device power state."""
raise exceptions.NotSupportedError()
@feature(33, "TurnOn", "Turn device on.")
async def turn_on(self, await_new_state: bool = False) -> None:
"""Turn device on."""
raise exceptions.NotSupportedError()
@feature(34, "TurnOff", "Turn off device.")
async def turn_off(self, await_new_state: bool = False) -> None:
"""Turn device off."""
raise exceptions.NotSupportedError()
class DeviceInfo:
"""General information about device."""
OPERATING_SYSTEM = "os"
VERSION = "version"
BUILD_NUMBER = "build_number"
MODEL = "model"
RAW_MODEL = "raw_model"
MAC = "mac"
def __init__(self, device_info: Mapping[str, Any]) -> None:
"""Initialize a new DeviceInfo instance."""
self._devinfo = device_info
self._os = self._pop_with_type(
self.OPERATING_SYSTEM, OperatingSystem.Unknown, OperatingSystem
)
self._version = self._pop_with_type(self.VERSION, None, str)
self._build_number = self._pop_with_type(self.BUILD_NUMBER, None, str)
self._model = self._pop_with_type(self.MODEL, DeviceModel.Unknown, DeviceModel)
self._mac = self._pop_with_type(self.MAC, None, str)
def _pop_with_type(self, field, default, expected_type):
value = self._devinfo.pop(field, default)
if value is None or isinstance(value, expected_type):
return value
raise TypeError(
f"expected {expected_type} for '{field}'' but got {type(value)}"
)
@property
def operating_system(self) -> const.OperatingSystem:
"""Operating system running on device."""
if self._os != OperatingSystem.Unknown:
return self._os
if self.model in [DeviceModel.AirPortExpress, DeviceModel.AirPortExpressGen2]:
return OperatingSystem.AirPortOS
if self.model in [DeviceModel.HomePod, DeviceModel.HomePodMini]:
return OperatingSystem.TvOS
if self.model in [
DeviceModel.Gen2,
DeviceModel.Gen3,
DeviceModel.Gen4,
DeviceModel.Gen4K,
DeviceModel.AppleTV4KGen2,
]:
return OperatingSystem.TvOS
return OperatingSystem.Unknown
@property
def version(self) -> Optional[str]:
"""Operating system version."""
if self._version:
return self._version
version = lookup_version(self.build_number)
if version:
return version
return self._version
@property
def build_number(self) -> Optional[str]:
"""Operating system build number, e.g. 17K795."""
return self._build_number
@property
def model(self) -> const.DeviceModel:
"""Hardware model name, e.g. 3, 4 or 4K."""
return self._model
@property
def raw_model(self) -> Optional[str]:
"""Return raw model description.
If `pyatv.interface.DeviceInfo.model` returns `pyatv.const.DeviceModel.Unknown`
then this property contains the raw model string (if any is available).
"""
return self._devinfo.get(DeviceInfo.RAW_MODEL)
@property
def mac(self) -> Optional[str]:
"""Device MAC address."""
return self._mac
def __str__(self) -> str:
"""Convert device info to readable string."""
# If no model is available but raw_model is, use that. Otherwise fall back
# to whatever model_str returns.
if self.model == DeviceModel.Unknown and self.raw_model:
model = self.raw_model
else:
model = convert.model_str(self.model)
output = (
model
+ ", "
+ {
OperatingSystem.Legacy: "ATV SW",
OperatingSystem.TvOS: "tvOS",
OperatingSystem.AirPortOS: "AirPortOS",
}.get(self.operating_system, "Unknown OS")
)
if self.version:
output += " " + self.version
if self.build_number:
output += " build " + self.build_number
return output
class Features:
"""Base class for supported feature functionality."""
def get_feature(self, feature_name: FeatureName) -> FeatureInfo:
"""Return current state of a feature."""
raise NotImplementedError()
def all_features(self, include_unsupported=False) -> Dict[FeatureName, FeatureInfo]:
"""Return state of all features."""
features: Dict[FeatureName, FeatureInfo] = {}
for name in FeatureName:
info = self.get_feature(name)
if info.state != FeatureState.Unsupported or include_unsupported:
features[name] = info
return features
def in_state(
self,
states: Union[List[FeatureState], FeatureState],
*feature_names: FeatureName
):
"""Return if features are in a specific state.
This method will return True if all given features are in the state specified
by "states". If "states" is a list of states, it is enough for the feature to be
in one of the listed states.
"""
for name in feature_names:
info = self.get_feature(name)
expected_states = states if isinstance(states, list) else [states]
if info.state not in expected_states:
return False
return True
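# Example (sketch, assuming a connected `atv` instance): require a feature to
# be available, or accept either of two states per feature:
#
#     atv.features.in_state(FeatureState.Available, FeatureName.Play)
#     atv.features.in_state(
#         [FeatureState.Available, FeatureState.Unknown], FeatureName.Pause
#     )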
class Audio:
"""Base class for audio functionality.
Volume level is managed in percent where 0 is muted and 100 is max volume.
"""
@property # type: ignore
@feature(45, "Volume", "Current volume level.")
def volume(self) -> float:
"""Return current volume level.
Range is in percent, i.e. [0.0-100.0].
"""
raise exceptions.NotSupportedError()
@feature(46, "SetVolume", "Set volume level.")
async def set_volume(self, level: float) -> None:
"""Change current volume level.
Range is in percent, i.e. [0.0-100.0].
"""
raise exceptions.NotSupportedError()
@feature(12, "VolumeUp", "Increase volume.")
async def volume_up(self) -> None:
"""Increase volume by one step.
        Step size is device dependent, but usually around 2.5% of the total volume
range. It is not necessarily linear.
Call will block until volume change has been acknowledged by the device (when
possible and supported).
"""
raise exceptions.NotSupportedError()
@feature(13, "VolumeDown", "Decrease volume.")
async def volume_down(self) -> None:
"""Decrease volume by one step.
Step size is device dependent, but usually around 2.5% of the total volume
range. It is not necessarily linear.
Call will block until volume change has been acknowledged by the device (when
possible and supported).
"""
raise exceptions.NotSupportedError()
class BaseConfig(ABC):
"""Representation of a device configuration.
An instance of this class represents a single device. A device can have
several services depending on the protocols it supports, e.g. DMAP or
AirPlay.
"""
def __init__(self, properties: Mapping[str, Mapping[str, Any]]) -> None:
"""Initialize a new BaseConfig instance."""
self._properties = properties
@property
@abstractmethod
def address(self) -> IPv4Address:
"""IP address of device."""
@property
@abstractmethod
def name(self) -> str:
"""Name of device."""
@property
@abstractmethod
def deep_sleep(self) -> bool:
"""If device is in deep sleep."""
@property
@abstractmethod
def services(self) -> List[BaseService]:
"""Return all supported services."""
@property
@abstractmethod
def device_info(self) -> DeviceInfo:
"""Return general device information."""
@abstractmethod
def add_service(self, service: BaseService) -> None:
"""Add a new service.
If the service already exists, it will be merged.
"""
@abstractmethod
def get_service(self, protocol: Protocol) -> Optional[BaseService]:
"""Look up a service based on protocol.
If a service with the specified protocol is not available, None is
returned.
"""
@property
def properties(self) -> Mapping[str, Mapping[str, str]]:
"""Return Zeroconf properties."""
return self._properties
@property
def ready(self) -> bool:
"""Return if configuration is ready, (at least one service with identifier)."""
for service in self.services:
if service.identifier:
return True
return False
@property
def identifier(self) -> Optional[str]:
"""Return the main identifier associated with this device."""
for prot in [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay, Protocol.RAOP]:
service = self.get_service(prot)
if service and service.identifier is not None:
return service.identifier
return None
@property
def all_identifiers(self) -> List[str]:
"""Return all unique identifiers for this device."""
return [x.identifier for x in self.services if x.identifier is not None]
def main_service(self, protocol: Optional[Protocol] = None) -> BaseService:
"""Return suggested service used to establish connection."""
protocols = (
[protocol]
if protocol is not None
else [Protocol.MRP, Protocol.DMAP, Protocol.AirPlay, Protocol.RAOP]
)
for prot in protocols:
service = self.get_service(prot)
if service is not None:
return service
raise exceptions.NoServiceError("no service to connect to")
def set_credentials(self, protocol: Protocol, credentials: str) -> bool:
"""Set credentials for a protocol if it exists."""
service = self.get_service(protocol)
if service:
service.credentials = credentials
return True
return False
def __eq__(self, other) -> bool:
"""Compare instance with another instance."""
if isinstance(other, self.__class__):
return self.identifier == other.identifier
return False
def __str__(self) -> str:
"""Return a string representation of this object."""
device_info = self.device_info
services = "\n".join([f" - {s}" for s in self.services])
identifiers = "\n".join([f" - {x}" for x in self.all_identifiers])
return (
f" Name: {self.name}\n"
f" Model/SW: {device_info}\n"
f" Address: {self.address}\n"
f" MAC: {self.device_info.mac}\n"
f" Deep Sleep: {self.deep_sleep}\n"
f"Identifiers:\n"
f"{identifiers}\n"
f"Services:\n"
f"{services}"
)
@abstractmethod
def __deepcopy__(self, memo) -> "BaseConfig":
"""Return deep-copy of instance."""
class AppleTV(ABC, StateProducer[DeviceListener]):
"""Base class representing an Apple TV.
    Listener interface: `pyatv.interface.DeviceListener`
"""
@abstractmethod
async def connect(self) -> None:
"""Initiate connection to device.
No need to call it yourself, it's done automatically.
"""
@abstractmethod
def close(self) -> Set[asyncio.Task]:
"""Close connection and release allocated resources."""
@property
@abstractmethod
def device_info(self) -> DeviceInfo:
"""Return API for device information."""
@property
@abstractmethod
def service(self) -> BaseService:
"""Return service used to connect to the Apple TV."""
@property
@abstractmethod
def remote_control(self) -> RemoteControl:
"""Return API for controlling the Apple TV."""
@property
@abstractmethod
def metadata(self) -> Metadata:
"""Return API for retrieving metadata from the Apple TV."""
@property
@abstractmethod
def push_updater(self) -> PushUpdater:
"""Return API for handling push update from the Apple TV."""
@property
@abstractmethod
def stream(self) -> Stream:
"""Return API for streaming media."""
@property
@abstractmethod
def power(self) -> Power:
"""Return API for power management."""
@property
@abstractmethod
def features(self) -> Features:
"""Return features interface."""
@property
@abstractmethod
def apps(self) -> Apps:
"""Return apps interface."""
@property
@abstractmethod
def audio(self) -> Audio:
"""Return audio interface."""
|
# Rotate through the list: pop the last element and, if it has no duplicate
# among the remaining elements, it is the unpaired one; otherwise reinsert it
# at the front and continue.
size = int(input())
assert size % 2 == 1
lst = [int(x) for x in input().split()]
for _ in range(len(lst)):
    tmp = lst.pop()
    if tmp not in lst:
        print(tmp)
        break
    lst.insert(0, tmp)
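# Equivalent single-pass alternative (sketch): count occurrences and emit the
# element that appears exactly once. Kept as a comment so the script still
# prints one answer:
#
#     from collections import Counter
#     print(next(x for x, n in Counter(lst).items() if n == 1))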
|
import base64
import json
import boto3
import datetime
def lambda_handler(event, context):
"""
Receive a batch of events from Kinesis and insert as-is into our DynamoDB table if invoked asynchronously,
otherwise perform an asynchronous invocation of this Lambda and immediately return
"""
if not event.get('async'):
invoke_self_async(event, context)
return
print('Received request')
item = None
dynamo_db = boto3.resource('dynamodb')
table = dynamo_db.Table('moxatable-develop')
decoded_record_data = [base64.b64decode(record['kinesis']['data']) for record in event['Records']]
deserialized_data = [json.loads(decoded_record) for decoded_record in decoded_record_data]
with table.batch_writer() as batch_writer:
for item in deserialized_data:
# Add a processed time so we have a rough idea how far behind we are
item['processed'] = datetime.datetime.utcnow().isoformat()
batch_writer.put_item(Item=item)
# Print the last item to make it easy to see how we're doing
print(json.dumps(item))
print('Number of records: {}'.format(str(len(deserialized_data))))
def invoke_self_async(event, context):
"""
Have the Lambda invoke itself asynchronously, passing the same event it received originally,
and tagging the event as 'async' so it's actually processed
"""
event['async'] = True
called_function = context.invoked_function_arn
boto3.client('lambda').invoke(
FunctionName=called_function,
InvocationType='Event',
Payload=bytes(json.dumps(event), 'utf-8')
)
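# Local smoke test (sketch; the payload mirrors the Kinesis event shape, the
# values are made up, and the call stays commented out because it needs AWS
# credentials and the target table):
#
#     event = {
#         'async': True,
#         'Records': [{'kinesis': {'data': base64.b64encode(
#             json.dumps({'id': 'example'}).encode()).decode()}}],
#     }
#     # lambda_handler(event, None)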
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2019, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_data_labels29.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [67858816, 67863296]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'data_labels': {'value': True, 'custom': [{'delete': 1}]}
})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from v1.forms import FilterableListForm
from v1.models.learn_page import AbstractFilterPage
from v1.util.ref import get_category_children
from v1.util.util import get_secondary_nav_items
class FilterableListMixin(object):
"""Wagtail Page mixin that allows for filtering of other pages."""
filterable_categories = []
"""Determines page categories to be filtered; see filterable_pages."""
filterable_children_only = True
"""Determines page tree to be filtered; see filterable_pages."""
filterable_per_page_limit = 10
"""Number of results to return per page."""
do_not_index = False
"""Determines whether we tell crawlers to index the page or not."""
def filterable_pages(self):
"""Return pages that are eligible to be filtered by this page.
Always includes only live pages and pages that live in the same Wagtail
site as this page. If this page cannot be mapped to a Wagtail site (for
example, if it does not live under a site root), then it will not
return any filterable results.
The class property filterable_categories can be set to a list of page
categories from the set in v1.util.ref.categories. If set, this page
will only filter pages that are tagged with a tag in those categories.
By default this is an empty list and all page tags are eligible.
The class property filterable_children_only determines whether this
page filters only pages that are direct children of this page. By
default this is True; set this to False to allow this page to filter
pages that are not direct children of this page.
"""
site = self.get_site()
if not site:
return AbstractFilterPage.objects.none()
pages = AbstractFilterPage.objects.in_site(site).live()
if self.filterable_categories:
category_names = get_category_children(self.filterable_categories)
pages = pages.filter(categories__name__in=category_names)
if self.filterable_children_only:
pages = pages.child_of(self)
return pages
def get_context(self, request, *args, **kwargs):
context = super(FilterableListMixin, self).get_context(
request, *args, **kwargs
)
form_data, has_active_filters = self.get_form_data(request.GET)
form = FilterableListForm(
form_data,
filterable_pages=self.filterable_pages()
)
context.update({
'filter_data': self.process_form(request, form),
'get_secondary_nav_items': get_secondary_nav_items,
'has_active_filters': has_active_filters,
})
return context
def process_form(self, request, form):
filter_data = {}
if form.is_valid():
paginator = Paginator(form.get_page_set(),
self.filterable_per_page_limit)
page = request.GET.get('page')
# Get the page number in the request and get the page from the
# paginator to serve.
try:
pages = paginator.page(page)
except PageNotAnInteger:
pages = paginator.page(1)
except EmptyPage:
pages = paginator.page(paginator.num_pages)
filter_data['page_set'] = pages
else:
paginator = Paginator([], self.filterable_per_page_limit)
filter_data['page_set'] = paginator.page(1)
filter_data['form'] = form
return filter_data
def set_do_not_index(self, field, value):
"""Do not index queries unless they consist of a single topic field."""
if field != 'topics' or len(value) > 1:
self.do_not_index = True
# Set up the form's data either with values from the GET request
# or with defaults based on whether it's a dropdown/list or a text field
def get_form_data(self, request_dict):
form_data = {}
has_active_filters = False
for field in FilterableListForm.declared_fields:
if field in ['categories', 'topics', 'authors']:
value = request_dict.getlist(field, [])
else:
value = request_dict.get(field, '')
if value:
form_data[field] = value
has_active_filters = True
self.set_do_not_index(field, value)
return form_data, has_active_filters
def serve(self, request, *args, **kwargs):
"""Modify response headers."""
response = super(FilterableListMixin, self).serve(request)
# Set a shorter TTL in Akamai
response['Edge-Control'] = 'cache-maxage=10m'
# Set noindex for crawlers if needed
if self.do_not_index:
response['X-Robots-Tag'] = 'noindex'
return response
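# Hedged usage sketch (hypothetical page class; `SomeWagtailPage` stands in for
# whatever page base the project uses):
#
#     class NewsroomLandingPage(FilterableListMixin, SomeWagtailPage):
#         filterable_categories = ['Newsroom']
#         filterable_children_only = False
#         filterable_per_page_limit = 25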
|
# Copyright (c) 2006-2017, Christoph Gohlke
# Copyright (c) 2006-2017, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2017.02.17
Requirements
------------
* `CPython 2.7 or 3.5 <http://www.python.org>`_
* `Numpy 1.11 <http://www.numpy.org>`_
* `Transformations.c 2017.02.17 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
Other Python packages and modules for 3D transformations and quaternions:
* `Transforms3d <https://pypi.python.org/pypi/transforms3d>`_
includes most code of this module.
* `Blender.mathutils <http://www.blender.org/api/blender_python_api>`_
* `numpy-dtypes <https://github.com/numpy/numpy-dtypes>`_
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
__version__ = '2017.02.17'
__docformat__ = 'restructuredtext en'
__all__ = ()
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4))
True
"""
return numpy.identity(4)
def translation_matrix(direction):
"""Return matrix to translate by direction vector.
>>> v = numpy.random.random(3) - 0.5
>>> numpy.allclose(v, translation_matrix(v)[:3, 3])
True
"""
M = numpy.identity(4)
M[:3, 3] = direction[:3]
return M
def translation_from_matrix(matrix):
"""Return translation vector from translation matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = translation_from_matrix(translation_matrix(v0))
>>> numpy.allclose(v0, v1)
True
"""
return numpy.array(matrix, copy=False)[:3, 3].copy()
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_from_matrix(matrix):
"""Return mirror plane point and normal vector from reflection matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = numpy.random.random(3) - 0.5
>>> M0 = reflection_matrix(v0, v1)
>>> point, normal = reflection_from_matrix(M0)
>>> M1 = reflection_matrix(point, normal)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
# normal: unit eigenvector corresponding to eigenvalue -1
w, V = numpy.linalg.eig(M[:3, :3])
i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
normal = numpy.real(V[:, i[0]]).squeeze()
# point: any unit eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return point, normal
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
>>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
>>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
True
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float64)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.diag([cosa, cosa, cosa])
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array([[ 0.0, -direction[2], direction[1]],
[ direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
M[:3, 3] = point - numpy.dot(R, point)
return M
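# Editorial note (not part of the original module): the body of rotation_matrix
# is Rodrigues' rotation formula, R = cos(a)*I + (1-cos(a))*n*n.T + sin(a)*[n]_x,
# where n is the unit rotation axis and [n]_x is its skew-symmetric
# cross-product matrix; the optional point argument shifts the axis off the origin.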
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
    # point: unit eigenvector of R corresponding to eigenvalue of 1
w, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.diag([factor, factor, factor, 1.0])
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
    Using either perspective point, projection direction, or neither.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix([0, 0, 0], [1, 0, 0])
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3-v1[1])
True
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M
def projection_from_matrix(matrix, pseudo=False):
"""Return projection plane and perspective point from projection matrix.
Return values are same as arguments for projection_matrix function:
point, normal, direction, perspective, and pseudo.
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, direct)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
>>> result = projection_from_matrix(P0, pseudo=False)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> result = projection_from_matrix(P0, pseudo=True)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not pseudo and len(i):
# point: any eigenvector corresponding to eigenvalue 1
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
# direction: unit eigenvector corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 0")
direction = numpy.real(V[:, i[0]]).squeeze()
direction /= vector_norm(direction)
# normal: unit eigenvector of M33.T corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33.T)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if len(i):
# parallel projection
normal = numpy.real(V[:, i[0]]).squeeze()
normal /= vector_norm(normal)
return point, normal, direction, None, False
else:
# orthogonal projection, where normal equals direction vector
return point, direction, None, None, False
else:
# perspective projection
i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
if not len(i):
raise ValueError(
"no eigenvector not corresponding to eigenvalue 0")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
normal = - M[3, :3]
perspective = M[:3, 3] / numpy.dot(point[:3], normal)
if pseudo:
perspective -= normal
return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
"""Return matrix to obtain normalized device coordinates from frustum.
The frustum bounds are axis-aligned along x (left, right),
y (bottom, top) and z (near, far).
Normalized device coordinates are in range [-1, 1] if coordinates are
inside the frustum.
If perspective is True the frustum is a truncated pyramid with the
perspective point at origin and direction along z axis, otherwise an
orthographic canonical view volume (a box).
Homogeneous coordinates transformed by the perspective clip matrix
need to be dehomogenized (divided by w coordinate).
>>> frustum = numpy.random.rand(6)
>>> frustum[1] += frustum[0]
>>> frustum[3] += frustum[2]
>>> frustum[5] += frustum[4]
>>> M = clip_matrix(perspective=False, *frustum)
>>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
array([-1., -1., -1., 1.])
>>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
array([ 1., 1., 1., 1.])
>>> M = clip_matrix(perspective=True, *frustum)
>>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
>>> v / v[3]
array([-1., -1., -1., 1.])
>>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
>>> v / v[3]
array([ 1., 1., -1., 1.])
"""
if left >= right or bottom >= top or near >= far:
raise ValueError("invalid frustum")
if perspective:
if near <= _EPS:
raise ValueError("invalid frustum: near <= 0")
t = 2.0 * near
M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
[0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
[0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
[0.0, 0.0, -1.0, 0.0]]
else:
M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
[0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
[0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
[0.0, 0.0, 0.0, 1.0]]
return numpy.array(M)
def shear_matrix(angle, direction, point, normal):
"""Return matrix to shear by angle along direction vector on shear plane.
The shear plane is defined by a point and normal vector. The direction
vector must be orthogonal to the plane's normal vector.
A point P is transformed by the shear matrix into P" such that
the vector P-P" is parallel to the direction vector and its extent is
given by the angle of P-P'-P", where P' is the orthogonal projection
of P onto the shear plane.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S = shear_matrix(angle, direct, point, normal)
>>> numpy.allclose(1, numpy.linalg.det(S))
True
"""
normal = unit_vector(normal[:3])
direction = unit_vector(direction[:3])
if abs(numpy.dot(normal, direction)) > 1e-6:
raise ValueError("direction and normal vectors are not orthogonal")
angle = math.tan(angle)
M = numpy.identity(4)
M[:3, :3] += angle * numpy.outer(direction, normal)
M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
return M
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
# normal: cross independent eigenvectors corresponding to the eigenvalue 1
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
if len(i) < 2:
raise ValueError("no two linear independent eigenvectors found %s" % w)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
w = vector_norm(n)
if w > lenorm:
lenorm = w
normal = n
normal /= lenorm
# direction and angle
direction = numpy.dot(M33 - numpy.identity(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
# point: eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return angle, direction, point, normal
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
matrix : array_like
        Non-degenerate homogeneous transformation matrix
Return tuple of:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
    Raise ValueError if matrix is of wrong type or degenerate.
>>> T0 = translation_matrix([1, 2, 3])
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
>>> T1 = translation_matrix(trans)
>>> numpy.allclose(T0, T1)
True
>>> S = scale_matrix(0.123)
>>> scale, shear, angles, trans, persp = decompose_matrix(S)
>>> scale[0]
0.123
>>> R0 = euler_matrix(1, 2, 3)
>>> scale, shear, angles, trans, persp = decompose_matrix(R0)
>>> R1 = euler_matrix(*angles)
>>> numpy.allclose(R0, R1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0.0, 0.0, 0.0, 1.0
if not numpy.linalg.det(P):
raise ValueError("matrix is singular")
scale = numpy.zeros((3, ))
shear = [0.0, 0.0, 0.0]
angles = [0.0, 0.0, 0.0]
if any(abs(M[:3, 3]) > _EPS):
perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
M[:, 3] = 0.0, 0.0, 0.0, 1.0
else:
perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
translate = M[3, :3].copy()
M[3, :3] = 0.0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = numpy.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = numpy.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = numpy.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
shear[1:] /= scale[2]
if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
numpy.negative(scale, scale)
numpy.negative(row, row)
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
# angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
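# Editorial note (not part of the original module): decompose_matrix peels the
# factors off with a Gram-Schmidt pass over the rows of the upper 3x3 block:
# the row norms become the scale factors, the subtracted projections become the
# x-y, x-z and y-z shear factors, and the Euler angles are then read from the
# remaining orthonormal rotation part.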
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return transformation matrix from sequence of transformations.
This is the inverse of the decompose_matrix function.
Sequence of transformations:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
>>> scale = numpy.random.random(3) - 0.5
>>> shear = numpy.random.random(3) - 0.5
>>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
>>> trans = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(4) - 0.5
>>> M0 = compose_matrix(scale, shear, angles, trans, persp)
>>> result = decompose_matrix(M0)
>>> M1 = compose_matrix(*result)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.identity(4)
if perspective is not None:
P = numpy.identity(4)
P[3, :] = perspective[:4]
M = numpy.dot(M, P)
if translate is not None:
T = numpy.identity(4)
T[:3, 3] = translate[:3]
M = numpy.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = numpy.dot(M, R)
if shear is not None:
Z = numpy.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = numpy.dot(M, Z)
if scale is not None:
S = numpy.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = numpy.dot(M, S)
M /= M[3, 3]
return M
def orthogonalization_matrix(lengths, angles):
"""Return orthogonalization matrix for crystallographic cell coordinates.
Angles are expected in degrees.
The de-orthogonalization matrix is the inverse.
>>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
>>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
True
>>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
>>> numpy.allclose(numpy.sum(O), 43.063229)
True
"""
a, b, c = lengths
angles = numpy.radians(angles)
sina, sinb, _ = numpy.sin(angles)
cosa, cosb, cosg = numpy.cos(angles)
co = (cosa * cosb - cosg) / (sina * sinb)
return numpy.array([
[ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0],
[-a*sinb*co, b*sina, 0.0, 0.0],
[ a*cosb, b*cosa, c, 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
"""Return affine transform matrix to register two point sets.
v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
coordinates, where ndims is the dimensionality of the coordinate space.
If shear is False, a similarity transformation matrix is returned.
If also scale is False, a rigid/Euclidean transformation matrix
is returned.
    By default the algorithm by Hartley and Zisserman [15] is used.
If usesvd is True, similarity and Euclidean transformation matrices
are calculated by minimizing the weighted sum of squared deviations
(RMSD) according to the algorithm by Kabsch [8].
Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
is used, which is slower when using this Python implementation.
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
>>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
>>> affine_matrix_from_points(v0, v1)
array([[ 0.14549, 0.00062, 675.50008],
[ 0.00048, 0.14094, 53.24971],
[ 0. , 0. , 1. ]])
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> S = scale_matrix(random.random())
>>> M = concatenate_matrices(T, R, S)
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
>>> M = affine_matrix_from_points(v0[:3], v1[:3])
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
More examples in superimposition_matrix()
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
ndims = v0.shape[0]
if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
raise ValueError("input arrays are of wrong shape or type")
# move centroids to origin
t0 = -numpy.mean(v0, axis=1)
M0 = numpy.identity(ndims+1)
M0[:ndims, ndims] = t0
v0 += t0.reshape(ndims, 1)
t1 = -numpy.mean(v1, axis=1)
M1 = numpy.identity(ndims+1)
M1[:ndims, ndims] = t1
v1 += t1.reshape(ndims, 1)
if shear:
# Affine transformation
A = numpy.concatenate((v0, v1), axis=0)
u, s, vh = numpy.linalg.svd(A.T)
vh = vh[:ndims].T
B = vh[:ndims]
C = vh[ndims:2*ndims]
t = numpy.dot(C, numpy.linalg.pinv(B))
t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
elif usesvd or ndims != 3:
# Rigid transformation via SVD of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(ndims+1)
M[:ndims, :ndims] = R
else:
# Rigid transformation matrix via quaternion
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = [[xx+yy+zz, 0.0, 0.0, 0.0],
[yz-zy, xx-yy-zz, 0.0, 0.0],
[zx-xz, xy+yx, yy-xx-zz, 0.0],
[xy-yx, zx+xz, yz+zy, zz-xx-yy]]
# quaternion: eigenvector corresponding to most positive eigenvalue
w, V = numpy.linalg.eigh(N)
q = V[:, numpy.argmax(w)]
q /= vector_norm(q) # unit quaternion
# homogeneous transformation matrix
M = quaternion_matrix(q)
if scale and not shear:
# Affine transformation; scale is ratio of RMS deviations from centroid
v0 *= v0
v1 *= v1
M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# move centroids back
M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
M /= M[ndims, ndims]
return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
"""Return matrix to transform given 3D point set into second point set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points.
The parameters scale and usesvd are explained in the more general
affine_matrix_from_points function.
The returned matrix is a similarity or Euclidean transformation matrix.
This function has a fast C implementation in transformations.c.
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scale=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3))
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
return affine_matrix_from_points(v0, v1, shear=False,
scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
... if not numpy.allclose(R0, R1): print(axes, "failed")
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
"""Return Euler angles from quaternion for specified axis sequence.
>>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(angles, [0.123, 0, 0])
True
"""
return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis + 1
j = _NEXT_AXIS[i+parity-1] + 1
k = _NEXT_AXIS[i-parity] + 1
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
q = numpy.empty((4, ))
if repetition:
q[0] = cj*(cc - ss)
q[i] = cj*(cs + sc)
q[j] = sj*(cc + ss)
q[k] = sj*(cs - sc)
else:
q[0] = cj*cc + sj*ss
q[i] = cj*sc - sj*cs
q[j] = cj*ss + sj*cc
q[k] = cj*cs - sj*sc
if parity:
q[j] *= -1.0
return q
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, [1, 0, 0])
>>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
True
"""
q = numpy.array([0.0, axis[0], axis[1], axis[2]])
qlen = vector_norm(q)
if qlen > _EPS:
q *= math.sin(angle/2.0) / qlen
q[0] = math.cos(angle/2.0)
return q
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
n = numpy.dot(q, q)
if n < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / n)
q = numpy.outer(q, q)
return numpy.array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
"""Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
>>> is_same_quaternion(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
>>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
>>> is_same_quaternion(quaternion_from_matrix(R, isprecise=False),
... quaternion_from_matrix(R, isprecise=True))
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
if isprecise:
q = numpy.empty((4, ))
t = numpy.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 0, 1, 2
if M[1, 1] > M[0, 0]:
i, j, k = 1, 2, 0
if M[2, 2] > M[i, i]:
i, j, k = 2, 0, 1
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q = q[[3, 0, 1, 2]]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
[m01+m10, m11-m00-m22, 0.0, 0.0],
[m02+m20, m12+m21, m22-m00-m11, 0.0],
[m21-m12, m02-m20, m10-m01, m00+m11+m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = numpy.linalg.eigh(K)
q = V[[3, 0, 1, 2], numpy.argmax(w)]
if q[0] < 0.0:
numpy.negative(q, q)
return q
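# Editorial note (not part of the original module): when isprecise is False,
# quaternion_from_matrix recovers the quaternion as the eigenvector of the
# symmetric 4x4 matrix K belonging to its largest eigenvalue, which stays
# well-behaved even for noisy, not exactly orthonormal rotation matrices.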
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
>>> numpy.allclose(q, [28, -44, -14, 48])
True
"""
w0, x0, y0, z0 = quaternion0
w1, x1, y1, z1 = quaternion1
return numpy.array([
-x1*x0 - y1*y0 - z1*z0 + w1*w0,
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q
def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q / numpy.dot(q, q)
def quaternion_real(quaternion):
"""Return real part of quaternion.
>>> quaternion_real([3, 0, 1, 2])
3.0
"""
return float(quaternion[0])
def quaternion_imag(quaternion):
"""Return imaginary part of quaternion.
>>> quaternion_imag([3, 0, 1, 2])
array([ 0., 1., 2.])
"""
return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
"""Return spherical linear interpolation between two quaternions.
>>> q0 = random_quaternion()
>>> q1 = random_quaternion()
>>> q = quaternion_slerp(q0, q1, 0)
>>> numpy.allclose(q, q0)
True
>>> q = quaternion_slerp(q0, q1, 1, 1)
>>> numpy.allclose(q, q1)
True
>>> q = quaternion_slerp(q0, q1, 0.5)
>>> angle = math.acos(numpy.dot(q0, q))
>>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
True
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = numpy.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
numpy.negative(q1, q1)
angle = math.acos(d) + spin * math.pi
if abs(angle) < _EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
def random_quaternion(rand=None):
"""Return uniform random unit quaternion.
rand: array like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
>>> q = random_quaternion()
>>> numpy.allclose(1, vector_norm(q))
True
>>> q = random_quaternion(numpy.random.random(3))
>>> len(q.shape), q.shape[0]==4
(1, True)
"""
if rand is None:
rand = numpy.random.rand(3)
else:
assert len(rand) == 3
r1 = numpy.sqrt(1.0 - rand[0])
r2 = numpy.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1,
numpy.cos(t1)*r1, numpy.sin(t2)*r2])
def random_rotation_matrix(rand=None):
"""Return uniform random rotation matrix.
rand: array like
Three independent random variables that are uniformly distributed
between 0 and 1 for each returned quaternion.
>>> R = random_rotation_matrix()
>>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
True
"""
return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
"""Virtual Trackball Control.
>>> ball = Arcball()
>>> ball = Arcball(initial=numpy.identity(4))
>>> ball.place([320, 320], 320)
>>> ball.down([500, 250])
>>> ball.drag([475, 275])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 3.90583455)
True
>>> ball = Arcball(initial=[1, 0, 0, 0])
>>> ball.place([320, 320], 320)
>>> ball.setaxes([1, 1, 0], [-1, 1, 0])
>>> ball.constrain = True
>>> ball.down([400, 200])
>>> ball.drag([200, 400])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 0.2055924)
True
>>> ball.next()
"""
def __init__(self, initial=None):
"""Initialize virtual trackball control.
initial : quaternion or rotation matrix
"""
self._axis = None
self._axes = None
self._radius = 1.0
self._center = [0.0, 0.0]
self._vdown = numpy.array([0.0, 0.0, 1.0])
self._constrain = False
if initial is None:
self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
else:
initial = numpy.array(initial, dtype=numpy.float64)
if initial.shape == (4, 4):
self._qdown = quaternion_from_matrix(initial)
elif initial.shape == (4, ):
initial /= vector_norm(initial)
self._qdown = initial
else:
raise ValueError("initial not a quaternion or matrix")
self._qnow = self._qpre = self._qdown
def place(self, center, radius):
"""Place Arcball, e.g. when window size changes.
center : sequence[2]
Window coordinates of trackball center.
radius : float
Radius of trackball in window coordinates.
"""
self._radius = float(radius)
self._center[0] = center[0]
self._center[1] = center[1]
def setaxes(self, *axes):
"""Set axes to constrain rotations."""
        if not axes:
self._axes = None
else:
self._axes = [unit_vector(axis) for axis in axes]
@property
def constrain(self):
"""Return state of constrain to axis mode."""
return self._constrain
@constrain.setter
def constrain(self, value):
"""Set state of constrain to axis mode."""
self._constrain = bool(value)
def down(self, point):
"""Set initial cursor window coordinates and pick constrain-axis."""
self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
self._qdown = self._qpre = self._qnow
if self._constrain and self._axes is not None:
self._axis = arcball_nearest_axis(self._vdown, self._axes)
self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
else:
self._axis = None
def drag(self, point):
"""Update current cursor window coordinates."""
vnow = arcball_map_to_sphere(point, self._center, self._radius)
if self._axis is not None:
vnow = arcball_constrain_to_axis(vnow, self._axis)
self._qpre = self._qnow
t = numpy.cross(self._vdown, vnow)
if numpy.dot(t, t) < _EPS:
self._qnow = self._qdown
else:
q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
self._qnow = quaternion_multiply(q, self._qdown)
def next(self, acceleration=0.0):
"""Continue rotation in direction of last drag."""
q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
self._qpre, self._qnow = self._qnow, q
def matrix(self):
"""Return homogeneous rotation matrix."""
return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
"""Return unit sphere coordinates from window coordinates."""
v0 = (point[0] - center[0]) / radius
v1 = (center[1] - point[1]) / radius
n = v0*v0 + v1*v1
if n > 1.0:
# position outside of sphere
n = math.sqrt(n)
return numpy.array([v0/n, v1/n, 0.0])
else:
return numpy.array([v0, v1, math.sqrt(1.0 - n)])
def arcball_constrain_to_axis(point, axis):
"""Return sphere point perpendicular to axis."""
v = numpy.array(point, dtype=numpy.float64, copy=True)
a = numpy.array(axis, dtype=numpy.float64, copy=True)
v -= a * numpy.dot(a, v) # on plane
n = vector_norm(v)
if n > _EPS:
if v[2] < 0.0:
numpy.negative(v, v)
v /= n
return v
if a[2] == 1.0:
return numpy.array([1.0, 0.0, 0.0])
return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
"""Return axis, which arc is nearest to point."""
point = numpy.array(point, dtype=numpy.float64, copy=False)
nearest = None
mx = -1.0
for axis in axes:
t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
if t > mx:
nearest = axis
mx = t
return nearest
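# Editorial note (not part of the original module): a full trackball drag
# combines the helpers above: down() and drag() map window coordinates onto the
# unit sphere via arcball_map_to_sphere, optionally snap them to the nearest
# constraint arc, and compose the rotation quaternion [v0.v1, v0 x v1] with the
# orientation held at mouse-down.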
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
"""Return length, i.e. Euclidean norm, of ndarray along axis.
>>> v = numpy.random.random(3)
>>> n = vector_norm(v)
>>> numpy.allclose(n, numpy.linalg.norm(v))
True
>>> v = numpy.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> v = numpy.random.rand(5, 4, 3)
>>> n = numpy.empty((5, 3))
>>> vector_norm(v, axis=1, out=n)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1])
1.0
"""
data = numpy.array(data, dtype=numpy.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(numpy.dot(data, data))
data *= data
out = numpy.atleast_1d(numpy.sum(data, axis=axis))
numpy.sqrt(out, out)
return out
else:
data *= data
numpy.sum(data, axis=axis, out=out)
numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
"""Return ndarray normalized by length, i.e. Euclidean norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3))
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1]))
[1.0]
"""
if out is None:
data = numpy.array(data, dtype=numpy.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(numpy.dot(data, data))
return data
else:
if out is not data:
out[:] = numpy.array(data, copy=False)
data = out
length = numpy.atleast_1d(numpy.sum(data*data, axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
data /= length
if out is None:
return data
def random_vector(size):
"""Return array of random doubles in the half-open interval [0.0, 1.0).
>>> v = random_vector(10000)
>>> numpy.all(v >= 0) and numpy.all(v < 1)
True
>>> v0 = random_vector(10)
>>> v1 = random_vector(10)
>>> numpy.any(v0 == v1)
False
"""
return numpy.random.random(size)
def vector_product(v0, v1, axis=0):
"""Return vector perpendicular to vectors.
>>> v = vector_product([2, 0, 0], [0, 3, 0])
>>> numpy.allclose(v, [0, 0, 6])
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> v = vector_product(v0, v1)
>>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> v = vector_product(v0, v1, axis=1)
>>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
True
"""
return numpy.cross(v0, v1, axis=axis)
def angle_between_vectors(v0, v1, directed=True, axis=0):
"""Return angle between vectors.
If directed is False, the input vectors are interpreted as undirected axes,
i.e. the maximum angle is pi/2.
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
>>> numpy.allclose(a, math.pi)
True
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
>>> numpy.allclose(a, 0)
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> a = angle_between_vectors(v0, v1)
>>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> a = angle_between_vectors(v0, v1, axis=1)
>>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
dot = numpy.sum(v0 * v1, axis=axis)
dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
return numpy.arccos(dot if directed else numpy.fabs(dot))
def inverse_matrix(matrix):
"""Return inverse of square transformation matrix.
>>> M0 = random_rotation_matrix()
>>> M1 = inverse_matrix(M0.T)
>>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
True
>>> for size in range(1, 7):
... M0 = numpy.random.rand(size, size)
... M1 = inverse_matrix(M0)
... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
"""
return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
"""Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
"""
M = numpy.identity(4)
for i in matrices:
M = numpy.dot(M, i)
return M
def is_same_transform(matrix0, matrix1):
"""Return True if two matrices perform same transformation.
>>> is_same_transform(numpy.identity(4), numpy.identity(4))
True
>>> is_same_transform(numpy.identity(4), random_rotation_matrix())
False
"""
matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
matrix0 /= matrix0[3, 3]
matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
matrix1 /= matrix1[3, 3]
return numpy.allclose(matrix0, matrix1)
def is_same_quaternion(q0, q1):
"""Return True if two quaternions are equal."""
q0 = numpy.array(q0)
q1 = numpy.array(q1)
return numpy.allclose(q0, q1) or numpy.allclose(q0, -q1)
def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
"""Try import all public attributes from module into global namespace.
Existing attributes with name clashes are renamed with prefix.
Attributes starting with underscore are ignored by default.
Return True on successful import.
"""
import warnings
from importlib import import_module
try:
if not package:
module = import_module(name)
else:
module = import_module('.' + name, package=package)
except ImportError:
if warn:
warnings.warn("failed to import module %s" % name)
else:
for attr in dir(module):
if ignore and attr.startswith(ignore):
continue
if prefix:
if attr in globals():
globals()[prefix + attr] = globals()[attr]
elif warn:
warnings.warn("no Python implementation of " + attr)
globals()[attr] = getattr(module, attr)
return True
_import_module('_transformations')
if __name__ == "__main__":
import doctest
import random # noqa: used in doctests
numpy.set_printoptions(suppress=True, precision=5)
doctest.testmod()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from ckeditor.fields import RichTextField
class Question(models.Model):
question = models.CharField(max_length=255, unique=True)
answer = models.CharField(max_length=255)
explanation = RichTextField()
def __str__(self):
return self.question
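# Hedged usage sketch (not part of the original file), e.g. from a Django shell:
#   q = Question.objects.create(question="What is 2 + 2?", answer="4",
#                               explanation="<p>Basic arithmetic.</p>")
#   Question.objects.get(question="What is 2 + 2?").answer  # -> "4"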
|
import sys
import traceback
from language_fragments import load_module
USAGE = "python -m language_fragments mode [options]"
### library `modes`
MODES = {}
## tools
MODES["random_sat_sampler"] = "language_fragments.tools.random_sat_nl"
def main(argv):
"""The main execution point
:param argv: the cli input
"""
if not argv:
        exit('Please specify a mode and settings! Available modes: "%s". Exiting...' % '; '.join(MODES))
## check the mode
    mode = MODES.get(argv[0])
    if mode is None:
        exit('Unknown mode=%s, please choose from `%s`' % (argv[0], '; '.join(MODES)))
## try to execute the target module
try:
mod = load_module(mode)
mod.main(argv)
    except Exception:
        print("Uncaught error encountered during execution!", file=sys.stderr)
        traceback.print_exc(file=sys.stdout)
        raise
finally:
## close the stdout
if sys.stdout != sys.__stdout__:
sys.stdout.close()
if sys.stderr != sys.__stderr__:
sys.stderr.close()
if __name__ == "__main__":
main(sys.argv[1:])
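# Hedged usage example (not part of the original file); the mode name maps to
# the module path registered in MODES above, e.g.:
#   python -m language_fragments random_sat_sampler [options]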
|
# import the necessary packages
import time
import cv2
import imutils
import numpy as np
from imutils.video import FileVideoStream
fvs = FileVideoStream('data/sarwesh.mp4', queue_size=1024).start() # with bag
time.sleep(1.0)
kernelSize = 7           # unused: left over from the commented-out background subtraction below
backgroundHistory = 15   # unused: left over from the commented-out background subtraction below
openposeProtoFile = "dnn_models/pose/coco/pose_deploy_linevec.prototxt"
openposeWeightsFile = "dnn_models/pose/coco/pose_iter_440000.caffemodel"
nPoints = 18
objectdetectionProtoFile = "dnn_models/object_detection/MobileNetSSD_deploy.prototxt"
objectdetectionWeightsFile = "dnn_models/object_detection/MobileNetSSD_deploy.caffemodel"
# COCO Output Format
keypointsMapping = ['Nose', 'Neck', 'R-Sho', 'R-Elb', 'R-Wr', 'L-Sho', 'L-Elb', 'L-Wr', 'R-Hip', 'R-Knee', 'R-Ank',
'L-Hip', 'L-Knee', 'L-Ank', 'R-Eye', 'L-Eye', 'R-Ear', 'L-Ear']
POSE_PAIRS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7],
[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13],
[1, 0], [0, 14], [14, 16], [0, 15], [15, 17],
[2, 17], [5, 16]]
# index of PAFs corresponding to the POSE_PAIRS
# e.g. for POSE_PAIR (1, 2), the PAFs are located at indices (31, 32) of the output; similarly, (1, 5) -> (39, 40), and so on.
mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44],
[19, 20], [21, 22], [23, 24], [25, 26], [27, 28], [29, 30],
[47, 48], [49, 50], [53, 54], [51, 52], [55, 56],
[37, 38], [45, 46]]
colors = [[0, 100, 255], [0, 100, 255], [0, 255, 255], [0, 100, 255], [0, 255, 255], [0, 100, 255],
[0, 255, 0], [255, 200, 100], [255, 0, 255], [0, 255, 0], [255, 200, 100], [255, 0, 255],
[0, 0, 255], [255, 0, 0], [200, 200, 0], [255, 0, 0], [200, 200, 0], [0, 0, 0]]
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
def getKeypoints(prob_map, thres=0.1):
map_smooth = cv2.GaussianBlur(prob_map, (3, 3), 0, 0)
map_mask = np.uint8(map_smooth > thres)
keypoints_array = []
# find the blobs
    cnts = cv2.findContours(map_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(cnts)  # handles the OpenCV 3 vs 4 return-value difference
# for each blob find the maxima
for cnt in contours:
blob_mask = np.zeros(map_mask.shape)
blob_mask = cv2.fillConvexPoly(blob_mask, cnt, 1)
masked_prob_map = map_smooth * blob_mask
_, max_val, _, max_loc = cv2.minMaxLoc(masked_prob_map)
keypoints_array.append(max_loc + (prob_map[max_loc[1], max_loc[0]],))
return keypoints_array
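# Hedged sketch (not part of the original script): getKeypoints on a synthetic
# probability map with a single smoothed peak should return one (x, y, score)
# tuple located at that peak.
def _demo_getKeypoints():
    prob_map = np.zeros((46, 46), dtype=np.float32)
    prob_map[20, 30] = 1.0                        # single peak at x=30, y=20
    prob_map = cv2.GaussianBlur(prob_map, (7, 7), 0)
    print(getKeypoints(prob_map, thres=0.01))     # e.g. [(30, 20, <peak probability>)]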
# Find valid connections between the different joints of all persons present
def getValidPairs(generated_output):
validpairs = []
invalidpairs = []
n_interp_samples = 10
paf_score_th = 0.1
conf_th = 0.7
# loop for every POSE_PAIR
for k in range(len(mapIdx)):
# A->B constitute a limb
pafA = generated_output[0, mapIdx[k][0], :, :]
pafB = generated_output[0, mapIdx[k][1], :, :]
pafA = cv2.resize(pafA, (frameWidth, frameHeight))
pafB = cv2.resize(pafB, (frameWidth, frameHeight))
# Find the keypoints for the first and second limb
candA = detected_keypoints[POSE_PAIRS[k][0]]
candB = detected_keypoints[POSE_PAIRS[k][1]]
nA = len(candA)
nB = len(candB)
        # If keypoints for the joint pair are detected,
        # check every joint in candA against every joint in candB:
        # calculate the distance vector between the two joints,
        # find the PAF values at a set of interpolated points between the joints,
        # and use the PAF line integral to score and validate the connection
if nA != 0 and nB != 0:
valid_pair = np.zeros((0, 3))
for i in range(nA):
max_j = -1
max_score = -1
found = 0
for j in range(nB):
# Find d_ij
d_ij = np.subtract(candB[j][:2], candA[i][:2])
norm = np.linalg.norm(d_ij)
if norm:
d_ij = d_ij / norm
else:
continue
# Find p(u)
interp_coord = list(zip(np.linspace(candA[i][0], candB[j][0], num=n_interp_samples),
np.linspace(candA[i][1], candB[j][1], num=n_interp_samples)))
# Find L(p(u))
paf_interp = []
                    for p in range(len(interp_coord)):
                        paf_interp.append([pafA[int(round(interp_coord[p][1])), int(round(interp_coord[p][0]))],
                                           pafB[int(round(interp_coord[p][1])), int(round(interp_coord[p][0]))]])
# Find E
paf_scores = np.dot(paf_interp, d_ij)
avg_paf_score = sum(paf_scores) / len(paf_scores)
# Check if the connection is valid
                    # If the fraction of interpolated vectors aligned with the PAF is higher than the threshold -> valid pair
if (len(np.where(paf_scores > paf_score_th)[0]) / n_interp_samples) > conf_th:
if avg_paf_score > max_score:
max_j = j
max_score = avg_paf_score
found = 1
# Append the connection to the list
if found:
valid_pair = np.append(valid_pair, [[candA[i][3], candB[max_j][3], max_score]], axis=0)
# Append the detected connections to the global list
validpairs.append(valid_pair)
else: # If no keypoints are detected
invalidpairs.append(k)
validpairs.append([])
return validpairs, invalidpairs
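# Hedged illustration (not part of the original script): the connection score
# used above is the mean dot product between the unit vector joining two
# candidate joints and the part-affinity field sampled along that segment;
# a field perfectly aligned with the limb yields a score of 1.0.
def _demo_paf_score():
    d_ij = np.array([1.0, 0.0])              # unit vector from joint A to joint B
    paf_interp = np.tile(d_ij, (10, 1))      # field aligned with the limb at 10 samples
    paf_scores = np.dot(paf_interp, d_ij)    # all ones
    print(paf_scores.mean())                 # 1.0 -> passes paf_score_th and conf_th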
# This function creates a list of keypoints belonging to each person
# For each detected valid pair, it assigns the joint(s) to a person
def getPersonwiseKeypoints(validpairs, invalidpairs):
# the last number in each row is the overall score
personwise_keypoints = -1 * np.ones((0, 19))
for k in range(len(mapIdx)):
if k not in invalidpairs:
partAs = validpairs[k][:, 0]
partBs = validpairs[k][:, 1]
indexA, indexB = np.array(POSE_PAIRS[k])
for i in range(len(validpairs[k])):
found = 0
person_idx = -1
for j in range(len(personwise_keypoints)):
if personwise_keypoints[j][indexA] == partAs[i]:
person_idx = j
found = 1
break
if found:
personwise_keypoints[person_idx][indexB] = partBs[i]
                    personwise_keypoints[person_idx][-1] += keypoints_list[partBs[i].astype(int), 2] + validpairs[k][i][2]
                # if partA was not found in any existing person row, start a new row
elif not found and k < 17:
row = -1 * np.ones(19)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
# add the keypoint_scores for the two keypoints and the paf_score
row[-1] = sum(keypoints_list[validpairs[k][i, :2].astype(int), 2]) + validpairs[k][i][2]
personwise_keypoints = np.vstack([personwise_keypoints, row])
return personwise_keypoints
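# Editorial note (not part of the original script): each row of
# personwise_keypoints holds, per person, 18 indices into keypoints_list
# (one per COCO part, -1 if missing) plus a 19th slot with the accumulated
# keypoint and PAF score.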
# load the OpenPose and MobileNet SSD models once, before the frame loop,
# instead of re-reading both Caffe networks on every frame
net = cv2.dnn.readNetFromCaffe(openposeProtoFile, openposeWeightsFile)
objnet = cv2.dnn.readNetFromCaffe(objectdetectionProtoFile, objectdetectionWeightsFile)
while fvs.more():
    frame = fvs.read()
    frame = imutils.resize(frame, width=960)
    frameClone = frame.copy()
    frameWidth = frame.shape[1]
    frameHeight = frame.shape[0]
    # fix the input height and derive the input width from the frame's aspect ratio
    inHeight = 368
    inWidth = int((inHeight / frameHeight) * frameWidth)
    inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
    net.setInput(inpBlob)
    output = net.forward()
# pass the blob through the network and obtain the detections and predictions
    # MobileNet SSD expects its own 300x300 blob (scale 1/127.5, mean 127.5),
    # so build one rather than reusing the OpenPose blob
    objBlob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843, (300, 300), 127.5)
    objnet.setInput(objBlob)
    detections = objnet.forward()
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > 0.6:
# extract the index of the class label from the
# `detections`, then compute the (x, y)-coordinates of
# the bounding box for the object
idx = int(detections[0, 0, i, 1])
box = detections[0, 0, i, 3:7] * np.array([frameWidth, frameHeight, frameWidth, frameHeight])
(startX, startY, endX, endY) = box.astype("int")
# draw the prediction on the frame
label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
cv2.rectangle(frame, (startX, startY), (endX, endY), COLORS[idx], 2)
y = startY - 15 if startY - 15 > 15 else startY + 15
cv2.putText(frameClone, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
# Applying background subtraction on the capture frame
# frame = fgbg.apply(frame)
detected_keypoints = []
keypoints_list = np.zeros((0, 3))
keypoint_id = 0
threshold = 0.1
for part in range(nPoints):
probMap = output[0, part, :, :]
probMap = cv2.resize(probMap, (frame.shape[1], frame.shape[0]))
keypoints = getKeypoints(probMap, threshold)
keypoints_with_id = []
for i in range(len(keypoints)):
keypoints_with_id.append(keypoints[i] + (keypoint_id,))
keypoints_list = np.vstack([keypoints_list, keypoints[i]])
keypoint_id += 1
detected_keypoints.append(keypoints_with_id)
# for i in range(nPoints):
# for j in range(len(detected_keypoints[i])):
# cv2.circle(frame, detected_keypoints[i][j][0:2], 5, colors[i], -1, cv2.LINE_AA)
# cv2.imshow("Keypoints", frame)
valid_pairs, invalid_pairs = getValidPairs(output)
personwiseKeypoints = getPersonwiseKeypoints(valid_pairs, invalid_pairs)
for i in range(17):
for n in range(len(personwiseKeypoints)):
index = personwiseKeypoints[n][np.array(POSE_PAIRS[i])]
if -1 in index:
continue
B = np.int32(keypoints_list[index.astype(int), 0])
A = np.int32(keypoints_list[index.astype(int), 1])
cv2.line(frame, (B[0], A[0]), (B[1], A[1]), colors[i], 2, cv2.LINE_AA)
# frame = cv2.addWeighted(frameClone, 0.5, frame, 0.5, 0.0)
frame = cv2.addWeighted(frameClone, 0.8, frame, 0.5, 0.0)
cv2.imshow("Frame", frame)
k = cv2.waitKey(50) & 0xff
if k == 27:
break
# do a bit of cleanup
cv2.destroyAllWindows()
fvs.stop()
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
'''
MIT License
Copyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import torch
from torch.nn import init
import torch.nn as nn
import torch.nn.functional as F
import functools
def load_state_dict(state_dict, net):
model_dict = net.state_dict()
pretrained_dict = {k: v for k, v in state_dict.items() if k in model_dict}
for k, v in pretrained_dict.items():
if v.size() == model_dict[k].size():
model_dict[k] = v
not_initialized = set()
for k, v in model_dict.items():
if k not in pretrained_dict or v.size() != pretrained_dict[k].size():
not_initialized.add(k.split('.')[0])
print('not initialized', sorted(not_initialized))
net.load_state_dict(model_dict)
return net
def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):
return nn.Conv2d(in_planes, out_planes, kernel_size=3,
stride=strd, padding=padding, bias=bias)
def init_weights(net, init_type='normal', init_gain=0.02):
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find(
'BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
if len(gpu_ids) > 0:
assert (torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
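# Added usage sketch (not part of the original file): a typical CPU-only call
# to init_net; with gpu_ids left empty, the DataParallel wrapping is skipped.
def _demo_init_net():
    net = nn.Sequential(conv3x3(3, 16), nn.BatchNorm2d(16), nn.ReLU())
    net = init_net(net, init_type='kaiming')
    return net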
class CustomBCELoss(nn.Module):
def __init__(self, brock=False, gamma=None):
super(CustomBCELoss, self).__init__()
self.brock = brock
self.gamma = gamma
def forward(self, pred, gt, gamma, w=None):
x_hat = torch.clamp(pred, 1e-5, 1.0-1e-5) # prevent log(0) from happening
gamma = gamma[:,None,None] if self.gamma is None else self.gamma
if self.brock:
x = 3.0*gt - 1.0 # rescaled to [-1,2]
loss = -(gamma*x*torch.log(x_hat) + (1.0-gamma)*(1.0-x)*torch.log(1.0-x_hat))
else:
loss = -(gamma*gt*torch.log(x_hat) + (1.0-gamma)*(1.0-gt)*torch.log(1.0-x_hat))
if w is not None:
if len(w.size()) == 1:
w = w[:,None,None]
return (loss * w).mean()
else:
return loss.mean()
class CustomMSELoss(nn.Module):
def __init__(self, gamma=None):
super(CustomMSELoss, self).__init__()
self.gamma = gamma
    def forward(self, pred, gt, gamma, w=None):
        gamma = gamma[:,None,None] if self.gamma is None else self.gamma
        weight = gamma * gt + (1.0-gamma) * (1 - gt)
        # keep the loss elementwise so the optional weight w is applied
        # before reduction, mirroring CustomBCELoss above
        loss = weight * (pred - gt).pow(2)
        if w is not None:
            if len(w.size()) == 1:
                w = w[:,None,None]
            return (loss * w).mean()
        else:
            return loss.mean()
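# Added usage sketch: exercising the custom losses above with dummy tensors.
# The [batch, points, channels] shapes are an assumption inferred from the
# gamma[:, None, None] broadcasting used in both forward() methods.
def _demo_custom_losses():
    pred = torch.rand(2, 4, 8).clamp(1e-3, 1 - 1e-3)  # predicted occupancy
    gt = torch.randint(0, 2, (2, 4, 8)).float()       # binary ground truth
    gamma = torch.full((2,), 0.7)                     # per-sample positive weight
    print('bce:', CustomBCELoss()(pred, gt, gamma).item())
    print('mse:', CustomMSELoss()(pred, gt, gamma).item())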
def createMLP(dims, norm='bn', activation='relu', last_op=nn.Tanh(), dropout=False):
act = None
if activation == 'relu':
act = nn.ReLU()
if activation == 'lrelu':
act = nn.LeakyReLU()
if activation == 'selu':
act = nn.SELU()
if activation == 'elu':
act = nn.ELU()
if activation == 'prelu':
act = nn.PReLU()
mlp = []
for i in range(1,len(dims)):
if norm == 'bn':
mlp += [ nn.Linear(dims[i-1], dims[i]),
nn.BatchNorm1d(dims[i])]
if norm == 'in':
mlp += [ nn.Linear(dims[i-1], dims[i]),
nn.InstanceNorm1d(dims[i])]
if norm == 'wn':
mlp += [ nn.utils.weight_norm(nn.Linear(dims[i-1], dims[i]), name='weight')]
if norm == 'none':
mlp += [ nn.Linear(dims[i-1], dims[i])]
if i != len(dims)-1:
if act is not None:
mlp += [act]
if dropout:
mlp += [nn.Dropout(0.2)]
if last_op is not None:
mlp += [last_op]
return mlp
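# Added usage sketch: createMLP returns a plain list of layers, so callers are
# expected to unpack it into nn.Sequential themselves. The dimensions here are
# illustrative, not taken from the original file.
def _demo_create_mlp():
    layers = createMLP([257, 1024, 512, 1], norm='none', last_op=nn.Sigmoid())
    mlp = nn.Sequential(*layers)
    x = torch.rand(8, 257)
    return mlp(x)  # shape: [8, 1]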
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Construct model and evaluation metrics for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import protein_dataset
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib.layers.python.layers import optimizers as optimizers_lib
_THRESHOLDS_FOR_RECALL_METRIC = [2, 3, 5, 10]
REPRESENTATION_KEY = 'representation'
LOGITS_KEY = 'logits'
def _f1_score(labels, predictions):
"""Computes F1 score, i.e. the harmonic mean of precision and recall."""
precision = tf.metrics.precision(labels, predictions)
recall = tf.metrics.recall(labels, predictions)
return (2 * precision[0] * recall[0] / (precision[0] + recall[0] + 1e-5),
tf.group(precision[1], recall[1]))
def _mean_examplewise_f1_score(labels, predictions):
"""Calculates mean example-wise F1 score (micro-F1).
Args:
labels: 2D tensor of one hots.
predictions: 2D tensor of one hots.
Returns:
metric, update ops from tf.metrics.mean
"""
labels = tf.cast(labels, tf.float32)
predictions = tf.cast(predictions, tf.float32)
assert len(labels.shape) == 2
assert len(predictions.shape) == 2
true_positives = labels * predictions
false_positives = predictions * (1 - labels)
false_negatives = (1 - predictions) * labels
true_positives = tf.reduce_sum(true_positives, axis=1)
false_positives = tf.reduce_sum(false_positives, axis=1)
false_negatives = tf.reduce_sum(false_negatives, axis=1)
precision = true_positives / (true_positives + false_positives + 1e-5)
recall = true_positives / (true_positives + false_negatives)
f1 = 2 * precision * recall / (precision + recall + 1e-5)
# F1 score is not defined where there are no correct labels, ignore these:
well_defined = tf.greater(true_positives + false_negatives, 0)
# Remove any nans (these new 0s will be ignored by the weights anyway):
f1 = tf.where(well_defined, f1, tf.zeros_like(f1))
return tf.metrics.mean(f1, weights=well_defined)
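# Worked example (added): for labels [[1, 0, 1]] and predictions [[1, 1, 0]],
# TP=1, FP=1, FN=1, so precision = recall = 0.5 and the example-wise
# F1 = 2 * 0.5 * 0.5 / (0.5 + 0.5) = 0.5 (up to the 1e-5 smoothing terms).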
def _custom_recall_at_k(labels_as_multi_hot, predictions, k):
"""Calculates recall_at_k metric with multi-hot labels.
For each example which contains at least one label, a recall-at-k is
calculated by assessing what proportion of these labels are in the top k
predictions. This metric is the mean of these values.
Args:
labels_as_multi_hot: a tensor of [batch_size, num_output_classes] where
elements are zero (absent) or one (present).
    predictions: a tensor of [batch_size, num_output_classes] where elements
are floats indicating the probability of class membership.
k: number of top predictions to consider (must be <= num_output_classes).
Returns:
mean: A scalar `Tensor` representing the current mean, the value of `total`
divided by `count` (of finite values).
update_op: An operation that increments the `total` and `count` variables
appropriately and whose (scalar) value matches the mean_value.
"""
labels_as_multi_hot = tf.cast(labels_as_multi_hot, tf.float32)
num_output_classes = tf.shape(labels_as_multi_hot)[1]
_, indices = tf.math.top_k(predictions, k=k)
predictions_top_k_as_multi_hot = _indices_to_multihot(indices,
num_output_classes)
true_positives_tensor = tf.math.logical_and(
tf.cast(labels_as_multi_hot, tf.bool),
tf.cast(predictions_top_k_as_multi_hot, tf.bool))
false_negatives_tensor = tf.math.greater(labels_as_multi_hot,
predictions_top_k_as_multi_hot)
true_positives_per_example = tf.count_nonzero(true_positives_tensor, axis=1)
false_negatives_per_example = tf.count_nonzero(false_negatives_tensor, axis=1)
recall_per_example = true_positives_per_example / (
true_positives_per_example + false_negatives_per_example)
is_finite = tf.is_finite(recall_per_example) # To filter out no label cases.
recall_per_example_finite_only = tf.boolean_mask(recall_per_example,
is_finite)
return tf.metrics.mean(recall_per_example_finite_only)
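# Worked example (added): with multi-hot labels [[1, 0, 1, 0]] and scores
# [[0.9, 0.8, 0.1, 0.0]], the top-2 predictions {0, 1} cover one of the two
# true labels {0, 2}, so recall@2 for that example is 1/2 = 0.5.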
def _make_evaluation_metrics(labels, predictions, num_output_classes, hparams):
"""Construct various eval metrics.
Args:
labels: dict with ground truth data necessary for computing metrics.
predictions: dict containing Tensors for predictions.
num_output_classes: number of different labels.
hparams: tf.contrib.HParams object.
Returns:
A dict where the values obey the tf.metrics API.
"""
labels_op = labels[protein_dataset.LABEL_KEY]
multi_hot_labels = _indices_to_multihot(labels_op, num_output_classes)
predictions_as_floats = predictions[protein_dataset.LABEL_KEY]
recall_threshold = hparams.decision_threshold
predictions_as_bools = tf.greater(predictions_as_floats,
tf.constant(recall_threshold))
metrics = {
'precision_at_threshold':
tf.metrics.precision(
labels=multi_hot_labels, predictions=predictions_as_bools),
'recall_at_threshold':
tf.metrics.recall(
labels=multi_hot_labels, predictions=predictions_as_bools),
'f1_at_threshold':
_f1_score(labels=multi_hot_labels, predictions=predictions_as_bools),
'mean_examplewise_f1_at_threshold':
_mean_examplewise_f1_score(
labels=multi_hot_labels, predictions=predictions_as_bools),
'true_positives':
tf.metrics.true_positives(
labels=multi_hot_labels, predictions=predictions_as_bools),
'false_positives':
tf.metrics.false_positives(
labels=multi_hot_labels, predictions=predictions_as_bools)
}
for k in _THRESHOLDS_FOR_RECALL_METRIC:
metrics['recall@%d' % k] = _custom_recall_at_k(
labels_as_multi_hot=multi_hot_labels,
predictions=predictions_as_floats,
k=k)
return metrics
def _set_padding_to_sentinel(padded_representations, sequence_lengths,
sentinel):
"""Set padding on batch of padded representations to a sentinel value.
Useful for preparing a batch of sequence representations for max or average
pooling.
Args:
padded_representations: float32 tensor, shape (batch, longest_sequence, d),
where d is some arbitrary embedding dimension. E.g. the output of
tf.data.padded_batch.
sequence_lengths: tensor, shape (batch,). Each entry corresponds to the
original length of the sequence (before padding) of that sequence within
the batch.
sentinel: float32 tensor, shape: broadcastable to padded_representations.
Returns:
tensor of same shape as padded_representations, where all entries
in the sequence dimension that came from padding (i.e. are beyond index
sequence_length[i]) are set to sentinel.
"""
sequence_dimension = 1
embedding_dimension = 2
with tf.variable_scope('set_padding_to_sentinel', reuse=False):
longest_sequence_length = tf.shape(
padded_representations)[sequence_dimension]
embedding_size = tf.shape(padded_representations)[embedding_dimension]
seq_mask = tf.sequence_mask(sequence_lengths, longest_sequence_length)
seq_mask = tf.expand_dims(seq_mask, [embedding_dimension])
is_not_padding = tf.tile(seq_mask, [1, 1, embedding_size])
full_sentinel = tf.zeros_like(padded_representations)
full_sentinel = full_sentinel + tf.convert_to_tensor(sentinel)
per_location_representations = tf.where(
is_not_padding, padded_representations, full_sentinel)
return per_location_representations
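# Shape sketch (added): for a batch padded to length 4 with
# sequence_lengths = [2, 4], the feature vectors at positions 2 and 3 of the
# first sequence are overwritten with the sentinel (e.g. 0.0 before sum or
# average pooling, -inf before max pooling); the second sequence is untouched.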
def _make_per_sequence_features(per_location_representations, raw_features,
hparams):
"""Aggregate representations across the sequence dimension."""
sequence_lengths = raw_features[protein_dataset.SEQUENCE_LENGTH_KEY]
per_location_representations = _set_padding_to_sentinel(
per_location_representations, sequence_lengths, tf.constant(0.))
# We average the representations across the sequence length dimension:
# tf.reduce_mean(..., axis=1) is problematic, since different batches
# may be dynamically padded to different lengths. Instead, we normalize
# each element of the batch individually, by the length of each element's
# un-normalized sequence. We raise this to a tunable power to allow the
# tuner to choose between mean and sum-pooling or an intermediate type.
denominator = tf.cast(
tf.expand_dims(
raw_features[protein_dataset.SEQUENCE_LENGTH_KEY], axis=-1),
tf.float32)**hparams.denominator_power
pooled_representation = tf.reduce_sum(
per_location_representations, axis=1) / denominator
pooled_representation = tf.identity(
pooled_representation, name='pooled_representation')
return pooled_representation
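# Note (added): denominator_power interpolates between pooling modes: a value
# of 1.0 recovers length-normalized mean pooling, 0.0 recovers sum pooling,
# and intermediate values give a tunable compromise between the two.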
def _convert_representation_to_prediction_ops(representation, raw_features,
num_output_classes, hparams):
"""Map per-location features to problem-specific prediction ops.
Args:
representation: [batch_size, sequence_length, feature_dim] Tensor.
raw_features: dictionary containing the raw input Tensors; this is the
sequence, keyed by sequence_key.
num_output_classes: number of different labels.
hparams: tf.contrib.HParams object.
Returns:
predictions: dictionary containing Tensors that Estimator
will return as predictions.
predictions_for_loss: Tensor that make_loss() consumes.
"""
per_sequence_features = _make_per_sequence_features(
per_location_representations=representation,
raw_features=raw_features,
hparams=hparams)
logits = tf.layers.dense(
per_sequence_features, num_output_classes, name=LOGITS_KEY)
predictions = {
protein_dataset.LABEL_KEY:
tf.identity(tf.sigmoid(logits), name='predictions')
}
predictions_for_loss = logits
return predictions, predictions_for_loss
def _make_representation(features, hparams, mode):
"""Produces [batch_size, sequence_length, embedding_dim] features.
Args:
features: dict from str to Tensor, containing sequence and sequence length.
hparams: tf.contrib.training.HParams()
mode: tf.estimator.ModeKeys instance.
Returns:
Tensor of shape [batch_size, sequence_length, embedding_dim].
"""
sequence_features = features[protein_dataset.SEQUENCE_KEY]
sequence_lengths = features[protein_dataset.SEQUENCE_LENGTH_KEY]
is_training = mode == tf.estimator.ModeKeys.TRAIN
sequence_features = _conv_layer(
sequence_features=sequence_features,
sequence_lengths=sequence_lengths,
num_units=hparams.filters,
dilation_rate=1,
kernel_size=hparams.kernel_size,
)
for layer_index in range(hparams.num_layers):
sequence_features = _residual_block(
sequence_features=sequence_features,
sequence_lengths=sequence_lengths,
hparams=hparams,
layer_index=layer_index,
activation_fn=tf.nn.relu,
is_training=is_training)
return sequence_features
def _make_prediction_ops(features, hparams, mode, label_vocab):
"""Returns (predictions, predictions_for_loss, representation)."""
representation = _make_representation(features, hparams, mode)
representation = tf.identity(representation, name=REPRESENTATION_KEY)
# Used to save constants in the graph, e.g. for SavedModel.
_ = tf.constant(label_vocab, name='label_vocab')
_ = tf.constant(hparams.decision_threshold, name='decision_threshold')
num_output_classes = len(label_vocab)
predictions, prediction_for_loss = _convert_representation_to_prediction_ops(
representation=representation,
raw_features=features,
num_output_classes=num_output_classes,
hparams=hparams)
return predictions, prediction_for_loss
def _batch_norm(features, is_training):
return tf.layers.batch_normalization(features, training=is_training)
def _conv_layer(sequence_features, sequence_lengths, num_units, dilation_rate,
kernel_size):
"""Return a convolution of the input features that respects sequence len."""
padding_zeroed = _set_padding_to_sentinel(sequence_features, sequence_lengths,
tf.constant(0.))
conved = tf.layers.conv1d(
padding_zeroed,
filters=num_units,
kernel_size=[kernel_size],
dilation_rate=dilation_rate,
padding='same')
# Re-zero padding, because shorter sequences will have their padding
# affected by half the width of the convolution kernel size.
re_zeroed = _set_padding_to_sentinel(conved, sequence_lengths,
tf.constant(0.))
return re_zeroed
def _residual_block(sequence_features, sequence_lengths, hparams, layer_index,
activation_fn, is_training):
"""Construct a single block for a residual network."""
with tf.variable_scope('residual_block_{}'.format(layer_index), reuse=False):
shifted_layer_index = layer_index - hparams.first_dilated_layer + 1
dilation_rate = max(1, hparams.dilation_rate**shifted_layer_index)
num_bottleneck_units = math.floor(
hparams.resnet_bottleneck_factor * hparams.filters)
features = _batch_norm(sequence_features, is_training)
features = activation_fn(features)
features = _conv_layer(
sequence_features=features,
sequence_lengths=sequence_lengths,
num_units=num_bottleneck_units,
dilation_rate=dilation_rate,
kernel_size=hparams.kernel_size,
)
features = _batch_norm(features, is_training=is_training)
features = activation_fn(features)
# The second convolution is purely local linear transformation across
# feature channels, as is done in
# third_party/tensorflow_models/slim/nets/resnet_v2.bottleneck
residual = _conv_layer(
features,
sequence_lengths,
num_units=hparams.filters,
dilation_rate=1,
kernel_size=1)
with_skip_connection = sequence_features + residual
return with_skip_connection
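# Worked example (added): with first_dilated_layer=2 and dilation_rate=3,
# layers 0 and 1 use dilation max(1, 3**(layer_index - 1)) = 1, layer 2 uses 3
# and layer 3 uses 9, so the receptive field grows geometrically with depth.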
def _indices_to_multihot(indices, vocab_size):
"""Converts [batch,n_labels] of indices to [batch,vocab_size] multihot.
Indices can be padded with -1.
Args:
indices: dense tensor of indices [batch, arbitrary_n_labels], padded with -1
if necessary.
vocab_size: integer vocab_size.
Returns:
Multihot float32 tensor of dimension [batch, vocab_size].
e.g. [[0,1],[2,-1]] (vocab_size:4) -> [1,1,0,0], [0,0,1,0]
"""
if len(indices.shape) != 2:
raise ValueError(
'indices_to_multihot expects tensors of dimension 2, got shape %s' %
indices.shape)
sparse_indices = contrib_layers.dense_to_sparse(indices, eos_token=-1)
multihot = tf.sparse.to_indicator(sparse_indices, vocab_size=vocab_size)
multihot = tf.cast(multihot, tf.float32)
return multihot
def _make_loss(predictions_for_loss, labels, num_output_classes):
"""Make scalar loss."""
logits = predictions_for_loss
labels_op = labels[protein_dataset.LABEL_KEY]
# We need to get labels into a multi-hot format:
labels_op = _indices_to_multihot(labels_op, vocab_size=num_output_classes)
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels_op, logits=logits)
loss = tf.reduce_mean(loss)
return loss
def _make_train_op(loss, hparams):
"""Create train op."""
def learning_rate_decay_fn(learning_rate, global_step):
learning_rate = tf.train.exponential_decay(learning_rate, global_step,
hparams.lr_decay_steps,
hparams.lr_decay_rate)
learning_rate = learning_rate * tf.minimum(
tf.cast(global_step / hparams.lr_warmup_steps, tf.float32),
tf.constant(1.))
return learning_rate
return contrib_layers.optimize_loss(
loss=loss,
global_step=tf.train.get_global_step(),
clip_gradients=optimizers_lib.adaptive_clipping_fn(
decay=hparams.gradient_clipping_decay,
report_summary=True,
),
learning_rate=hparams.learning_rate,
learning_rate_decay_fn=learning_rate_decay_fn,
optimizer='Adam')
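# Worked example (added): with learning_rate=1e-3, lr_warmup_steps=1000 and
# global_step=500, the warmup factor is min(500 / 1000, 1) = 0.5, so the
# effective rate is the exponentially decayed 1e-3 scaled by 0.5; after step
# 1000 the warmup factor saturates at 1.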
def make_model_fn(label_vocab, hparams):
"""Returns a model function for estimator given prediction base class.
Args:
label_vocab: list of string.
hparams: tf.contrib.HParams object.
Returns:
A function that returns a tf.estimator.EstimatorSpec
"""
def _model_fn(features, labels, params, mode=None):
"""Returns tf.estimator.EstimatorSpec."""
predictions, predictions_for_loss = _make_prediction_ops(
features=features, hparams=params, mode=mode, label_vocab=label_vocab)
evaluation_hooks = []
num_output_classes = len(label_vocab)
if mode == tf.estimator.ModeKeys.TRAIN:
loss = _make_loss(
predictions_for_loss=predictions_for_loss,
labels=labels,
num_output_classes=num_output_classes)
train_op = _make_train_op(loss=loss, hparams=params)
eval_ops = None
elif mode == tf.estimator.ModeKeys.PREDICT:
loss = None
train_op = None
eval_ops = None
else: # Eval mode.
loss = _make_loss(
predictions_for_loss=predictions_for_loss,
labels=labels,
num_output_classes=num_output_classes)
train_op = None
eval_ops = _make_evaluation_metrics(
labels=labels,
predictions=predictions,
num_output_classes=num_output_classes,
hparams=hparams)
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_ops,
evaluation_hooks=evaluation_hooks,
)
return _model_fn
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from typing import Any, Optional, AsyncIterator as AsyncIteratorType
from collections.abc import AsyncIterator
import logging
import asyncio
import aiohttp
from requests.exceptions import (
ChunkedEncodingError,
StreamConsumedError)
from azure.core.configuration import ConnectionConfiguration
from azure.core.exceptions import ServiceRequestError, ServiceResponseError
from azure.core.pipeline import Pipeline
from ._base import HttpRequest
from ._base_async import (
AsyncHttpTransport,
AsyncHttpResponse,
_ResponseStopIteration)
# Matching requests, because why not?
CONTENT_CHUNK_SIZE = 10 * 1024
_LOGGER = logging.getLogger(__name__)
class AioHttpTransport(AsyncHttpTransport):
"""AioHttp HTTP sender implementation.
Fully asynchronous implementation using the aiohttp library.
:param session: The client session.
:param loop: The event loop.
    :param bool session_owner: Session owner. Defaults to True.
:keyword bool use_env_settings: Uses proxy settings from environment. Defaults to True.
.. admonition:: Example:
.. literalinclude:: ../samples/test_example_async.py
:start-after: [START aiohttp]
:end-before: [END aiohttp]
:language: python
:dedent: 4
:caption: Asynchronous transport with aiohttp.
"""
def __init__(self, *, session=None, loop=None, session_owner=True, **kwargs):
self._loop = loop
self._session_owner = session_owner
self.session = session
self.connection_config = ConnectionConfiguration(**kwargs)
self._use_env_settings = kwargs.pop('use_env_settings', True)
async def __aenter__(self):
await self.open()
return self
async def __aexit__(self, *args): # pylint: disable=arguments-differ
await self.close()
async def open(self):
"""Opens the connection.
"""
if not self.session and self._session_owner:
jar = aiohttp.DummyCookieJar()
self.session = aiohttp.ClientSession(
loop=self._loop,
trust_env=self._use_env_settings,
cookie_jar=jar
)
if self.session is not None:
await self.session.__aenter__()
async def close(self):
"""Closes the connection.
"""
if self._session_owner and self.session:
await self.session.close()
self._session_owner = False
self.session = None
def _build_ssl_config(self, cert, verify): # pylint: disable=no-self-use
ssl_ctx = None
if cert or verify not in (True, False):
import ssl
if verify not in (True, False):
ssl_ctx = ssl.create_default_context(cafile=verify)
else:
ssl_ctx = ssl.create_default_context()
if cert:
ssl_ctx.load_cert_chain(*cert)
return ssl_ctx
return verify
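    # Behavior note (added): when cert is falsy and verify is a plain True or
    # False, `verify` is returned unchanged and aiohttp applies its default
    # SSL handling; a CA bundle path in `verify` or a client `cert` tuple
    # triggers construction of an explicit SSLContext instead.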
def _get_request_data(self, request): #pylint: disable=no-self-use
if request.files:
form_data = aiohttp.FormData()
for form_file, data in request.files.items():
content_type = data[2] if len(data) > 2 else None
try:
form_data.add_field(form_file, data[1], filename=data[0], content_type=content_type)
except IndexError:
raise ValueError("Invalid formdata formatting: {}".format(data))
return form_data
return request.data
async def send(self, request: HttpRequest, **config: Any) -> Optional[AsyncHttpResponse]:
"""Send the request using this HTTP sender.
Will pre-load the body into memory to be available with a sync method.
Pass stream=True to avoid this behavior.
:param request: The HttpRequest object
:type request: ~azure.core.pipeline.transport.HttpRequest
:param config: Any keyword arguments
:return: The AsyncHttpResponse
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
:keyword bool stream: Defaults to False.
        :keyword dict proxies: Dict of proxies to use based on protocol, mapping protocol to proxy URL.
        :keyword str proxy: Proxy to use for all requests.
"""
await self.open()
proxies = config.pop('proxies', None)
if proxies and 'proxy' not in config:
            # aiohttp needs a single proxy, so iterate until we find the right protocol
# Sort by longest string first, so "http" is not used for "https" ;-)
for protocol in sorted(proxies.keys(), reverse=True):
if request.url.startswith(protocol):
config['proxy'] = proxies[protocol]
break
response = None
config['ssl'] = self._build_ssl_config(
cert=config.pop('connection_cert', self.connection_config.cert),
verify=config.pop('connection_verify', self.connection_config.verify)
)
        # If we know for sure there is no body, disable "auto content type".
        # Otherwise, aiohttp will send "application/octet-stream" even for an empty POST request,
        # and that breaks services like storage signature.
if not request.data and not request.files:
config['skip_auto_headers'] = ['Content-Type']
try:
stream_response = config.pop("stream", False)
timeout = config.pop('connection_timeout', self.connection_config.timeout)
read_timeout = config.pop('read_timeout', self.connection_config.read_timeout)
socket_timeout = aiohttp.ClientTimeout(sock_connect=timeout, sock_read=read_timeout)
result = await self.session.request(
request.method,
request.url,
headers=request.headers,
data=self._get_request_data(request),
timeout=socket_timeout,
allow_redirects=False,
**config
)
response = AioHttpTransportResponse(request, result, self.connection_config.data_block_size)
if not stream_response:
await response.load_body()
except aiohttp.client_exceptions.ClientResponseError as err:
raise ServiceResponseError(err, error=err) from err
except aiohttp.client_exceptions.ClientError as err:
raise ServiceRequestError(err, error=err) from err
except asyncio.TimeoutError as err:
raise ServiceResponseError(err, error=err) from err
return response
class AioHttpStreamDownloadGenerator(AsyncIterator):
"""Streams the response body data.
:param pipeline: The pipeline object
    :param response: The client response object; its ``block_size`` attribute
        determines the chunk size read from the connection.
"""
def __init__(self, pipeline: Pipeline, response: AsyncHttpResponse) -> None:
self.pipeline = pipeline
self.request = response.request
self.response = response
self.block_size = response.block_size
self.content_length = int(response.internal_response.headers.get('Content-Length', 0))
self.downloaded = 0
def __len__(self):
return self.content_length
async def __anext__(self):
retry_active = True
retry_total = 3
        retry_interval = 1  # seconds between retry attempts
while retry_active:
try:
chunk = await self.response.internal_response.content.read(self.block_size)
if not chunk:
raise _ResponseStopIteration()
                self.downloaded += len(chunk)
return chunk
except _ResponseStopIteration:
self.response.internal_response.close()
raise StopAsyncIteration()
except (ChunkedEncodingError, ConnectionError):
retry_total -= 1
if retry_total <= 0:
retry_active = False
else:
await asyncio.sleep(retry_interval)
headers = {'range': 'bytes=' + str(self.downloaded) + '-'}
resp = self.pipeline.run(self.request, stream=True, headers=headers)
if resp.status_code == 416:
raise
chunk = await self.response.internal_response.content.read(self.block_size)
if not chunk:
                        raise StopAsyncIteration()
self.downloaded += len(chunk)
return chunk
continue
except StreamConsumedError:
raise
except Exception as err:
_LOGGER.warning("Unable to stream download: %s", err)
self.response.internal_response.close()
raise
class AioHttpTransportResponse(AsyncHttpResponse):
"""Methods for accessing response body data.
:param request: The HttpRequest object
:type request: ~azure.core.pipeline.transport.HttpRequest
:param aiohttp_response: Returned from ClientSession.request().
:type aiohttp_response: aiohttp.ClientResponse object
:param block_size: block size of data sent over connection.
:type block_size: int
"""
def __init__(self, request: HttpRequest, aiohttp_response: aiohttp.ClientResponse, block_size=None) -> None:
super(AioHttpTransportResponse, self).__init__(request, aiohttp_response, block_size=block_size)
# https://aiohttp.readthedocs.io/en/stable/client_reference.html#aiohttp.ClientResponse
self.status_code = aiohttp_response.status
self.headers = aiohttp_response.headers
self.reason = aiohttp_response.reason
self.content_type = aiohttp_response.headers.get('content-type')
self._body = None
def body(self) -> bytes:
"""Return the whole body as bytes in memory.
"""
if self._body is None:
raise ValueError("Body is not available. Call async method load_body, or do your call with stream=False.")
return self._body
async def load_body(self) -> None:
"""Load in memory the body, so it could be accessible from sync methods."""
self._body = await self.internal_response.read()
def stream_download(self, pipeline) -> AsyncIteratorType[bytes]:
"""Generator for streaming response body data.
:param pipeline: The pipeline object
:type pipeline: azure.core.pipeline
"""
return AioHttpStreamDownloadGenerator(pipeline, self)
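# Added usage sketch (the URL is a placeholder, not from the original file):
# driving the transport directly; production code normally wraps it in an
# azure.core Pipeline instead of calling send() by hand.
async def _demo_transport():
    request = HttpRequest('GET', 'https://example.org')
    async with AioHttpTransport() as transport:
        response = await transport.send(request)  # stream=False: body pre-loaded
        return response.status_code, len(response.body())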
|
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConvMixer training/validation using single GPU """
import sys
import os
import time
import logging
import argparse
import random
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from datasets import get_dataloader
from datasets import get_dataset
from utils import AverageMeter
from utils import WarmupCosineScheduler
from utils import get_exclude_from_weight_decay_fn
from config import get_config
from config import update_config
from mixup import Mixup
from losses import LabelSmoothingCrossEntropyLoss
from losses import SoftTargetCrossEntropyLoss
from losses import DistillationLoss
from convmixer import build_convmixer as build_model
def get_arguments():
"""return argumeents, this will overwrite the config after loading yaml file"""
parser = argparse.ArgumentParser('ConvMixer')
parser.add_argument('-cfg', type=str, default=None)
parser.add_argument('-dataset', type=str, default=None)
parser.add_argument('-batch_size', type=int, default=None)
parser.add_argument('-image_size', type=int, default=None)
parser.add_argument('-data_path', type=str, default=None)
parser.add_argument('-output', type=str, default=None)
parser.add_argument('-ngpus', type=int, default=None)
parser.add_argument('-pretrained', type=str, default=None)
parser.add_argument('-resume', type=str, default=None)
parser.add_argument('-last_epoch', type=int, default=None)
parser.add_argument('-eval', action='store_true')
parser.add_argument('-amp', action='store_true')
arguments = parser.parse_args()
return arguments
def get_logger(filename, logger_name=None):
"""set logging file and format
Args:
filename: str, full path of the logger file to write
logger_name: str, the logger name, e.g., 'master_logger', 'local_logger'
Return:
logger: python logger
"""
log_format = "%(asctime)s %(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt="%m%d %I:%M:%S %p")
    # a different name is needed when creating multiple loggers in one process
logger = logging.getLogger(logger_name)
    fh = logging.FileHandler(filename)
fh.setFormatter(logging.Formatter(log_format))
logger.addHandler(fh)
return logger
def train(dataloader,
model,
criterion,
optimizer,
epoch,
total_epochs,
total_batch,
debug_steps=100,
accum_iter=1,
mixup_fn=None,
amp=False,
logger=None):
"""Training for one epoch
Args:
dataloader: paddle.io.DataLoader, dataloader instance
        model: nn.Layer, the model to train
        criterion: nn.Layer, loss function
        optimizer: paddle optimizer instance for parameter updates
epoch: int, current epoch
total_epochs: int, total num of epochs
total_batch: int, total num of batches for one epoch
debug_steps: int, num of iters to log info, default: 100
accum_iter: int, num of iters for accumulating gradients, default: 1
mixup_fn: Mixup, mixup instance, default: None
amp: bool, if True, use mix precision training, default: False
logger: logger for logging, default: None
Returns:
train_loss_meter.avg: float, average loss on current process/gpu
train_acc_meter.avg: float, average top1 accuracy on current process/gpu
train_time: float, training time
"""
model.train()
train_loss_meter = AverageMeter()
train_acc_meter = AverageMeter()
if amp is True:
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
time_st = time.time()
for batch_id, data in enumerate(dataloader):
image = data[0]
label = data[1]
label_orig = label.clone()
if mixup_fn is not None:
image, label = mixup_fn(image, label_orig)
if amp is True: # mixed precision training
with paddle.amp.auto_cast():
output = model(image)
loss = criterion(output, label)
scaled = scaler.scale(loss)
scaled.backward()
if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
scaler.minimize(optimizer, scaled)
optimizer.clear_grad()
else: # full precision training
output = model(image)
loss = criterion(output, label)
#NOTE: division may be needed depending on the loss function
# Here no division is needed:
# default 'reduction' param in nn.CrossEntropyLoss is set to 'mean'
#loss = loss / accum_iter
loss.backward()
if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):
optimizer.step()
optimizer.clear_grad()
pred = F.softmax(output)
if mixup_fn:
acc = paddle.metric.accuracy(pred, label_orig)
else:
acc = paddle.metric.accuracy(pred, label_orig.unsqueeze(1))
batch_size = image.shape[0]
train_loss_meter.update(loss.numpy()[0], batch_size)
train_acc_meter.update(acc.numpy()[0], batch_size)
if logger and batch_id % debug_steps == 0:
logger.info(
f"Epoch[{epoch:03d}/{total_epochs:03d}], " +
f"Step[{batch_id:04d}/{total_batch:04d}], " +
f"Avg Loss: {train_loss_meter.avg:.4f}, " +
f"Avg Acc: {train_acc_meter.avg:.4f}")
train_time = time.time() - time_st
return train_loss_meter.avg, train_acc_meter.avg, train_time
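# Note (added): with accum_iter=4, optimizer.step() and clear_grad() run on
# batches 4, 8, ... and also on the final batch, so gradients from a last,
# incomplete accumulation group are still applied.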
def validate(dataloader, model, criterion, total_batch, debug_steps=100, logger=None):
"""Validation for whole dataset
Args:
dataloader: paddle.io.DataLoader, dataloader instance
        model: nn.Layer, the model to evaluate
        criterion: nn.Layer, loss function
total_batch: int, total num of batches for one epoch
debug_steps: int, num of iters to log info, default: 100
logger: logger for logging, default: None
Returns:
val_loss_meter.avg: float, average loss on current process/gpu
val_acc1_meter.avg: float, average top1 accuracy on current process/gpu
val_acc5_meter.avg: float, average top5 accuracy on current process/gpu
        val_time: float, validation time
"""
model.eval()
val_loss_meter = AverageMeter()
val_acc1_meter = AverageMeter()
val_acc5_meter = AverageMeter()
time_st = time.time()
with paddle.no_grad():
for batch_id, data in enumerate(dataloader):
image = data[0]
label = data[1]
output = model(image)
loss = criterion(output, label)
pred = F.softmax(output)
acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1))
acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5)
batch_size = image.shape[0]
val_loss_meter.update(loss.numpy()[0], batch_size)
val_acc1_meter.update(acc1.numpy()[0], batch_size)
val_acc5_meter.update(acc5.numpy()[0], batch_size)
if logger and batch_id % debug_steps == 0:
logger.info(
f"Val Step[{batch_id:04d}/{total_batch:04d}], " +
f"Avg Loss: {val_loss_meter.avg:.4f}, " +
f"Avg Acc@1: {val_acc1_meter.avg:.4f}, " +
f"Avg Acc@5: {val_acc5_meter.avg:.4f}")
val_time = time.time() - time_st
return val_loss_meter.avg, val_acc1_meter.avg, val_acc5_meter.avg, val_time
def main():
# STEP 0: Preparation
# config is updated by: (1) config.py, (2) yaml file, (3) arguments
arguments = get_arguments()
config = get_config()
config = update_config(config, arguments)
# set output folder
if not config.EVAL:
config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))
else:
config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))
if not os.path.exists(config.SAVE):
os.makedirs(config.SAVE, exist_ok=True)
last_epoch = config.TRAIN.LAST_EPOCH
seed = config.SEED
paddle.seed(seed)
np.random.seed(seed)
random.seed(seed)
logger = get_logger(filename=os.path.join(config.SAVE, 'log.txt'))
logger.info(f'\n{config}')
# STEP 1: Create model
model = build_model(config)
# STEP 2: Create train and val dataloader
if not config.EVAL:
dataset_train = get_dataset(config, mode='train')
dataloader_train = get_dataloader(config, dataset_train, 'train', False)
dataset_val = get_dataset(config, mode='val')
dataloader_val = get_dataloader(config, dataset_val, 'val', False)
# STEP 3: Define Mixup function
mixup_fn = None
if config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or config.TRAIN.CUTMIX_MINMAX is not None:
mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA,
cutmix_alpha=config.TRAIN.CUTMIX_ALPHA,
cutmix_minmax=config.TRAIN.CUTMIX_MINMAX,
prob=config.TRAIN.MIXUP_PROB,
switch_prob=config.TRAIN.MIXUP_SWITCH_PROB,
mode=config.TRAIN.MIXUP_MODE,
label_smoothing=config.TRAIN.SMOOTHING,
num_classes=config.MODEL.NUM_CLASSES)
# STEP 4: Define criterion
if config.TRAIN.MIXUP_PROB > 0.:
criterion = SoftTargetCrossEntropyLoss()
elif config.TRAIN.SMOOTHING:
criterion = LabelSmoothingCrossEntropyLoss()
else:
criterion = nn.CrossEntropyLoss()
# only use cross entropy for val
criterion_val = nn.CrossEntropyLoss()
# STEP 5: Define optimizer and lr_scheduler
# set lr according to batch size and world size (hacked from Swin official code and modified for CSwin)
if config.TRAIN.LINEAR_SCALED_LR is not None:
linear_scaled_lr = (
config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE) / config.TRAIN.LINEAR_SCALED_LR
linear_scaled_warmup_start_lr = (
config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE) / config.TRAIN.LINEAR_SCALED_LR
linear_scaled_end_lr = (
config.TRAIN.END_LR * config.DATA.BATCH_SIZE) / config.TRAIN.LINEAR_SCALED_LR
if config.TRAIN.ACCUM_ITER > 1:
linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER
linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER
linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER
config.TRAIN.BASE_LR = linear_scaled_lr
config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr
config.TRAIN.END_LR = linear_scaled_end_lr
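    # Worked example (added): with BASE_LR=1e-3, BATCH_SIZE=256 and
    # LINEAR_SCALED_LR=512, the scaled base LR is 1e-3 * 256 / 512 = 5e-4;
    # ACCUM_ITER=2 then doubles it back to 1e-3, matching the larger
    # effective batch size.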
scheduler = None
if config.TRAIN.LR_SCHEDULER.NAME == "warmupcosine":
scheduler = WarmupCosineScheduler(learning_rate=config.TRAIN.BASE_LR,
warmup_start_lr=config.TRAIN.WARMUP_START_LR,
start_lr=config.TRAIN.BASE_LR,
end_lr=config.TRAIN.END_LR,
warmup_epochs=config.TRAIN.WARMUP_EPOCHS,
total_epochs=config.TRAIN.NUM_EPOCHS,
last_epoch=config.TRAIN.LAST_EPOCH)
elif config.TRAIN.LR_SCHEDULER.NAME == "cosine":
scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=config.TRAIN.BASE_LR,
T_max=config.TRAIN.NUM_EPOCHS,
last_epoch=last_epoch)
    elif config.TRAIN.LR_SCHEDULER.NAME == "multi-step":
milestones = [int(v.strip()) for v in config.TRAIN.LR_SCHEDULER.MILESTONES.split(",")]
scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=config.TRAIN.BASE_LR,
milestones=milestones,
gamma=config.TRAIN.LR_SCHEDULER.DECAY_RATE,
last_epoch=last_epoch)
else:
logger.fatal(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.")
raise NotImplementedError(f"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.")
if config.TRAIN.OPTIMIZER.NAME == "SGD":
if config.TRAIN.GRAD_CLIP:
clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)
else:
clip = None
optimizer = paddle.optimizer.Momentum(
parameters=model.parameters(),
learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR,
weight_decay=config.TRAIN.WEIGHT_DECAY,
momentum=config.TRAIN.OPTIMIZER.MOMENTUM,
grad_clip=clip)
elif config.TRAIN.OPTIMIZER.NAME == "AdamW":
if config.TRAIN.GRAD_CLIP:
clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)
else:
clip = None
optimizer = paddle.optimizer.AdamW(
parameters=model.parameters(),
learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR,
beta1=config.TRAIN.OPTIMIZER.BETAS[0],
beta2=config.TRAIN.OPTIMIZER.BETAS[1],
weight_decay=config.TRAIN.WEIGHT_DECAY,
epsilon=config.TRAIN.OPTIMIZER.EPS,
grad_clip=clip,
apply_decay_param_fun=get_exclude_from_weight_decay_fn([
'absolute_pos_embed', 'relative_position_bias_table']),
)
else:
logger.fatal(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.")
raise NotImplementedError(f"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.")
# STEP 6: Load pretrained model or load resume model and optimizer states
if config.MODEL.PRETRAINED:
if (config.MODEL.PRETRAINED).endswith('.pdparams'):
raise ValueError(f'{config.MODEL.PRETRAINED} should not contain .pdparams')
assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True
model_state = paddle.load(config.MODEL.PRETRAINED+'.pdparams')
model.set_dict(model_state)
logger.info(f"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}")
if config.MODEL.RESUME:
assert os.path.isfile(config.MODEL.RESUME + '.pdparams') is True
assert os.path.isfile(config.MODEL.RESUME + '.pdopt') is True
model_state = paddle.load(config.MODEL.RESUME + '.pdparams')
model.set_dict(model_state)
opt_state = paddle.load(config.MODEL.RESUME+'.pdopt')
optimizer.set_state_dict(opt_state)
logger.info(
f"----- Resume: Load model and optmizer from {config.MODEL.RESUME}")
# STEP 7: Validation (eval mode)
if config.EVAL:
logger.info('----- Start Validating')
val_loss, val_acc1, val_acc5, val_time = validate(
dataloader=dataloader_val,
model=model,
criterion=criterion_val,
total_batch=len(dataloader_val),
debug_steps=config.REPORT_FREQ,
logger=logger)
logger.info(f"Validation Loss: {val_loss:.4f}, " +
f"Validation Acc@1: {val_acc1:.4f}, " +
f"Validation Acc@5: {val_acc5:.4f}, " +
f"time: {val_time:.2f}")
return
# STEP 8: Start training and validation (train mode)
logger.info(f"Start training from epoch {last_epoch+1}.")
for epoch in range(last_epoch+1, config.TRAIN.NUM_EPOCHS+1):
# train
logger.info(f"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}")
train_loss, train_acc, train_time = train(dataloader=dataloader_train,
model=model,
criterion=criterion,
optimizer=optimizer,
epoch=epoch,
total_epochs=config.TRAIN.NUM_EPOCHS,
total_batch=len(dataloader_train),
debug_steps=config.REPORT_FREQ,
accum_iter=config.TRAIN.ACCUM_ITER,
mixup_fn=mixup_fn,
amp=config.AMP,
logger=logger)
scheduler.step()
logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Train Loss: {train_loss:.4f}, " +
f"Train Acc: {train_acc:.4f}, " +
f"time: {train_time:.2f}")
# validation
if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
logger.info(f'----- Validation after Epoch: {epoch}')
val_loss, val_acc1, val_acc5, val_time = validate(
dataloader=dataloader_val,
model=model,
criterion=criterion_val,
total_batch=len(dataloader_val),
debug_steps=config.REPORT_FREQ,
logger=logger)
logger.info(f"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], " +
f"Validation Loss: {val_loss:.4f}, " +
f"Validation Acc@1: {val_acc1:.4f}, " +
f"Validation Acc@5: {val_acc5:.4f}, " +
f"time: {val_time:.2f}")
# model save
if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:
model_path = os.path.join(
config.SAVE, f"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}")
paddle.save(model.state_dict(), model_path + '.pdparams')
paddle.save(optimizer.state_dict(), model_path + '.pdopt')
logger.info(f"----- Save model: {model_path}.pdparams")
logger.info(f"----- Save optim: {model_path}.pdopt")
if __name__ == "__main__":
main()
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud API base hook."""
import functools
import json
import logging
import os
import tempfile
from contextlib import ExitStack, contextmanager
from subprocess import check_output
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, TypeVar, Union, cast
import google.auth
import google.auth.credentials
import google.oauth2.service_account
import google_auth_httplib2
import tenacity
from google.api_core.exceptions import Forbidden, ResourceExhausted, TooManyRequests
from google.api_core.gapic_v1.client_info import ClientInfo
from google.auth import _cloud_sdk
from google.auth.environment_vars import CLOUD_SDK_CONFIG_DIR, CREDENTIALS
from googleapiclient import discovery
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload, build_http, set_user_agent
from airflow import version
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.providers.google.cloud.utils.credentials_provider import (
_get_scopes,
_get_target_principal_and_delegates,
get_credentials_and_project_id,
)
from airflow.utils.process_utils import patch_environ
log = logging.getLogger(__name__)
# Constants used by the mechanism of repeating requests in reaction to exceeding the temporary quota.
INVALID_KEYS = [
'DefaultRequestsPerMinutePerProject',
'DefaultRequestsPerMinutePerUser',
'RequestsPerMinutePerProject',
"Resource has been exhausted (e.g. check quota).",
]
INVALID_REASONS = [
'userRateLimitExceeded',
]
def is_soft_quota_exception(exception: Exception):
"""
API for Google services does not have a standardized way to report quota violation errors.
The function has been adapted by trial and error to the following services:
* Google Translate
* Google Vision
* Google Text-to-Speech
* Google Speech-to-Text
* Google Natural Language
* Google Video Intelligence
"""
if isinstance(exception, Forbidden):
return any(reason in error.details() for reason in INVALID_REASONS for error in exception.errors)
if isinstance(exception, (ResourceExhausted, TooManyRequests)):
return any(key in error.details() for key in INVALID_KEYS for error in exception.errors)
return False
def is_operation_in_progress_exception(exception: Exception) -> bool:
"""
Some of the calls return 429 (too many requests!) or 409 errors (Conflict)
in case of operation in progress.
* Google Cloud SQL
"""
if isinstance(exception, HttpError):
return exception.resp.status == 429 or exception.resp.status == 409
return False
class retry_if_temporary_quota(tenacity.retry_if_exception): # pylint: disable=invalid-name
"""Retries if there was an exception for exceeding the temporary quote limit."""
def __init__(self):
super().__init__(is_soft_quota_exception)
class retry_if_operation_in_progress(tenacity.retry_if_exception): # pylint: disable=invalid-name
"""Retries if there was an exception for exceeding the temporary quote limit."""
def __init__(self):
super().__init__(is_operation_in_progress_exception)
T = TypeVar("T", bound=Callable) # pylint: disable=invalid-name
RT = TypeVar('RT') # pylint: disable=invalid-name
class GoogleBaseHook(BaseHook):
"""
A base hook for Google cloud-related hooks. Google cloud has a shared REST
API client that is built in the same way no matter which service you use.
This class helps construct and authorize the credentials needed to then
call googleapiclient.discovery.build() to actually discover and build a client
for a Google cloud service.
The class also contains some miscellaneous helper functions.
All hook derived from this base hook use the 'Google Cloud' connection
type. Three ways of authentication are supported:
Default credentials: Only the 'Project Id' is required. You'll need to
have set up default credentials, such as by the
    ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable or from the metadata
server on Google Compute Engine.
JSON key file: Specify 'Project Id', 'Keyfile Path' and 'Scope'.
Legacy P12 key files are not supported.
JSON data provided in the UI: Specify 'Keyfile JSON'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
:type impersonation_chain: Union[str, Sequence[str]]
"""
def __init__(
self,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__()
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
self.extras = self.get_connection(self.gcp_conn_id).extra_dejson # type: Dict
self._cached_credentials: Optional[google.auth.credentials.Credentials] = None
self._cached_project_id: Optional[str] = None
def _get_credentials_and_project_id(self) -> Tuple[google.auth.credentials.Credentials, Optional[str]]:
"""Returns the Credentials object for Google API and the associated project_id"""
if self._cached_credentials is not None:
return self._cached_credentials, self._cached_project_id
key_path: Optional[str] = self._get_field('key_path', None)
try:
keyfile_dict: Optional[str] = self._get_field('keyfile_dict', None)
keyfile_dict_json: Optional[Dict[str, str]] = None
if keyfile_dict:
keyfile_dict_json = json.loads(keyfile_dict)
except json.decoder.JSONDecodeError:
raise AirflowException('Invalid key JSON.')
target_principal, delegates = _get_target_principal_and_delegates(self.impersonation_chain)
credentials, project_id = get_credentials_and_project_id(
key_path=key_path,
keyfile_dict=keyfile_dict_json,
scopes=self.scopes,
delegate_to=self.delegate_to,
target_principal=target_principal,
delegates=delegates,
)
overridden_project_id = self._get_field('project')
if overridden_project_id:
project_id = overridden_project_id
self._cached_credentials = credentials
self._cached_project_id = project_id
return credentials, project_id
def _get_credentials(self) -> google.auth.credentials.Credentials:
"""Returns the Credentials object for Google API"""
credentials, _ = self._get_credentials_and_project_id()
return credentials
def _get_access_token(self) -> str:
"""Returns a valid access token from Google API Credentials"""
return self._get_credentials().token
@functools.lru_cache(maxsize=None)
def _get_credentials_email(self) -> str:
"""
Returns the email address associated with the currently logged in account
If a service account is used, it returns the service account.
If user authentication (e.g. gcloud auth) is used, it returns the e-mail account of that user.
"""
credentials = self._get_credentials()
service_account_email = getattr(credentials, 'service_account_email', None)
if service_account_email:
return service_account_email
http_authorized = self._authorize()
oauth2_client = discovery.build('oauth2', "v1", http=http_authorized, cache_discovery=False)
return oauth2_client.tokeninfo().execute()['email'] # pylint: disable=no-member
def _authorize(self) -> google_auth_httplib2.AuthorizedHttp:
"""
Returns an authorized HTTP object to be used to build a Google cloud
service hook connection.
"""
credentials = self._get_credentials()
http = build_http()
http = set_user_agent(http, "airflow/" + version.version)
authed_http = google_auth_httplib2.AuthorizedHttp(credentials, http=http)
return authed_http
def _get_field(self, f: str, default: Any = None) -> Any:
"""
Fetches a field from extras, and returns it. This is some Airflow
magic. The google_cloud_platform hook type adds custom UI elements
to the hook page, which allow admins to specify service_account,
key_path, etc. They get formatted as shown below.
"""
long_f = f'extra__google_cloud_platform__{f}'
if hasattr(self, 'extras') and long_f in self.extras:
return self.extras[long_f]
else:
return default
@property
def project_id(self) -> Optional[str]:
"""
Returns project id.
:return: id of the project
:rtype: str
"""
_, project_id = self._get_credentials_and_project_id()
return project_id
@property
def num_retries(self) -> int:
"""
Returns num_retries from Connection.
:return: the number of times each API request should be retried
:rtype: int
"""
field_value = self._get_field('num_retries', default=5)
if field_value is None:
return 5
if isinstance(field_value, str) and field_value.strip() == '':
return 5
try:
return int(field_value)
except ValueError:
raise AirflowException(
f"The num_retries field should be a integer. "
f"Current value: \"{field_value}\" (type: {type(field_value)}). "
f"Please check the connection configuration."
)
@property
def client_info(self) -> ClientInfo:
"""
Return client information used to generate a user-agent for API calls.
It allows for better errors tracking.
This object is only used by the google-cloud-* libraries that are built specifically for
the Google Cloud. It is not supported by The Google APIs Python Client that use Discovery
based APIs.
"""
client_info = ClientInfo(client_library_version='airflow_v' + version.version)
return client_info
@property
def scopes(self) -> Sequence[str]:
"""
Return OAuth 2.0 scopes.
:return: Returns the scope defined in the connection configuration, or the default scope
:rtype: Sequence[str]
"""
scope_value = self._get_field('scope', None) # type: Optional[str]
return _get_scopes(scope_value)
@staticmethod
def quota_retry(*args, **kwargs) -> Callable:
"""
        A decorator that provides a mechanism to repeat requests in response to exceeding a temporary
        quota limit.
"""
def decorator(fun: Callable):
default_kwargs = {
'wait': tenacity.wait_exponential(multiplier=1, max=100),
'retry': retry_if_temporary_quota(),
'before': tenacity.before_log(log, logging.DEBUG),
'after': tenacity.after_log(log, logging.DEBUG),
}
default_kwargs.update(**kwargs)
return tenacity.retry(*args, **default_kwargs)(fun)
return decorator
@staticmethod
def operation_in_progress_retry(*args, **kwargs) -> Callable[[T], T]:
"""
        A decorator that provides a mechanism to repeat requests that fail with
        "operation in progress" (HTTP 409) errors.
"""
def decorator(fun: T):
default_kwargs = {
'wait': tenacity.wait_exponential(multiplier=1, max=300),
'retry': retry_if_operation_in_progress(),
'before': tenacity.before_log(log, logging.DEBUG),
'after': tenacity.after_log(log, logging.DEBUG),
}
default_kwargs.update(**kwargs)
return cast(T, tenacity.retry(*args, **default_kwargs)(fun))
return decorator
@staticmethod
def fallback_to_default_project_id(func: Callable[..., RT]) -> Callable[..., RT]:
"""
Decorator that provides fallback for Google Cloud project id. If
the project is None it will be replaced with the project_id from the
        service account the Hook is authenticated with. The project id must be
        passed via the ``project_id`` keyword argument; positional arguments are rejected.
:param func: function to wrap
:return: result of the function call
"""
@functools.wraps(func)
def inner_wrapper(self: GoogleBaseHook, *args, **kwargs) -> RT:
if args:
raise AirflowException(
"You must use keyword arguments in this methods rather than positional"
)
if 'project_id' in kwargs:
kwargs['project_id'] = kwargs['project_id'] or self.project_id
else:
kwargs['project_id'] = self.project_id
if not kwargs['project_id']:
raise AirflowException(
"The project id must be passed either as "
"keyword project_id parameter or as project_id extra "
"in Google Cloud connection definition. Both are not set!"
)
return func(self, *args, **kwargs)
return inner_wrapper
@staticmethod
def provide_gcp_credential_file(func: T) -> T:
"""
        Function decorator that provides Google Cloud credentials for applications supporting
        the Application Default Credentials (ADC) strategy.
        It is recommended to use the ``provide_gcp_credential_file_as_context`` context manager
        instead, to limit the scope in which the authorization data is available. Using the
        context manager also makes it easier to use multiple connections in one function.
"""
@functools.wraps(func)
def wrapper(self: GoogleBaseHook, *args, **kwargs):
with self.provide_gcp_credential_file_as_context():
return func(self, *args, **kwargs)
return cast(T, wrapper)
@contextmanager
def provide_gcp_credential_file_as_context(self):
"""
        Context manager that provides Google Cloud credentials for applications supporting the `Application
Default Credentials (ADC) strategy <https://cloud.google.com/docs/authentication/production>`__.
        It can be used to provide credentials for external programs (e.g. gcloud) that expect an
        authorization file in the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable.
"""
key_path = self._get_field(
'key_path', None
) # type: Optional[str] # noqa: E501 # pylint: disable=protected-access
keyfile_dict = self._get_field(
'keyfile_dict', None
) # type: Optional[Dict] # noqa: E501 # pylint: disable=protected-access
if key_path and keyfile_dict:
raise AirflowException(
"The `keyfile_dict` and `key_path` fields are mutually exclusive. "
"Please provide only one value."
)
elif key_path:
if key_path.endswith('.p12'):
                raise AirflowException('Legacy P12 key files are not supported; use a JSON key file.')
with patch_environ({CREDENTIALS: key_path}):
yield key_path
elif keyfile_dict:
            with tempfile.NamedTemporaryFile(mode='w+t') as conf_file:
                if isinstance(keyfile_dict, dict):
                    # The extras field may come back as a dict; serialize it
                    # before writing the temporary credentials file.
                    keyfile_dict = json.dumps(keyfile_dict)
                conf_file.write(keyfile_dict)
                conf_file.flush()
with patch_environ({CREDENTIALS: conf_file.name}):
yield conf_file.name
else:
# We will use the default service account credentials.
yield None
@contextmanager
def provide_authorized_gcloud(self):
"""
Provides a separate gcloud configuration with current credentials.
        The gcloud tool supports two login flows: ``gcloud auth login`` for the tool itself,
        and ``gcloud auth application-default login`` for Application Default Credentials (ADC).
        In our case, we want all commands to use only the credentials from ADC, so
        we need to configure the credentials in gcloud manually.
"""
credentials_path = _cloud_sdk.get_application_default_credentials_path()
project_id = self.project_id
with ExitStack() as exit_stack:
exit_stack.enter_context(self.provide_gcp_credential_file_as_context())
gcloud_config_tmp = exit_stack.enter_context(tempfile.TemporaryDirectory())
exit_stack.enter_context(patch_environ({CLOUD_SDK_CONFIG_DIR: gcloud_config_tmp}))
if project_id:
# Don't display stdout/stderr for security reason
check_output(["gcloud", "config", "set", "core/project", project_id])
if CREDENTIALS in os.environ:
# This solves most cases when we are logged in using the service key in Airflow.
# Don't display stdout/stderr for security reason
check_output(
[
"gcloud",
"auth",
"activate-service-account",
f"--key-file={os.environ[CREDENTIALS]}",
]
)
elif os.path.exists(credentials_path):
                # If we are logged in by `gcloud auth application-default` then we need to log in manually.
                # This will make the `gcloud auth application-default` and `gcloud auth` credentials equal.
with open(credentials_path) as creds_file:
creds_content = json.loads(creds_file.read())
# Don't display stdout/stderr for security reason
check_output(["gcloud", "config", "set", "auth/client_id", creds_content["client_id"]])
# Don't display stdout/stderr for security reason
check_output(
["gcloud", "config", "set", "auth/client_secret", creds_content["client_secret"]]
)
# Don't display stdout/stderr for security reason
check_output(
[
"gcloud",
"auth",
"activate-refresh-token",
creds_content["client_id"],
creds_content["refresh_token"],
]
)
yield
@staticmethod
def download_content_from_request(file_handle, request: dict, chunk_size: int) -> None:
"""
Download media resources.
        Note that a Python file object is compatible with io.IOBase and can be used with
        this method.
        :param file_handle: io.IOBase or file object. The stream in which to write the downloaded
            bytes.
        :type file_handle: io.IOBase or file object
        :param request: The media request to perform in chunks.
        :type request: googleapiclient.http.HttpRequest
        :param chunk_size: File will be downloaded in chunks of this many bytes.
        :type chunk_size: int
"""
downloader = MediaIoBaseDownload(file_handle, request, chunksize=chunk_size)
done = False
while done is False:
_, done = downloader.next_chunk()
file_handle.flush()
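# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the hook): how the static decorators above
# are typically composed on a concrete hook method. `_ExampleGcpHook` and
# `get_widgets` are hypothetical names introduced only for this example.
class _ExampleGcpHook(GoogleBaseHook):
    @GoogleBaseHook.fallback_to_default_project_id
    @GoogleBaseHook.quota_retry()
    def get_widgets(self, project_id=None):
        # project_id falls back to the connection's project when not passed;
        # the call is retried with exponential backoff on temporary quota errors.
        return self._get_field('widgets', default=[])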
|
#------------------------------------------------------------------------------
# Copyright (c) 2008, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Riverbank Computing Limited
# Description: <Enthought permissions package component>
#------------------------------------------------------------------------------
from .adapter_base import AdapterBase
from .i_policy_manager import IPolicyManager
from .i_user import IUser
from .i_user_manager import IUserManager
from .package_globals import get_permissions_manager, set_permissions_manager
from .permission import ManagePolicyPermission, ManageUsersPermission, Permission
from .permissions_manager import PermissionsManager
from .secure_proxy import SecureHandler, SecureProxy
|
from __future__ import unicode_literals
import logging
import os
from mopidy import config, ext
__version__ = '1.0.1'
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
dist_name = 'Mopidy-TtsGpio-opi'
ext_name = 'ttsgpio-opi'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['debug_gpio_simulate'] = config.Boolean()
schema['pin_play_led'] = config.String()
schema['inverted'] = config.Boolean()
return schema
def setup(self, registry):
from .frontend import TtsGpioOpi
registry.add('frontend', TtsGpioOpi)
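# A plausible ext.conf matching the schema above (the pin value is an
# assumption for illustration; `enabled` comes from the base extension schema):
#   [ttsgpio-opi]
#   enabled = true
#   debug_gpio_simulate = false
#   pin_play_led = PA6
#   inverted = false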
|
import funcoes
from time import sleep
from random import randint
funcoes.browser()
for c in range(0, 10):
    # perform the attack
    funcoes.ataque()
    # go back to the counter
    funcoes.contador()
    # wait between roughly 4 and 5 minutes
    x = randint(250, 300)
    sleep(x)
    # reopen the browser
    funcoes.browser()
|
#
# This file is part of the Fonolo Python Wrapper package.
#
# (c) Foncloud, Inc.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
import re
from .requesthandler import RequestHandler
from ..exception.exception import FonoloException
class Calls(object):
    def __init__(self, _handler):
        self.handler = _handler
    def get(self, _params=None):
        return self.handler.get('calls', _params)
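# Illustrative usage (hypothetical handler construction; the real
# RequestHandler signature is defined elsewhere in this package):
#   calls = Calls(handler)
#   active = calls.get({'status': 'active'})
# The query parameter name here is an assumption, not documented API.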
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, W0611, C0413
"""Vitis-AI runtime test for CPU only part
This test verifies as much as possible whether the a model can be correctly offloaded
and executed for Vitis-AI acceleration. This entails:
- Annotating and partitioning model for Vitis-AI acceleration
- Building a Vitis-AI PyXIR runtime module with on-the-fly quantization enabled
- Running the first iteration of the on-the-fly quantization flow. This will always run
  on CPU, as the first N inputs (N is a parameter) are used for collecting calibration
  data for quantization.
NOTE This is not a full end-to-end test as we need the full Vitis-AI docker environment
and access to an FPGA instance for that. This test verifies the Vitis-AI flow as much as
possible without requiring access to dedicated docker environment and/or hardware setup.
NOTE Quantization is not being tested (we need to be inside the Vitis-AI docker environment
for that) but the internal representation used for quantization is being generated and
functionally tested (on CPU).
"""
import sys
import numpy as np
import pytest
pytest.importorskip("pyxir")
import pyxir.contrib.target.DPUCADX8G
import tvm
import tvm.relay.testing
from tvm import relay
from .infrastructure import skip_test, verify_result
def test_extern_vitis_ai_resnet18():
"""Test first part of Vitis-AI on-the-fly quantization runtime with ResNet 18 model"""
if skip_test():
return
dtype = "float32"
ishape = (1, 3, 224, 224)
mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=1)
ref_mod, params = relay.testing.resnet.get_workload(num_layers=18, batch_size=1)
ref_ex = relay.create_executor("graph", mod=ref_mod, ctx=tvm.cpu(0))
i_data = np.random.uniform(0, 1, ishape).astype(dtype)
ref_res = ref_ex.evaluate()(i_data, **params)
verify_result(
mod,
{"data": i_data},
(1, 1000),
ref_res.asnumpy(),
tol=1e-5,
params=params,
dpu_target="DPUCADX8G",
tvm_ops=4,
)
if __name__ == "__main__":
if sys.platform == "win32":
print("Skip test on Windows for now")
sys.exit(0)
test_extern_vitis_ai_resnet18()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-05 05:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('chembddb', '0020_auto_20170602_0900'),
]
operations = [
migrations.AlterField(
model_name='data',
name='credit',
field=models.CharField(default='The Hachmann Group', max_length=256, verbose_name='username'),
),
migrations.AlterField(
model_name='data',
name='met',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='chembddb.Method'),
),
migrations.AlterField(
model_name='data',
name='value',
field=models.FloatField(blank=True, db_index=True, null=True),
),
migrations.AlterField(
model_name='method',
name='comment',
field=models.CharField(blank=True, db_index=True, max_length=256, verbose_name='details for the method'),
),
migrations.AlterField(
model_name='method',
name='method',
field=models.CharField(db_index=True, default='00000', max_length=32, verbose_name='Keyword for method'),
),
migrations.AlterField(
model_name='molgraph',
name='SMILES_str',
field=models.CharField(blank=True, db_index=True, max_length=256, verbose_name='SMILES string'),
),
migrations.AlterField(
model_name='molprop',
name='prop',
field=models.CharField(db_index=True, default='00000', max_length=32, verbose_name='Keyword for property'),
),
migrations.AlterField(
model_name='molprop',
name='unit',
field=models.CharField(db_index=True, default=' ', max_length=32, verbose_name='keyword for unit'),
),
migrations.AlterIndexTogether(
name='data',
index_together=set([('mol_graph', 'property', 'value')]),
),
]
|
# Copyright 2018 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import tempfile
import time
from google.cloud import pubsub
from google.cloud import storage
# Add manager for bootstrapping device registry / device for testing
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'manager')) # noqa
import manager
import mock
import pytest
import requests
import gcs_send_to_device as gcs_to_device
gcs_bucket = os.environ['CLOUD_STORAGE_BUCKET']
project_id = os.environ['GCLOUD_PROJECT']
service_account_json = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
topic_id = 'test-device-events-{}'.format(int(time.time()))
device_id = 'test-device-{}'.format(int(time.time()))
registry_id = 'test-registry-{}'.format(int(time.time()))
pubsub_topic = 'projects/{}/topics/{}'.format(project_id, topic_id)
cloud_region = 'us-central1'
destination_file_name = 'destination-file.bin'
gcs_file_name = 'my-config'
@pytest.fixture(scope='module')
def test_blob():
"""Provides a pre-existing blob in the test bucket."""
bucket = storage.Client().bucket(gcs_bucket)
# Name of the blob
blob = bucket.blob('iot_core_store_file_gcs')
# Text in the blob
blob.upload_from_string('This file on GCS will go to a device.')
yield blob
# Clean up
blob.delete()
@mock.patch('google.cloud.storage.client.Client.create_bucket')
def test_create_bucket(create_bucket_mock, capsys):
# Unlike other tests for sending a config, this one mocks out the creation
# because buckets are expensive, globally-namespaced objects.
create_bucket_mock.return_value = mock.sentinel.bucket
gcs_to_device.create_bucket(gcs_bucket)
create_bucket_mock.assert_called_with(gcs_bucket)
def test_upload_local_file(capsys):
# Creates a temporary source file that gets uploaded
# to GCS. All other tests use the blob in test_blob().
with tempfile.NamedTemporaryFile() as source_file:
source_file.write(b'This is a source file.')
gcs_to_device.upload_local_file(
gcs_bucket,
gcs_file_name,
source_file.name)
out, _ = capsys.readouterr()
assert 'File {} uploaded as {}.'.format(
source_file.name, gcs_file_name) in out
def test_make_file_public(test_blob):
gcs_to_device.make_file_public(
gcs_bucket,
test_blob.name)
r = requests.get(test_blob.public_url)
# Test for the content of the file to verify that
# it's publicly accessible.
assert r.text == 'This file on GCS will go to a device.'
def test_send_to_device(capsys):
manager.create_iot_topic(project_id, topic_id)
manager.open_registry(
service_account_json,
project_id,
cloud_region,
pubsub_topic,
registry_id)
manager.create_unauth_device(
service_account_json,
project_id,
cloud_region,
registry_id,
device_id)
gcs_to_device.send_to_device(
gcs_bucket,
gcs_file_name,
destination_file_name,
project_id,
cloud_region,
registry_id,
device_id,
service_account_json)
manager.delete_device(
service_account_json, project_id, cloud_region, registry_id,
device_id)
manager.delete_registry(
service_account_json, project_id, cloud_region, registry_id)
pubsub_client = pubsub.PublisherClient()
topic_path = pubsub_client.topic_path(project_id, topic_id)
pubsub_client.delete_topic(topic_path)
out, _ = capsys.readouterr()
assert 'Successfully sent file to device' in out
def test_get_state(capsys):
manager.create_iot_topic(project_id, topic_id)
manager.open_registry(
service_account_json,
project_id,
cloud_region,
pubsub_topic,
registry_id)
manager.create_unauth_device(
service_account_json,
project_id,
cloud_region,
registry_id,
device_id)
gcs_to_device.get_state(
service_account_json,
project_id,
cloud_region,
registry_id,
device_id)
manager.delete_device(
service_account_json, project_id, cloud_region, registry_id,
device_id)
manager.delete_registry(
service_account_json, project_id, cloud_region, registry_id)
pubsub_client = pubsub.PublisherClient()
topic_path = pubsub_client.topic_path(project_id, topic_id)
pubsub_client.delete_topic(topic_path)
out, _ = capsys.readouterr()
assert 'Id' in out
assert 'Config' in out
|
import requests
import os
class Config:
def __init__(self, config=None):
self.baseUrl = ""
if config is not None:
if "baseUrl" in config:
self.baseUrl = config["baseUrl"]
class Client:
def __init__(self, config):
self.config = config
def list_device_apps(self, deviceId):
r = requests.get(
"{baseUrl}/devices/{deviceId}/apps".format(
baseUrl=self.config.baseUrl, deviceId=deviceId
)
)
r.raise_for_status()
apps = r.json()
return apps
def get_device(self, deviceId):
r = requests.get(
"{baseUrl}/devices/{deviceId}".format(
baseUrl=self.config.baseUrl, deviceId=deviceId
)
)
r.raise_for_status()
device = r.json()
return device
def list_devices(self):
r = requests.get("{baseUrl}/devices".format(baseUrl=self.config.baseUrl))
r.raise_for_status()
devices = r.json()
return devices
def start_session(self, deviceId, appId, autoSync=False, screenshots=False):
requestBody = {
"deviceId": deviceId,
"appId": appId,
"autoSync": autoSync,
"screenshots": screenshots,
}
r = requests.post(
"{baseUrl}/sessions".format(baseUrl=self.config.baseUrl), json=requestBody
)
r.raise_for_status()
return r.json()
def stop_session(self, sessionId, outputJson=False):
requestBody = {
"includeSessionJsonInResponse": outputJson,
}
r = requests.post(
"{baseUrl}/sessions/{sessionId}/stop".format(
baseUrl=self.config.baseUrl, sessionId=sessionId
),
json=requestBody
)
r.raise_for_status()
if not outputJson:
return
return r.json()
def sync(self):
r = requests.post("{baseUrl}/sessions/sync".format(baseUrl=self.config.baseUrl))
r.raise_for_status()
return
def get_properties(self):
r = requests.get("{baseUrl}/properties".format(baseUrl=self.config.baseUrl))
r.raise_for_status()
return r.json()
def set_properties(self, properties):
r = requests.put("{baseUrl}/properties".format(baseUrl=self.config.baseUrl), json=properties)
r.raise_for_status()
return
def generate_session_json(self, session_path, target_path=None):
requestBody = {
"sessionPath": session_path,
}
if target_path:
requestBody["targetPath"] = target_path
r = requests.post(
"{baseUrl}/generate-json".format(baseUrl=self.config.baseUrl),
json=requestBody,
)
r.raise_for_status()
return
def enable_wifi_prof(self, device_id):
r = requests.post(
"{baseUrl}/devices/{deviceId}/enable-wifi-prof".format(
baseUrl=self.config.baseUrl, deviceId=device_id
)
)
r.raise_for_status()
return
def disable_wifi_prof(self, device_id):
r = requests.post(
"{baseUrl}/devices/{deviceId}/disable-wifi-prof".format(
baseUrl=self.config.baseUrl, deviceId=device_id
)
)
r.raise_for_status()
return
class ClientFactory:
def create(self, config=None):
        if config is None:
config = Config()
if os.environ.get("GBA_BASE_URL"):
config.baseUrl = os.environ.get("GBA_BASE_URL")
return Client(config)
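# Illustrative usage sketch: assumes GBA_BASE_URL points at a running server
# and that device/app/session records carry an "id" key (an assumption about
# the server's JSON shape).
if __name__ == "__main__":
    client = ClientFactory().create()
    devices = client.list_devices()
    if devices:
        apps = client.list_device_apps(devices[0]["id"])
        if apps:
            session = client.start_session(devices[0]["id"], apps[0]["id"])
            client.stop_session(session["id"])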
|
from bs4 import BeautifulSoup
# Get an attribute from an object, or an index from a list/dict, without raising
def get_attr_with_err(src_object, index, none_if_empty=False, obj_if_empty=False):
    result, is_error = None, False
    try:
        result = src_object[index]
        if type(result) == list and none_if_empty:
            if len(result) == 0 or len(result) < index:
                result = None
    except Exception:
        is_error = True
    if obj_if_empty and result is None:
        # Once result is None the original type information is lost; default
        # to an empty list, since the none_if_empty branch above only nulls
        # out lists.
        result = []
    return result, is_error
def get_attr(src_object, index, none_if_empty=False, obj_if_empty=False):
res, _ = get_attr_with_err(src_object, index, none_if_empty, obj_if_empty)
return res
def parse_tags(html, structured_tag, eq=None):
""" Parses structured tags like div .select > #id and fetches its values from html """
if html is None:
return []
# Split with > character
tag_blocks = structured_tag.split(">")
for tag_block in tag_blocks:
selector_tuple = parse_selector(tag_block)
html = fetch_html_with(*selector_tuple, html, eq=eq)
return html
def parse_selector(selector):
"""Parses a block of selectors like div .name #tag to class=.name, selector=div and id=#tag.
Returns (selector, id, class[]) """
    m_class, m_id, m_selector, mattr = [], None, None, {}
if selector is not None and type(selector) == str:
selector_labels = selector.split()
for label in selector_labels:
if label.startswith("."):
m_class.append(label)
elif label.startswith("#"):
if m_id is not None:
raise ValueError("Multiple id's are declared in block "+str(selector))
m_id = label
elif label.startswith("@@"):
attribute_block = str(label).split('=')
if len(attribute_block) < 2:
                    raise ValueError('Attribute does not match the format '
                                     '@@<attribute_name>=<attribute_value> (no spaces allowed)')
attr = attribute_block[0]
value = attribute_block[1]
mattr[attr] = value
else:
if m_selector is not None:
raise ValueError("Multiple selectors are declared in block "+str(selector))
m_selector = label
if mattr and not m_selector:
        raise AssertionError('If selection is done with an attribute @@<attr_name>=<attr_value>, '
                             'then a selector is required. E.g.: <selector> @@<attr_name>=<attr_value>')
return m_selector, m_id, m_class, mattr
def fetch_html_with(m_selector, m_id, m_class, mattr, html, eq=None):
if m_id:
        html = listify_soup(html, select=True, tupled_attrs=(m_id,), eq=eq)
elif (m_selector and m_class) or (m_selector and mattr):
attrs = {}
if m_class:
attrs['class'] = ' '.join([cls[1:] for cls in m_class])
for attr, value in mattr.items():
attrs[attr] = value
html = listify_soup(html,tupled_attrs=(m_selector, attrs), eq=eq)
elif m_selector is None and len(m_class) > 0:
cls_param = ''.join(m_class)
html = listify_soup(html, select=True, tupled_attrs=(cls_param,), eq=eq)
elif m_selector is not None and len(m_class) == 0 and m_id is None:
        html = listify_soup(html, tupled_attrs=(m_selector,), eq=eq)
return html
def listify_soup(soup_object_list, select=False, tupled_attrs=(), eq=None):
    result = []
    source_arr = soup_object_list
if type(soup_object_list) == BeautifulSoup:
result = [soup for soup in get_selector_result(source_arr, is_select=select, attrs=tupled_attrs, eq=eq)]
else:
result = [soup for soup_object in source_arr for soup in \
get_selector_result(soup_object, is_select=select, attrs=tupled_attrs, eq=eq) if soup is not None]
return result
def get_selector_result(soup, is_select=False, attrs = (), eq=None):
if soup is None:
return []
res = []
if eq is None:
res = apply_selector(soup,is_select, attrs)
elif eq is not None and type(eq) == int:
# Needs some more refactorings, for optimization
res = [get_attr(apply_selector(BeautifulSoup(str(sub_soup), "html.parser"), is_select, attrs), eq) \
for sub_soup in soup if str(sub_soup) is not None]
return res
def apply_selector(soup, is_select=False, attrs=()):
is_tuple = type(attrs) == tuple
res = []
if is_tuple:
        if is_select:
            res = soup.select(*attrs)
        else:
            res = soup.find_all(*attrs)
else:
if is_select:
res = soup.select(attrs)
else:
res = soup.find_all(attrs)
return res
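# Minimal usage sketch (the HTML snippet is made up for illustration): grab
# every <li> inside a div with class "menu" using the structured-tag syntax
# handled by parse_tags above.
if __name__ == "__main__":
    _doc = BeautifulSoup(
        "<div class='menu'><ul><li>a</li><li>b</li></ul></div>", "html.parser")
    items = parse_tags(_doc, "div .menu > li")
    print([i.get_text() for i in items])  # expected: ['a', 'b']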
|
"""'Git type Path Source."""
import logging
import shutil
import subprocess
import tempfile
from pathlib import Path
from typing import Any, Dict, Optional
from .source import Source
LOGGER = logging.getLogger(__name__)
class Git(Source):
"""Git Path Source.
The Git path source can be tasked with cloning a remote repository
and pointing to a specific module folder (or the root).
"""
def __init__(
self,
*,
arguments: Optional[Dict[str, str]] = None,
location: str = "",
uri: str = "",
**kwargs: Any,
) -> None:
"""Git Path Source.
Args:
arguments: A reference can be passed along via the arguments so that a specific
version of the repository is cloned. **commit**, **tag**, **branch**
are all valid keys with respective output
location: The relative location to the root of the repository where the
module resides. Leaving this as an empty string, ``/``, or ``./``
will have runway look in the root folder.
uri: The uniform resource identifier that targets the remote git repository
"""
self.args = arguments or {}
self.uri = uri
self.location = location
super().__init__(**kwargs)
def fetch(self) -> Path:
"""Retrieve the git repository from it's remote location."""
from git.repo import Repo # pylint: disable=import-outside-toplevel
ref = self.__determine_git_ref()
dir_name = "_".join([self.sanitize_git_path(self.uri), ref])
cached_dir_path = self.cache_dir / dir_name
if cached_dir_path.exists():
return cached_dir_path
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_repo_path = Path(tmpdirname) / dir_name
with Repo.clone_from(self.uri, str(tmp_repo_path)) as repo:
repo.head.set_reference(ref)
repo.head.reset(index=True, working_tree=True)
shutil.move(str(tmp_repo_path), self.cache_dir)
return cached_dir_path
def __git_ls_remote(self, ref: str) -> str:
"""List remote repositories based on uri and ref received.
Keyword Args:
ref (str): The git reference value
"""
cmd = ["git", "ls-remote", self.uri, ref]
LOGGER.debug("getting commit ID from repo: %s", " ".join(cmd))
ls_remote_output = subprocess.check_output(cmd)
if b"\t" in ls_remote_output:
commit_id = ls_remote_output.split(b"\t", maxsplit=1)[0].decode()
LOGGER.debug("matching commit id found: %s", commit_id)
return commit_id
raise ValueError(f'Ref "{ref}" not found for repo {self.uri}.')
def __determine_git_ls_remote_ref(self) -> str:
"""Determine remote ref, defaulting to HEAD unless a branch is found."""
ref = "HEAD"
if self.args.get("branch"):
ref = f"refs/heads/{self.args['branch']}"
return ref
def __determine_git_ref(self) -> str:
"""Determine the git reference code."""
ref_config_keys = sum(
bool(self.args.get(i)) for i in ["commit", "tag", "branch"]
)
if ref_config_keys > 1:
raise ValueError(
"Fetching remote git sources failed: conflicting revisions "
"(e.g. 'commit', 'tag', 'branch') specified for a package source"
)
if self.args.get("commit"):
return self.args["commit"]
if self.args.get("tag"):
return self.args["tag"]
return self.__git_ls_remote(self.__determine_git_ls_remote_ref())
@classmethod
def sanitize_git_path(cls, path: str) -> str:
"""Sanitize the git path for folder/file assignment.
Keyword Args:
path: The path string to be sanitized
"""
dir_name = path
        split = path.split("//")
        domain = split[-1]
if domain.endswith(".git"):
dir_name = domain[:-4]
return cls.sanitize_directory_path(dir_name)
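# Hypothetical usage sketch: fetch a tagged module from a remote repository.
# Whether additional kwargs (e.g. a cache directory) are required depends on
# the Source base class, which is not shown here.
#   src = Git(uri="https://example.com/org/repo.git", arguments={"tag": "v1.0.0"})
#   module_path = src.fetch()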
|
ParserException = type('ParserException', (Exception,), {})
ParserWarning = type('ParserWarning', (ParserException,), {})
ParserError = type('ParserError', (ParserException,), {})
InvalidSynonyms = type('InvalidSynonyms', (ParserException,), {})
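# Each three-argument type() call above is equivalent to an ordinary empty
# subclass definition, e.g.:
#   class ParserException(Exception):
#       pass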
|
# encoding: utf-8
import os
import sys
import re
import time
import inspect
import itertools
import pkgutil
from flask import Blueprint, send_from_directory
from flask.ctx import _AppCtxGlobals
from flask.sessions import SessionInterface
from flask_multistatic import MultiStaticFlask
import six
from werkzeug.exceptions import default_exceptions, HTTPException
from werkzeug.routing import Rule
from flask_babel import Babel
from beaker.middleware import SessionMiddleware
from ckan.common import asbool
from fanstatic import Fanstatic
from repoze.who.config import WhoConfig
from repoze.who.middleware import PluggableAuthenticationMiddleware
import ckan.model as model
from ckan.lib import base
from ckan.lib import helpers
from ckan.lib import jinja_extensions
from ckan.lib import uploader
from ckan.lib import i18n
from ckan.common import config, g, request, ungettext
from ckan.config.middleware.common_middleware import (TrackingMiddleware,
HostHeaderMiddleware)
import ckan.lib.app_globals as app_globals
import ckan.lib.plugins as lib_plugins
import ckan.plugins.toolkit as toolkit
from ckan.lib.webassets_tools import get_webassets_path
from ckan.plugins import PluginImplementations
from ckan.plugins.interfaces import IBlueprint, IMiddleware, ITranslation
from ckan.views import (identify_user,
set_cors_headers_for_response,
check_session_cookie,
set_controller_and_action,
set_cache_control_headers_for_response,
handle_i18n,
set_ckan_current_url,
)
import logging
from logging.handlers import SMTPHandler
log = logging.getLogger(__name__)
class I18nMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
handle_i18n(environ)
return self.app(environ, start_response)
class CKANBabel(Babel):
def __init__(self, *pargs, **kwargs):
super(CKANBabel, self).__init__(*pargs, **kwargs)
self._i18n_path_idx = 0
@property
def domain(self):
default = super(CKANBabel, self).domain
multiple = self.app.config.get('BABEL_MULTIPLE_DOMAINS')
if not multiple:
return default
domains = multiple.split(';')
try:
return domains[self._i18n_path_idx]
except IndexError:
return default
@property
def translation_directories(self):
self._i18n_path_idx = 0
for path in super(CKANBabel, self).translation_directories:
yield path
self._i18n_path_idx += 1
def make_flask_stack(conf):
""" This has to pass the flask app through all the same middleware that
Pylons used """
root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
debug = asbool(conf.get('debug', conf.get('DEBUG', False)))
testing = asbool(conf.get('testing', conf.get('TESTING', False)))
app = flask_app = CKANFlask(__name__, static_url_path='')
# Register storage for accessing group images, site logo, etc.
storage_folder = []
storage = uploader.get_storage_path()
if storage:
storage_folder = [os.path.join(storage, 'storage')]
# Static files folders (core and extensions)
public_folder = config.get(u'ckan.base_public_folder')
app.static_folder = config.get(
'extra_public_paths', ''
).split(',') + [os.path.join(root, public_folder)] + storage_folder
app.jinja_options = jinja_extensions.get_jinja_env_options()
app.jinja_env.policies['ext.i18n.trimmed'] = True
app.debug = debug
app.testing = testing
app.template_folder = os.path.join(root, 'templates')
app.app_ctx_globals_class = CKAN_AppCtxGlobals
app.url_rule_class = CKAN_Rule
# Update Flask config with the CKAN values. We use the common config
# object as values might have been modified on `load_environment`
if config:
app.config.update(config)
else:
app.config.update(conf)
# Do all the Flask-specific stuff before adding other middlewares
# Secret key needed for flask-debug-toolbar and sessions
if not app.config.get('SECRET_KEY'):
app.config['SECRET_KEY'] = config.get('beaker.session.secret')
if not app.config.get('SECRET_KEY'):
raise RuntimeError(u'You must provide a value for the secret key'
' with the SECRET_KEY config option')
root_path = config.get('ckan.root_path', None)
if debug:
from flask_debugtoolbar import DebugToolbarExtension
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
debug_ext = DebugToolbarExtension()
# register path that includes `ckan.site_root` before
# initializing debug app. In such a way, our route receives
# higher precedence.
# TODO: After removal of Pylons code, switch to
# `APPLICATION_ROOT` config value for flask application. Right
# now it's a bad option because we are handling both pylons
# and flask urls inside helpers and splitting this logic will
# bring us tons of headache.
if root_path:
app.add_url_rule(
root_path.replace('{{LANG}}', '').rstrip('/') +
'/_debug_toolbar/static/<path:filename>',
'_debug_toolbar.static', debug_ext.send_static_file
)
debug_ext.init_app(app)
from werkzeug.debug import DebuggedApplication
app.wsgi_app = DebuggedApplication(app.wsgi_app, True)
# Use Beaker as the Flask session interface
class BeakerSessionInterface(SessionInterface):
def open_session(self, app, request):
if 'beaker.session' in request.environ:
return request.environ['beaker.session']
def save_session(self, app, session, response):
session.save()
namespace = 'beaker.session.'
session_opts = {k.replace('beaker.', ''): v
for k, v in six.iteritems(config)
if k.startswith(namespace)}
if (not session_opts.get('session.data_dir') and
session_opts.get('session.type', 'file') == 'file'):
cache_dir = conf.get('cache_dir') or conf.get('cache.dir')
session_opts['session.data_dir'] = '{data_dir}/sessions'.format(
data_dir=cache_dir)
app.wsgi_app = SessionMiddleware(app.wsgi_app, session_opts)
app.session_interface = BeakerSessionInterface()
# Add Jinja2 extensions and filters
app.jinja_env.filters['empty_and_escape'] = \
jinja_extensions.empty_and_escape
# Common handlers for all requests
app.before_request(ckan_before_request)
app.after_request(ckan_after_request)
# Template context processors
app.context_processor(helper_functions)
app.context_processor(c_object)
@app.context_processor
def ungettext_alias():
u'''
Provide `ungettext` as an alias of `ngettext` for backwards
compatibility
'''
return dict(ungettext=ungettext)
# Babel
_ckan_i18n_dir = i18n.get_ckan_i18n_dir()
pairs = [
(_ckan_i18n_dir, u'ckan')
] + [
(p.i18n_directory(), p.i18n_domain())
for p in PluginImplementations(ITranslation)
]
i18n_dirs, i18n_domains = zip(*pairs)
app.config[u'BABEL_TRANSLATION_DIRECTORIES'] = ';'.join(i18n_dirs)
app.config[u'BABEL_DOMAIN'] = 'ckan'
app.config[u'BABEL_MULTIPLE_DOMAINS'] = ';'.join(i18n_domains)
babel = CKANBabel(app)
babel.localeselector(get_locale)
# WebAssets
_setup_webassets(app)
# Auto-register all blueprints defined in the `views` folder
_register_core_blueprints(app)
_register_error_handler(app)
# Set up each IBlueprint extension as a Flask Blueprint
for plugin in PluginImplementations(IBlueprint):
if hasattr(plugin, 'get_blueprint'):
plugin_blueprints = plugin.get_blueprint()
if not isinstance(plugin_blueprints, list):
plugin_blueprints = [plugin_blueprints]
for blueprint in plugin_blueprints:
app.register_extension_blueprint(blueprint)
lib_plugins.register_package_blueprints(app)
lib_plugins.register_group_blueprints(app)
# Set flask routes in named_routes
# TODO: refactor whatever helper is using this to not do it
if 'routes.named_routes' not in config:
config['routes.named_routes'] = {}
for rule in app.url_map.iter_rules():
if '.' not in rule.endpoint:
continue
controller, action = rule.endpoint.split('.')
needed = list(rule.arguments - set(rule.defaults or {}))
route = {
rule.endpoint: {
'action': action,
'controller': controller,
'highlight_actions': action,
'needed': needed
}
}
config['routes.named_routes'].update(route)
# Start other middleware
for plugin in PluginImplementations(IMiddleware):
app = plugin.make_middleware(app, config)
# Fanstatic
fanstatic_enable_rollup = asbool(
conf.get('fanstatic_enable_rollup', False))
if debug:
fanstatic_config = {
'versioning': True,
'recompute_hashes': True,
'minified': False,
'bottom': True,
'bundle': False,
'rollup': fanstatic_enable_rollup,
}
else:
fanstatic_config = {
'versioning': True,
'recompute_hashes': False,
'minified': True,
'bottom': True,
'bundle': True,
'rollup': fanstatic_enable_rollup,
}
if root_path:
root_path = re.sub('/{{LANG}}', '', root_path)
fanstatic_config['base_url'] = root_path
app = Fanstatic(app, **fanstatic_config)
for plugin in PluginImplementations(IMiddleware):
try:
app = plugin.make_error_log_middleware(app, config)
except AttributeError:
            log.critical('Middleware class {0} is missing the method '
                         'make_error_log_middleware.'
                         .format(plugin.__class__.__name__))
# Initialize repoze.who
who_parser = WhoConfig(conf['here'])
who_parser.parse(open(conf['who.config_file']))
app = PluggableAuthenticationMiddleware(
app,
who_parser.identifiers,
who_parser.authenticators,
who_parser.challengers,
who_parser.mdproviders,
who_parser.request_classifier,
who_parser.challenge_decider,
logging.getLogger('repoze.who'),
logging.WARN, # ignored
who_parser.remote_user_key
)
# Update the main CKAN config object with the Flask specific keys
# that were set here or autogenerated
flask_config_keys = set(flask_app.config.keys()) - set(config.keys())
for key in flask_config_keys:
config[key] = flask_app.config[key]
# Prevent the host from request to be added to the new header location.
app = HostHeaderMiddleware(app)
if six.PY3:
app = I18nMiddleware(app)
if asbool(config.get('ckan.tracking_enabled', 'false')):
app = TrackingMiddleware(app, config)
# Add a reference to the actual Flask app so it's easier to access
app._wsgi_app = flask_app
return app
def get_locale():
u'''
Return the value of the `CKAN_LANG` key of the WSGI environ,
set by the I18nMiddleware based on the URL.
If no value is defined, it defaults to `ckan.locale_default` or `en`.
'''
return request.environ.get(
u'CKAN_LANG',
config.get(u'ckan.locale_default', u'en'))
def ckan_before_request():
u'''
    Common handler executed before all Flask requests.
    If a response is returned by any of the functions called
    (currently ``identify_user()`` only), any further processing of the
    request will be stopped and that response will be returned.
'''
response = None
# Update app_globals
app_globals.app_globals._check_uptodate()
# Identify the user from the repoze cookie or the API header
# Sets g.user and g.userobj
response = identify_user()
# Provide g.controller and g.action for backward compatibility
# with extensions
set_controller_and_action()
set_ckan_current_url(request.environ)
g.__timer = time.time()
return response
def ckan_after_request(response):
u'''Common handler executed after all Flask requests'''
# Dispose of the SQLALchemy session
model.Session.remove()
# Check session cookie
response = check_session_cookie(response)
# Set CORS headers if necessary
response = set_cors_headers_for_response(response)
# Set Cache Control headers
response = set_cache_control_headers_for_response(response)
r_time = time.time() - g.__timer
url = request.environ['PATH_INFO']
log.debug(' %s render time %.3f seconds' % (url, r_time))
return response
def helper_functions():
u'''Make helper functions (`h`) available to Flask templates'''
if not helpers.helper_functions:
helpers.load_plugin_helpers()
return dict(h=helpers.helper_functions)
def c_object():
u'''
Expose `c` as an alias of `g` in templates for backwards compatibility
'''
return dict(c=g)
class CKAN_Rule(Rule):
u'''Custom Flask url_rule_class.
We use it to be able to flag routes defined in extensions as such
'''
def __init__(self, *args, **kwargs):
self.ckan_core = True
super(CKAN_Rule, self).__init__(*args, **kwargs)
class CKAN_AppCtxGlobals(_AppCtxGlobals):
'''Custom Flask AppCtxGlobal class (flask.g).'''
def __getattr__(self, name):
'''
If flask.g doesn't have attribute `name`, fall back to CKAN's
app_globals object.
If the key is also not found in there, an AttributeError will be raised
'''
return getattr(app_globals.app_globals, name)
class CKANFlask(MultiStaticFlask):
'''Extend the Flask class with a special method called on incoming
requests by AskAppDispatcherMiddleware.
'''
app_name = 'flask_app'
def can_handle_request(self, environ):
'''
Decides whether it can handle a request with the Flask app by
matching the request environ against the route mapper
Returns (True, 'flask_app', origin) if this is the case.
`origin` can be either 'core' or 'extension' depending on where
the route was defined.
'''
urls = self.url_map.bind_to_environ(environ)
try:
rule, args = urls.match(return_rule=True)
origin = 'core'
if hasattr(rule, 'ckan_core') and not rule.ckan_core:
origin = 'extension'
log.debug('Flask route match, endpoint: {0}, args: {1}, '
'origin: {2}'.format(rule.endpoint, args, origin))
# Disable built-in flask's ability to prepend site root to
# generated url, as we are going to use locale and existing
# logic is not flexible enough for this purpose
environ['SCRIPT_NAME'] = ''
return (True, self.app_name, origin)
except HTTPException:
return (False, self.app_name)
def register_extension_blueprint(self, blueprint, **kwargs):
'''
This method should be used to register blueprints that come from
extensions, so there's an opportunity to add extension-specific
options.
Sets the rule property `ckan_core` to False, to indicate that the rule
applies to an extension route.
'''
self.register_blueprint(blueprint, **kwargs)
# Get the new blueprint rules
bp_rules = itertools.chain.from_iterable(
v for k, v in six.iteritems(self.url_map._rules_by_endpoint)
if k.startswith(u'{0}.'.format(blueprint.name))
)
# This compare key will ensure the rule will be near the top.
top_compare_key = False, -100, [(-2, 0)]
for r in bp_rules:
r.ckan_core = False
r.match_compare_key = lambda: top_compare_key
def _register_core_blueprints(app):
u'''Register all blueprints defined in the `views` folder
'''
def is_blueprint(mm):
return isinstance(mm, Blueprint)
path = os.path.join(os.path.dirname(__file__), '..', '..', 'views')
for loader, name, _ in pkgutil.iter_modules([path], 'ckan.views.'):
module = loader.find_module(name).load_module(name)
for blueprint in inspect.getmembers(module, is_blueprint):
app.register_blueprint(blueprint[1])
log.debug(u'Registered core blueprint: {0!r}'.format(blueprint[0]))
def _register_error_handler(app):
u'''Register error handler'''
def error_handler(e):
        log.error(e, exc_info=True)
if isinstance(e, HTTPException):
extra_vars = {
u'code': e.code,
u'content': e.description,
u'name': e.name
}
return base.render(
u'error_document_template.html', extra_vars), e.code
extra_vars = {u'code': [500], u'content': u'Internal server error'}
return base.render(u'error_document_template.html', extra_vars), 500
for code in default_exceptions:
app.register_error_handler(code, error_handler)
if not app.debug and not app.testing:
app.register_error_handler(Exception, error_handler)
if config.get('email_to'):
_setup_error_mail_handler(app)
def _setup_error_mail_handler(app):
class ContextualFilter(logging.Filter):
def filter(self, log_record):
log_record.url = request.path
log_record.method = request.method
log_record.ip = request.environ.get("REMOTE_ADDR")
log_record.headers = request.headers
return True
smtp_server = config.get('smtp.server', 'localhost')
mailhost = tuple(smtp_server.split(':')) \
if ':' in smtp_server else smtp_server
credentials = None
if config.get('smtp.user'):
credentials = (config.get('smtp.user'), config.get('smtp.password'))
secure = () if asbool(config.get('smtp.starttls')) else None
mail_handler = SMTPHandler(
mailhost=mailhost,
fromaddr=config.get('error_email_from'),
toaddrs=[config.get('email_to')],
subject='Application Error',
credentials=credentials,
secure=secure
)
mail_handler.setFormatter(logging.Formatter('''
Time: %(asctime)s
URL: %(url)s
Method: %(method)s
IP: %(ip)s
Headers: %(headers)s
'''))
context_provider = ContextualFilter()
app.logger.addFilter(context_provider)
app.logger.addHandler(mail_handler)
def _setup_webassets(app):
app.use_x_sendfile = toolkit.asbool(
config.get('ckan.webassets.use_x_sendfile')
)
webassets_folder = get_webassets_path()
@app.route('/webassets/<path:path>', endpoint='webassets.index')
def webassets(path):
return send_from_directory(webassets_folder, path)
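# Illustrative note (an assumption about the entry point, which is not shown
# here): CKAN's WSGI layer builds this stack roughly as
#   application = make_flask_stack(conf)
# where `conf` is the already-loaded CKAN configuration dict.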
|
from datetime import date
from datetime import timedelta
# Calculating the days until Christmas
today_date = date.today()
christmas = date(2020, 12, 25)
days_until_christmas = (christmas - today_date).days
# print(f'days_until_christmas = {days_until_christmas}')
# if today_date == christmas:
# print('Today is Christmas!')
# elif days_until_christmas > 0:
# print(f'Christmas is coming! It will be here in {days_until_christmas} days! :)')
# elif days_until_christmas < 0:
# print(f'Christmas has passed. It was {abs(days_until_christmas)} days ago. :(')
# Calculating the days until EOL
asset_lifespan = timedelta(days=1825)
print(str(asset_lifespan)) # DEBUG
asset_purchase_date = date(2018, 7, 1)
asset_eol = asset_purchase_date + asset_lifespan
print(f'This asset will reach end of life on {str(asset_eol)}')
print(f'There are {(asset_eol - today_date).days} days left until replacement')
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import collections
import copy
from keystoneauth1 import exceptions as ks_exc
import os_traits
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import retrying
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats as compute_stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
"""Returns True if the instance is in one of the resizing states.
:param instance: `nova.objects.Instance` object
"""
vm = instance.vm_state
task = instance.task_state
if vm == vm_states.RESIZED:
return True
if vm in [vm_states.ACTIVE, vm_states.STOPPED] and task in (
task_states.resizing_states + task_states.rebuild_states):
return True
return False
def _instance_is_live_migrating(instance):
vm = instance.vm_state
task = instance.task_state
if task == task_states.MIGRATING and vm in [vm_states.ACTIVE,
vm_states.PAUSED]:
return True
return False
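# Illustrative only (`instance` is a hypothetical nova.objects.Instance): the
# tracker below uses these predicates to decide whether an instance's
# resources are tied up in a move operation, e.g.:
#   if _instance_in_resize_state(instance) or _instance_is_live_migrating(instance):
#       ...  # account for the instance on both source and destination hosts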
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver, reportclient=None):
self.host = host
self.driver = driver
self.pci_tracker = None
# Dict of objects.ComputeNode objects, keyed by nodename
self.compute_nodes = {}
# Dict of Stats objects, keyed by nodename
self.stats = collections.defaultdict(compute_stats.Stats)
# Set of UUIDs of instances tracked on this host.
self.tracked_instances = set()
self.tracked_migrations = {}
self.is_bfv = {} # dict, keyed by instance uuid, to is_bfv boolean
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
self.reportclient = reportclient or report.SchedulerReportClient()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
self.provider_tree = None
# Dict of assigned_resources, keyed by resource provider uuid
# the value is a dict again, keyed by resource class
# and value of this sub-dict is a set of Resource obj
self.assigned_resources = collections.defaultdict(
lambda: collections.defaultdict(set))
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def instance_claim(self, context, instance, nodename, allocations,
limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance: instance to reserve resources for.
:type instance: nova.objects.instance.Instance object
:param nodename: The Ironic nodename selected by the scheduler
:param allocations: The placement allocation records for the instance.
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled(nodename):
# instance_claim() was called before update_available_resource()
# (which ensures that a compute node exists for nodename). We
# shouldn't get here but in case we do, just set the instance's
# host and nodename attribute (probably incorrect) and return a
# NoopClaim.
# TODO(jaypipes): Remove all the disabled junk from the resource
# tracker. Servicegroup API-level active-checking belongs in the
# nova-compute manager.
self._set_instance_host_and_node(instance, nodename)
return claims.NopClaim()
# sanity checks:
if instance.host:
LOG.warning("Host field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
if instance.node:
LOG.warning("Node field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
cn = self.compute_nodes[nodename]
pci_requests = instance.pci_requests
claim = claims.Claim(context, instance, nodename, self, cn,
pci_requests, limits=limits)
# self._set_instance_host_and_node() will save instance to the DB
# so set instance.numa_topology first. We need to make sure
# that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
# so that the resource audit knows about any cpus we've pinned.
instance_numa_topology = claim.claimed_numa_topology
instance.numa_topology = instance_numa_topology
self._set_instance_host_and_node(instance, nodename)
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
self.pci_tracker.claim_instance(context, pci_requests,
instance_numa_topology)
claimed_resources = self._claim_resources(allocations)
instance.resources = claimed_resources
# Mark resources in-use and update stats
self._update_usage_from_instance(context, instance, nodename)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated, cn)
return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def rebuild_claim(self, context, instance, nodename, allocations,
limits=None, image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
instance_type = instance.flavor
return self._move_claim(context, instance, instance_type, nodename,
migration, allocations, move_type='evacuation',
limits=limits, image_meta=image_meta)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def resize_claim(self, context, instance, instance_type, nodename,
migration, allocations, image_meta=None, limits=None):
"""Create a claim for a resize or cold-migration move.
Note that this code assumes ``instance.new_flavor`` is set when
resizing with a new flavor.
"""
return self._move_claim(context, instance, instance_type, nodename,
migration, allocations, image_meta=image_meta,
limits=limits)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def live_migration_claim(self, context, instance, nodename, migration,
limits):
"""Builds a MoveClaim for a live migration.
:param context: The request context.
:param instance: The instance being live migrated.
:param nodename: The nodename of the destination host.
:param migration: The Migration object associated with this live
migration.
:param limits: A SchedulerLimits object from when the scheduler
selected the destination host.
:returns: A MoveClaim for this live migration.
"""
# Flavor and image cannot change during a live migration.
instance_type = instance.flavor
image_meta = instance.image_meta
# TODO(Luyao) will pass allocations to live_migration_claim after the
# live migration change is done, now just set it None to _move_claim
return self._move_claim(context, instance, instance_type, nodename,
migration, None, move_type='live-migration',
image_meta=image_meta, limits=limits)
def _move_claim(self, context, instance, new_instance_type, nodename,
migration, allocations, move_type=None,
image_meta=None, limits=None):
"""Indicate that resources are needed for a move to this host.
Move can be either a migrate/resize, live-migrate or an
evacuate/rebuild operation.
:param context: security context
:param instance: instance object to reserve resources for
:param new_instance_type: new instance_type being resized to
:param nodename: The Ironic nodename selected by the scheduler
:param migration: A migration object if one was already created
elsewhere for this operation (otherwise None)
:param allocations: the placement allocation records.
:param move_type: move type - can be one of 'migration', 'resize',
'live-migration', 'evacuate'
:param image_meta: instance image metadata
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs
        :returns: A Claim ticket representing the reserved resources. This
                  should be used to finalize the resource claim or free the
                  resources after the compute operation is finished.
"""
image_meta = image_meta or {}
if migration:
self._claim_existing_migration(migration, nodename)
else:
migration = self._create_migration(context, instance,
new_instance_type,
nodename, move_type)
if self.disabled(nodename):
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
return claims.NopClaim(migration=migration)
cn = self.compute_nodes[nodename]
# TODO(moshele): we are recreating the pci requests even if
# there was no change on resize. This will cause allocating
# the old/new pci device in the resize phase. In the future
# we would like to optimise this.
new_pci_requests = pci_request.get_pci_requests_from_flavor(
new_instance_type)
new_pci_requests.instance_uuid = instance.uuid
# On resize merge the SR-IOV ports pci_requests
# with the new instance flavor pci_requests.
if instance.pci_requests:
for request in instance.pci_requests.requests:
if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
new_instance_type, image_meta, self, cn,
new_pci_requests, migration, limits=limits)
claimed_pci_devices_objs = []
# TODO(artom) The second part of this condition should not be
# necessary, but since SRIOV live migration is currently handled
# elsewhere - see for example _claim_pci_for_instance_vifs() in the
# compute manager - we don't do any PCI claims if this is a live
# migration to avoid stepping on that code's toes. Ideally,
# MoveClaim/this method would be used for all live migration resource
# claims.
if self.pci_tracker and migration.migration_type != 'live-migration':
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
claimed_pci_devices_objs = self.pci_tracker.claim_instance(
context, new_pci_requests, claim.claimed_numa_topology)
claimed_pci_devices = objects.PciDeviceList(
objects=claimed_pci_devices_objs)
claimed_resources = self._claim_resources(allocations)
old_resources = instance.resources
        # TODO(jaypipes): Move claimed_numa_topology out of the Claim's
        # constructor flow so the Claim constructor only tests whether
        # resources can be claimed, rather than consuming them directly.
mig_context = objects.MigrationContext(
context=context, instance_uuid=instance.uuid,
migration_id=migration.id,
old_numa_topology=instance.numa_topology,
new_numa_topology=claim.claimed_numa_topology,
old_pci_devices=instance.pci_devices,
new_pci_devices=claimed_pci_devices,
old_pci_requests=instance.pci_requests,
new_pci_requests=new_pci_requests,
old_resources=old_resources,
new_resources=claimed_resources)
instance.migration_context = mig_context
instance.save()
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance, migration,
nodename)
elevated = context.elevated()
self._update(elevated, cn)
return claim
def _create_migration(self, context, instance, new_instance_type,
nodename, move_type=None):
"""Create a migration record for the upcoming resize. This should
        be done while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_instance_type.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
"""Make an existing migration record count for resource tracking.
If a migration record was created already before the request made
it to this compute host, only set up the migration so it's included in
resource tracking. This should be done while the
        COMPUTE_RESOURCE_SEMAPHORE is held.
"""
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
# NOTE(artom) Migration objects for live migrations are created with
# status 'accepted' by the conductor in live_migrate_instance() and do
# not have a 'pre-migrating' status.
if migration.migration_type != 'live-migration':
migration.status = 'pre-migrating'
migration.save()
def _claim_resources(self, allocations):
"""Claim resources according to assigned resources from allocations
and available resources in provider tree
"""
if not allocations:
return None
claimed_resources = []
for rp_uuid, alloc_dict in allocations.items():
try:
provider_data = self.provider_tree.data(rp_uuid)
except ValueError:
                # If an instance is being evacuated, it will hold both new
                # and old allocations, but the provider UUIDs in the old
                # allocations won't exist in the current provider tree, so
                # skip them.
LOG.debug("Skip claiming resources of provider %(rp_uuid)s, "
"since the provider UUIDs are not in provider tree.",
{'rp_uuid': rp_uuid})
continue
for rc, amount in alloc_dict['resources'].items():
if rc not in provider_data.resources:
                    # This resource class (e.g. 'VCPU') is not assigned via
                    # provider_data.resources; if it were, it would have been
                    # populated there when the provider tree was updated, so
                    # skip it here.
continue
assigned = self.assigned_resources[rp_uuid][rc]
free = provider_data.resources[rc] - assigned
if amount > len(free):
reason = (_("Needed %(amount)d units of resource class "
"%(rc)s, but %(avail)d are available.") %
{'amount': amount,
'rc': rc,
'avail': len(free)})
raise exception.ComputeResourcesUnavailable(reason=reason)
for i in range(amount):
claimed_resources.append(free.pop())
if claimed_resources:
self._add_assigned_resources(claimed_resources)
return objects.ResourceList(objects=claimed_resources)
def _populate_assigned_resources(self, context, instance_by_uuid):
"""Populate self.assigned_resources organized by resource class and
        resource provider uuid, in the following format:
{
$RP_UUID: {
$RESOURCE_CLASS: [objects.Resource, ...],
$RESOURCE_CLASS: [...]},
...}
"""
resources = []
# Get resources assigned to migrations
for mig in self.tracked_migrations.values():
mig_ctx = mig.instance.migration_context
# We might have a migration whose instance hasn't arrived here yet.
# Ignore it.
if not mig_ctx:
continue
if mig.source_compute == self.host and 'old_resources' in mig_ctx:
resources.extend(mig_ctx.old_resources or [])
if mig.dest_compute == self.host and 'new_resources' in mig_ctx:
resources.extend(mig_ctx.new_resources or [])
# Get resources assigned to instances
for uuid in self.tracked_instances:
resources.extend(instance_by_uuid[uuid].resources or [])
self.assigned_resources.clear()
self._add_assigned_resources(resources)
def _check_resources(self, context):
"""Check if there are assigned resources not found in provider tree"""
notfound = set()
for rp_uuid in self.assigned_resources:
provider_data = self.provider_tree.data(rp_uuid)
for rc, assigned in self.assigned_resources[rp_uuid].items():
notfound |= (assigned - provider_data.resources[rc])
if not notfound:
return
# This only happens when assigned resources are removed
# from the configuration and the compute service is SIGHUP'd
# or restarted.
resources = [(res.identifier, res.resource_class) for res in notfound]
reason = _("The following resources are assigned to instances, "
"but were not listed in the configuration: %s "
"Please check if this will influence your instances, "
"and restore your configuration if necessary") % resources
raise exception.AssignedResourceNotFound(reason=reason)
def _release_assigned_resources(self, resources):
"""Remove resources from self.assigned_resources."""
if not resources:
return
for resource in resources:
rp_uuid = resource.provider_uuid
rc = resource.resource_class
try:
self.assigned_resources[rp_uuid][rc].remove(resource)
except KeyError:
LOG.warning("Release resource %(rc)s: %(id)s of provider "
"%(rp_uuid)s, not tracked in "
"ResourceTracker.assigned_resources.",
{'rc': rc, 'id': resource.identifier,
'rp_uuid': rp_uuid})
def _add_assigned_resources(self, resources):
"""Add resources to self.assigned_resources"""
if not resources:
return
for resource in resources:
rp_uuid = resource.provider_uuid
rc = resource.resource_class
self.assigned_resources[rp_uuid][rc].add(resource)
def _set_instance_host_and_node(self, instance, nodename):
"""Tag the instance as belonging to this host. This should be done
        while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
# NOTE(mriedem): ComputeManager._nil_out_instance_obj_host_and_node is
# somewhat tightly coupled to the fields set in this method so if this
# method changes that method might need to be updated.
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
"""Untag the instance so it no longer belongs to the host.
        This should be done while the COMPUTE_RESOURCE_SEMAPHORE is held so
the resource claim will not be lost if the audit process starts.
"""
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def abort_instance_claim(self, context, instance, nodename):
"""Remove usage from the given instance."""
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
# free old/new allocated pci devices
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def drop_move_claim(self, context, instance, nodename,
instance_type=None, prefix='new_'):
"""Remove usage for an incoming/outgoing migration.
:param context: Security context.
:param instance: The instance whose usage is to be removed.
:param nodename: Host on which to remove usage. If the migration
completed successfully, this is normally the source.
If it did not complete successfully (failed or
reverted), this is normally the destination.
:param instance_type: The flavor that determines the usage to remove.
If the migration completed successfully, this is
the old flavor to be removed from the source. If
the migration did not complete successfully, this
is the new flavor to be removed from the
destination.
:param prefix: Prefix to use when accessing migration context
attributes. 'old_' or 'new_', with 'new_' being the
default.
"""
# Remove usage for an instance that is tracked in migrations, such as
# on the dest node during revert resize.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
instance_type = self._get_instance_type(instance, prefix,
migration)
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
# included in both tracked_migrations and tracked_instances.
elif instance['uuid'] in self.tracked_instances:
self.tracked_instances.remove(instance['uuid'])
if instance_type is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
instance_type, instance, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
resources = self._get_migration_context_resource(
'resources', instance, prefix=prefix)
self._release_assigned_resources(resources)
self._update_usage(usage, nodename, sign=-1)
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled(nodename):
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
def _check_for_nodes_rebalance(self, context, resources, nodename):
"""Check if nodes rebalance has happened.
The ironic driver maintains a hash ring mapping bare metal nodes
to compute nodes. If a compute dies, the hash ring is rebuilt, and
some of its bare metal nodes (more precisely, those not in ACTIVE
state) are assigned to other computes.
This method checks for this condition and adjusts the database
accordingly.
:param context: security context
:param resources: initial values
:param nodename: node name
:returns: True if a suitable compute node record was found, else False
"""
if not self.driver.rebalances_nodes:
return False
        # It's possible Ironic just did a node rebalance, so let's
# check if there is a compute node that already has the correct
# hypervisor_hostname. We can re-use that rather than create a
# new one and have to move existing placement allocations
cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
context, nodename)
if len(cn_candidates) == 1:
cn = cn_candidates[0]
LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
{"name": nodename, "old": cn.host, "new": self.host})
cn.host = self.host
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return True
elif len(cn_candidates) > 1:
LOG.error(
"Found more than one ComputeNode for nodename %s. "
"Please clean up the orphaned ComputeNode records in your DB.",
nodename)
return False
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
The resource tracker will be inoperable if compute_node
is not defined. The compute_node will remain undefined if
we fail to create it or if there is no associated service
registered.
If this method has to create a compute node it needs initial
values - these come from resources.
:param context: security context
:param resources: initial values
:returns: True if a new compute_nodes table record was created,
False otherwise
"""
nodename = resources['hypervisor_hostname']
# if there is already a compute node just use resources
# to initialize
if nodename in self.compute_nodes:
cn = self.compute_nodes[nodename]
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
cn = self._get_compute_node(context, nodename)
if cn:
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
if self._check_for_nodes_rebalance(context, resources, nodename):
return False
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources, initial=True)
cn.create()
# Only map the ComputeNode into compute_nodes if create() was OK
# because if create() fails, on the next run through here nodename
# would be in compute_nodes and we won't try to create again (because
# of the logic above).
self.compute_nodes[nodename] = cn
LOG.info('Compute node record created for '
'%(host)s:%(node)s with uuid: %(uuid)s',
{'host': self.host, 'node': nodename, 'uuid': cn.uuid})
self._setup_pci_tracker(context, cn, resources)
return True
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
n_id = compute_node.id
self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
if 'pci_passthrough_devices' in resources:
dev_json = resources.pop('pci_passthrough_devices')
self.pci_tracker.update_devices_from_hypervisor_resources(
dev_json)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources, initial=False):
"""Copy resource values to supplied compute_node."""
nodename = resources['hypervisor_hostname']
stats = self.stats[nodename]
# purge old stats and init with anything passed in by the driver
# NOTE(danms): Preserve 'failed_builds' across the stats clearing,
# as that is not part of resources
# TODO(danms): Stop doing this when we get a column to store this
# directly
prev_failed_builds = stats.get('failed_builds', 0)
stats.clear()
stats['failed_builds'] = prev_failed_builds
stats.digest_stats(resources.get('stats'))
compute_node.stats = stats
# Update the allocation ratios for the related ComputeNode object
# but only if the configured values are not the default; the
# ComputeNode._from_db_object method takes care of providing default
# allocation ratios when the config is left at the default, so
# we'll really end up with something like a
# ComputeNode.cpu_allocation_ratio of 16.0. We want to avoid
# resetting the ComputeNode fields to None because that will make
# the _resource_change method think something changed when really it
# didn't.
# NOTE(yikun): The CONF.initial_(cpu|ram|disk)_allocation_ratio would
# be used when we initialize the compute node object, that means the
# ComputeNode.(cpu|ram|disk)_allocation_ratio will be set to
# CONF.initial_(cpu|ram|disk)_allocation_ratio when initial flag is
# True.
for res in ('cpu', 'disk', 'ram'):
attr = '%s_allocation_ratio' % res
if initial:
conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr)
else:
conf_alloc_ratio = getattr(self, attr)
# NOTE(yikun): In Stein version, we change the default value of
# (cpu|ram|disk)_allocation_ratio from 0.0 to None, but we still
# should allow 0.0 to keep compatibility, and this 0.0 condition
# will be removed in the next version (T version).
if conf_alloc_ratio not in (0.0, None):
setattr(compute_node, attr, conf_alloc_ratio)
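        # Worked example (illustrative values, not from the code above):
        # with CONF.initial_cpu_allocation_ratio = 4.0 and
        # cpu_allocation_ratio left at its default (None), a freshly created
        # node gets cpu_allocation_ratio = 4.0; on later periodic updates
        # the None/0.0 config value is skipped, so the stored ratio is kept.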
# now copy rest to compute_node
compute_node.update_from_virt_driver(resources)
def remove_node(self, nodename):
"""Handle node removal/rebalance.
Clean up any stored data about a compute node no longer
managed by this host.
"""
self.stats.pop(nodename, None)
self.compute_nodes.pop(nodename, None)
self.old_resources.pop(nodename, None)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except NotImplementedError:
LOG.debug("The compute driver doesn't support host "
"metrics for %(mon)s", {'mon': monitor})
except Exception as exc:
LOG.warning("Cannot get the metrics from %(mon)s; "
"error: %(exc)s",
{'mon': monitor, 'exc': exc})
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
# to be populated as a JSONified string.
metric_list = metrics.to_list()
        if metric_list:
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metric_list
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
compute_utils.notify_about_metrics_update(
context, self.host, CONF.my_ip, nodename, metrics)
return metric_list
def update_available_resource(self, context, nodename, startup=False):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
:param nodename: Temporary parameter representing the Ironic resource
node. This parameter will be removed once Ironic
baremetal resource nodes are handled like any other
resource in the system.
:param startup: Boolean indicating whether we're running this on
                        startup (True) or periodic (False).
"""
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
# NOTE(jaypipes): The resources['hypervisor_hostname'] field now
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
# just force it to empty string
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources, startup=startup)
def _pair_instances_to_migrations(self, migrations, instance_by_uuid):
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# NOTE(danms): If this happens, we don't set it here, and
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
                # NOTE(tdurakov): This situation is possible for a
                # resize/cold migration when the migration has finished but
                # has not yet been confirmed/reverted; in that case the
                # instance has already changed its host to the destination
                # and no match happens.
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def _update_available_resource(self, context, resources, startup=False):
# initialize the compute node object, creating it
# if it does not already exist.
is_new_compute_node = self._init_compute_node(context, resources)
nodename = resources['hypervisor_hostname']
# if we could not init the compute node the tracker will be
# disabled and we should quit now
if self.disabled(nodename):
return
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, nodename,
expected_attrs=['system_metadata',
'numa_topology',
'flavor', 'migration_context',
'resources'])
# Now calculate usage based on instance utilization:
instance_by_uuid = self._update_usage_from_instances(
context, instances, nodename)
# Grab all in-progress migrations:
migrations = objects.MigrationList.get_in_progress_by_host_and_node(
context, self.host, nodename)
self._pair_instances_to_migrations(migrations, instance_by_uuid)
self._update_usage_from_migrations(context, migrations, nodename)
# A new compute node means there won't be a resource provider yet since
# that would be created via the _update() call below, and if there is
# no resource provider then there are no allocations against it.
if not is_new_compute_node:
self._remove_deleted_instances_allocations(
context, self.compute_nodes[nodename], migrations,
instance_by_uuid)
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(orphans, nodename)
cn = self.compute_nodes[nodename]
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need to remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations, orphans)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
metrics = self._get_host_metrics(context, nodename)
# TODO(pmurray): metrics should not be a json string in ComputeNode,
# but it is. This should be changed in ComputeNode
cn.metrics = jsonutils.dumps(metrics)
# Update assigned resources to self.assigned_resources
self._populate_assigned_resources(context, instance_by_uuid)
# update the compute_node
self._update(context, cn, startup=startup)
LOG.debug('Compute_service record updated for %(host)s:%(node)s',
{'host': self.host, 'node': nodename})
# Check if there is any resource assigned but not found
# in provider tree
if startup:
self._check_resources(context)
def _get_compute_node(self, context, nodename):
"""Returns compute node for the host and nodename."""
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
{'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
else:
free_vcpus = 'unknown'
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.debug("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s",
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
"""Check to see if any resources have changed."""
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
def _sync_compute_service_disabled_trait(self, context, traits):
"""Synchronize the COMPUTE_STATUS_DISABLED trait on the node provider.
Determines if the COMPUTE_STATUS_DISABLED trait should be added to
or removed from the provider's set of traits based on the related
nova-compute service disabled status.
:param context: RequestContext for cell database access
:param traits: set of traits for the compute node resource provider;
this is modified by reference
"""
trait = os_traits.COMPUTE_STATUS_DISABLED
try:
service = objects.Service.get_by_compute_host(context, self.host)
if service.disabled:
# The service is disabled so make sure the trait is reported.
traits.add(trait)
else:
# The service is not disabled so do not report the trait.
traits.discard(trait)
except exception.NotFound:
# This should not happen but handle it gracefully. The scheduler
# should ignore this node if the compute service record is gone.
LOG.error('Unable to find services table record for nova-compute '
'host %s', self.host)
def _get_traits(self, context, nodename, provider_tree):
"""Synchronizes internal and external traits for the node provider.
        This works in conjunction with the ComputeDriver.update_provider_tree
flow and is used to synchronize traits reported by the compute driver,
traits based on information in the ComputeNode record, and traits set
externally using the placement REST API.
:param context: RequestContext for cell database access
:param nodename: ComputeNode.hypervisor_hostname for the compute node
resource provider whose traits are being synchronized; the node
must be in the ProviderTree.
:param provider_tree: ProviderTree being updated
"""
# Get the traits from the ProviderTree which will be the set
# of virt-owned traits plus any externally defined traits set
# on the provider that aren't owned by the virt driver.
traits = provider_tree.data(nodename).traits
# Now get the driver's capabilities and add any supported
# traits that are missing, and remove any existing set traits
# that are not currently supported.
for trait, supported in self.driver.capabilities_as_traits().items():
if supported:
traits.add(trait)
elif trait in traits:
traits.remove(trait)
# Always mark the compute node. This lets other processes (possibly
# unrelated to nova or even OpenStack) find and distinguish these
# providers easily.
traits.add(os_traits.COMPUTE_NODE)
self._sync_compute_service_disabled_trait(context, traits)
return list(traits)
@retrying.retry(stop_max_attempt_number=4,
retry_on_exception=lambda e: isinstance(
e, exception.ResourceProviderUpdateConflict))
def _update_to_placement(self, context, compute_node, startup):
"""Send resource and inventory changes to placement."""
        # NOTE(jianghuaw): Some resources (e.g. VGPU) are not saved in the
        # compute_node object; instead, the inventory data for these
        # resources is reported by the driver's update_provider_tree(). So
        # even if there is no resource change for compute_node, we need to
        # proceed to get the inventory and use the report client interfaces
        # to update it in placement. It is the report client's
        # responsibility to ensure the update request to placement only
        # happens when the inventory has changed.
nodename = compute_node.hypervisor_hostname
# Persist the stats to the Scheduler
# Retrieve the provider tree associated with this compute node. If
# it doesn't exist yet, this will create it with a (single, root)
# provider corresponding to the compute node.
prov_tree = self.reportclient.get_provider_tree_and_ensure_root(
context, compute_node.uuid, name=compute_node.hypervisor_hostname)
# Let the virt driver rearrange the provider tree and set/update
# the inventory, traits, and aggregates throughout.
allocs = None
try:
self.driver.update_provider_tree(prov_tree, nodename)
except exception.ReshapeNeeded:
if not startup:
# This isn't supposed to happen during periodic, so raise
# it up; the compute manager will treat it specially.
raise
LOG.info("Performing resource provider inventory and "
"allocation data migration during compute service "
"startup or fast-forward upgrade.")
allocs = self.reportclient.get_allocations_for_provider_tree(
context, nodename)
self.driver.update_provider_tree(prov_tree, nodename,
allocations=allocs)
# Inject driver capabilities traits into the provider
# tree. We need to determine the traits that the virt
# driver owns - so those that come from the tree itself
# (via the virt driver) plus the compute capabilities
# traits, and then merge those with the traits set
# externally that the driver does not own - and remove any
# set on the provider externally that the virt owns but
# aren't in the current list of supported traits. For
# example, let's say we reported multiattach support as a
# trait at t1 and then at t2 it's not, so we need to
# remove it. But at both t1 and t2 there is a
# CUSTOM_VENDOR_TRAIT_X which we can't touch because it
# was set externally on the provider.
# We also want to sync the COMPUTE_STATUS_DISABLED trait based
# on the related nova-compute service's disabled status.
traits = self._get_traits(
context, nodename, provider_tree=prov_tree)
prov_tree.update_traits(nodename, traits)
self.provider_tree = prov_tree
# Flush any changes. If we processed ReshapeNeeded above, allocs is not
# None, and this will hit placement's POST /reshaper route.
self.reportclient.update_from_provider_tree(context, prov_tree,
allocations=allocs)
def _update(self, context, compute_node, startup=False):
"""Update partial stats locally and populate them to Scheduler."""
# _resource_change will update self.old_resources if it detects changes
# but we want to restore those if compute_node.save() fails.
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB. Note that
# _update_to_placement below does not supersede the need to do this
# because there are stats-related fields in the ComputeNode object
# which could have changed and still need to be reported to the
# scheduler filters/weighers (which could be out of tree as well).
try:
compute_node.save()
except Exception:
# Restore the previous state in self.old_resources so that on
# the next trip through here _resource_change does not have
# stale data to compare.
with excutils.save_and_reraise_exception(logger=LOG):
self.old_resources[nodename] = old_compute
self._update_to_placement(context, compute_node, startup)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
# TODO(stephenfin): We don't use the CPU, RAM and disk fields for much
# except 'Aggregate(Core|Ram|Disk)Filter', the 'os-hypervisors' API,
# and perhaps some out-of-tree filters. Once the in-tree stuff is
# removed or updated to use information from placement, we can think
# about dropping the fields from the 'ComputeNode' object entirely
mem_usage = usage['memory_mb']
disk_usage = usage.get('root_gb', 0)
vcpus_usage = usage.get('vcpus', 0)
cn = self.compute_nodes[nodename]
cn.memory_mb_used += sign * mem_usage
cn.local_gb_used += sign * disk_usage
cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
cn.local_gb_used += sign * usage.get('swap', 0) / 1024
cn.vcpus_used += sign * vcpus_usage
# free ram and disk may be negative, depending on policy:
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
stats = self.stats[nodename]
cn.running_vms = stats.num_instances
# calculate the NUMA usage, assuming the instance is actually using
# NUMA, of course
if cn.numa_topology and usage.get('numa_topology'):
instance_numa_topology = usage.get('numa_topology')
# the ComputeNode.numa_topology field is a StringField, so
# deserialize
host_numa_topology = objects.NUMATopology.obj_from_db_obj(
cn.numa_topology)
free = sign == -1
# ...and reserialize once we save it back
cn.numa_topology = hardware.numa_usage_from_instance_numa(
host_numa_topology, instance_numa_topology, free)._to_json()
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
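        # Example (illustrative): calling this with resource='numa_topology'
        # and prefix='old_' returns migration_context.old_numa_topology, or
        # None if the instance has no migration context.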
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
def _update_usage_from_migration(self, context, instance, migration,
nodename):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
uuid = migration.instance_uuid
LOG.info("Updating resource usage from migration %s", migration.uuid,
instance_uuid=uuid)
incoming = (migration.dest_compute == self.host and
migration.dest_node == nodename)
outbound = (migration.source_compute == self.host and
migration.source_node == nodename)
same_node = (incoming and outbound)
tracked = uuid in self.tracked_instances
itype = None
numa_topology = None
sign = 0
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
if (instance['instance_type_id'] ==
migration.old_instance_type_id):
itype = self._get_instance_type(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
else:
# The instance is already set to the new flavor (this is done
# by the compute manager on finish_resize()), hold space for a
# possible revert to the 'old_' resources.
# NOTE(lbeliveau): When the periodic audit timer gets
# triggered, the compute usage gets reset. The usage for an
# instance that is migrated to the new flavor but not yet
# confirmed/reverted will first get accounted for by
# _update_usage_from_instances(). This method will then be
# called, and we need to account for the '_old' resources
# (just in case).
itype = self._get_instance_type(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not tracked:
# instance has not yet migrated here:
itype = self._get_instance_type(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
LOG.debug('Starting to track incoming migration %s with flavor %s',
migration.uuid, itype.flavorid, instance=instance)
elif outbound and not tracked:
# instance migrated, but record usage for a possible revert:
itype = self._get_instance_type(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
# We could be racing with confirm_resize setting the
# instance.old_flavor field to None before the migration status
# is "confirmed" so if we did not find the flavor in the outgoing
# resized instance we won't track it.
if itype:
LOG.debug('Starting to track outgoing migration %s with '
'flavor %s', migration.uuid, itype.flavorid,
instance=instance)
if itype:
cn = self.compute_nodes[nodename]
usage = self._get_usage_dict(
itype, instance, numa_topology=numa_topology)
if self.pci_tracker and sign:
self.pci_tracker.update_pci_for_instance(
context, instance, sign=sign)
self._update_usage(usage, nodename)
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
obj = objects.PciDevicePoolList()
cn.pci_device_pools = obj
self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
filtered = {}
instances = {}
self.tracked_migrations.clear()
# do some defensive filtering against bad migrations records in the
# database:
for migration in migrations:
uuid = migration.instance_uuid
try:
if uuid not in instances:
instances[uuid] = migration.instance
except exception.InstanceNotFound as e:
# migration referencing deleted instance
LOG.debug('Migration instance not found: %s', e)
continue
            # Skip the migration if the instance is neither in a resize
            # state nor live-migrating.
if (not _instance_in_resize_state(instances[uuid]) and not
_instance_is_live_migrating(instances[uuid])):
LOG.debug('Skipping migration as instance is neither '
'resizing nor live-migrating.', instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
other_migration = filtered.get(uuid, None)
# NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
if other_migration:
om = other_migration
other_time = om.updated_at or om.created_at
migration_time = migration.updated_at or migration.created_at
if migration_time > other_time:
filtered[uuid] = migration
else:
filtered[uuid] = migration
for migration in filtered.values():
instance = instances[migration.instance_uuid]
# Skip migration (and mark it as error) if it doesn't match the
# instance migration id.
# This can happen if we have a stale migration record.
# We want to proceed if instance.migration_context is None
if (instance.migration_context is not None and
instance.migration_context.migration_id != migration.id):
LOG.info("Current instance migration %(im)s doesn't match "
"migration %(m)s, marking migration as error. "
"This can occur if a previous migration for this "
"instance did not complete.",
{'im': instance.migration_context.migration_id,
'm': migration.id})
migration.status = "error"
migration.save()
continue
try:
self._update_usage_from_migration(context, instance, migration,
nodename)
except exception.FlavorNotFound:
LOG.warning("Flavor could not be found, skipping migration.",
instance_uuid=instance.uuid)
continue
def _update_usage_from_instance(self, context, instance, nodename,
is_removed=False):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
        # NOTE(sfinucan): Both brand-new instances and instances that are
        # being unshelved will have is_new_instance == True
is_removed_instance = not is_new_instance and (is_removed or
instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
if is_new_instance:
self.tracked_instances.add(uuid)
sign = 1
if is_removed_instance:
self.tracked_instances.remove(uuid)
self._release_assigned_resources(instance.resources)
sign = -1
cn = self.compute_nodes[nodename]
stats = self.stats[nodename]
stats.update_stats_for_instance(instance, is_removed_instance)
cn.stats = stats
# if it's a new or deleted instance:
if is_new_instance or is_removed_instance:
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context,
instance,
sign=sign)
# new instance, update compute node resource usage:
self._update_usage(self._get_usage_dict(instance, instance),
nodename, sign=sign)
# Stop tracking removed instances in the is_bfv cache. This needs to
# happen *after* calling _get_usage_dict() since that relies on the
# is_bfv cache.
if is_removed_instance and uuid in self.is_bfv:
del self.is_bfv[uuid]
cn.current_workload = stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
"""Calculate resource usage based on instance utilization. This is
        different from the hypervisor's view, as it will account for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
cn = self.compute_nodes[nodename]
# set some initial values, reserve room for host/hypervisor:
cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
cn.memory_mb_used = CONF.reserved_host_memory_mb
cn.vcpus_used = CONF.reserved_host_cpus
cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
cn.current_workload = 0
cn.running_vms = 0
instance_by_uuid = {}
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
self._update_usage_from_instance(context, instance, nodename)
instance_by_uuid[instance.uuid] = instance
return instance_by_uuid
def _remove_deleted_instances_allocations(self, context, cn,
migrations, instance_by_uuid):
migration_uuids = [migration.uuid for migration in migrations
if 'uuid' in migration]
# NOTE(jaypipes): All of this code sucks. It's basically dealing with
# all the corner cases in move, local delete, unshelve and rebuild
# operations for when allocations should be deleted when things didn't
# happen according to the normal flow of events where the scheduler
# always creates allocations for an instance
try:
# pai: report.ProviderAllocInfo namedtuple
pai = self.reportclient.get_allocations_for_resource_provider(
context, cn.uuid)
except (exception.ResourceProviderAllocationRetrievalFailed,
ks_exc.ClientException) as e:
LOG.error("Skipping removal of allocations for deleted instances: "
"%s", e)
return
allocations = pai.allocations
if not allocations:
# The main loop below would short-circuit anyway, but this saves us
# the (potentially expensive) context.elevated construction below.
return
read_deleted_context = context.elevated(read_deleted='yes')
for consumer_uuid, alloc in allocations.items():
if consumer_uuid in self.tracked_instances:
LOG.debug("Instance %s actively managed on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
if consumer_uuid in migration_uuids:
LOG.debug("Migration %s is active on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
# We know these are instances now, so proceed
instance_uuid = consumer_uuid
instance = instance_by_uuid.get(instance_uuid)
if not instance:
try:
instance = objects.Instance.get_by_uuid(
read_deleted_context, consumer_uuid,
expected_attrs=[])
except exception.InstanceNotFound:
# The instance isn't even in the database. Either the
# scheduler _just_ created an allocation for it and we're
# racing with the creation in the cell database, or the
# instance was deleted and fully archived before we got a
# chance to run this. The former is far more likely than
# the latter. Avoid deleting allocations for a building
# instance here.
LOG.info("Instance %(uuid)s has allocations against this "
"compute host but is not found in the database.",
{'uuid': instance_uuid},
exc_info=False)
continue
            # NOTE(mriedem): A cross-cell migration will work with instance
            # records across two cells; once the migration is
            # confirmed/reverted, one of them will be deleted but the
            # instance still exists in the other cell. Before the instance
            # is destroyed in the old cell, though, it is marked
            # hidden=True, so if we find a deleted hidden instance with
            # allocations against this compute node we just ignore it, since
            # the migration operation will handle cleaning up those
            # allocations.
if instance.deleted and not instance.hidden:
# The instance is gone, so we definitely want to remove
# allocations associated with it.
LOG.debug("Instance %s has been deleted (perhaps locally). "
"Deleting allocations that remained for this "
"instance against this compute host: %s.",
instance_uuid, alloc)
self.reportclient.delete_allocation_for_instance(context,
instance_uuid)
continue
if not instance.host:
# Allocations related to instances being scheduled should not
# be deleted if we already wrote the allocation previously.
LOG.debug("Instance %s has been scheduled to this compute "
"host, the scheduler has made an allocation "
"against this compute node but the instance has "
"yet to start. Skipping heal of allocation: %s.",
instance_uuid, alloc)
continue
if (instance.host == cn.host and
instance.node == cn.hypervisor_hostname):
# The instance is supposed to be on this compute host but is
# not in the list of actively managed instances. This could be
# because we are racing with an instance_claim call during
# initial build or unshelve where the instance host/node is set
# before the instance is added to tracked_instances. If the
# task_state is set, then consider things in motion and log at
# debug level instead of warning.
if instance.task_state:
LOG.debug('Instance with task_state "%s" is not being '
'actively managed by this compute host but has '
'allocations referencing this compute node '
'(%s): %s. Skipping heal of allocations during '
'the task state transition.',
instance.task_state, cn.uuid, alloc,
instance=instance)
else:
LOG.warning("Instance %s is not being actively managed by "
"this compute host but has allocations "
"referencing this compute host: %s. Skipping "
"heal of allocation because we do not know "
"what to do.", instance_uuid, alloc)
continue
if instance.host != cn.host:
# The instance has been moved to another host either via a
# migration, evacuation or unshelve in between the time when we
# ran InstanceList.get_by_host_and_node(), added those
# instances to RT.tracked_instances and the above
# Instance.get_by_uuid() call. We SHOULD attempt to remove any
# allocations that reference this compute host if the VM is in
# a stable terminal state (i.e. it isn't in a state of waiting
# for resize to confirm/revert), however if the destination
# host is an Ocata compute host, it will delete the allocation
# that contains this source compute host information anyway and
# recreate an allocation that only refers to itself. So we
# don't need to do anything in that case. Just log the
# situation here for information but don't attempt to delete or
# change the allocation.
LOG.warning("Instance %s has been moved to another host "
"%s(%s). There are allocations remaining against "
"the source host that might need to be removed: "
"%s.",
instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, context, instance, node,
node_type='source'):
# Clean up the instance allocation from this node in placement
cn_uuid = self.compute_nodes[node].uuid
if not self.reportclient.remove_provider_tree_from_instance_allocation(
context, instance.uuid, cn_uuid):
LOG.error("Failed to clean allocation of evacuated "
"instance on the %s node %s",
node_type, cn_uuid, instance=instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances)
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans, nodename):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warning("Detected running orphan instance: %(uuid)s "
"(consuming %(memory_mb)s MB memory)",
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(usage, nodename)
def delete_allocation_for_shelve_offloaded_instance(self, context,
instance):
self.reportclient.delete_allocation_for_instance(context,
instance.uuid)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, instance, prefix, migration):
"""Get the instance type from instance."""
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, instance, **updates):
"""Make a usage dict _update methods expect.
Accepts a dict or an Instance or Flavor object, and a set of updates.
Converts the object to a dict and applies the updates.
:param object_or_dict: instance or flavor as an object or just a dict
:param instance: nova.objects.Instance for the related operation; this
is needed to determine if the instance is
volume-backed
:param updates: key-value pairs to update the passed object.
Currently only considers 'numa_topology', all other
keys are ignored.
:returns: a dict with all the information from object_or_dict updated
with updates
"""
def _is_bfv():
# Check to see if we have the is_bfv value cached.
if instance.uuid in self.is_bfv:
is_bfv = self.is_bfv[instance.uuid]
else:
is_bfv = compute_utils.is_volume_backed_instance(
instance._context, instance)
self.is_bfv[instance.uuid] = is_bfv
return is_bfv
usage = {}
if isinstance(object_or_dict, objects.Instance):
is_bfv = _is_bfv()
usage = {'memory_mb': object_or_dict.flavor.memory_mb,
'swap': object_or_dict.flavor.swap,
'vcpus': object_or_dict.flavor.vcpus,
'root_gb': (0 if is_bfv else
object_or_dict.flavor.root_gb),
'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
'numa_topology': object_or_dict.numa_topology}
elif isinstance(object_or_dict, objects.Flavor):
usage = obj_base.obj_to_primitive(object_or_dict)
if _is_bfv():
usage['root_gb'] = 0
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
def build_failed(self, nodename):
"""Increments the failed_builds stats for the given node."""
self.stats[nodename].build_failed()
def build_succeeded(self, nodename):
"""Resets the failed_builds stats for the given node."""
self.stats[nodename].build_succeeded()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def claim_pci_devices(self, context, pci_requests):
"""Claim instance PCI resources
:param context: security context
:param pci_requests: a list of nova.objects.InstancePCIRequests
:returns: a list of nova.objects.PciDevice objects
"""
result = self.pci_tracker.claim_instance(
context, pci_requests, None)
self.pci_tracker.save(context)
return result
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def allocate_pci_devices_for_instance(self, context, instance):
"""Allocate instance claimed PCI resources
:param context: security context
:param instance: instance object
"""
self.pci_tracker.allocate_instance(instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def free_pci_device_allocations_for_instance(self, context, instance):
"""Free instance allocated PCI resources
:param context: security context
:param instance: instance object
"""
self.pci_tracker.free_instance_allocations(context, instance)
self.pci_tracker.save(context)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def free_pci_device_claims_for_instance(self, context, instance):
"""Free instance claimed PCI resources
:param context: security context
:param instance: instance object
"""
self.pci_tracker.free_instance_claims(context, instance)
self.pci_tracker.save(context)
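# --- Illustrative sketch (not part of the tracker above) --------------------
# A minimal, self-contained model of the assigned-resources bookkeeping that
# _claim_resources(), _add_assigned_resources() and
# _release_assigned_resources() implement: sets of opaque resource objects
# keyed by provider UUID and resource class. All names here are hypothetical;
# only the set arithmetic (free = inventory - assigned, claims pop from free)
# mirrors the code above.
import collections

def _demo_assigned_resources():
    # Hypothetical inventory, as the provider tree would report it.
    inventory = {
        'rp-1': {'CUSTOM_EXAMPLE_RC': {'res-a', 'res-b', 'res-c'}},
    }
    # Same shape as ResourceTracker.assigned_resources:
    # {$RP_UUID: {$RESOURCE_CLASS: set(...)}}
    assigned = collections.defaultdict(lambda: collections.defaultdict(set))

    def claim(rp_uuid, rc, amount):
        # Free units are the inventory minus whatever is already assigned.
        free = inventory[rp_uuid][rc] - assigned[rp_uuid][rc]
        if amount > len(free):
            raise RuntimeError('Needed %d units of %s, but %d are available.'
                               % (amount, rc, len(free)))
        claimed = set()
        for _ in range(amount):
            claimed.add(free.pop())
        assigned[rp_uuid][rc] |= claimed
        return claimed

    def release(rp_uuid, rc, claimed):
        assigned[rp_uuid][rc] -= claimed

    got = claim('rp-1', 'CUSTOM_EXAMPLE_RC', 2)  # claims two of three units
    release('rp-1', 'CUSTOM_EXAMPLE_RC', got)    # and frees them again
    return got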
|
import os
import numpy as np
import uuid
import datetime
from ...common.database import Database
from .joukowski import JoukowskiAirfoil
from ..optimizer.swarm import Swarm
SIMCOLLECTION = "simulations"
LOGCOLLECTION = "logs"
DESIGNSCOLLECTION = "designs"
class Simulation(JoukowskiAirfoil):
def __init__(self, id=None, Uinf=1, R=1.3, a=1.0, alpha=0.0, beta=1.0, rho=1.0, cl=None, cd=None, L=None, D=None, opid=0, analysisid=0):
super().__init__(Uinf=Uinf, R=R, a=a, alpha=alpha, beta=beta, rho=rho)
        self.id = id
        self.cl = cl
        self.cd = cd
        self.L = L
        self.D = D
self.analysisid = analysisid
self.opid = opid
### JSON ###
def json(self):
return {"id": self.id, "R":self.R, "a": self.a, "Uinf": self.Uinf, "alpha": self.alpha,
"beta": self.beta, "rho": self.rho, "cl":self.lift_coefficient, "cd": self.drag_coefficient,
"L": self.lift, "D": self.drag, "analysisid":self.analysisid, "opid": opid}
### Store it ###
def store(self):
return Database.insert(DESIGNSCOLLECTION, [self.json()])
### Log file for manual transactions ###
class DesignLogMessage():
def __init__(self, id=None, user=None, caseid=None, created=None, message=None):
        self.id = id
        self.user = user
        self.caseid = caseid
        self.message = message
self.created = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] if created is None else created
### TO json ###
def json(self):
return {"id":self.id,
"user": self.user,
"caseid": self.caseid,
"message": self.message,
"created": self.created}
### Store transaction ###
def store(self):
return Database.insert(LOGCOLLECTION, [self.json()])
### Store transaction ###
@staticmethod
def delete(caseid):
return Database.remove(LOGCOLLECTION, query={"caseid":["=", caseid]})
### Get all transactions ###
@classmethod
def retrieveAll(cls, caseid):
cases = Database.find(LOGCOLLECTION, query={"caseid":["=", caseid]})
if cases:
return [cls(**data) for data in cases]
else:
return []
class DigitalTwin(object):
### Constructor ###
def __init__(self, id=None, created=None, directory=None, U1=None, U2=None, alpha1=None, alpha2=None, a0=1.0, R0=1.05, beta0=0.0):
        self.id = id
self.directory = uuid.uuid4().hex if directory is None else directory
self.created = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] if created is None else created
self.alpha1 = 0 if alpha1 is None else alpha1
        self.alpha2 = 0 if alpha2 is None else alpha2
self.U1 = 1 if U1 is None else U1
self.U2 = 1 if U2 is None else U2
self.a0, self.R0, self.beta0 = a0, R0, beta0
        if not os.path.isdir("./application/static/res/{}".format(self.directory)):
            os.mkdir("./application/static/res/{}".format(self.directory))
### Convert to JSON ###
def json(self):
return {"id": self.id,
"directory":self.directory,
"created": self.created,
"alpha1": self.alpha1,
"alpha2": self.alpha2,
"U1": self.U1,
"U2": self.U2,
"a0": self.a0,
"R0": self.R0,
"beta0": self.beta0
}
### Store it ###
def store(self):
return Database.insert(SIMCOLLECTION, [self.json()])
### Update it ###
def update(self):
return Database.update(SIMCOLLECTION, self.json(), query={"id":["=", self.id]})
### Retrieve from DB ###
@classmethod
def retrieveAll(cls):
cases = Database.find(SIMCOLLECTION)
if cases:
return [cls(**data) for data in cases]
else:
return []
### Retrieve from DB ###
@classmethod
def find_by_id(cls, caseid):
return cls(**Database.find(SIMCOLLECTION, query={"id":["=", caseid]}, one=True))
### Start the simulation ###
def simulate(self, plot=True):
sim1 = Simulation(Uinf=self.U1, alpha=self.alpha1, beta=self.beta0, a=self.a0, R=self.R0)
sim2 = Simulation(Uinf=self.U2, alpha=self.alpha2, beta=self.beta0, a=self.a0, R=self.R0)
sim1.calculateFlowField()
sim2.calculateFlowField()
if plot:
sim1.plot_flowfield(store=True, name="./application/static/res/{}/{}".format(self.directory, "flowfield1.png"))
sim2.plot_flowfield(store=True, name="./application/static/res/{}/{}".format(self.directory, "flowfield2.png"))
sim1.plot_cp(store=True, name="./application/static/res/{}/{}".format(self.directory, "profile1.png"))
sim2.plot_cp(store=True, name="./application/static/res/{}/{}".format(self.directory, "profile2.png"))
return {'LiftOp1': np.around(sim1.lift,6), "ClOp1": np.around(sim1.lift_coefficient,6),
"DragOp1": np.around(sim1.drag,6), "CdOp1": np.around(sim1.drag_coefficient,6),
"LiftOp2": np.around(sim2.lift,6), "ClOp2": np.around(sim2.lift_coefficient,6),
"DragOp2": np.around(sim2.drag,6), "CdOp2": np.around(sim2.drag_coefficient,6)}
### Remove from DB ###
@staticmethod
def remove_from_DB(caseid):
return Database.remove(SIMCOLLECTION, query={"id":["=", caseid]})
### Setup ###
@staticmethod
def optimize(caseid, xbounds, itermax, swarmsize, targets, constraints):
### Get Case ###
dt = DigitalTwin.find_by_id(caseid)
### Fitness function ###
def fitness(x, dt, constraints, targets, caseid):
y, c = [], []
for n in range(x.shape[0]):
dt.R0, dt.beta0, dt.a0 = x[n,0], x[n,1], x[n,2]
if dt.a0>=dt.R0:
y.append(len(targets)*[10])
c.append(len(constraints)*[10])
continue
### Run simulation ###
res = dt.simulate(plot=False)
### Separate between targets and constraints ###
yd, cd = [],[]
for name in sorted(list(res.keys())):
if name in list(constraints.keys()):
cd.append(res[name])
elif name in list(targets.keys()):
yd.append(targets[name]["optidir"]*res[name])
y.append(yd)
c.append(cd)
                ### Insert design into DB ###
Database.insert(DESIGNSCOLLECTION, [{"id":None, "caseid":caseid,
"clop1": res["ClOp1"], "cdop1": res["CdOp1"], "lop1": res["LiftOp1"], "dop1": res["DragOp1"],
"clop2": res["ClOp2"], "cdop2": res["CdOp2"], "lop2": res["LiftOp2"], "dop2": res["DragOp2"],
"R": dt.R0, "a": dt.a0, "beta":dt.beta0,
}])
return np.asarray(y), np.asarray(c)
### Bounds ###
ybounds, cbounds = [], []
        for name in sorted(['LiftOp1', 'LiftOp2', 'DragOp1', 'DragOp2']):
if name in list(constraints.keys()):
cbounds.append(constraints[name]["bounds"])
elif name in list(targets.keys()):
ybounds.append(targets[name]["bounds"])
### Bokeh plot ###
        ### Start Optimization ###
swarm = Swarm(fitness, xbounds, ybounds, cbounds, nparticles=swarmsize, dt=dt, constraints=constraints, targets=targets, caseid=caseid)
swarm.initialize()
swarm.iterate(itermax)
### AJAX plot ####
@staticmethod
def get_opti_data(caseid):
return Database.find(DESIGNSCOLLECTION, query={"caseid":["=", caseid]})
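### Added usage sketch (hedged): assumes the Database, Simulation and Swarm ###
### objects referenced above are available in this module's environment.   ###
# dt = DigitalTwin(U1=10.0, U2=15.0, alpha1=2.0, alpha2=4.0)  # hypothetical values
# dt.store()
# results = dt.simulate(plot=False)  # e.g. {'LiftOp1': ..., 'ClOp1': ..., ...}
# DigitalTwin.optimize(
#     dt.id,
#     xbounds=[(0.5, 2.0), (-0.5, 0.5), (0.1, 1.0)],  # (R0, beta0, a0) ranges, illustrative only
#     itermax=20, swarmsize=10,
#     targets={"LiftOp1": {"optidir": -1.0, "bounds": (0.0, 10.0)}},
#     constraints={"DragOp1": {"bounds": (0.0, 1.0)}})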
|
"""
This script is for computing the definite integrals usingLegendre-Guass Quadrature.
Computes the Legendre-Gauss nodes and weights on interval [a,b] with truncation order N.
If f is a continuous function on [a,b], with a descritization induced by a vector x
of points in [a,b], evalute the definite integral of the descritization of f via
sum(f.*w).s
Requires:
None
Use:
Call:
'lgwt(N,a,b)
Return:
[x,w]
Inputs:
N: highest order of legendre polynomials (and later the "moment order" of the PDE) - int
a: left endpoint of interval of integration - float
b: right endpoint of interval of definition - float
Optional: epsilon - default to 1e-10
Outputs: [x,w]
x: N quadrature points within maximum error epsilon, ordered - numpy.ndarray of shape (N,)
w: N corresponding quadrature weights - numpy n.darray of shape (N,)
Translated from Greg von Winckel's MATLAB 02/25/2005 Edit, extension 'lgwt.m'
Last Edit: 11/27/2019
"""
#1. Import packages and initialize function definition:
import numpy as np
import math
#Test Variables:
eps = np.finfo(float).eps
def lgwt(N,a,b):
#2. Prepare y as the initial point guess:
    nodes_init = np.arange(N)
xu = np.linspace(-1,1,N)
y = np.cos((math.pi/(2*N))*(2*(nodes_init)+1)) + (0.27/N)*np.sin(math.pi*xu*((N-1)/(N+1)))
    #3. Initialize L as matrix for Legendre polynomials 0 through N (N+1 in total) evaluated at y, and dL_N as d/dx(P_N) evaluated at y:
L = np.zeros((N,N+1))
L[:,0] = 1
dL_N = np.zeros(N)
#4. Apply Newton method and Bonnet recursion formula to bring the difference in newton iterates below epsilon, resulting in nodes y over [-1,1]:
y0 = 2
i = 0
while (max(abs(y-y0)) > eps):
i += 1
L[:,1] = y
for k in range(1,N):
L[:,k+1] = ((2*k+1)/(k+1))*np.multiply(y,L[:,k])-(k/(k+1))*L[:,k-1]
dL_N = np.divide((N+1)*(L[:,N-1] - np.multiply(y,L[:,N])),(1-y**2))
y0 = y
y = y0 - np.divide(L[:,N],dL_N)
        if i > (1/eps)*1e2:
            print("LGWT fail: more than " + str((1/eps)*1e2) + " iterations to compute zeros via Newton")
            break
#5. Map the nodes from [-1,1] onto [a,b], yielding quadrature points x:
x = (a*(1-y)+b*(1+y))/2
#6. Compute weights:
w = np.zeros(N)
for i in range(N):
w[i] = (b-a)/((1-y[i]**2)*(dL_N[i]**2)*(N/(N+1))**2)
    #7. Define function output (flip both arrays so nodes and weights stay paired, with x ascending):
    return [np.flipud(x), np.flipud(w)]
#8. Test Function (guarded so importing this module has no side effects):
if __name__ == "__main__":
    x, w = lgwt(4, 1, 2)
    print(x, w)
|
import lazy_dataset
from collections import OrderedDict
def test_unbatch():
examples = OrderedDict(
a=[0, 1, 2],
b=[3, 4],
c=[5, 6, 7]
)
ds = lazy_dataset.new(examples)
ds = ds.unbatch()
assert list(ds) == list(range(8))
def fragment_fn(ex):
for i in ex.split('_'):
yield int(i)
def test_fragment():
examples = OrderedDict(
a='0_1_2',
b='3_4',
c='5_6_7'
)
ds = lazy_dataset.new(examples)
ds = ds.map(fragment_fn).unbatch()
assert list(ds) == list(range(8))
def test_prefetch():
examples = OrderedDict(
a='0_1_2',
b='3_4',
c='5_6_7'
)
ds = lazy_dataset.new(examples)
ds = ds.map(fragment_fn).prefetch(2, 2).unbatch()
assert list(ds) == list(range(8))
|
from urllib.request import urlretrieve
import time
import tarfile
URL_path = r"https://aiedugithub4a2.blob.core.windows.net/a2-data"
filename = r"Data.tar.gz"
print("Please input the local folder path:")
local = input()
from_path = URL_path + "/" + filename
to_path = local + "/" + filename
print("Downloading...")
try:
    urlretrieve(from_path, to_path)
    print("Done.")
    print("Extracting...")
    try:
        with tarfile.open(to_path) as file:
            file.extractall(path=local)
        print("Done.")
        print("All Work Finished!")
    except (tarfile.TarError, OSError):
        print("Failed Extraction!")
except (OSError, ValueError):
    print("Invalid Path!")
print("Exit in 3 seconds...")
time.sleep(3)
|
import pytest
from dvc.config import ConfigError
from dvc.exceptions import DvcException
from dvc.fs.s3 import S3FileSystem
bucket_name = "bucket-name"
prefix = "some/prefix"
url = f"s3://{bucket_name}/{prefix}"
key_id = "key-id"
key_secret = "key-secret"
session_token = "session-token"
@pytest.fixture(autouse=True)
def grants():
return {
"grant_read": "id=read-permission-id,id=other-read-permission-id",
"grant_read_acp": "id=read-acp-permission-id",
"grant_write_acp": "id=write-acp-permission-id",
"grant_full_control": "id=full-control-permission-id",
}
def test_init(dvc):
config = {"url": url}
fs = S3FileSystem(dvc, config)
assert fs.path_info == url
def test_verify_ssl_default_param(dvc):
config = {
"url": url,
}
fs = S3FileSystem(dvc, config)
assert fs.ssl_verify
def test_ssl_verify_bool_param(dvc):
config = {"url": url, "ssl_verify": False}
fs = S3FileSystem(dvc, config)
assert fs.ssl_verify == config["ssl_verify"]
def test_grants(dvc):
config = {
"url": url,
"grant_read": "id=read-permission-id,id=other-read-permission-id",
"grant_read_acp": "id=read-acp-permission-id",
"grant_write_acp": "id=write-acp-permission-id",
"grant_full_control": "id=full-control-permission-id",
}
fs = S3FileSystem(dvc, config)
assert (
fs.extra_args["GrantRead"]
== "id=read-permission-id,id=other-read-permission-id"
)
assert fs.extra_args["GrantReadACP"] == "id=read-acp-permission-id"
assert fs.extra_args["GrantWriteACP"] == "id=write-acp-permission-id"
assert fs.extra_args["GrantFullControl"] == "id=full-control-permission-id"
def test_grants_mutually_exclusive_acl_error(dvc, grants):
for grant_option, grant_value in grants.items():
config = {"url": url, "acl": "public-read", grant_option: grant_value}
with pytest.raises(ConfigError):
S3FileSystem(dvc, config)
def test_sse_kms_key_id(dvc):
fs = S3FileSystem(dvc, {"url": url, "sse_kms_key_id": "key"})
assert fs.extra_args["SSEKMSKeyId"] == "key"
def test_key_id_and_secret(dvc):
fs = S3FileSystem(
dvc,
{
"url": url,
"access_key_id": key_id,
"secret_access_key": key_secret,
"session_token": session_token,
},
)
assert fs.access_key_id == key_id
assert fs.secret_access_key == key_secret
assert fs.session_token == session_token
def test_get_s3_no_credentials(mocker):
from botocore.exceptions import NoCredentialsError
fs = S3FileSystem(None, {})
with pytest.raises(DvcException, match="Unable to find AWS credentials"):
with fs._get_s3():
raise NoCredentialsError
def test_get_s3_connection_error(mocker):
from botocore.exceptions import EndpointConnectionError
fs = S3FileSystem(None, {})
msg = "Unable to connect to 'AWS S3'."
with pytest.raises(DvcException, match=msg):
with fs._get_s3():
raise EndpointConnectionError(endpoint_url="url")
def test_get_s3_connection_error_endpoint(mocker):
from botocore.exceptions import EndpointConnectionError
fs = S3FileSystem(None, {"endpointurl": "https://example.com"})
msg = "Unable to connect to 'https://example.com'."
with pytest.raises(DvcException, match=msg):
with fs._get_s3():
raise EndpointConnectionError(endpoint_url="url")
def test_get_bucket():
fs = S3FileSystem(None, {"url": "s3://mybucket/path"})
with pytest.raises(DvcException, match="Bucket 'mybucket' does not exist"):
with fs._get_bucket("mybucket") as bucket:
raise bucket.meta.client.exceptions.NoSuchBucket({}, None)
|
"""
Renders a colormapped image of a scalar value field, and a cross section
chosen by a line interactor.
"""
# Standard library imports
from optparse import OptionParser
import sys
# Major library imports
from numpy import array, linspace, meshgrid, nanmin, nanmax, pi, zeros
# Enthought library imports
from chaco.api import ArrayDataSource, ArrayPlotData, ColorBar, ContourLinePlot, \
ColormappedScatterPlot, CMapImagePlot, \
ContourPolyPlot, DataRange1D, VPlotContainer, \
DataRange2D, GridMapper, GridDataSource, \
HPlotContainer, ImageData, LinearMapper, \
LinePlot, OverlayPlotContainer, Plot, PlotAxis
from chaco.default_colormaps import *
from enable.component_editor import ComponentEditor
from chaco.tools.api import LineInspector, PanTool, RangeSelection, \
RangeSelectionOverlay, ZoomTool
from enable.api import Window
from traits.api import Any, Array, Callable, CFloat, CInt, Enum, Event, Float, HasTraits, \
Int, Instance, Str, Trait, on_trait_change
from traitsui.api import Group, Handler, HGroup, Item, View
from traitsui.menu import Action, CloseAction, Menu, \
MenuBar, NoButtons, Separator
class Model(HasTraits):
#Traits view definitions:
traits_view = View(
Group(Item('function'),
HGroup(Item('npts_x', label="Number X Points"),
Item('npts_y', label="Number Y Points")),
HGroup(Item('min_x', label="Min X value"),
Item('max_x', label="Max X value")),
HGroup(Item('min_y', label="Min Y value"),
Item('max_y', label="Max Y value"))),
buttons=["OK", "Cancel"])
function = Str("tanh(x**2+y)*cos(y)*jn(0,x+y*2)")
npts_x = CInt(400)
npts_y = CInt(200)
min_x = CFloat(-2*pi)
max_x = CFloat(2*pi)
min_y = CFloat(-1.5*pi)
max_y = CFloat(1.5*pi)
xs = Array
ys = Array
zs = Array
minz = Float
maxz = Float
model_changed = Event
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
self.compute_model()
def compute_model(self):
# The xs and ys used for the image plot range need to be the
# edges of the cells.
self.xs = linspace(self.min_x, self.max_x, self.npts_x+1)
self.ys = linspace(self.min_y, self.max_y, self.npts_y+1)
# The grid of points at which we will evaluate the 2D function
# is located at cell centers, so use halfsteps from the
# min/max values (which are edges)
xstep = (self.max_x - self.min_x) / self.npts_x
ystep = (self.max_y - self.min_y) / self.npts_y
gridx = linspace(self.min_x+xstep/2, self.max_x-xstep/2, self.npts_x)
        gridy = linspace(self.min_y+ystep/2, self.max_y-ystep/2, self.npts_y)
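        # Added numeric illustration: with min_x=0, max_x=4, npts_x=4 the cell
        # edges are [0, 1, 2, 3, 4] and the cell-center grid is
        # [0.5, 1.5, 2.5, 3.5].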
x, y = meshgrid(gridx, gridy)
try:
d = dict(x=x, y=y)
exec "from scipy import *" in d
exec "from scipy.special import *" in d
self.zs = eval(self.function, d)
self.minz = nanmin(self.zs)
self.maxz = nanmax(self.zs)
self.model_changed = True
self._function = self.function
        except Exception:
            self.set(function=self._function, trait_change_notify=False)
def _anytrait_changed(self, name, value):
if name in ['function', 'npts_x', 'npts_y',
'min_x', 'max_x', 'min_y', 'max_y']:
self.compute_model()
class PlotUI(HasTraits):
#Traits view definitions:
traits_view = View(
Group(Item('container',
editor=ComponentEditor(size=(800,600)),
show_label=False)),
buttons=NoButtons,
resizable=True)
plot_edit_view = View(
Group(Item('num_levels'),
Item('colormap')),
buttons=["OK","Cancel"])
num_levels = Int(15)
    colormap = Enum(list(color_map_name_dict.keys()))
#---------------------------------------------------------------------------
# Private Traits
#---------------------------------------------------------------------------
_image_index = Instance(GridDataSource)
_image_value = Instance(ImageData)
_cmap = Trait(jet, Callable)
#---------------------------------------------------------------------------
# Public View interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
super(PlotUI, self).__init__(*args, **kwargs)
self.create_plot()
def create_plot(self):
# Create the mapper, etc
self._image_index = GridDataSource(array([]),
array([]),
sort_order=("ascending","ascending"))
image_index_range = DataRange2D(self._image_index)
self._image_index.on_trait_change(self._metadata_changed,
"metadata_changed")
self._image_value = ImageData(data=array([]), value_depth=1)
image_value_range = DataRange1D(self._image_value)
# Create the contour plots
self.polyplot = ContourPolyPlot(index=self._image_index,
value=self._image_value,
index_mapper=GridMapper(range=
image_index_range),
color_mapper=\
self._cmap(image_value_range),
levels=self.num_levels)
self.lineplot = ContourLinePlot(index=self._image_index,
value=self._image_value,
index_mapper=GridMapper(range=
self.polyplot.index_mapper.range),
levels=self.num_levels)
# Add a left axis to the plot
left = PlotAxis(orientation='left',
title= "y",
mapper=self.polyplot.index_mapper._ymapper,
component=self.polyplot)
self.polyplot.overlays.append(left)
# Add a bottom axis to the plot
bottom = PlotAxis(orientation='bottom',
title= "x",
mapper=self.polyplot.index_mapper._xmapper,
component=self.polyplot)
self.polyplot.overlays.append(bottom)
# Add some tools to the plot
self.polyplot.tools.append(PanTool(self.polyplot,
constrain_key="shift"))
self.polyplot.overlays.append(ZoomTool(component=self.polyplot,
tool_mode="box", always_on=False))
self.polyplot.overlays.append(LineInspector(component=self.polyplot,
axis='index_x',
inspect_mode="indexed",
write_metadata=True,
is_listener=False,
color="white"))
self.polyplot.overlays.append(LineInspector(component=self.polyplot,
axis='index_y',
inspect_mode="indexed",
write_metadata=True,
color="white",
is_listener=False))
# Add these two plots to one container
contour_container = OverlayPlotContainer(padding=20,
use_backbuffer=True,
unified_draw=True)
contour_container.add(self.polyplot)
contour_container.add(self.lineplot)
# Create a colorbar
cbar_index_mapper = LinearMapper(range=image_value_range)
self.colorbar = ColorBar(index_mapper=cbar_index_mapper,
plot=self.polyplot,
padding_top=self.polyplot.padding_top,
padding_bottom=self.polyplot.padding_bottom,
padding_right=40,
resizable='v',
width=30)
self.pd = ArrayPlotData(line_index = array([]),
line_value = array([]),
scatter_index = array([]),
scatter_value = array([]),
scatter_color = array([]))
self.cross_plot = Plot(self.pd, resizable="h")
self.cross_plot.height = 100
self.cross_plot.padding = 20
self.cross_plot.plot(("line_index", "line_value"),
line_style="dot")
self.cross_plot.plot(("scatter_index","scatter_value","scatter_color"),
type="cmap_scatter",
name="dot",
color_mapper=self._cmap(image_value_range),
marker="circle",
marker_size=8)
self.cross_plot.index_range = self.polyplot.index_range.x_range
self.pd.set_data("line_index2", array([]))
self.pd.set_data("line_value2", array([]))
self.pd.set_data("scatter_index2", array([]))
self.pd.set_data("scatter_value2", array([]))
self.pd.set_data("scatter_color2", array([]))
self.cross_plot2 = Plot(self.pd, width = 140, orientation="v", resizable="v", padding=20, padding_bottom=160)
self.cross_plot2.plot(("line_index2", "line_value2"),
line_style="dot")
self.cross_plot2.plot(("scatter_index2","scatter_value2","scatter_color2"),
type="cmap_scatter",
name="dot",
color_mapper=self._cmap(image_value_range),
marker="circle",
marker_size=8)
self.cross_plot2.index_range = self.polyplot.index_range.y_range
# Create a container and add components
self.container = HPlotContainer(padding=40, fill_padding=True,
bgcolor = "white", use_backbuffer=False)
inner_cont = VPlotContainer(padding=0, use_backbuffer=True)
inner_cont.add(self.cross_plot)
inner_cont.add(contour_container)
self.container.add(self.colorbar)
self.container.add(inner_cont)
self.container.add(self.cross_plot2)
def update(self, model):
self.minz = model.minz
self.maxz = model.maxz
self.colorbar.index_mapper.range.low = self.minz
self.colorbar.index_mapper.range.high = self.maxz
self._image_index.set_data(model.xs, model.ys)
self._image_value.data = model.zs
self.pd.set_data("line_index", model.xs)
self.pd.set_data("line_index2", model.ys)
self.container.invalidate_draw()
self.container.request_redraw()
#---------------------------------------------------------------------------
# Event handlers
#---------------------------------------------------------------------------
def _metadata_changed(self, old, new):
""" This function takes out a cross section from the image data, based
on the line inspector selections, and updates the line and scatter
plots."""
self.cross_plot.value_range.low = self.minz
self.cross_plot.value_range.high = self.maxz
self.cross_plot2.value_range.low = self.minz
self.cross_plot2.value_range.high = self.maxz
        if "selections" in self._image_index.metadata:
x_ndx, y_ndx = self._image_index.metadata["selections"]
if y_ndx and x_ndx:
self.pd.set_data("line_value",
self._image_value.data[y_ndx,:])
self.pd.set_data("line_value2",
self._image_value.data[:,x_ndx])
xdata, ydata = self._image_index.get_data()
xdata, ydata = xdata.get_data(), ydata.get_data()
self.pd.set_data("scatter_index", array([xdata[x_ndx]]))
self.pd.set_data("scatter_index2", array([ydata[y_ndx]]))
self.pd.set_data("scatter_value",
array([self._image_value.data[y_ndx, x_ndx]]))
self.pd.set_data("scatter_value2",
array([self._image_value.data[y_ndx, x_ndx]]))
self.pd.set_data("scatter_color",
array([self._image_value.data[y_ndx, x_ndx]]))
self.pd.set_data("scatter_color2",
array([self._image_value.data[y_ndx, x_ndx]]))
else:
self.pd.set_data("scatter_value", array([]))
self.pd.set_data("scatter_value2", array([]))
self.pd.set_data("line_value", array([]))
self.pd.set_data("line_value2", array([]))
def _colormap_changed(self):
self._cmap = color_map_name_dict[self.colormap]
if hasattr(self, "polyplot"):
value_range = self.polyplot.color_mapper.range
self.polyplot.color_mapper = self._cmap(value_range)
value_range = self.cross_plot.color_mapper.range
self.cross_plot.color_mapper = self._cmap(value_range)
# FIXME: change when we decide how best to update plots using
# the shared colormap in plot object
self.cross_plot.plots["dot"][0].color_mapper = self._cmap(value_range)
self.cross_plot2.plots["dot"][0].color_mapper = self._cmap(value_range)
self.container.request_redraw()
def _num_levels_changed(self):
if self.num_levels > 3:
self.polyplot.levels = self.num_levels
self.lineplot.levels = self.num_levels
class Controller(Handler):
#---------------------------------------------------------------------------
# State traits
#---------------------------------------------------------------------------
model = Instance(Model)
view = Instance(PlotUI)
#---------------------------------------------------------------------------
# Handler interface
#---------------------------------------------------------------------------
def init(self, info):
self.model = info.object.model
self.view = info.object.view
self.model.on_trait_change(self._model_changed, "model_changed")
#---------------------------------------------------------------------------
# Public Controller interface
#---------------------------------------------------------------------------
def edit_model(self, ui_info):
self.model.configure_traits()
def edit_plot(self, ui_info):
self.view.configure_traits(view="plot_edit_view")
#---------------------------------------------------------------------------
# Private Controller interface
#---------------------------------------------------------------------------
def _model_changed(self):
if self.view is not None:
self.view.update(self.model)
class ModelView(HasTraits):
model = Instance(Model)
view = Instance(PlotUI)
traits_view = View(Item('@view',
show_label=False),
menubar=MenuBar(Menu(Action(name="Edit Model",
action="edit_model"),
Action(name="Edit Plot",
action="edit_plot"),
CloseAction,
name="File")),
handler = Controller,
title = "Function Inspector",
resizable=True)
@on_trait_change('model, view')
def update_view(self):
if self.model is not None and self.view is not None:
self.view.update(self.model)
options_dict = {'colormap' : "jet",
'num_levels' : 15,
'function' : "tanh(x**2+y)*cos(y)*jn(0,x+y*2)"}
model=Model(**options_dict)
view=PlotUI(**options_dict)
popup = ModelView(model=model, view=view)
def show_plot(**kwargs):
model = Model(**kwargs)
view = PlotUI(**kwargs)
modelview=ModelView(model=model, view=view)
modelview.configure_traits()
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage, version="%prog 1.0")
parser.add_option("-c", "--colormap",
action="store", type="string", dest="colormap", default="jet",
metavar="CMAP", help="choose a default colormapper")
parser.add_option("-n", "--nlevels",
action="store", type="int", dest="num_levels", default=15,
help="number countour levels to plot [default: %default]")
parser.add_option("-f", "--function",
action="store", type="string", dest="function",
default="tanh(x**2+y)*cos(y)*jn(0,x+y*2)",
help="function of x and y [default: %default]")
opts, args = parser.parse_args(argv[1:])
if len(args) > 0:
parser.error("Incorrect number of arguments")
show_plot(colormap=opts.colormap, num_levels=opts.num_levels,
function=opts.function)
if __name__ == "__main__":
sys.exit(main())
|
# Copyright 2018 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A binary building the graph and performing the optimization of LEO."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import pickle
from absl import flags
import tensorflow as tf
import config
import data
import model
import utils
FLAGS = flags.FLAGS
flags.DEFINE_string("checkpoint_path", "/tmp/leo", "Path to restore from and "
"save to checkpoints.")
flags.DEFINE_integer(
"checkpoint_steps", 1000, "The frequency, in number of "
"steps, of saving the checkpoints.")
flags.DEFINE_boolean("evaluation_mode", False, "Whether to run in an "
"evaluation-only mode.")
def _clip_gradients(gradients, gradient_threshold, gradient_norm_threshold):
"""Clips gradients by value and then by norm."""
if gradient_threshold > 0:
gradients = [
tf.clip_by_value(g, -gradient_threshold, gradient_threshold)
for g in gradients
]
if gradient_norm_threshold > 0:
gradients = [
tf.clip_by_norm(g, gradient_norm_threshold) for g in gradients
]
return gradients
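# Added illustration (not part of the original binary): with
# gradient_threshold=1.0 and gradient_norm_threshold=5.0, a gradient
# [[3.0, -2.0]] is first clipped elementwise to [[1.0, -1.0]], and its norm
# (sqrt(2) < 5) then passes through tf.clip_by_norm unchanged.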
def _construct_validation_summaries(metavalid_loss, metavalid_accuracy):
tf.summary.scalar("metavalid_loss", metavalid_loss)
tf.summary.scalar("metavalid_valid_accuracy", metavalid_accuracy)
# The summaries are passed implicitly by TensorFlow.
def _construct_training_summaries(metatrain_loss, metatrain_accuracy,
model_grads, model_vars):
tf.summary.scalar("metatrain_loss", metatrain_loss)
tf.summary.scalar("metatrain_valid_accuracy", metatrain_accuracy)
for g, v in zip(model_grads, model_vars):
histogram_name = v.name.split(":")[0]
tf.summary.histogram(histogram_name, v)
histogram_name = "gradient/{}".format(histogram_name)
tf.summary.histogram(histogram_name, g)
def _construct_examples_batch(batch_size, split, num_classes,
num_tr_examples_per_class,
num_val_examples_per_class):
data_provider = data.DataProvider(split, config.get_data_config())
examples_batch = data_provider.get_batch(batch_size, num_classes,
num_tr_examples_per_class,
num_val_examples_per_class)
return utils.unpack_data(examples_batch)
def _construct_loss_and_accuracy(inner_model, inputs, is_meta_training):
"""Returns batched loss and accuracy of the model ran on the inputs."""
call_fn = functools.partial(
inner_model.__call__, is_meta_training=is_meta_training)
per_instance_loss, per_instance_accuracy = tf.map_fn(
call_fn,
inputs,
dtype=(tf.float32, tf.float32),
back_prop=is_meta_training)
loss = tf.reduce_mean(per_instance_loss)
accuracy = tf.reduce_mean(per_instance_accuracy)
return loss, accuracy
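# Added note: tf.map_fn applies the model once per problem instance along the
# leading batch dimension, yielding per-instance loss/accuracy vectors that
# the reduce_mean calls above collapse to scalars; back_prop=False outside of
# meta-training avoids building gradient ops for evaluation batches.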
def construct_graph(outer_model_config):
"""Constructs the optimization graph."""
inner_model_config = config.get_inner_model_config()
tf.logging.info("inner_model_config: {}".format(inner_model_config))
leo = model.LEO(inner_model_config, use_64bits_dtype=False)
num_classes = outer_model_config["num_classes"]
num_tr_examples_per_class = outer_model_config["num_tr_examples_per_class"]
metatrain_batch = _construct_examples_batch(
outer_model_config["metatrain_batch_size"], "train", num_classes,
num_tr_examples_per_class,
outer_model_config["num_val_examples_per_class"])
metatrain_loss, metatrain_accuracy = _construct_loss_and_accuracy(
leo, metatrain_batch, True)
metatrain_gradients, metatrain_variables = leo.grads_and_vars(metatrain_loss)
# Avoids NaNs in summaries.
metatrain_loss = tf.cond(tf.is_nan(metatrain_loss),
lambda: tf.zeros_like(metatrain_loss),
lambda: metatrain_loss)
metatrain_gradients = _clip_gradients(
metatrain_gradients, outer_model_config["gradient_threshold"],
outer_model_config["gradient_norm_threshold"])
_construct_training_summaries(metatrain_loss, metatrain_accuracy,
metatrain_gradients, metatrain_variables)
optimizer = tf.train.AdamOptimizer(
learning_rate=outer_model_config["outer_lr"])
global_step = tf.train.get_or_create_global_step()
train_op = optimizer.apply_gradients(
zip(metatrain_gradients, metatrain_variables), global_step)
data_config = config.get_data_config()
tf.logging.info("data_config: {}".format(data_config))
total_examples_per_class = data_config["total_examples_per_class"]
metavalid_batch = _construct_examples_batch(
outer_model_config["metavalid_batch_size"], "val", num_classes,
num_tr_examples_per_class,
total_examples_per_class - num_tr_examples_per_class)
metavalid_loss, metavalid_accuracy = _construct_loss_and_accuracy(
leo, metavalid_batch, False)
metatest_batch = _construct_examples_batch(
outer_model_config["metatest_batch_size"], "test", num_classes,
num_tr_examples_per_class,
total_examples_per_class - num_tr_examples_per_class)
_, metatest_accuracy = _construct_loss_and_accuracy(
leo, metatest_batch, False)
_construct_validation_summaries(metavalid_loss, metavalid_accuracy)
return (train_op, global_step, metatrain_accuracy, metavalid_accuracy,
metatest_accuracy)
def run_training_loop(checkpoint_path):
"""Runs the training loop, either saving a checkpoint or evaluating it."""
outer_model_config = config.get_outer_model_config()
tf.logging.info("outer_model_config: {}".format(outer_model_config))
(train_op, global_step, metatrain_accuracy, metavalid_accuracy,
metatest_accuracy) = construct_graph(outer_model_config)
num_steps_limit = outer_model_config["num_steps_limit"]
best_metavalid_accuracy = 0.
with tf.train.MonitoredTrainingSession(
checkpoint_dir=checkpoint_path,
save_summaries_steps=FLAGS.checkpoint_steps,
log_step_count_steps=FLAGS.checkpoint_steps,
save_checkpoint_steps=FLAGS.checkpoint_steps,
summary_dir=checkpoint_path) as sess:
if not FLAGS.evaluation_mode:
global_step_ev = sess.run(global_step)
while global_step_ev < num_steps_limit:
if global_step_ev % FLAGS.checkpoint_steps == 0:
# Just after saving checkpoint, calculate accuracy 10 times and save
# the best checkpoint for early stopping.
metavalid_accuracy_ev = utils.evaluate_and_average(
sess, metavalid_accuracy, 10)
tf.logging.info("Step: {} meta-valid accuracy: {}".format(
global_step_ev, metavalid_accuracy_ev))
if metavalid_accuracy_ev > best_metavalid_accuracy:
utils.copy_checkpoint(checkpoint_path, global_step_ev,
metavalid_accuracy_ev)
best_metavalid_accuracy = metavalid_accuracy_ev
_, global_step_ev, metatrain_accuracy_ev = sess.run(
[train_op, global_step, metatrain_accuracy])
if global_step_ev % (FLAGS.checkpoint_steps // 2) == 0:
tf.logging.info("Step: {} meta-train accuracy: {}".format(
global_step_ev, metatrain_accuracy_ev))
else:
assert not FLAGS.checkpoint_steps
num_metatest_estimates = (
10000 // outer_model_config["metatest_batch_size"])
test_accuracy = utils.evaluate_and_average(sess, metatest_accuracy,
num_metatest_estimates)
tf.logging.info("Metatest accuracy: %f", test_accuracy)
with tf.gfile.Open(
os.path.join(checkpoint_path, "test_accuracy"), "wb") as f:
pickle.dump(test_accuracy, f)
def main(argv):
del argv # Unused.
run_training_loop(FLAGS.checkpoint_path)
if __name__ == "__main__":
tf.app.run()
|
from setuptools import setup, find_packages
setup(
name='MusicParser',
version='1.0.1',
url='https://github.com/rubysoho07/MusicParser',
author='Yungon Park',
author_email='hahafree12@gmail.com',
description='Parsing music album from music information sites.',
install_requires=[
"requests == 2.20.0",
"beautifulsoup4 == 4.6.0"
],
packages=find_packages()
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tf_pose.runner import infer, Estimator, get_estimator
|
#
# Tests for the Unary Operator classes
#
import pybamm
import unittest
import numpy as np
from scipy.sparse import diags
class TestUnaryOperators(unittest.TestCase):
def test_unary_operator(self):
a = pybamm.Symbol("a", domain=["test"])
un = pybamm.UnaryOperator("unary test", a)
self.assertEqual(un.children[0].name, a.name)
self.assertEqual(un.domain, a.domain)
# with number
log = pybamm.log(10)
self.assertEqual(log.evaluate(), np.log(10))
def test_negation(self):
a = pybamm.Symbol("a")
nega = pybamm.Negate(a)
self.assertEqual(nega.name, "-")
self.assertEqual(nega.children[0].name, a.name)
b = pybamm.Scalar(4)
negb = pybamm.Negate(b)
self.assertEqual(negb.evaluate(), -4)
def test_absolute(self):
a = pybamm.Symbol("a")
absa = pybamm.AbsoluteValue(a)
self.assertEqual(absa.name, "abs")
self.assertEqual(absa.children[0].name, a.name)
b = pybamm.Scalar(-4)
absb = pybamm.AbsoluteValue(b)
self.assertEqual(absb.evaluate(), 4)
def test_sign(self):
b = pybamm.Scalar(-4)
signb = pybamm.sign(b)
self.assertEqual(signb.evaluate(), -1)
A = diags(np.linspace(-1, 1, 5))
b = pybamm.Matrix(A)
signb = pybamm.sign(b)
np.testing.assert_array_equal(
np.diag(signb.evaluate().toarray()), [-1, -1, 0, 1, 1]
)
def test_gradient(self):
# gradient of scalar symbol should fail
a = pybamm.Symbol("a")
with self.assertRaisesRegex(
pybamm.DomainError, "Cannot take gradient of 'a' since its domain is empty"
):
pybamm.Gradient(a)
# gradient of variable evaluating on edges should fail
a = pybamm.PrimaryBroadcastToEdges(pybamm.Scalar(1), "test")
with self.assertRaisesRegex(TypeError, "evaluates on edges"):
pybamm.Gradient(a)
# gradient of broadcast should return broadcasted zero
a = pybamm.PrimaryBroadcast(pybamm.Variable("a"), "test domain")
grad = pybamm.grad(a)
self.assertIsInstance(grad, pybamm.PrimaryBroadcastToEdges)
self.assertIsInstance(grad.child, pybamm.PrimaryBroadcast)
self.assertIsInstance(grad.child.child, pybamm.Scalar)
self.assertEqual(grad.child.child.value, 0)
# otherwise gradient should work
a = pybamm.Symbol("a", domain="test domain")
grad = pybamm.Gradient(a)
self.assertEqual(grad.children[0].name, a.name)
self.assertEqual(grad.domain, a.domain)
def test_div(self):
# divergence of scalar symbol should fail
a = pybamm.Symbol("a")
with self.assertRaisesRegex(
pybamm.DomainError,
"Cannot take divergence of 'a' since its domain is empty",
):
pybamm.Divergence(a)
# divergence of variable evaluating on edges should fail
a = pybamm.PrimaryBroadcast(pybamm.Scalar(1), "test")
with self.assertRaisesRegex(TypeError, "evaluates on nodes"):
pybamm.Divergence(a)
# divergence of broadcast should return broadcasted zero
a = pybamm.PrimaryBroadcastToEdges(pybamm.Variable("a"), "test domain")
div = pybamm.div(a)
self.assertIsInstance(div, pybamm.PrimaryBroadcast)
self.assertIsInstance(div.child, pybamm.PrimaryBroadcast)
self.assertIsInstance(div.child.child, pybamm.Scalar)
self.assertEqual(div.child.child.value, 0)
# otherwise divergence should work
a = pybamm.Symbol("a", domain="test domain")
div = pybamm.Divergence(pybamm.Gradient(a))
self.assertEqual(div.domain, a.domain)
def test_integral(self):
# time integral
a = pybamm.Symbol("a")
t = pybamm.t
inta = pybamm.Integral(a, t)
self.assertEqual(inta.name, "integral dtime")
# self.assertTrue(inta.definite)
self.assertEqual(inta.children[0].name, a.name)
self.assertEqual(inta.integration_variable[0], t)
self.assertEqual(inta.domain, [])
# space integral
a = pybamm.Symbol("a", domain=["negative electrode"])
x = pybamm.SpatialVariable("x", ["negative electrode"])
inta = pybamm.Integral(a, x)
self.assertEqual(inta.name, "integral dx ['negative electrode']")
self.assertEqual(inta.children[0].name, a.name)
self.assertEqual(inta.integration_variable[0], x)
self.assertEqual(inta.domain, [])
self.assertEqual(inta.auxiliary_domains, {})
# space integral with secondary domain
a_sec = pybamm.Symbol(
"a",
domain=["negative electrode"],
auxiliary_domains={"secondary": "current collector"},
)
x = pybamm.SpatialVariable("x", ["negative electrode"])
inta_sec = pybamm.Integral(a_sec, x)
self.assertEqual(inta_sec.domain, ["current collector"])
self.assertEqual(inta_sec.auxiliary_domains, {})
# space integral with secondary domain
a_tert = pybamm.Symbol(
"a",
domain=["negative electrode"],
auxiliary_domains={
"secondary": "current collector",
"tertiary": "some extra domain",
},
)
x = pybamm.SpatialVariable("x", ["negative electrode"])
inta_tert = pybamm.Integral(a_tert, x)
self.assertEqual(inta_tert.domain, ["current collector"])
self.assertEqual(
inta_tert.auxiliary_domains, {"secondary": ["some extra domain"]}
)
# space integral over two variables
b = pybamm.Symbol("b", domain=["current collector"])
y = pybamm.SpatialVariable("y", ["current collector"])
z = pybamm.SpatialVariable("z", ["current collector"])
inta = pybamm.Integral(b, [y, z])
self.assertEqual(inta.name, "integral dy dz ['current collector']")
self.assertEqual(inta.children[0].name, b.name)
self.assertEqual(inta.integration_variable[0], y)
self.assertEqual(inta.integration_variable[1], z)
self.assertEqual(inta.domain, [])
# Indefinite
inta = pybamm.IndefiniteIntegral(a, x)
self.assertEqual(inta.name, "a integrated w.r.t x on ['negative electrode']")
self.assertEqual(inta.children[0].name, a.name)
self.assertEqual(inta.integration_variable[0], x)
self.assertEqual(inta.domain, ["negative electrode"])
inta_sec = pybamm.IndefiniteIntegral(a_sec, x)
self.assertEqual(inta_sec.domain, ["negative electrode"])
self.assertEqual(
inta_sec.auxiliary_domains, {"secondary": ["current collector"]}
)
# backward indefinite integral
inta = pybamm.BackwardIndefiniteIntegral(a, x)
self.assertEqual(
inta.name, "a integrated backward w.r.t x on ['negative electrode']"
)
# expected errors
a = pybamm.Symbol("a", domain=["negative electrode"])
x = pybamm.SpatialVariable("x", ["separator"])
y = pybamm.Variable("y")
z = pybamm.SpatialVariable("z", ["negative electrode"])
with self.assertRaises(pybamm.DomainError):
pybamm.Integral(a, x)
with self.assertRaises(ValueError):
pybamm.Integral(a, y)
def test_index(self):
vec = pybamm.StateVector(slice(0, 5))
y_test = np.array([1, 2, 3, 4, 5])
# with integer
ind = vec[3]
self.assertIsInstance(ind, pybamm.Index)
self.assertEqual(ind.slice, slice(3, 4))
self.assertEqual(ind.evaluate(y=y_test), 4)
# with slice
ind = vec[1:3]
self.assertIsInstance(ind, pybamm.Index)
self.assertEqual(ind.slice, slice(1, 3))
np.testing.assert_array_equal(ind.evaluate(y=y_test), np.array([[2], [3]]))
# with only stop slice
ind = vec[:3]
self.assertIsInstance(ind, pybamm.Index)
self.assertEqual(ind.slice, slice(3))
np.testing.assert_array_equal(ind.evaluate(y=y_test), np.array([[1], [2], [3]]))
# errors
with self.assertRaisesRegex(TypeError, "index must be integer or slice"):
pybamm.Index(vec, 0.0)
debug_mode = pybamm.settings.debug_mode
pybamm.settings.debug_mode = True
with self.assertRaisesRegex(ValueError, "slice size exceeds child size"):
pybamm.Index(vec, 5)
pybamm.settings.debug_mode = debug_mode
def test_diff(self):
a = pybamm.StateVector(slice(0, 1))
y = np.array([5])
# negation
self.assertEqual((-a).diff(a).evaluate(y=y), -1)
self.assertEqual((-a).diff(-a).evaluate(), 1)
# absolute value
self.assertEqual((a ** 3).diff(a).evaluate(y=y), 3 * 5 ** 2)
self.assertEqual((abs(a ** 3)).diff(a).evaluate(y=y), 3 * 5 ** 2)
self.assertEqual((a ** 3).diff(a).evaluate(y=-y), 3 * 5 ** 2)
self.assertEqual((abs(a ** 3)).diff(a).evaluate(y=-y), -3 * 5 ** 2)
# sign
self.assertEqual((pybamm.sign(a)).diff(a).evaluate(y=y), 0)
# spatial operator (not implemented)
spatial_a = pybamm.SpatialOperator("name", a)
with self.assertRaises(NotImplementedError):
spatial_a.diff(a)
def test_printing(self):
a = pybamm.Symbol("a", domain="test")
self.assertEqual(str(-a), "-a")
grad = pybamm.Gradient(a)
self.assertEqual(grad.name, "grad")
self.assertEqual(str(grad), "grad(a)")
def test_id(self):
a = pybamm.Scalar(4)
un1 = pybamm.UnaryOperator("test", a)
un2 = pybamm.UnaryOperator("test", a)
un3 = pybamm.UnaryOperator("new test", a)
self.assertEqual(un1.id, un2.id)
self.assertNotEqual(un1.id, un3.id)
a = pybamm.Scalar(4)
un4 = pybamm.UnaryOperator("test", a)
self.assertEqual(un1.id, un4.id)
d = pybamm.Scalar(42)
un5 = pybamm.UnaryOperator("test", d)
self.assertNotEqual(un1.id, un5.id)
def test_delta_function(self):
a = pybamm.Symbol("a")
delta_a = pybamm.DeltaFunction(a, "right", "some domain")
self.assertEqual(delta_a.side, "right")
self.assertEqual(delta_a.child.id, a.id)
self.assertFalse(delta_a.evaluates_on_edges())
with self.assertRaisesRegex(
pybamm.DomainError, "Delta function domain cannot be None"
):
delta_a = pybamm.DeltaFunction(a, "right", None)
def test_boundary_operators(self):
a = pybamm.Symbol("a", domain="some domain")
boundary_a = pybamm.BoundaryOperator("boundary", a, "right")
self.assertEqual(boundary_a.side, "right")
self.assertEqual(boundary_a.child.id, a.id)
def test_evaluates_on_edges(self):
a = pybamm.StateVector(slice(0, 10))
self.assertFalse(a[1].evaluates_on_edges())
self.assertFalse(pybamm.Laplacian(a).evaluates_on_edges())
def test_boundary_value(self):
a = pybamm.Scalar(1)
boundary_a = pybamm.boundary_value(a, "right")
self.assertEqual(boundary_a.id, a.id)
boundary_broad_a = pybamm.boundary_value(
pybamm.PrimaryBroadcast(a, ["negative electrode"]), "left"
)
self.assertEqual(boundary_broad_a.evaluate(), np.array([1]))
a = pybamm.Symbol("a", domain=["separator"])
boundary_a = pybamm.boundary_value(a, "right")
self.assertIsInstance(boundary_a, pybamm.BoundaryValue)
self.assertEqual(boundary_a.side, "right")
self.assertEqual(boundary_a.domain, [])
self.assertEqual(boundary_a.auxiliary_domains, {})
# test with secondary domain
a_sec = pybamm.Symbol(
"a",
domain=["separator"],
auxiliary_domains={"secondary": "current collector"},
)
boundary_a_sec = pybamm.boundary_value(a_sec, "right")
self.assertEqual(boundary_a_sec.domain, ["current collector"])
self.assertEqual(boundary_a_sec.auxiliary_domains, {})
# test with secondary domain and tertiary domain
a_tert = pybamm.Symbol(
"a",
domain=["separator"],
auxiliary_domains={"secondary": "current collector", "tertiary": "bla"},
)
boundary_a_tert = pybamm.boundary_value(a_tert, "right")
self.assertEqual(boundary_a_tert.domain, ["current collector"])
self.assertEqual(boundary_a_tert.auxiliary_domains, {"secondary": ["bla"]})
# error if boundary value on tabs and domain is not "current collector"
var = pybamm.Variable("var", domain=["negative electrode"])
with self.assertRaisesRegex(pybamm.ModelError, "Can only take boundary"):
pybamm.boundary_value(var, "negative tab")
pybamm.boundary_value(var, "positive tab")
def test_x_average(self):
a = pybamm.Scalar(1)
average_a = pybamm.x_average(a)
self.assertEqual(average_a.id, a.id)
average_broad_a = pybamm.x_average(
pybamm.PrimaryBroadcast(a, ["negative electrode"])
)
self.assertEqual(average_broad_a.evaluate(), np.array([1]))
conc_broad = pybamm.Concatenation(
pybamm.PrimaryBroadcast(1, ["negative electrode"]),
pybamm.PrimaryBroadcast(2, ["separator"]),
pybamm.PrimaryBroadcast(3, ["positive electrode"]),
)
average_conc_broad = pybamm.x_average(conc_broad)
self.assertIsInstance(average_conc_broad, pybamm.Division)
for domain in [
["negative electrode"],
["separator"],
["positive electrode"],
["negative electrode", "separator", "positive electrode"],
]:
a = pybamm.Symbol("a", domain=domain)
x = pybamm.SpatialVariable("x", domain)
av_a = pybamm.x_average(a)
self.assertIsInstance(av_a, pybamm.Division)
self.assertIsInstance(av_a.children[0], pybamm.Integral)
self.assertEqual(av_a.children[0].integration_variable[0].domain, x.domain)
self.assertEqual(av_a.domain, [])
a = pybamm.Symbol("a", domain="new domain")
av_a = pybamm.x_average(a)
self.assertEqual(av_a.domain, [])
self.assertIsInstance(av_a, pybamm.Division)
self.assertIsInstance(av_a.children[0], pybamm.Integral)
self.assertEqual(av_a.children[0].integration_variable[0].domain, a.domain)
self.assertIsInstance(av_a.children[1], pybamm.Integral)
self.assertEqual(av_a.children[1].integration_variable[0].domain, a.domain)
self.assertEqual(av_a.children[1].children[0].id, pybamm.ones_like(a).id)
# x-average of symbol that evaluates on edges raises error
symbol_on_edges = pybamm.PrimaryBroadcastToEdges(1, "domain")
with self.assertRaisesRegex(
ValueError, "Can't take the x-average of a symbol that evaluates on edges"
):
pybamm.x_average(symbol_on_edges)
def test_r_average(self):
a = pybamm.Scalar(1)
average_a = pybamm.r_average(a)
self.assertEqual(average_a.id, a.id)
average_broad_a = pybamm.r_average(
pybamm.PrimaryBroadcast(a, ["negative particle"])
)
self.assertEqual(average_broad_a.evaluate(), np.array([1]))
for domain in [["negative particle"], ["positive particle"]]:
a = pybamm.Symbol("a", domain=domain)
r = pybamm.SpatialVariable("r", domain)
av_a = pybamm.r_average(a)
self.assertIsInstance(av_a, pybamm.Division)
self.assertIsInstance(av_a.children[0], pybamm.Integral)
self.assertEqual(av_a.children[0].integration_variable[0].domain, r.domain)
# electrode domains go to current collector when averaged
self.assertEqual(av_a.domain, [])
# r-average of symbol that evaluates on edges raises error
symbol_on_edges = pybamm.PrimaryBroadcastToEdges(1, "domain")
with self.assertRaisesRegex(
ValueError, "Can't take the r-average of a symbol that evaluates on edges"
):
pybamm.r_average(symbol_on_edges)
def test_yz_average(self):
a = pybamm.Scalar(1)
z_average_a = pybamm.z_average(a)
yz_average_a = pybamm.yz_average(a)
self.assertEqual(z_average_a.id, a.id)
self.assertEqual(yz_average_a.id, a.id)
z_average_broad_a = pybamm.z_average(
pybamm.PrimaryBroadcast(a, ["current collector"])
)
yz_average_broad_a = pybamm.yz_average(
pybamm.PrimaryBroadcast(a, ["current collector"])
)
self.assertEqual(z_average_broad_a.evaluate(), np.array([1]))
self.assertEqual(yz_average_broad_a.evaluate(), np.array([1]))
a = pybamm.Symbol("a", domain=["current collector"])
y = pybamm.SpatialVariable("y", ["current collector"])
z = pybamm.SpatialVariable("z", ["current collector"])
z_av_a = pybamm.z_average(a)
yz_av_a = pybamm.yz_average(a)
self.assertIsInstance(z_av_a, pybamm.Division)
self.assertIsInstance(yz_av_a, pybamm.Division)
self.assertIsInstance(z_av_a.children[0], pybamm.Integral)
self.assertIsInstance(yz_av_a.children[0], pybamm.Integral)
self.assertEqual(z_av_a.children[0].integration_variable[0].domain, z.domain)
self.assertEqual(yz_av_a.children[0].integration_variable[0].domain, y.domain)
self.assertEqual(yz_av_a.children[0].integration_variable[1].domain, z.domain)
self.assertEqual(z_av_a.domain, [])
self.assertEqual(yz_av_a.domain, [])
a = pybamm.Symbol("a", domain="bad domain")
with self.assertRaises(pybamm.DomainError):
pybamm.z_average(a)
with self.assertRaises(pybamm.DomainError):
pybamm.yz_average(a)
# average of symbol that evaluates on edges raises error
symbol_on_edges = pybamm.PrimaryBroadcastToEdges(1, "domain")
with self.assertRaisesRegex(
ValueError, "Can't take the z-average of a symbol that evaluates on edges"
):
pybamm.z_average(symbol_on_edges)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
|
# -*- coding: utf-8 -*-
"""
This is module that represents the helper methods.
Updated since version 1.1:
1. Added check_path_exist(), check_is_directory() and check_is_file().
Updated since version 1.2 (OpenWarp - Add Logging Functionality) :
Added support for logging
"""
__author__ = "caoweiquan322, yedtoss"
__copyright__ = "Copyright (C) 2014-2016 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import traceback
import os
def check_not_none_nor_empty(val, name):
'''
Check if the given value is None or empty string.
@param val: the given value to check
@param name: name of val
@raise TypeError: if val is not of type string
@raise ValueError: if val is None or empty string
'''
    if val is None:
        raise ValueError('Object ' + name + ' should not be None.')
    if not isinstance(val, str) and not isinstance(val, unicode):
        raise TypeError('Object ' + name + ' should be a string.')
    if len(val.strip()) == 0:
        raise ValueError('Object ' + name + ' should not be an empty string.')
def check_path_exist(val, name):
'''
Check if the given value is a legal file or directory path.
@param val: the given value to check
@param name: name of val
@raise ValueError: if val is not a legal file or directory path
'''
if not os.path.exists(val):
raise ValueError('Path "' + val + '" does not exist.')
def check_is_directory(val, name):
'''
Check if the given value is a legal directory path.
@param val: the given value to check
@param name: name of val
@raise ValueError: if val is not a legal directory path
'''
check_path_exist(val, name)
if not os.path.isdir(val):
raise ValueError('Path "' + val + '" is not a legal directory.')
def check_is_file(val, name):
'''
Check if the given value is a legal file path.
@param val: the given value to check
@param name: name of val
@raise ValueError: if val is not a legal file path
'''
check_path_exist(val, name)
if not os.path.isfile(val):
raise ValueError('Path "' + val + '" is not a legal file.')
def check_type_value(val, name, expected_type, allow_none):
'''
Check if the given value is of expected type. And also check if the val is None.
@param val: the given value to check
@param name: name of val
@param expected_type: the expected type
@param allow_none: whether the val is allowed to be None
@raise TypeError: if val is not of expected type
@raise ValueError: if val is None while not allow None
'''
    if val is None:
        if not allow_none:
            raise ValueError('Object ' + name + ' should not be None.')
        return
    if not isinstance(val, expected_type):
        raise TypeError('Object ' + name + ' should be of ' + str(expected_type) + '.')
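# Added usage sketch (not part of the original module):
#   check_type_value(3, 'count', int, False)     # passes silently
#   check_type_value(None, 'count', int, True)   # allowed, no-op
#   check_type_value(None, 'count', int, False)  # raises ValueError
#   check_type_value('3', 'count', int, False)   # raises TypeError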
def log_entrance(logger, signature, parasMap):
'''
Logs for entrance into public methods at DEBUG level.
@param logger: the logger object
@param signature: the method signature
@param parasMap: the passed parameters
'''
logger.debug('[Entering method ' + signature + ']')
    if parasMap is not None and len(parasMap) > 0:
paraStr = '[Input parameters['
for (k,v) in parasMap.items():
paraStr += (str(k) + ':' + str(v) + ', ')
paraStr += ']]'
logger.debug(paraStr)
def log_exit(logger, signature, parasList):
'''
Logs for exit from public methods at DEBUG level.
@param logger: the logger object
@param signature: the method signature
@param parasList: the objects to return
'''
logger.debug('[Exiting method ' + signature + ']')
    if parasList is not None and len(parasList) > 0:
logger.debug('[Output parameter ' + str(parasList) + ']')
def log_exception(logger, signature, e):
'''
Logging exception at ERROR level.
@param logger: the logger object
@param signature: the method signature
@param e: the error
'''
# This will log the traceback.
logger.error('[Error in method ' + signature + ': Details ' + str(e) + ']')
logger.error(' Error stack:')
logger.error(traceback.format_exc())
return e
|
import unittest
import test.test_tools
test.test_tools.skip_if_missing('c-analyzer')
with test.test_tools.imports_under_tool('c-analyzer'):
from cpython.__main__ import main
class ActualChecks(unittest.TestCase):
# XXX Also run the check in "make check".
#@unittest.expectedFailure
# Failing on one of the buildbots (see https://bugs.python.org/issue36876).
@unittest.skip('activate this once all the globals have been resolved')
def test_check_c_globals(self):
try:
main('check', {})
except NotImplementedError:
raise unittest.SkipTest('not supported on this host')
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
|
'''Abandoned draft of a manual Caesar encoder, kept commented out because it
was never completed and does not run:
def encode(shiftCount, plaintText):
    abjat = ["A","B","C","D","E","F","G","H","I","J","K","L","M",
             "N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
    for char in plaintText:
        if plaintText.isalpha() and plaintText.upper():
            # must be True
            print(plaintText.isalpha())
            # split the string into individual characters
            text = [plaintText[i:i + 1] for i in range(0, len(plaintText), 1)]
            for i in range(0, len(abjat), shiftCount):
                if text[i] == abjat[i]:
                    pass  # the shifting logic was never written
        else:
            print("sorry, this input cannot be processed")
print("Enter plaintext:")
plaintText = input()
encode(3, plaintText)'''
def encrypt(text, s):
    result = ""
    for i in range(len(text)):
        char = text[i]
        # check whether the letter is uppercase
        if char.isupper():
            result += chr((ord(char) + int(s) - 65) % 26 + 65)
        # encrypt lowercase characters in the plain text
        elif char.islower():
            result += chr((ord(char) + int(s) - 97) % 26 + 97)
        # leave non-letters (spaces, digits, punctuation) unchanged
        else:
            result += char
    return result
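# Added check (a sketch, not in the original script): encrypt("xyz", 3)
# returns "abc" and encrypt("ABC", 3) returns "DEF"; the modulo-26
# arithmetic wraps z around to a.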
def decrypt():
    print('Enter a word/sentence:')
    ciphertext = input()
    print('Enter the shift value: ')
    shift = int(input())
    ciphertext = ciphertext.split()
    sentence = []
    for word in ciphertext:
        # undo the shift with wrap-around so letters stay within A-Z/a-z
        plaintext_chars = []
        for o in [ord(x) for x in word]:
            if chr(o).isupper():
                plaintext_chars.append(chr((o - 65 - shift) % 26 + 65))
            elif chr(o).islower():
                plaintext_chars.append(chr((o - 97 - shift) % 26 + 97))
            else:
                plaintext_chars.append(chr(o))  # keep non-letters as-is
        sentence.append(''.join(plaintext_chars))
    sentence = ' '.join(sentence)
    print('Decryption result:', sentence)
print("Masukan plaintext : ")
plaintText= input()
print("Masukan shiftCount : ")
shiftCount=input()
print("Plain Text : " + str(plaintText))
print("Shift pattern : " + str(shiftCount))
print("Cipher: " + encrypt(str(plaintText), shiftCount))
print("========================================================")
print("========================================================")
decrypt()
|
"""
github3.repos
=============
This module contains the classes relating to repositories.
"""
from base64 import b64decode
from requests import post
try:
    # Callable lives in collections.abc on Python 3; it was removed from
    # collections in Python 3.10.
    from collections.abc import Callable
except ImportError:
    from collections import Callable
from github3.events import Event
from github3.issues import Issue, IssueEvent, Label, Milestone, issue_params
from github3.git import Blob, Commit, Reference, Tag, Tree
from github3.models import GitHubObject, GitHubCore, BaseComment, BaseCommit
from github3.pulls import PullRequest
from github3.users import User, Key
from github3.decorators import requires_auth
from github3.notifications import Subscription, Thread
class Repository(GitHubCore):
"""The :class:`Repository <Repository>` object. It represents how GitHub
sends information about repositories.
"""
def __init__(self, repo, session=None):
super(Repository, self).__init__(repo, session)
#: URL used to clone via HTTPS.
self.clone_url = repo.get('clone_url', '')
#: ``datetime`` object representing when the Repository was created.
self.created_at = self._strptime(repo.get('created_at'))
#: Description of the repository.
self.description = repo.get('description', '')
# The number of forks
#: The number of forks made of this repository.
self.forks = repo.get('forks', 0)
#: Is this repository a fork?
self.fork = repo.get('fork')
# Clone url using git, e.g. git://github.com/sigmavirus24/github3.py
#: Plain git url for an anonymous clone.
self.git_url = repo.get('git_url', '')
#: Whether or not this repository has downloads enabled
self.has_downloads = repo.get('has_downloads')
#: Whether or not this repository has an issue tracker
self.has_issues = repo.get('has_issues')
#: Whether or not this repository has the wiki enabled
self.has_wiki = repo.get('has_wiki')
# e.g. https://sigmavirus24.github.com/github3.py
#: URL of the home page for the project.
self.homepage = repo.get('homepage', '')
# e.g. https://github.com/sigmavirus24/github3.py
#: URL of the project at GitHub.
self.html_url = repo.get('html_url', '')
#: Unique id of the repository.
self.id = repo.get('id', 0)
#: Language property.
self.language = repo.get('language', '')
#: Mirror property.
self.mirror_url = repo.get('mirror_url', '')
# Repository name, e.g. github3.py
#: Name of the repository.
self.name = repo.get('name', '')
# Number of open issues
#: Number of open issues on the repository.
self.open_issues = repo.get('open_issues', 0)
# Repository owner's name
#: :class:`User <github3.users.User>` object representing the
# repository owner.
self.owner = User(repo.get('owner', {}), self._session)
#: Is this repository private?
self.private = repo.get('private')
#: ``datetime`` object representing the last time commits were pushed
# to the repository.
self.pushed_at = self._strptime(repo.get('pushed_at'))
#: Size of the repository.
self.size = repo.get('size', 0)
# SSH url e.g. git@github.com/sigmavirus24/github3.py
#: URL to clone the repository via SSH.
self.ssh_url = repo.get('ssh_url', '')
#: If it exists, url to clone the repository via SVN.
self.svn_url = repo.get('svn_url', '')
#: ``datetime`` object representing the last time the repository was
# updated.
self.updated_at = self._strptime(repo.get('updated_at'))
self._api = repo.get('url', '')
# The number of watchers
#: Number of users watching the repository.
self.watchers = repo.get('watchers', 0)
        #: The ultimate source of the fork network, if this is a fork :class:`Repository`
self.source = repo.get('source')
if self.source:
self.source = Repository(self.source, self)
#: Parent of this fork, if it exists :class:`Repository`
self.parent = repo.get('parent')
if self.parent:
self.parent = Repository(self.parent, self)
#: default branch for the repository
self.master_branch = repo.get('master_branch', '')
def __repr__(self):
return '<Repository [{0}]>'.format(self)
def __str__(self):
return '{0}/{1}'.format(self.owner, self.name)
def _update_(self, repo):
self.__init__(repo, self._session)
def _create_pull(self, data):
json = None
if data:
url = self._build_url('pulls', base_url=self._api)
json = self._json(self._post(url, data), 201)
return PullRequest(json, self._session) if json else None
@requires_auth
def add_collaborator(self, login):
"""Add ``login`` as a collaborator to a repository.
:param str login: (required), login of the user
:returns: bool -- True if successful, False otherwise
"""
resp = False
if login:
url = self._build_url('collaborators', login, base_url=self._api)
resp = self._boolean(self._put(url), 204, 404)
return resp
def archive(self, format, path='', ref='master'):
"""Get the tarball or zipball archive for this repo at ref.
:param str format: (required), accepted values: ('tarball',
'zipball')
:param path: (optional), path where the file should be saved
to, default is the filename provided in the headers and will be
written in the current directory.
it can take a file-like object as well
:type path: str, file
:param str ref: (optional)
:returns: bool -- True if successful, False otherwise
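
        Example (an illustrative sketch; assumes ``repo`` is a
        :class:`Repository <Repository>` instance)::

            # write the tarball of master to repo.tar.gz
            repo.archive('tarball', path='repo.tar.gz', ref='master')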
"""
resp = None
written = False
if format in ('tarball', 'zipball'):
url = self._build_url(format, ref, base_url=self._api)
resp = self._get(url, allow_redirects=True, prefetch=False)
pre_opened = False
if resp and self._boolean(resp, 200, 404):
fd = None
if path:
if isinstance(getattr(path, 'write', None), Callable):
pre_opened = True
fd = path
else:
fd = open(path, 'wb')
else:
header = resp.headers['content-disposition']
i = header.find('filename=') + len('filename=')
fd = open(header[i:], 'wb')
for chunk in resp.iter_content():
fd.write(chunk)
if not pre_opened:
fd.close()
written = True
return written
def blob(self, sha):
"""Get the blob indicated by ``sha``.
:param str sha: (required), sha of the blob
:returns: :class:`Blob <github3.git.Blob>` if successful, otherwise
None
"""
url = self._build_url('git', 'blobs', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Blob(json) if json else None
def branch(self, name):
"""Get the branch ``name`` of this repository.
:param str name: (required), branch name
:type name: str
:returns: :class:`Branch <Branch>`
"""
json = None
if name:
url = self._build_url('branches', name, base_url=self._api)
json = self._json(self._get(url), 200)
return Branch(json, self) if json else None
def commit(self, sha):
"""Get a single (repo) commit. See :func:`git_commit` for the Git Data
Commit.
:param str sha: (required), sha of the commit
:returns: :class:`RepoCommit <RepoCommit>` if successful, otherwise
None
"""
url = self._build_url('commits', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return RepoCommit(json, self) if json else None
def commit_comment(self, comment_id):
"""Get a single commit comment.
:param int comment_id: (required), id of the comment used by GitHub
:returns: :class:`RepoComment <RepoComment>` if successful, otherwise
None
"""
url = self._build_url('comments', str(comment_id), base_url=self._api)
json = self._json(self._get(url), 200)
return RepoComment(json, self) if json else None
def compare_commits(self, base, head):
"""Compare two commits.
:param str base: (required), base for the comparison
:param str head: (required), compare this against base
:returns: :class:`Comparison <Comparison>` if successful, else None
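
        Example (illustrative; assumes ``repo`` is a :class:`Repository
        <Repository>` instance and both branches exist)::

            comparison = repo.compare_commits('master', 'develop')
            if comparison:
                print(comparison.ahead_by, comparison.behind_by)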
"""
url = self._build_url('compare', base + '...' + head,
base_url=self._api)
json = self._json(self._get(url), 200)
return Comparison(json) if json else None
def contents(self, path):
"""Get the contents of the file pointed to by ``path``.
:param str path: (required), path to file, e.g.
github3/repo.py
:returns: :class:`Contents <Contents>` if successful, else None
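
        Example (illustrative)::

            readme = repo.contents('README.rst')
            if readme:
                print(readme.decoded)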
"""
url = self._build_url('contents', path, base_url=self._api)
resp = self._get(url)
if self._boolean(resp, 200, 404):
return Contents(self._json(resp, 200))
else:
return None
@requires_auth
def create_blob(self, content, encoding):
"""Create a blob with ``content``.
:param str content: (required), content of the blob
:param str encoding: (required), ('base64', 'utf-8')
:returns: string of the SHA returned
"""
sha = ''
if encoding in ('base64', 'utf-8') and content:
url = self._build_url('git', 'blobs', base_url=self._api)
data = {'content': content, 'encoding': encoding}
json = self._json(self._post(url, data), 201)
if json:
sha = json.get('sha')
return sha
@requires_auth
def create_comment(self, body, sha, path='', position=1, line=1):
"""Create a comment on a commit.
:param str body: (required), body of the message
:param str sha: (required), commit id
:param str path: (optional), relative path of the file to comment
on
        :param int position: (optional), line index in the diff to comment on
:param int line: (optional), line number of the file to comment on,
default: 1
:returns: :class:`RepoComment <RepoComment>` if successful else None
"""
line = int(line)
position = int(position)
json = None
if body and sha and line > 0:
data = {'body': body, 'commit_id': sha, 'line': line,
'path': path, 'position': position}
url = self._build_url('commits', sha, 'comments',
base_url=self._api)
json = self._json(self._post(url, data), 201)
return RepoComment(json, self) if json else None
@requires_auth
def create_commit(self, message, tree, parents, author={}, committer={}):
"""Create a commit on this repository.
:param str message: (required), commit message
:param str tree: (required), SHA of the tree object this
commit points to
:param list parents: (required), SHAs of the commits that were parents
of this commit. If empty, the commit will be written as the root
commit. Even if there is only one parent, this should be an
array.
:param dict author: (optional), if omitted, GitHub will
use the authenticated user's credentials and the current
time. Format: {'name': 'Committer Name', 'email':
'name@example.com', 'date': 'YYYY-MM-DDTHH:MM:SS+HH:00'}
        :param dict committer: (optional), if omitted, GitHub will use the
author parameters. Should be the same format as the author
parameter.
:returns: :class:`Commit <github3.git.Commit>` if successful, else
None
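
        Example (an illustrative sketch of the low-level git data flow;
        the parent SHA is a placeholder)::

            sha = repo.create_blob('print("hello")', 'utf-8')
            tree = repo.create_tree([{'path': 'hello.py', 'mode': '100644',
                                      'type': 'blob', 'sha': sha}])
            commit = repo.create_commit('Add hello.py', tree.sha,
                                        ['<parent-commit-sha>'])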
"""
json = None
if message and tree and isinstance(parents, list):
url = self._build_url('git', 'commits', base_url=self._api)
data = {'message': message, 'tree': tree, 'parents': parents,
'author': author, 'committer': committer}
json = self._json(self._post(url, data), 201)
return Commit(json, self) if json else None
@requires_auth
def create_download(self, name, path, description='',
content_type='text/plain'):
"""Create a new download on this repository.
        You do not need to provide the size in bytes; it is determined
        automatically from the file on disk.
.. warning::
            On 2013-03-11, GitHub will be deprecating the Downloads API. This
method will no longer work.
:param str name: (required), name of the file as it will appear
:param str path: (required), path to the file
:param str description: (optional), description of the file
:param str content_type: (optional), e.g. 'text/plain'
:returns: :class:`Download <Download>` if successful, else None
"""
json = None
if name and path:
url = self._build_url('downloads', base_url=self._api)
from os import stat
info = stat(path)
data = {'name': name, 'size': info.st_size,
'description': description,
'content_type': content_type}
json = self._json(self._post(url, data), 201)
if not json:
return None
form = [('key', json.get('path')),
('acl', json.get('acl')),
('success_action_status', '201'),
('Filename', json.get('name')),
('AWSAccessKeyId', json.get('accesskeyid')),
('Policy', json.get('policy')),
('Signature', json.get('signature')),
('Content-Type', json.get('mime_type'))]
file = [('file', open(path, 'rb').read())]
resp = post(json.get('s3_url'), data=form, files=file,
headers={'Accept-Charset': 'utf-8'})
return Download(json, self) if self._boolean(resp, 201, 404) else None
@requires_auth
def create_fork(self, organization=None):
"""Create a fork of this repository.
        :param str organization: (optional), login for organization to create
the fork under
:returns: :class:`Repository <Repository>` if successful, else None
"""
url = self._build_url('forks', base_url=self._api)
if organization:
resp = self._post(url, data={'organization': organization})
else:
resp = self._post(url)
json = self._json(resp, 202)
return Repository(json, self) if json else None
@requires_auth
def create_hook(self, name, config, events=['push'], active=True):
"""Create a hook on this repository.
:param str name: (required), name of the hook
:param dict config: (required), key-value pairs which act as settings
for this hook
:param list events: (optional), events the hook is triggered for
:param bool active: (optional), whether the hook is actually
triggered
:returns: :class:`Hook <Hook>` if successful, else None
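
        Example (illustrative; the callback URL is a placeholder)::

            repo.create_hook('web', {'url': 'http://example.com/callback',
                                     'content_type': 'json'})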
"""
json = None
if name and config and isinstance(config, dict):
url = self._build_url('hooks', base_url=self._api)
data = {'name': name, 'config': config, 'events': events,
'active': active}
json = self._json(self._post(url, data), 201)
return Hook(json, self) if json else None
@requires_auth
def create_issue(self,
title,
body=None,
assignee=None,
milestone=None,
labels=None):
"""Creates an issue on this repository.
:param str title: (required), title of the issue
:param str body: (optional), body of the issue
:param str assignee: (optional), login of the user to assign the
issue to
:param int milestone: (optional), number of the milestone to attribute
this issue to (e.g. ``m`` is a Milestone object, ``m.number`` is
what you pass here.)
:param labels: (optional), labels to apply to this
issue
:type labels: list of strings
:returns: :class:`Issue <github3.issues.Issue>` if successful, else
None
"""
issue = {'title': title, 'body': body, 'assignee': assignee,
'milestone': milestone, 'labels': labels}
self._remove_none(issue)
json = None
if issue:
url = self._build_url('issues', base_url=self._api)
json = self._json(self._post(url, issue), 201)
return Issue(json, self) if json else None
@requires_auth
def create_key(self, title, key):
"""Create a deploy key.
:param str title: (required), title of key
:param str key: (required), key text
:returns: :class:`Key <github3.users.Key>` if successful, else None
"""
data = {'title': title, 'key': key}
url = self._build_url('keys', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Key(json, self) if json else None
@requires_auth
def create_label(self, name, color):
"""Create a label for this repository.
:param str name: (required), name to give to the label
:param str color: (required), value of the color to assign to the
label
:returns: :class:`Label <github3.issues.Label>` if successful, else
None
"""
data = {'name': name, 'color': color.strip('#')}
url = self._build_url('labels', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Label(json, self) if json else None
@requires_auth
def create_milestone(self, title, state=None, description=None,
due_on=None):
"""Create a milestone for this repository.
:param str title: (required), title of the milestone
:param str state: (optional), state of the milestone, accepted
values: ('open', 'closed'), default: 'open'
:param str description: (optional), description of the milestone
:param str due_on: (optional), ISO 8601 formatted due date
:returns: :class:`Milestone <github3.issues.Milestone>` if successful,
else None
"""
url = self._build_url('milestones', base_url=self._api)
if state not in ('open', 'closed'):
state = 'open'
data = {'title': title, 'state': state,
'description': description, 'due_on': due_on}
self._remove_none(data)
json = self._json(self._post(url, data), 201)
return Milestone(json, self) if json else None
@requires_auth
def create_pull(self, title, base, head, body=''):
"""Create a pull request using commits from ``head`` and comparing
against ``base``.
:param str title: (required)
:param str base: (required), e.g., 'username:branch', or a sha
:param str head: (required), e.g., 'master', or a sha
:param str body: (optional), markdown formatted description
:returns: :class:`PullRequest <github3.pulls.PullRequest>` if
successful, else None
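
        Example (illustrative; the branch names are placeholders)::

            repo.create_pull('Add feature', base='master',
                             head='someuser:feature-branch',
                             body='Short description of the change')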
"""
data = {'title': title, 'body': body, 'base': base,
'head': head}
return self._create_pull(data)
@requires_auth
def create_pull_from_issue(self, issue, base, head):
"""Create a pull request from issue #``issue``.
:param int issue: (required), issue number
:param str base: (required), e.g., 'username:branch', or a sha
:param str head: (required), e.g., 'master', or a sha
:returns: :class:`PullRequest <github3.pulls.PullRequest>` if
successful, else None
"""
data = {'issue': issue, 'base': base, 'head': head}
return self._create_pull(data)
@requires_auth
def create_ref(self, ref, sha):
"""Create a reference in this repository.
:param str ref: (required), fully qualified name of the reference,
e.g. ``refs/heads/master``. If it doesn't start with ``refs`` and
contain at least two slashes, GitHub's API will reject it.
:param str sha: (required), SHA1 value to set the reference to
:returns: :class:`Reference <github3.git.Reference>` if successful
else None
"""
data = {'ref': ref, 'sha': sha}
url = self._build_url('git', 'refs', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Reference(json, self) if json else None
@requires_auth
def create_status(self, sha, state, target_url='', description=''):
"""Create a status object on a commit.
:param str sha: (required), SHA of the commit to create the status on
:param str state: (required), state of the test; only the following
are accepted: 'pending', 'success', 'error', 'failure'
:param str target_url: (optional), URL to associate with this status.
        :param str description: (optional), short description of the status
        :returns: :class:`Status <Status>` if successful, else None
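
        Example (illustrative; ``sha`` is a placeholder commit SHA)::

            repo.create_status(sha, 'success',
                               target_url='http://ci.example.com/build/1',
                               description='Build passed')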
"""
json = {}
if sha and state:
data = {'state': state, 'target_url': target_url,
'description': description}
url = self._build_url('statuses', sha, base_url=self._api)
json = self._json(self._post(url, data=data), 201)
return Status(json) if json else None
@requires_auth
def create_tag(self, tag, message, sha, obj_type, tagger,
lightweight=False):
"""Create a tag in this repository.
:param str tag: (required), name of the tag
:param str message: (required), tag message
:param str sha: (required), SHA of the git object this is tagging
:param str obj_type: (required), type of object being tagged, e.g.,
'commit', 'tree', 'blob'
:param dict tagger: (required), containing the name, email of the
tagger and the date it was tagged
:param bool lightweight: (optional), if False, create an annotated
tag, otherwise create a lightweight tag (a Reference).
:returns: If lightweight == False: :class:`Tag <github3.git.Tag>` if
successful, else None. If lightweight == True: :class:`Reference
<Reference>`
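
        Example ``tagger`` dict (illustrative values)::

            {'name': 'Tagger Name', 'email': 'tagger@example.com',
             'date': '2013-03-11T23:39:01Z'}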
"""
if lightweight and tag and sha:
return self.create_ref('refs/tags/' + tag, sha)
json = None
if tag and message and sha and obj_type and len(tagger) == 3:
data = {'tag': tag, 'message': message, 'object': sha,
'type': obj_type, 'tagger': tagger}
url = self._build_url('git', 'tags', base_url=self._api)
json = self._json(self._post(url, data), 201)
if json:
self.create_ref('refs/tags/' + tag, sha)
return Tag(json) if json else None
@requires_auth
def create_tree(self, tree, base_tree=''):
"""Create a tree on this repository.
:param list tree: (required), specifies the tree structure.
Format: [{'path': 'path/file', 'mode':
'filemode', 'type': 'blob or tree', 'sha': '44bfc6d...'}]
:param str base_tree: (optional), SHA1 of the tree you want
to update with new data
:returns: :class:`Tree <github3.git.Tree>` if successful, else None
"""
json = None
if tree and isinstance(tree, list):
data = {'tree': tree, 'base_tree': base_tree}
url = self._build_url('git', 'trees', base_url=self._api)
json = self._json(self._post(url, data), 201)
return Tree(json) if json else None
@requires_auth
def delete(self):
"""Delete this repository.
:returns: bool -- True if successful, False otherwise
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def delete_key(self, key_id):
"""Delete the key with the specified id from your deploy keys list.
:returns: bool -- True if successful, False otherwise
"""
if int(key_id) <= 0:
return False
url = self._build_url('keys', str(key_id), base_url=self._api)
return self._boolean(self._delete(url), 204, 404)
def download(self, id_num):
"""Get a single download object by its id.
.. warning::
            On 2013-03-11, GitHub will be deprecating the Downloads API. This
method will no longer work.
:param int id_num: (required), id of the download
:returns: :class:`Download <Download>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('downloads', str(id_num),
base_url=self._api)
json = self._json(self._get(url), 200)
return Download(json, self) if json else None
@requires_auth
def edit(self,
name,
description=None,
homepage=None,
private=None,
has_issues=None,
has_wiki=None,
has_downloads=None,
default_branch=None):
"""Edit this repository.
:param str name: (required), name of the repository
:param str description: (optional), If not ``None``, change the
description for this repository. API default: ``None`` - leave
value unchanged.
:param str homepage: (optional), If not ``None``, change the homepage
for this repository. API default: ``None`` - leave value unchanged.
:param bool private: (optional), If ``True``, make the repository
private. If ``False``, make the repository public. API default:
``None`` - leave value unchanged.
:param bool has_issues: (optional), If ``True``, enable issues for
this repository. If ``False``, disable issues for this repository.
API default: ``None`` - leave value unchanged.
:param bool has_wiki: (optional), If ``True``, enable the wiki for
this repository. If ``False``, disable the wiki for this
repository. API default: ``None`` - leave value unchanged.
:param bool has_downloads: (optional), If ``True``, enable downloads
for this repository. If ``False``, disable downloads for this
repository. API default: ``None`` - leave value unchanged.
:param str default_branch: (optional), If not ``None``, change the
default branch for this repository. API default: ``None`` - leave
value unchanged.
:returns: bool -- True if successful, False otherwise
"""
edit = {'name': name, 'description': description, 'homepage': homepage,
'private': private, 'has_issues': has_issues,
'has_wiki': has_wiki, 'has_downloads': has_downloads,
'default_branch': default_branch}
self._remove_none(edit)
json = self._json(self._patch(self._api, data=edit), 200)
if json:
self._update_(json)
return True
return False # (No coverage)
def is_collaborator(self, login):
"""Check to see if ``login`` is a collaborator on this repository.
:param str login: (required), login for the user
:returns: bool -- True if successful, False otherwise
"""
if login:
url = self._build_url('collaborators', login, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
return False
def is_fork(self):
"""Checks if this repository is a fork.
:returns: bool
"""
return self.fork
def is_private(self):
"""Checks if this repository is private.
:returns: bool
"""
return self.private
def git_commit(self, sha):
"""Get a single (git) commit.
:param str sha: (required), sha of the commit
:returns: :class:`Commit <github3.git.Commit>` if successful,
otherwise None
"""
url = self._build_url('git', 'commits', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Commit(json, self) if json else None
@requires_auth
def hook(self, id_num):
"""Get a single hook.
:param int id_num: (required), id of the hook
:returns: :class:`Hook <Hook>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('hooks', str(id_num), base_url=self._api)
json = self._json(self._get(url), 200)
return Hook(json, self) if json else None
def is_assignee(self, login):
"""Check if the user is a possible assignee for an issue on this
        repository.
        :param str login: (required), login name of the user to check
        :returns: :class:`bool`
"""
url = self._build_url('assignees', login, base_url=self._api)
return self._boolean(self._get(url), 204, 404)
def issue(self, number):
"""Get the issue specified by ``number``.
:param int number: (required), number of the issue on this repository
:returns: :class:`Issue <github3.issues.Issue>` if successful, else
None
"""
json = None
if int(number) > 0:
url = self._build_url('issues', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return Issue(json, self) if json else None
@requires_auth
def key(self, id_num):
"""Get the specified deploy key.
:param int id_num: (required), id of the key
:returns: :class:`Key <Key>` if successful, else None
"""
json = None
if int(id_num) > 0:
url = self._build_url('keys', str(id_num), base_url=self._api)
json = self._json(self._get(url), 200)
return Key(json, self) if json else None
def label(self, name):
"""Get the label specified by ``name``
:param str name: (required), name of the label
:returns: :class:`Label <github3.issues.Label>` if successful, else
None
"""
json = None
if name:
url = self._build_url('labels', name, base_url=self._api)
json = self._json(self._get(url), 200)
return Label(json, self) if json else None
def iter_assignees(self, number=-1):
"""Iterate over all available assignees to which an issue may be
assigned.
:param int number: (optional), number of assignees to return. Default:
-1 returns all available assignees
:returns: list of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('assignees', base_url=self._api)
return self._iter(int(number), url, User)
def iter_branches(self, number=-1):
"""Iterate over the branches in this repository.
:param int number: (optional), number of branches to return. Default:
-1 returns all branches
:returns: list of :class:`Branch <Branch>`\ es
"""
url = self._build_url('branches', base_url=self._api)
return self._iter(int(number), url, Branch)
def iter_comments(self, number=-1):
"""Iterate over comments on all commits in the repository.
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
url = self._build_url('comments', base_url=self._api)
return self._iter(int(number), url, RepoComment)
    def iter_comments_on_commit(self, sha, number=-1):
"""Iterate over comments for a single commit.
:param sha: (required), sha of the commit to list comments on
:type sha: str
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoComment <RepoComment>`\ s
"""
url = self._build_url('commits', sha, 'comments', base_url=self._api)
return self._iter(int(number), url, RepoComment)
def iter_commits(self, sha='', path='', author='', number=-1):
"""Iterate over commits in this repository.
:param str sha: (optional), sha or branch to start listing commits
from
:param str path: (optional), commits containing this path will be
listed
:param str author: (optional), GitHub login, real name, or email to
filter commits by (using commit author)
:param int number: (optional), number of comments to return. Default:
-1 returns all comments
:returns: list of :class:`RepoCommit <RepoCommit>`\ s
"""
params = {}
if sha:
params['sha'] = sha
if path:
params['path'] = path
if author:
params['author'] = author
url = self._build_url('commits', base_url=self._api)
return self._iter(int(number), url, RepoCommit, params=params)
def iter_contributors(self, anon=False, number=-1):
"""Iterate over the contributors to this repository.
:param bool anon: (optional), True lists anonymous contributors as
well
:param int number: (optional), number of contributors to return.
Default: -1 returns all contributors
:returns: list of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('contributors', base_url=self._api)
params = {}
if anon:
params = {'anon': anon}
return self._iter(int(number), url, User, params=params)
def iter_downloads(self, number=-1):
"""Iterate over available downloads for this repository.
.. warning::
            On 2013-03-11, GitHub will be deprecating the Downloads API. This
method will no longer work.
:param int number: (optional), number of downloads to return. Default:
-1 returns all available downloads
:returns: list of :class:`Download <Download>`\ s
"""
url = self._build_url('downloads', base_url=self._api)
return self._iter(int(number), url, Download)
def iter_events(self, number=-1):
"""Iterate over events on this repository.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: list of :class:`Event <github3.events.Event>`\ s
"""
url = self._build_url('events', base_url=self._api)
return self._iter(int(number), url, Event)
def iter_forks(self, sort='', number=-1):
"""Iterate over forks of this repository.
:param str sort: (optional), accepted values:
('newest', 'oldest', 'watchers'), API default: 'newest'
:param int number: (optional), number of forks to return. Default: -1
returns all forks
:returns: list of :class:`Repository <Repository>`
"""
url = self._build_url('forks', base_url=self._api)
params = {}
if sort in ('newest', 'oldest', 'watchers'):
params = {'sort': sort}
return self._iter(int(number), url, Repository, params=params)
@requires_auth
def iter_hooks(self, number=-1):
"""Iterate over hooks registered on this repository.
        :param int number: (optional), number of hooks to return. Default: -1
returns all hooks
:returns: list of :class:`Hook <Hook>`\ s
"""
url = self._build_url('hooks', base_url=self._api)
return self._iter(int(number), url, Hook)
def iter_issues(self,
milestone=None,
state=None,
assignee=None,
mentioned=None,
labels=None,
sort=None,
direction=None,
since=None,
number=-1):
"""Iterate over issues on this repo based upon parameters passed.
        :param milestone: (optional), milestone number (int), 'none' for
            issues without a milestone, or '*' for issues with any milestone
:param str state: (optional), accepted values: ('open', 'closed')
:param str assignee: (optional), 'none', '*', or login name
:param str mentioned: (optional), user's login name
        :param str labels: (optional), comma-separated list of labels, e.g.
            'bug,ui,@high'
        :param str sort: (optional), accepted values:
            ('created', 'updated', 'comments'), default: 'created'
:param str direction: (optional), accepted values: ('asc', 'desc')
:param str since: (optional), ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ
        :param int number: (optional), number of issues to return.
            Default: -1 returns all issues
:returns: list of :class:`Issue <github3.issues.Issue>`\ s
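
        Example (illustrative)::

            for issue in repo.iter_issues(state='open', labels='bug',
                                          sort='updated', direction='desc'):
                print(issue.number, issue.title)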
"""
url = self._build_url('issues', base_url=self._api)
params = {'assignee': assignee, 'mentioned': mentioned}
if milestone in ('*', 'none') or isinstance(milestone, int):
params['milestone'] = milestone
self._remove_none(params)
params.update(issue_params(None, state, labels, sort, direction,
since)) # nopep8
return self._iter(int(number), url, Issue, params=params)
def iter_issue_events(self, number=-1):
"""Iterates over issue events on this repository.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: generator of
:class:`IssueEvent <github3.issues.IssueEvent>`\ s
"""
url = self._build_url('issues', 'events', base_url=self._api)
return self._iter(int(number), url, IssueEvent)
@requires_auth
def iter_keys(self, number=-1):
"""Iterates over deploy keys on this repository.
:param int number: (optional), number of keys to return. Default: -1
returns all available keys
:returns: generator of :class:`Key <github3.users.Key>`\ s
"""
url = self._build_url('keys', base_url=self._api)
return self._iter(int(number), url, Key)
def iter_labels(self, number=-1):
"""Iterates over labels on this repository.
:param int number: (optional), number of labels to return. Default: -1
returns all available labels
:returns: generator of :class:`Label <github3.issues.Label>`\ s
"""
url = self._build_url('labels', base_url=self._api)
return self._iter(int(number), url, Label)
def iter_languages(self, number=-1):
"""Iterate over the programming languages used in the repository.
:param int number: (optional), number of languages to return. Default:
-1 returns all used languages
:returns: list of tuples
"""
url = self._build_url('languages', base_url=self._api)
return self._iter(int(number), url, tuple)
def iter_milestones(self, state=None, sort=None, direction=None,
number=-1):
"""Iterates over the milestones on this repository.
:param str state: (optional), state of the milestones, accepted
values: ('open', 'closed')
:param str sort: (optional), how to sort the milestones, accepted
values: ('due_date', 'completeness')
:param str direction: (optional), direction to sort the milestones,
accepted values: ('asc', 'desc')
:param int number: (optional), number of milestones to return.
Default: -1 returns all milestones
:returns: generator of
:class:`Milestone <github3.issues.Milestone>`\ s
"""
url = self._build_url('milestones', base_url=self._api)
accepted = {'state': ('open', 'closed'),
'sort': ('due_date', 'completeness'),
'direction': ('asc', 'desc')}
params = {'state': state, 'sort': sort, 'direction': direction}
for (k, v) in list(params.items()):
if not (v and (v in accepted[k])): # e.g., '' or None
del params[k]
if not params:
params = None
return self._iter(int(number), url, Milestone, params)
def iter_network_events(self, number=-1):
"""Iterates over events on a network of repositories.
:param int number: (optional), number of events to return. Default: -1
returns all available events
:returns: generator of :class:`Event <github3.events.Event>`\ s
"""
base = self._api.replace('repos', 'networks', 1)
url = self._build_url('events', base_url=base)
return self._iter(int(number), url, Event)
def iter_notifications(self, all=False, participating=False, since='',
number=-1):
"""Iterates over the notifications for this repository.
:param bool all: (optional), show all notifications, including ones
marked as read
:param bool participating: (optional), show only the notifications the
user is participating in directly
:param str since: (optional), filters out any notifications updated
before the given time. The time should be passed in as UTC in the
ISO 8601 format: ``YYYY-MM-DDTHH:MM:SSZ``. Example:
"2012-10-09T23:39:01Z".
:returns: generator of :class:`Thread <github3.notifications.Thread>`
"""
url = self._build_url('notifications', base_url=self._api)
params = {'all': all, 'participating': participating, 'since': since}
for (k, v) in list(params.items()):
if not v:
del params[k]
return self._iter(int(number), url, Thread, params=params)
def iter_pulls(self, state=None, number=-1):
"""List pull requests on repository.
:param str state: (optional), accepted values: ('open', 'closed')
:param int number: (optional), number of pulls to return. Default: -1
returns all available pull requests
:returns: generator of
:class:`PullRequest <github3.pulls.PullRequest>`\ s
"""
url = self._build_url('pulls', base_url=self._api)
params = {}
if state in ('open', 'closed'):
params['state'] = state
return self._iter(int(number), url, PullRequest, params=params)
def iter_refs(self, subspace='', number=-1):
"""Iterates over references for this repository.
:param str subspace: (optional), e.g. 'tags', 'stashes', 'notes'
:param int number: (optional), number of refs to return. Default: -1
returns all available refs
:returns: generator of :class:`Reference <github3.git.Reference>`\ s
"""
if subspace:
args = ('git', 'refs', subspace)
else:
args = ('git', 'refs')
url = self._build_url(*args, base_url=self._api)
return self._iter(int(number), url, Reference)
def iter_stargazers(self, number=-1):
"""List users who have starred this repository.
        :param int number: (optional), number of stargazers to return.
            Default: -1 returns all stargazers
        :returns: generator of :class:`User <github3.users.User>`\ s
"""
url = self._build_url('stargazers', base_url=self._api)
return self._iter(int(number), url, User)
def iter_subscribers(self, number=-1):
"""Iterates over users subscribed to this repository.
:param int number: (optional), number of subscribers to return.
Default: -1 returns all subscribers available
:returns: generator of :class:`User <github3.users.User>`
"""
url = self._build_url('subscribers', base_url=self._api)
return self._iter(int(number), url, User)
def iter_statuses(self, sha, number=-1):
"""Iterates over the statuses for a specific SHA.
:param str sha: SHA of the commit to list the statuses of
:param int number: (optional), return up to number statuses. Default:
-1 returns all available statuses.
:returns: generator of :class:`Status <Status>`
"""
url = ''
if sha:
url = self._build_url('statuses', sha, base_url=self._api)
return self._iter(int(number), url, Status)
def iter_tags(self, number=-1):
"""Iterates over tags on this repository.
:param int number: (optional), return up to at most number tags.
Default: -1 returns all available tags.
:returns: generator of :class:`RepoTag <RepoTag>`\ s
"""
url = self._build_url('tags', base_url=self._api)
return self._iter(int(number), url, RepoTag)
@requires_auth
def iter_teams(self, number=-1):
"""Iterates over teams with access to this repository.
:param int number: (optional), return up to number Teams. Default: -1
returns all Teams.
:returns: generator of :class:`Team <github3.orgs.Team>`\ s
"""
from github3.orgs import Team
url = self._build_url('teams', base_url=self._api)
return self._iter(int(number), url, Team)
def mark_notifications(self, last_read=''):
"""Mark all notifications in this repository as read.
:param str last_read: (optional), Describes the last point that
            notifications were checked. Anything updated since this time will
            not be marked as read. Default: Now. Expected in ISO 8601 format:
``YYYY-MM-DDTHH:MM:SSZ``. Example: "2012-10-09T23:39:01Z".
:returns: bool
"""
url = self._build_url('notifications', base_url=self._api)
mark = {'read': True}
if last_read:
mark['last_read_at'] = last_read
return self._boolean(self._put(url, data=mark),
205, 404)
def merge(self, base, head, message=''):
"""Perform a merge from ``head`` into ``base``.
:param str base: (required), where you're merging into
:param str head: (required), where you're merging from
:param str message: (optional), message to be used for the commit
:returns: :class:`RepoCommit <RepoCommit>`
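
        Example (illustrative; the branch names are placeholders)::

            commit = repo.merge('master', 'feature-branch',
                                'Merge feature-branch into master')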
"""
url = self._build_url('merges', base_url=self._api)
data = {'base': base, 'head': head, 'commit_message': message}
json = self._json(self._post(url, data=data), 201)
return RepoCommit(json, self) if json else None
def milestone(self, number):
"""Get the milestone indicated by ``number``.
:param int number: (required), unique id number of the milestone
:returns: :class:`Milestone <github3.issues.Milestone>`
"""
url = self._build_url('milestones', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return Milestone(json, self) if json else None
@requires_auth
def pubsubhubbub(self, mode, topic, callback, secret=''):
"""Create/update a pubsubhubbub hook.
:param str mode: (required), accepted values: ('subscribe',
'unsubscribe')
:param str topic: (required), form:
https://github.com/:user/:repo/events/:event
:param str callback: (required), the URI that receives the updates
:param str secret: (optional), shared secret key that generates a
SHA1 HMAC of the payload content.
:returns: bool
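
        Example (illustrative; the topic and callback URLs are
        placeholders)::

            repo.pubsubhubbub(
                'subscribe',
                'https://github.com/octocat/Hello-World/events/push',
                'http://example.com/hook')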
"""
from re import match
        m = match(r'https://github\.com/\w+/[\w\._-]+/events/\w+', topic)
status = False
if mode and topic and callback and m:
data = [('hub.mode', mode), ('hub.topic', topic),
('hub.callback', callback), ('hub.secret', secret)]
url = self._build_url('hub')
status = self._boolean(self._post(url, data=data), 204, 404)
return status
def pull_request(self, number):
"""Get the pull request indicated by ``number``.
:param int number: (required), number of the pull request.
:returns: :class:`PullRequest <github3.pulls.PullRequest>`
"""
json = None
if int(number) > 0:
url = self._build_url('pulls', str(number), base_url=self._api)
json = self._json(self._get(url), 200)
return PullRequest(json, self) if json else None
def readme(self):
"""Get the README for this repository.
:returns: :class:`Contents <Contents>`
"""
url = self._build_url('readme', base_url=self._api)
json = self._json(self._get(url), 200)
return Contents(json) if json else None
def ref(self, ref):
"""Get a reference pointed to by ``ref``.
The most common will be branches and tags. For a branch, you must
specify 'heads/branchname' and for a tag, 'tags/tagname'. Essentially,
the system should return any reference you provide it in the namespace,
including notes and stashes (provided they exist on the server).
:param str ref: (required)
:type ref: str
:returns: :class:`Reference <github3.git.Reference>`
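
        Example (illustrative)::

            master = repo.ref('heads/master')
            v1 = repo.ref('tags/v1.0.0')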
"""
url = self._build_url('git', 'refs', ref, base_url=self._api)
json = self._json(self._get(url), 200)
return Reference(json, self) if json else None
@requires_auth
def remove_collaborator(self, login):
"""Remove collaborator ``login`` from the repository.
:param str login: (required), login name of the collaborator
:returns: bool
"""
resp = False
if login:
url = self._build_url('collaborators', login, base_url=self._api)
resp = self._boolean(self._delete(url), 204, 404)
return resp
@requires_auth
def set_subscription(self, subscribed, ignored):
"""Set the user's subscription for this repository
:param bool subscribed: (required), determines if notifications should
be received from this repository.
:param bool ignored: (required), determines if notifications should be
ignored from this repository.
        :returns: :class:`Subscription <Subscription>`
"""
sub = {'subscribed': subscribed, 'ignored': ignored}
url = self._build_url('subscription', base_url=self._api)
json = self._json(self._put(url, data=sub), 200)
return Subscription(json, self) if json else None
@requires_auth
def subscription(self):
"""Return subscription for this Repository.
:returns: :class:`Subscription <github3.notifications.Subscription>`
"""
url = self._build_url('subscription', base_url=self._api)
json = self._json(self._get(url), 200)
return Subscription(json, self) if json else None
def tag(self, sha):
"""Get an annotated tag.
http://learn.github.com/p/tagging.html
:param str sha: (required), sha of the object for this tag
:returns: :class:`Tag <github3.git.Tag>`
"""
url = self._build_url('git', 'tags', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Tag(json) if json else None
def tree(self, sha):
"""Get a tree.
:param str sha: (required), sha of the object for this tree
:returns: :class:`Tree <github3.git.Tree>`
"""
url = self._build_url('git', 'trees', sha, base_url=self._api)
json = self._json(self._get(url), 200)
return Tree(json, self) if json else None
def update_label(self, name, color, new_name=''):
"""Update the label ``name``.
:param str name: (required), name of the label
:param str color: (required), color code
:param str new_name: (optional), new name of the label
:returns: bool
"""
label = self.label(name)
resp = False
if label:
upd = label.update
resp = upd(new_name, color) if new_name else upd(name, color)
return resp
class Branch(GitHubCore):
"""The :class:`Branch <Branch>` object. It holds the information GitHub
returns about a branch on a :class:`Repository <Repository>`.
"""
def __init__(self, branch, session=None):
super(Branch, self).__init__(branch, session)
#: Name of the branch.
self.name = branch.get('name')
        #: Returns the branch's :class:`RepoCommit <RepoCommit>` or
        #: ``None``.
self.commit = branch.get('commit')
if self.commit:
self.commit = RepoCommit(self.commit, self._session)
#: Returns '_links' attribute.
self.links = branch.get('_links', {})
def __repr__(self):
return '<Repository Branch [{0}]>'.format(self.name)
class Contents(GitHubObject):
"""The :class:`Contents <Contents>` object. It holds the information
concerning any content in a repository requested via the API.
"""
def __init__(self, content):
super(Contents, self).__init__(content)
# links
self._api = content['_links'].get('self', '')
#: Dictionary of links
self.links = content.get('_links')
# should always be 'base64'
#: Returns encoding used on the content.
self.encoding = content.get('encoding', '')
# content, base64 encoded and decoded
#: Base64-encoded content of the file.
self.content = content.get('content', '')
#: Decoded content of the file.
self.decoded = self.content
if self.encoding == 'base64':
self.decoded = b64decode(self.content.encode())
# file name, path, and size
#: Name of the content.
self.name = content.get('name', '')
#: Path to the content.
self.path = content.get('path', '')
#: Size of the content
self.size = content.get('size', 0)
#: SHA string.
self.sha = content.get('sha', '')
# should always be 'file'
#: Type of content.
self.type = content.get('type', '')
def __repr__(self):
return '<Content [{0}]>'.format(self.path)
def __str__(self):
return self.decoded
@property
def git_url(self):
"""API URL for this blob"""
return self.links['git']
@property
def html_url(self):
"""URL pointing to the content on GitHub."""
return self.links['html']
class Download(GitHubCore):
"""The :class:`Download <Download>` object. It represents how GitHub sends
information about files uploaded to the downloads section of a repository.
.. warning::
        On 2013-03-11, this API will be deprecated by GitHub. A new version
        of github3.py will be released to accompany that change.
"""
def __init__(self, download, session=None):
super(Download, self).__init__(download, session)
self._api = download.get('url', '')
#: URL of the download at GitHub.
self.html_url = download.get('html_url', '')
#: Unique id of the download on GitHub.
self.id = download.get('id', 0)
#: Name of the download.
self.name = download.get('name', '')
#: Description of the download.
self.description = download.get('description', '')
#: Size of the download.
self.size = download.get('size', 0)
#: How many times this particular file has been downloaded.
self.download_count = download.get('download_count', 0)
#: Content type of the download.
self.content_type = download.get('content_type', '')
def __repr__(self):
return '<Download [{0}]>'.format(self.name)
@requires_auth
def delete(self):
"""Delete this download if authenticated"""
return self._boolean(self._delete(self._api), 204, 404)
def saveas(self, path=''):
"""Save this download to the path specified.
:param str path: (optional), if no path is specified, it will be
saved in the current directory with the name specified by GitHub.
it can take a file-like object as well
:returns: bool
"""
if not path:
path = self.name
resp = self._get(self.html_url, allow_redirects=True, prefetch=False)
if self._boolean(resp, 200, 404):
if isinstance(getattr(path, 'write', None), Callable):
file_like = True
fd = path
else:
file_like = False
fd = open(path, 'wb')
for chunk in resp.iter_content():
fd.write(chunk)
if not file_like:
fd.close()
return True
return False # (No coverage)
class Hook(GitHubCore):
"""The :class:`Hook <Hook>` object. This handles the information returned
by GitHub about hooks set on a repository."""
def __init__(self, hook, session=None):
super(Hook, self).__init__(hook, session)
self._api = hook.get('url', '')
#: datetime object representing when this hook was last updated.
self.updated_at = None
if hook.get('updated_at'):
self.updated_at = self._strptime(hook.get('updated_at'))
#: datetime object representing the date the hook was created.
self.created_at = self._strptime(hook.get('created_at'))
#: The name of the hook.
self.name = hook.get('name')
#: Events which trigger the hook.
self.events = hook.get('events')
#: Whether or not this Hook is marked as active on GitHub
self.active = hook.get('active')
#: Dictionary containing the configuration for the Hook.
self.config = hook.get('config')
#: Unique id of the hook.
self.id = hook.get('id')
def __repr__(self):
return '<Hook [{0}]>'.format(self.name)
def _update_(self, hook):
self.__init__(hook, self._session)
@requires_auth
def delete(self):
"""Delete this hook.
:returns: bool
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def delete_subscription(self):
"""Delete the user's subscription to this repository.
:returns: bool
"""
url = self._build_url('subscription', base_url=self._api)
return self._boolean(self._delete(url), 204, 404)
@requires_auth
def edit(self, name, config, events=[], add_events=[], rm_events=[],
active=True):
"""Edit this hook.
:param str name: (required), name of the service being called
:param dict config: (required), key-value pairs of settings for this
hook
:param list events: (optional), which events should this be triggered
for
:param list add_events: (optional), events to be added to the list of
events that this hook triggers for
        :param list rm_events: (optional), events to be removed from the list
of events that this hook triggers for
:param bool active: (optional), should this event be active
:returns: bool
"""
json = None
if name and config and isinstance(config, dict):
data = {'name': name, 'config': config, 'active': active}
if events:
data['events'] = events
if add_events:
data['add_events'] = add_events
if rm_events:
data['remove_events'] = rm_events
json = self._json(self._patch(self._api, data=data), 200)
if json:
self._update_(json)
return True
return False
def is_active(self):
"""Checks whether the hook is marked as active on GitHub or not.
:returns: bool
"""
return self.active
@requires_auth
def test(self):
"""Test this hook
:returns: bool
"""
url = self._build_url('tests', base_url=self._api)
return self._boolean(self._post(url), 204, 404)
class RepoTag(GitHubObject):
"""The :class:`RepoTag <RepoTag>` object. This stores the information
representing a tag that was created on a repository.
"""
def __init__(self, tag):
super(RepoTag, self).__init__(tag)
#: Name of the tag.
self.name = tag.get('name')
#: URL for the GitHub generated zipball associated with the tag.
self.zipball_url = tag.get('zipball_url')
#: URL for the GitHub generated tarball associated with the tag.
self.tarball_url = tag.get('tarball_url')
#: Dictionary containing the SHA and URL of the commit.
self.commit = tag.get('commit', {})
def __repr__(self):
return '<Repository Tag [{0}]>'.format(self)
def __str__(self):
return self.name
class RepoComment(BaseComment):
"""The :class:`RepoComment <RepoComment>` object. This stores the
information about a comment on a file in a repository.
"""
def __init__(self, comment, session=None):
super(RepoComment, self).__init__(comment, session)
#: Commit id on which the comment was made.
self.commit_id = comment.get('commit_id')
#: URL of the comment on GitHub.
self.html_url = comment.get('html_url')
#: The line number where the comment is located.
self.line = comment.get('line')
#: The path to the file where the comment was made.
self.path = comment.get('path')
#: The position in the diff where the comment was made.
self.position = comment.get('position')
#: datetime object representing when the comment was updated.
self.updated_at = comment.get('updated_at')
if self.updated_at:
self.updated_at = self._strptime(self.updated_at)
#: Login of the user who left the comment.
self.user = None
if comment.get('user'):
self.user = User(comment.get('user'), self)
    def __repr__(self):
        login = self.user.login if self.user else ''
        return '<Repository Comment [{0}/{1}]>'.format(self.commit_id[:7],
                                                       login)
def _update_(self, comment):
super(RepoComment, self)._update_(comment)
self.__init__(comment, self._session)
@requires_auth
def update(self, body, sha, line, path, position):
"""Update this comment.
:param str body: (required)
:param str sha: (required), sha id of the commit to comment on
:param int line: (required), line number to comment on
:param str path: (required), relative path of the file you're
commenting on
:param int position: (required), line index in the diff to comment on
:returns: bool
"""
json = None
if body and sha and path and line > 0 and position > 0:
data = {'body': body, 'commit_id': sha, 'line': line,
'path': path, 'position': position}
json = self._json(self._post(self._api, data), 200)
if json:
self._update_(json)
return True
return False
class RepoCommit(BaseCommit):
"""The :class:`RepoCommit <RepoCommit>` object. This represents a commit as
viewed by a :class:`Repository`. This is different from a Commit object
returned from the git data section.
"""
def __init__(self, commit, session=None):
super(RepoCommit, self).__init__(commit, session)
#: :class:`User <github3.users.User>` who authored the commit.
self.author = commit.get('author')
if self.author:
self.author = User(self.author, self._session)
#: :class:`User <github3.users.User>` who committed the commit.
self.committer = commit.get('committer')
if self.committer:
self.committer = User(self.committer, self._session)
#: :class:`Commit <github3.git.Commit>`.
self.commit = commit.get('commit')
if self.commit:
self.commit = Commit(self.commit, self._session)
self.sha = commit.get('sha')
#: The number of additions made in the commit.
self.additions = 0
#: The number of deletions made in the commit.
self.deletions = 0
#: Total number of changes in the files.
self.total = 0
if commit.get('stats'):
self.additions = commit['stats'].get('additions')
self.deletions = commit['stats'].get('deletions')
self.total = commit['stats'].get('total')
#: The files that were modified by this commit.
self.files = commit.get('files', [])
def __repr__(self):
return '<Repository Commit [{0}]>'.format(self.sha[:7])
def diff(self):
"""Return the diff"""
resp = self._get(self._api,
headers={'Accept': 'application/vnd.github.diff'})
return resp.content if self._boolean(resp, 200, 404) else None
def patch(self):
"""Return the patch"""
resp = self._get(self._api,
headers={'Accept': 'application/vnd.github.patch'})
return resp.content if self._boolean(resp, 200, 404) else None
class Comparison(GitHubCore):
"""The :class:`Comparison <Comparison>` object. This encapsulates the
information returned by GitHub comparing two commit objects in a
repository."""
def __init__(self, compare):
super(Comparison, self).__init__(compare)
self._api = compare.get('url', '')
#: URL to view the comparison at GitHub
self.html_url = compare.get('html_url')
#: Permanent link to this comparison.
self.permalink_url = compare.get('permalink_url')
#: URL to see the diff between the two commits.
self.diff_url = compare.get('diff_url')
#: Patch URL at GitHub for the comparison.
self.patch_url = compare.get('patch_url')
        #: :class:`RepoCommit <RepoCommit>` object representing the base of
        #: the comparison.
self.base_commit = RepoCommit(compare.get('base_commit'), None)
        #: Status of the comparison, e.g., 'ahead', 'behind', 'identical',
        #: or 'diverged'.
self.status = compare.get('status')
#: Number of commits ahead by.
self.ahead_by = compare.get('ahead_by')
#: Number of commits behind by.
self.behind_by = compare.get('behind_by')
#: Number of commits difference in the comparison.
self.total_commits = compare.get('total_commits')
#: List of :class:`RepoCommit <RepoCommit>` objects.
        self.commits = [RepoCommit(com) for com in compare.get('commits', [])]
#: List of dicts describing the files modified.
self.files = compare.get('files', [])
def __repr__(self):
return '<Comparison of {0} commits>'.format(self.total_commits)
def diff(self):
"""Return the diff"""
resp = self._get(self._api,
headers={'Accept': 'application/vnd.github.diff'})
return resp.content if self._boolean(resp, 200, 404) else None
def patch(self):
"""Return the patch"""
resp = self._get(self._api,
headers={'Accept': 'application/vnd.github.patch'})
return resp.content if self._boolean(resp, 200, 404) else None
class Status(GitHubObject):
"""The :class:`Status <Status>` object. This represents information from
the Repo Status API."""
def __init__(self, status):
super(Status, self).__init__(status)
#: datetime object representing the creation of the status object
self.created_at = self._strptime(status.get('created_at'))
#: :class:`User <github3.users.User>` who created the object
self.creator = User(status.get('creator'))
#: Short description of the Status
self.description = status.get('description')
#: GitHub ID for the status object
self.id = status.get('id')
        #: State of the status, e.g., 'success', 'pending', 'failure', 'error'
self.state = status.get('state')
#: URL to view more information about the status
self.target_url = status.get('target_url')
#: datetime object representing the last time the status was updated
self.updated_at = None
if status.get('updated_at'):
self.updated_at = self._strptime(status.get('updated_at'))
def __repr__(self):
return '<Status [{s.id}:{s.state}]>'.format(s=self)
|
"""
WSGI config for scrum project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scrum.settings")
application = get_wsgi_application()
|
# -*- coding: utf-8 -*-
"""
biosppy.signals.ecg
-------------------
This module provides methods to process Electrocardiographic (ECG) signals.
Implemented code assumes a single-channel, Lead I-like ECG signal.
:copyright: (c) 2015-2017 by Instituto de Telecomunicacoes
:license: BSD 3-clause, see LICENSE for more details.
"""
# Imports
# compat
from __future__ import absolute_import, division, print_function
from six.moves import range, zip
# 3rd party
import numpy as np
import scipy.signal as ss
# local
from . import tools as st
from .. import plotting, utils
def ecg(signal=None, sampling_rate=1000., show=True):
"""Process a raw ECG signal and extract relevant signal features using
default parameters.
Parameters
----------
signal : array
Raw ECG signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
show : bool, optional
If True, show a summary plot.
Returns
-------
ts : array
Signal time axis reference (seconds).
filtered : array
Filtered ECG signal.
rpeaks : array
R-peak location indices.
templates_ts : array
Templates time axis reference (seconds).
templates : array
Extracted heartbeat templates.
heart_rate_ts : array
Heart rate time axis reference (seconds).
heart_rate : array
Instantaneous heart rate (bpm).
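
    Examples
    --------
    An illustrative sketch; assumes ``raw`` is a 1-D numpy array sampled
    at 1000 Hz::

        out = ecg(signal=raw, sampling_rate=1000., show=False)
        ts, filtered, rpeaks, tmpl_ts, templates, hr_ts, hr = out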
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# ensure numpy
signal = np.array(signal)
sampling_rate = float(sampling_rate)
# filter signal
order = int(0.3 * sampling_rate)
filtered, _, _ = st.filter_signal(signal=signal,
ftype='FIR',
band='bandpass',
order=order,
frequency=[3, 45],
sampling_rate=sampling_rate)
# segment
rpeaks, = hamilton_segmenter(signal=filtered, sampling_rate=sampling_rate)
# correct R-peak locations
rpeaks, = correct_rpeaks(signal=filtered,
rpeaks=rpeaks,
sampling_rate=sampling_rate,
tol=0.05)
# extract templates
templates, rpeaks = extract_heartbeats(signal=filtered,
rpeaks=rpeaks,
sampling_rate=sampling_rate,
before=0.2,
after=0.4)
# compute heart rate
hr_idx, hr = st.get_heart_rate(beats=rpeaks,
sampling_rate=sampling_rate,
smooth=True,
size=3)
# get time vectors
length = len(signal)
T = (length - 1) / sampling_rate
ts = np.linspace(0, T, length, endpoint=False)
ts_hr = ts[hr_idx]
ts_tmpl = np.linspace(-0.2, 0.4, templates.shape[1], endpoint=False)
# plot
if show:
plotting.plot_ecg(ts=ts,
raw=signal,
filtered=filtered,
rpeaks=rpeaks,
templates_ts=ts_tmpl,
templates=templates,
heart_rate_ts=ts_hr,
heart_rate=hr,
path=None,
show=True)
# output
args = (ts, filtered, rpeaks, ts_tmpl, templates, ts_hr, hr)
names = ('ts', 'filtered', 'rpeaks', 'templates_ts', 'templates',
'heart_rate_ts', 'heart_rate')
return utils.ReturnTuple(args, names)
def _extract_heartbeats(signal=None, rpeaks=None, before=200, after=400):
"""Extract heartbeat templates from an ECG signal, given a list of
R-peak locations.
Parameters
----------
signal : array
Input ECG signal.
rpeaks : array
R-peak location indices.
before : int, optional
Number of samples to include before the R peak.
after : int, optional
Number of samples to include after the R peak.
Returns
-------
templates : array
Extracted heartbeat templates.
rpeaks : array
Corresponding R-peak location indices of the extracted heartbeat
templates.
"""
R = np.sort(rpeaks)
length = len(signal)
templates = []
newR = []
for r in R:
a = r - before
if a < 0:
continue
b = r + after
if b > length:
break
templates.append(signal[a:b])
newR.append(r)
templates = np.array(templates)
newR = np.array(newR, dtype='int')
return templates, newR
def extract_heartbeats(signal=None, rpeaks=None, sampling_rate=1000.,
before=0.2, after=0.4):
"""Extract heartbeat templates from an ECG signal, given a list of
R-peak locations.
Parameters
----------
signal : array
Input ECG signal.
rpeaks : array
R-peak location indices.
sampling_rate : int, float, optional
Sampling frequency (Hz).
before : float, optional
Window size to include before the R peak (seconds).
    after : float, optional
        Window size to include after the R peak (seconds).
Returns
-------
templates : array
Extracted heartbeat templates.
rpeaks : array
Corresponding R-peak location indices of the extracted heartbeat
templates.
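
    Examples
    --------
    An illustrative sketch; assumes ``filtered`` and ``rpeaks`` come from
    an earlier filtering and segmentation step at 1000 Hz::

        templates, rpeaks = extract_heartbeats(signal=filtered,
                                               rpeaks=rpeaks,
                                               sampling_rate=1000.,
                                               before=0.2,
                                               after=0.4)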
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
if rpeaks is None:
raise TypeError("Please specify the input R-peak locations.")
if before < 0:
raise ValueError("Please specify a non-negative 'before' value.")
if after < 0:
raise ValueError("Please specify a non-negative 'after' value.")
# convert delimiters to samples
before = int(before * sampling_rate)
after = int(after * sampling_rate)
# get heartbeats
templates, newR = _extract_heartbeats(signal=signal,
rpeaks=rpeaks,
before=before,
after=after)
return utils.ReturnTuple((templates, newR), ('templates', 'rpeaks'))
def compare_segmentation(reference=None, test=None, sampling_rate=1000.,
offset=0, minRR=None, tol=0.05):
"""Compare the segmentation performance of a list of R-peak positions
against a reference list.
Parameters
----------
reference : array
Reference R-peak location indices.
test : array
Test R-peak location indices.
sampling_rate : int, float, optional
Sampling frequency (Hz).
offset : int, optional
Constant a priori offset (number of samples) between reference and
test R-peak locations.
minRR : float, optional
Minimum admissible RR interval (seconds).
tol : float, optional
Tolerance between corresponding reference and test R-peak
locations (seconds).
Returns
-------
TP : int
Number of true positive R-peaks.
FP : int
Number of false positive R-peaks.
performance : float
Test performance; TP / len(reference).
acc : float
Accuracy rate; TP / (TP + FP).
err : float
Error rate; FP / (TP + FP).
match : list
Indices of the elements of 'test' that match to an R-peak
from 'reference'.
deviation : array
Absolute errors of the matched R-peaks (seconds).
mean_deviation : float
Mean error (seconds).
std_deviation : float
Standard deviation of error (seconds).
mean_ref_ibi : float
Mean of the reference interbeat intervals (seconds).
std_ref_ibi : float
Standard deviation of the reference interbeat intervals (seconds).
mean_test_ibi : float
Mean of the test interbeat intervals (seconds).
std_test_ibi : float
Standard deviation of the test interbeat intervals (seconds).
"""
# check inputs
if reference is None:
raise TypeError("Please specify an input reference list of R-peak \
locations.")
if test is None:
raise TypeError("Please specify an input test list of R-peak \
locations.")
if minRR is None:
minRR = np.inf
sampling_rate = float(sampling_rate)
# ensure numpy
reference = np.array(reference)
test = np.array(test)
# convert to samples
minRR = minRR * sampling_rate
tol = tol * sampling_rate
TP = 0
FP = 0
matchIdx = []
dev = []
for i, r in enumerate(test):
# deviation to closest R in reference
ref = reference[np.argmin(np.abs(reference - (r + offset)))]
error = np.abs(ref - (r + offset))
if error < tol:
TP += 1
matchIdx.append(i)
dev.append(error)
else:
if len(matchIdx) > 0:
bdf = r - test[matchIdx[-1]]
if bdf < minRR:
# false positive, but removable with RR interval check
pass
else:
FP += 1
else:
FP += 1
# convert deviations to time
dev = np.array(dev, dtype='float')
dev /= sampling_rate
nd = len(dev)
if nd == 0:
mdev = np.nan
sdev = np.nan
elif nd == 1:
mdev = np.mean(dev)
sdev = 0.
else:
mdev = np.mean(dev)
sdev = np.std(dev, ddof=1)
# interbeat interval
th1 = 1.5 # 40 bpm
th2 = 0.3 # 200 bpm
rIBI = np.diff(reference)
rIBI = np.array(rIBI, dtype='float')
rIBI /= sampling_rate
good = np.nonzero((rIBI < th1) & (rIBI > th2))[0]
rIBI = rIBI[good]
nr = len(rIBI)
if nr == 0:
rIBIm = np.nan
rIBIs = np.nan
elif nr == 1:
rIBIm = np.mean(rIBI)
rIBIs = 0.
else:
rIBIm = np.mean(rIBI)
rIBIs = np.std(rIBI, ddof=1)
tIBI = np.diff(test[matchIdx])
tIBI = np.array(tIBI, dtype='float')
tIBI /= sampling_rate
good = np.nonzero((tIBI < th1) & (tIBI > th2))[0]
tIBI = tIBI[good]
nt = len(tIBI)
if nt == 0:
tIBIm = np.nan
tIBIs = np.nan
elif nt == 1:
tIBIm = np.mean(tIBI)
tIBIs = 0.
else:
tIBIm = np.mean(tIBI)
tIBIs = np.std(tIBI, ddof=1)
# output
perf = float(TP) / len(reference)
acc = float(TP) / (TP + FP)
err = float(FP) / (TP + FP)
args = (TP, FP, perf, acc, err, matchIdx, dev, mdev, sdev, rIBIm, rIBIs,
tIBIm, tIBIs)
names = ('TP', 'FP', 'performance', 'acc', 'err', 'match', 'deviation',
'mean_deviation', 'std_deviation', 'mean_ref_ibi', 'std_ref_ibi',
'mean_test_ibi', 'std_test_ibi',)
return utils.ReturnTuple(args, names)
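# Worked example (illustrative addition): with tol=0.05 s at 1000 Hz, a test
# peak matches a reference peak when it lies within 50 samples of it. In
# this hypothetical case two of the three test peaks match; the third is 500
# samples from the nearest reference and, with minRR=0.2 s, it is counted as
# a false positive, so performance = 2/3 and acc = 2/3.
def _demo_compare_segmentation():
    out = compare_segmentation(reference=[1000, 2000, 3000],
                               test=[1010, 2040, 2500],
                               sampling_rate=1000.,
                               minRR=0.2,
                               tol=0.05)
    assert out['TP'] == 2 and out['FP'] == 1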
def correct_rpeaks(signal=None, rpeaks=None, sampling_rate=1000., tol=0.05):
"""Correct R-peak locations to the maximum within a tolerance.
Parameters
----------
signal : array
ECG signal.
rpeaks : array
R-peak location indices.
sampling_rate : int, float, optional
Sampling frequency (Hz).
tol : int, float, optional
Correction tolerance (seconds).
Returns
-------
rpeaks : array
Corrected R-peak location indices.
Notes
-----
* The tolerance is defined as the time interval :math:`[R-tol, R+tol[`.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
if rpeaks is None:
raise TypeError("Please specify the input R-peaks.")
tol = int(tol * sampling_rate)
length = len(signal)
newR = []
for r in rpeaks:
a = r - tol
if a < 0:
continue
b = r + tol
if b > length:
break
newR.append(a + np.argmax(signal[a:b]))
newR = sorted(list(set(newR)))
newR = np.array(newR, dtype='int')
return utils.ReturnTuple((newR,), ('rpeaks',))
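# Usage sketch (illustrative addition): `correct_rpeaks` snaps each candidate
# to the local maximum within +/- tol. Hypothetical setup: the true peak sits
# at sample 500 and the candidate is 20 samples early; with tol=0.05 s at
# 1000 Hz (a 50-sample window) it is pulled back to 500.
def _demo_correct_rpeaks():
    import numpy as np
    signal = np.zeros(1000)
    signal[500] = 1.0
    out = correct_rpeaks(signal=signal, rpeaks=[480],
                         sampling_rate=1000., tol=0.05)
    assert out['rpeaks'][0] == 500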
def ssf_segmenter(signal=None, sampling_rate=1000., threshold=20, before=0.03,
after=0.01):
"""ECG R-peak segmentation based on the Slope Sum Function (SSF).
Parameters
----------
signal : array
Input filtered ECG signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
threshold : float, optional
SSF threshold.
before : float, optional
Search window size before R-peak candidate (seconds).
after : float, optional
Search window size after R-peak candidate (seconds).
Returns
-------
rpeaks : array
R-peak location indices.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# convert to samples
winB = int(before * sampling_rate)
winA = int(after * sampling_rate)
Rset = set()
length = len(signal)
# diff
dx = np.diff(signal)
dx[dx >= 0] = 0
dx = dx ** 2
# detection
idx, = np.nonzero(dx > threshold)
idx0 = np.hstack(([0], idx))
didx = np.diff(idx0)
# search
sidx = idx[didx > 1]
for item in sidx:
a = item - winB
if a < 0:
a = 0
b = item + winA
if b > length:
continue
r = np.argmax(signal[a:b]) + a
Rset.add(r)
# output
rpeaks = list(Rset)
rpeaks.sort()
rpeaks = np.array(rpeaks, dtype='int')
return utils.ReturnTuple((rpeaks,), ('rpeaks',))
def christov_segmenter(signal=None, sampling_rate=1000.):
"""ECG R-peak segmentation algorithm.
Follows the approach by Christov [Chri04]_.
Parameters
----------
signal : array
Input filtered ECG signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
Returns
-------
rpeaks : array
R-peak location indices.
References
----------
.. [Chri04] Ivaylo I. Christov, "Real time electrocardiogram QRS
detection using combined adaptive threshold", BioMedical Engineering
OnLine 2004, vol. 3:28, 2004
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
length = len(signal)
# algorithm parameters
v100ms = int(0.1 * sampling_rate)
v50ms = int(0.050 * sampling_rate)
v300ms = int(0.300 * sampling_rate)
v350ms = int(0.350 * sampling_rate)
v200ms = int(0.2 * sampling_rate)
v1200ms = int(1.2 * sampling_rate)
M_th = 0.4 # paper is 0.6
# Pre-processing
# 1. Moving averaging filter for power-line interference suppression:
# averages samples in one period of the powerline
# interference frequency with a first zero at this frequency.
b = np.ones(int(0.02 * sampling_rate)) / 50.
a = [1]
X = ss.filtfilt(b, a, signal)
# 2. Moving average of samples in a 28 ms interval for electromyogram
# noise suppression; a filter with first zero at about 35 Hz.
b = np.ones(int(sampling_rate / 35.)) / 35.
X = ss.filtfilt(b, a, X)
X, _, _ = st.filter_signal(signal=X,
ftype='butter',
band='lowpass',
order=7,
frequency=40.,
sampling_rate=sampling_rate)
X, _, _ = st.filter_signal(signal=X,
ftype='butter',
band='highpass',
order=7,
frequency=9.,
sampling_rate=sampling_rate)
k, Y, L = 1, [], len(X)
for n in range(k + 1, L - k):
Y.append(X[n] ** 2 - X[n - k] * X[n + k])
Y = np.array(Y)
Y[Y < 0] = 0
# Complex lead
# Y = abs(scipy.diff(X)) # 1-lead
# 3. Moving average of the complex lead (the synthesis is
# explained in the next section) in 40 ms intervals; a filter
# with first zero at about 25 Hz. It suppresses the noise
# magnified by the differentiation procedure used in the
# synthesis of the complex lead.
b = np.ones(int(sampling_rate / 25.)) / 25.
Y = ss.lfilter(b, a, Y)
# Init
MM = M_th * np.max(Y[:int(5 * sampling_rate)]) * np.ones(5)
MMidx = 0
M = np.mean(MM)
slope = np.linspace(1.0, 0.6, int(sampling_rate))
Rdec = 0
R = 0
RR = np.zeros(5)
RRidx = 0
Rm = 0
QRS = []
Rpeak = []
current_sample = 0
skip = False
F = np.mean(Y[:v350ms])
# Go through each sample
while current_sample < len(Y):
if QRS:
# No detection is allowed 200 ms after the current one. In
# the interval QRS to QRS+200ms a new value of M5 is calculated: newM5 = 0.6*max(Yi)
if current_sample <= QRS[-1] + v200ms:
Mnew = M_th * max(Y[QRS[-1]:QRS[-1] + v200ms])
# The estimated newM5 value can become quite high, if
# steep slope premature ventricular contraction or artifact
# appeared, and for that reason it is limited to newM5 = 1.1*M5 if newM5 > 1.5* M5
# The MM buffer is refreshed excluding the oldest component, and including M5 = newM5.
Mnew = Mnew if Mnew <= 1.5 * MM[MMidx - 1] else 1.1 * MM[MMidx - 1]
MM[MMidx] = Mnew
MMidx = np.mod(MMidx + 1, 5)
# M is calculated as an average value of MM.
Mtemp = np.mean(MM)
M = Mtemp
skip = True
# M is decreased in an interval 200 to 1200 ms following
# the last QRS detection at a low slope, reaching 60 % of its
# refreshed value at 1200 ms.
elif current_sample >= QRS[-1] + v200ms and current_sample < QRS[-1] + v1200ms:
M = Mtemp * slope[current_sample - QRS[-1] - v200ms]
# After 1200 ms M remains unchanged.
# R = 0 V in the interval from the last detected QRS to 2/3 of the expected Rm.
if current_sample >= QRS[-1] and current_sample < QRS[-1] + (2 / 3.) * Rm:
R = 0
# In the interval QRS + Rm * 2/3 to QRS + Rm, R decreases
# 1.4 times slower then the decrease of the previously discussed
# steep slope threshold (M in the 200 to 1200 ms interval).
elif current_sample >= QRS[-1] + (2 / 3.) * Rm and current_sample < QRS[-1] + Rm:
R += Rdec
# After QRS + Rm the decrease of R is stopped
# MFR = M + F + R
MFR = M + F + R
# QRS or beat complex is detected if Yi = MFR
if not skip and Y[current_sample] >= MFR:
QRS += [current_sample]
Rpeak += [QRS[-1] + np.argmax(Y[QRS[-1]:QRS[-1] + v300ms])]
if len(QRS) >= 2:
# A buffer with the 5 last RR intervals is updated at any new QRS detection.
RR[RRidx] = QRS[-1] - QRS[-2]
RRidx = np.mod(RRidx + 1, 5)
skip = False
# With every signal sample, F is updated adding the maximum
# of Y in the latest 50 ms of the 350 ms interval and
# subtracting maxY in the earliest 50 ms of the interval.
if current_sample >= v350ms:
Y_latest50 = Y[current_sample - v50ms:current_sample]
Y_earliest50 = Y[current_sample - v350ms:current_sample - v300ms]
F += (max(Y_latest50) - max(Y_earliest50)) / 1000.
# Rm is the mean value of the buffer RR.
Rm = np.mean(RR)
current_sample += 1
rpeaks = []
for i in Rpeak:
a, b = i - v100ms, i + v100ms
if a < 0:
a = 0
if b > length:
b = length
rpeaks.append(np.argmax(signal[a:b]) + a)
rpeaks = sorted(list(set(rpeaks)))
rpeaks = np.array(rpeaks, dtype='int')
return utils.ReturnTuple((rpeaks,), ('rpeaks',))
def engzee_segmenter(signal=None, sampling_rate=1000., threshold=0.48):
"""ECG R-peak segmentation algorithm.
Follows the approach by Engelse and Zeelenberg [EnZe79]_ with the
modifications by Lourenco *et al.* [LSLL12]_.
Parameters
----------
signal : array
Input filtered ECG signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
threshold : float, optional
Detection threshold.
Returns
-------
rpeaks : array
R-peak location indices.
References
----------
.. [EnZe79] W. Engelse and C. Zeelenberg, "A single scan algorithm for
QRS detection and feature extraction", IEEE Comp. in Cardiology,
vol. 6, pp. 37-42, 1979
.. [LSLL12] A. Lourenco, H. Silva, P. Leite, R. Lourenco and A. Fred,
"Real Time Electrocardiogram Segmentation for Finger Based ECG
Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# algorithm parameters
changeM = int(0.75 * sampling_rate)
Miterate = int(1.75 * sampling_rate)
v250ms = int(0.25 * sampling_rate)
v1200ms = int(1.2 * sampling_rate)
v1500ms = int(1.5 * sampling_rate)
v180ms = int(0.18 * sampling_rate)
p10ms = int(np.ceil(0.01 * sampling_rate))
p20ms = int(np.ceil(0.02 * sampling_rate))
err_kill = int(0.01 * sampling_rate)
inc = 1
mmth = threshold
mmp = 0.2
# Differentiator (1)
y1 = [signal[i] - signal[i - 4] for i in range(4, len(signal))]
# Low pass filter (2)
c = [1, 4, 6, 4, 1, -1, -4, -6, -4, -1]
y2 = np.array([np.dot(c, y1[n - 9:n + 1]) for n in range(9, len(y1))])
y2_len = len(y2)
# vars
MM = mmth * max(y2[:Miterate]) * np.ones(3)
MMidx = 0
Th = np.mean(MM)
NN = mmp * min(y2[:Miterate]) * np.ones(2)
NNidx = 0
ThNew = np.mean(NN)
update = False
nthfpluss = []
rpeaks = []
# Find nthf+ point
while True:
# If a previous intersection was found, continue the analysis from there
if update:
if inc * changeM + Miterate < y2_len:
a = (inc - 1) * changeM
b = inc * changeM + Miterate
Mnew = mmth * max(y2[a:b])
Nnew = mmp * min(y2[a:b])
elif y2_len - (inc - 1) * changeM > v1500ms:
a = (inc - 1) * changeM
Mnew = mmth * max(y2[a:])
Nnew = mmp * min(y2[a:])
if len(y2) - inc * changeM > Miterate:
MM[MMidx] = Mnew if Mnew <= 1.5 * MM[MMidx - 1] else 1.1 * MM[MMidx - 1]
NN[NNidx] = Nnew if abs(Nnew) <= 1.5 * abs(NN[NNidx - 1]) else 1.1 * NN[NNidx - 1]
MMidx = np.mod(MMidx + 1, len(MM))
NNidx = np.mod(NNidx + 1, len(NN))
Th = np.mean(MM)
ThNew = np.mean(NN)
inc += 1
update = False
if nthfpluss:
lastp = nthfpluss[-1] + 1
if lastp < (inc - 1) * changeM:
lastp = (inc - 1) * changeM
y22 = y2[lastp:inc * changeM + err_kill]
# find intersection with Th
try:
nthfplus = np.intersect1d(np.nonzero(y22 > Th)[0], np.nonzero(y22 < Th)[0] - 1)[0]
except IndexError:
if inc * changeM > len(y2):
break
else:
update = True
continue
# adjust index
nthfplus += int(lastp)
# if a previous R peak was found:
if rpeaks:
# check if intersection is within the 200-1200 ms interval. Modification: 300 ms -> 200 bpm
if nthfplus - rpeaks[-1] > v250ms and nthfplus - rpeaks[-1] < v1200ms:
pass
# if new intersection is within the <200ms interval, skip it. Modification: 300 ms -> 200 bpm
elif nthfplus - rpeaks[-1] < v250ms:
nthfpluss += [nthfplus]
continue
# no previous intersection, find the first one
else:
try:
aux = np.nonzero(y2[(inc - 1) * changeM:inc * changeM + err_kill] > Th)[0]
bux = np.nonzero(y2[(inc - 1) * changeM:inc * changeM + err_kill] < Th)[0] - 1
nthfplus = int((inc - 1) * changeM) + np.intersect1d(aux, bux)[0]
except IndexError:
if inc * changeM > len(y2):
break
else:
update = True
continue
nthfpluss += [nthfplus]
# Define 160ms search region
windowW = np.arange(nthfplus, nthfplus + v180ms)
# Check if the condition y2[n] < Th holds for a specified
# number of consecutive points (experimentally we found this number to be at least 10 points)"
i, f = windowW[0], windowW[-1] if windowW[-1] < len(y2) else -1
hold_points = np.diff(np.nonzero(y2[i:f] < ThNew)[0])
cont = 0
for hp in hold_points:
if hp == 1:
cont += 1
if cont == p10ms - 1: # -1 is because diff eats a sample
max_shift = p20ms # looks for X's max a bit to the right
if nthfpluss[-1] > max_shift:
rpeaks += [np.argmax(signal[i - max_shift:f]) + i - max_shift]
else:
rpeaks += [np.argmax(signal[i:f]) + i]
break
else:
cont = 0
rpeaks = sorted(list(set(rpeaks)))
rpeaks = np.array(rpeaks, dtype='int')
return utils.ReturnTuple((rpeaks,), ('rpeaks',))
def gamboa_segmenter(signal=None, sampling_rate=1000., tol=0.002):
"""ECG R-peak segmentation algorithm.
Follows the approach by Gamboa.
Parameters
----------
signal : array
Input filtered ECG signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
tol : float, optional
Tolerance parameter.
Returns
-------
rpeaks : array
R-peak location indices.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
# convert to samples
v_100ms = int(0.1 * sampling_rate)
v_300ms = int(0.3 * sampling_rate)
hist, edges = np.histogram(signal, 100, density=True)
TH = 0.01
F = np.cumsum(hist)
v0 = edges[np.nonzero(F > TH)[0][0]]
v1 = edges[np.nonzero(F < (1 - TH))[0][-1]]
nrm = max([abs(v0), abs(v1)])
norm_signal = signal / float(nrm)
d2 = np.diff(norm_signal, 2)
b = np.nonzero((np.diff(np.sign(np.diff(-d2)))) == -2)[0] + 2
b = np.intersect1d(b, np.nonzero(-d2 > tol)[0])
if len(b) < 3:
rpeaks = []
else:
b = b.astype('float')
rpeaks = []
previous = b[0]
for i in b[1:]:
if i - previous > v_300ms:
previous = i
rpeaks.append(np.argmax(signal[int(i):int(i + v_100ms)]) + i)
rpeaks = sorted(list(set(rpeaks)))
rpeaks = np.array(rpeaks, dtype='int')
return utils.ReturnTuple((rpeaks,), ('rpeaks',))
def hamilton_segmenter(signal=None, sampling_rate=1000.):
"""ECG R-peak segmentation algorithm.
Follows the approach by Hamilton [Hami02]_.
Parameters
----------
signal : array
Input filtered ECG signal.
sampling_rate : int, float, optional
Sampling frequency (Hz).
Returns
-------
rpeaks : array
R-peak location indices.
References
----------
.. [Hami02] P.S. Hamilton, "Open Source ECG Analysis Software
Documentation", E.P.Limited, 2002
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
sampling_rate = float(sampling_rate)
length = len(signal)
dur = length / sampling_rate
# algorithm parameters
v1s = int(1. * sampling_rate)
v100ms = int(0.1 * sampling_rate)
TH_elapsed = np.ceil(0.36 * sampling_rate)
sm_size = int(0.08 * sampling_rate)
init_ecg = 8 # seconds for initialization
if dur < init_ecg:
init_ecg = int(dur)
# filtering
filtered, _, _ = st.filter_signal(signal=signal,
ftype='butter',
band='lowpass',
order=4,
frequency=25.,
sampling_rate=sampling_rate)
filtered, _, _ = st.filter_signal(signal=filtered,
ftype='butter',
band='highpass',
order=4,
frequency=3.,
sampling_rate=sampling_rate)
# diff
dx = np.abs(np.diff(filtered, 1) * sampling_rate)
# smoothing
dx, _ = st.smoother(signal=dx, kernel='hamming', size=sm_size, mirror=True)
# buffers
qrspeakbuffer = np.zeros(init_ecg)
noisepeakbuffer = np.zeros(init_ecg)
peak_idx_test = np.zeros(init_ecg)
noise_idx = np.zeros(init_ecg)
rrinterval = sampling_rate * np.ones(init_ecg)
a, b = 0, v1s
all_peaks, _ = st.find_extrema(signal=dx, mode='max')
for i in range(init_ecg):
peaks, values = st.find_extrema(signal=dx[a:b], mode='max')
try:
ind = np.argmax(values)
except ValueError:
pass
else:
# peak amplitude
qrspeakbuffer[i] = values[ind]
# peak location
peak_idx_test[i] = peaks[ind] + a
a += v1s
b += v1s
# thresholds
ANP = np.median(noisepeakbuffer)
AQRSP = np.median(qrspeakbuffer)
TH = 0.475
DT = ANP + TH * (AQRSP - ANP)
DT_vec = []
indexqrs = 0
indexnoise = 0
indexrr = 0
npeaks = 0
offset = 0
beats = []
# detection rules
# 1 - ignore all peaks that precede or follow larger peaks by less than 200ms
lim = int(np.ceil(0.2 * sampling_rate))
diff_nr = int(np.ceil(0.045 * sampling_rate))
bpsi, bpe = offset, 0
for f in all_peaks:
DT_vec += [DT]
# 1 - Checking if f-peak is larger than any peak following or preceding it by less than 200 ms
peak_cond = np.array((all_peaks > f - lim) * (all_peaks < f + lim) * (all_peaks != f))
peaks_within = all_peaks[peak_cond]
if peaks_within.any() and (max(dx[peaks_within]) > dx[f]):
continue
# 4 - If the peak is larger than the detection threshold call it a QRS complex, otherwise call it noise
if dx[f] > DT:
# 2 - look for both positive and negative slopes in raw signal
if f < diff_nr:
diff_now = np.diff(signal[0:f + diff_nr])
elif f + diff_nr >= len(signal):
diff_now = np.diff(signal[f - diff_nr:len(dx)])
else:
diff_now = np.diff(signal[f - diff_nr:f + diff_nr])
diff_signer = diff_now[diff_now > 0]
if len(diff_signer) == 0 or len(diff_signer) == len(diff_now):
continue
# RR INTERVALS
if npeaks > 0:
# 3 - in here we check point 3 of the Hamilton paper
# that is, we check whether our current peak is a valid R-peak.
prev_rpeak = beats[npeaks - 1]
elapsed = f - prev_rpeak
# if the previous peak was within 360 ms interval
if elapsed < TH_elapsed:
# check current and previous slopes
if prev_rpeak < diff_nr:
diff_prev = np.diff(signal[0:prev_rpeak + diff_nr])
elif prev_rpeak + diff_nr >= len(signal):
diff_prev = np.diff(signal[prev_rpeak - diff_nr:len(dx)])
else:
diff_prev = np.diff(signal[prev_rpeak - diff_nr:prev_rpeak + diff_nr])
slope_now = max(diff_now)
slope_prev = max(diff_prev)
if (slope_now < 0.5 * slope_prev):
# if current slope is smaller than half the previous one, then it is a T-wave
continue
if dx[f] < 3. * np.median(qrspeakbuffer): # avoid retarded noise peaks
beats += [int(f) + bpsi]
else:
continue
if bpe == 0:
rrinterval[indexrr] = beats[npeaks] - beats[npeaks - 1]
indexrr += 1
if indexrr == init_ecg:
indexrr = 0
else:
if beats[npeaks] > beats[bpe - 1] + v100ms:
rrinterval[indexrr] = beats[npeaks] - beats[npeaks - 1]
indexrr += 1
if indexrr == init_ecg:
indexrr = 0
elif dx[f] < 3. * np.median(qrspeakbuffer):
beats += [int(f) + bpsi]
else:
continue
npeaks += 1
qrspeakbuffer[indexqrs] = dx[f]
peak_idx_test[indexqrs] = f
indexqrs += 1
if indexqrs == init_ecg:
indexqrs = 0
if dx[f] <= DT:
# 4 - not valid
# 5 - If no QRS has been detected within 1.5 R-to-R intervals,
# there was a peak that was larger than half the detection threshold,
# and the peak followed the preceding detection by at least 360 ms,
# classify that peak as a QRS complex
tf = f + bpsi
# RR interval median
RRM = np.median(rrinterval) # initial values are good?
if len(beats) >= 2:
elapsed = tf - beats[npeaks - 1]
if elapsed >= 1.5 * RRM and elapsed > TH_elapsed:
if dx[f] > 0.5 * DT:
beats += [int(f) + offset]
# RR INTERVALS
if npeaks > 0:
rrinterval[indexrr] = beats[npeaks] - beats[npeaks - 1]
indexrr += 1
if indexrr == init_ecg:
indexrr = 0
npeaks += 1
qrspeakbuffer[indexqrs] = dx[f]
peak_idx_test[indexqrs] = f
indexqrs += 1
if indexqrs == init_ecg:
indexqrs = 0
else:
noisepeakbuffer[indexnoise] = dx[f]
noise_idx[indexnoise] = f
indexnoise += 1
if indexnoise == init_ecg:
indexnoise = 0
else:
noisepeakbuffer[indexnoise] = dx[f]
noise_idx[indexnoise] = f
indexnoise += 1
if indexnoise == init_ecg:
indexnoise = 0
# Update Detection Threshold
ANP = np.median(noisepeakbuffer)
AQRSP = np.median(qrspeakbuffer)
DT = ANP + 0.475 * (AQRSP - ANP)
beats = np.array(beats)
r_beats = []
thres_ch = 0.85
adjacency = 0.05 * sampling_rate
for i in beats:
error = [False, False]
if i - lim < 0:
window = signal[0:i + lim]
add = 0
elif i + lim >= length:
window = signal[i - lim:length]
add = i - lim
else:
window = signal[i - lim:i + lim]
add = i - lim
# meanval = np.mean(window)
w_peaks, _ = st.find_extrema(signal=window, mode='max')
w_negpeaks, _ = st.find_extrema(signal=window, mode='min')
zerdiffs = np.where(np.diff(window) == 0)[0]
w_peaks = np.concatenate((w_peaks, zerdiffs))
w_negpeaks = np.concatenate((w_negpeaks, zerdiffs))
pospeaks = sorted(zip(window[w_peaks], w_peaks), reverse=True)
negpeaks = sorted(zip(window[w_negpeaks], w_negpeaks))
try:
twopeaks = [pospeaks[0]]
except IndexError:
twopeaks = []
try:
twonegpeaks = [negpeaks[0]]
except IndexError:
twonegpeaks = []
# getting positive peaks
for i in range(len(pospeaks) - 1):
if abs(pospeaks[0][1] - pospeaks[i + 1][1]) > adjacency:
twopeaks.append(pospeaks[i + 1])
break
try:
posdiv = abs(twopeaks[0][0] - twopeaks[1][0])
except IndexError:
error[0] = True
# getting negative peaks
for i in range(len(negpeaks) - 1):
if abs(negpeaks[0][1] - negpeaks[i + 1][1]) > adjacency:
twonegpeaks.append(negpeaks[i + 1])
break
try:
negdiv = abs(twonegpeaks[0][0] - twonegpeaks[1][0])
except IndexError:
error[1] = True
# choosing type of R-peak
n_errors = sum(error)
try:
if not n_errors:
if posdiv > thres_ch * negdiv:
# pos noerr
r_beats.append(twopeaks[0][1] + add)
else:
# neg noerr
r_beats.append(twonegpeaks[0][1] + add)
elif n_errors == 2:
if abs(twopeaks[0][1]) > abs(twonegpeaks[0][1]):
# pos allerr
r_beats.append(twopeaks[0][1] + add)
else:
# neg allerr
r_beats.append(twonegpeaks[0][1] + add)
elif error[0]:
# pos poserr
r_beats.append(twopeaks[0][1] + add)
else:
# neg negerr
r_beats.append(twonegpeaks[0][1] + add)
except IndexError:
continue
rpeaks = sorted(list(set(r_beats)))
rpeaks = np.array(rpeaks, dtype='int')
return utils.ReturnTuple((rpeaks,), ('rpeaks',))
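# Usage sketch (illustrative addition): a smoke test running two of the
# segmenters above on the same synthetic spike train and scoring them with
# `compare_segmentation`. The detectors are tuned for real, band-pass
# filtered ECG, so treat the printed scores as illustrative only; the SSF
# threshold is lowered here to suit the unit-amplitude spikes.
def _demo_segmenters(sampling_rate=1000.):
    import numpy as np
    signal = np.random.randn(10 * int(sampling_rate)) * 0.01
    reference = np.arange(500, 9500, int(sampling_rate))
    signal[reference] = 1.0
    results = {
        'ssf': ssf_segmenter(signal=signal, sampling_rate=sampling_rate,
                             threshold=0.5)['rpeaks'],
        'hamilton': hamilton_segmenter(signal=signal,
                                       sampling_rate=sampling_rate)['rpeaks'],
    }
    for name, rpeaks in results.items():
        if len(rpeaks) == 0:
            continue  # compare_segmentation divides by TP + FP
        out = compare_segmentation(reference=reference, test=rpeaks,
                                   sampling_rate=sampling_rate, tol=0.05)
        print(name, out['performance'])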
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPaleotree(RPackage):
"""Paleontological and Phylogenetic Analyses of Evolution
Provides tools for transforming, a posteriori time-scaling, and modifying
phylogenies containing extinct (i.e. fossil) lineages"""
homepage = "https://github.com/dwbapst/paleotree"
url = "https://cloud.r-project.org/src/contrib/paleotree_3.1.3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/paleotree"
version('3.3.25', sha256='aa64b9120075581229439227a12db776d052b03eb5f9721692a16a9402ac8712')
version('3.3.0', sha256='f8f6b0228dd5290b251cad3a8626689442b5aa793d8f072c8c2c7813a063df90')
version('3.1.3', sha256='4c1cc8a5e171cbbbd88f78914f86d5e6d144ae573816fbeeff2ab54a814ec614')
depends_on('r@3.0.0:', type=('build', 'run'))
depends_on('r-ape@4.1:', type=('build', 'run'))
depends_on('r-phangorn@2.0.0:', type=('build', 'run'))
depends_on('r-phytools@0.6-00:', type=('build', 'run'))
depends_on('r-jsonlite', when='@3.3.0:', type=('build', 'run'))
depends_on('r-png', when='@3.3.0:', type=('build', 'run'))
depends_on('r-rcurl', when='@3.3.0:', type=('build', 'run'))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "jrdsite",
"color": "red",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("jrdsite")
}
]
|
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import collections
from . import compat
from . import connresource
from . import exceptions
class CursorFactory(connresource.ConnectionResource):
"""A cursor interface for the results of a query.
A cursor interface can be used to initiate efficient traversal of the
results of a large query.
"""
__slots__ = ('_state', '_args', '_prefetch', '_query', '_timeout')
def __init__(self, connection, query, state, args, prefetch, timeout):
super().__init__(connection)
self._args = args
self._prefetch = prefetch
self._query = query
self._timeout = timeout
self._state = state
if state is not None:
state.attach()
@compat.aiter_compat
@connresource.guarded
def __aiter__(self):
prefetch = 50 if self._prefetch is None else self._prefetch
return CursorIterator(self._connection,
self._query, self._state,
self._args, prefetch,
self._timeout)
@connresource.guarded
def __await__(self):
if self._prefetch is not None:
raise exceptions.InterfaceError(
'prefetch argument can only be specified for iterable cursor')
cursor = Cursor(self._connection, self._query,
self._state, self._args)
return cursor._init(self._timeout).__await__()
def __del__(self):
if self._state is not None:
self._state.detach()
self._connection._maybe_gc_stmt(self._state)
class BaseCursor(connresource.ConnectionResource):
__slots__ = ('_state', '_args', '_portal_name', '_exhausted', '_query')
def __init__(self, connection, query, state, args):
super().__init__(connection)
self._args = args
self._state = state
if state is not None:
state.attach()
self._portal_name = None
self._exhausted = False
self._query = query
def _check_ready(self):
if self._state is None:
raise exceptions.InterfaceError(
'cursor: no associated prepared statement')
if self._state.closed:
raise exceptions.InterfaceError(
'cursor: the prepared statement is closed')
if not self._connection._top_xact:
raise exceptions.NoActiveSQLTransactionError(
'cursor cannot be created outside of a transaction')
async def _bind_exec(self, n, timeout):
self._check_ready()
if self._portal_name:
raise exceptions.InterfaceError(
'cursor already has an open portal')
con = self._connection
protocol = con._protocol
self._portal_name = con._get_unique_id('portal')
buffer, _, self._exhausted = await protocol.bind_execute(
self._state, self._args, self._portal_name, n, True, timeout)
return buffer
async def _bind(self, timeout):
self._check_ready()
if self._portal_name:
raise exceptions.InterfaceError(
'cursor already has an open portal')
con = self._connection
protocol = con._protocol
self._portal_name = con._get_unique_id('portal')
buffer = await protocol.bind(self._state, self._args,
self._portal_name,
timeout)
return buffer
async def _exec(self, n, timeout):
self._check_ready()
if not self._portal_name:
raise exceptions.InterfaceError(
'cursor does not have an open portal')
protocol = self._connection._protocol
buffer, _, self._exhausted = await protocol.execute(
self._state, self._portal_name, n, True, timeout)
return buffer
def __repr__(self):
attrs = []
if self._exhausted:
attrs.append('exhausted')
attrs.append('') # to separate from id
if self.__class__.__module__.startswith('asyncpg.'):
mod = 'asyncpg'
else:
mod = self.__class__.__module__
return '<{}.{} "{!s:.30}" {}{:#x}>'.format(
mod, self.__class__.__name__,
self._state.query,
' '.join(attrs), id(self))
def __del__(self):
if self._state is not None:
self._state.detach()
self._connection._maybe_gc_stmt(self._state)
class CursorIterator(BaseCursor):
__slots__ = ('_buffer', '_prefetch', '_timeout')
def __init__(self, connection, query, state, args, prefetch, timeout):
super().__init__(connection, query, state, args)
if prefetch <= 0:
raise exceptions.InterfaceError(
'prefetch argument must be greater than zero')
self._buffer = collections.deque()
self._prefetch = prefetch
self._timeout = timeout
@compat.aiter_compat
@connresource.guarded
def __aiter__(self):
return self
@connresource.guarded
async def __anext__(self):
if self._state is None:
self._state = await self._connection._get_statement(
self._query, self._timeout, named=True)
self._state.attach()
if not self._portal_name:
buffer = await self._bind_exec(self._prefetch, self._timeout)
self._buffer.extend(buffer)
if not self._buffer and not self._exhausted:
buffer = await self._exec(self._prefetch, self._timeout)
self._buffer.extend(buffer)
if self._buffer:
return self._buffer.popleft()
raise StopAsyncIteration
class Cursor(BaseCursor):
"""An open *portal* into the results of a query."""
__slots__ = ()
async def _init(self, timeout):
if self._state is None:
self._state = await self._connection._get_statement(
self._query, timeout, named=True)
self._state.attach()
self._check_ready()
await self._bind(timeout)
return self
@connresource.guarded
async def fetch(self, n, *, timeout=None):
r"""Return the next *n* rows as a list of :class:`Record` objects.
:param float timeout: Optional timeout value in seconds.
:return: A list of :class:`Record` instances.
"""
self._check_ready()
if n <= 0:
raise exceptions.InterfaceError('n must be greater than zero')
if self._exhausted:
return []
recs = await self._exec(n, timeout)
if len(recs) < n:
self._exhausted = True
return recs
@connresource.guarded
async def fetchrow(self, *, timeout=None):
r"""Return the next row.
:param float timeout: Optional timeout value in seconds.
:return: A :class:`Record` instance.
"""
self._check_ready()
if self._exhausted:
return None
recs = await self._exec(1, timeout)
if len(recs) < 1:
self._exhausted = True
return None
return recs[0]
@connresource.guarded
async def forward(self, n, *, timeout=None) -> int:
r"""Skip over the next *n* rows.
:param float timeout: Optional timeout value in seconds.
:return: A number of rows actually skipped over (<= *n*).
"""
self._check_ready()
if n <= 0:
raise exceptions.InterfaceError('n must be greater than zero')
protocol = self._connection._protocol
status = await protocol.query('MOVE FORWARD {:d} {}'.format(
n, self._portal_name), timeout)
advanced = int(status.split()[1])
if advanced < n:
self._exhausted = True
return advanced
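# Usage sketch (illustrative addition): a hypothetical session exercising
# both cursor flavours defined above, the awaitable `Cursor` (explicit
# fetch/forward paging) and the iterable form backed by `CursorIterator`.
# The DSN is made up; note that cursors must be created inside a transaction
# (see BaseCursor._check_ready).
async def _demo_cursor_usage():
    import asyncpg  # local import; a module-level import would be circular
    con = await asyncpg.connect(dsn='postgresql://localhost/demo')
    async with con.transaction():
        # awaitable cursor: explicit paging through a server-side portal
        cur = await con.cursor('SELECT generate_series(1, 100)')
        first = await cur.fetch(10)   # next 10 rows
        await cur.forward(50)         # skip 50 rows
        last = await cur.fetchrow()   # a single row, or None when exhausted
        # iterable cursor: batches of `prefetch` rows fetched transparently
        async for _record in con.cursor('SELECT generate_series(1, 100)'):
            pass
    await con.close()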
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"BudgetPeriodEnum",},
)
class BudgetPeriodEnum(proto.Message):
r"""Message describing Budget period. """
class BudgetPeriod(proto.Enum):
r"""Possible period of a Budget."""
UNSPECIFIED = 0
UNKNOWN = 1
DAILY = 2
CUSTOM_PERIOD = 5
__all__ = tuple(sorted(__protobuf__.manifest))
|
# Copyright 2016 Intel Corporation
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ironic.common.i18n import _
opts = [
cfg.StrOpt('dhcp_provider',
default='neutron',
help=_('DHCP provider to use. "neutron" uses Neutron, and '
'"none" uses a no-op provider.')),
cfg.BoolOpt('ipxe_no_pxedhcp',
default=False,
help=_('If the dhcp provider is authoritative and there is no'
' ProxyDHCP server'))
]
def register_opts(conf):
conf.register_opts(opts, group='dhcp')
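# Usage sketch (illustrative addition): a hedged example of consuming the
# options registered above on a standalone ConfigOpts instance; with no
# overrides the provider resolves to the 'neutron' default.
def _demo_dhcp_opts():
    conf = cfg.ConfigOpts()
    register_opts(conf)
    conf(args=[])  # parse no CLI arguments; defaults apply
    return conf.dhcp.dhcp_provider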
|
print("Moje ulubione potrawy to:", "curry", "pad thai", "burrito", sep="\n")
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import math
import sys
import unittest
from unittest import TestCase, main
from nni.algorithms.compression.pytorch.pruning import TransformerHeadPruner
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from sdk.models.pytorch_models.transformer import TransformerEncoder
def validate_sparsity(wrapper, sparsity, bias=False):
masks = [wrapper.weight_mask]
if bias and wrapper.bias_mask is not None:
masks.append(wrapper.bias_mask)
for m in masks:
actual_sparsity = (m == 0).sum().item() / m.numel()
msg = 'actual sparsity: {:.2f}, target sparsity: {:.2f}'.format(actual_sparsity, sparsity)
assert math.isclose(actual_sparsity, sparsity, abs_tol=0.1), msg
class Model(nn.Module):
"""
A binary classifier using a transformer encoder for contextual embedding.
"""
def __init__(self, n_layer, hidden_dim, n_head):
super(Model, self).__init__()
self.embedding = TransformerEncoder(vocab_size=100, hidden_dim=hidden_dim, n_layers=n_layer, n_heads=n_head)
self.classifier = nn.Linear(hidden_dim, 1)
def forward(self, x, mask):
raw_output = self.embedding(x, mask)
pooled_output = raw_output[0]
prediction = torch.sigmoid(self.classifier(pooled_output)).squeeze()  # F.sigmoid is deprecated
return prediction
def train(model, dataloader, criterion, optimizer):
model.train()
device = next(model.parameters()).device
for _ in range(2):
y = torch.ones(10).to(device)
out = model(torch.randint(0, 100, (4, 10)).to(device), torch.ones(10).to(device))
loss = criterion(out, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def dry_run(model):
device = next(model.parameters()).device
for _ in range(2):
y = torch.ones(10).to(device)
_ = model(torch.randint(0, 100, (4, 10)).to(device), torch.ones(10).to(device))
def head_pruner_tests(criterion, global_sort, use_graph, iterative):
print("Testing criterion {} with global_sort={} and use_graph={}".format(criterion, global_sort, use_graph))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Build config list and arguments
config_list = [{'sparsity': 0.5, 'op_types': ['Linear']}]
kwargs = {'ranking_criterion': criterion, 'head_hidden_dim': 64}
kwargs['global_sort'] = global_sort
if use_graph:
attention_name_groups = list(zip(['embedding.layers.{}.self_attn.q_proj'.format(i) for i in range(6)],
['embedding.layers.{}.self_attn.k_proj'.format(i) for i in range(6)],
['embedding.layers.{}.self_attn.v_proj'.format(i) for i in range(6)],
['embedding.layers.{}.self_attn.output_proj'.format(i) for i in range(6)]))
kwargs['attention_name_groups'] = attention_name_groups
else:
dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device))
kwargs['dummy_input'] = dummy_input
if iterative:
kwargs['num_iterations'] = 2
kwargs['epochs_per_iteration'] = 1
n_layers = 6
n_heads = 8
hidden_dim = 512
model = Model(n_layers, hidden_dim, n_heads)
model.to(device)
kwargs['optimizer'] = torch.optim.SGD(model.parameters(), lr=0.001)
def trainer(model, optimizer, criterion, epoch):
return train(model, None, criterion, optimizer)
kwargs['trainer'] = trainer
kwargs['criterion'] = nn.BCELoss()
def forward_runner(model):
return dry_run(model)
kwargs['forward_runner'] = forward_runner
# create pruner and call compress()
pruner = TransformerHeadPruner(model, config_list, **kwargs)
pruner.compress()
# test model and mask export
pruner.export_model('./model_tmp.pth', './mask_tmp.pth', device=device)
dummy_input = (torch.randint(0, 100, (10, 32)).to(device), torch.ones(32).to(device))
pruner.export_model('./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth',
dummy_input=dummy_input, opset_version=10)
# validate sparsity
if not global_sort:
for wrapper in pruner.modules_wrapper:
validate_sparsity(wrapper, wrapper.config['sparsity'])
class PrunerTestCase(TestCase):
def test_head_pruner(self):
for criterion in ["l1_weight", "l2_weight", "l1_activation", "l2_activation", "taylorfo"]:
for global_sort in [False, True]:
for use_graph in [False, True]:
for iterative in [False, True]:
head_pruner_tests(criterion, global_sort, use_graph, iterative)
file_paths = ['./model_tmp.pth', './mask_tmp.pth', './onnx_tmp.pth', './search_history.csv',
'./search_result.json']
for f in file_paths:
if os.path.exists(f):
os.remove(f)
if __name__ == '__main__':
main()
|
from colorsys import hsv_to_rgb
from functools import reduce
import math
import operator
import queue
import time
import threading
from cuesdk import CueSdk
from cuesdk.helpers import ColorRgb
def swirls2(env, px, py):
"""
Source https://www.shadertoy.com/view/4dX3Rf
"""
t = env.time
x = px - (env.resolution.x / 2)
y = py - (env.resolution.y / 2)
r = math.sqrt(x**2 + y**2)
angle = math.atan2(x, y) - math.sin(t) * r / 200 + t
intensity = 0.5 + 0.25 * math.sin(15 * angle)
return hsv_to_rgb(angle / math.pi, intensity, 1)
def gradient(env, px, py):
x = px / env.resolution.x
y = py / env.resolution.y
return (x, y, 0)
def rainbow45(env, px, py):
uvx = px / env.resolution.x
uvy = py / env.resolution.y
direction = math.radians(45)
xr = uvx * math.cos(direction) - uvy * math.sin(direction)
vec = [0.0, 0.66, 0.33]
rgb = [(math.sin(-8 * env.time + (xr + v) * math.pi * 2) * 0.5 + 0.5)
for v in vec]
return rgb
class Resolution:
def __init__(self, x, y):
self.x = x
self.y = y
class FxEnv:
def __init__(self, resolution, time=0):
self.resolution = resolution
self.time = time
class DeviceFrame:
def __init__(self, leds):
self.leds = leds.copy()
if leds:
max_by_x, max_by_y = reduce(
lambda acc, pt: (max(acc[0], pt[0]), max(acc[1], pt[1])),
leds.values(), (float('-inf'), float('-inf')))
self.env = FxEnv(Resolution(max_by_x, max_by_y))
self.colors = dict.fromkeys(leds, (0, 0, 0))
self.empty = False
else:
self.empty = True
def update(self, frame_time, fx):
if self.empty:
return
self.env.time = frame_time
for key in self.colors:
self.colors[key] = ColorRgb.from_vec3(
*fx(self.env,
self.leds[key][0],
self.leds[key][1])).rgb
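# Usage sketch (illustrative addition): the effect functions above act as
# tiny pixel shaders; given an environment (resolution + time) and a key
# position they return an RGB triple in 0..1. This SDK-free demo evaluates
# one effect over a made-up 3x2 key layout.
def _demo_effects():
    leds = {0: (0, 0), 1: (1, 0), 2: (2, 0),
            3: (0, 1), 4: (1, 1), 5: (2, 1)}
    frame = DeviceFrame(leds)
    frame.update(frame_time=0.0, fx=gradient)
    return frame.colors  # key -> (r, g, b), packed via ColorRgb.from_vec3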
def read_keys(input_queue):
while True:
input_str = input()
input_queue.put(input_str)
def main():
sdk = CueSdk()
connected = sdk.connect()
if not connected:
err = sdk.get_last_error()
print("Handshake failed: %s" % err)
return
frames = list()
device_count = sdk.get_device_count()
for device_index in range(device_count):
led_positions = sdk.get_led_positions_by_device_index(device_index)
frames.append(DeviceFrame(led_positions))
if not frames:
return
# List of effects.
fxs = [gradient, rainbow45, swirls2]
fxi = 0
input_queue = queue.Queue()
input_thread = threading.Thread(target=read_keys,
args=(input_queue, ),
daemon=True)
input_thread.start()
print('Working...\nPress "q" to close program\n'
"Press any other key to switch between effects")
while True:
if input_queue.qsize() > 0:
input_str = input_queue.get()
if input_str.lower() == "q":
print("Exiting.")
break
else:
fxi = (fxi + 1) % len(fxs)
print("Switching to %s" % fxs[fxi].__name__)
frame_time = time.time()
for di in range(device_count):
frame = frames[di]
if not frame.empty:
frame.update(frame_time, fxs[fxi])
sdk.set_led_colors_buffer_by_device_index(di, frame.colors)
sdk.set_led_colors_flush_buffer()
if __name__ == "__main__":
main()
|
# import the necessary packages
from keras.utils import np_utils
import numpy as np
import h5py
class HDF5DatasetGenerator:
def __init__(self,dbPath,batchSize,preprocessors=None,
aug=None,binarize=True, classes=2):
# store the batch size, preprocessors, and data augmentor,
# whether or not the labels should be binarized, along with
# the total number of classes
# aug: defaults to None; a Keras ImageDataGenerator may be supplied
# to apply augmentation directly inside the HDF5DatasetGenerator
# binarize: whether to one-hot encode the labels
self.batchSize = batchSize
self.preprocessors = preprocessors
self.aug = aug
self.binarize = binarize
self.classes = classes
# open the HDF5 database for reading and determine the total
# number of entries in the database
self.db = h5py.File(dbPath, "r")  # read mode; an explicit mode is required by h5py >= 3
self.numImages = self.db["labels"].shape[0]
# next we need to define a generator function which, as the name suggests,
# is responsible for yielding batches of images and labels to the
# .fit_generator function when training a network
def generator(self,passes=np.inf):
# initialize the epoch count
epochs = 0
# keep looping infinitely -- the model will stop once we
# have reached the desired number of epochs
while epochs < passes:
# loop over the HDF5 dataset
for i in range(0,self.numImages,self.batchSize):
# extract the images and labels from the HDF5 dataset
images = self.db["images"][i:i+self.batchSize]
labels = self.db["labels"][i:i+self.batchSize]
# check to see if the labels should be binarized
if self.binarize:
labels = np_utils.to_categorical(labels,self.classes)
# check to see if our preprocessors are not None
if self.preprocessors is not None:
# initialize the list of processed images
procImages = []
# loop over the images
for image in images:
# loop over the preprocessors and apply each
# to the image
for p in self.preprocessors:
image = p.preprocess(image)
# update the list of processed images
procImages.append(image)
# update the images array to the processed images
images = np.array(procImages)
# if the data augmentor exists, apply it
if self.aug is not None:
(images,labels) = next(self.aug.flow(images,labels,batch_size=self.batchSize))
yield (images,labels)
epochs += 1
# close the database
def close(self):
self.db.close()
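# Usage sketch (illustrative addition): hypothetical wiring into the legacy
# Keras .fit_generator API mentioned above; the HDF5 path, batch size, and
# model are stand-ins. `generator()` yields batches indefinitely by default,
# so steps_per_epoch must bound each epoch via numImages.
def _demo_hdf5_generator(model, db_path="train.hdf5", batch_size=32):
    gen = HDF5DatasetGenerator(db_path, batch_size, classes=2)
    model.fit_generator(gen.generator(),
                        steps_per_epoch=gen.numImages // batch_size,
                        epochs=10)
    gen.close()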
|
#!/usr/bin/python
#****************************************************************************
#* ivpm.py
#*
#* This is the bootstrap ivpm.py script that is included with each project.
#* This script ensures that the *actual* ivpm is downloaded in the
#* project packages dir
#****************************************************************************
import os.path
import sys
import subprocess
#********************************************************************
#* download_ivpm
#*
#*
#********************************************************************
def download_ivpm(packages_dir):
if not os.path.isdir(packages_dir):
os.makedirs(packages_dir)
cwd = os.getcwd()
os.chdir(packages_dir)
status = os.system("git clone https://github.com/mballance/ivpm.git")
os.chdir(cwd)
def main():
scripts_dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.dirname(scripts_dir)
packages_dir = project_dir + "/packages"
ivpm_dir = packages_dir + "/ivpm"
help_requested = False
for arg in sys.argv:
if (arg == "-help" or arg == "--help"
or arg == "-h" or arg == "--h"
or arg == "-?"):
help_requested = True
# First see if we've already downloaded IVPM
if not os.path.isdir(ivpm_dir):
if help_requested:
print("Local help")
sys.exit(1)
download_ivpm(packages_dir)
# Bring in the actual IVPM script and call it
sys.path.insert(0, ivpm_dir + "/src")
from ivpm.__main__ import main
main(project_dir)
if __name__ == "__main__":
main()
|
from collections.abc import Sequence, Iterable  # collections.abc since Python 3.3; the direct import was removed in 3.10
isproperty = lambda attr: not (not attr.__name__ \
or attr.__name__.startswith('_') \
or callable(attr) or isinstance(attr, property) )
ispropertyof = lambda obj, name: getattr(obj, name) \
and isproperty(getattr(obj, name))
ismethod = lambda attr: not (not attr.__name__ \
or attr.__name__.startswith('_') \
or isinstance(attr, property) ) and callable(attr)
ismethodof = lambda obj, name: getattr(obj, name) \
and ismethod(getattr(obj, name))
iscallable = lambda attr: attr.__name__ and not attr.__name__.startswith('_') \
and (callable(attr) or isinstance(attr, property))
iscallableof = lambda obj, name: getattr(obj, name) \
and iscallable(getattr(obj, name))
def replacer(token, replacements, ws=None, non_alnum=None):
replaced = []
i, n = 0, len(token)
while i < n:
for (k,v) in replacements.items():
l_k = len(k)
if i + l_k <= n and token[i:i+l_k] == k:
replaced.append(v)
i += l_k
break
else:
c = token[i]
if ws is not None and c.isspace():
if i == 0 or not token[i-1].isspace():
replaced.append(ws)
elif non_alnum is not None and not c.isalnum():
replaced.append(non_alnum)
else:
replaced.append(c)
i += 1
return ''.join(replaced)
def camel2py(token):
i, n = 0, len(token)
pytoken, li = [token[:i]], i
while i < n:
c = token[i]
if 'A' <= c and c <= 'Z':
pytoken.append(token[li:i])
if i > 0 and token[i-1] != '_': pytoken.append('_')
pytoken.append(chr(ord(c) + 0x61 - 0x41))
li = i + 1
i += 1
pytoken.append(token[li:])
return ''.join(pytoken)
assert camel2py("MyClass") == 'my_class'
assert camel2py("_MyClass") == '_my_class'
assert camel2py("__MyClass") == '__my_class'
assert camel2py("__MyClass__") == '__my_class__'
assert camel2py("__MyClassM__") == '__my_class_m__'
assert camel2py("__MyClassMethod__") == '__my_class_method__'
assert camel2py("__MyClass_Method__") == '__my_class_method__'
TO_CONSTANT_NAME_REPLACEMENTS = \
{ "a": ''
, 'e': ''
, 'i': ''
, 'o': ''
, 'u': ''
, 'ck': 'k'
, 'br': 'b', 'cr': 'c', 'fr': 'f', 'gr': 'g', 'kr': 'k', 'pr': 'p', 'tr': 't'
, 'colorspace': '', 'color space': ''
}
def to_constant_name(token, max_length=8, prefix='', suffix=''
    , replacements=TO_CONSTANT_NAME_REPLACEMENTS):
if token.isupper(): token = token.lower()
if len(token) > max_length:
short_token = replacer(token, replacements, '_', '')
if len(short_token) > len(token):
short_token = replacer(token, {}, '_', '')
else:
short_token = replacer(token, {}, '_', '')
camel_token = camel2py(short_token)
if len(camel_token) >= max_length+3:
camel_token = short_token.upper()
return (prefix + camel_token + suffix).upper()
def define_constants(tokens, max_length=8, prefix='', suffix=''):
if isinstance(tokens, str):
tokens = tuple(token.strip() for token in tokens.strip().split(','))
return dict((to_constant_name(token, max_length, prefix, suffix), token) for token in tokens)
def enumerate_constants(tokens, max_length=8, prefix='', suffix=''):
if isinstance(tokens, str):
tokens = tuple(token.strip() for token in tokens.strip().split(','))
return dict((to_constant_name(tokens[i_tokens], max_length, prefix, suffix), i_tokens) \
for i_tokens in range(len(tokens)))
def maskerate_constants(tokens, max_length=8, prefix='', suffix=''):
if isinstance(tokens, str):
tokens = tuple(token.strip() for token in tokens.strip().split(','))
return dict((to_constant_name(tokens[i_tokens], max_length, prefix, suffix), 2**i_tokens) \
for i_tokens in range(len(tokens)))
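# A few hedged examples of the three helpers above, in the same module-level
# assert style as the camel2py checks: `define_constants` maps a generated
# NAME back to its token, `enumerate_constants` numbers the tokens, and
# `maskerate_constants` assigns power-of-two bit flags.
assert define_constants('red, green, blue') == {'RED': 'red', 'GREEN': 'green', 'BLUE': 'blue'}
assert enumerate_constants('red, green, blue') == {'RED': 0, 'GREEN': 1, 'BLUE': 2}
assert maskerate_constants('red, green, blue') == {'RED': 1, 'GREEN': 2, 'BLUE': 4}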
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import extended_volumes
from nova import compute
from nova import db
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_block_device
from nova.tests import fake_instance
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID1)
return fake_instance.fake_instance_obj(args[1], **inst)
def fake_compute_get_all(*args, **kwargs):
db_list = [fakes.stub_instance(1), fakes.stub_instance(2)]
fields = instance_obj.INSTANCE_DEFAULT_FIELDS
return instance_obj._make_instance_list(args[1],
instance_obj.InstanceList(),
db_list, fields)
def fake_bdms_get_all_by_instance(*args, **kwargs):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': UUID1, 'source_type': 'volume',
'destination_type': 'volume', 'id': 1}),
fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': UUID2, 'source_type': 'volume',
'destination_type': 'volume', 'id': 2})]
class ExtendedVolumesTest(test.TestCase):
content_type = 'application/json'
prefix = 'os-extended-volumes:'
def setUp(self):
super(ExtendedVolumesTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_bdms_get_all_by_instance)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Extended_volumes'])
return_server = fakes.fake_instance_get()
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
return res
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def test_show(self):
url = '/v2/fake/servers/%s' % UUID1
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
exp_volumes = [{'id': UUID1}, {'id': UUID2}]
if self.content_type == 'application/json':
actual = server.get('%svolumes_attached' % self.prefix)
elif self.content_type == 'application/xml':
actual = [dict(elem.items()) for elem in
server.findall('%svolume_attached' % self.prefix)]
self.assertEqual(exp_volumes, actual)
def test_detail(self):
url = '/v2/fake/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
exp_volumes = [{'id': UUID1}, {'id': UUID2}]
for i, server in enumerate(self._get_servers(res.body)):
if self.content_type == 'application/json':
actual = server.get('%svolumes_attached' % self.prefix)
elif self.content_type == 'application/xml':
actual = [dict(elem.items()) for elem in
server.findall('%svolume_attached' % self.prefix)]
self.assertEqual(exp_volumes, actual)
class ExtendedVolumesXmlTest(ExtendedVolumesTest):
content_type = 'application/xml'
prefix = '{%s}' % extended_volumes.Extended_volumes.namespace
def _get_server(self, body):
return etree.XML(body)
def _get_servers(self, body):
return etree.XML(body).getchildren()
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateBdsInstanceDetails(object):
"""
The information about the Big Data Service cluster to be updated.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateBdsInstanceDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this UpdateBdsInstanceDetails.
:type display_name: str
:param bootstrap_script_url:
The value to assign to the bootstrap_script_url property of this UpdateBdsInstanceDetails.
:type bootstrap_script_url: str
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateBdsInstanceDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateBdsInstanceDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'display_name': 'str',
'bootstrap_script_url': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'display_name': 'displayName',
'bootstrap_script_url': 'bootstrapScriptUrl',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._display_name = None
self._bootstrap_script_url = None
self._freeform_tags = None
self._defined_tags = None
@property
def display_name(self):
"""
Gets the display_name of this UpdateBdsInstanceDetails.
Name of the cluster.
:return: The display_name of this UpdateBdsInstanceDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this UpdateBdsInstanceDetails.
Name of the cluster.
:param display_name: The display_name of this UpdateBdsInstanceDetails.
:type: str
"""
self._display_name = display_name
@property
def bootstrap_script_url(self):
"""
Gets the bootstrap_script_url of this UpdateBdsInstanceDetails.
Pre-authenticated URL of the bootstrap script in Object Store that can be downloaded and executed.
:return: The bootstrap_script_url of this UpdateBdsInstanceDetails.
:rtype: str
"""
return self._bootstrap_script_url
@bootstrap_script_url.setter
def bootstrap_script_url(self, bootstrap_script_url):
"""
Sets the bootstrap_script_url of this UpdateBdsInstanceDetails.
Pre-authenticated URL of the bootstrap script in Object Store that can be downloaded and executed.
:param bootstrap_script_url: The bootstrap_script_url of this UpdateBdsInstanceDetails.
:type: str
"""
self._bootstrap_script_url = bootstrap_script_url
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this UpdateBdsInstanceDetails.
Simple key-value pair that is applied without any predefined name, type, or scope.
Exists for cross-compatibility only. For example, `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this UpdateBdsInstanceDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this UpdateBdsInstanceDetails.
Simple key-value pair that is applied without any predefined name, type, or scope.
Exists for cross-compatibility only. For example, `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this UpdateBdsInstanceDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this UpdateBdsInstanceDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For example, `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this UpdateBdsInstanceDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this UpdateBdsInstanceDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For example, `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this UpdateBdsInstanceDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
""" Implementation of abstract Disposable.
"""
from abc import ABCMeta, abstractmethod
class Disposable(metaclass=ABCMeta):
"""
Implementation of the disposable pattern. A disposable is usually
returned on resource allocation. Calling .dispose() on the returned
disposable is freeing the resource.
Note: Multiple calls to .dispose() have to be handled by the
implementation.
>>> class MyDisposable(Disposable):
... def dispose(self):
... print('DISPOSED')
>>> with MyDisposable():
... print('working')
working
DISPOSED
"""
@abstractmethod
def dispose(self) -> None:
""" .dispose() method has to be overwritten"""
def __enter__(self):
""" Called on entry of a new context """
return self
def __exit__(self, _type, _value, _traceback):
""" Called on exit of the context. .dispose() is called here """
self.dispose()
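# A minimal concrete sketch illustrating the "multiple .dispose() calls" note
# in the class docstring (hypothetical example, not part of the original module):
class _FileDisposable(Disposable):
    """Disposable that closes a file handle; safe to dispose more than once."""
    def __init__(self, path: str):
        self._fh = open(path, 'w')
    def dispose(self) -> None:
        # Guard so repeated calls are a no-op, as the docstring requires
        if self._fh is not None:
            self._fh.close()
            self._fh = None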
|
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from nonstdlib.debug import *
class ListHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.records = []
def __getitem__(self, key):
return self.records[key]
def handle(self, record):
self.records.append(record)
## Configure the logging system for the tests.
handler = ListHandler()
root = logging.getLogger()
root.setLevel(0)
root.addHandler(handler)
## Make sure there aren't any stupid typos in the public interface.
debug("Debug level")
info("Info level")
warning("Warning level")
warn("Warning level")
error("Error level")
critical("Critical error")
fatal("Fatal error")
## Make sure different scopes are properly incorporated into the output.
info("Module level")
def foo(): # (no fold)
info("Function level")
foo()
class Bar: # (no fold)
def __init__(self):
info("Method level")
Bar()
def test_public_interface():
assert handler[0].levelno == 10
assert handler[0].levelname == 'DEBUG'
assert handler[0].msg == "Debug level"
assert handler[1].levelno == 20
assert handler[1].levelname == 'INFO'
assert handler[1].msg == "Info level"
assert handler[2].levelno == 30
assert handler[2].levelname == 'WARNING'
assert handler[2].msg == "Warning level"
assert handler[3].levelno == 30
assert handler[3].levelname == 'WARNING'
assert handler[3].msg == "Warning level"
assert handler[4].levelno == 40
assert handler[4].levelname == 'ERROR'
assert handler[4].msg == "Error level"
assert handler[5].levelno == 50
assert handler[5].levelname == 'CRITICAL'
assert handler[5].msg == "Critical error"
assert handler[6].levelno == 50
assert handler[6].levelname == 'CRITICAL'
assert handler[6].msg == "Fatal error"
def test_logger_names():
assert handler[7].name == '10_test_debug'
assert handler[7].msg == "Module level"
assert handler[8].name == '10_test_debug.foo'
assert handler[8].msg == "Function level"
assert handler[9].name == '10_test_debug.Bar'
assert handler[9].msg == "Method level"
def test_logger_scopes():
assert handler[7].pathname == __file__
assert handler[7].lineno == 42
assert handler[7].funcName == '<module>'
assert handler[8].pathname == __file__
assert handler[8].lineno == 45
assert handler[8].funcName == 'foo'
assert handler[9].pathname == __file__
assert handler[9].lineno == 51
assert handler[9].funcName == '__init__'
def test_log_level():
assert log_level(1) == 1
assert log_level("99") == 99
assert log_level("info") == logging.INFO
def test_verbosity():
assert verbosity(0) == logging.WARNING
assert verbosity(1) == logging.INFO
assert verbosity(2) == logging.DEBUG
assert verbosity(3) == 0
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
:author: lubosson
:date: 2019-04-15
:desc:
"""
import sys
sys.path.append('..')
""" 关键字搜索API """
SEARCH_API = "https://xcx.qichacha.com/wxa/v1/base/advancedSearchNew"
""" 企业详情API """
COMPANY_DETAIL_API = "https://xcx.qichacha.com/wxa/v1/base/getEntDetail"
""" 地区代码列表 """
AREA_API = "https://xcx.qichacha.com/wxa/v1/admin/getAreaList"
""" web浏览器no-login COOKIE """
COOKIE = "zg_did=%7B%22did%22%3A%20%22168dbc0b22f6e5-0d361e70cfef92-10306653-13c680-168dbc0b23013bd%22%7D; _uab_collina=154987506595105102560196; acw_tc=78c7474915498750659746725e47bcf5da5e01750eaa818d83d5019d1f; saveFpTip=true; UM_distinctid=168e101305e193-0665042ea0cf1-133b6850-13c680-168e101305f37d; CNZZDATA1254842228=1871928231-1549959491-https%253A%252F%252Fwww.qichacha.com%252F%7C1549959491; QCCSESSID=780j6eils4m98fspmr9cvtc9p5; hasShow=1; zg_de1d1a35bfa24ce29bbf2c7eb17e6c4f=%7B%22sid%22%3A%201551756182960%2C%22updated%22%3A%201551756803803%2C%22info%22%3A%201551242110203%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22%22%2C%22cuid%22%3A%20%22fc6fca91d248e7cf976bd652db7e11c6%22%7D"
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36"
""" 伪装请求头,更多参数抓包qcc小程序 """
REQUEST_HEADERS = {
"User-Agent": USER_AGENT,
"Cookie": COOKIE
}
"""
授权企查查小程序返回TOKEN 过期时间1h, 自行更新
可走代理方式模拟应用登陆获取该token
"""
TOKEN = "9a62aaad7cda6c73a35d598f93e8d169"
|
#!/usr/bin/env python3
import strawberryfields as sf
from strawberryfields.ops import *
from strawberryfields.utils import scale
from numpy import pi, sqrt
import numpy as np
# initialize engine and program objects
eng = sf.Engine(backend="gaussian")
gaussian_cloning = sf.Program(4)
with gaussian_cloning.context as q:
# state to be cloned
Coherent(0.7+1.2j) | q[0]
# 50-50 beamsplitter
BS = BSgate(pi/4, 0)
# symmetric Gaussian cloning scheme
BS | (q[0], q[1])
BS | (q[1], q[2])
MeasureX | q[1]
MeasureP | q[2]
Xgate(scale(q[1], sqrt(2))) | q[0]
Zgate(scale(q[2], sqrt(2))) | q[0]
    # after the final beamsplitter, modes q[0] and q[3]
    # will contain identical approximate clones of the
    # initial state Coherent(0.7+1.2j)
BS | (q[0], q[3])
# end circuit
# run the engine
results = eng.run(gaussian_cloning, run_options={"modes": [0, 3]})
# return the cloning fidelity
fidelity = sqrt(results.state.fidelity_coherent([0.7+1.2j, 0.7+1.2j]))
# return the cloned displacement
alpha = results.state.displacement()
# run the engine over an ensemble
reps = 1000
f = np.empty([reps])
a = np.empty([reps], dtype=np.complex128)
for i in range(reps):
eng.reset()
results = eng.run(gaussian_cloning, run_options={"modes": [0]})
f[i] = results.state.fidelity_coherent([0.7+1.2j])
a[i] = results.state.displacement()
print("Fidelity of cloned state:", np.mean(f))
print("Mean displacement of cloned state:", np.mean(a))
print("Mean covariance matrix of cloned state:", np.cov([a.real, a.imag]))
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities with minimum-depends for use in setup.py
"""
import email
import os
import re
import subprocess
import sys
from setuptools.command import sdist
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
with open(mailmap, 'r') as fp:
for l in fp:
try:
canonical_email, alias = re.match(
r'[^#]*?(<.+>).*(<.+>).*', l).groups()
except AttributeError:
continue
mapping[alias] = canonical_email
return mapping
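# Example .mailmap line handled by parse_mailmap() (illustrative):
#   'Jane Doe <jane@canonical.example> Jane D <jane@alias.example>'
#   yields mapping['<jane@alias.example>'] = '<jane@canonical.example>'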
def _parse_git_mailmap(git_dir, mailmap='.mailmap'):
mailmap = os.path.join(os.path.dirname(git_dir), mailmap)
return parse_mailmap(mailmap)
def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
for alias, email_address in mapping.iteritems():
changelog = changelog.replace(alias, email_address)
return changelog
# Get requirements from the first file that exists
def get_reqs_from_files(requirements_files):
for requirements_file in requirements_files:
if os.path.exists(requirements_file):
with open(requirements_file, 'r') as fil:
return fil.read().split('\n')
return []
def parse_requirements(requirements_files=['requirements.txt',
'tools/pip-requires']):
requirements = []
for line in get_reqs_from_files(requirements_files):
# For the requirements list, we need to inject only the portion
# after egg= so that distutils knows the package it's looking for
# such as:
# -e git://github.com/openstack/nova/master#egg=nova
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1',
line))
# such as:
# http://github.com/openstack/nova/zipball/master#egg=nova
elif re.match(r'\s*https?:', line):
requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
line))
# -f lines are for index locations, and don't get used here
elif re.match(r'\s*-f\s+', line):
pass
# argparse is part of the standard library starting with 2.7
# adding it to the requirements list screws distro installs
elif line == 'argparse' and sys.version_info >= (2, 7):
pass
else:
requirements.append(line)
return requirements
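# Illustrative behavior of parse_requirements() on common pip lines:
#   '-e git://github.com/openstack/nova/master#egg=nova'       -> 'nova'
#   'http://github.com/openstack/nova/zipball/master#egg=nova' -> 'nova'
#   '-f http://example.com/simple'                             -> dropped
#   'argparse' (on Python >= 2.7)                              -> dropped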
def parse_dependency_links(requirements_files=['requirements.txt',
'tools/pip-requires']):
dependency_links = []
# dependency_links inject alternate locations to find packages listed
# in requirements
for line in get_reqs_from_files(requirements_files):
# skip comments and blank lines
if re.match(r'(\s*#)|(\s*$)', line):
continue
# lines with -e or -f need the whole line, minus the flag
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
# lines that are only urls can go in unmolested
elif re.match(r'\s*https?:', line):
dependency_links.append(line)
return dependency_links
def _run_shell_command(cmd, throw_on_error=False):
if os.name == 'nt':
output = subprocess.Popen(["cmd.exe", "/C", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
output = subprocess.Popen(["/bin/sh", "-c", cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = output.communicate()
    if output.returncode and throw_on_error:
        raise Exception("%s returned %d" % (cmd, output.returncode))
if len(out) == 0:
return None
if len(out[0].strip()) == 0:
return None
return out[0].strip()
def _get_git_directory():
parent_dir = os.path.dirname(__file__)
while True:
git_dir = os.path.join(parent_dir, '.git')
if os.path.exists(git_dir):
return git_dir
parent_dir, child = os.path.split(parent_dir)
        if not child:  # reached the root dir
return None
def write_git_changelog():
"""Write a changelog based on the git changelog."""
new_changelog = 'ChangeLog'
git_dir = _get_git_directory()
if not os.getenv('SKIP_WRITE_GIT_CHANGELOG'):
if git_dir:
git_log_cmd = 'git --git-dir=%s log' % git_dir
changelog = _run_shell_command(git_log_cmd)
mailmap = _parse_git_mailmap(git_dir)
with open(new_changelog, "w") as changelog_file:
changelog_file.write(canonicalize_emails(changelog, mailmap))
else:
open(new_changelog, 'w').close()
def generate_authors():
"""Create AUTHORS file using git commits."""
jenkins_email = 'jenkins@review.(openstack|stackforge).org'
old_authors = 'AUTHORS.in'
new_authors = 'AUTHORS'
git_dir = _get_git_directory()
if not os.getenv('SKIP_GENERATE_AUTHORS'):
if git_dir:
# don't include jenkins email address in AUTHORS file
git_log_cmd = ("git --git-dir=" + git_dir +
" log --format='%aN <%aE>' | sort -u | "
"egrep -v '" + jenkins_email + "'")
changelog = _run_shell_command(git_log_cmd)
mailmap = _parse_git_mailmap(git_dir)
with open(new_authors, 'w') as new_authors_fh:
new_authors_fh.write(canonicalize_emails(changelog, mailmap))
if os.path.exists(old_authors):
with open(old_authors, "r") as old_authors_fh:
new_authors_fh.write('\n' + old_authors_fh.read())
else:
open(new_authors, 'w').close()
_rst_template = """%(heading)s
%(underline)s
.. automodule:: %(module)s
:members:
:undoc-members:
:show-inheritance:
"""
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
cmdclass = dict()
def _find_modules(arg, dirname, files):
for filename in files:
if filename.endswith('.py') and filename != '__init__.py':
arg["%s.%s" % (dirname.replace('/', '.'),
filename[:-3])] = True
class LocalSDist(sdist.sdist):
"""Builds the ChangeLog and Authors files from VC first."""
def run(self):
write_git_changelog()
generate_authors()
# sdist.sdist is an old style class, can't use super()
sdist.sdist.run(self)
cmdclass['sdist'] = LocalSDist
# If Sphinx is installed on the box running setup.py,
# enable setup.py to build the documentation, otherwise,
# just ignore it
try:
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
builders = ['html', 'man']
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
option_dict = self.distribution.get_option_dict('build_sphinx')
source_dir = os.path.join(option_dict['source_dir'][1], 'api')
if not os.path.exists(source_dir):
os.makedirs(source_dir)
for pkg in self.distribution.packages:
if '.' not in pkg:
os.path.walk(pkg, _find_modules, modules)
module_list = modules.keys()
module_list.sort()
autoindex_filename = os.path.join(source_dir, 'autoindex.rst')
with open(autoindex_filename, 'w') as autoindex:
autoindex.write(""".. toctree::
:maxdepth: 1
""")
for module in module_list:
output_filename = os.path.join(source_dir,
"%s.rst" % module)
heading = "The :mod:`%s` Module" % module
underline = "=" * len(heading)
values = dict(module=module, heading=heading,
underline=underline)
print "Generating %s" % output_filename
with open(output_filename, 'w') as output_file:
output_file.write(_rst_template % values)
autoindex.write(" %s.rst\n" % module)
def run(self):
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
for builder in self.builders:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
class LocalBuildLatex(LocalBuildDoc):
builders = ['latex']
cmdclass['build_sphinx'] = LocalBuildDoc
cmdclass['build_sphinx_latex'] = LocalBuildLatex
except ImportError:
pass
return cmdclass
def _get_revno(git_dir):
"""Return the number of commits since the most recent tag.
We use git-describe to find this out, but if there are no
tags then we fall back to counting commits since the beginning
of time.
"""
describe = _run_shell_command(
"git --git-dir=%s describe --always" % git_dir)
if "-" in describe:
return describe.rsplit("-", 2)[-2]
# no tags found
revlist = _run_shell_command(
"git --git-dir=%s rev-list --abbrev-commit HEAD" % git_dir)
return len(revlist.splitlines())
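# Illustrative: a 'git describe' output such as '1.0.0-5-gdeadbee' yields '5'
# via rsplit('-', 2)[-2]; with no tags, the rev-list line count is returned.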
def _get_version_from_git(pre_version):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
git_dir = _get_git_directory()
if git_dir:
if pre_version:
try:
return _run_shell_command(
"git --git-dir=" + git_dir + " describe --exact-match",
throw_on_error=True).replace('-', '.')
except Exception:
sha = _run_shell_command(
"git --git-dir=" + git_dir + " log -n1 --pretty=format:%h")
return "%s.a%s.g%s" % (pre_version, _get_revno(git_dir), sha)
else:
return _run_shell_command(
"git --git-dir=" + git_dir + " describe --always").replace(
'-', '.')
return None
def _get_version_from_pkg_info(package_name):
"""Get the version from PKG-INFO file if we can."""
try:
pkg_info_file = open('PKG-INFO', 'r')
except (IOError, OSError):
return None
try:
pkg_info = email.message_from_file(pkg_info_file)
except email.MessageError:
return None
# Check to make sure we're in our own dir
if pkg_info.get('Name', None) != package_name:
return None
return pkg_info.get('Version', None)
def get_version(package_name, pre_version=None):
"""Get the version of the project. First, try getting it from PKG-INFO, if
it exists. If it does, that means we're in a distribution tarball or that
install has happened. Otherwise, if there is no PKG-INFO file, pull the
version from git.
We do not support setup.py version sanity in git archive tarballs, nor do
we support packagers directly sucking our git repo into theirs. We expect
that a source tarball be made from our git repo - or that if someone wants
to make a source tarball from a fork of our repo with additional tags in it
that they understand and desire the results of doing that.
"""
version = os.environ.get("OSLO_PACKAGE_VERSION", None)
if version:
return version
version = _get_version_from_pkg_info(package_name)
if version:
return version
version = _get_version_from_git(pre_version)
if version:
return version
raise Exception("Versioning for this project requires either an sdist"
" tarball, or access to an upstream git repository.")
|
from datetime import timedelta, date
from django.db.models import Max
from custom.icds_reports.utils.aggregation_helpers.distributed.base import BaseICDSAggregationDistributedHelper
from custom.icds_reports.const import AGG_DASHBOARD_ACTIVITY
from django.utils.functional import cached_property
from django.contrib.auth.models import User
from corehq.apps.users.dbaccessors.all_commcare_users import get_user_docs_by_username
from dimagi.utils.chunked import chunked
class DashboardActivityReportAggregate(BaseICDSAggregationDistributedHelper):
aggregate_parent_table = AGG_DASHBOARD_ACTIVITY
def __init__(self, date):
self.date = date
self.last_agg_date = self.get_last_agg_date()
def aggregate(self, cursor):
drop_query = self.drop_table_query()
create_table_query, create_table_param = self.create_table_query()
add_query, add_params = self.add_latest_users_list()
rollover_query, rollover_param = self.rollover_previous_data()
update_queries = self.update_queries()
cursor.execute(drop_query)
cursor.execute(create_table_query, create_table_param)
cursor.cursor.execute(add_query, add_params)
cursor.execute(rollover_query, rollover_param)
for query, param in update_queries:
cursor.execute(query, param)
@cached_property
def dashboard_users(self):
usernames = User.objects.filter(username__regex=r'^\d*\.[a-zA-Z]*@.*').values_list('username',
flat=True)
user_docs = list()
for user_list in chunked(usernames, 200):
user_docs.extend(get_user_docs_by_username(user_list))
return user_docs
@cached_property
def transformed_locations(self):
"""
        :return: Returns a dict keyed by location_id with its info (loc_level, parents) as value
eg: {
block_loc_id1: {
'loc_level':3,
'parents':{
'district_id': 'district_loc_id1',
'state_id': 'state_loc_id1'
}
}
}
"""
from custom.icds_reports.models.aggregate import AwcLocation
locations = (AwcLocation.objects.filter(aggregation_level=3).
exclude(state_is_test=1).
values('state_id', 'district_id', 'block_id'))
transformed_locations = dict()
for loc in locations:
state_id = loc['state_id']
district_id = loc['district_id']
block_id = loc['block_id']
if state_id not in transformed_locations:
transformed_locations[state_id] = {'loc_level': 1}
if district_id not in transformed_locations:
transformed_locations[district_id] = {
'loc_level': 2,
'parents': {
'state_id': state_id
}
}
if block_id not in transformed_locations:
transformed_locations[block_id] = {
'loc_level': 3,
'parents': {
'district_id': district_id,
'state_id': state_id
}
}
return transformed_locations
def get_user_locations(self):
user_locations = list()
for user in self.dashboard_users:
state_id, district_id, block_id, user_level = None, None, None, None
usr_assigned_actual_loc = user['location_id'] and user['location_id'] in self.transformed_locations
if usr_assigned_actual_loc and user.get('is_active'):
user_level = self.transformed_locations.get(user['location_id'])['loc_level']
if user_level == 1:
state_id = user['location_id']
district_id = 'All'
block_id = 'All'
elif user_level == 2:
state_id = self.transformed_locations.get(user['location_id'])['parents']['state_id']
district_id = user['location_id']
block_id = 'All'
elif user_level == 3:
state_id = self.transformed_locations.get(user['location_id'])['parents']['state_id']
district_id = self.transformed_locations.get(user['location_id'])['parents']['district_id']
block_id = user['location_id']
user_locations.append((
user['username'],
state_id,
district_id,
block_id,
user_level
))
return user_locations
@property
def tablename(self):
return "{}_{}".format(self.aggregate_parent_table, self.date.strftime("%Y-%m-%d"))
def drop_table_query(self):
return 'DROP TABLE IF EXISTS "{}"'.format(self.tablename)
def create_table_query(self):
return """
CREATE TABLE IF NOT EXISTS "{tablename}" (
CHECK (date = DATE %(date)s),
LIKE "{parent_tablename}" INCLUDING DEFAULTS INCLUDING CONSTRAINTS INCLUDING INDEXES
) INHERITS ("{parent_tablename}")
""".format(
parent_tablename=self.aggregate_parent_table,
tablename=self.tablename,
), {
"date": self.date.strftime("%Y-%m-%d"),
}
def get_last_agg_date(self):
from custom.icds_reports.models.aggregate import DashboardUserActivityReport
        # because we don't expect the report to fail for 7 consecutive days
seven_days_back = self.date - timedelta(days=7)
result = (DashboardUserActivityReport.objects.
filter(date__lt=self.date.strftime("%Y-%m-%d"), date__gt=seven_days_back).
aggregate(Max('date')))
last_agg_date = result.get('date__max')
return last_agg_date or date(1970, 1, 1) # return the oldest date in default case
def add_latest_users_list(self):
parameters = {'value{}'.format(index): tuple(list(loc) + [self.date])
for index, loc in enumerate(self.get_user_locations())
}
param_keys = ['%({})s'.format(param) for param in parameters.keys()]
return """
INSERT INTO "{tablename}" (
username, state_id, district_id,block_id,
user_level,date
)
VALUES {param_keys}
""".format(
tablename=self.tablename,
param_keys=','.join(param_keys)
), parameters
def rollover_previous_data(self):
query_param = {'date': self.date,
'last_agg_date': self.last_agg_date}
return """
UPDATE "{tablename}" user_activity SET
location_launched = ut.location_launched,
last_activity = ut.last_activity
FROM (
SELECT
username,
location_launched,
last_activity
FROM "{parent_tablename}" WHERE date = %(last_agg_date)s
)ut
WHERE user_activity.username = ut.username
""".format(
tablename=self.tablename,
parent_tablename=self.aggregate_parent_table
), query_param
def update_queries(self):
last_time_to_consider = self.date
latest_month = (last_time_to_consider - timedelta(days=1)).replace(day=1)
yield """
UPDATE "{tablename}" user_activity
SET location_launched = CASE WHEN num_launched_blocks>0 THEN TRUE ELSE FALSE END
FROM (
SELECT
block_id, district_id, state_id, num_launched_blocks,aggregation_level
FROM agg_awc where month=%(latest_month)s and aggregation_level<=3
) ut
WHERE (
user_activity.user_level= ut.aggregation_level AND
user_activity.state_id = ut.state_id AND
user_activity.district_id = ut.district_id AND
user_activity.block_id = ut.block_id AND
user_activity.location_launched is not TRUE
)
""".format(
tablename=self.tablename
), {
'latest_month': latest_month
}
yield """
UPDATE "{tablename}" user_activity
SET
last_activity = ut.last_activity
FROM (
SELECT audit.username, max(audit.time_of_use) AS last_activity FROM "{tablename}" user_activity
left join icds_audit_entry_record audit ON user_activity.username = audit.username
where audit.time_of_use>=%(last_agg_date)s AND
audit.time_of_use<%(last_time_to_consider)s
GROUP BY audit.username
)ut
WHERE user_activity.username = ut.username and ut.last_activity is not null;
""".format(
tablename=self.tablename
), {
'last_agg_date': self.last_agg_date,
'last_time_to_consider': last_time_to_consider
}
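# A hedged driver sketch (assumes a Django DB cursor via `from django.db import
# connection`; this wiring is not part of the module above):
#   helper = DashboardActivityReportAggregate(date(2020, 1, 1))
#   with connection.cursor() as cursor:
#       helper.aggregate(cursor)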
|
"""
serialize_to_hdf5
=================
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class serialize_to_hdf5(Operator):
"""Serialize the inputs in an hdf5 format.
Parameters
----------
file_path : str
Output file path with .h5 extension
export_floats : bool
Converts double to float to reduce file size
(default is true)
export_flat_vectors : bool
If true, vectors and matrices data are
exported flat (x1,y1,z1,x2,y2,z2..)
(default is false)
data1 :
Only the data set explicitly to export is
exported
data2 :
Only the data set explicitly to export is
exported
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.serialization.serialize_to_hdf5()
>>> # Make input connections
>>> my_file_path = str()
>>> op.inputs.file_path.connect(my_file_path)
>>> my_export_floats = bool()
>>> op.inputs.export_floats.connect(my_export_floats)
>>> my_export_flat_vectors = bool()
>>> op.inputs.export_flat_vectors.connect(my_export_flat_vectors)
    >>> my_data1 = dpf.Field()  # any DPF object can be connected here
    >>> op.inputs.data1.connect(my_data1)
    >>> my_data2 = dpf.Field()  # any DPF object can be connected here
>>> op.inputs.data2.connect(my_data2)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.serialization.serialize_to_hdf5(
... file_path=my_file_path,
... export_floats=my_export_floats,
... export_flat_vectors=my_export_flat_vectors,
... data1=my_data1,
... data2=my_data2,
... )
"""
def __init__(
self,
file_path=None,
export_floats=None,
export_flat_vectors=None,
data1=None,
data2=None,
config=None,
server=None,
):
super().__init__(name="serialize_to_hdf5", config=config, server=server)
self._inputs = InputsSerializeToHdf5(self)
self._outputs = OutputsSerializeToHdf5(self)
if file_path is not None:
self.inputs.file_path.connect(file_path)
if export_floats is not None:
self.inputs.export_floats.connect(export_floats)
if export_flat_vectors is not None:
self.inputs.export_flat_vectors.connect(export_flat_vectors)
if data1 is not None:
self.inputs.data1.connect(data1)
if data2 is not None:
self.inputs.data2.connect(data2)
@staticmethod
def _spec():
description = """Serialize the inputs in an hdf5 format."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="file_path",
type_names=["string"],
optional=False,
document="""Output file path with .h5 extension""",
),
1: PinSpecification(
name="export_floats",
type_names=["bool"],
optional=False,
document="""Converts double to float to reduce file size
(default is true)""",
),
2: PinSpecification(
name="export_flat_vectors",
type_names=["bool"],
optional=False,
document="""If true, vectors and matrices data are
exported flat (x1,y1,z1,x2,y2,z2..)
(default is false)""",
),
3: PinSpecification(
name="data",
type_names=["any"],
optional=False,
document="""Only the data set explicitly to export is
exported""",
),
4: PinSpecification(
name="data",
type_names=["any"],
optional=False,
document="""Only the data set explicitly to export is
exported""",
),
},
map_output_pin_spec={},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
        This config can then be changed to the user's needs and used to
        instantiate the operator. The Configuration allows you to customize
        how the operator will process the operation.
        Parameters
        ----------
        server : server.DPFServer, optional
            Server with channel connected to the remote or local instance. When
            ``None``, attempts to use the global server.
"""
return Operator.default_config(name="serialize_to_hdf5", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsSerializeToHdf5
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsSerializeToHdf5
"""
return super().outputs
class InputsSerializeToHdf5(_Inputs):
"""Intermediate class used to connect user inputs to
serialize_to_hdf5 operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.serialize_to_hdf5()
>>> my_file_path = str()
>>> op.inputs.file_path.connect(my_file_path)
>>> my_export_floats = bool()
>>> op.inputs.export_floats.connect(my_export_floats)
>>> my_export_flat_vectors = bool()
>>> op.inputs.export_flat_vectors.connect(my_export_flat_vectors)
    >>> my_data1 = dpf.Field()  # any DPF object can be connected here
    >>> op.inputs.data1.connect(my_data1)
    >>> my_data2 = dpf.Field()  # any DPF object can be connected here
>>> op.inputs.data2.connect(my_data2)
"""
def __init__(self, op: Operator):
super().__init__(serialize_to_hdf5._spec().inputs, op)
self._file_path = Input(serialize_to_hdf5._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._file_path)
self._export_floats = Input(serialize_to_hdf5._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._export_floats)
self._export_flat_vectors = Input(
serialize_to_hdf5._spec().input_pin(2), 2, op, -1
)
self._inputs.append(self._export_flat_vectors)
self._data1 = Input(serialize_to_hdf5._spec().input_pin(3), 3, op, 0)
self._inputs.append(self._data1)
self._data2 = Input(serialize_to_hdf5._spec().input_pin(4), 4, op, 1)
self._inputs.append(self._data2)
@property
def file_path(self):
"""Allows to connect file_path input to the operator.
Output file path with .h5 extension
Parameters
----------
my_file_path : str
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.serialize_to_hdf5()
>>> op.inputs.file_path.connect(my_file_path)
>>> # or
>>> op.inputs.file_path(my_file_path)
"""
return self._file_path
@property
def export_floats(self):
"""Allows to connect export_floats input to the operator.
Converts double to float to reduce file size
(default is true)
Parameters
----------
my_export_floats : bool
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.serialize_to_hdf5()
>>> op.inputs.export_floats.connect(my_export_floats)
>>> # or
>>> op.inputs.export_floats(my_export_floats)
"""
return self._export_floats
@property
def export_flat_vectors(self):
"""Allows to connect export_flat_vectors input to the operator.
If true, vectors and matrices data are
exported flat (x1,y1,z1,x2,y2,z2..)
(default is false)
Parameters
----------
my_export_flat_vectors : bool
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.serialize_to_hdf5()
>>> op.inputs.export_flat_vectors.connect(my_export_flat_vectors)
>>> # or
>>> op.inputs.export_flat_vectors(my_export_flat_vectors)
"""
return self._export_flat_vectors
@property
def data1(self):
"""Allows to connect data1 input to the operator.
Only the data set explicitly to export is
exported
Parameters
----------
my_data1 :
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.serialize_to_hdf5()
>>> op.inputs.data1.connect(my_data1)
>>> # or
>>> op.inputs.data1(my_data1)
"""
return self._data1
@property
def data2(self):
"""Allows to connect data2 input to the operator.
Only the data set explicitly to export is
exported
Parameters
----------
my_data2 :
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.serialize_to_hdf5()
>>> op.inputs.data2.connect(my_data2)
>>> # or
>>> op.inputs.data2(my_data2)
"""
return self._data2
class OutputsSerializeToHdf5(_Outputs):
"""Intermediate class used to get outputs from
serialize_to_hdf5 operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.serialization.serialize_to_hdf5()
>>> # Connect inputs : op.inputs. ...
"""
def __init__(self, op: Operator):
super().__init__(serialize_to_hdf5._spec().outputs, op)
|
# Generated by Django 3.0.7 on 2020-06-08 17:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('post', '0005_post_previous_post'),
]
operations = [
migrations.AddField(
model_name='post',
name='next_post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='next', to='post.Post'),
),
migrations.AlterField(
model_name='post',
name='previous_post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='previous', to='post.Post'),
),
]
|
import os
import mock
import pytest
from six.moves import shlex_quote
from mlflow.exceptions import ExecutionException
from mlflow.projects._project_spec import EntryPoint
from mlflow.utils.file_utils import TempDir
from tests.projects.utils import load_project, TEST_PROJECT_DIR
def test_entry_point_compute_params():
"""
Tests that EntryPoint correctly computes a final set of parameters to use when running a project
"""
project = load_project()
entry_point = project.get_entry_point("greeter")
# Pass extra "excitement" param, use default value for `greeting` param
with TempDir() as storage_dir:
params, extra_params = entry_point.compute_parameters(
{"name": "friend", "excitement": 10}, storage_dir)
assert params == {"name": "friend", "greeting": "hi"}
assert extra_params == {"excitement": "10"}
# Don't pass extra "excitement" param, pass value for `greeting`
params, extra_params = entry_point.compute_parameters(
{"name": "friend", "greeting": "hello"}, storage_dir)
assert params == {"name": "friend", "greeting": "hello"}
assert extra_params == {}
# Raise exception on missing required parameter
with pytest.raises(ExecutionException):
entry_point.compute_parameters({}, storage_dir)
def test_entry_point_compute_command():
"""
Tests that EntryPoint correctly computes the command to execute in order to run the entry point.
"""
project = load_project()
entry_point = project.get_entry_point("greeter")
with TempDir() as tmp:
storage_dir = tmp.path()
command = entry_point.compute_command({"name": "friend", "excitement": 10}, storage_dir)
assert command == "python greeter.py hi friend --excitement 10"
with pytest.raises(ExecutionException):
entry_point.compute_command({}, storage_dir)
# Test shell escaping
name_value = "friend; echo 'hi'"
command = entry_point.compute_command({"name": name_value}, storage_dir)
assert command == "python greeter.py %s %s" % (shlex_quote("hi"), shlex_quote(name_value))
def test_path_parameter():
"""
Tests that MLflow file-download APIs get called when necessary for arguments of type `path`.
"""
project = load_project()
entry_point = project.get_entry_point("line_count")
with mock.patch("mlflow.data.download_uri") as download_uri_mock:
download_uri_mock.return_value = 0
# Verify that we don't attempt to call download_uri when passing a local file to a
# parameter of type "path"
with TempDir() as tmp:
dst_dir = tmp.path()
local_path = os.path.join(TEST_PROJECT_DIR, "MLproject")
params, _ = entry_point.compute_parameters(
user_parameters={"path": local_path},
storage_dir=dst_dir)
assert params["path"] == os.path.abspath(local_path)
assert download_uri_mock.call_count == 0
# Verify that we raise an exception when passing a non-existent local file to a
# parameter of type "path"
with TempDir() as tmp, pytest.raises(ExecutionException):
dst_dir = tmp.path()
entry_point.compute_parameters(
user_parameters={"path": os.path.join(dst_dir, "some/nonexistent/file")},
storage_dir=dst_dir)
# Verify that we do call `download_uri` when passing a URI to a parameter of type "path"
for i, prefix in enumerate(["dbfs:/", "s3://", "gs://"]):
with TempDir() as tmp:
dst_dir = tmp.path()
params, _ = entry_point.compute_parameters(
user_parameters={"path": os.path.join(prefix, "some/path")},
storage_dir=dst_dir)
assert os.path.dirname(params["path"]) == dst_dir
assert download_uri_mock.call_count == i + 1
def test_uri_parameter():
"""Tests parameter resolution for parameters of type `uri`."""
project = load_project()
entry_point = project.get_entry_point("download_uri")
with mock.patch("mlflow.data.download_uri") as download_uri_mock, TempDir() as tmp:
dst_dir = tmp.path()
# Test that we don't attempt to locally download parameters of type URI
entry_point.compute_command(user_parameters={"uri": "file://%s" % dst_dir},
storage_dir=dst_dir)
assert download_uri_mock.call_count == 0
# Test that we raise an exception if a local path is passed to a parameter of type URI
with pytest.raises(ExecutionException):
entry_point.compute_command(user_parameters={"uri": dst_dir}, storage_dir=dst_dir)
def test_params():
defaults = {
"alpha": "float",
"l1_ratio": {"type": "float", "default": 0.1},
"l2_ratio": {"type": "float", "default": 0.0003},
"random_str": {"type": "string", "default": "hello"},
}
entry_point = EntryPoint("entry_point_name", defaults, "command_name script.py")
user1 = {}
with pytest.raises(ExecutionException):
entry_point._validate_parameters(user1)
user_2 = {"beta": 0.004}
with pytest.raises(ExecutionException):
entry_point._validate_parameters(user_2)
user_3 = {"alpha": 0.004, "gamma": 0.89}
expected_final_3 = {"alpha": '0.004', "l1_ratio": '0.1', "l2_ratio": '0.0003',
"random_str": "hello"}
expected_extra_3 = {"gamma": "0.89"}
final_3, extra_3 = entry_point.compute_parameters(user_3, None)
assert expected_extra_3 == extra_3
assert expected_final_3 == final_3
user_4 = {"alpha": 0.004, "l1_ratio": 0.0008, "random_str_2": "hello"}
expected_final_4 = {"alpha": '0.004', "l1_ratio": '0.0008', "l2_ratio": '0.0003',
"random_str": "hello"}
expected_extra_4 = {"random_str_2": "hello"}
final_4, extra_4 = entry_point.compute_parameters(user_4, None)
assert expected_extra_4 == extra_4
assert expected_final_4 == final_4
user_5 = {"alpha": -0.99, "random_str": "hi"}
expected_final_5 = {"alpha": '-0.99', "l1_ratio": '0.1', "l2_ratio": '0.0003',
"random_str": "hi"}
expected_extra_5 = {}
final_5, extra_5 = entry_point.compute_parameters(user_5, None)
assert expected_final_5 == final_5
assert expected_extra_5 == extra_5
user_6 = {"alpha": 0.77, "ALPHA": 0.89}
expected_final_6 = {"alpha": '0.77', "l1_ratio": '0.1', "l2_ratio": '0.0003',
"random_str": "hello"}
expected_extra_6 = {"ALPHA": "0.89"}
final_6, extra_6 = entry_point.compute_parameters(user_6, None)
assert expected_extra_6 == extra_6
assert expected_final_6 == final_6
def test_path_params():
with TempDir() as tmp:
dest_path = tmp.path()
data_file = "s3://path.test/resources/data_file.csv"
defaults = {
"constants": {"type": "uri", "default": "s3://path.test/b1"},
"data": {"type": "path", "default": data_file}
}
entry_point = EntryPoint("entry_point_name", defaults, "command_name script.py")
with mock.patch("mlflow.data.download_uri") as download_uri_mock:
final_1, extra_1 = entry_point.compute_parameters({}, None)
assert (final_1 == {"constants": "s3://path.test/b1", "data": data_file})
assert (extra_1 == {})
assert download_uri_mock.call_count == 0
with mock.patch("mlflow.data.download_uri") as download_uri_mock:
user_2 = {"alpha": 0.001, "constants": "s3://path.test/b_two"}
final_2, extra_2 = entry_point.compute_parameters(user_2, None)
assert (final_2 == {"constants": "s3://path.test/b_two", "data": data_file})
assert (extra_2 == {"alpha": "0.001"})
assert download_uri_mock.call_count == 0
with mock.patch("mlflow.data.download_uri") as download_uri_mock:
user_3 = {"alpha": 0.001}
final_3, extra_3 = entry_point.compute_parameters(user_3, dest_path)
assert (final_3 == {"constants": "s3://path.test/b1",
"data": "%s/data_file.csv" % dest_path})
assert (extra_3 == {"alpha": "0.001"})
assert download_uri_mock.call_count == 1
with mock.patch("mlflow.data.download_uri") as download_uri_mock:
user_4 = {"data": "s3://another.example.test/data_stash/images.tgz"}
final_4, extra_4 = entry_point.compute_parameters(user_4, dest_path)
assert (final_4 == {"constants": "s3://path.test/b1",
"data": "%s/images.tgz" % dest_path})
assert (extra_4 == {})
assert download_uri_mock.call_count == 1
|
import csv
from urllib import request
from bs4 import BeautifulSoup
from house_info import house, gethtml_bs
def get_city_dict():
    with open('citys.csv') as f:
        reader = csv.reader(f)
return dict(reader)
def get_area_dict(url):
html, bs = gethtml_bs(url)
areas_tag = bs.find('div', {'data-role': 'ershoufang'}).findChildren('a')
return {tag.get_text(): url[0:-12] + tag.get('href') for tag in areas_tag}
def get_ershoufang_url(url):
"""并不是所有的城市都有二手房功能"""
html, bs = gethtml_bs(url)
ershoufang_url = url+'ershoufang/'
return ershoufang_url if bs.find('a', {'href': ershoufang_url}) else None
def run():
citys = get_city_dict()
print(', '.join(citys.keys()))
    print('Please enter a city: ')
input_name = input()
try:
city_url = citys[input_name]
print(input_name, city_url)
ershoufang_url = get_ershoufang_url(city_url)
print(ershoufang_url)
if ershoufang_url:
areas = get_area_dict(ershoufang_url)
print(', '.join(areas.keys()))
            print('Please enter a district: ')
input_area = input()
area_url = areas[input_area]
print(input_area, area_url)
            # Fetch the required house info and write it to houses.csv
house(area_url)
else:
            print('This city does not support second-hand housing yet, stay tuned!')
except KeyError as k:
print("输入有误!")
run()
except:
raise
if __name__ == '__main__':
run()
|
import random
"""This program plays a game of Rock, Paper, Scissors between two Players,
and reports both Player's scores each round."""
"""The Player class is the parent class for all of the Players
in this game"""
class Player:
moves = ['rock', 'paper', 'scissors']
def __init__(self):
        # Initialize my_move with the full move list used by the
        # move function in the class CyclePlayer
        self.my_move = self.moves
        # First round: random choice
        self.their_move = random.choice(self.moves)
def learn(self, my_move, their_move):
# Storing moves of players
self.my_move = my_move
self.their_move = their_move
class RandomPlayer(Player):
def move(self):
# Computer chooses random move
return random.choice(self.moves)
class ReflectPlayer(Player):
def move(self):
# Computer plays the same move the player
# played the previous round
return self.their_move
class CyclePlayer(Player):
def move(self):
        # Computer chooses a different move
        # from the last round
if self.my_move == self.moves[0]:
return self.moves[1]
elif self.my_move == self.moves[1]:
return self.moves[2]
else:
return self.moves[0]
class HumanPlayer(Player):
def move(self):
# Unless there is a match with the list or quit,
# keep asking for input
while True:
move_human = input("Rock, paper, scissors? > ")
if move_human.lower() in self.moves:
return move_human.lower()
elif move_human.lower() == 'quit':
print("Goodbye!\n")
exit()
class Game:
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
# Initializing score
self.score_p1 = 0
self.score_p2 = 0
def beats(self, one, two):
return ((one == 'rock' and two == 'scissors') or
(one == 'scissors' and two == 'paper') or
(one == 'paper' and two == 'rock'))
def rounds(self):
# Unless there is a valid choice or quit,
# keep asking for input
while True:
self.number_rounds = input(
"How many rounds do you want want play? > ")
if self.number_rounds.isdigit():
return self.number_rounds
elif self.number_rounds.lower() == 'quit':
print("Goodbye!\n")
exit()
def play_round(self):
# Storing game move
move1 = self.p1.move()
move2 = self.p2.move()
        # Result of the match and show player scores
if self.beats(move1, move2):
self.score_p1 += 1
winner = '** PLAYER ONE WINS **'
        elif move1 == move2:
            # Tie: scores stay unchanged
            winner = '** TIE **'
else:
self.score_p2 += 1
winner = '** PLAYER TWO WINS **'
# Output the match information
print(
f"> You played {move1}"
f"\n> Opponent played {move2}"
f"\n{winner}"
f"\nScore: Player one ( {self.score_p1} ),"
f"Player two ( {self.score_p2} )"
)
self.p1.learn(move1, move2)
self.p2.learn(move2, move1)
def play_game(self):
print(
">>>> Game start! <<<<"
"\n(To quit game, please enter \'quit\'"
" when the game will ask for the number of rounds or your move)"
)
self.rounds()
        # Play the requested rounds, then report the final result
for round in range(int(self.number_rounds)):
print(f"\nRound {round + 1} --")
self.play_round()
if self.score_p1 == self.score_p2:
print(
f"\n-- The game ended in a tie! --"
f"\nScore: Player one ( {self.score_p1} ),"
f"Player two ( {self.score_p2} )"
)
elif self.score_p1 > self.score_p2:
print(
f"\n-- Player ONE has won! --"
f"\nScore: Player one ( {self.score_p1} )*,"
f"Player two ( {self.score_p2} )"
)
else:
print(
f"\n-- Player TWO has won! --"
f"\nScore: Player one ( {self.score_p1} ),"
f"Player two ( {self.score_p2} )*"
)
if __name__ == '__main__':
# The game will happen with a human player and
# randomly chosen attitude from the computer
game = Game(HumanPlayer(), random.choice(
[RandomPlayer(), ReflectPlayer(), CyclePlayer()]))
game.play_game()
|
import os
import pandas as pd
import pypospack.utils
from pypospack.pyposmat.data.pipeline import PyposmatPipeline
pypospack_root_dir = pypospack.utils.get_pypospack_root_directory()
configuration_dir = 'examples/PCA_param_clusters_in_qoi_space/configuration/'
config_fn_0 = os.path.join(pypospack_root_dir,
configuration_dir,
'configure_param_clustering.in')
config_fn_1 = os.path.join(pypospack_root_dir,
configuration_dir,
'configure_qoi_pca_transform.in')
config_fn_2 = os.path.join(pypospack_root_dir,
configuration_dir,
'configure_qoi_plot.in')
pyposmat_data_fn = os.path.join(pypospack_root_dir,
'data/Ni__eam__born_exp_fs__3.5NN/pyposmat.kde.4.out')
if __name__ == "__main__":
# normalize, pca transform, and cluster parameter space
pipeline_0 = PyposmatPipeline(configuration_fn=config_fn_0,
data_fn=pyposmat_data_fn)
pipeline_0.read_configuration(config_fn_0)
pipeline_0.read_data(pyposmat_data_fn)
pipeline_0.run()
# normalize and pca transform qoi space
pipeline_1 = PyposmatPipeline(configuration_fn=config_fn_1,
data_fn=pyposmat_data_fn)
pipeline_1.read_configuration(config_fn_1)
pipeline_1.read_data(pyposmat_data_fn)
pipeline_1.run()
pipeline_1.df['cluster_id'] = pipeline_0.df['cluster_id'] # transfer cluster labels
# plot parameter clusters in qoi space
pipeline_2 = PyposmatPipeline(configuration_fn=config_fn_2,
df=pipeline_1.df)
pipeline_2.read_configuration(config_fn_2)
pipeline_2.run()
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for ImageIOTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
if not (hasattr(tf, "version") and tf.version.VERSION.startswith("2.")):
tf.compat.v1.enable_eager_execution()
import tensorflow_io as tfio # pylint: disable=wrong-import-position
def test_tiff_io_tensor():
"""Test case for TIFFImageIOTensor"""
width = 560
height = 320
channels = 4
images = []
for filename in [
"small-00.png",
"small-01.png",
"small-02.png",
"small-03.png",
"small-04.png"]:
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
"test_image",
filename), 'rb') as f:
png_contents = f.read()
image_v = tf.image.decode_png(png_contents, channels=channels)
assert image_v.shape == [height, width, channels]
images.append(image_v)
filename = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "test_image", "small.tiff")
filename = "file://" + filename
tiff = tfio.IOTensor.from_tiff(filename)
assert tiff.keys == list(range(5))
for i in tiff.keys:
assert np.all(images[i].numpy() == tiff(i).to_tensor().numpy())
if __name__ == "__main__":
    tf.test.main()
|
# pyOCD debugger
# Copyright (c) 2006-2013,2018 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# -*- coding: utf-8 -*-
"""The APFS file entry implementation."""
from dfdatetime import apfs_time as dfdatetime_apfs_time
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import apfs_path_spec
from dfvfs.vfs import attribute
from dfvfs.vfs import apfs_attribute
from dfvfs.vfs import file_entry
class APFSDirectory(file_entry.Directory):
"""File system directory that uses pyfsapfs."""
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
APFSPathSpec: APFS path specification.
"""
try:
fsapfs_file_entry = self._file_system.GetAPFSFileEntryByPathSpec(
self.path_spec)
except errors.PathSpecError:
return
location = getattr(self.path_spec, 'location', None)
for fsapfs_sub_file_entry in fsapfs_file_entry.sub_file_entries:
directory_entry = fsapfs_sub_file_entry.name
if not location or location == self._file_system.PATH_SEPARATOR:
directory_entry = self._file_system.JoinPath([directory_entry])
else:
directory_entry = self._file_system.JoinPath([
location, directory_entry])
yield apfs_path_spec.APFSPathSpec(
identifier=fsapfs_sub_file_entry.identifier, location=directory_entry,
parent=self.path_spec.parent)
class APFSFileEntry(file_entry.FileEntry):
"""File system file entry that uses pyfsapfs."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_APFS
# Mappings of APFS file types to dfVFS file entry types.
_ENTRY_TYPES = {
0x1000: definitions.FILE_ENTRY_TYPE_PIPE,
0x2000: definitions.FILE_ENTRY_TYPE_DEVICE,
0x4000: definitions.FILE_ENTRY_TYPE_DIRECTORY,
0x6000: definitions.FILE_ENTRY_TYPE_DEVICE,
0x8000: definitions.FILE_ENTRY_TYPE_FILE,
0xa000: definitions.FILE_ENTRY_TYPE_LINK,
0xc000: definitions.FILE_ENTRY_TYPE_SOCKET,
0xe000: definitions.FILE_ENTRY_TYPE_WHITEOUT}
def __init__(
self, resolver_context, file_system, path_spec, fsapfs_file_entry=None,
is_root=False, is_virtual=False):
"""Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
fsapfs_file_entry (Optional[pyfsapfs.file_entry]): APFS file entry.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file
entry emulated by the corresponding file system.
Raises:
BackEndError: if the pyfsapfs file entry is missing.
"""
if not fsapfs_file_entry:
fsapfs_file_entry = file_system.GetAPFSFileEntryByPathSpec(path_spec)
if not fsapfs_file_entry:
raise errors.BackEndError('Missing pyfsapfs file entry.')
super(APFSFileEntry, self).__init__(
resolver_context, file_system, path_spec, is_root=is_root,
is_virtual=is_virtual)
self._fsapfs_file_entry = fsapfs_file_entry
self.entry_type = self._ENTRY_TYPES.get(
fsapfs_file_entry.file_mode & 0xf000, None)
def _GetAttributes(self):
"""Retrieves the attributes.
Returns:
list[Attribute]: attributes.
"""
if self._attributes is None:
stat_attribute = self._GetStatAttribute()
self._attributes = [stat_attribute]
for fsapfs_extended_attribute in (
self._fsapfs_file_entry.extended_attributes):
extended_attribute = apfs_attribute.APFSExtendedAttribute(
fsapfs_extended_attribute)
self._attributes.append(extended_attribute)
return self._attributes
def _GetDirectory(self):
"""Retrieves a directory.
Returns:
APFSDirectory: a directory.
"""
if self._directory is None:
self._directory = APFSDirectory(self._file_system, self.path_spec)
return self._directory
def _GetLink(self):
"""Retrieves the link.
Returns:
str: path of the linked file.
"""
if self._link is None:
self._link = self._fsapfs_file_entry.symbolic_link_target
if self._link and self._link[0] != self._file_system.PATH_SEPARATOR:
# TODO: make link absolute.
self._link = '/{0:s}'.format(self._link)
return self._link
def _GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object.
"""
stat_object = super(APFSFileEntry, self)._GetStat()
# Ownership and permissions stat information.
stat_object.mode = self._fsapfs_file_entry.file_mode & 0x0fff
stat_object.uid = self._fsapfs_file_entry.owner_identifier
stat_object.gid = self._fsapfs_file_entry.group_identifier
# Other stat information.
stat_object.ino = self._fsapfs_file_entry.identifier
stat_object.fs_type = 'APFS'
return stat_object
def _GetStatAttribute(self):
"""Retrieves a stat attribute.
Returns:
StatAttribute: a stat attribute.
"""
stat_attribute = attribute.StatAttribute()
stat_attribute.group_identifier = self._fsapfs_file_entry.group_identifier
stat_attribute.inode_number = self._fsapfs_file_entry.identifier
stat_attribute.mode = self._fsapfs_file_entry.file_mode & 0x0fff
# TODO: implement number of hard links support in pyfsapfs
# stat_attribute.number_of_links = self._fsapfs_file_entry.number_of_links
stat_attribute.owner_identifier = self._fsapfs_file_entry.owner_identifier
stat_attribute.size = self._fsapfs_file_entry.size
stat_attribute.type = self.entry_type
return stat_attribute
def _GetSubFileEntries(self):
"""Retrieves a sub file entries generator.
Yields:
APFSFileEntry: a sub file entry.
"""
if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
directory = self._GetDirectory()
for path_spec in directory.entries:
yield APFSFileEntry(
self._resolver_context, self._file_system, path_spec)
@property
def access_time(self):
"""dfdatetime.DateTimeValues: access time or None if not available."""
timestamp = self._fsapfs_file_entry.get_access_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def added_time(self):
"""dfdatetime.DateTimeValues: added time or None if not available."""
timestamp = self._fsapfs_file_entry.get_added_time_as_integer()
if timestamp is None:
return None
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def change_time(self):
"""dfdatetime.DateTimeValues: change time or None if not available."""
timestamp = self._fsapfs_file_entry.get_inode_change_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def creation_time(self):
"""dfdatetime.DateTimeValues: creation time or None if not available."""
timestamp = self._fsapfs_file_entry.get_creation_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def modification_time(self):
"""dfdatetime.DateTimeValues: modification time or None if not available."""
timestamp = self._fsapfs_file_entry.get_modification_time_as_integer()
return dfdatetime_apfs_time.APFSTime(timestamp=timestamp)
@property
def name(self):
"""str: name of the file entry, which does not include the full path."""
    # The root directory file name is typically 'root'; dfVFS, however, uses ''.
if self._is_root:
return ''
return self._fsapfs_file_entry.name
@property
def size(self):
"""int: size of the file entry in bytes or None if not available."""
return self._fsapfs_file_entry.size
def GetAPFSFileEntry(self):
"""Retrieves the APFS file entry.
Returns:
pyfsapfs.file_entry: APFS file entry.
"""
return self._fsapfs_file_entry
def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
APFSFileEntry: linked file entry or None if not available.
"""
link = self._GetLink()
if not link:
return None
# TODO: is there a way to determine the identifier here?
link_identifier = None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = apfs_path_spec.APFSPathSpec(
location=link, parent=parent_path_spec)
is_root = bool(
link == self._file_system.LOCATION_ROOT or
link_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER)
return APFSFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root)
def GetParentFileEntry(self):
"""Retrieves the parent file entry.
Returns:
APFSFileEntry: parent file entry or None if not available.
"""
parent_location = None
location = getattr(self.path_spec, 'location', None)
if location is not None:
parent_location = self._file_system.DirnamePath(location)
if parent_location == '':
parent_location = self._file_system.PATH_SEPARATOR
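    # e.g. location '/a/b' -> parent_location '/a'; location '/a' -> '/'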
parent_identifier = self._fsapfs_file_entry.parent_identifier
if parent_identifier is None:
return None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = apfs_path_spec.APFSPathSpec(
location=parent_location, identifier=parent_identifier,
parent=parent_path_spec)
is_root = bool(
parent_location == self._file_system.LOCATION_ROOT or
parent_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER)
return APFSFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
if __name__ == '__main__':
n = int(input("Value of n? "))
x = float(input("Value of x? "))
S = 0.0
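    # Computes the partial sum S = sum_{k=1}^{n} ln(k*x) / k**2
    # (requires x > 0 so that ln(k*x) is defined)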
for k in range(1, n + 1):
a = math.log(k * x) / (k * k)
S += a
print(f"S = {S}")
|
# Importing the needed python packages
import numpy as np
import matplotlib.pyplot as plt
# Defining the right hand side of the ODEs (rates of change of species A and B)
def NegativeFBmodel(A,B,kAB,kBA):
dA = 1-kBA*B
dB = kAB*A
return np.array((dA,dB))
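# For this linear system the fixed point is (A*, B*) = (0, 1/kBA); around it
# A'' = -kAB*kBA*A (a harmonic oscillator), so trajectories are closed orbits.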
kAB = 1
kBA = 1
coords = np.linspace(-5, 5, 21)
X, Y = np.meshgrid(coords, coords)
Vx, Vy = NegativeFBmodel(X, Y, kAB, kBA)
plt.quiver(X, Y, Vx, Vy)
plt.xlabel('conc. A')
plt.ylabel('conc. B')
plt.title('Linear negative feedback phase portrait')
plt.show()
|
import hashlib
from django.core.cache import cache
class ExponentialCache(object):
""" ExponentialCache interfaces with the cache for the growth and backoff classes
The `increment` function updates the key's hit count and the `delete_key` function clears it
"""
@classmethod
def _get_cache_key(cls, key):
if isinstance(key, str):
key = key.encode('utf-8')
key_hash = hashlib.md5(key).hexdigest() if key else ''
return 'django-exp-backoff.{}'.format(key_hash)
@classmethod
def increment(cls, key):
cache_key = cls._get_cache_key(key)
try:
return cache.incr(cache_key)
except ValueError:
cache.set(cache_key, 1)
return 1
@classmethod
def delete_key(cls, key):
try:
cache.delete(cls._get_cache_key(key))
except ValueError:
pass
class ExponentialBackoff(ExponentialCache):
""" ExponentialBackoff provides functions for slowly backing off
The `should_backoff` method checks if you should backoff based on whether
the number of hits is a power of 2
"""
@classmethod
def should_backoff(cls, key):
cache_key = cls._get_cache_key(key)
return not cls._number_is_power_of_two(cache.get(cache_key) or 1)
@classmethod
def _number_is_power_of_two(cls, x):
# it turns out that x & (x - 1) == 0 if and only if x is a power of two
# http://stackoverflow.com/a/600306/240553
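        # e.g. 8 & 7 == 0b1000 & 0b0111 == 0, while 6 & 5 == 0b0110 & 0b0101 == 0b0100 != 0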
return x > 0 and (x & (x - 1) == 0)
class ExponentialGrowth(ExponentialCache):
@classmethod
def exponential(cls, key, base):
cache_key = cls._get_cache_key(key)
return base ** (cache.get(cache_key) or 0)
def is_rate_limited(rate_limit_key):
ExponentialBackoff.increment(rate_limit_key)
return ExponentialBackoff.should_backoff(rate_limit_key)
def get_exponential(rate_limit_key, exponential_base=2):
ExponentialGrowth.increment(rate_limit_key)
return ExponentialGrowth.exponential(rate_limit_key, exponential_base)
def clear_limit(rate_limit_key):
ExponentialCache.delete_key(rate_limit_key)
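# Minimal usage sketch (assumes a configured Django cache backend; the key
# below is illustrative). Since should_backoff only lets power-of-two hit
# counts through, attempts 1, 2, 4 and 8 pass and the rest are limited:
#
#     for attempt in range(10):
#         limited = is_rate_limited('login:user:42')
#     clear_limit('login:user:42')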
|
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement T2T-ViT Transformer
"""
import copy
import math
#from scipy.stats import ortho_group
import numpy as np
import paddle
import paddle.nn as nn
from droppath import DropPath
class Identity(nn.Layer):
""" Identity layer
The output of this layer is the input without any change.
Use this layer to avoid using 'if' condition in forward methods
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class PatchEmbedding(nn.Layer):
"""Patch Embeddings
    Apply patch embeddings (tokens-to-token) on input images. The embedding is
    implemented using one of the following ops: Performer, Transformer.
    Attributes:
        image_size: int, input image size, default: 224
        token_type: string, type of token embedding, in ['performer', 'transformer', 'convolution'], default: 'performer'
in_channels: int, input image channels, default: 3
embed_dim: int, embedding dimension, default: 96
token_dim: int, intermediate dim for patch_embedding module, default: 64
"""
def __init__(self,
image_size=224,
token_type='performer',
in_channels=3,
embed_dim=768,
token_dim=64):
super().__init__()
if token_type == 'transformer':
# paddle v 2.1 has bugs on nn.Unfold,
# use paddle.nn.functional.unfold method instead
# replacements see forward method.
#self.soft_split0 = nn.Unfold(kernel_size=7, strides=4, paddings=2)
#self.soft_split1 = nn.Unfold(kernel_size=3, strides=2, paddings=1)
#self.soft_split2 = nn.Unfold(kernel_size=3, strides=2, paddings=1)
self.attn1 = TokenTransformer(dim=in_channels * 7 * 7,
in_dim=token_dim,
num_heads=1,
mlp_ratio=1.0)
self.attn2 = TokenTransformer(dim=token_dim * 3 * 3,
in_dim=token_dim,
num_heads=1,
mlp_ratio=1.0)
self.proj = nn.Linear(token_dim * 3 * 3, embed_dim)
elif token_type == 'performer':
# paddle v 2.1 has bugs on nn.Unfold,
# use paddle.nn.functional.unfold method instead
# replacements see forward method.
#self.soft_split0 = nn.Unfold(kernel_sizes=7, strides=4, paddings=2)
#self.soft_split1 = nn.Unfold(kernel_sizes=3, strides=2, paddings=1)
#self.soft_split2 = nn.Unfold(kernel_sizes=3, strides=2, paddings=1)
self.attn1 = TokenPerformer(dim=in_channels * 7 * 7,
in_dim=token_dim,
kernel_ratio=0.5)
self.attn2 = TokenPerformer(dim=token_dim * 3 * 3,
in_dim=token_dim,
kernel_ratio=0.5)
self.proj = nn.Linear(token_dim * 3 * 3, embed_dim)
elif token_type == 'convolution': # NOTE: currently not supported!!!
# 1st conv
self.soft_split0 = nn.Conv2D(in_channels=in_channels,
out_channels=token_dim,
kernel_size=7,
stride=4,
padding=2)
# 2nd conv
self.soft_split1 = nn.Conv2D(in_channels=token_dim,
out_channels=token_dim,
kernel_size=3,
stride=2,
padding=1)
# 3rd conv
self.proj = nn.Conv2D(in_channels=token_dim,
out_channels=embed_dim,
kernel_size=3,
stride=2,
padding=1)
else:
raise ValueError(f'token_type: {token_type} is not supported!')
# 3 soft splits, each has stride 4, 2, 2, respectively.
self.num_patches = (image_size // (4 * 2 * 2)) * (image_size // (4 * 2 * 2))
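        # e.g. image_size=224 -> (224 // 16) ** 2 = 14 * 14 = 196 patches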
def forward(self, x):
# x = self.soft_split0(x)
# input x: [B, C, IMAGE_H, IMAGE_W]
x = paddle.nn.functional.unfold(x, kernel_sizes=7, strides=4, paddings=2)
# unfolded x: [B, C * k * k, k * k * num_patches]
x = x.transpose([0, 2, 1])
# transposed x: [B, k * k * num_patches, C * k * k]
x = self.attn1(x)
B, HW, C = x.shape
x = x.transpose([0, 2, 1])
x = x.reshape([B, C, int(np.sqrt(HW)), int(np.sqrt(HW))])
#x = self.soft_split1(x)
x = paddle.nn.functional.unfold(x, kernel_sizes=3, strides=2, paddings=1)
x = x.transpose([0, 2, 1])
x = self.attn2(x)
B, HW, C = x.shape
x = x.transpose([0, 2, 1])
x = x.reshape([B, C, int(np.sqrt(HW)), int(np.sqrt(HW))])
#x = self.soft_split2(x)
x = paddle.nn.functional.unfold(x, kernel_sizes=3, strides=2, paddings=1)
x = x.transpose([0, 2, 1])
x = self.proj(x)
return x
class Mlp(nn.Layer):
""" MLP module
Impl using nn.Linear and activation is GELU, dropout is applied.
Ops: fc -> act -> dropout -> fc -> dropout
Attributes:
fc1: nn.Linear
fc2: nn.Linear
act: GELU
dropout1: dropout after fc1
dropout2: dropout after fc2
"""
def __init__(self, in_features, hidden_features=None, out_features=None, dropout=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
w_attr_1, b_attr_1 = self._init_weights()
self.fc1 = nn.Linear(in_features,
hidden_features,
weight_attr=w_attr_1,
bias_attr=b_attr_1)
w_attr_2, b_attr_2 = self._init_weights()
self.fc2 = nn.Linear(hidden_features,
out_features,
weight_attr=w_attr_2,
bias_attr=b_attr_2)
self.act = nn.GELU()
self.dropout = nn.Dropout(dropout)
def _init_weights(self):
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.XavierUniform())
bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=1e-6))
return weight_attr, bias_attr
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class Attention(nn.Layer):
""" Self-Attention
Args:
dim: int, all heads dimension
dim_head: int, single heads dimension, default: None
num_heads: int, num of heads
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
attention_dropout: float, dropout rate for attention dropout, default: 0.
dropout: float, dropout rate for projection dropout, default: 0.
        skip_connection: bool, if True, use v to do skip connection, used in TokenTransformer
"""
def __init__(self,
dim,
in_dim=None,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attention_dropout=0.,
dropout=0.,
skip_connection=False):
super().__init__()
self.num_heads = num_heads
self.in_dim = in_dim or dim
self.dim_head = dim // num_heads
self.scale = qk_scale or self.dim_head ** -0.5
# same as original repo
self.qkv = nn.Linear(dim, self.in_dim * 3, bias_attr=qkv_bias)
self.attn_dropout = nn.Dropout(attention_dropout)
self.proj = nn.Linear(self.in_dim, self.in_dim)
self.proj_dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(axis=-1)
# use V to do skip connection, used in TokenTransformer
self.skip = skip_connection
def transpose_multihead(self, x):
if self.skip: # token transformer
new_shape = x.shape[:-1] + [self.num_heads, self.in_dim]
else: # regular attention
new_shape = x.shape[:-1] + [self.num_heads, self.dim_head]
x = x.reshape(new_shape)
x = x.transpose([0, 2, 1, 3])
return x
def forward(self, x):
B, H, C = x.shape
qkv = self.qkv(x).chunk(3, axis=-1)
q, k, v = map(self.transpose_multihead, qkv)
q = q * self.scale
attn = paddle.matmul(q, k, transpose_y=True)
attn = self.softmax(attn)
attn = self.attn_dropout(attn)
z = paddle.matmul(attn, v)
z = z.transpose([0, 2, 1, 3])
if self.skip: # token transformer
z = z.reshape([B, -1, self.in_dim])
else: # regular attention
z = z.reshape([B, -1, C])
z = self.proj(z)
z = self.proj_dropout(z)
# skip connection
if self.skip:
z = z + v.squeeze(1)
return z
class Block(nn.Layer):
""" Transformer block layers
Transformer block layers contains regular self-attention layers,
mlp layers, norms layers and residual blocks.
Args:
dim: int, all heads dimension
num_heads: int, num of heads
mlp_ratio: ratio to multiply on mlp input dim as mlp hidden dim, default: 4.
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, scale factor to replace dim_head ** -0.5, default: None
dropout: float, dropout rate for projection dropout, default: 0.
attention_dropout: float, dropout rate for attention dropout, default: 0.
droppath: float, drop path rate, default: 0.
"""
def __init__(self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
dropout=0.,
attention_dropout=0.,
droppath=0.):
super().__init__()
self.norm1 = nn.LayerNorm(dim, epsilon=1e-6)
self.attn = Attention(dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
dropout=dropout,
attention_dropout=attention_dropout)
self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
self.norm2 = nn.LayerNorm(dim, epsilon=1e-6)
self.mlp = Mlp(in_features=dim,
hidden_features=int(dim * mlp_ratio),
dropout=dropout)
def forward(self, x):
h = x
x = self.norm1(x)
x = self.attn(x)
x = self.drop_path(x)
x = h + x
h = x
x = self.norm2(x)
x = self.mlp(x)
x = self.drop_path(x)
x = h + x
return x
class TokenPerformer(nn.Layer):
""" Token Performer layers
Performer layers contains single-attention layers,
mlp layers, norms layers and residual blocks. This module
    is used in 'tokens-to-token', which converts the image into tokens
    and gradually tokenizes them.
Args:
dim: int, all heads dimension
in_dim: int, qkv and out dimension in attention
num_heads: int, num of heads
kernel_ratio: ratio to multiply on prm input dim, default: 0.5.
dropout: float, dropout rate for projection dropout, default: 0.
"""
def __init__(self, dim, in_dim, num_heads=1, kernel_ratio=0.5, dropout=0.1):
super().__init__()
self.embed_dim = in_dim * num_heads
self.kqv = nn.Linear(dim, 3 * self.embed_dim)
self.dropout = nn.Dropout(dropout)
self.proj = nn.Linear(self.embed_dim, self.embed_dim)
self.num_heads = num_heads
self.norm1 = nn.LayerNorm(dim, epsilon=1e-6)
self.norm2 = nn.LayerNorm(self.embed_dim, epsilon=1e-6)
self.mlp = nn.Sequential(nn.Linear(self.embed_dim, self.embed_dim),
nn.GELU(),
nn.Linear(self.embed_dim, self.embed_dim),
nn.Dropout(dropout))
self.m = int(self.embed_dim * kernel_ratio)
self.w = np.random.random(size=(int(self.embed_dim * kernel_ratio), self.embed_dim))
        # TODO: init with orthogonal matrix
#self.w, _ = np.linalg.qr(self.w)
self.w = paddle.create_parameter(
shape=[int(self.embed_dim * kernel_ratio), self.embed_dim],
dtype='float32',
default_initializer=nn.initializer.Assign(self.w / math.sqrt(self.m)))
# paddle version 2.1 does not support einsum
def prm_exp(self, x):
# x: [B, T, hs]
# w: [m, hs]
# return x: B, T, m
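        # Positive random features: phi(x) = exp(w @ x - |x|^2 / 2) / sqrt(m),
        # an unbiased approximation of the softmax kernel (Performer).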
xd = (x * x).sum(axis=-1, keepdim=True)
xd = xd.expand([xd.shape[0], xd.shape[1], self.m]) / 2
# same as einsum('bti,mi->btm', x, self.w)
wtx = paddle.matmul(x, self.w, transpose_y=True)
out = paddle.exp(wtx - xd) / math.sqrt(self.m)
return out
def single_attention(self, x):
kqv = self.kqv(x).chunk(3, axis=-1)
k, q, v = kqv[0], kqv[1], kqv[2]
qp = self.prm_exp(q)
kp = self.prm_exp(k)
        # same as einsum('bti,bi->bt', qp, kp.sum(axis=1)).unsqueeze(2)
D = paddle.matmul(qp, kp.sum(axis=1).unsqueeze(2))
        # same as einsum('bin,bim->bnm', v, kp)
kptv = paddle.matmul(v, kp, transpose_x=True)
        # same as einsum('bti,bni->btn', qp, kptv)
y = paddle.matmul(qp, kptv, transpose_y=True)
y = y / (D.expand([D.shape[0], D.shape[1], self.embed_dim]) + 1e-8)
# skip connection
y = self.proj(y)
y = self.dropout(y)
y = v + y
return y
def forward(self, x):
x = self.norm1(x)
x = self.single_attention(x)
h = x
x = self.norm2(x)
x = self.mlp(x)
x = h + x
return x
class TokenTransformer(nn.Layer):
""" Token Transformer layers
Transformer layers contains regular self-attention layers,
mlp layers, norms layers and residual blocks. This module
    is used in 'tokens-to-token', which converts the image into tokens
    and gradually tokenizes them.
Args:
dim: int, all heads dimension
in_dim: int, qkv and out dimension in attention
num_heads: int, num of heads
mlp_ratio: ratio to multiply on mlp input dim as mlp hidden dim, default: 1.
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, scale factor to replace dim_head ** -0.5, default: None
dropout: float, dropout rate for projection dropout, default: 0.
attention_dropout: float, dropout rate for attention dropout, default: 0.
droppath: float, drop path rate, default: 0.
"""
def __init__(self,
dim,
in_dim,
num_heads,
mlp_ratio=1.0,
qkv_bias=False,
qk_scale=None,
dropout=0.,
attention_dropout=0,
droppath=0.):
super().__init__()
self.norm1 = nn.LayerNorm(dim, epsilon=1e-6)
self.attn = Attention(dim,
in_dim=in_dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
dropout=dropout,
attention_dropout=attention_dropout,
skip_connection=True)
self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
self.norm2 = nn.LayerNorm(in_dim, epsilon=1e-6)
self.mlp = Mlp(in_features=in_dim,
hidden_features=int(in_dim * mlp_ratio),
out_features=in_dim,
dropout=dropout)
def forward(self, x):
x = self.norm1(x)
x = self.attn(x)
h = x
x = self.norm2(x)
x = self.mlp(x)
x = self.drop_path(x)
x = h + x
return x
class T2TViT(nn.Layer):
""" T2T-ViT model
Args:
image_size: int, input image size, default: 224
in_channels: int, input image channels, default: 3
num_classes: int, num of classes, default: 1000
token_type: string, type of token embedding ['performer', 'transformer'], default: 'performer'
embed_dim: int, dim of each patch after patch embedding, default: 768
depth: int, num of self-attention blocks, default: 12
num_heads: int, num of attention heads, default: 12
mlp_ratio: float, mlp hidden dim = mlp_ratio * mlp_in_dim, default: 4.
qkv_bias: bool, if True, qkv projection is set with bias, default: True
qk_scale: float, scale factor to replace dim_head ** -0.5, default: None
dropout: float, dropout rate for linear projections, default: 0.
attention_dropout: float, dropout rate for attention, default: 0.
droppath: float, drop path rate, default: 0.
token_dim: int, intermediate dim for patch_embedding module, default: 64
"""
def __init__(self,
image_size=224,
in_channels=3,
num_classes=1000,
token_type='performer',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
dropout=0.,
attention_dropout=0.,
droppath=0,
token_dim=64):
super().__init__()
self.num_classes = num_classes
        # convert image to patches: T2T-Module
self.patch_embed = PatchEmbedding(image_size=image_size,
token_type=token_type,
in_channels=in_channels,
embed_dim=embed_dim,
token_dim=token_dim)
num_patches = self.patch_embed.num_patches
# tokens add for classification
self.cls_token = paddle.create_parameter(
shape=[1, 1, embed_dim],
dtype='float32',
default_initializer=nn.initializer.Constant(0.0))
# positional embeddings for patch positions
self.pos_embed = paddle.create_parameter(
shape=[1, num_patches + 1, embed_dim],
dtype='float32',
default_initializer=nn.initializer.Constant(0.0))
# dropout for positional embeddings
self.pos_dropout = nn.Dropout(dropout)
        # droppath decay rate
depth_decay = paddle.linspace(0, droppath, depth)
        # create self-attention layers
layer_list = []
for i in range(depth):
block_layers = Block(dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
dropout=dropout,
attention_dropout=attention_dropout,
droppath=depth_decay[i])
layer_list.append(copy.deepcopy(block_layers))
self.blocks = nn.LayerList(layer_list)
self.norm = nn.LayerNorm(embed_dim, epsilon=1e-6)
# classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else Identity()
def forward_features(self, x):
# Patch Embedding
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand([x.shape[0], -1, -1])
x = paddle.concat([cls_tokens, x], axis=1)
x = x + self.pos_embed
x = self.pos_dropout(x)
# Self-Attention blocks
for block in self.blocks:
x = block(x)
x = self.norm(x)
return x[:, 0] # returns only cls_tokens
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def build_t2t_vit(config):
"""build t2t-vit model using config"""
model = T2TViT(image_size=config.DATA.IMAGE_SIZE,
token_type=config.MODEL.TRANS.TOKEN_TYPE,
embed_dim=config.MODEL.TRANS.EMBED_DIM,
depth=config.MODEL.TRANS.DEPTH,
num_heads=config.MODEL.TRANS.NUM_HEADS,
mlp_ratio=config.MODEL.TRANS.MLP_RATIO,
qk_scale=config.MODEL.TRANS.QK_SCALE,
qkv_bias=config.MODEL.TRANS.QKV_BIAS)
return model
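# A minimal smoke-test sketch (the hyper-parameters below are illustrative,
# not the official T2T-ViT settings; assumes paddle is installed):
#
#     model = T2TViT(image_size=224, token_type='performer',
#                    embed_dim=384, depth=4, num_heads=6)
#     logits = model(paddle.randn([1, 3, 224, 224]))  # shape: [1, 1000]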
|
import pygame, math, random, json
pygame.init()
pygame.font.init()
# Basic setup
pygame.display.set_caption('PARTICLES DEMO')
win = pygame.display.set_mode((int(1920 / 1.5), int(1080 / 1.5)))  # set_mode needs ints
clock = pygame.time.Clock()
# Camera
cam = [0, 0]
# A text drawing method
def write(text, x, y, size=32):
    font = pygame.font.SysFont('freesansbold.ttf', size)
text1 = font.render(str(text), True, (255, 255, 255))
win.blit(text1, (x, y))
class Particles_emitter:
# "fire" particles are set as default
def __init__(self, path="data/particles/fire.json"):
# Emitter position
self.pos = [0, 0]
self.particles = []
        # Reads the particle JSON file and parses it into data
        with open(path) as ijson:
            data = json.load(ijson)
self.colour = (data['colour'][0], data['colour'][1], data['colour'][2])
self.initial_velocity = data['initial_velocity']
self.velocity_randomness = data['velocity_randomness']
self.radius = data['radius']
self.gravity = data['gravity']
self.shrink = data['shrink']
self.lifetime = data['lifetime']
self.tone_variance = data['tone_variance']
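        # Expected JSON layout (illustrative values, not the shipped file):
        # {"colour": [226, 88, 34], "initial_velocity": [0, -120],
        #  "velocity_randomness": [[1, 3], [1, 3]], "radius": 6,
        #  "gravity": [0, -60], "shrink": 4, "lifetime": 1.5, "tone_variance": 4}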
def spawn_particle(self):
        # Spawns a new particle at the emitter position with randomized
        # velocity, radius and colour tone
        # particle layout: 0 = x, 1 = y, 2 = radius, 3 = velocity, 4 = colour, 5 = age
darkness = max(random.randrange(self.tone_variance, 11), 1) / 10
vel = self.initial_velocity.copy()
vel[0] *= random.randrange(self.velocity_randomness[0][0], self.velocity_randomness[0][1])
vel[1] *= random.randrange(self.velocity_randomness[1][0], self.velocity_randomness[1][1])
self.particles.append([self.pos[0], self.pos[1], random.randrange(self.radius, int(self.radius * 1.5)), vel, (self.colour[0] * darkness, self.colour[1] * darkness, self.colour[2] * darkness), 0])
def draw(self):
# Draws every particle
for p in self.particles:
pygame.draw.circle(win, p[4], (p[0] - cam[0], p[1] - cam[1]), p[2])
    def update(self, delta):
        # Integrates motion, applies gravity, shrinks and ages each particle.
        # Iterate over a copy so removing particles while looping is safe.
        for p in self.particles[:]:
            p[0] += p[3][0] * delta
            p[1] += p[3][1] * delta
            p[3][1] += self.gravity[1] * delta
            p[3][0] += self.gravity[0] * delta
            p[2] -= (self.shrink + random.randrange(0, 35) / 10) * delta
            p[5] += 1 * delta
            # also drop fully shrunk particles so the radius never goes negative
            if p[5] >= self.lifetime + random.randrange(0, 35) / 10 or p[2] <= 0:
                self.particles.remove(p)
def start():
run = True
wave = 0
# Create emitters
emitters = [Particles_emitter()]
while run:
# Basic stuff
delta = clock.tick(60) / 1000
for e in pygame.event.get():
if e.type == pygame.QUIT:
run = False
# Handles input
keys = pygame.key.get_pressed()
if keys[pygame.K_d]:
cam[0] += 300 * delta
if keys[pygame.K_a]:
cam[0] -= 300 * delta
if keys[pygame.K_w]:
cam[1] -= 300 * delta
if keys[pygame.K_s]:
cam[1] += 300 * delta
if keys[pygame.K_ESCAPE]:
run = False
# Moves the emitter
emitters[0].pos[1] = math.sin(wave * 5) * 256 + win.get_height() / 2
emitters[0].pos[0] = math.cos(wave * 3) * 256 + win.get_width() / 2
win.fill((25, 25, 25))
        # Goes through the emitters: spawn a particle, draw and update each one
for p in emitters:
p.spawn_particle()
p.draw()
p.update(delta)
# Adds to "wave" variable
wave += 1 * delta
write('FPS: ' + str(int(clock.get_fps())), 10, 10)
write('USE WASD TO MOVE CAMERA', 10, 10 + 32)
pygame.display.update()
start()
pygame.quit()
|
import cocotb
from cocotb.triggers import RisingEdge, FallingEdge
def gen():
yield 4
class Cocotb_backend:
def __init__(self, clk, valid, data, ack):
self.__clk = clk
self.__valid = valid
self.__data = data
self.__ack = ack
def decorator(self, func):
return cocotb.coroutine(func)
@cocotb.coroutine
def get_data(self):
yield gen()
return self.__data
    @cocotb.coroutine
    def set_data(self, val):
yield gen()
self.__data = val
@cocotb.coroutine
def get_valid(self):
yield gen()
return self.__valid
@cocotb.coroutine
def set_valid(self, val):
yield gen()
self.__valid = val
@cocotb.coroutine
def get_ack(self):
yield gen()
return self.__ack
@cocotb.coroutine
def set_ack(self, val):
yield gen()
self.__ack = val
def active_edge(self):
yield RisingEdge(self.__clk)
def inactive_edge(self):
yield FallingEdge(self.__clk)
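# A minimal usage sketch (the dut handle and signal names are illustrative;
# assumes a design exposing clk/valid/data/ack):
#
#     backend = Cocotb_backend(dut.clk, dut.valid, dut.data, dut.ack)
#
#     @cocotb.coroutine
#     def drive_word(val):
#         yield backend.set_data(val)
#         yield backend.set_valid(1)
#         yield backend.active_edge()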
|
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from trackintel.visualization.util import regular_figure, save_fig
from trackintel.visualization.osm import plot_osm_streets
def plot_positionfixes(positionfixes, out_filename=None, plot_osm=False):
"""Plots positionfixes (optionally to a file).
Parameters
----------
positionfixes : GeoDataFrame
The positionfixes to plot.
out_filename : str
The file to plot to, if this is not set, the plot will simply be shown.
plot_osm : bool
If this is set to True, it will download an OSM street network and plot
        below the positionfixes.
"""
_, ax = regular_figure()
if plot_osm:
west = positionfixes['geom'].x.min()
east = positionfixes['geom'].x.max()
north = positionfixes['geom'].y.max()
south = positionfixes['geom'].y.min()
plot_osm_streets(north, south, east, west, ax)
positionfixes.plot(ax=ax, markersize=0.5)
if out_filename is not None:
save_fig(out_filename, formats=['png'])
else:
plt.show()
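# Example (assumes a positionfixes GeoDataFrame with a 'geom' column):
#
#     plot_positionfixes(pfs, out_filename='positionfixes.png', plot_osm=False)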
|
# -*- coding: latin-1 -*-
from __future__ import division
from PyQt4 import QtGui
from functools import partial
import sys, time
try:
import picamera
except ImportError:
    # picamera is only available on the Raspberry Pi
    pass
def tira_foto(self):
    '''
    Method that takes photos with the picamera.
    The captured photo is shown in label_camera, which was modified to display images.
    The exposure time and resolution settings are configured based on
    the information from the GUI.
    '''
    # picamera is only available on the Raspberry Pi (unix systems)
    if sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # Object used to capture and manipulate the images
        with picamera.PiCamera() as camera:
            # Reads the resolution values from the combobox
            valores = self.ui.comboBox_cameraResolucao.currentText().split('x')
            img_x, img_y = int(valores[0]), int(valores[1])
            camera.resolution = (img_x, img_y)
            camera.start_preview()
            # delay value used to let the image stabilize
            tempo_delay = int(self.ui.spinBox_cameraDelay.value())/1000
            # wait for the image to stabilize
            time.sleep(tempo_delay)
            camera.capture(self.caminho_inicial + '/imagens/teste.jpg')
            self.alerta_toolbar('taking photo')
    else:
        # Used when testing on Windows
        self.alerta_toolbar('Raspicam not available')
    # Scales the image to fit the GUI
    self.ui.label_camera.setScaledContents(True)
    # Shows the image on screen
    self.ui.label_camera.setPixmap(QtGui.QPixmap(self.caminho_inicial + '/imagens/teste.jpg'))
    self.alerta_toolbar('photo saved')
def foto_update(self):
    '''
    Recursive method.
    Called when the image auto-update checkbox is enabled.
    It starts a Qt timer in singleShot mode after the amount of time chosen
    in the GUI spinBox. While the checkbox stays checked, the function keeps
    calling itself recursively, until the checkbox is disabled or the
    connection is closed.
    '''
    # Starts the timer only if the checkbox is checked
    if self.ui.checkBox_cameraAutoUpdate.isChecked():
        tira_foto(self)
        tempo_delay = 1000 * int(self.ui.spinBox_cameraRefresh.value())
        self.timer_foto.singleShot(tempo_delay, partial(foto_update, self))
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.compile_context as compile_context
import oneflow.python.framework.distribute as distribute
import oneflow.python.framework.hob as hob
import oneflow.python.lib.core.enable_if as enable_if
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.framework.user_op_attr_pb2 as user_op_attr_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.core.common.shape_pb2 as shape_util
import oneflow
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.experimental.name_scope as name_scope
import oneflow.core.vm.instruction_pb2 as instr_util
import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_util
import oneflow.python.vm.id_util as id_util
import oneflow.python.eager.vm_util as vm_util
import oneflow.python.eager.eager_blob_util as eager_blob_util
import random
import oneflow.python.eager.gradient_util as gradient_util
import oneflow.python.eager.blob_register as blob_register_util
import oneflow as flow
import traceback
blob_register = blob_register_util.GetDefaultBlobRegister()
class UserOp(object):
def __init__(self, op_name, op_type_name=None):
self.op_conf_ = op_conf_util.OperatorConf()
self.op_conf_.name = op_name
if op_type_name is not None:
self.op_conf_.user_conf.op_type_name = op_type_name
device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
self.op_conf_.device_tag = device_tag
self.output_arg_key_list_ = []
@property
def op_conf(self):
return self.op_conf_
def InferAndTryRun(self):
raise NotImplementedError
def MakeRemoteBlob(self, lbi):
raise NotImplementedError
def RemoteBlobList(self):
remote_blob_list = []
for k in self.op_conf_.user_conf.output:
if k not in self.output_arg_key_list_:
raise ValueError(
"output_arg_name {} of {} op is not set in python op builder".format(
k, self.op_conf_.name
)
)
for output_arg_name in self.output_arg_key_list_:
assert output_arg_name in self.op_conf_.user_conf.output
for i in range(len(self.op_conf_.user_conf.output[output_arg_name].s)):
lbi = logical_blob_id_util.LogicalBlobId()
lbi.op_name = self.op_conf_.name
lbi.blob_name = "{}_{}".format(output_arg_name, i)
remote_blob_list.append(self.MakeRemoteBlob(lbi))
return tuple(remote_blob_list)
def SoleOutputBlob(self):
blobs = self.RemoteBlobList()
assert len(blobs) == 1
return blobs[0]
class UserOpModule(object):
@property
def opkernel_object(self):
return self.opkernel_object_
def set_opkernel_object(self, opkernel_object):
assert not hasattr(self, "opkernel_object_")
self.opkernel_object_ = opkernel_object
def InitOpKernel(self):
raise NotImplementedError
@oneflow_export("user_op_builder")
def api_user_op_builder(op_name):
r"""Build a wrapper of user op.
For instance::
def myargmax(
input: remote_blob_util.BlobDef) -> remote_blob_util.BlobDef:
return (
flow.user_op_builder("myargmax")
.Op("argmax")
.Input("in", [input])
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
Args:
op_name (str): name of new user op
Returns:
UserOpConfBuilder: `UserOpConfBuilder` object used to build a wrapper of user op.
"""
api = enable_if.unique([lazy_user_op_builder, eager_user_op_builder])
return api(op_name)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def lazy_user_op_builder(op_name):
job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
op_name = name_scope.GetJobNameScopePrefix(job_name) + op_name
return UserOpConfBuilder(LazyUserOp, op_name, None)
class LazyUserOp(UserOp):
def __init__(self, op_name, op_type_name):
UserOp.__init__(self, op_name, op_type_name)
def InferAndTryRun(self):
compile_context.CurJobAddOp(self.op_conf_)
return self
def MakeRemoteBlob(self, lbi):
return remote_blob_util.RemoteBlob(lbi)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def eager_user_op_builder(op_name):
job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
op_name = name_scope.GetJobNameScopePrefix(job_name) + op_name
return UserOpConfBuilder(EagerUserOp, op_name, None)
class EagerUserOp(UserOp):
def __init__(self, op_name, op_type_name):
UserOp.__init__(self, op_name, op_type_name)
def InferAndTryRun(self):
interpret_util.Forward(self.op_conf_)
return self
def MakeRemoteBlob(self, lbi):
return remote_blob_util.EagerLogicalBlob(lbi)
in_physical_placement = hob.env_initialized & hob.is_current_placement_physical
@oneflow_export("consistent_user_op_builder")
def api_consistent_user_op_builder(op_name):
job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
op_name = name_scope.GetJobNameScopePrefix(job_name) + op_name
return UserOpConfBuilder(ConsistentUserOp, op_name, None)
class ConsistentUserOp(UserOp):
def __init__(self, op_name, op_type_name):
UserOp.__init__(self, op_name, op_type_name)
def InferAndTryRun(self):
interpret_util.ConsistentForward(self.op_conf_)
return self
def MakeRemoteBlob(self, lbi):
return remote_blob_util.RemoteBlob(lbi)
class UserOpConfBuilder(object):
def __init__(self, user_op_or_module_class, op_name, op_type_name):
self.user_op_ = user_op_or_module_class(op_name, op_type_name)
def CheckAndComplete(self):
assert self.user_op_.op_conf_.user_conf.op_type_name != ""
self.user_op_.op_conf_ = c_api_util.CheckAndCompleteUserOpConf(
self.user_op_.op_conf_
)
return self
def Build(self):
r"""Build op when in/output and other attribute set up.
Returns:
self
"""
return self.CheckAndComplete().user_op_
def OpName(self, op_name):
job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
op_name = name_scope.GetJobNameScopePrefix(job_name) + op_name
self.user_op_.op_conf_.name = op_name
user_conf = self.user_op_.op_conf_.user_conf
def GetLbn(output_name, i):
return "{}/{}_{}".format(op_name, output_name, i)
for output_name, output in user_conf.output.items():
output.s[:] = [GetLbn(output_name, i) for i in range(len(output.s))]
return self
def Op(self, op_type_name):
r"""set typename of op
Args:
op_type_name (string): op type name
Returns:
self
"""
self.user_op_.op_conf_.user_conf.op_type_name = op_type_name
return self
def Input(self, input_name, input_blob_list):
r"""Set input blob of op
Args:
input_name (str): input name of blob
input_blob_list : list of blobs
Returns:
self
"""
assert isinstance(input_blob_list, (tuple, list))
input_conf = self.user_op_.op_conf_.user_conf.input
input_conf[input_name].ClearField("s")
for input_blob in input_blob_list:
# assert type(input_blob) is blob_desc.BlobDesc
input_conf[input_name].s.append(input_blob.unique_name)
return self
def InputSize(self, input_name, input_blob_size):
input_conf = self.user_op_.op_conf_.user_conf.input
assert input_blob_size >= 0
assert input_name not in input_conf
for i in range(input_blob_size):
unique_name = "%s/%s_%s" % (self.user_op_.op_conf_.name, input_name, i)
input_conf[input_name].s.append(unique_name)
return self
def Output(self, output_name, num=1):
r"""Set output blob of op
Args:
output_name (str): name of output blob
num (int, optional): Defaults to 1.
Returns:
self
"""
assert isinstance(num, int) and num >= 1
out_lbns = []
for i in range(num):
lbn = "{}/{}_{}".format(self.user_op_.op_conf_.name, output_name, i)
out_lbns.append(lbn)
self.user_op_.op_conf_.user_conf.output[output_name].s[:] = out_lbns
self.user_op_.output_arg_key_list_.append(output_name)
return self
def Attr(self, attr_name, attr_value, attr_type_name=None):
r"""Set value of op's attribute.
Args:
attr_name (str): attribute name of op
attr_value (Any): attribute value of op
        Raises:
            ValueError: raised when the value does not match the op's attribute type.
        Returns:
            self
"""
        if attr_type_name is not None:
print(
"""WARNING: Argument 'attr_type_name' of UserOpConfBuilder.Attr has been deprecated. Please remove it.
For instance:
- .Attr("out_num", out_num, "AttrTypeInt64")
+ .Attr("out_num", out_num)
"""
)
print(traceback.format_stack()[-2])
attribute = user_op_attr_util.UserOpAttrVal()
assert isinstance(attr_name, str)
attr_type = c_api_util.GetUserOpAttrType(
self.user_op_.op_conf_.user_conf.op_type_name, attr_name
)
if attr_type == user_op_attr_util.kAtInt32:
assert isinstance(attr_value, int)
attribute.at_int32 = attr_value
elif attr_type == user_op_attr_util.kAtInt64:
assert isinstance(attr_value, int)
attribute.at_int64 = attr_value
elif attr_type == user_op_attr_util.kAtBool:
assert isinstance(attr_value, bool)
attribute.at_bool = attr_value
elif attr_type == user_op_attr_util.kAtFloat:
assert isinstance(attr_value, float)
attribute.at_float = attr_value
elif attr_type == user_op_attr_util.kAtDouble:
assert isinstance(attr_value, float)
attribute.at_double = attr_value
elif attr_type == user_op_attr_util.kAtString:
assert isinstance(attr_value, str)
attribute.at_string = attr_value
elif attr_type == user_op_attr_util.kAtShape:
assert isinstance(attr_value, (tuple, list))
assert all(isinstance(x, int) for x in attr_value)
attribute.at_shape.dim[:] = list(attr_value)
elif attr_type == user_op_attr_util.kAtDataType:
assert (
isinstance(attr_value.oneflow_proto_dtype, int)
and attr_value in oneflow.dtypes()
)
attribute.at_data_type = attr_value.oneflow_proto_dtype
elif attr_type == user_op_attr_util.kAtListInt32:
assert isinstance(attr_value, (tuple, list))
assert all(isinstance(x, int) for x in attr_value)
attribute.at_list_int32.val[:] = list(attr_value)
elif attr_type == user_op_attr_util.kAtListInt64:
assert isinstance(attr_value, (tuple, list))
assert all(isinstance(x, int) for x in attr_value)
attribute.at_list_int64.val[:] = list(attr_value)
elif attr_type == user_op_attr_util.kAtListFloat:
assert isinstance(attr_value, (tuple, list))
assert all(isinstance(x, float) for x in attr_value)
attribute.at_list_float.val[:] = list(attr_value)
elif attr_type == user_op_attr_util.kAtListDataType:
assert isinstance(attr_value, (tuple, list))
assert all(
isinstance(x.oneflow_proto_dtype, int) and x in oneflow.dtypes()
for x in attr_value
)
attribute.at_list_data_type.val[:] = list(
[x.oneflow_proto_dtype for x in attr_value]
)
elif attr_type == user_op_attr_util.kAtListShape:
assert isinstance(attr_value, (tuple, list))
assert all(isinstance(x, tuple) or isinstance(x, list) for x in attr_value)
for i in range(len(attr_value)):
shape = shape_util.ShapeProto()
shape.dim[:] = list(attr_value[i])
attribute.at_list_shape.val.append(shape)
elif attr_type == user_op_attr_util.kAtListString:
assert isinstance(attr_value, (tuple, list))
assert all(isinstance(x, str) for x in attr_value)
attribute.at_list_string.val[:] = list(attr_value)
else:
raise ValueError("Invalid op attribute type {}".format(attr_type))
self.user_op_.op_conf_.user_conf.attr[attr_name].CopyFrom(attribute)
return self
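# Illustrative chained usage of the builder (the op type, blob names and
# attribute below are hypothetical, shown only to demonstrate the API shape):
#
#     y = (flow.user_op_builder("my_leaky_relu")
#          .Op("leaky_relu")
#          .Input("x", [x])
#          .Output("y")
#          .Attr("alpha", 0.1)
#          .Build()
#          .InferAndTryRun()
#          .SoleOutputBlob())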
@oneflow_export("user_op_module_builder")
def api_user_op_module_builder(op_type_name):
api = enable_if.unique(
[lazy_user_op_module_builder, eager_logical_user_op_module_builder]
)
return api(op_type_name)
class UserOpModuleBuilder(UserOpConfBuilder):
def __init__(self, *args, **kwargs):
UserOpConfBuilder.__init__(self, *args, **kwargs)
self.user_op_module.op_conf.scope_symbol_id = flow.current_scope().symbol_id
@property
def user_op_module(self):
return self.user_op_
def Op(self, op_type_name):
raise ValueError(
"user op module builder of {} can't call '.Op(op_type_name)' method".format(
op_type_name
)
)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def lazy_user_op_module_builder(op_type_name):
job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
op_name = name_scope.GetJobNameScopePrefix(job_name) + op_type_name
return UserOpModuleBuilder(LazyUserOpModule, op_name, op_type_name)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def eager_logical_user_op_module_builder(op_type_name):
job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
op_name = name_scope.GetJobNameScopePrefix(job_name) + op_type_name
return UserOpModuleBuilder(EagerLogicalUserOpModule, op_name, op_type_name)
class LazyUserOpModule(UserOpModule, UserOp):
def __init__(self, op_name, op_type_name):
UserOp.__init__(self, op_name, op_type_name)
def InitOpKernel(self):
self.set_opkernel_object(None)
def InferAndTryRun(self):
assert hob.in_global_mode(None)
compile_context.CurJobAddOp(self.op_conf_)
return self
def MakeRemoteBlob(self, lbi):
return remote_blob_util.RemoteBlob(lbi)
class EagerLogicalUserOpModule(UserOpModule, UserOp):
def __init__(self, op_name, op_type_name):
UserOp.__init__(self, op_name, op_type_name)
def InitOpKernel(self):
def BuildInstruction(builder):
self.set_opkernel_object(builder.NewOpKernelObject(self.op_conf))
vm_util.LogicalRun(BuildInstruction)
def InferAndTryRun(self):
assert hob.in_global_mode(None)
interpret_util.OpKernelForward(self.op_conf, self.opkernel_object)
return self
def MakeRemoteBlob(self, lbi):
return remote_blob_util.EagerLogicalBlob(lbi)
@oneflow_export("consistent_user_op_module_builder")
def api_consistent_user_op_module_builder(op_type_name):
api = enable_if.unique(
[
lazy_consistent_user_op_module_builder,
eager_consistent_user_op_module_builder,
]
)
return api(op_type_name)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def lazy_consistent_user_op_module_builder(op_type_name):
job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
op_name = name_scope.GetJobNameScopePrefix(job_name) + op_type_name
return UserOpModuleBuilder(LazyConsistentUserOpModule, op_name, op_type_name)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def eager_consistent_user_op_module_builder(op_type_name):
job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
op_name = name_scope.GetJobNameScopePrefix(job_name) + op_type_name
return UserOpModuleBuilder(EagerConsistentUserOpModule, op_name, op_type_name)
class LazyConsistentUserOpModule(UserOpModule, UserOp):
def __init__(self, op_name, op_type_name):
UserOp.__init__(self, op_name, op_type_name)
def InitOpKernel(self):
self.set_opkernel_object(None)
def InferAndTryRun(self):
assert hob.in_global_mode(None)
compile_context.CurJobAddConsistentOp(self.op_conf_)
return self
def MakeRemoteBlob(self, lbi):
return remote_blob_util.RemoteBlob(lbi)
class EagerConsistentUserOpModule(UserOpModule, UserOp):
def __init__(self, op_name, op_type_name):
UserOp.__init__(self, op_name, op_type_name)
def InitOpKernel(self):
def BuildInstruction(builder):
self.set_opkernel_object(builder.NewOpKernelObject(self.op_conf))
vm_util.LogicalRun(BuildInstruction)
def InferAndTryRun(self):
assert hob.in_global_mode(None)
interpret_util.OpKernelConsistentForward(self.op_conf, self.opkernel_object)
return self
def MakeRemoteBlob(self, lbi):
return remote_blob_util.EagerLogicalBlob(lbi)
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListTensorboardTimeSeries
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardTimeSeries_sync]
from google.cloud import aiplatform_v1
def sample_list_tensorboard_time_series():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListTensorboardTimeSeriesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tensorboard_time_series(request=request)
# Handle the response
for response in page_result:
print(response)
# [END aiplatform_generated_aiplatform_v1_TensorboardService_ListTensorboardTimeSeries_sync]
|
import os
from datetime import timedelta
from argparse import ArgumentParser
from .atsobjs import loadObjects, AtsObject
def get_nearby_objects(ats_objects: dict, target: AtsObject, radius: float = 200, nres: int = 20) -> list:
""" Does the actual work of getting nearby objects """
dists = [(x.distInRadius(target, radius), x) for k,x in ats_objects.items() if target.name != k]
psinr = [(d, x) for d,x in dists if d is not None and d > 0]
psinr.sort(key=lambda x: x[0])
#psinr = [x for x in ats_objects if (x.distInRadius(target, radius) is not None) and (x.dist > 0)]
#psinr.sort(key=lambda x: x.dist)
    return psinr[:nres]
def get_ono():
parser = ArgumentParser(prog="Object Distance Calculator")
parser.add_argument("--target", help="Partial name of Source")
parser.add_argument("--radius", help="Radius to look in", default=100, type=float)
parser.add_argument("--nres", help="Number of results", default=20, type=float)
parser.add_argument("--speed", help="Speed you are travelling at", default=16, type=float)
args = parser.parse_args()
if not args.target:
raise ValueError("No target was given")
fpath = os.path.dirname(os.path.abspath(__file__))
dbpath = os.path.join(fpath, "data/atsdata.json")
ats_objects = loadObjects(dbpath)
target_key = [v for k,v in ats_objects.items() if args.target.lower() in k.lower()]
if len(target_key) > 1:
raise ValueError("Found more then one candidate for {}, please specify more of the name".format(args.target))
elif len(target_key) < 1:
raise ValueError("Target {} does not exist in the objects database".format(args.target))
target = target_key[0]
# target = [x for x in planets if args.target.lower() in x.name.lower()][0]
    psinr = get_nearby_objects(ats_objects, target, radius=args.radius, nres=args.nres)
print("The {} closest objects to {}".format(args.nres, target.name))
print("{0:<25s}\t{1:15s}\t{2:10s}\t{3:10s}\t{4:20s}".format(
"Name of Object",
"Empire",
"Type",
"Time (Duration)",
"Distance (PC)"
))
print("=" * 100)
for _,x in psinr:
print("{0:<25s}\t{1:15s}\t{2:10s}\t{3:20s}\t[{4:>.2f}]".format(
x.name,
x.empire,
x.type,
str(timedelta(seconds=target.timeToObject(x, args.speed, dist=x.dist))),
x.dist
))
if __name__ == "__main__":
get_ono()
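# Example invocation (the script name and target are illustrative):
#   python -m objdist --target "Oort" --radius 150 --nres 10 --speed 16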
|
import numpy as np
def tour_select(fitness, tournament_size):
"""Tournament selection. Choose number of individuals to participate
and select the one with the best fitness.
Parameters
----------
fitness : array_like
An array of each individual's fitness.
tournament_size : int
Number of participants in the tournament.
Returns
-------
int
The index of the best individual.
"""
    # np.random.choice(n) samples indices 0..n-1, so use len(fitness) here;
    # len(fitness)-1 would never let the last individual compete.
    aspirants = np.random.choice(len(fitness), tournament_size, replace=False)
    chosen = [(ind, fitness[ind]) for ind in aspirants]
    # ascending sort: assumes lower fitness is better (minimisation)
    chosen.sort(key=lambda x: x[1])
    return chosen[0][0]
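# Example (assumes minimisation, matching the ascending sort above):
#
#     fitness = np.random.rand(50)
#     parent_idx = tour_select(fitness, tournament_size=3)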
|
#!/usr/bin/env python
#
# Copyright 2017 Pixar Animation Studios
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
""" Configuration file for the OpenTimelineIO Python Package. """
import os
import sys
import unittest
from setuptools import setup
import setuptools.command.build_py
import distutils.version
import pip
# Add command to upload to PyPI
# Set TWINE_USERNAME and TWINE_PASSWORD variables
# PyCharm Check: Emulate terminal in output console
if sys.argv[-1] == 'up':
os.system('rmdir /S/Q build')
os.system('rmdir /S/Q dist')
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
elif sys.argv[-1] == 'sdist':
os.system('rmdir /S/Q build')
os.system('rmdir /S/Q dist')
os.system('python setup.py sdist')
sys.exit()
# Make sure the environment contains an up to date enough version of pip.
PIP_VERSION = pip.__version__
REQUIRED_PIP_VERSION = "6.0.0"
if (
distutils.version.StrictVersion(PIP_VERSION)
<= distutils.version.StrictVersion(REQUIRED_PIP_VERSION)
):
sys.stderr.write(
"Your pip version is: '{}', OpenTimelineIO requires at least "
"version '{}'. Please update pip by running:\n"
"pip install -U pip\n".format(
PIP_VERSION,
REQUIRED_PIP_VERSION,
)
)
sys.exit(1)
# Make sure the environment contains an up to date enough version of setuptools.
try:
import setuptools.version
SETUPTOOLS_VERSION = setuptools.version.__version__
except ImportError:
SETUPTOOLS_VERSION = setuptools.__version__
REQUIRED_SETUPTOOLS_VERSION = '20.5.0'
if (
distutils.version.StrictVersion(SETUPTOOLS_VERSION)
<= distutils.version.StrictVersion(REQUIRED_SETUPTOOLS_VERSION)
):
sys.stderr.write(
"Your setuptools version is: '{}', OpenTimelineIO requires at least "
"version '{}'. Please update setuptools by running:\n"
"pip install -U setuptools\n".format(
SETUPTOOLS_VERSION,
REQUIRED_SETUPTOOLS_VERSION,
)
)
sys.exit(1)
# check the python version first
if (
sys.version_info[0] < 2 or
(sys.version_info[0] == 2 and sys.version_info[1] < 7)
):
sys.exit(
'OpenTimelineIO requires python2.7 or greater, detected version:'
' {}.{}'.format(
sys.version_info[0],
sys.version_info[1]
)
)
# Metadata that gets stamped into the __init__ files during the build phase.
PROJECT_METADATA = {
"version": "0.11.0.dev2",
"author": 'Pixar Animation Studios',
"author_email": 'opentimelineio@pixar.com',
"license": 'Modified Apache 2.0 License',
}
METADATA_TEMPLATE = """
__version__ = "{version}"
__author__ = "{author}"
__author_email__ = "{author_email}"
__license__ = "{license}"
"""
def _append_version_info_to_init_scripts(build_lib):
"""Stamp PROJECT_METADATA into __init__ files."""
for module in [
"opentimelineio_py",
"opentimelineio_py_contrib",
# "opentimelineview",
]:
target_file = os.path.join(build_lib, module, "__init__.py")
source_file = os.path.join(
os.path.dirname(__file__),
module, "__init__.py"
)
# get the base data from the original file
with open(source_file, 'r') as fi:
src_data = fi.read()
# write that + the suffix to the target file
with open(target_file, 'w') as fo:
fo.write(src_data)
fo.write(METADATA_TEMPLATE.format(**PROJECT_METADATA))
class AddMetadataToInits(setuptools.command.build_py.build_py):
"""Stamps PROJECT_METADATA into __init__ files."""
def run(self):
setuptools.command.build_py.build_py.run(self)
if not self.dry_run:
_append_version_info_to_init_scripts(self.build_lib)
def test_otio():
"""Discovers and runs tests"""
try:
# Clear the environment of a preset media linker
del os.environ['OTIO_DEFAULT_MEDIA_LINKER']
except KeyError:
pass
return unittest.TestLoader().discover('tests')
# copied from first paragraph of README.md
LONG_DESCRIPTION = """
This package copies the source code from the last pure-Python release: https://github.com/PixarAnimationStudios/OpenTimelineIO/tree/last_pure_python
Why this legacy fork of the repository was created: https://github.com/PixarAnimationStudios/OpenTimelineIO/issues/756
**No new features were added; the goal is simply to keep this version available!**
The following is the original project description:
Main web site: http://opentimeline.io/
Documentation: https://opentimelineio.readthedocs.io/
GitHub: https://github.com/PixarAnimationStudios/OpenTimelineIO
Discussion group: https://lists.aswf.io/g/otio-discussion
OpenTimelineIO is an interchange format and API for
editorial cut information. OTIO is not a container format for media, rather it
contains information about the order and length of cuts and references to
external media.
OTIO includes both a file format and an API for manipulating that format. It
also includes a plugin architecture for writing adapters to convert from/to
existing editorial timeline formats. It also implements a dependency-less
library for dealing strictly with time, opentime.
You can provide adapters for your video editing tool or pipeline as needed.
Each adapter allows for import/export between that proprietary tool and the
OpenTimelineIO format."""
setup(
name='OpenTimelineIO-Py',
description='Editorial interchange format and API',
long_description=LONG_DESCRIPTION,
url='http://opentimeline.io',
project_urls={
'Source':
'https://github.com/PixarAnimationStudios/OpenTimelineIO',
'Documentation':
'https://opentimelineio.readthedocs.io/',
'Issues':
'https://github.com/PixarAnimationStudios/OpenTimelineIO/issues',
},
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Multimedia :: Video :: Non-Linear Editor',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: Other/Proprietary License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'Natural Language :: English',
],
keywords='film tv editing editorial edit non-linear edl time',
platforms='any',
packages=[
'opentimelineio_py',
'opentimelineio_py.adapters',
'opentimelineio_py.algorithms',
'opentimelineio_py.core',
'opentimelineio_py.schema',
'opentimelineio_py.schemadef',
'opentimelineio_py.plugins',
'opentimelineio_py.console',
'opentimelineio_py_contrib',
'opentimelineio_py_contrib.adapters',
# 'opentimelineio_py_contrib.adapters.aaf_adapter',
# 'opentimelineview',
],
package_data={
'opentimelineio_py': [
'adapters/builtin_adapters.plugin_manifest.json',
],
'opentimelineio_py_contrib': [
'adapters/contrib_adapters.plugin_manifest.json',
]
},
install_requires=[
# 'pyaaf2==1.2.0'
],
entry_points={
'console_scripts': [
# 'otioview = opentimelineview.console:main',
'otiocat = opentimelineio_py.console.otiocat:main',
'otioconvert = opentimelineio_py.console.otioconvert:main',
'otiostat = opentimelineio_py.console.otiostat:main',
'otioautogen_serialized_schema_docs = opentimelineio_py.console.autogen_serialized_datamodel:main',
],
},
extras_require={
'dev': [
'flake8>=3.5',
'coverage>=4.5',
'tox>=3.0',
'urllib3>=1.24.3'
],
# 'view': [
# 'PySide2~=5.11'
# ]
},
test_suite='setup.test_otio',
tests_require=[
'mock;python_version<"3.3"',
],
# because we need to open() the adapters manifest, we aren't zip-safe
zip_safe=False,
# Use the code that wires the PROJECT_METADATA into the __init__ files.
cmdclass={'build_py': AddMetadataToInits},
# expand the project metadata dictionary to fill in those values
**PROJECT_METADATA
)
|