input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>framawiki/pywikibot
# -*- coding: utf-8 -*-
"""
Script to copy files from a local Wikimedia wiki to Wikimedia Commons.
It uses CommonsHelper to not leave any information out and CommonSense
to automatically categorise the file. After copying, a NowCommons
template is added to the local wiki's file. It uses a local exclusion
list to skip files with templates not allowed on Wikimedia Commons. If no
categories have been found, the file will be tagged on Commons.
This bot uses a graphical interface and may not work from commandline
only environment.
Requests for improvement for CommonsHelper output should be directed to
Magnus Manske at his talk page. Please be very specific in your request
(describe current output and expected output) and note an example file,
so he can test at: [[de:Benutzer Diskussion:Magnus Manske]]. You can
write him in German and English.
Command line options:
-always Skip the GUI validation
-setcat: Set the category of the copied image
-delete Delete the image after the image has been transferred. This will
only work if the user has sysops privileges, otherwise the image
will only be marked for deletion.
&params;
Examples
--------
Work on a single image:
python pwb.py imagecopy -page:Image:<imagename>
Work on the 100 newest images:
python pwb.py imagecopy -newimages:100
Work on all images in a category <cat>:
python pwb.py imagecopy -cat:<cat>
Work on all images which transclude a template:
python pwb.py imagecopy -transcludes:<template>
Work on a single image and deletes the image when the transfer is complete
(only works if the user has sysops privilege, otherwise it will be marked for
deletion):
python pwb.py imagecopy -page:Image:<imagename> -delete
By default the bot works on your home wiki (set in user-config)
"""
#
# (C) Pywikibot team, 2003-2020
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import codecs
import re
import threading
import webbrowser
from os import path
from requests.exceptions import RequestException
import pywikibot
from pywikibot import config, i18n, pagegenerators
from pywikibot.comms.http import fetch
from pywikibot.specialbots import UploadRobot
from pywikibot.tools import remove_last_args
from scripts.image import ImageRobot
try:
from pywikibot.userinterfaces.gui import Tkdialog, Tkinter
except ImportError as _tk_error:
Tkinter = _tk_error
Tkdialog = object
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp
}
nowCommonsTemplate = {
'_default': '{{NowCommons|%s}}',
'af': '{{NowCommons|File:%s}}',
'als': '{{NowCommons|%s}}',
'am': '{{NowCommons|File:%s}}',
'ang': '{{NowCommons|File:%s}}',
'ar': '{{الآن كومنز|%s}}',
'ast': '{{EnCommons|File:%s}}',
'az': '{{NowCommons|%s}}',
'bar': '{{NowCommons|%s}}',
'bg': '{{NowCommons|%s}}',
'bn': '{{NowCommons|File:%s}}',
'bs': '{{NowCommons|%s}}',
'ca': '{{AraCommons|%s}}',
'cs': '{{NowCommons|%s}}',
'cy': '{{NowCommons|File:%s}}',
'da': '{{NowCommons|File:%s}}',
'de': '{{NowCommons|%s}}',
'dsb': '{{NowCommons|%s}}',
'el': '{{NowCommons|%s}}',
'en': '{{subst:ncd|%s}}',
'eo': '{{Nun en komunejo|%s}}',
'es': '{{EnCommons|File:%s}}',
'et': '{{NüüdCommonsis|File:%s}}',
'fa': '{{NowCommons|%s}}',
'fi': '{{NowCommons|%s}}',
'fo': '{{NowCommons|File:%s}}',
'fr': '{{Image sur Commons|%s}}',
'fy': '{{NowCommons|%s}}',
'ga': '{{Ag Cómhaoin|File:%s}}',
'gl': '{{EnCommons]|File:%s}}',
'gv': '{{NowCommons|File:%s}}',
'he': '{{גם בוויקישיתוף|%s}}',
'hr': '{{NowCommons|%s}}',
'hsb': '{{NowCommons|%s}}',
'hu': '{{Azonnali-commons|%s}}',
'ia': '{{NowCommons|File:%s}}',
'id': '{{NowCommons|File:%s}}',
'ilo': '{{NowCommons|File:%s}}',
'io': '{{NowCommons|%s}}',
'is': '{{NowCommons|%s}}',
'it': '{{NowCommons|%s}}',
'ja': '{{NowCommons|File:%s}}',
'jv': '{{NowCommons|File:%s}}',
'ka': '{{NowCommons|File:%s}}',
'kn': '{{NowCommons|File:%s}}',
'ko': '{{NowCommons|File:%s}}',
'ku': '{{NowCommons|%s}}',
'lb': '{{Elo op Commons|%s}}',
'li': '{{NowCommons|%s}}',
'lt': '{{NowCommons|File:%s}}',
'lv': '{{NowCommons|File:%s}}',
'mk': '{{NowCommons|File:%s}}',
'mn': '{{NowCommons|File:%s}}',
'ms': '{{NowCommons|%s}}',
'nds-nl': '{{NoenCommons|File:%s}}',
'nl': '{{NuCommons|%s}}',
'nn': '{{No på Commons|File:%s}}',
'no': '{{NowCommons|%s}}',
'oc': '{{NowCommons|File:%s}}',
'pl': '{{NowCommons|%s}}',
'pt': '{{NowCommons|%s}}',
'ro': '{{AcumCommons|File:%s}}',
'ru': '{{Перенесено на Викисклад|%s}}',
'sa': '{{NowCommons|File:%s}}',
'scn': '{{NowCommons|File:%s}}',
'sh': '{{NowCommons|File:%s}}',
'sk': '{{NowCommons|File:%s}}',
'sl': '{{OdslejZbirka|%s}}',
'sq': '{{NowCommons|File:%s}}',
'sr': '{{NowCommons|File:%s}}',
'st': '{{NowCommons|File:%s}}',
'su': '{{IlaharKiwari|File:%s}}',
'sv': '{{NowCommons|%s}}',
'sw': '{{NowCommons|%s}}',
'ta': '{{NowCommons|File:%s}}',
'th': '{{มีที่คอมมอนส์|File:%s}}',
'tl': '{{NasaCommons|File:%s}}',
'tr': '{{NowCommons|%s}}',
'uk': '{{NowCommons|File:%s}}',
'ur': '{{NowCommons|File:%s}}',
'vec': '{{NowCommons|%s}}',
'vi': '{{NowCommons|File:%s}}',
'vo': '{{InKobädikos|%s}}',
'wa': '{{NowCommons|%s}}',
'zh': '{{NowCommons|File:%s}}',
'zh-min-nan': '{{Commons ū|%s}}',
'zh-yue': '{{subst:Ncd|File:%s}}',
}
moveToCommonsTemplate = {
'ar': ['نقل إلى كومنز'],
'en': ['Commons ok', 'Copy to Wikimedia Commons', 'Move to commons',
'Movetocommons', 'To commons',
'Copy to Wikimedia Commons by BotMultichill'],
'fi': ['Commonsiin'],
'fr': ['Image pour Commons'],
'hsb': ['Kopěruj do Wikimedia Commons'],
'hu': ['Commonsba'],
'is': ['Færa á Commons'],
'ms': ['Hantar ke Wikimedia Commons'],
'nl': ['Verplaats naar Wikimedia Commons', 'VNC'],
'pl': ['Do Commons', 'NaCommons', 'Na Commons'],
'ru': ['На Викисклад'],
'sl': ['Skopiraj v Zbirko'],
'sr': ['За оставу', 'Пребацити на оставу'],
'sv': ['Till Commons'],
'zh': ['Copy to Wikimedia Commons'],
}
def pageTextPost(url, parameters):
"""
Get data from commons helper page.
@param url: This parameter is not used here, we keep it here to avoid user
scripts from breaking.
@param parameters: Data that will be submitted to CommonsHelper.
@type parameters: dict
@return: A CommonHelper description message.
@rtype: str
"""
gotInfo = False
while not gotInfo:
try:
commonsHelperPage = fetch(
'https://commonshelper.toolforge.org/',
method='POST',
data=parameters)
data = commonsHelperPage.data.content.decode('utf-8')
gotInfo = True
except RequestException:
pywikibot.output("Got a RequestException, let's try again")
return data
class imageTransfer(threading.Thread):
"""Facilitate transfer of image/file to commons."""
def __init__(self, imagePage, newname, category, delete_after_done=False):
"""Initializer."""
self.imagePage = imagePage
self.image_repo = imagePage.site.image_repository()
self.newname = newname
self.category = category
self.delete_after_done = delete_after_done
threading.Thread.__init__(self)
def run(self):
"""Run the bot."""
tosend = {'language': self.imagePage.site.lang.encode('utf-8'),
'image': self.imagePage.title(
with_ns=False).encode('utf-8'),
'newname': self.newname.encode('utf-8'),
'project': self.imagePage.site.family.name.encode('utf-8'),
'username': '',
'commonsense': '1',
'remove_categories': '1',
'ignorewarnings': '1',
'doit': 'Uitvoeren'
}
pywikibot.output(tosend)
CH = pageTextPost('https://commonshelper.toolforge.org/index.php',
tosend)
pywikibot.output('Got CH desc.')
tablock = CH.split('<textarea ')[1].split('>')[0]
CH = CH.split('<textarea ' + tablock + '>')[1].split('</textarea>')[0]
CH = CH.replace('×', '×')
CH = self.fixAuthor(CH)
pywikibot.output(CH)
# I want every picture to be tagged with the bottemplate so i can check
# my contributions later.
CH = ('\n\n{{BotMoveToCommons|%s.%s|year={{subst:CURRENTYEAR}}'
'|month={{subst:CURRENTMONTHNAME}}|day={{subst:CURRENTDAY}}}}'
% (self.imagePage.site.lang, self.imagePage.site.family.name)
+ CH)
if self.category:
CH = CH.replace(
'{{subst:Unc}} <!-- Remove this line once you have '
'added categories -->', '')
CH += '[[Category:' + self.category + ']]'
bot = UploadRobot(url=self.imagePage.fileUrl(), description=CH,
use_filename=self.newname, keep_filename=True,
verify_description=False, ignore_warning=True,
target_site=self.image_repo)
bot.run()
# Should check if the image actually was uploaded
if pywikibot.Page(self.image_repo,
'Image:' + self.newname).exists():
# Get a fresh copy, force to get the page so we don't run into edit
# conflicts
imtxt = self.imagePage.get(force=True)
# Remove the move to commons templates
if self.imagePage.site.lang in moveToCommonsTemplate:
for moveTemplate in moveToCommonsTemplate[
self.imagePage.site.lang]:
imtxt = re.sub(r'(?i)\{\{' + moveTemplate + r'[^\}]*\}\}',
'', imtxt)
# add {{NowCommons}}
if self.imagePage.site.lang in nowCommonsTemplate:
addTemplate = nowCommonsTemplate[
self.imagePage.site.lang] % self.newname
else:
addTemplate = nowCommonsTemplate['_default'] % self.newname
commentText = i18n.twtranslate(
self.imagePage.site,
'commons-file-now-available',
{'localfile': self.imagePage.title(with_ns=False),
'commonsfile': self.newname})
pywikibot.showDiff(self.imagePage.get(), imtxt + addTemplate)
self.imagePage.put(imtxt + addTemplate, comment=commentText)
self.gen = pagegenerators.FileLinksGenerator(self.imagePage)
self.preloadingGen = pagegenerators.PreloadingGenerator(self.gen)
moveSummary = i18n.twtranslate(
self.imagePage.site,
'commons-file-moved',
{'localfile': self.imagePage.title(with_ns=False),
'commonsfile': self.newname})
# If the image is uploaded under a different name, replace all
# instances
if self.imagePage.title(with_ns=False) != self.newname:
imagebot = ImageRobot(
generator=self.preloadingGen,
oldImage=self.imagePage.title(with_ns=False),
newImage=self.newname,
summary=moveSummary, always=True, loose=True)
imagebot.run()
# If the user want to delete the page and
# the user has sysops privilege, delete the page, otherwise
# it will be marked for deletion.
if self.delete_after_done:
self.imagePage.delete(moveSummary, False)
return
def fixAuthor(self, pageText):
"""Fix the author field in the information template."""
informationRegex = re.compile(
r'\|Author=Original uploader was '
r'(?P<author>\[\[:\w+:\w+:\w+\|\w+\]\] at \[.+\])')
selfRegex = re.compile(
r'{{self\|author='
r'(?P<author>\[\[:\w+:\w+:\w+\|\w+\]\] at \[.+\])\|')
# Find the |Author=Original uploader was ....
informationMatch = informationRegex.search(pageText)
# Find the {{self|author=
selfMatch = selfRegex.search(pageText)
# Check if both are found and are equal
if (informationMatch and selfMatch):
if informationMatch.group('author') == selfMatch.group('author'):
# Replace |Author=Original uploader was ... with |Author= ...
pageText = informationRegex.sub(r'|Author=\g<author>',
pageText)
return pageText
def load_global_archivo():
"""Load/create Uploadbot.localskips.txt and save the path in `archivo`."""
global archivo
archivo = config.datafilepath('Uploadbot.localskips.txt')
if not path.exists(archivo):
with open(archivo, 'w') as tocreate:
tocreate.write('{{NowCommons')
def getautoskip():
"""Get a list of templates to skip."""
with codecs.open(archivo, 'r', 'utf-8') as f:
txt = f.read()
toreturn = txt.split('{{')[1:]
return toreturn
class TkdialogIC(Tkdialog):
"""The dialog window for image info."""
@remove_last_args(('commonsconflict',))
def __init__(self, image_title, content, uploader, url, templates):
"""Initializer."""
# Check if `Tkinter` wasn't imported
if isinstance(Tkinter, ImportError):
raise Tkinter
super(TkdialogIC, self).__init__()
self.root = Tkinter.Tk()
# "%dx%d%+d%+d" % (width, height, xoffset, yoffset)
# Always appear the same size and in the bottom-left corner
self.root.geometry('600x200+100-100')
self.root.title(image_title)
self.changename = ''
self.skip = 0
self.url = url
self.uploader = 'Unknown'
# uploader.decode('utf-8')
scrollbar = Tkinter.Scrollbar(self.root, orient=Tkinter.VERTICAL)
label = Tkinter.Label(self.root, text='Enter new name or leave blank.')
imageinfo = Tkinter.Label(self.root, text='Uploaded by {}.'.format(
uploader))
textarea = Tkinter.Text(self.root)
textarea.insert(Tkinter.END, content.encode('utf-8'))
textarea.config(state=Tkinter.DISABLED,
height=8, width=40, padx=0, pady=0,
wrap=Tkinter.WORD, yscrollcommand=scrollbar.set)
scrollbar.config(command=textarea.yview)
self.entry = Tkinter.Entry(self.root)
self.templatelist = Tkinter.Listbox(self.root, bg='white', height=5)
for template in templates:
self.templatelist.insert(Tkinter.END, template)
autoskip_button = Tkinter.Button(self.root, text='Add to AutoSkip',
command=self.add2_auto_skip)
browser_button = Tkinter.Button(self.root, text='View in browser',
command=self.open_in_browser)
skip_button = Tkinter.Button(self.root, text='Skip',
command=self.skip_file)
ok_button = Tkinter.Button(self.root, text='OK', command=self.ok_file)
# Start grid
label.grid(row=0)
ok_button.grid(row=0, column=1, rowspan=2)
skip_button.grid(row=0, column=2, rowspan=2)
browser_button.grid(row=0, column=3, rowspan=2)
self.entry.grid(row=1)
textarea.grid(row=2, column=1, columnspan=3)
scrollbar.grid(row=2, column=5)
self.templatelist.grid(row=2, column=0)
autoskip_button.grid(row=3, column=0)
imageinfo.grid(row=3, column=1, columnspan=4)
def ok_file(self):
"""The | |
be done
drone_x_range = [.1, -.1]
drone_y_range = [.1, -.1]
drone_z_range = [.025, -.025]
rot_matrix = R.from_euler('ZYX',[self.quad.state[5], self.quad.state[4], self.quad.state[3]],degrees=False).as_dcm().reshape(3,3)
drone_pos = np.array([self.quad.state[0], self.quad.state[1], self.quad.state[2]])
edge_ind = 0
#Collision check for drone's centroid
# for i, line in enumerate(self.line_list):
# edge_i, edge_j, u_v = line
# # p1, p2, p3 = Point3D(edge_i[0], edge_i[1], edge_i[2]), Point3D(edge_j[0], edge_j[1], edge_j[2]), Point3D(drone_pos[0], drone_pos[1], drone_pos[2])
# # l1 = Line3D(p1, p2)
# # distance = l1.distance(p3).evalf()
# distance_from_center = edge_i - drone_pos
# distance = np.linalg.norm(np.cross(distance_from_center, u_v)) / np.linalg.norm(u_v)
# #print "Edge: {0}, (Numeric) Distance from the center: {1:.3}".format(i, distance)
# if distance < max_distance:
# print "Collision detected!"
# print "Index: {0}, Drone center x={1:.3}, y={2:.3}, z={3:.3}".format(i, drone_pos[0], drone_pos[1], drone_pos[2])
# return True
# Collision check for Drone's corner points
for x_rng in drone_x_range:
for y_rng in drone_y_range:
for z_rng in drone_z_range:
drone_range = np.array([x_rng, y_rng, z_rng])
drone_range_world = np.dot(rot_matrix.T, drone_range.reshape(-1,1)).ravel()
drone_edge_point = np.array([drone_pos[0]+drone_range_world[0], drone_pos[1]+drone_range_world[1], drone_pos[2]+drone_range_world[2]])
edge_ind += 1
for i, line in enumerate(self.line_list):
edge_i, edge_j, u_v = line
distance_from_center = edge_i - drone_edge_point
distance = np.linalg.norm(np.cross(distance_from_center, u_v)) / np.linalg.norm(u_v)
#print "Edge: {0}, (Numeric) Distance from the center: {1:.3}".format(i, distance)
if distance < max_distance:
print("Collision detected!")
print("Index: {0}, Drone corner x={1:.3}, y={2:.3}, z={3:.3}".format(i, drone_edge_point[0], drone_edge_point[1], drone_edge_point[2]))
return True
# print "No Collision!"
return False
else:
return False
def isThereAnyGate(self, img_rgb):
# loop over the boundaries
low_red = np.array([161, 155, 84])
high_red = np.array([179, 255, 255])
hsv_frame = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV)
red_mask = cv2.inRange(img_rgb, low_red, high_red)
red = cv2.bitwise_and(img_rgb, img_rgb, mask=red_mask)
if red.any():
#print "there is a gate on the frame!"
return True
return False
def test_collision(self, gate_index):
phi = np.random.uniform(-np.pi/6, np.pi/6)
theta = np.random.uniform(-np.pi/6, np.pi/6)
psi = np.random.uniform(-np.pi/6, np.pi/6)
print("\nCenter Drone Pos x={0:.3}, y={1:.3}, z={2:.3}".format(self.track[gate_index].position.x_val, self.track[gate_index].position.y_val, self.track[gate_index].position.z_val))
quad_pose = [self.track[gate_index].position.x_val, self.track[gate_index].position.y_val, self.track[gate_index].position.z_val, -phi, -theta, psi]
self.quad.state = [quad_pose[0], quad_pose[1], quad_pose[2], phi, theta, psi, 0., 0., 0., 0., 0., 0.]
self.client.simSetVehiclePose(QuadPose(quad_pose), True)
self.check_collision()
time.sleep(5)
rot_matrix = Rotation.from_quat([self.track[gate_index].orientation.x_val, self.track[gate_index].orientation.y_val,
self.track[gate_index].orientation.z_val, self.track[gate_index].orientation.w_val]).as_dcm().reshape(3,3)
gate_x_range = [np.random.uniform(0.6, 1.0), -np.random.uniform(0.6, 1.0)]
gate_z_range = [np.random.uniform(0.6, 1.0), -np.random.uniform(0.6, 1.0)]
edge_ind = 0
#print "\nGate Ind: {0}, Gate x={1:.3}, y={2:.3}, z={3:.3}".format(i+1, self.track[i].position.x_val, self.track[i].position.y_val, self.track[i].position.z_val)
gate_pos = np.array([self.track[gate_index].position.x_val, self.track[gate_index].position.y_val, self.track[gate_index].position.z_val])
gate_edge_list = []
for x_rng in gate_x_range:
gate_edge_range = np.array([x_rng/1.5, 0., 0.25*np.random.uniform(-1,1)])
gate_edge_world = np.dot(rot_matrix, gate_edge_range.reshape(-1,1)).ravel()
gate_edge_point = np.array([gate_pos[0]+gate_edge_world[0], gate_pos[1]+gate_edge_world[1], gate_pos[2]+gate_edge_world[2]])
print("\nEdge Drone Pos x={0:.3}, y={1:.3}, z={2:.3}".format(gate_edge_point[0], gate_edge_point[1], gate_edge_point[2]))
self.quad.state = [gate_edge_point[0], gate_edge_point[1], gate_edge_point[2], phi, theta, psi, 0., 0., 0., 0., 0., 0.]
quad_pose = [gate_edge_point[0], gate_edge_point[1], gate_edge_point[2], -phi, -theta, psi]
self.client.simSetVehiclePose(QuadPose(quad_pose), True)
self.check_collision()
time.sleep(5)
for z_rng in gate_z_range:
gate_edge_range = np.array([0.25*np.random.uniform(-1,1), 0., z_rng/1.5])
gate_edge_world = np.dot(rot_matrix, gate_edge_range.reshape(-1,1)).ravel()
gate_edge_point = np.array([gate_pos[0]+gate_edge_world[0], gate_pos[1]+gate_edge_world[1], gate_pos[2]+gate_edge_world[2]])
edge_ind += 1
print("\nEdge Drone Pos x={0:.3}, y={1:.3}, z={2:.3}".format(gate_edge_point[0], gate_edge_point[1], gate_edge_point[2]))
self.quad.state = [gate_edge_point[0], gate_edge_point[1], gate_edge_point[2], phi, theta, psi, 0., 0., 0., 0., 0., 0.]
quad_pose = [gate_edge_point[0], gate_edge_point[1], gate_edge_point[2], -phi, -theta, psi]
self.client.simSetVehiclePose(QuadPose(quad_pose), True)
self.check_collision()
time.sleep(5)
def check_completion(self, quad_pose, eps=0.45):
x, y, z = quad_pose[0], quad_pose[1], quad_pose[2]
xd = self.track[-1].position.x_val
yd = self.track[-1].position.y_val
zd = self.track[-1].position.z_val
psid = Rotation.from_quat([self.track[-1].orientation.x_val, self.track[-1].orientation.y_val,
self.track[-1].orientation.z_val, self.track[-1].orientation.w_val]).as_euler('ZYX',degrees=False)[0]
target = [xd, yd, zd, psid]
check_arrival = False
if ( (abs(abs(xd)-abs(x)) <= eps) and (abs(abs(yd)-abs(y)) <= eps) and (abs(abs(zd)-abs(z)) <= eps)):
self.quad.calculate_cost(target=target, final_calculation=True)
check_arrival = True
return check_arrival
def test_algorithm(self, method = "MAX", use_model = False, safe_mode = True, time_or_speed = 1, v_average = 1.):
pose_prediction = np.zeros((1000,4),dtype=np.float32)
prediction_std = np.zeros((4,1),dtype=np.float32)
labels_dict = {0:3, 1:4, 2:5, 3:10, 4:-1}
gate_target = self.track[0]
gate_psi = Rotation.from_quat([gate_target.orientation.x_val, gate_target.orientation.y_val, gate_target.orientation.z_val, gate_target.orientation.w_val]).as_euler('ZYX',degrees=False)[0]
psi_start = gate_psi - np.pi/2 #drone kapi karsisinde olacak sekilde durmali
#if drone is at initial point
quad_pose = [self.drone_init.position.x_val, self.drone_init.position.y_val, self.drone_init.position.z_val, 0., 0., psi_start]
self.state0 = [self.drone_init.position.x_val, self.drone_init.position.y_val, self.drone_init.position.z_val, 0., 0., psi_start, 0., 0., 0., 0., 0., 0.]
self.client.simSetVehiclePose(QuadPose(quad_pose), True)
self.quad = Quadrotor(self.state0)
self.curr_idx = 0
self.test_states[method].append(self.quad.state)
track_completed = False
fail_check = False
collision_check = False
init_start = True
covariance_sum = 0.
prediction_std = [0., 0., 0., 0.]
sign_coeff = 0.
covariance_list = []
cov_rep_num = 5
anyGate = True
noise_on = False
previous_idx = 0
final_target = [self.track[-1].position.x_val, self.track[-1].position.y_val, self.track[-1].position.z_val]
if self.flight_log:
f=open(self.log_path, "a")
while((not track_completed) and (not fail_check)):
if noise_on:
if (self.curr_idx-previous_idx) >= 3:
noise_on = False
previous_idx = self.curr_idx
else:
if (self.curr_idx-previous_idx) >= 10 and self.curr_idx >= 15:
noise_on = True
previous_idx = self.curr_idx
sign_coeff = 1.
if noise_on:
self.brightness = random.uniform(2,4)
self.contrast = random.uniform(2,4)
self.saturation = random.uniform(0,0.1)
self.transformation = transforms.Compose([
transforms.Resize([200, 200]),
#transforms.Lambda(self.gaussian_blur),
transforms.ColorJitter(brightness=self.brightness, contrast=self.contrast, saturation=self.saturation),
transforms.ToTensor()])
else:
self.brightness = 0.
self.contrast = 0.
self.saturation = 0.
self.transformation = transforms.Compose([
transforms.Resize([200, 200]),
#transforms.Lambda(self.gaussian_blur),
#transforms.ColorJitter(brightness=self.brightness, contrast=self.contrast, saturation=self.saturation),
transforms.ToTensor()])
noise_coeff = self.brightness + self.contrast + self.saturation
image_response = self.client.simGetImages([airsim.ImageRequest('0', airsim.ImageType.Scene, False, False)])[0]
#if len(image_response.image_data_uint8) == image_response.width * image_response.height * 3:
img1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8) # get numpy array
img_rgb = img1d.reshape(image_response.height, image_response.width, 3) # reshape array to 4 channel image array H X W X 3
img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2RGB)
# anyGate = self.isThereAnyGate(img_rgb)
#cv2.imwrite(os.path.join(self.base_path, 'images', "frame" + str(self.curr_idx).zfill(len(str(self.num_samples))) + '.png'), img_rgb)
img = Image.fromarray(img_rgb)
image = self.transformation(img)
quad_pose = [self.quad.state[0], self.quad.state[1], self.quad.state[2], -self.quad.state[3], -self.quad.state[4], self.quad.state[5]]
with torch.no_grad():
# Determine Gat location with Neural Networks
pose_gate_body = self.Dronet(image)
predicted_r = np.copy(pose_gate_body[0][0])
if predicted_r < 3.0:
self.period_denum = 3.0
elif predicted_r < 5.0:
self.period_denum = 3.0
else:
self.period_denum = 5.0
for i,num in enumerate(pose_gate_body.reshape(-1,1)):
#print(num, i , self.curr_idx)
pose_prediction[self.curr_idx][i] = num.item()
if self.curr_idx >= 11:
pose_gate_cov = self.lstmR(torch.from_numpy(pose_prediction[self.curr_idx-11:self.curr_idx+1].reshape(1,12,4)).to(self.device))
for i, p_g_c in enumerate(pose_gate_cov.reshape(-1,1)):
prediction_std[i] = p_g_c.item()
prediction_std = np.clip(prediction_std, 0, prediction_std)
prediction_std = prediction_std.ravel()
covariance_sum = np.sum(prediction_std)
self.test_covariances[method].append(covariance_sum)
covariance_list.append(covariance_sum)
if self.curr_idx >= (11 + cov_rep_num):
covariance_sum = np.sum(covariance_list[-cov_rep_num:]) / float(cov_rep_num)
if covariance_sum > 10.:
anyGate = False
# Gate ground truth values will be implemented
pose_gate_body = pose_gate_body.numpy().reshape(-1,1)
# Trajectory generate
waypoint_world = spherical_to_cartesian(self.quad.state, pose_gate_body)
pos0 = [self.quad.state[0], self.quad.state[1], self.quad.state[2]]
vel0 = [self.quad.state[6], self.quad.state[7], self.quad.state[8]]
ang_vel0 = [self.quad.state[9], self.quad.state[10], self.quad.state[11]]
posf = [waypoint_world[0], waypoint_world[1], waypoint_world[2]]
yaw0 = self.quad.state[5]
yaw_diff = pose_gate_body[3][0]
yawf = (self.quad.state[5]+yaw_diff) + np.pi/2
#yawf = Rotation.from_quat([self.track[self.current_gate].orientation.x_val, self.track[self.current_gate].orientation.y_val,
# self.track[self.current_gate].orientation.z_val, self.track[self.current_gate].orientation.w_val]).as_euler('ZYX',degrees=False)[0] - np.pi/2
print("\nCurrent index: {0}".format(self.curr_idx))
print("Predicted r: {0:.3}, Noise coeff: {1:.4}, Covariance sum: {2:.3}".format(pose_gate_body[0][0], sign_coeff*noise_coeff, covariance_sum))
#print "Brightness: {0:.3}, Contast: {1:.3}, Saturation: {2:.3}".format(self.brightness, self.contrast, self.saturation)
if self.flight_log:
f.write("\nCurrent index: {0}".format(self.curr_idx))
f.write("\nPredicted r: {0:.3}, Noise coeff: {1:.4}, Covariance sum: {2:.3}".format(pose_gate_body[0][0], sign_coeff*noise_coeff, covariance_sum))
#f.write("\nBrightness: {0:.3}, Contast: {1:.3}, Saturation: {2:.3}".format(self.brightness, self.contrast, self.saturation))
f.write("\nMP algorithm: " + method)
f.write("\nEstimated time of arrival: {0:.3} s.".format(self.Tf))
f.write("\nGate Predicted, x: {0:.3}, y: {1:.3}, z: {2:.3}, psi: {3:.3} deg".format(waypoint_world[0], waypoint_world[1], waypoint_world[2], yawf*180/np.pi))
self.curr_idx += 1
if self.flight_log:
f.close()
def update(self, mode):
'''
convetion of names:
p_a_b: pose of frame b relative to frame a
t_a_b: translation vector from a to b
q_a_b: rotation quaternion from a to b
o: origin
b: UAV body frame
g: gate frame
'''
# create and set pose for the quad
#p_o_b, phi_base = racing_utils.geom_utils.randomQuadPose(UAV_X_RANGE, UAV_Y_RANGE, UAV_Z_RANGE, UAV_YAW_RANGE, UAV_PITCH_RANGE, UAV_ROLL_RANGE)
# create and set gate pose relative to the quad
#p_o_g, r, theta, psi, phi_rel = racing_utils.geom_utils.randomGatePose(p_o_b, phi_base, R_RANGE, CAM_FOV, correction)
#self.client.simSetObjectPose(self.tgt_name, p_o_g_new, True)
#min_vel, min_acc, min_jerk, pos_waypoint_interp, min_acc_stop, min_jerk_full_stop
MP_list = ["min_acc", "min_jerk", "min_jerk_full_stop", "min_vel"]
#MP_list = ["min_vel"]
if self.with_gate:
# gate_name = "gate_0"
# self.tgt_name = self.client.simSpawnObject(gate_name, "RedGate16x16", Pose(position_val=Vector3r(0,0,15)), 0.75)
# self.client.simSetObjectPose(self.tgt_name, self.track[0], True)
for i, gate in enumerate(self.track):
#print ("gate: ", gate)
gate_name = "gate_" + str(i)
self.tgt_name = self.client.simSpawnObject(gate_name, "RedGate16x16", Pose(position_val=Vector3r(0,0,15)), 0.75)
self.client.simSetObjectPose(self.tgt_name, gate, True)
# request quad img from AirSim
time.sleep(0.001)
self.find_gate_edges()
self.find_gate_distances()
if self.flight_log:
f=open(self.log_path, "w")
f.write("\nMode %s \n" % mode)
f.close()
if mode == "TEST":
self.mp_classifier.load_state_dict(torch.load(self.base_path + '/classifier_files/best_2.pt'))
#self.time_regressor = load(self.base_path + '/classifier_files/dt_regressor.sav')
self.time_coeff = 1.5
v_average = 1.5
#self.mp_scaler = load(self.base_path + 'classifier_files/mp_scaler.bin')
#self.time_scaler = load(self.base_path + 'classifier_files/time_scaler.bin')
print("\n>>> PREDICTION MODE: DICE, SAFE MODE: ON")
self.test_algorithm(use_model=True, method="DICE_SAFE", safe_mode = True, time_or_speed = 0, v_average = v_average)
for method in MP_list:
print("\n>>> TEST MODE: " + method)
self.test_algorithm(method = method, time_or_speed = 0, v_average = v_average)
self.test_number = "0_0"
pickle.dump([self.test_states,self.test_arrival_time,self.test_costs, self.test_safe_counter, self.test_distribution_on_noise, self.test_distribution_off_noise, | |
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`544`-compliant type hint utilities.
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from abc import abstractmethod
from beartype.roar import BeartypeDecorHintPep544Exception
from beartype._data.hint.pep.sign.datapepsigncls import HintSign
from beartype._util.cls.utilclstest import is_type_builtin, is_type_subclass
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_8
from typing import Any, Dict, Optional
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ PRIVATE ~ mappings }....................
# Conditionally initialized by the _init() function below.
_HINT_PEP484_IO_GENERIC_TO_PEP544_PROTOCOL: Dict[type, type] = {}
'''
Dictionary mapping from each :mod:`typing` **IO generic base class** (i.e.,
either :class:`typing.IO` itself *or* a subclass of :class:`typing.IO` defined
by the :mod:`typing` module) to the associated :mod:`beartype` **IO protocol**
(i.e., either :class:`_Pep544IO` itself *or* a subclass of :class:`_Pep544IO`
defined by this submodule).
'''
# ....................{ PRIVATE ~ classes }....................
# Conditionally initialized by the _init() function below.
_Pep544IO: Any = None
'''
:pep:`544`-compliant protocol base class for :class:`_Pep544TextIO` and
:class:`_Pep544BinaryIO`.
This is an abstract, generic version of the return of open().
NOTE: This does not distinguish between the different possible classes (text
vs. binary, read vs. write vs. read/write, append-only, unbuffered). The TextIO
and BinaryIO subclasses below capture the distinctions between text vs. binary,
which is pervasive in the interface; however we currently do not offer a way to
track the other distinctions in the type system.
Design
----------
This base class intentionally duplicates the contents of the existing
:class:`typing.IO` generic base class by substituting the useless
:class:`typing.Generic` superclass of the latter with the useful
:class:`typing.Protocol` superclass of the former. Why? Because *no* stdlib
classes excluding those defined by the :mod:`typing` module itself subclass
:class:`typing.IO`. However, :class:`typing.IO` leverages neither the
:class:`abc.ABCMeta` metaclass *nor* the :class:`typing.Protocol` superclass
needed to support structural subtyping. Therefore, *no* stdlib objects
(including those returned by the :func:`open` builtin) satisfy either
:class:`typing.IO` itself or any subclasses of :class:`typing.IO` (e.g.,
:class:`typing.BinaryIO`, :class:`typing.TextIO`). Therefore,
:class:`typing.IO` and all subclasses thereof are functionally useless for all
practical intents. The conventional excuse `given by Python maintainers to
justify this abhorrent nonsensicality is as follows <typeshed_>`__:
There are a lot of "file-like" classes, and the typing IO classes are meant
as "protocols" for general files, but they cannot actually be protocols
because the file protocol isn't very well defined—there are lots of methods
that exist on some but not all filelike classes.
Like most :mod:`typing`-oriented confabulation, that, of course, is bollocks.
Refactoring the family of :mod:`typing` IO classes from inveterate generics
into pragmatic protocols is both technically trivial and semantically useful,
because that is exactly what :mod:`beartype` does. It works. It necessitates
modifying three lines of existing code. It preserves backward compatibility. In
short, it should have been done a decade ago. If the file protocol "isn't very
well defined," the solution is to define that protocol with a rigorous type
hierarchy satisfying all possible edge cases. The solution is *not* to pretend
that no solutions exist, that the existing non-solution suffices, and instead
do nothing. Welcome to :mod:`typing`, where no one cares that nothing works as
advertised (or at all)... *and no one ever will.*
.. _typeshed:
https://github.com/python/typeshed/issues/3225#issuecomment-529277448
'''
# Conditionally initialized by the _init() function below.
_Pep544BinaryIO: Any = None
'''
Typed version of the return of open() in binary mode.
'''
# Conditionally initialized by the _init() function below.
_Pep544TextIO: Any = None
'''
Typed version of the return of open() in text mode.
'''
# ....................{ TESTERS                           }....................
# If the active Python interpreter targets at least Python >= 3.8 and thus
# supports PEP 544, define these functions appropriately.
#
# NOTE: the docstrings for these testers are assigned separately under the
# "TESTERS ~ doc" section below (shared between both branches of this
# conditional), which is why no docstrings are defined inline here.
if IS_PYTHON_AT_LEAST_3_8:
    def is_hint_pep544_ignorable_or_none(
        hint: object, hint_sign: HintSign) -> Optional[bool]:
        # Return either:
        # * If this hint is the "typing.Protocol" superclass directly
        #   parametrized by one or more type variables (e.g.,
        #   "typing.Protocol[S, T]"), true. For unknown and presumably
        #   uninteresting reasons, *ALL* possible objects satisfy this
        #   superclass. Ergo, this superclass and *ALL* parametrizations of
        #   this superclass are synonymous with the "object" root superclass.
        # * Else, "None".
        #
        # The "or None" converts the False case of startswith() into None
        # (i.e., "not PEP 544-relevant") rather than "unignorable".
        return repr(hint).startswith('typing.Protocol[') or None

    def is_hint_pep484_generic_io(hint: object) -> bool:
        # Attempt to...
        try:
            # Return true only if this hint is a PEP 484-compliant IO generic
            # base class (i.e., a key of this private mapping).
            return hint in _HINT_PEP484_IO_GENERIC_TO_PEP544_PROTOCOL
        # If this hint is unhashable, this hint is by definition *NOT* a PEP
        # 484-compliant IO generic base class. In this case, return false.
        except TypeError:
            return False

    def is_hint_pep544_protocol(hint: object) -> bool:
        # Defer version-dependent imports. "typing.Protocol" only exists
        # under Python >= 3.8, which this branch guarantees.
        from typing import Protocol

        # Return true only if this hint is...
        return (
            # A PEP 544-compliant protocol *AND*...
            is_type_subclass(hint, Protocol) and  # type: ignore[arg-type]
            # *NOT* a builtin type. For unknown reasons, some but *NOT* all
            # builtin types erroneously present themselves to be PEP
            # 544-compliant protocols under Python >= 3.8: e.g.,
            #     >>> from typing import Protocol
            #     >>> isinstance(str, Protocol)
            #     False   # <--- this makes sense
            #     >>> isinstance(int, Protocol)
            #     True    # <--- this makes no sense whatsoever
            #
            # Since builtin types are obviously *NOT* PEP 544-compliant
            # protocols, explicitly exclude all such types. Why, Guido? Why?
            not (isinstance(hint, type) and is_type_builtin(hint))
        )
# Else, the active Python interpreter targets at most Python < 3.8 and thus
# fails to support PEP 544. In this case, fallback to declaring this function
# to unconditionally return False.
else:
    def is_hint_pep544_ignorable_or_none(
        hint: object, hint_sign: HintSign) -> Optional[bool]:
        # No object can be PEP 544-compliant under this interpreter.
        return None

    def is_hint_pep484_generic_io(hint: object) -> bool:
        return False

    def is_hint_pep544_protocol(hint: object) -> bool:
        return False
# ....................{ TESTERS ~ doc }....................
is_hint_pep544_ignorable_or_none.__doc__ = '''
``True`` only if the passed object is a :pep:`544`-compliant **ignorable
type hint,** ``False`` only if this object is a :pep:`544`-compliant
unignorable type hint, and ``None`` if this object is *not* `PEP
544`_-compliant.
Specifically, this tester function returns ``True`` only if this object is
a deeply ignorable :pep:`544`-compliant type hint, including:
* A parametrization of the :class:`typing.Protocol` abstract base class
(ABC) by one or more type variables. As the name implies, this ABC is
generic and thus fails to impose any meaningful constraints. Since a type
variable in and of itself also fails to impose any meaningful
constraints, these parametrizations are safely ignorable in all possible
contexts: e.g.,
.. code-block:: python
from typing import Protocol, TypeVar
T = TypeVar('T')
def noop(param_hint_ignorable: Protocol[T]) -> T: pass
This tester is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as this tester is only safely callable
by the memoized parent
:func:`beartype._util.hint.utilhinttest.is_hint_ignorable` tester.
Parameters
----------
hint : object
Type hint to be inspected.
hint_sign : HintSign
**Sign** (i.e., arbitrary object uniquely identifying this hint).
Returns
----------
Optional[bool]
Either:
* If this object is :pep:`544`-compliant:
* If this object is a ignorable, ``True``.
* Else, ``False``.
* If this object is *not* :pep:`544`-compliant, ``None``.
'''
is_hint_pep484_generic_io.__doc__ = '''
``True`` only if the passed object is a functionally useless
:pep:`484`-compliant :mod:`typing` **IO generic superclass** (i.e., either
:class:`typing.IO` itself *or* a subclass of :class:`typing.IO` defined by
the :mod:`typing` module effectively unusable at runtime due to botched
implementation details) that is losslessly replaceable with a useful
:pep:`544`-compliant :mod:`beartype` **IO protocol** (i.e., either
:class:`beartype._data.hint.pep.proposal.datapep544._Pep544IO` itself
*or* a subclass of that class defined by this submodule intentionally
designed to be usable at runtime).
This tester is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is a :pep:`484`-compliant IO generic base
class.
See Also
----------
:class:`beartype._data.hint.pep.proposal.datapep544._Pep544IO`
Further commentary.
'''
is_hint_pep544_protocol.__doc__ = '''
``True`` only if the passed object is a :pep:`544`-compliant **protocol**
(i.e., subclass of the :class:`typing.Protocol` superclass).
This tester is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is a :pep:`544`-compliant protocol.
'''
# ....................{ REDUCERS }....................
def reduce_hint_pep484_generic_io_to_pep544_protocol(hint: type) -> type:
'''
:pep:`544`-compliant :mod:`beartype` **IO protocol** (i.e., either
:class:`beartype._data.hint.pep.proposal.datapep544._Pep544IO`
itself *or* a subclass of that class defined by this submodule
intentionally designed to be usable at runtime) corresponding to the passed
:pep:`484`-compliant :mod:`typing` **IO generic base class** (i.e., either
:class:`typing.IO` itself *or* a subclass of :class:`typing.IO` defined by
the :mod:`typing` module effectively unusable at runtime due to botched
implementation details).
| |
<filename>src/app/beer_garden/local_plugins/manager.py
# -*- coding: utf-8 -*-
import string
import time
from concurrent.futures import ThreadPoolExecutor, wait
from concurrent.futures._base import Future
from enum import Enum
import json
import logging
import os
import sys
from brewtils.models import Event, Events, Runner, System
from brewtils.specification import _SYSTEM_SPEC
from brewtils.stoppable_thread import StoppableThread
from importlib.machinery import SourceFileLoader
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from random import choice
from types import ModuleType
from typing import Any, Dict, Iterable, List, Optional
import beer_garden.config as config
from beer_garden.errors import PluginValidationError
from beer_garden.events import publish, publish_event
from beer_garden.local_plugins.env_help import expand_string
from beer_garden.local_plugins.runner import ProcessRunner
# This is ... complicated. See the PluginManager docstring
lpm_proxy = None  # type: Optional[PluginManager]

# Name of the per-plugin configuration file expected in each plugin directory
CONFIG_NAME = "beer.conf"

logger = logging.getLogger(__name__)
def runner(*args, **kwargs):
    """Proxy: return a single runner representation from the PluginManager."""
    return lpm_proxy.get_runner(*args, **kwargs)
def runners(*args, **kwargs):
    """Proxy: return representations of all current runners.

    NOTE(review): arguments are accepted but silently ignored —
    ``get_runners()`` takes none. Presumably kept for a uniform proxy
    interface; confirm callers never pass meaningful arguments.
    """
    return lpm_proxy.get_runners()
def update(*args, **kwargs):
    """Proxy: update a runner's stopped/restart flags via the PluginManager."""
    return lpm_proxy.update(*args, **kwargs)
def has_instance_id(*args, **kwargs):
    """Proxy: whether any runner is associated with the given instance ID."""
    return lpm_proxy.has_instance_id(*args, **kwargs)
@publish_event(Events.RUNNER_STARTED)
def start(*args, **kwargs):
    """Start a runner and publish a RUNNER_STARTED event.

    NOTE(review): this delegates to ``lpm_proxy.restart`` — "start" appears
    to be intentionally implemented as a restart of an existing runner;
    confirm against callers.
    """
    return lpm_proxy.restart(*args, **kwargs)
@publish_event(Events.RUNNER_STOPPED)
def stop(*args, **kwargs):
    """Stop a runner and publish a RUNNER_STOPPED event."""
    return lpm_proxy.stop_one(*args, **kwargs)
@publish_event(Events.RUNNER_REMOVED)
def remove(*args, **kwargs):
    """Stop a runner, remove it from the runner list, and publish
    RUNNER_REMOVED.

    Any caller-supplied ``remove`` kwarg is discarded so the forced
    ``remove=True`` below cannot collide with it.
    """
    kwargs.pop("remove", None)
    return stop(*args, remove=True, **kwargs)
def rescan(*args, **kwargs) -> List[Runner]:
    """Scan the plugin directory and start any runners found there.

    A RUNNER_STARTED event is published for every runner created by the
    scan.

    Returns:
        The list of newly created runner states.
    """
    started = lpm_proxy.scan_path(*args, **kwargs)

    for new_runner in started:
        event = Event(
            name=Events.RUNNER_STARTED.name,
            payload_type=Runner.__name__,
            payload=new_runner,
        )
        publish(event)

    return started
def reload(path: str = None, system: System = None):
    """Reload the runners living in a single plugin directory.

    Args:
        path: The path to reload. It's expected this will be only the final
            part of the full path, so it will be appended to the overall
            plugin path.
        system: If path is not specified, will attempt to determine the
            correct path to use based on the given system

    Any existing runners for the directory are removed (stopping them if
    necessary) before a rescan of the directory is initiated.
    """
    current_runners = runners()

    # Derive the path from the system's instances when not given explicitly
    if path is None:
        for instance in system.instances:
            for candidate in current_runners:
                if candidate.instance_id == instance.id:
                    path = candidate.path
                    break

    logger.debug(f"Reloading runners in directory {path}")

    for target in current_runners:
        if target.path == path:
            remove(runner_id=target.id, remove=True)

    return rescan(paths=[lpm_proxy.plugin_path() / path])
def handle_event(event):
    """Route instance lifecycle events to the local PluginManager proxy."""
    # Events originating from remote gardens are not our concern
    if event.garden != config.get("garden.name"):
        return

    if event.name == Events.INSTANCE_INITIALIZED.name:
        lpm_proxy.handle_initialize(event)
    elif event.name == Events.INSTANCE_STOPPED.name:
        lpm_proxy.handle_stopped(event)
class PluginManager(StoppableThread):
"""Manages creation and destruction of PluginRunners
The instance of this class is intended to be created with a
``multiprocessing.managers.BaseManager``. This essentially means that it lives in
its own process, and access to it is achieved using a proxy object. This proxy
object is the module-scoped ``lpm_proxy``.
We do this because we can use the proxy object in the main application process AND
we can pass it to the entry point processes. The entry point processes can then use
the proxy transparently.
There are some catches, of course. One is performance, but I don't think it's
significant enough to be a concern. The other is important enough to emphasize:
HEY: RETURN TYPES FOR ALL PUBLIC METHODS OF THIS CLASS **MUST** BE PICKLEABLE!
This proxy concept is pretty neat, but at some point the data does need to traverse
a process boundary. All public (non-underscore) methods are exposed (able to be
called by a client using the proxy). That means that anything returned from those
methods must be pickleable to work.
"""
def __init__(
self,
plugin_dir=None,
log_dir=None,
connection_info=None,
username=None,
password=<PASSWORD>,
):
super().__init__(logger=logging.getLogger(__name__), name="PluginManager")
self._display_name = "Plugin Manager"
self._runners: List[ProcessRunner] = []
self._plugin_path = Path(plugin_dir) if plugin_dir else None
self._log_dir = log_dir
self._connection_info = connection_info
self._username = username
self._password = password
    def run(self):
        """Thread main loop: poll runner health once per second until stopped."""
        self.logger.debug(self._display_name + " is started")

        # wait(1) sleeps up to one second and returns True once a stop is
        # requested, ending the loop
        while not self.wait(1):
            self.monitor()

        self.logger.debug(self._display_name + " is stopped")
    def monitor(self):
        """Ensure that processes are still alive

        Iterate through all runners, restarting any processes that have stopped.

        This must be done inside of the PluginManager class because of the
        process.poll() check. That can only be done from within this class.
        """
        for runner in self._runners:
            # Bail out mid-scan if the manager itself was asked to stop
            if self.stopped():
                break

            # poll() returns a non-None exit code once the process has died
            if runner.process and runner.process.poll() is not None:
                if runner.restart:
                    self.logger.warning(f"Runner {runner} stopped, restarting")
                    self._restart(runner)
                # Only log/flag a dead runner once (dead acts as a latch)
                elif not runner.stopped and not runner.dead:
                    self.logger.warning(f"Runner {runner} is dead, not restarting")
                    runner.dead = True
    def plugin_path(self):
        """Return the top-level plugin directory (a Path, or None if unset)."""
        return self._plugin_path
    def paths(self) -> List[str]:
        """All current runner paths (deduplicated working directories)"""
        return list({runner.process_cwd for runner in self._runners})
def get_runner(self, runner_id=None) -> Optional[Runner]:
"""Get a representation of current runners"""
process_runner = self._from_runner_id(runner_id)
if process_runner:
return process_runner.state()
def get_runners(self) -> List[Runner]:
"""Get a representation of current runners"""
return [runner.state() for runner in self._runners]
def has_instance_id(self, instance_id: str) -> bool:
"""Bool indicating if a given instance ID has an associated runner"""
return self._from_instance_id(instance_id=instance_id) is not None
def handle_initialize(self, event: Event) -> None:
"""Called whenever an INSTANCE_INITIALIZED occurs
This associates the event instance with a specific runner.
"""
runner_id = event.payload.metadata.get("runner_id")
if runner_id:
instance = event.payload
self.logger.debug(f"Associating {runner_id} with {instance.id}")
runner = self._from_runner_id(runner_id)
runner.associate(instance=instance)
runner.restart = True
def handle_stopped(self, event: Event) -> None:
"""Called whenever an INSTANCE_STOPPED occurs
If the event instance is associated with any runner that this PluginManager
knows about then that runner will be marked as stopped and no longer monitored.
"""
runner = self._from_instance_id(event.payload.id)
if runner:
runner.stopped = True
runner.restart = False
runner.dead = False
def restart(self, runner_id=None, instance_id=None) -> Optional[Runner]:
"""Restart the runner for a particular Instance ID"""
runner = self._from_runner_id(runner_id) or self._from_instance_id(instance_id)
if runner:
return self._restart(runner).state()
    def update(
        self, runner_id=None, instance_id=None, restart=None, stopped=None
    ) -> Optional[Runner]:
        """Update a runner state

        Args:
            runner_id: Runner to update, identified by runner ID
            instance_id: Runner to update, identified by associated instance ID
            restart: If not None, new value for the runner's restart flag
            stopped: If not None, new value for the runner's stopped flag

        Returns:
            The updated runner state, or None when no matching runner exists
            (hence the Optional return annotation).
        """
        runner = self._from_runner_id(runner_id) or self._from_instance_id(instance_id)

        if not runner:
            return

        if stopped is not None:
            runner.stopped = stopped
        if restart is not None:
            runner.restart = restart

        return runner.state()
    def stop_one(
        self, runner_id=None, instance_id=None, send_sigterm=True, remove=False
    ) -> Runner:
        """Stop the runner for a given runner or Instance ID

        The PluginManager has no ability to place messages on the message queue, so
        it's possible that a stop message will already have been sent to the plugin
        that's being asked to stop. If that's NOT the case then send_sigterm should be
        set to True to attempt to stop the runner gracefully.

        This will wait for the runner to stop for plugin.local.timeout.shutdown seconds.
        If the runner is not stopped after that time its process will be killed with
        SIGKILL.

        Args:
            runner_id: The runner ID to stop
            instance_id: The instance ID to stop
            send_sigterm: If true, send SIGTERM before waiting
            remove: Flag controlling if the runner should be removed from runner list

        Returns:
            The stopped runner

        Raises:
            Exception: Neither ID resolved to a known runner
        """
        runner = self._from_runner_id(runner_id) or self._from_instance_id(instance_id)

        if not runner:
            raise Exception(
                f"Could not determine runner using runner ID {runner_id} and "
                f"instance ID {instance_id}"
            )

        if send_sigterm:
            runner.term()

        # Grace period before escalating to SIGKILL
        runner.join(config.get("plugin.local.timeout.shutdown"))

        if runner.is_alive():
            runner.dead = True
            runner.kill()

        runner.stopped = True
        runner.restart = False

        if remove:
            self._runners.remove(runner)

        return runner.state()
    def stop_all(self) -> None:
        """Stop all known runners and remove them from the runner list."""
        return self._stop_multiple(self._runners)
    def scan_path(self, paths: Iterable[str] = None) -> List[Runner]:
        """Scan a given directory for valid plugins

        Note: This scan does not walk the directory tree - all plugins must be
        in the top level of the given path.

        Args:
            paths: The paths to scan. If None will be all subdirectories of the plugin
            path specified at initialization

        Returns:
            The states of all newly created runners (an empty list if none
            were created)
        """
        plugin_paths = paths or self._plugin_path.iterdir()

        new_runners = []

        try:
            for path in plugin_paths:
                try:
                    # Only new directories are considered; paths() holds the
                    # working directories of existing runners
                    if path.is_dir() and path not in self.paths():
                        new_runners += self._create_runners(path)
                except Exception as ex:
                    self.logger.exception(f"Error loading plugin at {path}: {ex}")
        except Exception as ex:
            self.logger.exception(f"Error scanning plugin path: {ex}")

        return [runner.state() for runner in new_runners]
def _from_instance_id(self, instance_id: str) -> Optional[ProcessRunner]:
for runner in self._runners:
if runner.instance_id == instance_id:
return runner
return None
def _from_runner_id(self, runner_id: str) -> Optional[ProcessRunner]:
for runner in self._runners:
if runner.runner_id == runner_id:
return runner
return None
def _restart(self, runner: ProcessRunner) -> ProcessRunner:
new_runner = ProcessRunner(
runner_id=runner.runner_id,
process_args=runner.process_args,
process_cwd=runner.process_cwd,
process_env=runner.process_env,
capture_streams=runner.capture_streams,
)
self._runners.remove(runner)
self._runners.append(new_runner)
new_runner.start()
return new_runner
def _stop_multiple(self, runners: Iterable[ProcessRunner] = None) -> None:
# If not specified, default to all runners
if runners is None:
runners = self._runners
# If empty, we're done
if len(runners) < 1:
return
shutdown_pool = ThreadPoolExecutor(len(runners))
stop_futures: List[Future] = []
for runner in runners:
stop_futures.append(
shutdown_pool.submit(
self.stop_one, runner_id=runner.runner_id, send_sigterm=True
)
)
wait(stop_futures)
for runner in runners:
self._runners.remove(runner)
def _create_runners(self, plugin_path: Path) -> List[ProcessRunner]:
"""Create and start ProcessRunners for a particular directory
It will use the validator to validate the config.
Args:
plugin_path: The path of the plugin
Returns:
Newly created | |
should contain the date, the payment currency and the payment rate specified on the voucher
amount_in_company_currency = self.pool.get('res.currency').compute(cr, uid, currency_id, company_currency.id, amount, context=context)
res['value']['paid_amount_in_company_currency'] = amount_in_company_currency
return res
def onchange_amount(self, cr, uid, ids, amount, rate, partner_id, journal_id, currency_id, ttype, date, payment_rate_currency_id, company_id, context=None):
if context is None:
context = {}
ctx = context.copy()
ctx.update({'date': date})
#read the voucher rate with the right date in the context
currency_id = currency_id or self.pool.get('res.company').browse(cr, uid, company_id, context=ctx).currency_id.id
voucher_rate = self.pool.get('res.currency').read(cr, uid, currency_id, ['rate'], context=ctx)['rate']
ctx.update({
'voucher_special_currency': payment_rate_currency_id,
'voucher_special_currency_rate': rate * voucher_rate})
res = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
vals = self.onchange_rate(cr, uid, ids, rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
for key in vals.keys():
res[key].update(vals[key])
return res
    def recompute_payment_rate(self, cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=None):
        """Set default payment_rate and payment_rate_currency_id in ``vals``
        and recompute the rate-dependent values.

        The payment rate currency defaults to the voucher currency unless one
        of the open lines uses a different currency, in which case the first
        such currency is used and the rate between the two is computed.
        Returns ``vals`` with the updated values merged in.
        """
        if context is None:
            context = {}
        #on change of the journal, we need to set also the default value for payment_rate and payment_rate_currency_id
        currency_obj = self.pool.get('res.currency')
        journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
        company_id = journal.company_id.id
        payment_rate = 1.0
        currency_id = currency_id or journal.company_id.currency_id.id
        payment_rate_currency_id = currency_id
        ctx = context.copy()
        ctx.update({'date': date})
        # Pick which one2many side to inspect based on the voucher type
        o2m_to_loop = False
        if ttype == 'receipt':
            o2m_to_loop = 'line_cr_ids'
        elif ttype == 'payment':
            o2m_to_loop = 'line_dr_ids'
        if o2m_to_loop and 'value' in vals and o2m_to_loop in vals['value']:
            for voucher_line in vals['value'][o2m_to_loop]:
                if voucher_line['currency_id'] != currency_id:
                    # we take as default value for the payment_rate_currency_id, the currency of the first invoice that
                    # is not in the voucher currency
                    payment_rate_currency_id = voucher_line['currency_id']
                    tmp = currency_obj.browse(cr, uid, payment_rate_currency_id, context=ctx).rate
                    payment_rate = tmp / currency_obj.browse(cr, uid, currency_id, context=ctx).rate
                    break
        vals['value'].update({
            'payment_rate': payment_rate,
            'currency_id': currency_id,
            'payment_rate_currency_id': payment_rate_currency_id
        })
        #read the voucher rate with the right date in the context
        voucher_rate = self.pool.get('res.currency').read(cr, uid, currency_id, ['rate'], context=ctx)['rate']
        ctx.update({
            'voucher_special_currency_rate': payment_rate * voucher_rate,
            'voucher_special_currency': payment_rate_currency_id})
        res = self.onchange_rate(cr, uid, ids, payment_rate, amount, currency_id, payment_rate_currency_id, company_id, context=ctx)
        # Merge the onchange_rate result into the caller-supplied vals
        for key in res.keys():
            vals[key].update(res[key])
        return vals
def basic_onchange_partner(self, cr, uid, ids, partner_id, journal_id, ttype, context=None):
partner_pool = self.pool.get('res.partner')
journal_pool = self.pool.get('account.journal')
res = {'value': {'account_id': False}}
if not partner_id or not journal_id:
return res
journal = journal_pool.browse(cr, uid, journal_id, context=context)
partner = partner_pool.browse(cr, uid, partner_id, context=context)
account_id = False
if journal.type in ('sale','sale_refund'):
account_id = partner.property_account_receivable.id
elif journal.type in ('purchase', 'purchase_refund','expense'):
account_id = partner.property_account_payable.id
else:
account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id
res['value']['account_id'] = account_id
return res
    def onchange_partner_id(self, cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=None):
        """Recompute the voucher default account, lines, and payment rate when
        the partner changes.

        For 'sale'/'purchase' vouchers only the account is recomputed; for
        receipts/payments the open lines and payment rate are recomputed too.
        """
        if not journal_id:
            return {}
        if context is None:
            context = {}
        #TODO: comment me and use me directly in the sales/purchases views
        res = self.basic_onchange_partner(cr, uid, ids, partner_id, journal_id, ttype, context=context)
        if ttype in ['sale', 'purchase']:
            return res
        ctx = context.copy()
        # not passing the payment_rate currency and the payment_rate in the context but it's ok because they are reset in recompute_payment_rate
        ctx.update({'date': date})
        vals = self.recompute_voucher_lines(cr, uid, ids, partner_id, journal_id, amount, currency_id, ttype, date, context=ctx)
        vals2 = self.recompute_payment_rate(cr, uid, ids, vals, currency_id, date, ttype, journal_id, amount, context=context)
        # Merge both recomputation results into the basic onchange result
        for key in vals.keys():
            res[key].update(vals[key])
        for key in vals2.keys():
            res[key].update(vals2[key])
        #TODO: can probably be removed now
        #TODO: onchange_partner_id() should not returns [pre_line, line_dr_ids, payment_rate...] for type sale, and not
        # [pre_line, line_cr_ids, payment_rate...] for type purchase.
        # We should definitively split account.voucher object in two and make distinct on_change functions. In the
        # meanwhile, bellow lines must be there because the fields aren't present in the view, what crashes if the
        # onchange returns a value for them
        if ttype == 'sale':
            del(res['value']['line_dr_ids'])
            del(res['value']['pre_line'])
            del(res['value']['payment_rate'])
        elif ttype == 'purchase':
            del(res['value']['line_cr_ids'])
            del(res['value']['pre_line'])
            del(res['value']['payment_rate'])
        return res
    def recompute_voucher_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, ttype, date, context=None):
        """
        Returns a dict that contains new values and context

        Rebuilds the voucher's credit/debit line suggestions from the
        partner's open move lines, allocating the voucher amount (``price``)
        to matching lines, oldest first.

        @param partner_id: latest value from user input for field partner_id
        @param args: other arguments
        @param context: context arguments, like lang, time zone
        @return: Returns a dict which contains new values, and context
        """
        def _remove_noise_in_o2m():
            """if the line is partially reconciled, then we must pay attention to display it only once and
                in the good o2m.
                This function returns True if the line is considered as noise and should not be displayed
            """
            # NOTE: reads "line" and "currency_id" from the enclosing scope
            if line.reconcile_partial_id:
                if currency_id == line.currency_id.id:
                    if line.amount_residual_currency <= 0:
                        return True
                else:
                    if line.amount_residual <= 0:
                        return True
            return False

        if context is None:
            context = {}
        context_multi_currency = context.copy()

        currency_pool = self.pool.get('res.currency')
        move_line_pool = self.pool.get('account.move.line')
        partner_pool = self.pool.get('res.partner')
        journal_pool = self.pool.get('account.journal')
        line_pool = self.pool.get('account.voucher.line')

        #set default values
        default = {
            'value': {'line_dr_ids': [] ,'line_cr_ids': [] ,'pre_line': False,},
        }

        #drop existing lines
        line_ids = ids and line_pool.search(cr, uid, [('voucher_id', '=', ids[0])]) or False
        if line_ids:
            line_pool.unlink(cr, uid, line_ids)

        if not partner_id or not journal_id:
            return default

        journal = journal_pool.browse(cr, uid, journal_id, context=context)
        partner = partner_pool.browse(cr, uid, partner_id, context=context)
        currency_id = currency_id or journal.company_id.currency_id.id

        total_credit = 0.0
        total_debit = 0.0
        account_type = None
        if context.get('account_id'):
            account_type = self.pool['account.account'].browse(cr, uid, context['account_id'], context=context).type
        if ttype == 'payment':
            if not account_type:
                account_type = 'payable'
            total_debit = price or 0.0
        else:
            total_credit = price or 0.0
            if not account_type:
                account_type = 'receivable'

        # Candidate move lines: either from the context or all open (not
        # reconciled) valid lines of the partner on the relevant account type
        if not context.get('move_line_ids', False):
            ids = move_line_pool.search(cr, uid, [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner_id)], context=context)
        else:
            ids = context['move_line_ids']
        invoice_id = context.get('invoice_id', False)
        company_currency = journal.company_id.currency_id.id
        move_lines_found = []

        #order the lines by most old first
        ids.reverse()
        account_move_lines = move_line_pool.browse(cr, uid, ids, context=context)

        #compute the total debit/credit and look for a matching open amount or invoice
        for line in account_move_lines:
            if _remove_noise_in_o2m():
                continue

            if invoice_id:
                if line.invoice.id == invoice_id:
                    #if the invoice linked to the voucher line is equal to the invoice_id in context
                    #then we assign the amount on that line, whatever the other voucher lines
                    move_lines_found.append(line.id)
            elif currency_id == company_currency:
                #otherwise treatments is the same but with other field names
                if line.amount_residual == price:
                    #if the amount residual is equal the amount voucher, we assign it to that voucher
                    #line, whatever the other voucher lines
                    move_lines_found.append(line.id)
                    break
                #otherwise we will split the voucher amount on each line (by most old first)
                total_credit += line.credit or 0.0
                total_debit += line.debit or 0.0
            elif currency_id == line.currency_id.id:
                if line.amount_residual_currency == price:
                    move_lines_found.append(line.id)
                    break
                total_credit += line.credit and line.amount_currency or 0.0
                total_debit += line.debit and line.amount_currency or 0.0

        remaining_amount = price
        #voucher line creation
        for line in account_move_lines:

            if _remove_noise_in_o2m():
                continue

            if line.currency_id and currency_id == line.currency_id.id:
                amount_original = abs(line.amount_currency)
                amount_unreconciled = abs(line.amount_residual_currency)
            else:
                #always use the amount booked in the company currency as the basis of the conversion into the voucher currency
                amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or line.debit or 0.0, context=context_multi_currency)
                amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual), context=context_multi_currency)
            line_currency_id = line.currency_id and line.currency_id.id or company_currency
            rs = {
                'name':line.move_id.name,
                'type': line.credit and 'dr' or 'cr',
                'move_line_id':line.id,
                'account_id':line.account_id.id,
                'amount_original': amount_original,
                'amount': (line.id in move_lines_found) and min(abs(remaining_amount), amount_unreconciled) or 0.0,
                'date_original':line.date,
                'date_due':line.date_maturity,
                'amount_unreconciled': amount_unreconciled,
                'currency_id': line_currency_id,
            }
            remaining_amount -= rs['amount']
            #in case a corresponding move_line hasn't been found, we now try to assign the voucher amount
            #on existing invoices: we split voucher amount by most old first, but only for lines in the same currency
            if not move_lines_found:
                if currency_id == line_currency_id:
                    if line.credit:
                        amount = min(amount_unreconciled, abs(total_debit))
                        rs['amount'] = amount
                        total_debit -= amount
                    else:
                        amount = min(amount_unreconciled, abs(total_credit))
                        rs['amount'] = amount
                        total_credit -= amount

            # A fully allocated line is proposed for reconciliation
            if rs['amount_unreconciled'] == rs['amount']:
                rs['reconcile'] = True

            if rs['type'] == 'cr':
                default['value']['line_cr_ids'].append(rs)
            else:
                default['value']['line_dr_ids'].append(rs)

            if len(default['value']['line_cr_ids']) > 0:
                default['value']['pre_line'] = 1
            elif len(default['value']['line_dr_ids']) > 0:
                default['value']['pre_line'] = 1
            default['value']['writeoff_amount'] = self._compute_writeoff_amount(cr, uid, default['value']['line_dr_ids'], default['value']['line_cr_ids'], price, ttype)
        return default
def onchange_payment_rate_currency(self, cr, uid, ids, currency_id, payment_rate, payment_rate_currency_id, date, amount, company_id, context=None):
if context is None:
context = {}
res = {'value': {}}
if currency_id:
#set the default payment rate of the voucher and compute the paid amount in company currency
ctx = context.copy()
ctx.update({'date': date})
#read the voucher rate with the right date in the context
voucher_rate = self.pool.get('res.currency').read(cr, uid, currency_id, | |
<gh_stars>10-100
#!/usr/bin/env python
"""
Copyright 2010-2018 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This module takes care of building a workflow using either user
choices interactively, or an option file containing all needed
parameters.
This module compiles results from a cluster simulation and creates
a csv file containing information about all stations/realizations.
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import glob
import shutil
import argparse
import datetime
import numpy as np
from scipy import integrate
# Import Broadband modules
from install_cfg import InstallCfg
from station_list import StationList
import bband_utils
import xml_handler
import velocity_models
# Import Pynga and its utilities
import pynga.utils as putils
# Initialize global variables
INSTALL = InstallCfg.getInstance()
# Conversion factor between cm/s/s and g (standard gravity ~= 980.665 cm/s/s);
# read_bbp() divides by this to convert BBP acceleration values to g
CM2G = 980.664999
def parse_arguments():
    """
    Parse command-line options

    Validates that an input directory was given, exists, and looks like a
    top-level cluster simulation directory (contains a "Sims" entry), and
    that an output directory was given. Exits with -1 on any failure.
    """
    parser = argparse.ArgumentParser(description="Exports data from a BBP cluster "
                                     "simulation to a flat file.")
    parser.add_argument("--input_dir", "-i", dest="input_dir",
                        help="input directory")
    parser.add_argument("--output_dir", "-o", dest="output_dir",
                        help="output directory")
    parser.add_argument("--copy", "-c", dest="copy_timeseries", action='store_true',
                        help="copy all timeseries into the output directory")
    parser.add_argument("--prefix", "-p", dest="prefix",
                        default="BBP_Study",
                        help="prefix for output files")
    parser.add_argument("--suffix", "-s", dest="suffix",
                        default="",
                        help="suffix for output files")
    args = parser.parse_args()

    if args.input_dir is None:
        print("[ERROR]: Please provide input directory!")
        sys.exit(-1)
    else:
        if not os.path.isdir(args.input_dir):
            print("[ERROR]: Please provide valid input directory!")
            sys.exit(-1)
        # Idiomatic membership test (was: "not 'Sims' in ...")
        if "Sims" not in os.listdir(args.input_dir):
            print("[ERROR]: Please provide top-level cluster simulation directory!")
            sys.exit(-1)
    if args.output_dir is None:
        print("[ERROR]: Please provide output directory!")
        sys.exit(-1)

    return args
def collect_simulation_params(args):
    """
    This function collects simulation-wide parameters and stores them as
    attributes on args: method, magnitude, BBP version, module list, and
    site-response module.

    NOTE(review): relies on args.realizations / args.top_level_indir /
    args.top_level_outdir being set by the caller beforehand, and on the
    helper get_method_from_html() defined elsewhere in this module.
    """
    # Get paths to one xml and a SRC file (the first realization is taken as
    # representative of the whole study; previously this variable was
    # assigned but never used)
    first_realization = args.realizations[0]
    xml_dir = os.path.join(args.input_dir, "Xml")
    xml_files = glob.glob("%s/*.xml" % (xml_dir))
    xml_path = os.path.join(xml_dir, xml_files[0])
    src_dir = os.path.join(args.top_level_indir, first_realization)
    src_files = glob.glob("%s/*.src" % (src_dir))
    src_path = os.path.join(src_dir, src_files[0])
    html_dir = os.path.join(args.top_level_outdir, first_realization)
    html_file = glob.glob("%s/*.html" % (html_dir))[0]

    # Get simulation method from html file
    args.general_method = get_method_from_html(html_file).lower()

    # Parse SRC and get magnitude
    src_keys = bband_utils.parse_src_file(src_path)
    args.general_magnitude = src_keys["magnitude"]

    # Parse XML file
    workflow_obj = xml_handler.parse_xml(xml_path)
    args.bbp_software_info_version = str(workflow_obj.version)
    modules = []
    for item in workflow_obj.workflow:
        modules.append(str(item.getName()))
    args.bbp_software_info_modules = modules
    # Site response is applied iff the WccSiteamp module is in the workflow
    if "WccSiteamp" in modules:
        args.bbp_software_info_site = "GP2014"
    else:
        args.bbp_software_info_site = "None"
    args.general_eqid = "-999"
def calculate_vs30(vmodel_file):
"""
Calculates the Vs30 from the velocity model file
"""
# Need to calculate Vs30 by adding each layer's up to 30m
total_time = 0.0
remaining_width = 30.0
input_file = open(vmodel_file, 'r')
for line in input_file:
line = line.strip()
if not line:
continue
tokens = [float(item) for item in line.split()]
if len(tokens) != 6:
continue
layer_width = tokens[0] * 1000 # Convert to meters
layer_vs = tokens[2] * 1000
if layer_width <= remaining_width:
remaining_width = remaining_width - layer_width
total_time = total_time + (layer_width / layer_vs)
else:
total_time = total_time + (remaining_width / layer_vs)
remaining_width = 0.0
# Check if all done
if remaining_width == 0.0:
break
input_file.close()
# Calculate Vs30 based on total_time
return (30.0 / total_time)
def calculate_mechanism(rake):
    """
    Map a rake angle (degrees, in [-180, 180]) to a faulting mechanism.

    Returns one of "Strike-Slip", "Normal", "Reverse",
    "Reverse-Oblique", "Normal-Oblique", or "Unknown" when the rake
    falls outside the valid range.
    """
    # Negative rakes: half-open intervals [lo, hi)
    negative_bands = (
        (-180, -150, "Strike-Slip"),
        (-150, -120, "Normal-Oblique"),
        (-120, -60, "Normal"),
        (-60, -30, "Normal-Oblique"),
    )
    for low, high, mechanism in negative_bands:
        if low <= rake < high:
            return mechanism
    # Central band is closed on both ends
    if -30 <= rake <= 30:
        return "Strike-Slip"
    # Positive rakes: half-open intervals (lo, hi]
    positive_bands = (
        (30, 60, "Reverse-Oblique"),
        (60, 120, "Reverse"),
        (120, 150, "Reverse-Oblique"),
        (150, 180, "Strike-Slip"),
    )
    for low, high, mechanism in positive_bands:
        if low < rake <= high:
            return mechanism
    return "Unknown"
def read_bbp(bbp_file):
    """
    Read a BBP file and return 4 lists: the timestamps and the 3
    component time series. Acceleration values are converted from
    cm/s^2 to g by dividing by the module-level CM2G constant.
    (Docstring previously said "cm/2^2" — typo for cm/s^2.)
    """
    times = []
    comp1 = []
    comp2 = []
    comp3 = []
    # `with` closes the file even if a malformed line raises.
    with open(bbp_file, 'r') as ifile:
        for line in ifile:
            line = line.strip()
            # Skip comments
            if line.startswith('%') or line.startswith('#'):
                continue
            pieces = [float(x) for x in line.split()]
            times.append(pieces[0])
            comp1.append(pieces[1] / CM2G)
            comp2.append(pieces[2] / CM2G)
            comp3.append(pieces[3] / CM2G)
    # All done, return arrays
    return times, comp1, comp2, comp3
def calculate_arias(F, dt, percent):
    """
    For a given motion, return (t, index, It): the time, sample index,
    and intensity value at which `percent` percent of the total Arias
    intensity of F (sampled at interval dt) is reached, assuming time
    starts at 0 sec.
    """
    # scipy renamed cumtrapz to cumulative_trapezoid (SciPy 1.6) and
    # removed the old name in later releases; accept either.
    cumulative = getattr(integrate, "cumulative_trapezoid",
                         getattr(integrate, "cumtrapz", None))
    n = len(F)
    a_i = [pow(value, 2) for value in F]
    I = cumulative(a_i) * dt
    # Total Arias intensity (trapezoid rule including the end samples)
    Ia = (F[0]**2) * dt / 2.0 + I[n-2] + (F[n-1]**2) * dt / 2.0
    It = (percent / 100.0) * Ia
    if I[0] < It:
        # First index where the cumulative intensity reaches It
        index = len(I) - len(I[I >= It])
        if index == len(I):
            # Never reached (rounding); clamp to the last sample
            index = index - 1
    else:
        index = 0
    t = index * dt
    return t, index, It
def get_vmodel_from_html(html_file):
    """
    Parse the velocity model name from an HTML summary file.

    The name is expected on the line following the one containing
    "Velocity model version", after a 4-character markup prefix
    (e.g. "<li>") and up to the first space.

    Returns the name, or None if the marker is not found (the original
    returned garbage from the last line, or raised, in that case).
    """
    token = None
    with open(html_file, 'r') as input_file:
        for line in input_file:
            # find() > 0 kept from the original: the marker is always
            # preceded by markup, never at column 0.
            if line.find("Velocity model version") > 0:
                # assumes the marker is never on the last line of the
                # file — next() would raise StopIteration otherwise
                line = next(input_file)
                token = line[4:].split(" ")[0]
                break
    return token
def get_method_from_html(html_file):
    """
    Parse the simulation method name from an HTML summary file.

    The name is expected on the line following the one containing
    "Simulation Method", after a 4-character markup prefix
    (e.g. "<li>") and terminated by the next "<" tag.

    Returns the name, or None if the marker is not found (the original
    returned garbage from the last line, or raised, in that case).
    """
    token = None
    with open(html_file, 'r') as input_file:
        for line in input_file:
            # find() > 0 kept from the original: the marker is always
            # preceded by markup, never at column 0.
            if line.find("Simulation Method") > 0:
                # assumes the marker is never on the last line of the
                # file — next() would raise StopIteration otherwise
                line = next(input_file)
                token = line[4:].split("<")[0]
                break
    return token
def calculate_distances(src_files, site):
    """
    Compute the minimum Rrup, Rjb and Rx distances (km) from `site` to
    the fault planes described by the given list of SRC files.

    Sentinel 10000000 is returned for any distance if src_files is
    empty (kept from the original implementation).
    """
    best_rrup = 10000000
    best_rjb = 10000000
    best_rx = 10000000
    # The site position never changes, so build it once outside the loop
    site_geom = [float(site.lon), float(site.lat), 0.0]
    for src_file in src_files:
        keys = bband_utils.parse_src_file(src_file)
        origin = (keys['lon_top_center'],
                  keys['lat_top_center'])
        dims = (keys['fault_length'], keys['dlen'],
                keys['fault_width'], keys['dwid'],
                keys['depth_to_top'])
        mech = (keys['strike'], keys['dip'],
                keys['rake'])
        (fault_trace, up_seis_depth,
         low_seis_depth, ave_dip,
         _, _) = putils.FaultTraceGen(origin, dims, mech)
        rjb, rrup, rx = putils.DistanceToSimpleFaultSurface(site_geom,
                                                            fault_trace,
                                                            up_seis_depth,
                                                            low_seis_depth,
                                                            ave_dip)
        # Keep the closest distance over all fault segments
        best_rjb = min(best_rjb, rjb)
        best_rrup = min(best_rrup, rrup)
        best_rx = min(best_rx, rx)
    return best_rrup, best_rjb, best_rx
def calculate_timeseries_param(station, site, args, realization):
"""
Calculate/collect parameters from timeseries
"""
vel_file = os.path.join(args.top_level_outdir, station["vel_file_name"])
acc_file = os.path.join(args.top_level_outdir, station["acc_file_name"])
# Read velocity timeseries
num_samples = 0
time_0 = -999
time_1 = -999
dt = -999
pgv1 = -999
pgv2 = -999
pgv3 = -999
pga1 = -999
pga2 = -999
pga3 = -999
input_bbp = open(vel_file)
for line in input_bbp:
line = line.strip()
if not line:
continue
if line.startswith("#") or line.startswith("%"):
continue
tokens = [float(token) for token in line.split()]
num_samples = num_samples + 1
if dt == -999:
if time_0 == -999:
time_0 = tokens[0]
elif time_1 == -999:
time_1 = tokens[0]
if time_0 != -999 and time_1 != -999:
dt = time_1 - time_0
pgv1 = max(pgv1, abs(tokens[1]))
pgv2 = max(pgv2, abs(tokens[2]))
pgv3 = max(pgv3, abs(tokens[3]))
input_bbp.close()
input_bbp = open(acc_file)
for line in input_bbp:
line = line.strip()
if not line:
continue
if line.startswith("#") or line.startswith("%"):
continue
tokens = [float(token) for token in line.split()]
pga1 = max(pga1, abs(tokens[1]))
pga2 = max(pga2, abs(tokens[2]))
pga3 = max(pga3, abs(tokens[3]))
input_bbp.close()
# Convert cm/s/s to g
pga1 = pga1 * 0.00101971621
pga2 = pga2 * 0.00101971621
pga3 = pga3 * 0.00101971621
# Calculate frequencies
nyquist = 1.0 / (2.0 * dt)
luf = 1.25 * site.high_freq_corner
huf = 0.8 * min(nyquist, site.low_freq_corner)
ufb = abs(luf-huf)
# And periods...
lup = 1.0 / site.high_freq_corner
hup = 1.0 / site.low_freq_corner
upb = abs(lup-hup)
# Calculate arias duration values
bbp_data = read_bbp(acc_file)
t5_h1, _, _ = calculate_arias(bbp_data[1], dt, 5)
t5_h2, _, _ = calculate_arias(bbp_data[2], dt, 5)
t5_v, _, _ = calculate_arias(bbp_data[3], dt, 5)
t75_h1, _, _ = calculate_arias(bbp_data[1], dt, 75)
t75_h2, _, _ = calculate_arias(bbp_data[2], dt, 75)
t75_v, _, _ = calculate_arias(bbp_data[3], dt, 75)
t95_h1, _, _ = calculate_arias(bbp_data[1], dt, 95)
t95_h2, _, _ = calculate_arias(bbp_data[2], dt, 95)
t95_v, _, _ = calculate_arias(bbp_data[3], dt, 95)
# Calculate times
station["ai_h1"] = -999
station["ai_h2"] = -999
station["ai_v"] = -999
station["ad5_75_h1"] = t75_h1 - t5_h1
station["ad5_75_h2"] = t75_h2 - t5_h2
station["ad5_75_v"] = t75_v - t5_v
station["ad5_95_h1"] = t95_h1 - t5_h1
station["ad5_95_h2"] = t95_h2 - t5_h2
station["ad5_95_v"] = t95_v - | |
value is not None else _Rail.Type.Gap )
if atEitherEnd or value is not None :
label = GafferUI.Label( path )
label.setToolTip( "Click to select \"%s\"" % path )
self.__connections.extend( [
label.enterSignal().connect( lambda gadget : gadget.setHighlighted( True ) ),
label.leaveSignal().connect( lambda gadget : gadget.setHighlighted( False ) ),
label.buttonPressSignal().connect( IECore.curry( Gaffer.WeakMethod( self.__labelButtonPress ) ) ),
] )
else :
GafferUI.Label( "..." )
GafferUI.Spacer( IECore.V2i( 0 ), parenting = { "expand" : True } )
if atEitherEnd or value is not None :
d = self.__diffCreator()
d.update( ( prevDisplayedValue, fullValue ) )
if prevDisplayedValue != fullValue and isinstance( d, SideBySideDiff ) :
d.frame( 0 ).setVisible( False )
prevDisplayedValue = fullValue
prevValue = value
self._mainColumn()[:] = rows
def __labelButtonPress( self, label, event ) :
script = self.__target.scene.ancestor( Gaffer.ScriptNode )
script.context()["ui:scene:selectedPaths"] = IECore.StringVectorData( [ label.getText() ] )
##########################################################################
# History section
##########################################################################
class _HistorySection( Section ) :
    # Shows how an inspected value evolves through the upstream node
    # history of the first target: one row per node, with "rail" glyphs
    # marking the chain and a diff widget showing each change.
    # One step of the history: the Target plus the inspected value there.
    __HistoryItem = collections.namedtuple( "__HistoryItem", [ "target", "value" ] )
    def __init__( self, inspector, diffCreator = TextDiff, **kw ) :
        Section.__init__( self, collapsed = None, **kw )
        self.__inspector = inspector
        self.__diffCreator = diffCreator
    def update( self, targets ) :
        # Rebuild the rows for the history of targets[0]; any additional
        # targets are ignored by this section.
        Section.update( self, targets )
        self.__target = targets[0]
        self.__connections = []
        if self.__target.path is None :
            return
        # Walk upstream via corresponding inputs collecting the inspected
        # value at each node, then flip to top-down (oldest-first) order.
        history = []
        target = self.__target
        while target is not None :
            history.append( self.__HistoryItem( target, self.__inspector( target ) ) )
            target = self.__sourceTarget( target )
        history.reverse()
        rows = []
        for i in range( 0, len( history ) ) :
            if i >= 2 and history[i].value == history[i-1].value and history[i].value == history[i-2].value :
                if i != len( history ) - 1 :
                    # if the last line we output was a gap, and this one would be too, then
                    # just skip it.
                    continue
            row = Row( borderWidth = 0, alternate = len( rows ) % 2 )
            rows.append( row )
            with row.listContainer() :
                # Rail glyph: Top/Bottom at the ends, Gap where the value
                # is unchanged, Middle where it changed.
                if i == 0 :
                    _Rail( _Rail.Type.Top if len( history ) > 1 else _Rail.Type.Single )
                elif i == len( history ) - 1 :
                    _Rail( _Rail.Type.Bottom )
                else :
                    if history[i-1].value == history[i].value :
                        _Rail( _Rail.Type.Gap )
                    else :
                        _Rail( _Rail.Type.Middle )
                # Node name appears only at the ends or where the value
                # changed; unchanged stretches collapse to "...".
                if i == 0 or i == ( len( history ) - 1 ) or history[i-1].value != history[i].value :
                    GafferUI.NameLabel( history[i].target.scene.node(), formatter = lambda l : ".".join( x.getName() for x in l ) )
                else :
                    GafferUI.Label( "..." )
                GafferUI.Spacer( IECore.V2i( 0 ), parenting = { "expand" : True } )
                diff = self.__diffCreator()
                diff.update( [
                    history[i-1].value if i > 0 else None,
                    history[i].value
                ] )
                # For a SideBySideDiff, hide the redundant side so only
                # the new (or removed) value is shown.
                if (i == 0 or history[i-1].value != history[i].value) and isinstance( diff, SideBySideDiff ) :
                    diff.frame( 0 if history[i].value is not None else 1 ).setVisible( False )
        self._mainColumn()[:] = rows
    def __sourceTarget( self, target ) :
        # Returns the upstream Target that this one derives from, or None
        # at the start of the history (or when the path no longer exists
        # upstream).
        if isinstance( target.scene.node(), Gaffer.DependencyNode ) :
            sourceScene = target.scene.node().correspondingInput( target.scene )
            if sourceScene is None :
                return None
            sourceScene = sourceScene.source()
            if sourceScene.node() == target.scene.node() :
                return None
            if not GafferScene.SceneAlgo.exists( sourceScene, target.path ) :
                return None
            return SceneInspector.Target( sourceScene, target.path )
        return None
# Backwards-compatibility alias; flagged for removal upstream.
SceneInspector.HistorySection = _HistorySection ## REMOVE ME!!
##########################################################################
# Node section
##########################################################################
class __NodeSection( Section ) :
    """Single-row section showing the name of the node being inspected."""
    class __Inspector( Inspector ) :
        def name( self ) :
            return "Node Name"
        def __call__( self, target ) :
            node = target.scene.node()
            return node.relativeName( node.scriptNode() )
    def __init__( self ) :
        Section.__init__( self, collapsed = None )
        # Character-level diff highlighting is meaningless for a name.
        diffCreator = functools.partial( TextDiff, highlightDiffs = False )
        with self._mainColumn() :
            self.__row = DiffRow( self.__Inspector(), diffCreator = diffCreator )
    def update( self, targets ) :
        Section.update( self, targets )
        self.__row.update( targets )
SceneInspector.registerSection( __NodeSection, tab = None )
##########################################################################
# Path section
##########################################################################
class __PathSection( Section ) :
    """Single-row section showing the selected scene location's path."""
    class __Inspector( Inspector ) :
        def name( self ) :
            return "Location"
        def __call__( self, target ) :
            # Falls back to "Invalid" when no path is selected.
            return target.path or "Invalid"
    def __init__( self ) :
        Section.__init__( self, collapsed = None )
        with self._mainColumn() :
            self.__row = DiffRow( self.__Inspector(), functools.partial( TextDiff, highlightDiffs = False ) )
    def update( self, targets ) :
        Section.update( self, targets )
        self.__row.update( targets )
SceneInspector.registerSection( __PathSection, tab = "Selection" )
##########################################################################
# Transform section
##########################################################################
class __TransformSection( Section ) :
    # Displays the local and world matrices plus their translate /
    # rotate / scale / shear decompositions for the selected location.
    def __init__( self ) :
        Section.__init__( self, collapsed = True, label = "Transform" )
        with self._mainColumn() :
            index = 0
            for transform in ( "transform", "fullTransform" ) :
                inspector = self.__Inspector( transform )
                DiffRow(
                    inspector,
                    alternate = index % 2,
                )
                index += 1
                for component in ( "t", "r", "s", "h" ) :
                    DiffRow(
                        self.__Inspector( transform, component ),
                        # NOTE(review): `inspector` is the whole-matrix
                        # inspector from the outer loop, so the corner
                        # label reads "From Local Matrix"/"From World
                        # Matrix" — looks intentional; confirm.
                        diffCreator = functools.partial( self.__diffCreator, name = inspector.name() ),
                        alternate = index % 2,
                    )
                    index += 1
    def update( self, targets ) :
        Section.update( self, targets )
        for row in self._mainColumn() :
            if isinstance( row, DiffRow ) :
                row.update( targets )
    @staticmethod
    def __diffCreator( name ) :
        # TextDiff whose corner widgets note which matrix the values came from.
        diff = TextDiff()
        for i in range( 0, 2 ) :
            diff.setCornerWidget( i, GafferUI.Label( "<sup>From " + name + "</sup>" ) )
        return diff
    class __Inspector( Inspector ) :
        def __init__( self, accessor, component = None ) :
            # accessor : "transform" (local) or "fullTransform" (world)
            # component : "t", "r", "s", "h", or None for the full matrix
            self.__accessor = accessor
            self.__component = component
        def name( self ) :
            result = "Local" if self.__accessor == "transform" else "World"
            result += {
                "t" : " Translate",
                "r" : " Rotate",
                "s" : " Scale",
                "h" : " Shear",
                None : " Matrix",
            }[self.__component]
            return result
        def __call__( self, target ) :
            if target.path is None :
                return None
            matrix = getattr( target.scene, self.__accessor )( target.path )
            if self.__component is None :
                return matrix
            try :
                components = dict( zip( "shrt", matrix.extractSHRT() ) )
            except :
                # decomposition can fail if we have 0 scale.
                return "Unavailable"
            # extractSHRT() yields rotation in radians; present degrees.
            if self.__component == "r" :
                return components[self.__component] * 180.0 / math.pi
            else :
                return components[self.__component]
SceneInspector.registerSection( __TransformSection, tab = "Selection" )
##########################################################################
# Bound section
##########################################################################
class __BoundSection( Section ) :
    """Displays the local and world-space bounding boxes of the selection."""
    class __Inspector( Inspector ) :
        def __init__( self, world=False ) :
            self.__world = world
        def name( self ) :
            return "World" if self.__world else "Local"
        def __call__( self, target ) :
            if target.path is None :
                return None
            bound = target.bound()
            if not self.__world :
                return bound
            # World bound = local bound taken through the full transform.
            return bound.transform( target.fullTransform() )
    def __init__( self ) :
        Section.__init__( self, collapsed = True, label = "Bounding box" )
        with self._mainColumn() :
            self.__localBoundRow = DiffRow( self.__Inspector() )
            self.__worldBoundRow = DiffRow( self.__Inspector( world = True ), alternate = True )
    def update( self, targets ) :
        Section.update( self, targets )
        self.__localBoundRow.update( targets )
        self.__worldBoundRow.update( targets )
SceneInspector.registerSection( __BoundSection, tab = "Selection" )
##########################################################################
# Attributes section
##########################################################################
class __AttributesSection( Section ) :
    """Displays the attributes at the selected location (inherited set by default)."""
    class __Inspector( Inspector ) :
        def __init__( self, attributeName = None ) :
            self.__attributeName = attributeName
        def name( self ) :
            return self.__attributeName or ""
        def inspectsAttributes( self ) :
            return True
        def __call__( self, target, ignoreInheritance = False ) :
            if target.path is None :
                return None
            # fullAttributes() includes values inherited from ancestors.
            attributes = target.attributes() if ignoreInheritance else target.fullAttributes()
            return attributes.get( self.__attributeName )
        def children( self, target ) :
            if not target.path :
                return []
            return [ self.__class__( attributeName ) for attributeName in target.fullAttributes().keys() ]
    def __init__( self ) :
        Section.__init__( self, collapsed = True, label = "Attributes" )
        with self._mainColumn() :
            self.__diffColumn = DiffColumn( self.__Inspector(), filterable=True )
    def update( self, targets ) :
        Section.update( self, targets )
        self.__diffColumn.update( targets )
SceneInspector.registerSection( __AttributesSection, tab = "Selection" )
##########################################################################
# Object section
##########################################################################
class __ObjectSection( Section ) :
def __init__( self ) :
Section.__init__( self, collapsed = True, label = "Object" )
with self._mainColumn() :
DiffColumn(
self.__TopologyInspector(),
label = "Topology"
)
DiffColumn(
self.__ParametersInspector(),
label = "Parameters"
)
DiffColumn(
self.__PrimitiveVariablesInspector(),
label = "Primitive Variables"
)
def update( self, targets ) :
Section.update( self, targets )
for diffColumn in self._mainColumn() :
diffColumn.update( targets )
def _summary( self, targets ) :
if not len( targets ) :
return ""
objects = [
target.object() if target.path is not None else IECore.NullObject.defaultNullObject()
for target in targets
]
typeNames = [ o.typeName().split( ":" )[-1] for o in objects ]
typeNames = [ "None" if t == "NullObject" else t for t in typeNames ]
if len( typeNames ) == 1 or typeNames[0] == typeNames[1] :
return typeNames[0] if typeNames[0] != "None" else ""
else :
return " / ".join( typeNames )
class __TopologyInspector( Inspector ) :
def __init__( self, interpolation = None, property = None ) :
Inspector.__init__( self )
self.__interpolation = interpolation
self.__property = property
def name( self ) :
if self.__interpolation is not None :
return str( self.__interpolation )
else :
return IECore.CamelCase.toSpaced( self.__property )
def __call__( self, target ) :
if target.path is None :
return None
object = target.object()
if isinstance( object, IECore.NullObject ) :
return None
if self.__interpolation is not None :
return object.variableSize( self.__interpolation ) if isinstance( object, IECore.Primitive ) else None
else :
return getattr( object, self.__property, None )
def children( self, target ) :
if target.path is None :
return []
object = target.object()
if not isinstance( object, IECore.Primitive ) :
return []
result = []
if isinstance( object, IECore.MeshPrimitive ) :
result.append( self.__class__( property = "interpolation" ) )
for i in [
IECore.PrimitiveVariable.Interpolation.Constant,
IECore.PrimitiveVariable.Interpolation.Uniform,
IECore.PrimitiveVariable.Interpolation.Vertex,
IECore.PrimitiveVariable.Interpolation.Varying,
IECore.PrimitiveVariable.Interpolation.FaceVarying,
] :
result.append( self.__class__( interpolation = i ) )
return result
class __ParametersInspector( Inspector ) :
def __init__( self, parameterName = None ) :
Inspector.__init__( self )
self.__parameterName = parameterName
def name( self ) :
return self.__parameterName
def __call__( self, target ) :
parameters = self.__parameters( target )
if parameters is None :
return None
return parameters.get( self.__parameterName )
def children( self, target ) :
parameters = self.__parameters( target )
if parameters is None :
return []
return [ self.__class__( p ) for p in parameters.keys() ]
def __parameters( self, target ) :
if target.path is None :
return None
object = target.object()
if isinstance( object, ( IECore.Camera, IECore.ExternalProcedural ) ) :
return object.parameters()
elif isinstance( object, IECore.Light ) :
return object.parameters
return None
class __PrimitiveVariablesInspector( Inspector ) :
def __init__( self, primitiveVariableName = None ) :
Inspector.__init__( self )
self.__primitiveVariableName = primitiveVariableName
def name( self ) :
return self.__primitiveVariableName
def __call__( self, target ) | |
for each repeat
# ----------------------------------------------------------------------
# PsychoPy Builder-generated code: two instruction screens (Instr2,
# Instr3) followed by a one-trial demo loop (setimg + InstrTrial).
# NOTE(review): this section is machine-generated; hand edits are lost
# when the script is regenerated from the Builder .psyexp file.
# ----------------------------------------------------------------------
Instr2Text.setText('When the image of a %s is shown you will press the key that is on the SAME side of the screen that the %s is shown on. \n\nFor example, a correct response would be pressing the "1" key when the %s is on the left side of the screen\n\nPress space to continue.' % (congr_txt,congr_txt,congr_txt))
# keep track of which components have finished
Instr2Components = [Instr2Text]
for thisComponent in Instr2Components:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "Instr2"-------
# Shows the instructions until the participant presses space.
while continueRoutine:
    # get current time
    t = Instr2Clock.getTime()
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *Instr2Text* updates
    if t >= 0.0 and Instr2Text.status == NOT_STARTED:
        # keep track of start time/frame for later
        Instr2Text.tStart = t
        Instr2Text.frameNStart = frameN # exact frame index
        Instr2Text.setAutoDraw(True)
    if Instr2Text.status == STARTED and bool(event.getKeys('space')):
        Instr2Text.setAutoDraw(False)
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in Instr2Components:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "Instr2"-------
for thisComponent in Instr2Components:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "Instr2" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "Instr3"-------
t = 0
Instr3Clock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
# keep track of which components have finished
Instr3Components = [Instr3Text]
for thisComponent in Instr3Components:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "Instr3"-------
# Second instruction screen; identical structure to Instr2.
while continueRoutine:
    # get current time
    t = Instr3Clock.getTime()
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *Instr3Text* updates
    if t >= 0.0 and Instr3Text.status == NOT_STARTED:
        # keep track of start time/frame for later
        Instr3Text.tStart = t
        Instr3Text.frameNStart = frameN # exact frame index
        Instr3Text.setAutoDraw(True)
    if Instr3Text.status == STARTED and bool(event.getKeys('space')):
        Instr3Text.setAutoDraw(False)
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in Instr3Components:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "Instr3"-------
for thisComponent in Instr3Components:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "Instr3" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
# Demo loop: runs the first two rows of demo.xls in fixed order.
demo1trials = data.TrialHandler(nReps=1, method='sequential',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions('demo.xls', selection='0,1'),
    seed=None, name='demo1trials')
thisExp.addLoop(demo1trials) # add the loop to the experiment
thisDemo1trial = demo1trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisDemo1trial.rgb)
# NOTE(review): Builder-generated exec() creates condition-file columns
# as globals (e.g. xpos, corrResp); standard generated idiom, but it
# means the names below are only defined at runtime.
if thisDemo1trial != None:
    for paramName in thisDemo1trial.keys():
        exec(paramName + '= thisDemo1trial.' + paramName)
for thisDemo1trial in demo1trials:
    currentLoop = demo1trials
    # abbreviate parameter names if possible (e.g. rgb = thisDemo1trial.rgb)
    if thisDemo1trial != None:
        for paramName in thisDemo1trial.keys():
            exec(paramName + '= thisDemo1trial.' + paramName)
    # ------Prepare to start Routine "setimg"-------
    t = 0
    setimgClock.reset() # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    # Choose the stimulus image so that congruent trials pair the image
    # side with the correct-response side (1 = left key, 0 = right key).
    if (xpos == -.25 and corrResp == 1) or (xpos == .25 and corrResp == 0):
        stimimg=congr
    elif (xpos == -.25 and corrResp == 0) or (xpos == .25 and corrResp == 1):
        stimimg=incongr
    # keep track of which components have finished
    setimgComponents = []
    for thisComponent in setimgComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED
    # -------Start Routine "setimg"-------
    # No components: this routine exists only to run the code above and
    # ends immediately on the first pass of the loop below.
    while continueRoutine:
        # get current time
        t = setimgClock.getTime()
        frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame
        # check if all components have finished
        if not continueRoutine: # a component has requested a forced-end of Routine
            break
        continueRoutine = False # will revert to True if at least one component still running
        for thisComponent in setimgComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break # at least one component has not yet finished
        # check for quit (the Esc key)
        if endExpNow or event.getKeys(keyList=["escape"]):
            core.quit()
        # refresh the screen
        if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
            win.flip()
    # -------Ending Routine "setimg"-------
    for thisComponent in setimgComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    # the Routine "setimg" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()
    # ------Prepare to start Routine "InstrTrial"-------
    t = 0
    InstrTrialClock.reset() # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    stim.setPos((xpos, 0))
    stim.setImage(stimimg)
    answerprompt.setText('Press %s' % corrResp)
    demoinput = event.BuilderKeyResponse()
    thisExp.addData('img', stimimg)
    # keep track of which components have finished
    InstrTrialComponents = [Fixation, stim, answerprompt, demoinput]
    for thisComponent in InstrTrialComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED
    # -------Start Routine "InstrTrial"-------
    # Demo trial: fixation + stimulus + prompt, until a '1'/'0' keypress.
    while continueRoutine:
        # get current time
        t = InstrTrialClock.getTime()
        frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame
        # *Fixation* updates
        if t >= 0.0 and Fixation.status == NOT_STARTED:
            # keep track of start time/frame for later
            Fixation.tStart = t
            Fixation.frameNStart = frameN # exact frame index
            Fixation.setAutoDraw(True)
        # *stim* updates
        if t >= 0.0 and stim.status == NOT_STARTED:
            # keep track of start time/frame for later
            stim.tStart = t
            stim.frameNStart = frameN # exact frame index
            stim.setAutoDraw(True)
        # *answerprompt* updates
        if t >= 0.0 and answerprompt.status == NOT_STARTED:
            # keep track of start time/frame for later
            answerprompt.tStart = t
            answerprompt.frameNStart = frameN # exact frame index
            answerprompt.setAutoDraw(True)
        # *demoinput* updates
        if t >= 0.0 and demoinput.status == NOT_STARTED:
            # keep track of start time/frame for later
            demoinput.tStart = t
            demoinput.frameNStart = frameN # exact frame index
            demoinput.status = STARTED
            # keyboard checking is just starting
            win.callOnFlip(demoinput.clock.reset) # t=0 on next screen flip
            event.clearEvents(eventType='keyboard')
        if demoinput.status == STARTED:
            theseKeys = event.getKeys(keyList=['1', '0'])
            # check for quit:
            if "escape" in theseKeys:
                endExpNow = True
            if len(theseKeys) > 0: # at least one key was pressed
                demoinput.keys = theseKeys[-1] # just the last key pressed
                demoinput.rt = demoinput.clock.getTime()
                # a response ends the routine
                continueRoutine = False
        # check if all components have finished
        if not continueRoutine: # a component has requested a forced-end of Routine
            break
        continueRoutine = False # will revert to True if at least one component still running
        for thisComponent in InstrTrialComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break # at least one component has not yet finished
        # check for quit (the Esc key)
        if endExpNow or event.getKeys(keyList=["escape"]):
            core.quit()
        # refresh the screen
        if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
            win.flip()
    # -------Ending Routine "InstrTrial"-------
    for thisComponent in InstrTrialComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    # check responses
    if demoinput.keys in ['', [], None]: # No response was made
        demoinput.keys=None
    demo1trials.addData('demoinput.keys',demoinput.keys)
    if demoinput.keys != None: # we had a response
        demo1trials.addData('demoinput.rt', demoinput.rt)
    # the Routine "InstrTrial" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()
    thisExp.nextEntry()
# completed 1 repeats of 'demo1trials'
# ------Prepare to start Routine "Instr4"-------
t = 0
Instr4Clock.reset() # clock
frameN = -1
continueRoutine = True
# | |
# <gh_stars>0  (stray dataset artifact; commented out — it is not valid Python)
import numpy as np
import pt_utils
### WARNING: NOTHING IN THIS FILE IS CHROMATIC!
### *CHROMATIC OPERATIONS CAN BE FOUND IN THE pt_musicutils.py FILE
### ALL OPERATIONS ARE BASED ON THE F-LYDIAN NOTE PARADIGM: 0b111111100000
# =================================================================
# Voicing-preference arrays: each element is an interval step tried in
# order when choosing chord voicings; v_options below indexes them.
thirds_only = np.array([4])
# =================================================================
scales_and_thirds = np.array([4,2])
# =================================================================
fifths_and_scales_and_thirds = np.array([4, 2, 1])
# =================================================================
all_close_voicings = np.array([4,2,6])
# =================================================================
# with adjustments for low D and P, this may be the richest option.
# prefers, thirds, seconds, then fifths, seconds, fourths, sixths....
# 0 is excluded as trivial, since it cannot improve on others'
# solutions.
# NOTE(review): 6 appears twice in this array; the comment above reads
# like a seven-step preference order, so one of the 6s may be a typo —
# confirm against the intended voicing order.
all_voicings = np.array([4, 1, 2, 6, 3, 5, 6])
# =================================================================
# All selectable preference sets, indexed by the v_opt argument used
# elsewhere in this module (dtype=object because lengths differ).
v_options = np.array([thirds_only, scales_and_thirds, fifths_and_scales_and_thirds, all_close_voicings, all_voicings], dtype=object)
def get_binary_KPDVE_note(kpdve):
    '''
    Return the binary (circle-of-fifths) pitch representation of the
    single note addressed by a KPDVE location.

    Parameters
    ----------
    kpdve
        a np.array(5) in form KPDVE

    Returns
    -------
    int
        a representation of a note in binary (circle-of-fifths)

    7 of F Major 7 chord
    >>> get_binary_KPDVE_note(np.array([0,0,0,4,3]))
    64

    G a fifth above C as degree 1 in F lydian
    >>> get_binary_KPDVE_note(np.array([0,0,1,1,1]))
    512

    G V7 b9 -- dominant in C Harmonic minor... top note Ab
    >>> get_binary_KPDVE_note(np.array([0,4,2,4,4]))
    4
    '''
    # D/V/E address the note within the mode...
    dve_note = get_binary_DVE_note(kpdve[2], kpdve[3], kpdve[4])
    # ...P applies the pattern distortion...
    filtered = apply_filter_for_p(dve_note, kpdve[1])
    # ...and K rotates into the target key.
    return pt_utils.rotate_bits_right(filtered, kpdve[0])
def get_binary_KPDVE_chord(kpdve):
    '''
    Return a binary pitch-class set for a given kpdve array, built by
    OR-ing together the notes for every extension 0..E.

    Parameters
    ----------
    kpdve
        a np.array(5) in form KPDVE

    Returns
    -------
    int
        a representation of a chord in binary (circle-of-fifths)

    F Major 7 chord
    >>> get_binary_KPDVE_chord(np.array([0,0,0,4,3]))
    3264

    G a fifth above C as degree 1 in F lydian
    >>> get_binary_KPDVE_chord(np.array([0,0,2,1,1]))
    768

    Major 9 pile of fifths on Eb
    >>> get_binary_KPDVE_chord(np.array([9,0,0,1,2]))
    7

    V7 of V in C Major
    >>> get_binary_KPDVE_chord(np.array([0,1,3,4,3]))
    1424

    G V7 b9 -- dominant in C Harmonic minor...
    >>> get_binary_KPDVE_chord(np.array([0,4,2,4,4]))
    2852
    '''
    chord_bits = 0
    for extension in range(kpdve[4] + 1):
        note_loc = np.array([kpdve[0], kpdve[1], kpdve[2], kpdve[3], extension])
        chord_bits |= get_binary_KPDVE_note(note_loc)
    return chord_bits
def get_ordered_chord_notes(kpdve):
    '''
    Return the chord's pitch classes from the root up, one per
    extension level.

    Parameters
    ----------
    kpdve : np.array(5)
        a kpdve array whose notes are to be returned from the root up.

    Returns
    -------
    an array of notes, root first.

    >>> get_ordered_chord_notes(np.array([0,0,0,4,3]))
    array([0, 4, 1, 5])
    '''
    return np.array([
        pt_utils.bit_locs(
            get_binary_KPDVE_note(np.array([kpdve[0], kpdve[1], kpdve[2], kpdve[3], extension]))
        )[0]
        for extension in range(kpdve[4] + 1)
    ])
def get_binary_K(k):
    """
    Return the binary pitch-class byte for major key `k`, produced by
    rotating the C-major circle-of-fifths byte into place.

    Parameters
    ----------
    k : int
        a value for Key

    Returns
    -------
    int
        binary number representing pitch classes for the given major key.

    returns 0b111111100000
    >>> get_binary_K(0)
    4064

    returns 0b011111110000
    >>> get_binary_K(1)
    2032

    returns 0b111110000011
    >>> get_binary_K(10)
    3971
    """
    key_bits = pt_utils.rotate_bits_right(pt_utils.C_M_FIFTHS, k)
    return key_bits
def get_binary_P(p):
    """
    Return the pattern-distortion byte 0b100000010000 rotated to
    position `p`.

    Parameters
    ----------
    p : int
        the pattern position to rotate the distortion byte to.

    Returns
    -------
    int
        the distortion byte 0b100000010000 rotated right by p.

    returns 0b100000010000
    >>> get_binary_P(0)
    2064

    returns 0b010000001000
    >>> get_binary_P(1)
    1032

    returns 0b000001000010
    >>> get_binary_P(10)
    66
    """
    pattern_bits = pt_utils.rotate_bits_right(pt_utils.P_PARSER, p)
    return pattern_bits
def get_binary_KP(k, p):
    '''
    (int, int) -> int

    Apply the P displacement to the C Major grouping, then rotate the
    result into key ``k``.  P must be between 0 and 6; it allows all
    twelve notes to be part of a major key.

    0 returns unaffected K
    1-3 return K^get_binary_p(p-1)
    4-6 return K^get_binary_p(p+5)

    this means    0:C Major
    (for CM)      1:G Major (Dominant)
                  2:D Melodic Minor
                  3:A Harmonic Minor (relative minor)
                  4:C Harmonic Major
                  5:C Melodic Minor (parallel minor)
                  6:F Major (subdominant)

    returns 0b111111100000
    >>> get_binary_KP(0, 0)
    4064
    returns 0b011111110000
    >>> get_binary_KP(0, 1)
    2032
    returns 0b010111110100
    >>> get_binary_KP(1, 2)
    1524
    returns 0b111111100000 CM as dominant
    >>> get_binary_KP(11, 1)
    4064
    returns 0b111111100000 CM as subdominant
    >>> get_binary_KP(1, 6)
    4064
    '''
    if p == 0:
        # No distortion: the plain major key.
        return get_binary_K(k)
    distorted = apply_filter_for_p(pt_utils.C_M_FIFTHS, p)
    return pt_utils.rotate_bits_right(distorted, k)
# right now for demonstration purposes... doesn't *quite* account for un-wrinkled modes.
def raw_kp_voxel():
    """Return a 12x12 array: key bitmask XOR pattern bitmask for every (K, P)."""
    return np.array([
        [get_binary_K(key) ^ get_binary_P(pattern) for pattern in range(12)]
        for key in range(12)
    ])
# Main Call from Partita
def get_KPDVE_list_for_notegroup(notegroup, v_opt=-1):
    '''
    Find every full KPDVE analysis of a pitch-class set.

    Parameters
    ----------
    notegroup : a twelve-bit integer for a circle-based pitch-class set

    Returns
    -------
    a Numpy array of form [[K1,P1,D1,V1,E1] ... [Kn, Pn, Dn, Vn, Dn]]

    F Major 7
    >>> get_KPDVE_list_for_notegroup(0b110011000000)
    array([[ 0,  0,  0,  4,  3],
           [ 0,  3,  0,  4,  3],
           [ 0,  6,  0,  4,  3],
           [ 1,  6,  6,  4,  3],
           [10,  1,  2,  4,  3],
           [11,  0,  1,  4,  3],
           [11,  1,  1,  4,  3],
           [11,  4,  1,  4,  3]])

    G Dominant 7 b9 #2
    >>> get_KPDVE_list_for_notegroup(0b101100100101)
    array([[9, 4, 5, 4, 5]])

    No Match
    >>> get_KPDVE_list_for_notegroup(0b11111111)
    array([[12,  7,  7,  7,  7]])
    '''
    kp_matches = get_KP_list_for_notegroup(notegroup)
    # No KP context contains the notegroup: return the sentinel 'no match'
    # row.  (This is also the point to check for bitonality.)
    if len(kp_matches) == 0:
        return np.array([pt_utils.MODVALS])
    return KP_list_to_KPDVE_list(kp_matches, notegroup, v_opt)
# THIS IS THE POINT WHERE THE PENTATONIC LIST HAS TO HAPPEN?
def get_KP_list_for_notegroup(notegroup, pentatonic=False):
    '''
    Find every (K, P) context whose pitch-class set contains ``notegroup``.

    Parameters
    ----------
    notegroup : a twelve-bit integer for a circle-based pitch-class set
    pentatonic : bool, optional
        if True, match against the pentatonic transform of each KP set.

    Returns
    -------
    a Numpy array of rows [K, P, 0, 0, 0], one per matching context

    G Dominant 7 b9 #2
    >>> get_KP_list_for_notegroup(0b101100100101)
    array([[9, 4, 0, 0, 0]])

    No Match
    >>> get_KP_list_for_notegroup(0b11111111)
    array([], dtype=float64)
    '''
    kp_list = []
    for k in range(pt_utils.MODVALS[0]):
        for p in range(pt_utils.MODVALS[1]):
            kp_temp = get_binary_KP(k, p)
            # FIX: was `if pentatonic == True:` -- test truthiness directly.
            if pentatonic:
                kp_temp = pt_utils.pentatonic_transform(kp_temp)
            # notegroup is a subset of kp_temp iff OR-ing adds no new bits.
            if notegroup | kp_temp == kp_temp:
                kp_list.append([k, p, 0, 0, 0])
    return np.array(kp_list)
def KP_list_to_KPDVE_list(kp_list, notegroup, v_opt=0):
    '''
    Complete each (K, P) row with its D, V, E values for the notegroup.

    Parameters
    ----------
    kp_list : np.array of KPDVE values
    notegroup : a twelve-bit integer for a circle-based pitch-class set
    v_opt : int, optional
        restrict voicing loops to thirds (see global options)

    Returns
    -------
    kpdve_list (array of np.array(5))
    '''
    completed = kp_list.copy()
    for row in completed:
        # Reduce the notegroup to this row's C-major frame, then add the
        # resulting [0, 0, D, V, E] offsets into the row in place.
        reduced = undo_KP_to_analyze(notegroup, row[0], row[1])
        row += DVE_vals_for_CM_notegroup(reduced, v_opt)
    return completed
def DVE_vals_for_CM_notegroup(notegroup, v_opt=-1):
    '''
    Find D, V, E for a notegroup already reduced to the C-major paradigm.
    Every note is tried as a possible root D; the most efficient (fewest
    steps, then lowest root) wins.

    Parameters
    ----------
    notegroup : int
        pitch class set (circle-fifths-based).
    v_opt : int, optional
        restrict voicing loops to thirds (see global options). The default is 0.

    Returns
    -------
    np.array(n, 5)
        ([0,0,d, v_opt ,e])

    >>> DVE_vals_for_CM_notegroup(0b110011000000)
    array([0, 0, 0, 4, 3])
    '''
    candidate_voicings = v_options[v_opt]
    # Defaults represent 'no match found yet'.
    best_steps = 7
    best_d = 7
    best_v = 4
    best_e = pt_utils.bit_count(notegroup)
    for root in pt_utils.bit_locs(notegroup):
        for voicing in candidate_voicings:
            steps = num_steps_to_DVE_match_at_D(notegroup, root, voicing)
            # Prefer fewer steps; on a tie, prefer the lower root.
            if steps < best_steps or (steps == best_steps and root < best_d):
                best_steps = steps
                best_d = root
                best_v = voicing
                best_e = steps
    return np.array([0, 0, best_d, best_v, best_e])
def num_steps_to_DVE_match_at_D(notegroup, d, v=4):
    '''
    (int, int, int) -> int

    Count how many steps a growing tower of intervals rooted at ``d``
    (voicing ``v``) needs before it contains all the notes of the chord;
    gives up after 7 steps.

    F 5 chord
    >>> num_steps_to_DVE_match_at_D(0b110010000000, 0, 4)
    2
    F 7 chord
    >>> num_steps_to_DVE_match_at_D(0b110011000000, 0, 4)
    3
    F 9 chord (no 5)
    >>> num_steps_to_DVE_match_at_D(0b101011000000, 0, 4)
    4
    '''
    steps = 0
    tower = pt_utils.binary_note_at_loc(d)
    # Grow the tower one interval at a time until it covers the notegroup.
    while tower & notegroup != notegroup and steps < 7:
        steps += 1
        tower |= pt_utils.binary_note_at_loc(DVE_linear_eq(d, v, steps))
    return steps
def get_binary_DVE_note(d, v, e):
    '''
    (int, int, int) -> int

    Return the note at the end of the tunnel: a single bit, at the last
    extension of the DVE sequence.  Handles the C-major case, where all
    transformations occur.

    >>> get_binary_DVE_note(0, 0, 0)
    2048
    return the fifth above D (A)
    >>> get_binary_DVE_note(3, 4, 2)
    128
    '''
    # Locate the final scale degree, then set that single bit.
    note_index = DVE_linear_eq(d, v, e)
    return pt_utils.rotate_bits_right(pt_utils.LEFT_BIT, note_index)
# --------------------------
def DVE_linear_eq(d, v, e):
    '''
    Scale-degree index (mod 7) of the final note of a D/V/E sequence,
    for a notegroup reduced to C Major.

    Parameters
    ----------
    d : int
        degree value in KPDVE.
    v : int
        voicing value in KPDVE.
    e : int
        extension value in KPDVE.

    Returns
    -------
    int
        the index of the last output note in a KPDVE encoding,
        wrapped into 0..6.
    '''
    return (e * v + d) % 7
# --------------------------
def get_binary_DVE_chord(d, v, e):
    '''
    Accumulate the chord from root to last extension as a 12-bit
    pitch-class set.

    Parameters
    ----------
    d : int
        degree value in KPDVE.
    v : int
        voicing value in KPDVE.
    e : int
        extension value in KPDVE.

    Returns
    -------
    int
        a chord from root to last extension, encoded as 12-bit pitch-class set

    F Major 7 Chord
    >>> get_binary_DVE_chord(0, 4, 3)
    3264
    '''
    chord_bits = 0
    for step in range(e + 1):
        chord_bits |= get_binary_DVE_note(d, v, step)
    return chord_bits
def apply_filter_for_p(notegroup_cM, p):
'''
| |
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
Copyright and License for this python code - Apache 2.0+ license
==========================================================================
Copyright (c) 2017, <NAME>. All rights reserved.
THIS SOFTWARE PROVIDED "AS-IS" WITH NO WARRANTY.
Copyright and License from original matlab/C code - BSD license
==========================================================================
Copyright (c) 2009-2016, <NAME> and <NAME>. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Tel-Aviv University nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY <NAME> AND <NAME> ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from time import time # take out in favor of timeit at some point
import numpy as np
from numpy.random import rand , randn
from numpy.linalg import lstsq , cond , norm
from scipy.linalg import qr , solve_triangular , hadamard # use scipy's? or numpy's?
from scipy.sparse.linalg import LinearOperator , aslinearoperator , lsqr
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def rht_r( A , K , L ) :
"""
Actual recursion. We focus in a bit on the basic operation here. We recurse computing products like
U = HP(A|K,L) = H A[K:L,:]
where K < L, K,L in {0} \/ {2^k : k = 1,2,3,...}. For example,
U_m = HP( A | 0 , 2^(m-1) ) and V_m = HP( A | 2^(m-1) , 2^m ) where m = ceil( log2( M ) ), M = A.shape[0]
A simple example, our termination condition, is when L = K+2:
U = HP(A|K,K+2) = H A[K:K+2,:]
= 1/sqrt(2) [ 1 , 1 ] [ A[ K ,:] ]
[ 1 , -1 ] [ A[K+1,:] ]
= 1/sqrt(2) [ 1 ] A[K,:] + [ 1 ] A[K+1,:]
[ 1 ] + [ -1 ]
= 1/sqrt(2) [ A[K,:] + A[K+1,:] ]
[ A[K,:] - A[K+1,:] ]
Otherwise, we have
HP( A | 0 , 2^l ) , l > 1 (otherwise L = 2^1 = 2 = 2+0 = K+2)
or HP( A | 2^k , 2^l ) , k > 1 or l > 2 if k == 1 (otherwise K = 2, L = 2^2 = 4, and L = K+2)
We want to split in half, which is easiest when K == 0:
A[ 0 : 2^l , : ] -> [ A[ 0 : 2^(l-1) ] ]
[ A[ 2^(l-1) : 2^l ] ]
Note that 2^(l-1) = 2^l/2 = L/2. When K > 0,
A[2^k:2^l,:] -> A[ 2^k + 0:2^l-2^k ,:] -> [ A[ 2^k + 0:2^(l-1)-2^(k-1) , : ] ]
[ A[ 2^k + 2^(l-1)-2^(k-1) : 2^l , : ] ]
Here note that 2^(l-1) = L/2 and 2^(k-1) = K/2, so that (of course) 2^(l-1)-2^(k-1) = (L-K)/2.
"""
if L < K : return rht_r( A , L , K ) # a bit of argument check
if L - K == 2 : # recursion termination condition
Kp1 = K + np.uint64(1)
if A.shape[0] < Kp1 : return 0.70710678118654746 * np.vstack( | |
id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar etag: A unique read-only string that changes whenever the resource is updated.
:vartype etag: str
:ivar kind: Required. Whether the rule is custom or default.Constant filled by server. Possible
values include: "Custom", "Default".
:vartype kind: str or ~azure.mgmt.network.v2021_05_01_preview.models.UserRuleKind
:ivar system_data: The system metadata related to this resource.
:vartype system_data: ~azure.mgmt.network.v2021_05_01_preview.models.SystemData
"""
# Server-populated fields are read-only; 'kind' must always be supplied.
_validation = {
    'id': {'readonly': True},
    'name': {'readonly': True},
    'type': {'readonly': True},
    'etag': {'readonly': True},
    'kind': {'required': True},
    'system_data': {'readonly': True},
}
# Maps Python attribute names to wire-format JSON keys and msrest types.
_attribute_map = {
    'id': {'key': 'id', 'type': 'str'},
    'name': {'key': 'name', 'type': 'str'},
    'type': {'key': 'type', 'type': 'str'},
    'etag': {'key': 'etag', 'type': 'str'},
    'kind': {'key': 'kind', 'type': 'str'},
    'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
# Polymorphic dispatch: the 'kind' discriminator selects the concrete
# model class during deserialization.
_subtype_map = {
    'kind': {'Custom': 'UserRule', 'Default': 'DefaultUserRule'}
}
def __init__(
    self,
    **kwargs
):
    """Initialize the polymorphic base; subclasses overwrite ``kind``."""
    super(BaseUserRule, self).__init__(**kwargs)
    # Discriminator value matched against _subtype_map on deserialization.
    self.kind = 'BaseUserRule'  # type: str
    # Read-only; populated by the service, never sent by the client.
    self.system_data = None
class CloudErrorBody(msrest.serialization.Model):
    """An error response from the service.

    :ivar code: An identifier for the error. Codes are invariant and are intended to be consumed
     programmatically.
    :vartype code: str
    :ivar message: A message describing the error, intended to be suitable for display in a user
     interface.
    :vartype message: str
    :ivar target: The target of the particular error. For example, the name of the property in
     error.
    :vartype target: str
    :ivar details: A list of additional details about the error.
    :vartype details: list[~azure.mgmt.network.v2021_05_01_preview.models.CloudErrorBody]
    """

    # Maps Python attribute names to wire-format JSON keys and msrest types.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[CloudErrorBody]'},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        target: Optional[str] = None,
        details: Optional[List["CloudErrorBody"]] = None,
        **kwargs
    ):
        """
        :keyword code: An identifier for the error. Codes are invariant and are intended to be consumed
         programmatically.
        :paramtype code: str
        :keyword message: A message describing the error, intended to be suitable for display in a user
         interface.
        :paramtype message: str
        :keyword target: The target of the particular error. For example, the name of the property in
         error.
        :paramtype target: str
        :keyword details: A list of additional details about the error.
        :paramtype details: list[~azure.mgmt.network.v2021_05_01_preview.models.CloudErrorBody]
        """
        super(CloudErrorBody, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.target = target
        self.details = details
class ConfigurationGroup(msrest.serialization.Model):
    """The network configuration group resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar display_name: A friendly name for the network group.
    :vartype display_name: str
    :ivar description: A description of the network group.
    :vartype description: str
    :ivar member_type: Group member type.
    :vartype member_type: str
    :ivar provisioning_state: The provisioning state of the scope assignment resource. Possible
     values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or
     ~azure.mgmt.network.v2021_05_01_preview.models.ProvisioningState
    """

    # Server-populated fields that the client may not set.
    _validation = {
        'provisioning_state': {'readonly': True},
    }

    # Maps Python attribute names to wire-format JSON keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'display_name': {'key': 'properties.displayName', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'member_type': {'key': 'properties.memberType', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        id: Optional[str] = None,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        member_type: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword id: Resource ID.
        :paramtype id: str
        :keyword display_name: A friendly name for the network group.
        :paramtype display_name: str
        :keyword description: A description of the network group.
        :paramtype description: str
        :keyword member_type: Group member type.
        :paramtype member_type: str
        """
        super(ConfigurationGroup, self).__init__(**kwargs)
        self.id = id
        self.display_name = display_name
        self.description = description
        self.member_type = member_type
        # Read-only; populated by the service.
        self.provisioning_state = None
class ConnectivityConfiguration(ProxyResource):
    """The network manager connectivity configuration resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar etag: A unique read-only string that changes whenever the resource is updated.
    :vartype etag: str
    :ivar system_data: The system metadata related to this resource.
    :vartype system_data: ~azure.mgmt.network.v2021_05_01_preview.models.SystemData
    :ivar display_name: A friendly name for the resource.
    :vartype display_name: str
    :ivar description: A description of the connectivity configuration.
    :vartype description: str
    :ivar connectivity_topology: Connectivity topology type. Possible values include:
     "HubAndSpoke", "Mesh".
    :vartype connectivity_topology: str or
     ~azure.mgmt.network.v2021_05_01_preview.models.ConnectivityTopology
    :ivar hubs: List of hubItems.
    :vartype hubs: list[~azure.mgmt.network.v2021_05_01_preview.models.Hub]
    :ivar is_global: Flag if global mesh is supported. Possible values include: "False", "True".
    :vartype is_global: str or ~azure.mgmt.network.v2021_05_01_preview.models.IsGlobal
    :ivar applies_to_groups: Groups for configuration.
    :vartype applies_to_groups:
     list[~azure.mgmt.network.v2021_05_01_preview.models.ConnectivityGroupItem]
    :ivar provisioning_state: The provisioning state of the connectivity configuration resource.
     Possible values include: "Succeeded", "Updating", "Deleting", "Failed".
    :vartype provisioning_state: str or
     ~azure.mgmt.network.v2021_05_01_preview.models.ProvisioningState
    :ivar delete_existing_peering: Flag if need to remove current existing peerings. Possible
     values include: "False", "True".
    :vartype delete_existing_peering: str or
     ~azure.mgmt.network.v2021_05_01_preview.models.DeleteExistingPeering
    """

    # Server-populated fields that the client may not set.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'etag': {'readonly': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps Python attribute names to wire-format JSON keys and msrest types.
    # 'properties.*' keys flatten the nested 'properties' envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'display_name': {'key': 'properties.displayName', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'connectivity_topology': {'key': 'properties.connectivityTopology', 'type': 'str'},
        'hubs': {'key': 'properties.hubs', 'type': '[Hub]'},
        'is_global': {'key': 'properties.isGlobal', 'type': 'str'},
        'applies_to_groups': {'key': 'properties.appliesToGroups', 'type': '[ConnectivityGroupItem]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'delete_existing_peering': {'key': 'properties.deleteExistingPeering', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        connectivity_topology: Optional[Union[str, "ConnectivityTopology"]] = None,
        hubs: Optional[List["Hub"]] = None,
        is_global: Optional[Union[str, "IsGlobal"]] = None,
        applies_to_groups: Optional[List["ConnectivityGroupItem"]] = None,
        delete_existing_peering: Optional[Union[str, "DeleteExistingPeering"]] = None,
        **kwargs
    ):
        """
        :keyword display_name: A friendly name for the resource.
        :paramtype display_name: str
        :keyword description: A description of the connectivity configuration.
        :paramtype description: str
        :keyword connectivity_topology: Connectivity topology type. Possible values include:
         "HubAndSpoke", "Mesh".
        :paramtype connectivity_topology: str or
         ~azure.mgmt.network.v2021_05_01_preview.models.ConnectivityTopology
        :keyword hubs: List of hubItems.
        :paramtype hubs: list[~azure.mgmt.network.v2021_05_01_preview.models.Hub]
        :keyword is_global: Flag if global mesh is supported. Possible values include: "False", "True".
        :paramtype is_global: str or ~azure.mgmt.network.v2021_05_01_preview.models.IsGlobal
        :keyword applies_to_groups: Groups for configuration.
        :paramtype applies_to_groups:
         list[~azure.mgmt.network.v2021_05_01_preview.models.ConnectivityGroupItem]
        :keyword delete_existing_peering: Flag if need to remove current existing peerings. Possible
         values include: "False", "True".
        :paramtype delete_existing_peering: str or
         ~azure.mgmt.network.v2021_05_01_preview.models.DeleteExistingPeering
        """
        super(ConnectivityConfiguration, self).__init__(**kwargs)
        # Read-only; populated by the service.
        self.system_data = None
        self.display_name = display_name
        self.description = description
        self.connectivity_topology = connectivity_topology
        self.hubs = hubs
        self.is_global = is_global
        self.applies_to_groups = applies_to_groups
        # Read-only; populated by the service.
        self.provisioning_state = None
        self.delete_existing_peering = delete_existing_peering
class ConnectivityConfigurationListResult(msrest.serialization.Model):
    """Result of the request to list network manager connectivity configurations. It contains a list of configurations and a link to get the next set of results.

    :ivar value: Gets a page of Connectivity Configurations.
    :vartype value: list[~azure.mgmt.network.v2021_05_01_preview.models.ConnectivityConfiguration]
    :ivar next_link: Gets the URL to get the next page of results.
    :vartype next_link: str
    """

    # Maps Python attribute names to wire-format JSON keys and msrest types.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ConnectivityConfiguration]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["ConnectivityConfiguration"]] = None,
        next_link: Optional[str] = None,
        **kwargs
    ):
        """
        :keyword value: Gets a page of Connectivity Configurations.
        :paramtype value:
         list[~azure.mgmt.network.v2021_05_01_preview.models.ConnectivityConfiguration]
        :keyword next_link: Gets the URL to get the next page of results.
        :paramtype next_link: str
        """
        super(ConnectivityConfigurationListResult, self).__init__(**kwargs)
        self.value = value
        # None when this page is the last page of results.
        self.next_link = next_link
class ConnectivityGroupItem(msrest.serialization.Model):
"""Connectivity group item.
All required parameters must be populated in order to send to Azure.
:ivar network_group_id: Required. Network group Id.
:vartype network_group_id: str
:ivar use_hub_gateway: Flag if need to use hub gateway. Possible values include: "False",
"True".
:vartype use_hub_gateway: str or ~azure.mgmt.network.v2021_05_01_preview.models.UseHubGateway
:ivar is_global: Flag if global is supported. Possible values include: "False", "True".
:vartype is_global: str or ~azure.mgmt.network.v2021_05_01_preview.models.IsGlobal
:ivar group_connectivity: Required. Group connectivity type. Possible values include: "None",
"DirectlyConnected".
:vartype group_connectivity: str or
~azure.mgmt.network.v2021_05_01_preview.models.GroupConnectivity
"""
_validation = {
'network_group_id': {'required': True},
'group_connectivity': {'required': True},
}
_attribute_map = {
'network_group_id': {'key': 'networkGroupId', 'type': 'str'},
'use_hub_gateway': {'key': 'useHubGateway', 'type': 'str'},
'is_global': {'key': 'isGlobal', 'type': 'str'},
'group_connectivity': {'key': 'groupConnectivity', 'type': 'str'},
}
def __init__(
self,
*,
network_group_id: str,
group_connectivity: Union[str, "GroupConnectivity"],
use_hub_gateway: Optional[Union[str, "UseHubGateway"]] = None,
is_global: Optional[Union[str, "IsGlobal"]] = None,
**kwargs
):
"""
:keyword network_group_id: Required. Network group Id.
:paramtype network_group_id: str
:keyword use_hub_gateway: Flag if need to use hub gateway. Possible values include: "False",
"True".
:paramtype use_hub_gateway: | |
<gh_stars>1-10
#
# Hi all,
# this is the Python code I used to make the visualization "Temperature circle"
# (https://twitter.com/anttilip/status/892318734244884480).
# Please be aware that originally I wrote this for my tests only so the
# code was not ment to be published and is a mess and has no comments.
# Feel free to improve, modify, do whatever you want with it. If you decide
# to use the code, make an improved version of it, or it is useful for you
# in some another way I would be happy to know about it. You can contact me
# for example in Twitter (@anttilip). Unchecked demo data (no quarantees)
# for year 2017 Jan-Jul is included here and this code draws only a single image.
# The animation code is basically just a loop through the years. To keep
# it simple, I only included one year here.
#
# Thanks and have fun!
# Antti
#
# ---------
#
# Copyright 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
# Figure style constants: background colour, label font, and the year shown.
backgroundcolor = '#faf2eb'
fontname = 'Lato'
yearname = '2017'
data2017 = {
'AMERICA': [
['Antigua and Barbuda', 0.68],
['Argentina', 0.89],
['Bahamas', 0.65],
['Barbados', 0.68],
['Belize', 1.22],
['Bolivia', 1.22],
['Brazil', 1.23],
['Canada', 1.72],
['Chile', 0.93],
['Colombia', 0.88],
['Costa Rica', 0.76],
['Cuba', 0.78],
['Dominica', 0.64],
['Dominican Republic', 0.82],
['Ecuador', 1.16],
['El Salvador', 0.66],
['Grenada', 0.75],
['Guatemala', 1.25],
['Guyana', 0.65],
['Haiti', 0.56],
['Honduras', 1.1],
['Jamaica', 0.51],
['Mexico', 1.75],
['Nicaragua', 0.96],
['Panama', 0.65],
['Paraguay', 1.02],
['Peru', 1.25],
['Saint Kitts and Nevis', 0.68],
['Saint Lucia', 0.73],
['Saint Vincent and the Grenadines', 0.75],
['Suriname', 0.62],
['Trinidad and Tobago', 0.73],
['United States', 1.92],
['Uruguay', 1.02],
['Venezuela', 0.86],
],
'OCEANIA': [
['Australia', 0.77],
['Fiji', 0.64],
['Kiribati', 0.21],
['Marshall Islands', 0.66],
['Micronesia', 0.9],
['Nauru', 0.82],
['New Zealand', 0.47],
['Palau', 0.94],
['Papua New Guinea', 0.92],
['Samoa', 0.77],
['Solomon Island', 1.0],
['Tonga', 0.86],
['Vanuatu', 1.17],
],
'EUROPE': [
['Albania', 1.07],
['Andorra', 1.88],
['Armenia', 0.38],
['Austria', 1.66],
['Azerbaijan', 0.51],
['Belarus', 1.58],
['Belgium', 1.79],
['Bosnia and Herzegovina', 1.4],
['Bulgaria', 0.89],
['Croatia', 1.5],
['Cyprus', 0.38],
['Czech Republic', 1.68],
['Denmark', 1.73],
['Estonia', 1.67],
['Finland', 1.48],
['France', 1.62],
['Georgia', 0.44],
['Germany', 1.76],
['Greece', 0.77],
['Hungary', 1.49],
['Iceland', 1.66],
['Ireland', 1.57],
['Italy', 1.57],
['Latvia', 1.70],
['Liechtenstein', 1.74],
['Lithuania', 1.70],
['Luxembourg', 1.79],
['Macedonia', 0.99],
['Malta', 1.03],
['Moldova', 1.12],
['Montenegro', 1.25],
['Netherlands', 1.77],
['Norway', 1.63],
['Poland', 1.67],
['Portugal', 1.71],
['Romania', 1.14],
['San Marino', 1.59],
['Serbia', 1.23],
['Slovakia', 1.56],
['Slovenia', 1.59],
['Spain', 1.89],
['Sweden', 1.69],
['Switzerland', 1.76],
['Ukraine', 1.23],
['United Kingdom', 1.68],
],
'AFRICA': [
['Algeria', 1.79],
['Angola', 0.70],
['Benin', 1.13],
['Botswana', 0.65],
['Burkina Faso', 1.20],
['Burundi', 1.20],
['Cameroon', 1.05],
['Cape Verde', 0.72],
['Central African Republic', 1.06],
['Chad', 1.04],
['Comoros', 0.90],
['Congo', 0.88],
['Democratic Republic of Congo', 0.97],
['Djibouti', 1.2],
['Egypt', 0.7],
['Equatorial Guinea', 0.92],
['Eritrea', 1.22],
['Ethiopia', 1.35],
['Gabon', 0.86],
['Gambia', 1.43],
['Ghana', 1.08],
['Guinea', 1.34],
['Guinea-Bissau', 1.39],
['Ivory Coast', 1.22],
['Kenya', 1.14],
['Lesotho', 0.84],
['Liberia', 1.21],
['Libya', 0.94],
['Madagascar', 1.16],
['Malawi', 0.89],
['Mali', 1.32],
['Mauritania', 1.56],
['Mauritius', 1.16],
['Morocco', 1.86],
['Mozambique', 0.90],
['Namibia', 0.94],
['Niger', 0.90],
['Nigeria', 1.10],
['Rwanda', 1.23],
['Sao Tome and Principe', 0.86],
['Senegal', 1.41],
['Seychelles', 0.99],
['Sierra Leone', 1.29],
['Somalia', 1.19],
['South Africa', 0.91],
['South Sudan', 1.27],
['Sudan', 1.17],
['Swaziland', 0.69],
['Tanzania', 1.01],
['Togo', 1.20],
['Tunisia', 1.81],
['Uganda', 1.26],
['Zambia', 0.59],
['Zimbabwe', 0.58],
],
'ASIA': [
['Afghanistan', 1.78],
['Bahrain', 1.48],
['Bangladesh', 0.52],
['Bhutan', 0.61],
['Brunei', 0.77],
['Burma (Myanmar)', 0.65],
['Cambodia', 0.84],
['China', 1.80],
['East Timor', 0.34],
['India', 0.96],
['Indonesia', 0.67],
['Iran', 1.48],
['Iraq', 0.68],
['Israel', 0.52],
['Japan', 1.03],
['Jordan', 0.56],
['Kazakhstan', 1.91],
['Kuwait', 1.24],
['Kyrgyzstan', 1.57],
['Laos', 0.87],
['Lebanon', 0.42],
['Malaysia', 0.79],
['Maldives', 0.70],
['Mongolia', 3.05],
['Nepal', 0.71],
['North Korea', 2.01],
['Oman', 1.53],
['Pakistan', 1.76],
['Philippines', 0.81],
['Qatar', 1.86],
['Russian Federation', 3.01],
['Saudi Arabia', 1.46],
['Singapore', 0.51],
['South Korea', 1.65],
['Sri Lanka', 0.90],
['Syria', 0.40],
['Tajikistan', 1.39],
['Thailand', 0.85],
['Turkey', 0.39],
['Turkmenistan', 1.50],
['United Arab Emirates', 2.08],
['Uzbekistan', 1.54],
['Vietnam', 0.72],
['Yemen', 1.37],
]
}
def rotText(areaText, defaultspacing, rotangleoffset, rText, fontname):
    """Draw a curved label one character at a time around the circle.

    areaText is a list of [character, angle] pairs.  The first pair's angle
    is the absolute start angle in degrees; each later pair's angle is a
    per-character kerning tweak added on top of defaultspacing.  rText is
    the radius at which the characters are placed, and rotangleoffset flips
    the glyphs (0 for upright on top, 180 for labels on the lower half).
    """
    angle = areaText[0][1]
    for index, entry in enumerate(areaText):
        if index:
            # Advance by the uniform spacing plus this character's tweak.
            angle += defaultspacing + entry[1]
        rad = np.deg2rad(angle)
        plt.text(
            rText * np.sin(rad),
            rText * np.cos(rad),
            entry[0],
            {'ha': 'center', 'va': 'center'},
            rotation=rotangleoffset - angle,
            fontsize=15,
            fontname=fontname,
        )
# --- Global figure style and temperature colour mapping -------------------
# backgroundcolor, data2017 and fontname are defined earlier in this script.
plt.rcParams['axes.facecolor'] = backgroundcolor
mpl.rcParams.update({'font.size': 22})
# Diverging colormap: blue = cooling, red = warming, centred on 0.
cmap = plt.get_cmap('RdYlBu_r')
norm = mpl.colors.Normalize(vmin=-2.0, vmax=2.0)
# Count spokes directly instead of hand-rolled counters over .items()
# (only the values / the number of keys are needed here).
Ncountries = sum(len(countries) for countries in data2017.values())
Ncontinents = len(data2017)
spaceBetweenContinents = 3.0 # degrees
Nspaces = Ncontinents - 1
# 345 degrees of arc are shared by all country spokes; the rest is the gap
# at 12 o'clock plus the gaps between continents.
anglePerCountry = (345.0 - Nspaces * spaceBetweenContinents) / (Ncountries - 1)
fig, ax = plt.subplots(figsize=(12, 12))
# NOTE(review): renderer and transf appear unused in this visible section --
# presumably leftovers from text-measurement experiments; confirm before
# removing them.
renderer = fig.canvas.get_renderer()
transf = ax.transData.inverted()
# Arc from 5 to 355 degrees used for the reference rings, leaving a small
# opening at the top of the figure.
limitangles = np.linspace(np.deg2rad(5.0), np.deg2rad(355.0), 500)
# Temperature scale: [radius, label, draw_ring?, ring linewidth].
scaleRs = [
    [1.5, '-2.0', True, 0.25],
    [0.5 * (1.5 + 2.25), '-1.0', True, 0.25],
    [2.25, '0.0', True, 1.0],
    [0.5 * (3.0 + 2.25), '+1.0', True, 0.25],
    [3.0, '+2.0', True, 0.25],
    [3.3, '$^\\circ$C', False, 0.0]
]
# Draw the concentric reference rings and their temperature labels at the
# 12 o'clock opening.  r is [radius, label, draw_ring?, ring linewidth].
for r in scaleRs:
    if r[2]:
        # Ring along the 5..355 degree arc only, so labels stay readable.
        ax.plot(r[0] * np.sin(limitangles), r[0] * np.cos(limitangles), linewidth=r[3], color='#888888', linestyle='-')
    plt.text(
        0.0,
        r[0],
        '{}'.format(r[1]),
        {'ha': 'center', 'va': 'center'},
        fontsize=12,
        # fontname is defined earlier in this script (not visible here).
        fontname=fontname,
    )
# --- One spoke per country ------------------------------------------------
# angle walks clockwise from just right of 12 o'clock; rText is the radius
# at which the country names are printed.
angle = 7.5
rText = 3.96
for continent in ['AFRICA', 'ASIA', 'EUROPE', 'AMERICA', 'OCEANIA']:
    for country in data2017[continent]:
        # Keep labels readable: flip their rotation on the lower half of
        # the circle (past ~185 degrees).
        if angle < 185.0:
            rotangle = -angle + 90.0
        else:
            rotangle = -angle - 90.0
        # Country name, on an opaque box so it masks the dashed guide line.
        plt.text(
            (rText) * np.sin(np.deg2rad(angle)),
            (rText) * np.cos(np.deg2rad(angle)),
            '{}'.format(country[0]),
            {'ha': 'center', 'va': 'center'},
            rotation=rotangle,
            fontsize=8,
            fontname=fontname,
            bbox={
                'facecolor': backgroundcolor,
                'linestyle': 'solid',
                'linewidth': 0.0,
                'boxstyle': 'square,pad=0.0'
            }
        )
        # Faint dashed guide line from the inner ring (r=1.3) out to the label.
        ax.plot(
            [1.3 * np.sin(np.deg2rad(angle)), 3.8 * np.sin(np.deg2rad(angle))],
            [1.3 * np.cos(np.deg2rad(angle)), 3.8 * np.cos(np.deg2rad(angle))],
            linewidth=0.6,
            linestyle='--',
            color='#DEDEDE'
        )
        lowerRoffset = 0.015
        temperatureAnomaly = country[1]
        # Map anomaly [-2, +2] degC linearly onto radius [1.5, 3.0].
        rValue = 1.5 + (temperatureAnomaly + 2.0) / 4.0 * 1.5 # a lot more clever way for computing the radius should be used here...
        # Slightly wider dark bar drawn first acts as an outline ...
        ax.plot(
            [(1.3 + lowerRoffset) * np.sin(np.deg2rad(angle)), rValue * np.sin(np.deg2rad(angle))],
            [(1.3 + lowerRoffset) * np.cos(np.deg2rad(angle)), rValue * np.cos(np.deg2rad(angle))],
            linewidth=4.3,
            linestyle='-',
            color='#202020'
        )
        # ... under the colour-mapped anomaly bar drawn on top of it.
        ax.plot(
            [(1.3 + lowerRoffset) * np.sin(np.deg2rad(angle)), rValue * np.sin(np.deg2rad(angle))],
            [(1.3 + lowerRoffset) * np.cos(np.deg2rad(angle)), rValue * np.cos(np.deg2rad(angle))],
            linewidth=4.0,
            linestyle='-',
            color=cmap(norm(temperatureAnomaly))
        )
        angle += anglePerCountry
    # Leave a visual gap between continents.
    angle += spaceBetweenContinents
# Solid disc in the middle, hiding the inner ends of the anomaly bars.
c = Circle((0.0, 0.0), radius=1.0, fill=True, color='#fff9f5')
ax.add_patch(c)
# Large year number in the centre; yearname is defined earlier in this
# script (not visible here).
plt.text(
    0.0,
    -0.52,
    yearname,
    {'ha': 'center', 'va': 'bottom'},
    fontsize=40,
    fontname=fontname,
)
plt.text(
    0.0,
    0.27,
    'Year',
    {'ha': 'center', 'va': 'center'},
    fontsize=26,
    fontname=fontname,
)
# Two full circles framing the centre disc and the base of the bars.
angles = np.linspace(np.deg2rad(0.0), np.deg2rad(360.0), 1000)
rs = [1.0, 1.3]
for r in rs:
    ax.plot(r * np.sin(angles), r * np.cos(angles), linewidth=1.0, color='#666666', linestyle='-')
# Author credit in the lower-right corner.
plt.text(
    5.87,
    -4.67,
    '<NAME> (@anttilip)',
    {'ha': 'right', 'va': 'center'},
    fontsize=10,
    fontname=fontname,
)
# Title, drawn twice on purpose: first a grey copy offset by (+0.015,
# -0.015) as a drop shadow, then the dark copy on top of it.
plt.text(
    -6.3 + 0.015,
    4.385 - 0.015,
    'Temperature anomalies',
    {'ha': 'left', 'va': 'center'},
    fontsize=27,
    fontname=fontname,
    color='#909090'
)
plt.text(
    -6.3,
    4.385,
    'Temperature anomalies',
    {'ha': 'left', 'va': 'center'},
    fontsize=27,
    fontname=fontname,
    color='#0D0D0D'
)
# Data-source attribution block in the lower-left corner.
plt.text(
    -6.35,
    -4.35,
    'Data source:\nNASA GISS Surface Temperature Analysis (GISTEMP)\nLand-Ocean Temperature Index, ERSSTv4, 1200km smoothing\nhttps://data.giss.nasa.gov/gistemp/\nAverage of monthly temperature anomalies. GISTEMP base period 1951-1980.',
    {'ha': 'left', 'va': 'center'},
    fontsize=10,
    fontname=fontname,
)
# Curved continent labels just outside the centre disc.  Each areaText entry
# is [character, angle]: the first angle is absolute (degrees clockwise from
# 12 o'clock), later angles are per-character kerning tweaks added to
# defaultspacing (see rotText).  rotangleoffset=180 flips labels on the
# lower half of the circle so they read upright.
areaText = [
    ['A', 46.0],
    ['f', 0.3],
    ['r', -0.05],
    ['i', -0.15],
    ['c', -0.15],
    ['a', 0.2],
]
rText, defaultspacing, rotangleoffset = 1.13, 4.4, 0.0
rotText(areaText, defaultspacing, rotangleoffset, rText, fontname)
# "Europe" sits on the lower half; negative spacing makes it run
# counter-clockwise.
areaText = [
    ['E', 236.0],
    ['u', 0.0],
    ['r', 0.3],
    ['o', 0.7],
    ['p', 0.0],
    ['e', 0.0],
]
rText, defaultspacing, rotangleoffset = 1.155, -5.5, 180.0
rotText(areaText, defaultspacing, rotangleoffset, rText, fontname)
areaText = [
    ['A', 147.0],
    ['s', -0.8],
    ['i', 0.0],
    ['a', 0.0],
]
rText, defaultspacing, rotangleoffset = 1.155, -4.7, 180.0
rotText(areaText, defaultspacing, rotangleoffset, rText, fontname)
areaText = [
    ['A', 276.0],
    ['m', 2.5],
    ['e', 0.6],
    ['r', -0.15],
    ['i', -2.0],
    ['c', -2.0],
    ['a', -0.15],
]
rText, defaultspacing, rotangleoffset = 1.13, 5.85, 0.0
rotText(areaText, defaultspacing, rotangleoffset, rText, fontname)
areaText = | |
'/fire_layer_1.png', b + '/textures/blocks')
if 'fire_layer_1.png' in file_names:
os.rename(os.path.join(b + '/textures/blocks', 'fire_layer_1.png'),
os.path.join(b + '/textures/blocks', str('fire_1.png')))
except:
pass
# Copy block textures from the source pack layout under ``a`` (1.7-era
# descriptive names in assets/minecraft/textures/blocks) into the
# destination pack under ``b`` (legacy names in textures/blocks).
#
# The original code repeated one identical copy-then-rename stanza per
# texture; the table below drives a single loop instead.  Pairs whose
# source and destination names are equal are kept so the plain copy (and
# the harmless same-name rename) still happens for them, preserving the
# original behaviour exactly.
_BLOCK_TEXTURE_RENAMES = [
    ('flower_dandelion.png', 'flower.png'),
    ('flower_pot.png', 'flowerpot.png'),
    ('furnace_front_off.png', 'furnace_front.png'),
    ('furnace_front_on.png', 'furnace_front_lit.png'),
    ('furnace_side.png', 'furnace_side.png'),
    ('furnace_top.png', 'furnace_top.png'),
    ('glass.png', 'glass.png'),
    ('rail_golden.png', 'goldenRail.png'),
    ('rail_golden_powered.png', 'goldenRail_powered.png'),
    ('grass_side.png', 'grass_side.png'),
    ('grass_side_overlay.png', 'grass_side_overlay.png'),
    ('grass_top.png', 'grass_top.png'),
    ('gravel.png', 'gravel.png'),
    ('netherrack.png', 'hellrock.png'),
    ('soul_sand.png', 'hellsand.png'),
    ('hopper_outside.png', 'hopper.png'),
    ('hopper_inside.png', 'hopper_inside.png'),
    ('hopper_top.png', 'hopper_top.png'),
    ('ice.png', 'ice.png'),
    ('itemframe_background.png', 'itemframe_back.png'),
    ('jukebox_top.png', 'jukebox_top.png'),
    ('ladder.png', 'ladder.png'),
    ('lava_still.png', 'lava.png'),
    ('lava_flow.png', 'lava_flow.png'),
    ('leaves_birch.png', 'leaves.png'),
    ('leaves_jungle.png', 'leaves_jungle.png'),
    ('leaves_jungle_opaque.png', 'leaves_jungle_opaque.png'),
    ('leaves_birch_opaque.png', 'leaves_opaque.png'),
    ('leaves_spruce.png', 'leaves_spruce.png'),
    ('leaves_spruce_opaque.png', 'leaves_spruce_opaque.png'),
    ('lever.png', 'lever.png'),
    ('glowstone.png', 'lightgem.png'),
    ('melon_side.png', 'melon_side.png'),
    ('melon_top.png', 'melon_top.png'),
    ('mob_spawner.png', 'mobspawner.png'),
    ('mushroom_brown.png', 'mushroom_brown.png'),
    ('mushroom_block_inside.png', 'mushroom_inside.png'),
    ('mushroom_red.png', 'mushroom_red.png'),
    ('mushroom_block_skin_brown.png', 'mushroom_skin_brown.png'),
    ('mushroom_block_skin_red.png', 'mushroom_skin_red.png'),
    ('mushroom_block_skin_stem.png', 'mushroom_skin_stem.png'),
    ('noteblock.png', 'musicblock.png'),
    ('mycelium_side.png', 'mycel_side.png'),
]
for _src_name, _dst_name in _BLOCK_TEXTURE_RENAMES:
    # Each texture is migrated independently and best-effort, like the
    # original per-file stanzas: a missing source file or destination
    # directory skips that one texture instead of aborting the run.
    try:
        file_path = a + '/assets/minecraft/textures/blocks'
        file_names = os.listdir(file_path)
        shutil.copy(file_path + '/' + _src_name, b + '/textures/blocks')
        if _src_name in file_names:
            os.rename(os.path.join(b + '/textures/blocks', _src_name),
                      os.path.join(b + '/textures/blocks', _dst_name))
    except (OSError, IOError, shutil.Error):
        # Narrowed from a bare ``except`` so genuine programming errors
        # (NameError, KeyboardInterrupt, ...) are no longer swallowed.
        pass
try:
file_path = a + '/assets/minecraft/textures/blocks'
file_names = os.listdir(file_path)
shutil.copy(file_path + '/mycelium_top.png', b + '/textures/blocks')
| |
# runtests.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
"""Functional tests for the Trac-GitHub plugin.
Trac's testing framework isn't well suited for plugins, so we NIH'd a bit.
"""
import BaseHTTPServer
import ConfigParser
import glob
import json
import os
import random
import re
import shutil
import signal
import subprocess
import sys
import threading
import time
import traceback
import unittest
import urllib2
import urlparse
from lxml import html
from trac.env import Environment
from trac.ticket.model import Ticket
from trac.util.translation import _
import requests
# Names of the scratch git repositories created for each test run.
GIT = 'test-git-foo'
ALTGIT = 'test-git-bar'
NOGHGIT = 'test-git-nogithub'  # repository deliberately NOT mapped to GitHub
# Trac environment layout and the local tracd instance serving it.
ENV = 'test-trac-github'
CONF = '%s/conf/trac.ini' % ENV
HTDIGEST = '%s/passwd' % ENV
URL = 'http://localhost:8765/%s' % ENV
# Shared secret and headers used to simulate GitHub webhook deliveries.
SECRET = 'test-secret'
HEADERS = {'Content-Type': 'application/json', 'X-GitHub-Event': 'push'}
UPDATEHOOK = '%s-mirror/hooks/trac-github-update' % GIT
# Toggled by the command line (presumably in a main() outside this view):
# run tracd under coverage / stream its log to the console.
COVERAGE = False
SHOW_LOG = False
class HttpNoRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that refuses to follow redirects.

    Instead of issuing the follow-up request, every redirect response is
    surfaced to the caller as an ``urllib2.HTTPError``, so tests can assert
    directly on the 3xx status code and the ``Location`` header.
    """
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Raise the redirect as an HTTPError instead of following it."""
        original_url = req.get_full_url()
        raise urllib2.HTTPError(original_url, code, msg, headers, fp)
# Install the no-redirect behaviour as the process-wide urllib2 default.
urllib2.install_opener(urllib2.build_opener(HttpNoRedirectHandler()))
class TracGitHubTests(unittest.TestCase):
    """Shared fixture base: git repositories + Trac environment + live tracd.

    Everything is created once per TestCase subclass in setUpClass and torn
    down in tearDownClass, so individual tests deliberately share state.
    """
    # Subclasses set this to True to enable Trac's cached git backend.
    cached_git = False
    @classmethod
    def setUpClass(cls):
        """Create the repositories and Trac environment, then start tracd."""
        cls.createGitRepositories()
        cls.createTracEnvironment()
        cls.startTracd()
        cls.env = Environment(ENV)
    @classmethod
    def tearDownClass(cls):
        """Destroy the fixtures in reverse order of creation."""
        cls.env.shutdown()
        cls.stopTracd()
        cls.removeTracEnvironment()
        cls.removeGitRepositories()
    @classmethod
    def createGitRepositories(cls):
        """Create three work repos and bare mirrors of the GitHub-mapped two."""
        subprocess.check_output(['git', 'init', GIT])
        subprocess.check_output(['git', 'init', ALTGIT])
        subprocess.check_output(['git', 'init', NOGHGIT])
        cls.makeGitCommit(GIT, 'README', 'default git repository\n')
        cls.makeGitCommit(ALTGIT, 'README', 'alternative git repository\n')
        cls.makeGitCommit(NOGHGIT, 'README', 'git repository not on GitHub\n')
        subprocess.check_output(['git', 'clone', '--quiet', '--mirror', GIT, '%s-mirror' % GIT])
        subprocess.check_output(['git', 'clone', '--quiet', '--mirror', ALTGIT, '%s-mirror' % ALTGIT])
    @classmethod
    def removeGitRepositories(cls):
        """Delete every repository created by createGitRepositories."""
        shutil.rmtree(GIT)
        shutil.rmtree(ALTGIT)
        shutil.rmtree(NOGHGIT)
        shutil.rmtree('%s-mirror' % GIT)
        shutil.rmtree('%s-mirror' % ALTGIT)
    @classmethod
    def createTracEnvironment(cls, **kwargs):
        """Initialise a Trac environment and write a trac.ini for the plugin.

        Optional keyword arguments (cached_git, client_id, client_secret,
        request_email, preferred_email_domain, organization, username,
        access_token, webhook_secret, username_prefix, resync) let
        specialised subclasses tweak the generated configuration.
        """
        subprocess.check_output(['trac-admin', ENV, 'initenv',
            'Trac - GitHub tests', 'sqlite:db/trac.db'])
        subprocess.check_output(['trac-admin', ENV, 'permission',
            'add', 'anonymous', 'TRAC_ADMIN'])
        conf = ConfigParser.ConfigParser()
        with open(CONF, 'rb') as fp:
            conf.readfp(fp)
        # Disable Trac's own VCS web UI and enable the git + github plugins.
        conf.add_section('components')
        conf.set('components', 'trac.versioncontrol.web_ui.browser.BrowserModule', 'disabled')
        conf.set('components', 'trac.versioncontrol.web_ui.changeset.ChangesetModule', 'disabled')
        conf.set('components', 'trac.versioncontrol.web_ui.log.LogModule', 'disabled')
        conf.set('components', 'trac.versioncontrol.svn_fs.*', 'disabled') # avoid spurious log messages
        conf.set('components', 'tracext.git.*', 'enabled') # Trac 0.12.4
        conf.set('components', 'tracext.github.*', 'enabled')
        conf.set('components', 'tracopt.ticket.commit_updater.*', 'enabled')
        conf.set('components', 'tracopt.versioncontrol.git.*', 'enabled') # Trac 1.0
        # Class attribute supplies the default; a kwarg overrides it.
        cached_git = cls.cached_git
        if 'cached_git' in kwargs:
            cached_git = kwargs['cached_git']
        if cached_git:
            conf.add_section('git')
            conf.set('git', 'cached_repository', 'true')
            conf.set('git', 'persistent_cache', 'true')
        # [github] section: OAuth app credentials and repository mapping.
        if not conf.has_section('github'):
            conf.add_section('github')
        client_id = '01234567890123456789'
        if 'client_id' in kwargs:
            client_id = kwargs['client_id']
        conf.set('github', 'client_id', client_id)
        client_secret = '<KEY>'
        if 'client_secret' in kwargs:
            client_secret = kwargs['client_secret']
        conf.set('github', 'client_secret', client_secret)
        conf.set('github', 'repository', 'aaugustin/trac-github')
        conf.set('github', 'alt.repository', 'follower/trac-github')
        conf.set('github', 'alt.branches', 'master stable/*')
        if 'request_email' in kwargs:
            conf.set('github', 'request_email', kwargs['request_email'])
        if 'preferred_email_domain' in kwargs:
            conf.set('github', 'preferred_email_domain', kwargs['preferred_email_domain'])
        if 'organization' in kwargs:
            conf.set('github', 'organization', kwargs['organization'])
        if 'username' in kwargs and 'access_token' in kwargs:
            conf.set('github', 'username', kwargs['username'])
            conf.set('github', 'access_token', kwargs['access_token'])
        if 'webhook_secret' in kwargs:
            conf.set('github', 'webhook_secret', kwargs['webhook_secret'])
        if 'username_prefix' in kwargs:
            conf.set('github', 'username_prefix', kwargs['username_prefix'])
        if SHOW_LOG:
            # The [logging] section already exists in the default trac.ini file.
            conf.set('logging', 'log_type', 'stderr')
        else:
            # Write debug log so you can read it on crashes
            conf.set('logging', 'log_type', 'file')
            conf.set('logging', 'log_file', 'trac.log')
            conf.set('logging', 'log_level', 'DEBUG')
        # Map the three repositories: '' (default) and 'alt' point at the
        # bare mirrors, 'nogh' at the non-GitHub repo's .git directory.
        conf.add_section('repositories')
        conf.set('repositories', '.dir', os.path.realpath('%s-mirror' % GIT))
        conf.set('repositories', '.type', 'git')
        conf.set('repositories', 'alt.dir', os.path.realpath('%s-mirror' % ALTGIT))
        conf.set('repositories', 'alt.type', 'git')
        conf.set('repositories', 'nogh.dir', os.path.realpath('%s/.git' % NOGHGIT))
        conf.set('repositories', 'nogh.type', 'git')
        # Show changed files in timeline, which will trigger the
        # IPermissionPolicy code paths
        conf.set('timeline', 'changeset_show_files', '-1')
        # Prepend GitHubPolicy to the permission-policy chain (idempotent).
        old_permission_policies = conf.get('trac', 'permission_policies')
        if 'GitHubPolicy' not in old_permission_policies:
            conf.set('trac', 'permission_policies',
                'GitHubPolicy, %s' % old_permission_policies)
        with open(CONF, 'wb') as fp:
            conf.write(fp)
        with open(HTDIGEST, 'w') as fp:
            # user: user, pass: <PASSWORD>, realm: realm
            fp.write("user:realm:8493fbc53ba582fb4c044c456bdc40eb\n")
        run_resync = kwargs['resync'] if 'resync' in kwargs else True
        if run_resync:
            # Allow skipping resync for perfomance reasons if not required
            subprocess.check_output(['trac-admin', ENV, 'repository', 'resync', ''])
            subprocess.check_output(['trac-admin', ENV, 'repository', 'resync', 'alt'])
            subprocess.check_output(['trac-admin', ENV, 'repository', 'resync', 'nogh'])
    @classmethod
    def removeTracEnvironment(cls):
        """Delete the Trac environment directory."""
        shutil.rmtree(ENV)
    @classmethod
    def startTracd(cls, **kwargs):
        """Start tracd on port 8765 (under coverage when COVERAGE is set) and
        poll until it answers HTTP requests."""
        if COVERAGE:
            tracd = ['coverage', 'run', '--append', '--branch',
                '--source=tracext.github',
                subprocess.check_output(['which', 'tracd']).strip()]
        else:
            tracd = ['tracd']
        if SHOW_LOG:
            kwargs['stdout'] = sys.stdout
            kwargs['stderr'] = sys.stderr
        cls.tracd = subprocess.Popen(tracd + ['--port', '8765', '--auth=*,%s,realm' % HTDIGEST, ENV], **kwargs)
        # Busy-wait until the server is up; any non-URLError response counts.
        while True:
            try:
                urllib2.urlopen(URL)
            except urllib2.URLError:
                time.sleep(0.1)
            else:
                break
    @classmethod
    def stopTracd(cls):
        """Interrupt tracd (SIGINT, like Ctrl-C) and wait for it to exit."""
        cls.tracd.send_signal(signal.SIGINT)
        cls.tracd.wait()
    @staticmethod
    def makeGitBranch(repo, branch):
        """Create *branch* in *repo* without checking it out."""
        subprocess.check_output(['git', '-C', repo, 'branch', branch])
    @staticmethod
    def makeGitCommit(repo, path, content, message='edit', branch='master'):
        """Write *content* to *path*, commit it on *branch*, return the hash.

        Non-master branches are checked out only for the duration of the
        commit; the repo is returned to master afterwards.
        """
        if branch != 'master':
            subprocess.check_output(['git', '-C', repo, 'checkout', branch],
                stderr=subprocess.PIPE)
        with open(os.path.join(repo, path), 'wb') as fp:
            fp.write(content)
        subprocess.check_output(['git', '-C', repo, 'add', path])
        subprocess.check_output(['git', '-C', repo, 'commit', '-m', message])
        if branch != 'master':
            subprocess.check_output(['git', '-C', repo, 'checkout', 'master'],
                stderr=subprocess.PIPE)
        changeset = subprocess.check_output(['git', '-C', repo, 'rev-parse', 'HEAD'])
        return changeset.strip()
    @staticmethod
    def makeGitHubHookPayload(n=1, reponame=''):
        """Build a minimal GitHub push-event payload for the last *n* commits."""
        # See https://developer.github.com/v3/activity/events/types/#pushevent
        # We don't reproduce the entire payload, only what the plugin needs.
        repo = {'': GIT, 'alt': ALTGIT}[reponame]
        commits = []
        log = subprocess.check_output(['git', '--git-dir=%s/.git' % repo, 'log',
            '-%d' % n, '--branches', '--format=oneline', '--topo-order'])
        for line in log.splitlines():
            # NOTE: the local ``_`` here shadows trac's translation helper
            # imported at module level; harmless inside this function.
            id, _, message = line.partition(' ')
            commits.append({'id': id, 'message': message, 'distinct': True})
        payload = {'commits': commits}
        return payload
    @staticmethod
    def openGitHubHook(n=1, reponame='', payload=None):
        """POST a (possibly auto-generated) push payload to the webhook URL."""
        if not payload:
            payload = TracGitHubTests.makeGitHubHookPayload(n, reponame)
        url = (URL + '/github/' + reponame) if reponame else URL + '/github'
        request = urllib2.Request(url, json.dumps(payload), HEADERS)
        return urllib2.urlopen(request)
class GitHubBrowserTests(TracGitHubTests):
    """End-to-end tests for /changeset redirection to github.com.

    Relies on the module-level no-redirect urllib2 opener: redirects are
    surfaced as HTTPError so the 302 and Location header can be asserted.
    """
    def testLinkToChangeset(self):
        """A changeset in the default repo redirects to its GitHub commit."""
        self.makeGitCommit(GIT, 'myfile', 'for browser tests')
        # The hook response ends with the new commit hash (last 40 chars).
        changeset = self.openGitHubHook().read().rstrip()[-40:]
        try:
            urllib2.urlopen(URL + '/changeset/' + changeset)
        except urllib2.HTTPError as exc:
            self.assertEqual(exc.code, 302)
            self.assertEqual(exc.headers['Location'],
                'https://github.com/aaugustin/trac-github/commit/%s' % changeset)
        else:
            self.fail("URL didn't redirect")
    def testAlternateLinkToChangeset(self):
        """Same as above for the 'alt' repository mapping."""
        self.makeGitCommit(ALTGIT, 'myfile', 'for browser tests')
        changeset = self.openGitHubHook(1, 'alt').read().rstrip()[-40:]
        try:
            urllib2.urlopen(URL + '/changeset/' + changeset + '/alt')
        except urllib2.HTTPError as exc:
            self.assertEqual(exc.code, 302)
            self.assertEqual(exc.headers['Location'],
                'https://github.com/follower/trac-github/commit/%s' % changeset)
        else:
            self.fail("URL didn't redirect")
    def testNonGitHubLinkToChangeset(self):
        """A repo without a GitHub mapping is served locally, no redirect."""
        changeset = self.makeGitCommit(NOGHGIT, 'myfile', 'for browser tests')
        subprocess.check_output(['trac-admin', ENV, 'changeset', 'added', 'nogh', changeset])
        response = requests.get(URL + '/changeset/' + changeset + '/nogh', allow_redirects=False)
        self.assertEqual(response.status_code, 200)
    def testLinkToPath(self):
        """A changeset path redirects to GitHub's blob view of the file."""
        self.makeGitCommit(GIT, 'myfile', 'for more browser tests')
        changeset = self.openGitHubHook().read().rstrip()[-40:]
        try:
            urllib2.urlopen(URL + '/changeset/' + changeset + '/myfile')
        except urllib2.HTTPError as exc:
            self.assertEqual(exc.code, 302)
            self.assertEqual(exc.headers['Location'],
                'https://github.com/aaugustin/trac-github/blob/%s/myfile' % changeset)
        else:
            self.fail("URL didn't redirect")
    def testAlternateLinkToPath(self):
        """Blob redirection for a path in the 'alt' repository."""
        self.makeGitCommit(ALTGIT, 'myfile', 'for more browser tests')
        changeset = self.openGitHubHook(1, 'alt').read().rstrip()[-40:]
        try:
            urllib2.urlopen(URL + '/changeset/' + changeset + '/alt/myfile')
        except urllib2.HTTPError as exc:
            self.assertEqual(exc.code, 302)
            self.assertEqual(exc.headers['Location'],
                'https://github.com/follower/trac-github/blob/%s/myfile' % changeset)
        else:
            self.fail("URL didn't redirect")
    def testNonGitHubLinkToPath(self):
        """Paths in the non-GitHub repo are served locally, no redirect."""
        changeset = self.makeGitCommit(NOGHGIT, 'myfile', 'for more browser tests')
        subprocess.check_output(['trac-admin', ENV, 'changeset', 'added', 'nogh', changeset])
        response = requests.get(URL + '/changeset/' + changeset + '/nogh/myfile', allow_redirects=False)
        self.assertEqual(response.status_code, 200)
    def testBadChangeset(self):
        """An unknown changeset id yields a 404."""
        with self.assertRaisesRegexp(urllib2.HTTPError, r'^HTTP Error 404: Not Found$'):
            urllib2.urlopen(URL + '/changeset/1234567890')
    def testBadUrl(self):
        """A malformed changeset URL yields a 404."""
        with self.assertRaisesRegexp(urllib2.HTTPError, r'^HTTP Error 404: Not Found$'):
            urllib2.urlopen(URL + '/changesetnosuchurl')
    def testTimelineFiltering(self):
        """Timeline shows only commits on branches matched by the config.

        'alt' is restricted to 'master stable/*', so msg 6 (on
        unstable/2.0) must not appear; the default repo has no branch
        filter, so msgs 1-3 all appear.
        """
        self.makeGitBranch(GIT, 'stable/2.0')
        self.makeGitBranch(GIT, 'unstable/2.0')
        self.makeGitBranch(ALTGIT, 'stable/2.0')
        self.makeGitBranch(ALTGIT, 'unstable/2.0')
        self.makeGitCommit(GIT, 'myfile', 'timeline 1\n', 'msg 1')
        self.makeGitCommit(GIT, 'myfile', 'timeline 2\n', 'msg 2', 'stable/2.0')
        self.makeGitCommit(GIT, 'myfile', 'timeline 3\n', 'msg 3', 'unstable/2.0')
        self.makeGitCommit(ALTGIT, 'myfile', 'timeline 4\n', 'msg 4')
        self.makeGitCommit(ALTGIT, 'myfile', 'timeline 5\n', 'msg 5', 'stable/2.0')
        self.makeGitCommit(ALTGIT, 'myfile', 'timeline 6\n', 'msg 6', 'unstable/2.0')
        self.openGitHubHook(3)
        self.openGitHubHook(3, 'alt')
        html = urllib2.urlopen(URL + '/timeline').read()
        self.assertTrue('msg 1' in html)
        self.assertTrue('msg 2' in html)
        self.assertTrue('msg 3' in html)
        self.assertTrue('msg 4' in html)
        self.assertTrue('msg 5' in html)
        self.assertFalse('msg 6' in html)
class GitHubLoginModuleTests(TracGitHubTests):
    """Tests for the OAuth-based GitHub login component."""

    @classmethod
    def startTracd(cls, **kwargs):
        # Disable check for HTTPS to avoid adding complexity to the test setup.
        kwargs['env'] = os.environ.copy()
        kwargs['env']['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
        super(GitHubLoginModuleTests, cls).startTracd(**kwargs)

    def testLogin(self):
        """/github/login redirects to GitHub's OAuth authorization page."""
        response = requests.get(URL + '/github/login', allow_redirects=False)
        self.assertEqual(response.status_code, 302)
        redirect_url = urlparse.urlparse(response.headers['Location'])
        self.assertEqual(redirect_url.scheme, 'https')
        self.assertEqual(redirect_url.netloc, 'github.com')
        self.assertEqual(redirect_url.path, '/login/oauth/authorize')
        params = urlparse.parse_qs(redirect_url.query, keep_blank_values=True)
        state = params['state'][0]  # this is a random value
        self.assertEqual(params, {
            'client_id': ['01234567890123456789'],
            'redirect_uri': [URL + '/github/oauth'],
            'response_type': ['code'],
            'scope': [''],
            'state': [state],
        })

    def testOauthInvalidState(self):
        """An OAuth callback with a wrong state token is rejected."""
        session = requests.Session()
        # This adds a oauth_state parameter in the Trac session.
        response = session.get(URL + '/github/login', allow_redirects=False)
        self.assertEqual(response.status_code, 302)
        response = session.get(
            URL + '/github/oauth?code=01234567890123456789&state=wrong_state',
            allow_redirects=False)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.headers['Location'], URL)
        response = session.get(URL)
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            "Invalid request. Please try to login again.", response.text)

    def testOauthInvalidStateWithoutSession(self):
        """An OAuth callback without any stored state is also rejected."""
        session = requests.Session()
        # There's no oauth_state parameter in the Trac session.
        # OAuth callback requests without state must still fail.
        response = session.get(
            URL + '/github/oauth?code=01234567890123456789',
            allow_redirects=False)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.headers['Location'], URL)
        response = session.get(URL)
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            "Invalid request. Please try to login again.", response.text)

    def testLogout(self):
        """/github/logout redirects back to the site root."""
        response = requests.get(URL + '/github/logout', allow_redirects=False)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.headers['Location'], URL)
class GitHubLoginModuleConfigurationTests(TracGitHubTests):
# Append custom failure messages to the automatically generated ones
longMessage = True
@classmethod
def setUpClass(cls):
    """Create git repositories plus three tracd environment variants:
    one pointing at a working mock OAuth/API server, one with both
    endpoints broken, and one with only the API endpoint broken."""
    cls.createGitRepositories()
    cls.mockdata = startAPIMock(8768)
    trac_env = os.environ.copy()
    trac_env.update({
        'TRAC_GITHUB_OAUTH_URL': 'http://127.0.0.1:8768/',
        'TRAC_GITHUB_API_URL': 'http://127.0.0.1:8768/',
        'OAUTHLIB_INSECURE_TRANSPORT': '1'
    })
    # Presumably nothing listens on port 8769, so these URLs simulate
    # an unreachable service -- TODO confirm.
    trac_env_broken = trac_env.copy()
    trac_env_broken.update({
        'TRAC_GITHUB_OAUTH_URL': 'http://127.0.0.1:8769/',
        'TRAC_GITHUB_API_URL': 'http://127.0.0.1:8769/',
    })
    trac_env_broken_api = trac_env.copy()
    trac_env_broken_api.update({
        'TRAC_GITHUB_API_URL': 'http://127.0.0.1:8769/',
    })
    cls.trac_env = trac_env
    cls.trac_env_broken = trac_env_broken
    cls.trac_env_broken_api = trac_env_broken_api
    # NOTE(review): writing a str to a file opened in 'wb' mode only works
    # on Python 2; this suite appears to target Python 2 (it uses urllib2).
    with open(SECRET, 'wb') as fp:
        fp.write('98765432109876543210')
@classmethod
def tearDownClass(cls):
    """Remove the git repositories and the shared-secret file."""
    cls.removeGitRepositories()
    os.remove(SECRET)
def testLoginWithReqEmail(self):
"""Test that configuring | |
"""
nc2pd
~~~~~
A thin python-netCDF4 wrapper to turn netCDF files into pandas data
structures, with a focus on extracting time series from regularly
spatial gridded data (with the ability to interpolate spatially).
Copyright 2015 <NAME>
License: MIT (see LICENSE file)
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import itertools
import numpy as np
import pandas as pd
from scipy import interpolate, ndimage
from netCDF4 import Dataset, num2date
def parse_time_string(string):
    """Parse a datetime string into a single ``datetime`` object.

    Parameters
    ----------
    string : str
        a datetime string such as '2015-01-02' or '2015-01-02 12:00'

    Returns
    -------
    datetime.datetime
    """
    try:
        # Older pandas: datetools.parse_time_string returns a tuple whose
        # first element is the parsed datetime.
        return pd.datetools.parse_time_string(string)[0]
    except AttributeError:
        # pd.datetools was removed in modern pandas; Timestamp parses the
        # same string formats.
        return pd.Timestamp(string).to_pydatetime()
class NetCDFDataset(object):
"""NetCDFDataset"""
def __init__(self, path):
    """Open the netCDF file at ``path`` and cache coordinate metadata."""
    super(NetCDFDataset, self).__init__()
    self.path = path
    self.rootgrp = Dataset(self.path)
    # Determine latitute and longitude variable names
    self.latlon_names = self._latlon_names()
    # Generate datetime labels for the time variable
    # Also sets self.time_name
    self.datetimes = self._datetime_labels()
    # Get array of latitude and longitude values
    lat_name, lon_name = self.latlon_names
    self.lon_array = self.rootgrp.variables[lon_name][:]
    self.lat_array = self.rootgrp.variables[lat_name][:]
    # Additional dimension slices set up internally as needed
    self.dim_slices_collapse = {}  # extra dims to slice then collapse (e.g. solargis 'hour')
    self.dim_slices_select = {}  # extra dims with a fixed internal selection
def _latlon_names(self):
    """Determine the latitude/longitude variable names in the dataset.

    Returns a ``(lat_name, lon_name)`` tuple; raises ValueError when
    none of the known naming schemes is present.
    """
    naming_schemes = (
        ('latitude', ('latitude', 'longitude')),
        ('lat', ('lat', 'lon')),
        ('XDim', ('YDim', 'XDim')),
    )
    for probe, names in naming_schemes:
        if probe in self.rootgrp.variables:
            return names
    raise ValueError('Cannot determine lat and lon variable names')
def _datetime_labels(self):
    """Return a Series of integer time indices labelled by datetime.

    Side effect: sets ``self.time_name`` and, for solargis-style files,
    registers an internal slice over the 'hour' dimension.
    """
    if ('dfb' in self.rootgrp.variables
            and 'hour' in self.rootgrp.variables):
        # solargis data has dimensions ('dfb', 'hour', 'latitude', 'longitude')
        # we must do some manual processing of the time dimension
        # dfb - 1 to account for 00:00 representation of 24:00
        # pushing us into the next day
        days = num2date(self.rootgrp.variables['dfb'][:] - 1,
                        'days since 1980-01-01')
        dt_from = '{} 00:00'.format(days[0].strftime('%Y-%m-%d'))
        dt_to = '{} 23:00'.format(days[-1].strftime('%Y-%m-%d'))
        dates = pd.date_range(dt_from, dt_to, freq='H')
        # Set an additional slice on hour internally
        self.dim_slices_collapse['hour'] = slice(None, None, None)  # [::]
        self.time_name = 'dfb'
    else:
        try:
            time_name = 'time'
            timevar = self.rootgrp.variables[time_name]
        except KeyError:
            # BUG FIX: a missing entry in the `variables` mapping raises
            # KeyError, not AttributeError; with the old handlers the
            # 'TIME' fallback was unreachable and the KeyError escaped.
            try:
                time_name = 'TIME'
                timevar = self.rootgrp.variables[time_name]
            except KeyError:
                raise ValueError('Cannot find time variable.')
        self.time_name = time_name
        try:
            timevar_units = timevar.units.decode()
        except AttributeError:
            # units may already be a decoded (unicode) string
            timevar_units = timevar.units
        dates = num2date(timevar[:], timevar_units, calendar='standard')
    labels = pd.Series(range(len(dates)), index=dates)
    return labels
def _find_coordinates(self, lat, lon, bounds=False):
    """
    Finds the index of given lat/lon pair in the dataset.
    Uses binary search to find closest coordinates if the exact ones
    don't exist.
    Parameters
    ----------
    lat : float
        latitude
    lon : float
        longitude
    bounds : bool, default False
        if True, return the two indices bracketing the requested value
        instead of the single closest index
    Returns
    -------
    x, y : 4-tuple
        x and y (lon and lat) coordinate indices
    """
    def _find_closest(array, value, bounds):
        """Searches array for value and returns the index of the entry
        closest to value."""
        if bounds:
            # NOTE(review): searchsorted assumes `array` is sorted in
            # ascending order; latitude arrays stored north-to-south
            # would yield wrong bracketing indices here -- confirm the
            # ordering of the coordinate arrays.
            pos = np.searchsorted(array, value)
            return (pos - 1, pos)
        else:
            # Closest by absolute distance; no ordering assumption.
            return (np.abs(array - value)).argmin()
    if lon in self.lon_array:
        # Exact match: index of the first equal entry.
        x = np.argmax(self.lon_array == lon)
        if bounds:
            x = (x, x)
    else:
        x = _find_closest(self.lon_array, lon, bounds)
    if lat in self.lat_array:
        y = np.argmax(self.lat_array == lat)
        if bounds:
            y = (y, y)
    else:
        y = _find_closest(self.lat_array, lat, bounds)
    # Return either a single x, y pair or a list of pairs: [(x, y)]
    if bounds:
        return list(zip(x, y))
    else:
        return (x, y)
def get_gridpoints(self, latlon_pairs, bounds=False):
    """Take a list of lat-lon pairs and return a list of x-y indices.

    NOTE(review): for multiple pairs with ``bounds=False`` the result is
    a flat list of alternating x and y indices (the per-pair tuples are
    chained together); only a single pair is re-wrapped as ``[(x, y)]``.
    Confirm callers expect this shape.
    """
    nested = (self._find_coordinates(lat, lon, bounds)
              for lat, lon in latlon_pairs)
    result = list(itertools.chain.from_iterable(nested))
    if len(latlon_pairs) == 1 and not bounds:
        result = [result]
    return result
def get_timerange(self, start=None, end=None, include_end=True):
    """
    Take a start and end datetime and return a time index range.
    If include_end is True, the returned range is 1 longer so that
    the final timestep given in the range is included in slicing.
    If the desired end point is not found in the data, the most recent
    available end point is used.
    """
    # BUG FIX: ``Series.ix`` was removed in pandas 1.0; positional
    # access is now spelled ``.iloc``. A scalar result still raises
    # AttributeError, so the fallback branches behave as before.
    if start:
        try:
            start_idx = self.datetimes[start].iloc[0]
        except AttributeError:  # because it's a single value already
            start_idx = self.datetimes[start]
    else:
        start_idx = self.datetimes.iloc[0]
    if end:
        try:
            end_idx = self.datetimes[end].iloc[-1]
        except AttributeError:  # because it's a single value already
            end_idx = self.datetimes[end]
        except IndexError:  # because we've hit a missing datetime entry
            # First get closest available end index
            end_idx = np.argmin(np.abs(self.datetimes.index.to_pydatetime() -
                                       parse_time_string(end)))
            # Now check if this is beyond the desired end date, and if so,
            # move back one in the list of existing datetimes, which
            # will put us within the desired endpoint (given that the
            # desired endpoint didn't exist in the first place!)
            if self.datetimes.index[end_idx] > parse_time_string(end):
                end_idx = end_idx - 1
    else:
        end_idx = self.datetimes.iloc[-1]
    if include_end:
        end_idx += 1
    return (start_idx, end_idx)
def read_data(self, variable, x_range=None, y_range=None,
              time_range=None, fixed_dims={},
              friendly_labels=False):
    """
    Return a panel of data with the dimensions [time, lat, lon], i.e.
    items are time, major_axis is latitude, minor_axis is longitude.

    NOTE(review): ``pd.Panel`` was removed in pandas 1.0, so this
    method requires an older pandas version.

    Parameters
    ----------
    variable : str
        name of variable
    x_range : int or (int, int), default None
        range of x grid points to select, if None, entire x range is used
    y_range : int or (int, int), default None
        range of y grid points to select, if None, entire y range is used
    time_range : int or (int, int), default None
        range of timesteps to select, if None, entire time range is used
    fixed_dims : dict, default {}
        map selections to other dimensions that may exist in the data,
        e.g. {'level': 0}
    friendly_labels : bool, default False
        if True, sets the axis labels to datetimes, latitudes and
        longitudes, instead of just integer indices
    """
    # Helpers
    slice_all = slice(None, None, None)
    def add_slice(setting, var_slice):
        # Convert a range setting into a slice and record it.
        # NOTE(review): ``if not setting`` treats a setting of 0 the
        # same as None (select everything) -- confirm that is intended.
        if not setting:
            slicer = slice_all
        else:
            if isinstance(setting, int) or isinstance(setting, np.integer):
                # A bare integer selects a single index but keeps the dim.
                slicer = slice(setting, setting + 1)
            else:  # Assume two or more integers
                slicer = slice(*setting)
        var_slice.append(slicer)
        return slicer
    # Start work
    var = self.rootgrp.variables[variable]
    var_slice = []
    dim_pos = 0
    # Transposition so that the panel order is always time, lat, lon
    transposition = [None, None, None]
    for dim in var.dimensions:
        # 1. check if it's time, lat or lon name
        # and assign appropriate slice if so
        if dim == self.time_name:
            time_slice = add_slice(time_range, var_slice)
            transposition[0] = dim_pos
            dim_pos += 1
        elif dim == self.latlon_names[0]:  # lat --> y
            y_slice = add_slice(y_range, var_slice)
            transposition[1] = dim_pos
            dim_pos += 1
        elif dim == self.latlon_names[1]:  # lon --> x
            x_slice = add_slice(x_range, var_slice)
            transposition[2] = dim_pos
            dim_pos += 1
        # 2. check if it's in self.dim_slices
        elif dim in self.dim_slices_collapse:
            var_slice.append(self.dim_slices_collapse[dim])
            # FIXME after taking var[var_slice], will also need
            # to collapse all dim_slices_collapse simensions,
            # or else reading e.g. solargis files won't work
            raise NotImplementedError('well, that did not work!')
        elif dim in self.dim_slices_select:
            var_slice.append(self.dim_slices_select[dim])
        # 3. check if it's in fixed_dims
        elif dim in fixed_dims:
            var_slice.append(fixed_dims[dim])
        # 4. else, raise a KeyError or something
        else:
            raise KeyError('Dimension `{}` unknown'.format(dim))
    panel = pd.Panel(var[var_slice]).transpose(*transposition)
    if friendly_labels:
        panel.items = self.datetimes.index[time_slice]
        panel.major_axis = self.lat_array[y_slice]
        panel.minor_axis = self.lon_array[x_slice]
    return panel
def read_timeseries(self, variable, latlon_pairs,
start=None, end=None,
buffer_size=0,
fixed_dims={},
return_metadata=False):
"""
Return a time series for each given lat-lon pair.
Parameters
----------
variable : str
name of variable
latlon_pairs : list of (lat, lon) tuples
list of (lat, lon) tuples
start : str, default None
datetime string of the form 'YYYY-MM-DD hh:mm' or similar
end : str, default None
datetime string, like for start
fixed_dims : dict, default {}
map selections to other dimensions that may exist in the data,
e.g. {'level': 0}
Returns
-------
data : one or two pandas DataFrames
the first DataFrame contains each requested lat-lon pair
as a column
if return_metadata is True, the second DataFrame maps from
the requested latitudes/longitudes to grid points and
their latitudes/longitudes
"""
gridpoints = self.get_gridpoints(latlon_pairs)
timerange = self.get_timerange(start, end)
# Data
dfs = []
for x, y in gridpoints:
if buffer_size:
x_slice = (x - buffer_size, x + 1 + buffer_size)
y_slice = (y - buffer_size, y + 1 + buffer_size)
else:
x_slice = x
y_slice = y
panel = self.read_data(variable,
x_range=x_slice,
y_range=y_slice,
time_range=timerange,
fixed_dims=fixed_dims,
friendly_labels=True)
dfs.append(panel.to_frame().T)
# Metadata
md = pd.DataFrame(latlon_pairs, columns=['lat', 'lon'])
grid_cols = list(zip(*gridpoints))
md['y_gridpoint'] | |
= MaxNLocator(nbins=nbins, steps=steps).tick_values(min(sMin,-sMax), max(sMax,-sMin))
else:
levels = MaxNLocator(nbins=nbins, steps=steps).tick_values(sMin, sMax)
elif len(clim)==2: levels = MaxNLocator(nbins=nbins, steps=steps).tick_values(clim[0], clim[1])
else: levels = clim
nColors = len(levels)-1
if extend is None:
if sMin<levels[0] and sMax>levels[-1]: extend = 'both'#; eColors=[1,1]
elif sMin<levels[0] and sMax<=levels[-1]: extend = 'min'#; eColors=[1,0]
elif sMin>=levels[0] and sMax>levels[-1]: extend = 'max'#; eColors=[0,1]
else: extend = 'neither'#; eColors=[0,0]
eColors = [0,0]
if extend in ['both', 'min']: eColors[0] = 1
if extend in ['both', 'max']: eColors[1] = 1
cmap = plt.get_cmap(colorMapName)#,lut=nColors+eColors[0]+eColors[1])
#cmap0 = cmap(0.)
#cmap1 = cmap(1.)
#cmap = ListedColormap(cmap(range(eColors[0],nColors+1-eColors[1]+eColors[0])))#, N=nColors)
#if eColors[0]>0: cmap.set_under(cmap0)
#if eColors[1]>0: cmap.set_over(cmap1)
if logscale: norm = LogNorm(vmin=levels[0], vmax=levels[-1])
else: norm = BoundaryNorm(levels, ncolors=cmap.N)
return cmap, norm, extend
def linCI(min, max, ci, *args):
    """
    Return linearly spaced contour intervals from min to max with
    spacing ci. Unlike numpy.arange, max itself is included when
    max = min + ci*N for an integer N. Extra (min, max, ci) triplets
    may be appended as further positional arguments.
    """
    values = numpy.arange(min, max + ci, ci)
    if args:
        return numpy.concatenate((values, linCI(*args)))
    return values

def pmCI(min, max, ci, *args):
    """
    Return contour intervals symmetric about zero: the levels from
    linCI(min, max, ci, ...) mirrored to negative values, with a zero
    level included only once (when min == 0).
    """
    positive = linCI(min, max, ci, *args)
    mirrored = -positive[::-1]
    if positive[0] > 0:
        return numpy.concatenate((mirrored, positive))
    return numpy.concatenate((mirrored, positive[1:]))
def myStats(s, area, debug=False):
    """
    Compute statistics of masked array s: (min, max, mean, std, rms).
    mean/std/rms are area-weighted; if no area weights are given they
    are returned as None. Masked points of s get zero weight.
    """
    lo = numpy.ma.min(s)
    hi = numpy.ma.max(s)
    if debug:
        print('myStats: min(s) =', lo)
        print('myStats: max(s) =', hi)
    if area is None:
        return lo, hi, None, None, None
    weight = area.copy()
    if debug:
        print('myStats: sum(area) =', numpy.ma.sum(weight))
    # Zero out the weight wherever s is masked.
    if numpy.ma.getmask(s).any():
        weight[s.mask] = 0.
    sumArea = numpy.ma.sum(weight)
    if debug:
        print('myStats: sum(area) =', sumArea, 'after masking')
        print('myStats: sum(s) =', numpy.ma.sum(s))
        print('myStats: sum(area*s) =', numpy.ma.sum(weight * s))
    mean = numpy.ma.sum(weight * s) / sumArea
    std = math.sqrt(numpy.ma.sum(weight * ((s - mean) ** 2)) / sumArea)
    rms = math.sqrt(numpy.ma.sum(weight * (s ** 2)) / sumArea)
    if debug:
        print('myStats: mean(s) =', mean)
        print('myStats: std(s) =', std)
        print('myStats: rms(s) =', rms)
    return lo, hi, mean, std, rms
def corr(s1, s2, area):
    """
    Calculate the area-weighted correlation coefficient between s1 and s2,
    assuming s1 and s2 have no mean, i.e. s1 = S - mean(S), etc.
    Returns NaN when either field has zero variance.

    NOTE(review): only s1's mask is used to zero out weights; s2's own
    mask is not consulted -- confirm the two fields share a mask.
    """
    weight = area.copy()
    if not numpy.ma.getmask(s1).any() == numpy.ma.nomask:
        weight[s1.mask] = 0.
    sumArea = numpy.ma.sum(weight)
    v1 = numpy.ma.sum(weight * (s1 ** 2)) / sumArea
    v2 = numpy.ma.sum(weight * (s2 ** 2)) / sumArea
    # BUG FIX: numpy.NaN was removed in NumPy 2.0; numpy.nan works on
    # all NumPy versions.
    if v1 == 0 or v2 == 0:
        return numpy.nan
    rxy = numpy.ma.sum(weight * (s1 * s2)) / sumArea / math.sqrt(v1 * v2)
    return rxy
def createXYcoords(s, x, y):
    """
    Check that x and y are appropriate 2D corner coordinates for the
    2D field s, and try to construct some if they are not.

    Returns (xCoord, yCoord) as 2D arrays one larger than s in each
    dimension (corner coordinates).
    """
    nj, ni = s.shape
    # Default to index coordinates when none are supplied.
    if x is None: xCoord = numpy.arange(0., ni + 1)
    else: xCoord = numpy.ma.filled(x, 0.)
    if y is None: yCoord = numpy.arange(0., nj + 1)
    else: yCoord = numpy.ma.filled(y, 0.)
    # Turn coordinates into 2D arrays if 1D arrays were provided.
    # (numpy.tile replaces the deprecated numpy.matlib.repmat.)
    if len(xCoord.shape) == 1:
        nxy = yCoord.shape
        xCoord = numpy.tile(xCoord, (nxy[0], 1))
    nxy = xCoord.shape
    if len(yCoord.shape) == 1:
        yCoord = numpy.tile(yCoord, (nxy[-1], 1)).T
    # BUG FIX: this check was duplicated (executed twice in a row).
    if xCoord.shape != yCoord.shape:
        raise Exception('The shape of coordinates are mismatched!')
    # Create corner coordinates from center coordinates if center
    # coordinates were provided (coords have the same shape as the data).
    if s.shape == xCoord.shape:
        xCoord = expandJ(expandI(xCoord))
        yCoord = expandJ(expandI(yCoord))
    return xCoord, yCoord
def expandI(a):
    """
    Return a copy of *a* widened by one column: interior columns are the
    midpoints of neighbouring input columns, and the two outer columns
    are linearly extrapolated. Used to shift coordinates from cell
    centers to cell corners.
    """
    rows, cols = a.shape
    out = numpy.zeros((rows, cols + 1))
    out[:, 1:-1] = 0.5 * (a[:, :-1] + a[:, 1:])
    out[:, 0] = a[:, 0] + 0.5 * (a[:, 0] - a[:, 1])
    out[:, -1] = a[:, -1] + 0.5 * (a[:, -1] - a[:, -2])
    return out
def expandJ(a):
    """
    Return a copy of *a* lengthened by one row: interior rows are the
    midpoints of neighbouring input rows, and the two outer rows are
    linearly extrapolated. Used to shift coordinates from cell centers
    to cell corners.
    """
    rows, cols = a.shape
    out = numpy.zeros((rows + 1, cols))
    out[1:-1, :] = 0.5 * (a[:-1, :] + a[1:, :])
    out[0, :] = a[0, :] + 0.5 * (a[0, :] - a[1, :])
    out[-1, :] = a[-1, :] + 0.5 * (a[-1, :] - a[-2, :])
    return out
def expand(a):
    """
    Return a copy of vector *a* lengthened by one element: interior
    entries are midpoints of neighbouring input entries, and the two
    end entries are linearly extrapolated. Used to shift coordinates
    from centers to corners.
    """
    out = numpy.zeros(len(a) + 1)
    out[1:-1] = 0.5 * (a[:-1] + a[1:])
    out[0] = a[0] + 0.5 * (a[0] - a[1])
    out[-1] = a[-1] + 0.5 * (a[-1] - a[-2])
    return out
def boundaryStats(a):
    """
    Return (min, max) of the values on the perimeter of 2D array *a*;
    interior values are ignored.
    """
    # The four edge pieces together cover each perimeter cell exactly once.
    pieces = (a[0, :], a[1:, -1], a[-1, :-1], a[1:-1, 0])
    lows = [numpy.amin(p) for p in pieces]
    highs = [numpy.amax(p) for p in pieces]
    return min(lows), max(highs)
def setFigureSize(aspect=None, verticalresolution=None, horiztonalresolution=None,
                  npanels=1, debug=False):
    """
    Set the figure size based on vertical resolution and aspect ratio
    (tuple of W,H), and pre-adjust subplot margins for 1-3 panels.

    Note: the parameter name 'horiztonalresolution' is misspelled but kept
    for backward compatibility with keyword callers.
    """
    if (not horiztonalresolution is None) and (not verticalresolution is None):
        if aspect is None: aspect = [horiztonalresolution, verticalresolution]
        else: raise Exception('Aspect-ratio and both h-/v- resolutions can not be specified together')
    # Per-panel-count defaults for aspect ratio and vertical resolution.
    if aspect is None: aspect = {1: [16, 9], 2: [1, 1], 3: [7, 10]}[npanels]
    if (not horiztonalresolution is None) and (verticalresolution is None):
        verticalresolution = int(1. * aspect[1] / aspect[0] * horiztonalresolution)
    if verticalresolution is None: verticalresolution = {1: 576, 2: 720, 3: 1200}[npanels]
    width = int(1. * aspect[0] / aspect[1] * verticalresolution)  # First guess
    if debug: print('setFigureSize: first guess width =', width)
    width = width + (width % 2)  # Make even
    if debug: print('setFigureSize: corrected width =', width)
    if debug: print('setFigureSize: height =', verticalresolution)
    fig = plt.figure(figsize=(width / 100., verticalresolution / 100.))  # 100 dpi always?
    if npanels == 1: plt.gcf().subplots_adjust(left=.08, right=.99, wspace=0, bottom=.09, top=.9, hspace=0)
    elif npanels == 2: plt.gcf().subplots_adjust(left=.11, right=.94, wspace=0, bottom=.09, top=.9, hspace=0.15)
    elif npanels == 3: plt.gcf().subplots_adjust(left=.11, right=.94, wspace=0, bottom=.05, top=.93, hspace=0.15)
    elif npanels == 0: pass
    else: raise Exception('npanels out of range')
    return fig
def label(label, units):
    """
    Combine a label string and a units string as 'label [units]';
    the bracketed units part is omitted when units is empty.
    """
    if len(units) > 0:
        return r'' + label + ' [' + units + ']'
    return r'' + label
def createXYlabels(x, y, xlabel, xunits, ylabel, yunits):
    """
    Fill in defaults for any axis label/units not supplied: plain index
    labels ('i'/'j', unitless) when no coordinates are given, otherwise
    'Longitude'/'Latitude' in degrees east/north.
    """
    if xlabel is None:
        xlabel = 'i' if x is None else 'Longitude'
    if xunits is None:
        xunits = '' if x is None else r'$\degree$E'
    if ylabel is None:
        ylabel = 'j' if y is None else 'Latitude'
    if yunits is None:
        yunits = '' if y is None else r'$\degree$N'
    return xlabel, xunits, ylabel, yunits
def addInteractiveCallbacks():
    """
    Adds interactive features to a plot on screen.
    Key 'q' to close window.
    Zoom button to center.
    Zoom wheel to zoom in and out.
    """
    def keyPress(event):
        # 'Q' exits python entirely; 'q' only closes the active figure.
        if event.key == 'Q': exit(0)  # Exit python
        elif event.key == 'q': plt.close()  # Close just the active figure
    class hiddenStore:
        # Remembers the initial axis limits so zooming can be bounded.
        def __init__(self, axis):
            self.axis = axis
            self.xMin, self.xMax = axis.get_xlim()
            self.yMin, self.yMax = axis.get_ylim()
    save = hiddenStore(plt.gca())
    def zoom(event):  # Scroll wheel up/down
        if event.button == 'up': scaleFactor = 1 / 1.5  # deal with zoom in
        elif event.button == 'down': scaleFactor = 1.5  # deal with zoom out
        elif event.button == 2: scaleFactor = 1.0  # middle click: recenter only
        else: return
        axis = event.inaxes
        axmin, axmax = axis.get_xlim(); aymin, aymax = axis.get_ylim();
        # newLims is defined elsewhere in this module; it clamps the new
        # limits to the originally saved extent.
        (axmin, axmax), (aymin, aymax) = newLims(
            (axmin, axmax), (aymin, aymax), (event.xdata, event.ydata),
            (save.xMin, save.xMax), (save.yMin, save.yMax), scaleFactor)
        if axmin is None: return
        # Apply the same limits to every navigable axis in the figure.
        for axis in plt.gcf().get_axes():
            if axis.get_navigate():
                axis.set_xlim(axmin, axmax); axis.set_ylim(aymin, aymax)
        plt.draw()  # force re-draw
    def zoom2(event): zoom(event)
    plt.gcf().canvas.mpl_connect('key_press_event', keyPress)
    plt.gcf().canvas.mpl_connect('scroll_event', zoom)
    plt.gcf().canvas.mpl_connect('button_press_event', zoom2)
def addStatusBar(xCoord, yCoord, zData):
"""
Reformats status bar message
"""
class hiddenStore:
def __init__(self,axis):
self.axis = axis
self.xMin, self.xMax = axis.get_xlim()
self.yMin, self.yMax = axis.get_ylim()
save = hiddenStore(plt.gca())
def statusMessage(x,y):
# THIS NEEDS TESTING FOR ACCURACY, ESPECIALLY IN YZ PLOTS -AJA
if len(xCoord.shape)==1 and len(yCoord.shape)==1:
# -2 needed because of coords are for vertices and need to be averaged to centers
i = min(range(len(xCoord)-2), key=lambda l: abs((xCoord[l]+xCoord[l+1])/2.-x))
j = min(range(len(yCoord)-2), | |
`Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['PureCloud OAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EmailMessage',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def post_conversations_email_participant_replace(self, conversation_id, participant_id, body, **kwargs):
    """
    Replace this participant with the specified user and/or address
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.post_conversations_email_participant_replace(conversation_id, participant_id, body, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param str conversation_id: conversationId (required)
    :param str participant_id: participantId (required)
    :param TransferRequest body: Transfer request (required)
    :return: None
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['conversation_id', 'participant_id', 'body']
    all_params.append('callback')
    # Merge **kwargs into the parameter dict, rejecting undeclared names.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_conversations_email_participant_replace" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'conversation_id' is set
    if ('conversation_id' not in params) or (params['conversation_id'] is None):
        raise ValueError("Missing the required parameter `conversation_id` when calling `post_conversations_email_participant_replace`")
    # verify the required parameter 'participant_id' is set
    if ('participant_id' not in params) or (params['participant_id'] is None):
        raise ValueError("Missing the required parameter `participant_id` when calling `post_conversations_email_participant_replace`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_conversations_email_participant_replace`")

    resource_path = '/api/v2/conversations/emails/{conversationId}/participants/{participantId}/replace'.replace('{format}', 'json')
    path_params = {}
    if 'conversation_id' in params:
        path_params['conversationId'] = params['conversation_id']
    if 'participant_id' in params:
        path_params['participantId'] = params['participant_id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['PureCloud OAuth']

    # response_type=None: the endpoint returns no deserialized body.
    response = self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
def post_conversations_emails(self, body, **kwargs):
    """
    Create an email conversation
    If the direction of the request is INBOUND, this will create an external conversation with a third party provider. If the direction of the the request is OUTBOUND, this will create a conversation to send outbound emails on behalf of a queue.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.post_conversations_emails(body, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param CreateEmailRequest body: Create email request (required)
    :return: EmailConversation
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['body']
    all_params.append('callback')
    # Merge **kwargs into the parameter dict, rejecting undeclared names.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_conversations_emails" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_conversations_emails`")

    resource_path = '/api/v2/conversations/emails'.replace('{format}', 'json')
    path_params = {}

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['PureCloud OAuth']

    # Response body is deserialized into an EmailConversation model.
    response = self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='EmailConversation',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
def post_conversations_faxes(self, body, **kwargs):
    """
    Create Fax Conversation
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.post_conversations_faxes(body, callback=callback_function)
    :param callback function: The callback function
    for asynchronous request. (optional)
    :param FaxSendRequest body: Fax (required)
    :return: FaxSendResponse
    If the method is called asynchronously,
    returns the request thread.
    """
    all_params = ['body']
    all_params.append('callback')
    # Merge **kwargs into the parameter dict, rejecting undeclared names.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_conversations_faxes" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `post_conversations_faxes`")

    resource_path = '/api/v2/conversations/faxes'.replace('{format}', 'json')
    path_params = {}

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'body' in params:
        body_params = params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['PureCloud OAuth']

    # Response body is deserialized into a FaxSendResponse model.
    response = self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='FaxSendResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'))
    return response
def post_conversations_message_communication_messages(self, conversation_id, communication_id, body, **kwargs):
    """
    Send message

    Send message on existing conversation/communication. Only one message
    body field can be accepted, per request. Example: 1 textBody, 1 mediaId,
    1 stickerId, or 1 messageTemplate.
    Synchronous by default; pass a `callback` keyword argument to run the
    request asynchronously and have the callback invoked with the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.post_conversations_message_communication_messages(conversation_id, communication_id, body, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str conversation_id: conversationId (required)
    :param str communication_id: communicationId (required)
    :param AdditionalMessage body: Message (required)
    :return: MessageData
             If the method is called asynchronously,
             returns the request thread.
    """
    # Collect and validate keyword arguments against the accepted names.
    accepted = ['conversation_id', 'communication_id', 'body', 'callback']
    params = {
        'conversation_id': conversation_id,
        'communication_id': communication_id,
        'body': body,
    }
    for key, val in iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_conversations_message_communication_messages" % key
            )
        params[key] = val
    # verify the required parameters are set
    if params.get('conversation_id') is None:
        raise ValueError("Missing the required parameter `conversation_id` when calling `post_conversations_message_communication_messages`")
    if params.get('communication_id') is None:
        raise ValueError("Missing the required parameter `communication_id` when calling `post_conversations_message_communication_messages`")
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `post_conversations_message_communication_messages`")

    resource_path = '/api/v2/conversations/messages/{conversationId}/communications/{communicationId}/messages'.replace('{format}', 'json')
    path_params = {
        'conversationId': params['conversation_id'],
        'communicationId': params['communication_id'],
    }

    # Negotiate content types; only send Accept when the client produced one.
    header_params = {}
    accept = self.api_client.select_header_accept(['application/json'])
    if accept:
        header_params['Accept'] = accept
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    {},
                                    header_params,
                                    body=params['body'],
                                    post_params=[],
                                    files={},
                                    response_type='MessageData',
                                    auth_settings=['PureCloud OAuth'],
                                    callback=params.get('callback'))
def post_conversations_message_communication_messages_media(self, conversation_id, communication_id, **kwargs):
"""
Create media
See https://developer.genesys.cloud/api/rest/v2/conversations/messaging-media-upload for example usage.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_conversations_message_communication_messages_media(conversation_id, communication_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str conversation_id: conversationId (required)
:param str communication_id: communicationId (required)
:return: MessageMediaData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['conversation_id', 'communication_id']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_conversations_message_communication_messages_media" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'conversation_id' is set
if ('conversation_id' not in params) or (params['conversation_id'] is None):
raise ValueError("Missing the required parameter `conversation_id` when calling `post_conversations_message_communication_messages_media`")
# verify the required parameter 'communication_id' is set
if ('communication_id' not in params) | |
= JobStatus.new
run['status_prov'] += ', HAZUS complete'
self.pg_db.update_job_status(run['id'], run['status_prov'])
# was there a failure
elif job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
self.logger.error(f"Error: Run status {run['status']}. Run ID: {run['id']}, Job type: {run['job-type']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, job status: {job_status}, pod status: {job_pod_status}.")
self.send_slack_msg(run['id'], f"failed in {run['job-type']}.", run['instance_name'])
# set error conditions
run['job-type'] = JobType.error
run['status'] = JobStatus.error
# is this a hazus job
elif run['job-type'] == JobType.hazus_singleton:
# work the current state
if run['status'] == JobStatus.new:
# set the activity flag
no_activity = False
# get the data by the download url
command_line_params = [run['downloadurl']]
# create the job configuration for a new run
self.k8s_create_job_obj(run, command_line_params, False)
# execute the k8s job run
self.k8s_create.execute(run)
# set the current status
run['status'] = JobStatus.hazus_singleton_running
run['status_prov'] += ', HAZUS singleton running'
self.pg_db.update_job_status(run['id'], run['status_prov'])
self.logger.info(f"Job created. Run ID: {run['id']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, Job type: {run['job-type']}")
elif run['status'] == JobStatus.hazus_singleton_running and run['status'] != JobStatus.error:
# set the activity flag
no_activity = False
# find the job, get the status
job_status, job_pod_status = self.k8s_find.find_job_info(run)
# if the job status is not active (!=1) it is complete or dead. either way it gets removed
if job_status is None and not job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
self.logger.info(f"Job complete. Run ID: {run['id']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, Job type: {run['job-type']}, Final status: {job_status}")
# set the next stage and stage status
run['job-type'] = JobType.complete
run['status'] = JobStatus.hazus_singleton_complete
run['status_prov'] += ', HAZUS singleton complete'
self.pg_db.update_job_status(run['id'], run['status_prov'])
# was there a failure
elif job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
self.logger.error(f"Error: Run status {run['status']}. Run ID: {run['id']}, Job type: {run['job-type']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, job status: {job_status}, pod status: {job_pod_status}.")
self.send_slack_msg(run['id'], f"failed in {run['job-type']}.", run['instance_name'])
# set error conditions
run['job-type'] = JobType.complete
run['status'] = JobStatus.error
run['status_prov'] += ', Error detected'
# is this a obs_mod job
elif run['job-type'] == JobType.obs_mod:
# work the current state
if run['status'] == JobStatus.new:
# set the activity flag
no_activity = False
# the download URL given is for a file server. obs/mod needs the dodsC access type
access_type = run['downloadurl'] + '/fort.63.nc'
access_type = access_type.replace('fileServer', 'dodsC')
# create the additional command line parameters
command_line_params = ['--instanceId', str(run['id']),
'--inputURL', access_type, '--grid', run['gridname'],
'--outputDIR',
self.k8s_config[run['job-type']]['DATA_MOUNT_PATH'] + '/' +
str(run['id']) +
self.k8s_config[run['job-type']]['SUB_PATH'] +
self.k8s_config[run['job-type']]['ADDITIONAL_PATH'],
'--finalDIR', self.k8s_config[run['job-type']]['DATA_MOUNT_PATH'] + '/' +
str(run['id']) + '/' +
'final' + self.k8s_config[run['job-type']]['ADDITIONAL_PATH']
]
# create the job configuration for a new run
self.k8s_create_job_obj(run, command_line_params)
# execute the k8s job run
self.k8s_create.execute(run)
# move to the next stage
run['status'] = JobStatus.obs_mod_running
run['status_prov'] += ', Obs/Mod running'
self.pg_db.update_job_status(run['id'], run['status_prov'])
self.logger.info(f"Job created. Run ID: {run['id']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, Job type: {run['job-type']}")
elif run['status'] == JobStatus.obs_mod_running and run['status'] != JobStatus.error:
# set the activity flag
no_activity = False
# find the job, get the status
job_status, job_pod_status = self.k8s_find.find_job_info(run)
# if the job status is not active (No) it is complete or dead. either way it gets removed
if job_status is None and not job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
self.logger.info(f"Job complete. Run ID: {run['id']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, Job type: {run['job-type']}, Final job status: {job_status}")
# set the next stage and stage status
run['job-type'] = JobType.run_geo_tiff
run['status'] = JobStatus.new
run['status_prov'] += ', Obs/Mod complete'
self.pg_db.update_job_status(run['id'], run['status_prov'])
# was there a failure
elif job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
self.logger.error(f"Error: Run status {run['status']}. Run ID: {run['id']}, Job type: {run['job-type']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, job status: {job_status}, pod status: {job_pod_status}.")
self.send_slack_msg(run['id'], f"failed in {run['job-type']}.", run['instance_name'])
# set error conditions
run['job-type'] = JobType.error
run['status'] = JobStatus.error
# is this a geo tiff job array
elif run['job-type'] == JobType.run_geo_tiff:
# work the current state
if run['status'] == JobStatus.new:
# set the activity flag
no_activity = False
# create the additional command line parameters
command_line_params = ['--outputDIR',
self.k8s_config[run['job-type']]['DATA_MOUNT_PATH'] + '/' +
str(run['id']) +
self.k8s_config[run['job-type']]['SUB_PATH'],
'--finalDIR',
self.k8s_config[run['job-type']]['DATA_MOUNT_PATH'] + '/' +
str(run['id']) + '/' +
'final' + self.k8s_config[run['job-type']]['SUB_PATH'],
'--inputFile']
# create the job configuration for a new run
self.k8s_create_job_obj(run, command_line_params)
# execute the k8s job run
self.k8s_create.execute(run)
# move to the next stage
run['status'] = JobStatus.run_geo_tiff_running
run['status_prov'] += ', Geo tiff running'
self.pg_db.update_job_status(run['id'], run['status_prov'])
self.logger.info(f"Job created. Run ID: {run['id']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, Job type: {run['job-type']}")
elif run['status'] == JobStatus.run_geo_tiff_running and run['status'] != JobStatus.error:
# set the activity flag
no_activity = False
# find the job, get the status
job_status, job_pod_status = self.k8s_find.find_job_info(run)
# if the job status is not active (!=1) it is complete or dead. either way it gets removed
if job_status is None and not job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
self.logger.info(f"Job complete. Run ID: {run['id']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, Job type: {run['job-type']}, Final job status: {job_status}")
# set the next stage and stage status
run['job-type'] = JobType.compute_mbtiles_0_10
run['status'] = JobStatus.new
run['status_prov'] += ', Geo tiff complete'
self.pg_db.update_job_status(run['id'], run['status_prov'])
# was there a failure
elif job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
self.logger.error(f"Error: Run status {run['status']}. Run ID: {run['id']}, Job type: {run['job-type']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, job status: {job_status}, pod status: {job_pod_status}.")
self.send_slack_msg(run['id'], f"failed in {run['job-type']}.", run['instance_name'])
# set error conditions
run['job-type'] = JobType.error
run['status'] = JobStatus.error
# is this a mbtiles zoom 0-10 job array
elif run['job-type'] == JobType.compute_mbtiles_0_10:
# work the current state
if run['status'] == JobStatus.new:
# set the activity flag
no_activity = False
# create the additional command line parameters
command_line_params = ['--outputDIR',
self.k8s_config[run['job-type']]['DATA_MOUNT_PATH'] + '/' +
str(run['id']) + self.k8s_config[run['job-type']]['SUB_PATH'],
'--finalDIR',
self.k8s_config[run['job-type']]['DATA_MOUNT_PATH'] + '/' +
str(run['id']) + '/' +
'final' + self.k8s_config[run['job-type']]['SUB_PATH'],
'--inputFile'
]
# create the job configuration for a new run
self.k8s_create_job_obj(run, command_line_params)
# execute the k8s job run
self.k8s_create.execute(run)
# move to the next stage
run['status'] = JobStatus.compute_mbtiles_0_10_running
run['status_prov'] += ', Compute mbtiles 0-10 running'
self.pg_db.update_job_status(run['id'], run['status_prov'])
self.logger.info(f"Job created. Run ID: {run['id']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, Job type: {run['job-type']}")
elif run['status'] == JobStatus.compute_mbtiles_0_10_running and run['status'] != JobStatus.error:
# set the activity flag
no_activity = False
# find the job, get the status
job_status, job_pod_status = self.k8s_find.find_job_info(run)
# if the job status is not active (!=1) it is complete or dead. either way it gets removed
if job_status is None and not job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
self.logger.info(f"Job complete. Run ID: {run['id']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, Job type: {run['job-type']}, Final job status: {job_status}")
# set the next stage and stage status
run['job-type'] = JobType.load_geo_server
run['status'] = JobStatus.new
run['status_prov'] += ', Compute mbtiles zoom 0-10 complete'
self.pg_db.update_job_status(run['id'], run['status_prov'])
# was there a failure
elif job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
self.logger.error(f"Error: Run status {run['status']}. Run ID: {run['id']}, Job type: {run['job-type']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, job status: {job_status}, pod status: {job_pod_status}.")
self.send_slack_msg(run['id'], f"failed in {run['job-type']}.", run['instance_name'])
# set error conditions
run['job-type'] = JobType.error
run['status'] = JobStatus.error
# is this a geo server load job
elif run['job-type'] == JobType.load_geo_server:
# work the current state
if run['status'] == JobStatus.new:
# set the activity flag
no_activity = False
# create the additional command line parameters
command_line_params = ['--instanceId', str(run['id'])]
# create the job configuration for a new run
self.k8s_create_job_obj(run, command_line_params)
# execute the k8s job run
self.k8s_create.execute(run)
# set the current status
run['status'] = JobStatus.load_geo_server_running
run['status_prov'] += ', Load geo server running'
self.pg_db.update_job_status(run['id'], run['status_prov'])
self.logger.info(f"Job created. Run ID: {run['id']}, Job ID: {run[run['job-type']]['job-config']['job_id']}, Job type: {run['job-type']}")
elif run['status'] == JobStatus.load_geo_server_running and run['status'] != JobStatus.error:
# set the activity flag
no_activity = False
# find the job, get the status
job_status, job_pod_status = self.k8s_find.find_job_info(run)
# if the job status is not active (!=1) it is complete or dead. either way it gets removed
if job_status is None and not job_pod_status.startswith('Failed'):
# remove the job and get the final run status
job_status = self.k8s_create.delete_job(run)
| |
nullable=False, index=False)
child_track_id = Column(Integer, nullable=False, index=False)
PrimaryKeyConstraint(parent_track_id, child_track_id)
def __repr__(self):
return f"<Remix(parent_track_id={self.parent_track_id},\
child_track_id={self.child_track_id})>"
class Play(Base):
    """One recorded play (listen) event.

    NOTE(review): `play_item_id` is presumably a track id — confirm against
    the code that writes this table.
    """
    __tablename__ = "plays"
    id = Column(Integer, primary_key=True)
    # nullable: a play can be recorded without a known user
    user_id = Column(Integer, nullable=True, index=False)
    # free-form origin of the play; vocabulary not visible from here
    source = Column(String, nullable=True, index=False)
    play_item_id = Column(Integer, nullable=False, index=False)
    # `slot`/`signature` appear Solana-related (see ix_plays_sol_signature) — confirm
    slot = Column(Integer, nullable=True, index=True)
    signature = Column(String, nullable=True, index=False)
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    # composite lookup indexes by item/user, item/user/date, and sol signature
    # NOTE(review): bare Index(...) calls at class scope with string column
    # names — confirm these are actually attached (usually via __table_args__)
    Index("ix_plays_user_play_item", "play_item_id", "user_id", unique=False)
    Index(
        "ix_plays_user_play_item_date",
        "play_item_id",
        "user_id",
        "created_at",
        unique=False,
    )
    Index("ix_plays_sol_signature", "play_item_id", "signature", unique=False)
    def __repr__(self):
        return f"<Play(\
id={self.id},\
user_id={self.user_id},\
source={self.source},\
play_item_id={self.play_item_id}\
slot={self.slot}\
signature={self.signature}\
updated_at={self.updated_at}\
created_at={self.created_at})>"
class PlaysArchive(Base):
    """Archived copy of `plays` rows; adds `archived_at`.

    Mirrors the column layout of `Play` so rows can be moved over verbatim.
    """
    __tablename__ = "plays_archive"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=True, index=False)
    source = Column(String, nullable=True, index=False)
    play_item_id = Column(Integer, nullable=False, index=False)
    slot = Column(Integer, nullable=True, index=True)
    signature = Column(String, nullable=True, index=False)
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    # when the row landed in the archive
    archived_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        # fix: repr previously mislabeled this class as "<Play(..."
        return f"<PlaysArchive(\
id={self.id},\
user_id={self.user_id},\
source={self.source},\
play_item_id={self.play_item_id}\
slot={self.slot}\
signature={self.signature}\
updated_at={self.updated_at}\
created_at={self.created_at}\
archived_at={self.archived_at})>"
class AggregatePlays(Base):
    """Total play count per playable item."""
    __tablename__ = "aggregate_plays"
    play_item_id = Column(Integer, primary_key=True, nullable=False, index=True)
    count = Column(Integer, nullable=False, index=False)
    # NOTE(review): bare Index(...) at class scope — confirm it is attached
    Index("play_item_id_idx", "play_item_id", unique=False)
    def __repr__(self):
        return f"<AggregatePlays(\
play_item_id={self.play_item_id},\
count={self.count})>"
class AggregateMonthlyPlays(Base):
    """Play counts bucketed per item per month."""
    # Created for potential use case of year trending
    # No dependencies as of now
    __tablename__ = "aggregate_monthly_plays"
    play_item_id = Column(Integer, primary_key=True, nullable=False)
    # part of the composite primary key: one row per item per month bucket
    timestamp = Column(
        Date, primary_key=True, nullable=False, default=func.now()
    )  # monthly timestamps
    count = Column(Integer, nullable=False)
    def __repr__(self):
        return f"<AggregateMonthlyPlays(\
play_item_id={self.play_item_id},\
timestamp={self.timestamp},\
count={self.count})>"
class HourlyPlayCounts(Base):
    """Total play count per hourly bucket."""
    __tablename__ = "hourly_play_counts"
    # hour-granularity bucket timestamp (primary key)
    hourly_timestamp = Column(DateTime, primary_key=True, nullable=False)
    play_count = Column(Integer, nullable=False, index=False)
    def __repr__(self):
        return f"<HourlyPlayCounts(\
hourly_timestamp={self.hourly_timestamp},\
play_count={self.play_count})>"
class IndexingCheckpoints(Base):
    """Per-table checkpoint recording how far indexing has progressed."""
    __tablename__ = "indexing_checkpoints"
    # name of the table being checkpointed
    tablename = Column(String, primary_key=True, nullable=False, index=False)
    last_checkpoint = Column(Integer, nullable=False, index=False)
    def __repr__(self):
        # fix: repr string previously lacked the closing ")" before ">"
        return f"<IndexingCheckpoints(\
tablename={self.tablename},\
last_checkpoint={self.last_checkpoint})>"
class RouteMetrics(Base):
    """Raw per-route request metrics (route, query string, ip, count)."""
    __tablename__ = "route_metrics"
    id = Column(Integer, primary_key=True)
    version = Column(String, nullable=True)
    route_path = Column(String, nullable=False)
    query_string = Column(String, nullable=True, default="")
    count = Column(Integer, nullable=False)
    ip = Column(String, nullable=False)
    timestamp = Column(DateTime, nullable=False, default=func.now())
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        # fix: repr string previously lacked the closing ")>"
        return f"<RouteMetrics(\
version={self.version},\
route_path={self.route_path},\
query_string={self.query_string},\
count={self.count},\
ip={self.ip},\
timestamp={self.timestamp},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class AggregateDailyUniqueUsersMetrics(Base):
    """Unique-user counts aggregated per day."""
    __tablename__ = "aggregate_daily_unique_users_metrics"
    id = Column(Integer, primary_key=True)
    count = Column(Integer, nullable=False)
    # nullable: relationship between count and summed_count not visible here
    summed_count = Column(Integer, nullable=True)
    timestamp = Column(Date, nullable=False)  # zeroed out to the day
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        # fix: repr string previously lacked the closing ")>"
        return f"<AggregateDailyUniqueUsersMetrics(\
count={self.count},\
timestamp={self.timestamp},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class AggregateDailyTotalUsersMetrics(Base):
    """Total-user counts aggregated per day."""
    __tablename__ = "aggregate_daily_total_users_metrics"
    id = Column(Integer, primary_key=True)
    count = Column(Integer, nullable=False)
    timestamp = Column(Date, nullable=False)  # zeroed out to the day
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        # fix: repr string previously lacked the closing ")>"
        return f"<AggregateDailyTotalUsersMetrics(\
count={self.count},\
timestamp={self.timestamp},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class AggregateMonthlyUniqueUsersMetrics(Base):
    """Unique-user counts aggregated per month."""
    __tablename__ = "aggregate_monthly_unique_users_metrics"
    id = Column(Integer, primary_key=True)
    count = Column(Integer, nullable=False)
    # nullable: relationship between count and summed_count not visible here
    summed_count = Column(Integer, nullable=True)
    timestamp = Column(Date, nullable=False)  # first day of month
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        # fix: repr string previously lacked the closing ")>"
        return f"<AggregateMonthlyUniqueUsersMetrics(\
count={self.count},\
timestamp={self.timestamp},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class AggregateMonthlyTotalUsersMetrics(Base):
    """Total-user counts aggregated per month."""
    __tablename__ = "aggregate_monthly_total_users_metrics"
    id = Column(Integer, primary_key=True)
    count = Column(Integer, nullable=False)
    timestamp = Column(Date, nullable=False)  # first day of month
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        # fix: repr string previously lacked the closing ")>"
        return f"<AggregateMonthlyTotalUsersMetrics(\
count={self.count},\
timestamp={self.timestamp},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class AppNameMetrics(Base):
    """Raw per-application request metrics (app name, ip, count)."""
    __tablename__ = "app_name_metrics"
    id = Column(Integer, primary_key=True)
    application_name = Column(String, nullable=False)
    count = Column(Integer, nullable=False)
    ip = Column(String, nullable=True)
    timestamp = Column(DateTime, nullable=False, default=func.now())
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        # fix: repr string previously lacked the closing ")>"
        return f"<AppNameMetrics(\
application_name={self.application_name},\
count={self.count},\
ip={self.ip},\
timestamp={self.timestamp},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class AggregateDailyAppNameMetrics(Base):
    """Per-application request counts aggregated per day."""
    __tablename__ = "aggregate_daily_app_name_metrics"
    id = Column(Integer, primary_key=True)
    application_name = Column(String, nullable=False)
    count = Column(Integer, nullable=False)
    timestamp = Column(Date, nullable=False)  # zeroed out to the day
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        # fix: repr string previously lacked the closing ")>"
        return f"<AggregateDailyAppNameMetrics(\
application_name={self.application_name},\
count={self.count},\
timestamp={self.timestamp},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class AggregateMonthlyAppNameMetrics(Base):
    """Per-application request counts aggregated per month."""
    __tablename__ = "aggregate_monthly_app_name_metrics"
    id = Column(Integer, primary_key=True)
    application_name = Column(String, nullable=False)
    count = Column(Integer, nullable=False)
    timestamp = Column(Date, nullable=False)  # first day of month
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        # fix: repr string previously lacked the closing ")>"
        return f"<AggregateMonthlyAppNameMetrics(\
application_name={self.application_name},\
count={self.count},\
timestamp={self.timestamp},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class RouteMetricsDayMatview(Base):
    """Mapping of the day-bucketed route metrics (materialized view — read only)."""
    __tablename__ = "route_metrics_day_bucket"
    # day bucket start time (primary key)
    time = Column(DateTime, nullable=False, primary_key=True)
    unique_count = Column(Integer, nullable=False)
    count = Column(Integer, nullable=False)
    def __repr__(self):
        return f"<RouteMetricsDayMatview(\
unique_count={self.unique_count},\
count={self.count},\
time={self.time})>"
class RouteMetricsMonthMatview(Base):
    """Mapping of the month-bucketed route metrics (materialized view — read only)."""
    __tablename__ = "route_metrics_month_bucket"
    # month bucket start time (primary key)
    time = Column(DateTime, nullable=False, primary_key=True)
    unique_count = Column(Integer, nullable=False)
    count = Column(Integer, nullable=False)
    def __repr__(self):
        return f"<RouteMetricsMonthMatview(\
unique_count={self.unique_count},\
count={self.count},\
time={self.time})>"
class RouteMetricsTrailingWeek(Base):
    """Route metrics summed over the trailing week (single-row view)."""
    __tablename__ = "route_metrics_trailing_week"
    unique_count = Column(Integer, nullable=False)
    count = Column(Integer, nullable=False)
    # NOTE(review): bare PrimaryKeyConstraint at class scope — confirm it is
    # picked up by the mapper (usually declared via __table_args__)
    PrimaryKeyConstraint(unique_count, count)
    def __repr__(self):
        return f"<RouteMetricsTrailingWeek(\
unique_count={self.unique_count},\
count={self.count})>"
class RouteMetricsTrailingMonth(Base):
    """Route metrics summed over the trailing month (single-row view)."""
    __tablename__ = "route_metrics_trailing_month"
    unique_count = Column(Integer, nullable=False)
    count = Column(Integer, nullable=False)
    # NOTE(review): bare PrimaryKeyConstraint at class scope — confirm it is
    # picked up by the mapper (usually declared via __table_args__)
    PrimaryKeyConstraint(unique_count, count)
    def __repr__(self):
        return f"<RouteMetricsTrailingMonth(\
unique_count={self.unique_count},\
count={self.count})>"
class RouteMetricsAllTime(Base):
    """Route metrics summed over all time (single-row view)."""
    __tablename__ = "route_metrics_all_time"
    unique_count = Column(Integer, nullable=False)
    count = Column(Integer, nullable=False)
    # NOTE(review): bare PrimaryKeyConstraint at class scope — confirm it is
    # picked up by the mapper (usually declared via __table_args__)
    PrimaryKeyConstraint(unique_count, count)
    def __repr__(self):
        # fix: repr previously said "RouteMetricsTrailingAllTime", which does
        # not match this class's name
        return f"<RouteMetricsAllTime(\
unique_count={self.unique_count},\
count={self.count})>"
class AppMetricsTrailingWeek(Base):
    """Per-application request counts over the trailing week."""
    __tablename__ = "app_name_metrics_trailing_week"
    count = Column(Integer, nullable=False)
    # application name (primary key)
    name = Column(String, nullable=False, primary_key=True)
    def __repr__(self):
        return f"<AppMetricsTrailingWeek(\
name={self.name},\
count={self.count})>"
class AppMetricsTrailingMonth(Base):
    """Per-application request counts over the trailing month."""
    __tablename__ = "app_name_metrics_trailing_month"
    count = Column(Integer, nullable=False)
    # application name (primary key)
    name = Column(String, nullable=False, primary_key=True)
    def __repr__(self):
        return f"<AppMetricsTrailingMonth(\
name={self.name},\
count={self.count})>"
class AppMetricsAllTime(Base):
    """Per-application request counts over all time."""
    __tablename__ = "app_name_metrics_all_time"
    count = Column(Integer, nullable=False)
    # application name (primary key)
    name = Column(String, nullable=False, primary_key=True)
    def __repr__(self):
        return f"<AppMetricsAllTime(\
name={self.name},\
count={self.count})>"
class TagTrackUserMatview(Base):
    """Mapping of (tag, track, owner) triples (materialized view — read only)."""
    __tablename__ = "tag_track_user"
    tag = Column(String, nullable=False)
    track_id = Column(Integer, nullable=False)
    owner_id = Column(Integer, nullable=False)
    # NOTE(review): bare PrimaryKeyConstraint at class scope — confirm it is
    # picked up by the mapper (usually declared via __table_args__)
    PrimaryKeyConstraint(tag, track_id, owner_id)
    def __repr__(self):
        return f"<TagTrackUserMatview(\
tag={self.tag},\
track_id={self.track_id},\
owner_id={self.owner_id})>"
class URSMContentNode(Base):
    """Content-node registration record (URSM) with its three proposers.

    NOTE(review): what "URSM" stands for is not visible in this file —
    presumably UserReplicaSetManager; confirm.
    """
    __tablename__ = "ursm_content_nodes"
    # block that produced this row
    blockhash = Column(String, ForeignKey("blocks.blockhash"), nullable=False)
    blocknumber = Column(Integer, ForeignKey("blocks.number"), nullable=False)
    txhash = Column(String, default="", nullable=False)
    # is_current flags the latest version of this record
    is_current = Column(Boolean, nullable=False)
    # service-provider id of the content node
    cnode_sp_id = Column(Integer, nullable=False)
    delegate_owner_wallet = Column(String, nullable=False)
    owner_wallet = Column(String, nullable=False)
    # sp ids of the three proposers, plus each proposer's delegate wallet
    proposer_sp_ids = Column(postgresql.ARRAY(Integer), nullable=False)
    proposer_1_delegate_owner_wallet = Column(String, nullable=False)
    proposer_2_delegate_owner_wallet = Column(String, nullable=False)
    proposer_3_delegate_owner_wallet = Column(String, nullable=False)
    endpoint = Column(String, nullable=True)
    created_at = Column(DateTime, nullable=False)
    # NOTE(review): bare PrimaryKeyConstraint at class scope — confirm it is
    # picked up by the mapper (usually declared via __table_args__)
    PrimaryKeyConstraint(is_current, cnode_sp_id, blockhash, txhash)
    def __repr__(self):
        return f"<URSMContentNode(blockhash={self.blockhash},\
blocknumber={self.blocknumber},\
txhash={self.txhash},\
is_current={self.is_current},\
cnode_sp_id={self.cnode_sp_id},\
delegate_owner_wallet={self.delegate_owner_wallet},\
owner_wallet={self.owner_wallet},\
proposer_sp_ids={self.proposer_sp_ids},\
proposer_1_delegate_owner_wallet={self.proposer_1_delegate_owner_wallet},\
proposer_2_delegate_owner_wallet={self.proposer_2_delegate_owner_wallet},\
proposer_3_delegate_owner_wallet={self.proposer_3_delegate_owner_wallet},\
endpoint={self.endpoint})>"
class UserBalance(Base):
    """Cached token balances for a user across wallets and chains.

    Balances are stored as strings — per the comment below they are Wei
    values, which can exceed native integer ranges.
    """
    __tablename__ = "user_balances"
    user_id = Column(Integer, nullable=False, primary_key=True)
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    # balance in Wei
    balance = Column(String, nullable=False)
    # balances of associated (linked) ETH and SOL wallets respectively
    associated_wallets_balance = Column(String, nullable=False)
    associated_sol_wallets_balance = Column(String, nullable=False)
    # wAudio balance
    waudio = Column(String, nullable=False)
    def __repr__(self):
        return f"<UserBalance(\
user_id={self.user_id},\
balance={self.balance},\
associated_wallets_balance={self.associated_wallets_balance}\
associated_sol_wallets_balance={self.associated_sol_wallets_balance}\
waudio={self.waudio})>"
class UserBalanceChange(Base):
    """Most recent balance transition observed for a user.

    One row per user (user_id is the sole primary key), holding the previous
    and current balance as strings and the block at which it changed.
    """
    __tablename__ = "user_balance_changes"
    user_id = Column(Integer, nullable=False, primary_key=True)
    blocknumber = Column(Integer, ForeignKey("blocks.number"), nullable=False)
    current_balance = Column(String, nullable=False)
    previous_balance = Column(String, nullable=False)
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    def __repr__(self):
        return f"<UserBalanceChange(\
user_id={self.user_id},\
blocknumber={self.blocknumber},\
current_balance={self.current_balance},\
previous_balance={self.previous_balance})>"
class WalletChain(str, enum.Enum):
    """Blockchain a wallet address belongs to ("eth" or "sol")."""
    eth = "eth"
    sol = "sol"
class AssociatedWallet(Base):
    """A wallet address linked to a user, on a specific chain."""
    __tablename__ = "associated_wallets"
    # block that produced this row
    blockhash = Column(String, ForeignKey("blocks.blockhash"), nullable=False)
    blocknumber = Column(Integer, ForeignKey("blocks.number"), nullable=False)
    # is_current flags the latest version; is_delete marks removal
    is_current = Column(Boolean, nullable=False)
    is_delete = Column(Boolean, nullable=False)
    id = Column(Integer, nullable=False, primary_key=True)
    user_id = Column(Integer, nullable=False, index=True)
    wallet = Column(String, nullable=False, index=True)
    # which blockchain the wallet lives on (WalletChain enum)
    chain = Column(Enum(WalletChain), nullable=False)
    def __repr__(self):
        return f"<AssociatedWallet(blockhash={self.blockhash},\
blocknumber={self.blocknumber},\
is_current={self.is_current},\
is_delete={self.is_delete},\
id={self.id},\
user_id={self.user_id},\
wallet={self.wallet}\
chain={self.chain})>"
class AggregateUser(Base):
    """Materialized per-user counters (tracks, playlists, follows, etc.)."""

    __tablename__ = "aggregate_user"

    user_id = Column(Integer, primary_key=True, nullable=False, index=True)
    track_count = Column(Integer, nullable=False)
    playlist_count = Column(Integer, nullable=False)
    album_count = Column(Integer, nullable=False)
    follower_count = Column(Integer, nullable=False)
    following_count = Column(Integer, nullable=False)
    repost_count = Column(Integer, nullable=False)
    track_save_count = Column(Integer, nullable=False)

    # NOTE(review): a bare Index(...) with string column names inside a
    # declarative class body is not bound to this table's metadata —
    # presumably the unique index is actually created by a migration; confirm.
    Index("aggregate_user_idx", "user_id", unique=True)

    def __repr__(self):
        return f"<AggregateUser(\
user_id={self.user_id},\
track_count={self.track_count},\
playlist_count={self.playlist_count},\
album_count={self.album_count},\
follower_count={self.follower_count},\
following_count={self.following_count},\
repost_count={self.repost_count},\
track_save_count={self.track_save_count})>"
class AggregateTrack(Base):
    """Materialized per-track repost/save counters."""

    __tablename__ = "aggregate_track"

    track_id = Column(Integer, primary_key=True, nullable=False, index=True)
    repost_count = Column(Integer, nullable=False)
    save_count = Column(Integer, nullable=False)

    # NOTE(review): bare Index(...) in a declarative body is not attached to
    # the table metadata — presumably created elsewhere (migration); confirm.
    Index("aggregate_track_idx", "track_id", unique=True)

    def __repr__(self):
        return f"<AggregateTrack(\
track_id={self.track_id},\
repost_count={self.repost_count},\
save_count={self.save_count})>"
class AggregatePlaylist(Base):
    """Materialized per-playlist repost/save counters."""

    __tablename__ = "aggregate_playlist"

    playlist_id = Column(Integer, primary_key=True, nullable=False, index=True)
    # Distinguishes albums from plain playlists within the same table.
    is_album = Column(Boolean, nullable=False)
    repost_count = Column(Integer, nullable=False)
    save_count = Column(Integer, nullable=False)

    # NOTE(review): bare Index(...) in a declarative body is not attached to
    # the table metadata — presumably created elsewhere (migration); confirm.
    Index("aggregate_playlist_idx", "playlist_id", unique=True)

    def __repr__(self):
        return f"<AggregatePlaylist(\
playlist_id={self.playlist_id},\
is_album={self.is_album},\
repost_count={self.repost_count},\
save_count={self.save_count})>"
class SkippedTransactionLevel(str, enum.Enum):
    """Scope of a skipped transaction: skipped on this node only, or
    network-wide."""

    node = "node"
    network = "network"
class SkippedTransaction(Base):
    """Record of a transaction that indexing chose to skip."""

    __tablename__ = "skipped_transactions"

    id = Column(Integer, primary_key=True, nullable=False)
    blocknumber = Column(Integer, nullable=False)
    blockhash = Column(String, nullable=False)
    txhash = Column(String, nullable=False)
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )
    # Defaults to node-level, i.e. skipped only on this node.
    level = Column(
        Enum(SkippedTransactionLevel),
        nullable=False,
        default=SkippedTransactionLevel.node,
    )

    def __repr__(self):
        return f"<SkippedTransaction(\
id={self.id},\
blocknumber={self.blocknumber},\
blockhash={self.blockhash},\
txhash={self.txhash},\
level={self.level},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class EthBlock(Base):
    """Checkpoint of the last Ethereum block scanned by the indexer."""

    __tablename__ = "eth_blocks"

    last_scanned_block = Column(Integer, primary_key=True, nullable=False)
    created_at = Column(DateTime, nullable=False, default=func.now())
    updated_at = Column(
        DateTime, nullable=False, default=func.now(), onupdate=func.now()
    )

    def __repr__(self):
        return f"<EthBlock(\
last_scanned_block={self.last_scanned_block},\
created_at={self.created_at},\
updated_at={self.updated_at})>"
class ChallengeType(str, enum.Enum):
    """How a challenge's completion is tracked (see Challenge.type)."""

    boolean = "boolean"
    numeric = "numeric"
    aggregate = "aggregate"
    trending = "trending"
class Challenge(Base):
    """Represents a particular challenge type."""

    __tablename__ = "challenges"

    # Identifies this challenge
    id = Column(String, primary_key=True, nullable=False, index=True)
    type = Column(Enum(ChallengeType), nullable=False)
    # The amount of wAudio to disburse (8 decimals)
    amount = Column(String, nullable=False)
    # Whether the challenge is currently active
    active = Column(Boolean, nullable=False)
    # Optional field to support numeric challenges,
    # representing the number of steps to complete the challenge
    step_count = Column(Integer)
    # Optional field for non-retroactive challenges -
    # if set, events emitted prior to the starting_block
    # will be ignored.
    starting_block = Column(Integer)

    def __repr__(self):
        # Fix: the repr previously ended with a dangling comma and was
        # missing the closing ")>".
        return f"<Challenge(\
id={self.id},\
type={self.type},\
amount={self.amount},\
active={self.active},\
step_count={self.step_count},\
starting_block={self.starting_block})>"
class UserChallenge(Base):
    """Represents user progress through a particular challenge."""

    __tablename__ = "user_challenges"

    challenge_id = Column(String, ForeignKey("challenges.id"), nullable=False)
    user_id = Column(Integer, nullable=False)
    # Presumably disambiguates multiple instances of the same challenge for
    # one user — confirm against the writers of this table.
    specifier = Column(String, nullable=False)
    is_complete = Column(Boolean, nullable=False)
    # Block at which the challenge completed; NULL while incomplete.
    completed_blocknumber = Column(Integer, ForeignKey("blocks.number"), nullable=True)
    # Progress counter for step-based (numeric) challenges.
    current_step_count = Column(Integer)

    # NOTE(review): a bare PrimaryKeyConstraint(...) in a declarative class
    # body is not attached to the table metadata — presumably the composite
    # key (challenge_id, specifier) is declared elsewhere; confirm.
    PrimaryKeyConstraint(challenge_id, specifier)
| |
Sets from
BSR
''',
'bsr_maximum_global_candidate_rp_cache',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('warning-threshold', ATTRIBUTE, 'int' , None, None,
[('1', '10000')], [],
''' Set threshold to print warning
''',
'warning_threshold',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'bsr-global-candidate-rp-cache',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Maximum.BsrCandidateRpCache' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Maximum.BsrCandidateRpCache',
False,
[
_MetaInfoClassMember('bsr-maximum-candidate-rp-cache', ATTRIBUTE, 'int' , None, None,
[('1', '10000')], [],
''' Maximum number of BSR C-RP cache setting
''',
'bsr_maximum_candidate_rp_cache',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('warning-threshold', ATTRIBUTE, 'int' , None, None,
[('1', '10000')], [],
''' Set threshold to print warning
''',
'warning_threshold',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'bsr-candidate-rp-cache',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Maximum.GlobalRegisterStates' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Maximum.GlobalRegisterStates',
False,
[
_MetaInfoClassMember('maximum-register-states', ATTRIBUTE, 'int' , None, None,
[('0', '75000')], [],
''' Maximum number of PIM Sparse-Mode register
states
''',
'maximum_register_states',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('warning-threshold', ATTRIBUTE, 'int' , None, None,
[('0', '75000')], [],
''' Set threshold to print warning
''',
'warning_threshold',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'global-register-states',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Maximum.GlobalRouteInterfaces' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Maximum.GlobalRouteInterfaces',
False,
[
_MetaInfoClassMember('maximum-route-interfaces', ATTRIBUTE, 'int' , None, None,
[('1', '1100000')], [],
''' Maximum number of PIM route-interfaces
''',
'maximum_route_interfaces',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('warning-threshold', ATTRIBUTE, 'int' , None, None,
[('1', '1100000')], [],
''' Set threshold to print warning
''',
'warning_threshold',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'global-route-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Maximum.Routes' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Maximum.Routes',
False,
[
_MetaInfoClassMember('maximum-routes', ATTRIBUTE, 'int' , None, None,
[('1', '200000')], [],
''' Maximum number of PIM routes
''',
'maximum_routes',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('warning-threshold', ATTRIBUTE, 'int' , None, None,
[('1', '200000')], [],
''' Set threshold to print warning
''',
'warning_threshold',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'routes',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Maximum' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Maximum',
False,
[
_MetaInfoClassMember('bsr-candidate-rp-cache', REFERENCE_CLASS, 'BsrCandidateRpCache' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.BsrCandidateRpCache',
[], [],
''' Override default maximum and threshold for BSR
C-RP cache setting
''',
'bsr_candidate_rp_cache',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('bsr-global-candidate-rp-cache', REFERENCE_CLASS, 'BsrGlobalCandidateRpCache' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.BsrGlobalCandidateRpCache',
[], [],
''' Override default global maximum and threshold
for C-RP set in BSR
''',
'bsr_global_candidate_rp_cache',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('bsr-global-group-mappings', REFERENCE_CLASS, 'BsrGlobalGroupMappings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.BsrGlobalGroupMappings',
[], [],
''' Override default global maximum and threshold
for PIM group mapping ranges from BSR
''',
'bsr_global_group_mappings',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('bsr-group-mappings', REFERENCE_CLASS, 'BsrGroupMappings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.BsrGroupMappings',
[], [],
''' Override default maximum and threshold for
number of group mappings from BSR
''',
'bsr_group_mappings',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('gloabal-high-priority-packet-queue', ATTRIBUTE, 'int' , None, None,
[('0', '2147483648')], [],
''' Maximum packet queue size in bytes
''',
'gloabal_high_priority_packet_queue',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('gloabal-low-priority-packet-queue', ATTRIBUTE, 'int' , None, None,
[('0', '2147483648')], [],
''' Maximum packet queue size in bytes
''',
'gloabal_low_priority_packet_queue',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('global-group-mappings-auto-rp', REFERENCE_CLASS, 'GlobalGroupMappingsAutoRp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.GlobalGroupMappingsAutoRp',
[], [],
''' Maximum for number of group mappings from
autorp mapping agent
''',
'global_group_mappings_auto_rp',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('global-register-states', REFERENCE_CLASS, 'GlobalRegisterStates' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.GlobalRegisterStates',
[], [],
''' Override default maximum for number of
sparse-mode source registers
''',
'global_register_states',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('global-route-interfaces', REFERENCE_CLASS, 'GlobalRouteInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.GlobalRouteInterfaces',
[], [],
''' Override default maximum for number of
route-interfaces
''',
'global_route_interfaces',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('global-routes', REFERENCE_CLASS, 'GlobalRoutes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.GlobalRoutes',
[], [],
''' Override default maximum for number of routes
''',
'global_routes',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('group-mappings-auto-rp', REFERENCE_CLASS, 'GroupMappingsAutoRp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.GroupMappingsAutoRp',
[], [],
''' Override default maximum for number of group
mappings from autorp mapping agent
''',
'group_mappings_auto_rp',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('register-states', REFERENCE_CLASS, 'RegisterStates' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.RegisterStates',
[], [],
''' Override default maximum for number of
sparse-mode source registers
''',
'register_states',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('route-interfaces', REFERENCE_CLASS, 'RouteInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.RouteInterfaces',
[], [],
''' Override default maximum for number of
route-interfaces
''',
'route_interfaces',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('routes', REFERENCE_CLASS, 'Routes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Maximum.Routes',
[], [],
''' Override default maximum for number of routes
''',
'routes',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'maximum',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Ssm' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Ssm',
False,
[
_MetaInfoClassMember('disable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' TRUE if SSM is disabled on this router
''',
'disable',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('range', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list of groups enabled with SSM
''',
'range',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'ssm',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.BidirRpAddresses.BidirRpAddress' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.BidirRpAddresses.BidirRpAddress',
False,
[
_MetaInfoClassMember('rp-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' RP address of Rendezvous Point
''',
'rp_address',
'Cisco-IOS-XR-ipv4-pim-cfg', True, [
_MetaInfoClassMember('rp-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' RP address of Rendezvous Point
''',
'rp_address',
'Cisco-IOS-XR-ipv4-pim-cfg', True),
_MetaInfoClassMember('rp-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' RP address of Rendezvous Point
''',
'rp_address',
'Cisco-IOS-XR-ipv4-pim-cfg', True),
]),
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access list of groups that should map to a
given RP
''',
'access_list_name',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('auto-rp-override', ATTRIBUTE, 'bool' , None, None,
[], [],
''' TRUE Indicates if static RP config overrides
AutoRP and BSR
''',
'auto_rp_override',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'bidir-rp-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.BidirRpAddresses' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.BidirRpAddresses',
False,
[
_MetaInfoClassMember('bidir-rp-address', REFERENCE_LIST, 'BidirRpAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.BidirRpAddresses.BidirRpAddress',
[], [],
''' Address of the Rendezvous Point
''',
'bidir_rp_address',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'bidir-rp-addresses',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Bsr.CandidateRps.CandidateRp.Sm' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Bsr.CandidateRps.CandidateRp.Sm',
False,
[
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access-list specifying the group range for
the Candidate-RP
''',
'access_list_name',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('30', '600')], [],
''' Advertisement interval
''',
'interval',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Priority of the CRP
''',
'priority',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('protocol-mode', ATTRIBUTE, 'int' , None, None,
[('0', '1')], [],
''' CRP Mode
''',
'protocol_mode',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'sm',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Bsr.CandidateRps.CandidateRp.Bidir' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Bsr.CandidateRps.CandidateRp.Bidir',
False,
[
_MetaInfoClassMember('access-list-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Access-list specifying the group range for
the Candidate-RP
''',
'access_list_name',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('30', '600')], [],
''' Advertisement interval
''',
'interval',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('priority', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Priority of the CRP
''',
'priority',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('protocol-mode', ATTRIBUTE, 'int' , None, None,
[('0', '1')], [],
''' CRP Mode
''',
'protocol_mode',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'bidir',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Bsr.CandidateRps.CandidateRp' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Bsr.CandidateRps.CandidateRp',
False,
[
_MetaInfoClassMember('address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Address of Candidate-RP
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', True, [
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Address of Candidate-RP
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', True),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Address of Candidate-RP
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', True),
]),
_MetaInfoClassMember('bidir', REFERENCE_CLASS, 'Bidir' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Bsr.CandidateRps.CandidateRp.Bidir',
[], [],
''' Parameters of PIM Bidir BSR Candidate-RP
''',
'bidir',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('sm', REFERENCE_CLASS, 'Sm' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Bsr.CandidateRps.CandidateRp.Sm',
[], [],
''' Parameters of PIM SM BSR Candidate-RP
''',
'sm',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'candidate-rp',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Bsr.CandidateRps' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Bsr.CandidateRps',
False,
[
_MetaInfoClassMember('candidate-rp', REFERENCE_LIST, 'CandidateRp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg', 'Pim.DefaultContext.Ipv6.Bsr.CandidateRps.CandidateRp',
[], [],
''' BSR Candidate RP Configuration
''',
'candidate_rp',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
],
'Cisco-IOS-XR-ipv4-pim-cfg',
'candidate-rps',
_yang_ns._namespaces['Cisco-IOS-XR-ipv4-pim-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_pim_cfg'
),
},
'Pim.DefaultContext.Ipv6.Bsr.CandidateBsr' : {
'meta_info' : _MetaInfoClass('Pim.DefaultContext.Ipv6.Bsr.CandidateBsr',
False,
[
_MetaInfoClassMember('address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' BSR Address configured
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', False, [
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' BSR Address configured
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' BSR Address configured
''',
'address',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
]),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Hash Mask Length for this candidate BSR
''',
'prefix_length',
'Cisco-IOS-XR-ipv4-pim-cfg', False),
_MetaInfoClassMember('priority', ATTRIBUTE, | |
p * w
# Update the probability that no skeleton yet is refinable.
refinable_skeleton_not_found_prob *= (1 - refinement_prob)
except (PlanningTimeout, PlanningFailure):
# Note if we failed to find any skeleton, the next lines add
# the upper bound with refinable_skeleton_not_found_prob = 1.0,
# so no special action is required.
pass
# After exhausting the skeleton budget or timeout, we use this
# probability to estimate a "worst-case" planning time, making the
# soft assumption that some skeleton will eventually work.
ub = CFG.grammar_search_expected_nodes_upper_bound
expected_planning_time += refinable_skeleton_not_found_prob * ub
# The score is simply the total expected planning time.
score += expected_planning_time
return score
@staticmethod
def _get_refinement_prob(
demo_atoms_sequence: Sequence[Set[GroundAtom]],
plan_atoms_sequence: Sequence[Set[GroundAtom]]) -> float:
"""Estimate the probability that plan_atoms_sequence is refinable using
the demonstration demo_atoms_sequence."""
# Make a soft assumption that the demonstrations are optimal,
# using a geometric distribution.
demo_len = len(demo_atoms_sequence)
plan_len = len(plan_atoms_sequence)
# The exponent is the difference in plan lengths.
exponent = abs(demo_len - plan_len)
p = CFG.grammar_search_expected_nodes_optimal_demo_prob
return p * (1 - p)**exponent
@dataclass(frozen=True, eq=False, repr=False)
class _HeuristicBasedScoreFunction(_OperatorLearningBasedScoreFunction):
    """Score a predicate set by learning operators and comparing some heuristic
    against the demonstrations.

    Subclasses must choose the heuristic function and how to evaluate
    against the demonstrations.
    """
    # Names of the task-planning heuristics to evaluate; the best (lowest)
    # total across these names becomes the score.
    heuristic_names: Sequence[str]
    # If True, only demonstration trajectories contribute to the score.
    demos_only: bool = field(default=True)

    def evaluate_with_operators(self,
                                candidate_predicates: FrozenSet[Predicate],
                                low_level_trajs: List[LowLevelTrajectory],
                                segmented_trajs: List[List[Segment]],
                                strips_ops: List[STRIPSOperator],
                                option_specs: List[OptionSpec]) -> float:
        """Accumulate per-heuristic trajectory scores (up to configured demo
        and non-demo budgets) and return the best total, scaled by a config
        weight. Lower is better."""
        # Lower scores are better.
        scores = {name: 0.0 for name in self.heuristic_names}
        seen_demos = 0
        seen_nondemos = 0
        max_demos = CFG.grammar_search_max_demos
        max_nondemos = CFG.grammar_search_max_nondemos
        assert len(low_level_trajs) == len(segmented_trajs)
        # Frozensets of atoms from every demo state, used by subclasses to
        # check whether a predicted state matches some demonstrated state.
        demo_atom_sets = {
            frozenset(a)
            for ll_traj, seg_traj in zip(low_level_trajs, segmented_trajs)
            if ll_traj.is_demo
            for a in utils.segment_trajectory_to_atoms_sequence(seg_traj)
        }
        for ll_traj, seg_traj in zip(low_level_trajs, segmented_trajs):
            # Skip this trajectory if it's not a demo and we only want demos.
            if self.demos_only and not ll_traj.is_demo:
                continue
            # Skip this trajectory if we've exceeded a budget.
            if (ll_traj.is_demo and seen_demos == max_demos) or (
                    not ll_traj.is_demo and seen_nondemos == max_nondemos):
                continue
            if ll_traj.is_demo:
                seen_demos += 1
            else:
                seen_nondemos += 1
            atoms_sequence = utils.segment_trajectory_to_atoms_sequence(
                seg_traj)
            init_atoms = atoms_sequence[0]
            objects = set(ll_traj.states[0])
            goal = self._train_tasks[ll_traj.train_task_idx].goal
            # Ground every learned operator with the objects in this task.
            ground_ops = {
                op
                for strips_op in strips_ops
                for op in utils.all_ground_operators(strips_op, objects)
            }
            for heuristic_name in self.heuristic_names:
                heuristic_fn = self._generate_heuristic(
                    heuristic_name, init_atoms, objects, goal, strips_ops,
                    option_specs, ground_ops, candidate_predicates)
                scores[heuristic_name] += self._evaluate_atom_trajectory(
                    atoms_sequence, heuristic_fn, ground_ops, demo_atom_sets,
                    ll_traj.is_demo)
        # Take the best-performing heuristic's total as the score.
        score = min(scores.values())
        return CFG.grammar_search_heuristic_based_weight * score

    def _generate_heuristic(
            self, heuristic_name: str, init_atoms: Set[GroundAtom],
            objects: Set[Object], goal: Set[GroundAtom],
            strips_ops: Sequence[STRIPSOperator],
            option_specs: Sequence[OptionSpec],
            ground_ops: Set[_GroundSTRIPSOperator],
            candidate_predicates: Collection[Predicate]
    ) -> Callable[[Set[GroundAtom]], float]:
        """Build a heuristic mapping ground-atom states to float estimates."""
        raise NotImplementedError("Override me!")

    def _evaluate_atom_trajectory(self, atoms_sequence: List[Set[GroundAtom]],
                                  heuristic_fn: Callable[[Set[GroundAtom]],
                                                         float],
                                  ground_ops: Set[_GroundSTRIPSOperator],
                                  demo_atom_sets: Set[FrozenSet[GroundAtom]],
                                  is_demo: bool) -> float:
        """Score one atoms trajectory under heuristic_fn (lower is better)."""
        raise NotImplementedError("Override me!")
@dataclass(frozen=True, eq=False, repr=False)
class _HeuristicMatchBasedScoreFunction(_HeuristicBasedScoreFunction):
    """Score a trajectory by how closely the heuristic tracks the true
    costs-to-go along the demonstrations.

    For a trajectory of length T, the ideal heuristic value at step i is
    T - 1 - i; the score is the summed absolute deviation from that ideal.
    """

    def _evaluate_atom_trajectory(self, atoms_sequence: List[Set[GroundAtom]],
                                  heuristic_fn: Callable[[Set[GroundAtom]],
                                                         float],
                                  ground_ops: Set[_GroundSTRIPSOperator],
                                  demo_atom_sets: Set[FrozenSet[GroundAtom]],
                                  is_demo: bool) -> float:
        """Sum of |heuristic - true cost-to-go| over all trajectory states."""
        final_step = len(atoms_sequence) - 1
        return sum(
            (abs(heuristic_fn(atoms) - (final_step - step))
             for step, atoms in enumerate(atoms_sequence)), 0.0)
@dataclass(frozen=True, eq=False, repr=False)
class _HeuristicEnergyBasedScoreFunction(_HeuristicBasedScoreFunction):
    """Implement _evaluate_atom_trajectory() by using the induced operators to
    compute an energy-based policy, and comparing that policy to demos.

    Overview of the idea:
    1. Predicates induce operators. Denote this ops(preds).
    2. Operators induce a heuristic. Denote this h(state, ops(preds)).
    3. The heuristic induces a greedy one-step lookahead energy-based policy.
       Denote this pi(a | s) propto exp(-k * h(succ(s, a), ops(preds)) where
       k is CFG.grammar_search_energy_based_temperature.
    4. The objective for predicate learning is to maximize prod pi(a | s)
       where the product is over demonstrations.
    """

    def _evaluate_atom_trajectory(self, atoms_sequence: List[Set[GroundAtom]],
                                  heuristic_fn: Callable[[Set[GroundAtom]],
                                                         float],
                                  ground_ops: Set[_GroundSTRIPSOperator],
                                  demo_atom_sets: Set[FrozenSet[GroundAtom]],
                                  is_demo: bool) -> float:
        """Return the negative log likelihood of the demo trajectory under
        the energy-based policy (lower is better). Only valid on demos."""
        assert is_demo
        score = 0.0
        for i in range(len(atoms_sequence) - 1):
            atoms, next_atoms = atoms_sequence[i], atoms_sequence[i + 1]
            # Log-prob masses accumulated via logaddexp for stability.
            ground_op_demo_lpm = -np.inf  # total log prob mass for demo actions
            ground_op_total_lpm = -np.inf  # total log prob mass for all actions
            for predicted_next_atoms in utils.get_successors_from_ground_ops(
                    atoms, ground_ops, unique=False):
                # Compute the heuristic for the successor atoms.
                h = heuristic_fn(predicted_next_atoms)
                # Compute the probability that the correct next atoms would be
                # output under an energy-based policy.
                k = CFG.grammar_search_energy_based_temperature
                log_p = -k * h
                ground_op_total_lpm = np.logaddexp(log_p, ground_op_total_lpm)
                # Check whether the successor atoms match the demonstration.
                if predicted_next_atoms == next_atoms:
                    ground_op_demo_lpm = np.logaddexp(log_p,
                                                      ground_op_demo_lpm)
            # If there is a demonstration state that is a dead-end under the
            # operators, immediately return a very bad score, because planning
            # with these operators would never be able to recover the demo.
            if ground_op_demo_lpm == -np.inf:
                return float("inf")
            # Accumulate the log probability of each (state, action) in this
            # demonstrated trajectory.
            trans_log_prob = ground_op_demo_lpm - ground_op_total_lpm
            score += -trans_log_prob  # remember that lower is better
        return score
@dataclass(frozen=True, eq=False, repr=False)
class _HeuristicCountBasedScoreFunction(_HeuristicBasedScoreFunction):
    """Implement _evaluate_atom_trajectory() by using the induced operators to
    compute estimated costs-to-go.

    Then for each transition in the atoms_sequence, check whether the
    transition is optimal with respect to the estimated costs-to-go. If
    the transition is optimal and the sequence is not a demo, that's
    assumed to be bad; if the transition is not optimal and the sequence
    is a demo, that's also assumed to be bad.

    Also: for each successor that is one step off the atoms_sequence, if the
    state is optimal, then check if the state is "suspicious", meaning that it
    does not "match" any state in the demo data. The definition of match is
    currently based on utils.unify(). It may be that this definition is too
    strong, so we could consider others. The idea is to try to distinguish
    states that are actually impossible (suspicious) from ones that are simply
    alternative steps toward optimally achieving the goal.
    """

    def _evaluate_atom_trajectory(
        self,
        atoms_sequence: List[Set[GroundAtom]],
        heuristic_fn: Callable[[Set[GroundAtom]], float],
        ground_ops: Set[_GroundSTRIPSOperator],
        demo_atom_sets: Set[FrozenSet[GroundAtom]],
        is_demo: bool,
    ) -> float:
        """Sum the configured penalties over all transitions in the
        trajectory (lower is better)."""
        score = 0.0
        for i in range(len(atoms_sequence) - 1):
            atoms, next_atoms = atoms_sequence[i], atoms_sequence[i + 1]
            # Track the best heuristic value among successors, the value of
            # the on-sequence successor, and all argmin successors.
            best_h = float("inf")
            on_sequence_h = float("inf")
            optimal_successors = set()
            for predicted_next_atoms in utils.get_successors_from_ground_ops(
                    atoms, ground_ops, unique=False):
                # Compute the heuristic for the successor atoms.
                h = heuristic_fn(predicted_next_atoms)
                if h < best_h:
                    optimal_successors = {frozenset(predicted_next_atoms)}
                    best_h = h
                elif h == best_h:
                    optimal_successors.add(frozenset(predicted_next_atoms))
                if predicted_next_atoms == next_atoms:
                    # The on-sequence successor's h should be set at most
                    # once (or repeatedly to the same value).
                    assert on_sequence_h in [h, float("inf")]
                    on_sequence_h = h
            # Bad case 1: transition is optimal and sequence is not a demo.
            if on_sequence_h == best_h and not is_demo:
                score += CFG.grammar_search_off_demo_count_penalty
            # Bad case 2: transition is not optimal and sequence is a demo.
            elif on_sequence_h > best_h and is_demo:
                score += CFG.grammar_search_on_demo_count_penalty
            # Bad case 3: there is a "suspicious" optimal state.
            for successor in optimal_successors:
                # If we're looking at a demo and the successor matches the
                # next state in the demo, then the successor obviously matches
                # some state in the demos, and thus is not suspicious.
                if is_demo and successor == frozenset(next_atoms):
                    continue
                if self._state_is_suspicious(successor, demo_atom_sets):
                    score += CFG.grammar_search_suspicious_state_penalty
        return score

    @staticmethod
    def _state_is_suspicious(
            successor: FrozenSet[GroundAtom],
            demo_atom_sets: Set[FrozenSet[GroundAtom]]) -> bool:
        """Return True iff successor does not unify with any demo state."""
        for demo_atoms in demo_atom_sets:
            suc, _ = utils.unify(successor, demo_atoms)
            if suc:
                return False
        return True
@dataclass(frozen=True, eq=False, repr=False)
class _RelaxationHeuristicBasedScoreFunction(_HeuristicBasedScoreFunction):
    """Implement _generate_heuristic() with a delete relaxation heuristic like
    hadd, hmax, or hff."""
    # Depth of exhaustive lookahead over ground-operator successors applied
    # on top of the base heuristic (0 = use the heuristic value directly).
    lookahead_depth: int = field(default=0)

    def _generate_heuristic(
            self, heuristic_name: str, init_atoms: Set[GroundAtom],
            objects: Set[Object], goal: Set[GroundAtom],
            strips_ops: Sequence[STRIPSOperator],
            option_specs: Sequence[OptionSpec],
            ground_ops: Set[_GroundSTRIPSOperator],
            candidate_predicates: Collection[Predicate]
    ) -> Callable[[Set[GroundAtom]], float]:
        """Build a memoized relaxation heuristic, optionally refined by
        recursive lookahead up to self.lookahead_depth."""
        # Restrict to operators whose preconditions are reachable from the
        # initial atoms; unreachable operators can never fire.
        all_reachable_atoms = utils.get_reachable_atoms(ground_ops, init_atoms)
        reachable_ops = [
            op for op in ground_ops
            if op.preconditions.issubset(all_reachable_atoms)
        ]
        h_fn = utils.create_task_planning_heuristic(
            heuristic_name, init_atoms, goal, reachable_ops,
            set(candidate_predicates) | self._initial_predicates, objects)
        del init_atoms  # unused after this
        # Memoize on (atoms, depth): the closure is called repeatedly with
        # the same states during scoring.
        cache: Dict[Tuple[FrozenSet[GroundAtom], int], float] = {}

        def _relaxation_h(atoms: Set[GroundAtom], depth: int = 0) -> float:
            cache_key = (frozenset(atoms), depth)
            if cache_key in cache:
                return cache[cache_key]
            if goal.issubset(atoms):
                result = 0.0
            elif depth == self.lookahead_depth:
                result = h_fn(atoms)
            else:
                successor_hs = [
                    _relaxation_h(next_atoms, depth + 1)
                    for next_atoms in utils.get_successors_from_ground_ops(
                        atoms, ground_ops)
                ]
                if not successor_hs:
                    # NOTE(review): this dead-end result is returned without
                    # being cached, unlike the other branches — confirm
                    # whether that is intentional.
                    return float("inf")
                result = 1.0 + min(successor_hs)
            cache[cache_key] = result
            return result

        return _relaxation_h
@dataclass(frozen=True, eq=False, repr=False)
class _ExactHeuristicBasedScoreFunction(_HeuristicBasedScoreFunction):
"""Implement _generate_heuristic() with | |
= _mod_rank_best
_ranks.append([(Path(bn), gas, *i) for i in _mod_rank])
Mod_Rank = pd.DataFrame(
[a for i in _ranks for a in i],
columns=["PAR_file", "Gas", "Model_EEC", "rank_len_gr", f"rank_{errtype}_mean"],
)
Best_Mod_Rank = Mod_Rank.loc[
(Mod_Rank.rank_len_gr > 6) & (Mod_Rank[f"rank_{errtype}_mean"] < 30)
]
Mod_Rank_grps = (
Mod_Rank.groupby(["Model_EEC", "Gas"])["rank_len_gr", f"rank_{errtype}_mean"]
.agg(["count", "mean", "sum", "std"])
.sort_index(level=1)
)
_mrge = Mod_Rank_grps.reset_index()
_mrge = _flatten_columns(_mrge)
_mrge.columns
Mod_Ranks = pd.merge(_mrge, EIS_models_lenpars, on="Model_EEC", how="left")
N2 = Mod_Ranks.query('Gas == "N2" & rank_len_gr_count > 3')
def plot_ranks():
    """Plot fitted-parameter evolution vs E for the globally selected model.

    Relies on enclosing/module-level state: bgrpmod, bgrp, PPDmod,
    _mod_select, EIS_models, SampleCodes, OriginColor, eisplot.
    """
    # Validates that the selected model exists in the grouped data
    # (get_group raises KeyError otherwise); the value itself is unused.
    _selected_group = bgrpmod.get_group(_mod_select)
    dest_pars_file = PPDmod.joinpath(f'Pars_Ev_{_mod_select.split("Model")[-1]}')
    matches = [
        m for m in EIS_models.lmfit_models if m.model.name == _mod_select
    ]
    selected_model = matches[0]
    eisplot.plot_par_with_Ev(
        selected_model, bgrp, SampleCodes, OriginColor, dest_pars_file
    )
bgrp.groupby("Model_EEC").agg()
newECexp = f"{bgrp.ECexp.unique()[0]}_{gas}.xlsx"
_specfit_files = bgrp.File_SpecFit.unique()
spectras_comb_all = pd.concat(
[
pd.read_excel(specfile, index_col=[0]).assign(
**{"File_ReadFile": specfile}
)
for specfile in _specfit_files
if "_spectrumfit_" in specfile
],
sort=False,
)
spectras_comb = spectras_comb_all.dropna(subset=["Model_EEC"])
spectras_comb_false = spectras_comb_all.loc[spectras_comb_all.Model_EEC.isna()]
# for bng_file in bgrp.File_SpecFit.unique():
# spf = pd.read_excel(bng_file,index_col=[0])
# spf.columns
## pd.read_excel(bgrp.File_SpecFit.unique()[0],index_col=[0])
# if not 'Model_EEC' in spectras_comb.columns:
# spectras_comb = pd.concat([pd.read_excel(specfile,index_col=[0]) for specfile in bgrp.File_SpecRaw.unique()])
#
spec_comb_mod = (
spectras_comb.groupby(["Model_EEC"])
.get_group(_mod_select)
.sort_values(by=[EvRHE, "Frequency(Hz)"])
)
# [(n,gr.File_ReadFile.unique()) for n,gr in spec_comb_mod.groupby([EvRHE])]
DestFile = PPDmod.joinpath(
f'{Path(newECexp).stem}_{mod.split("Model")[-1]}.png'
)
# TODO FIX FOR OR MAKE FIT USING PARS!!
# EEC_models_index(select=mod)[0][1]
plot_combined_model(spec_comb_mod, bgrp, SampleCodes, DestFile)
bgrp.to_excel(PPDmod.joinpath(Path(f"Pars_Ev_{newECexp}")))
# pars = plotting.eisplot.parlst + plotting.eisplot.extrapars + ignore_cols
outlst = []
uniqcols = {
i: bgrp[i].unique()[0] for i in bgrp.columns if bgrp[i].nunique() == 1
}
parcols = [i for i in bgrp.columns if i not in uniqcols.keys()]
for Ev, Egrp in bgrp.groupby("E_RHE"):
parcols_Ev = [f"{i}_{Ev*1000:.0f}" for i in parcols if not "E_RHE" in i]
Egrp_new = Egrp.rename(columns=dict(zip(parcols, parcols_Ev)))
Egrp_new = Egrp_new.assign(
**{"basename": bn, "Gas": gas, "SampleID": uniqcols.get("SampleID")}
)
Egrp_new = Egrp_new.set_index(["basename", "Gas", "SampleID"])
outlst.append(Egrp_new[parcols_Ev])
basegrp_Ev_cols = pd.concat(outlst, ignore_index=False, axis=1)
uniqcols.update({"OriginDestFile": PPDmod.joinpath(newECexp)})
_meta.append(uniqcols)
basegrp_Ev_cols.to_excel(PPDmod.joinpath(newECexp))
meta = pd.concat([pd.DataFrame(i, index=[0]) for i in _meta])
meta.to_excel(PPDmod.joinpath("meta_data_EIS_origin.xlsx"))
# def check_best_model_per_segment(ORR_acid_no):
# EIS_models = Model_Collection()
# _errs = []
# for (pf, nseg),grp in ORR_acid_no.groupby(['PAR_file', 'Segment #']):
# (pf, nseg),grp
# for modn, modgrp in grp.groupby('Model_EEC'):
# modn,modgrp
# modpars = EIS_models.modpars.get(modn)
# if modpars:
# modpars_errs = [i+'_stderr' for i in modpars]
# moderr = modgrp[modpars_errs].iloc[0].T
# _badpars = ', '.join([key.split('_stderr')[0] for key,val in moderr.to_dict().items() if val > 1E3])
# _errs.append((pf,nseg, modn, moderr.sum(), moderr.mean(), moderr.std(), _badpars ))
# _prfx = 'lmfit_errpars'
# EIS_pars_errsum = pd.DataFrame(_errs,columns =['PAR_file', 'Segment #','Model_EEC',f'{_prfx}_sum', f'{_prfx}_mean', f'{_prfx}_std',f'{_prfx}_badpars'])
# ORR_acid_no_err = pd.merge(ORR_acid_no,EIS_pars_errsum, how='left')
# return ORR_acid_no_err
def plot_combined_model(spectras_comb, bgrp, SampleCodes, DestFile):
    """Save one combined EIS plot per representation (Z, Y, -Zangle).

    Each figure is written next to DestFile with the representation name
    appended to its stem.
    """
    for axis_kind in ("Z", "Y", "-Zangle"):
        dest_path = DestFile.parent.joinpath(f"{DestFile.stem}_{axis_kind}.png")
        eisplot.PlotCombinedEIS(
            spectras_comb, bgrp, SampleCodes, dest_path, xEIS=axis_kind
        )
def plot_vars():
    """Plot each variable in var_names against E_RHE, annotating z-scores.

    Uses enclosing-scope names: var_names, Mgrp. Each point is labeled
    with its (rounded) z-score along that variable's column.
    """
    for var in var_names:
        fig, ax = plt.subplots()
        Mgrp.plot(x="E_RHE", y=var, ax=ax)
        zscores = np.abs(stats.zscore(Mgrp[var]))
        for (_, point), zsc in zip(Mgrp[["E_RHE", var]].iterrows(), zscores):
            ax.annotate(
                np.round(zsc, 2),
                point,
                xytext=(10, -5),
                textcoords="offset points",
                family="sans-serif",
                fontsize=18,
                color="darkslategrey",
            )
# def plot_pars_with_Ev(bgrp,SampleCodes, DestFilePars):
# for ZY in ['Z', 'Y', '-Zangle']:
# DestZY = DestFile.parent.joinpath(f'{DestFile.stem}_{ZY}.png')
# eisplot.plot_par_with_Ev(bgrp,SampleCodes,DestFilePars)
# plot_combined_model(spectras_comb, bgrp,SampleCodes, DestFile)
def MergeEISandORR():
EODD = PostEC().DestDir.joinpath("EIS_ORR")
EODD.mkdir(parents=True, exist_ok=True)
# TODO 2020.01.16 trying to merge EIS and ORR pars
EC_midx = ["SampleID", "pH", "Electrolyte", "Loading_cm2", "postAST"]
EIScols = (
post_helper.CheckCols(
SampleSelection.EC_EIS_par_cols
+ ["ECexp", "E_RHE", "SampleID"]
+ ["lmfit_redchi", "Chisqr", "Model_EEC", "Model_index"],
EIS_pars,
)
+ post_helper.CheckCols(SampleSelection.EC_exp_cols, ORR_pars)
)
# Cdlcatancols = Cdl_pars_catan[EC_midx+['ECexp','E_RHE']+MakingCorrs.CheckCols(SampleSelection.EC_exp_cols,Cdl_pars_catan)]
fast_checking_EEC_models = [
"Model(Singh2015_RQRQR)",
"Model(Singh2015_RQRWR)",
"Model(Singh2015_R3RQ)",
"Model(Bandarenka_2011_RQRQR)",
]
EIS_midx = EIS_pars.set_index(EC_midx).loc[:, EIScols].dropna(axis=1)
sIDs_used = EIS_midx.index.get_level_values(level="SampleID").unique()
ORR_midx = ORR_pars.set_index(EC_midx)
ORR_mx_used = ORR_midx.loc[
ORR_midx.index.get_level_values(level="SampleID").isin(sIDs_used)
]
Cdl_catan_midx = Cdl_pars_catan.set_index(EC_midx)
Cdl_mx_used = Cdl_catan_midx.loc[
Cdl_catan_midx.index.get_level_values(level="SampleID").isin(sIDs_used)
]
# pd.merge(EIS_midx,ORR_midx,how='inner',left_index=True, right_index=True)
EIS_midx.join(ORR_midx, lsuffix="_eis", rsuffix="_ORR")
# EIS_ORR_pars = pd.merge(EIS_pars, ORR_pars , on=['ECexp'],how='left',suffixes=['_eis','_ORR']) # 2013511
# EIS_ORR_pars.to_pickle(EODD.joinpath('EIS_ORR_pars_full.pkl.compress'),compression='xz')
print(EIS_pars.Model_EEC.unique())
EIS_ORR_fast = pd.merge(
EIS_pars.query('Model_EEC == "Model(Singh2015_R3RQ)"'),
ORR_pars,
on=["ECexp"],
how="left",
suffixes=["_eis", "_ORR"],
) # 2013511
print(EIS_ORR_fast.ECexp.unique())
EIS_ORR_fast.to_pickle(
EODD.joinpath("EIS_ORR_pars_mSingRQRQR.pkl.compress"), compression="xz"
)
faillst, ORReis_merge_lst = [], []
merge_Model_EEC = [" Model(Singh2015_RQRQR)", "Model(Singh2015_R3RQ)"]
merge_Model_EEC_set = merge_Model_EEC[1]
add_ORR_cols = [
i
for i in ORR_pars.columns
if i not in EIS_pars.columns and i.split("_")[-1] not in ["x", "y"]
]
for exp, ECgr in EIS_pars.query(
f'(Model_EEC == "{merge_Model_EEC_set}") & (RPM_DAC > 1000)'
).groupby("ECexp"):
exp, ECgr
ORR_ECexp_slice = ORR_pars.query("ECexp == @exp")
if not ORR_ECexp_slice.empty:
# TODO Merge on E_RHE with data from ORR scans and compare then...
EIS_ECexp = ECgr
nECexp = exp
ORR_ECexp = ORR_ECexp_slice
ORR_ECexp_PARS_1500 = ORR_ECexp.query("RPM > 1000")[add_ORR_cols]
if len(ORR_ECexp_PARS_1500) == 1:
for col in ORR_ECexp_PARS_1500.columns:
EIS_ECexp[col] = [ORR_ECexp_PARS_1500[col].values[0]] * len(
EIS_ECexp
)
ORReis_merge_lst.append(EIS_ECexp)
else:
faillst.append([exp, ORR_ECexp_slice, "len_not_1"])
else:
faillst.append([exp, ORR_ECexp_slice, "ORR_empty"])
failed_ECexps = pd.DataFrame(
[(i[0], i[-1]) for i in faillst], columns=["ECexp", "MergeFailMsg"]
)
failed_ECexps_grps = pd.concat([i[1] for i in faillst], sort=False)
ORReis_merge_raw = pd.concat(ORReis_merge_lst, sort=False)
if not Cdl_pars_catan.empty:
mcs1 = [i for i in Cdl_pars_catan.columns if i in ORReis_merge_raw.columns]
olap = [i for i in Cdl_pars_catan.columns if i in ORReis_merge_raw.columns]
[
i
for i in olap
if Cdl_pars_catan[i].nunique() == 1 and ORReis_merge_raw[i].nunique() == 1
]
mcs = [
i
for i in SampleSelection.EC_exp_cols
if i in ORReis_merge_raw.columns and i in Cdl_pars_catan.columns
]
EIS_ORR_Cdl_fast = pd.merge(
ORReis_merge_raw.query(f'Model_EEC == "{merge_Model_EEC_set}"'),
Cdl_pars_catan,
on=["ECexp", "E_RHE"],
how="left",
suffixes=["", "_cdl"],
) # 2013511
ORReis_merge_raw.to_pickle(EODD.joinpath("EIS_ORR_refit_pars.pkl.compress"))
ORReis_mergeCB = ORReis_merge_raw.loc[
ORReis_merge_raw.SampleID.isin(SampleSelection.Series_CB_paper["sIDs"])
]
ORReis_merge = EIS_ORR_Cdl_fast
RedChiSq_limit = (
ORReis_merge.query("Rs > 1").lmfit_redchi.mean()
+ 1 * ORReis_merge.query("Rs > 1").lmfit_redchi.std()
)
ORReis_neat = ORReis_merge.query(
"lmfit_redchi < @RedChiSq_limit & Rs > 2 & Rct < 9E05"
)
ORReis_neat.query("E_RHE < 0.8 & E_RHE > 0.74 & Rct < 9E05").plot(
x="E_onset",
y="Rct",
kind="scatter",
ylim=(0.1, 1e5),
xlim=(0.5, 1),
logy=True,
logx=0,
c="pH",
colormap="viridis",
)
ORReis_neat.query("E_RHE < 0.8 & E_RHE > 0.74 & Rct < 9E05").plot(
x="E_onset",
y="Cdl_an",
kind="scatter",
xlim=(0.5, 1),
logy=True,
logx=0,
c="pH_cdl",
colormap="viridis",
)
ORReis_neat.SampleID.unique()
EIS_pars.SampleID.unique()
# .loc[ORReis_neat.SampleID.isin(SampleSelection.Series_CB_paper['sIDs']+SampleSelection.Series__paper['sIDs']]
sIDslice = (
SampleSelection.Series_CB_paper["sIDs"]
+ SampleSelection.Series_Porhp_SiO2["sIDs"]
)
Ekin = (
ORReis_neat.loc[ORReis_neat.SampleID.isin(sIDslice)]
.query('E_RHE <= 0.9 & E_RHE >= 0.6 & Gas == "O2" & SampleID != "JOS5" ')
.dropna(subset=["SeriesID"])
)
Ekin_grp = Ekin.groupby(["pH", "E_RHE"])
ORReis_neat.query("E_RHE < 0.8 & E_RHE > 0.74 & Rct < 9E05").plot(
x="Rs",
y="Jkin_075",
kind="scatter",
ylim=(0.1, 100),
xlim=(0.5, 70),
logy=True,
logx=0,
c="BET_cat_agg",
colormap="rainbow_r",
)
ORReis_neat.query("E_RHE < 0.8 & E_RHE > 0.74 & Rct < 9E05").plot(
x="Cdl_cat",
y="Rs",
kind="scatter",
logy=True,
logx=0,
c="BET_cat_agg",
colormap="rainbow_r",
)
for i in [
i for i in SampleSelection.EC_EIS_par_cols if not "lmfit_redchi" in i
] + SampleSelection.EC_ORR_kin_par_cols:
EKinsl = Ekin.query(
"E_RHE < 0.76 & E_RHE > 0.59 & Rct < 9E05 & Rorr < 1E09 & Qad < 35E-3 & Cdlp < 0.070"
)
for Elec, Elgr in EKinsl.groupby("Electrolyte"):
fig, ax = plt.subplots()
Elgr.plot(
x="BET_cat_agg",
y=i,
kind="scatter",
c="E_RHE",
colormap="rainbow_r",
ax=ax,
)
ax.set_xlim = (0.6, 0.9)
ax.set_title(Elec)
ps = plotting.eisplot(i)
ax.set_ylim(ps.ylim)
ax.set_yscale(ps.logyscale)
plt.show()
plt.close()
def one_sample():
    # Inspect a single experiment slice (sample DW25 in 0.1 M H2SO4).
    # NOTE(review): the result is local and unused — looks like exploratory
    # code; presumably it was meant to be returned or plotted.
    DW25 = ORReis_neat.query('ECexp == "DW25_1.0_0.1MH2SO4_0.379_no_2019-01-28"')
def count_corr_occurences(grEcorrstack, *args):
    """Classify each entry of a stacked correlation series by parameter family.

    For every (index, value) item, count whether the correlated pair
    involves EIS parameters and/or ORR kinetic parameters, and tag pairs
    that touch characterization columns. args[0] and args[1] are carried
    through unchanged as pH and E_RHE.

    Returns a list of dicts with keys: corr_idx, val, ocurrc, occur_s,
    pH, E_RHE.
    """
    records = []
    for idx, corr_val in grEcorrstack.iteritems():
        hits = 0
        tag = ""
        if any(par in idx for par in eisplot.parlst):
            hits += 1
            tag += "eis"
        if any(par in idx for par in SampleSelection.EC_ORR_kin_par_cols):
            hits += 1
            tag += "orr"
        if any(par in idx for par in SampleSelection.Character_xcols):
            tag += "Xchar"
        records.append(
            {
                "corr_idx": idx,
                "val": corr_val,
                "ocurrc": hits,
                "occur_s": tag,
                "pH": args[0],
                "E_RHE": args[1],
            }
        )
    return records
Ekin_occur_lst = []
corr_method, corr_cutoff = "spearman", 0.1 # or default is pearson spearman
# TODO conti
for E, grE in Ekin_grp:
E, grE
for corr_m in ["pearson", "spearman"]:
grEcorrstack = grE.corr(method=corr_m).stack().sort_values()
grEcorrstack = grEcorrstack[np.abs(grEcorrstack) < 0.99]
occur_counts = count_corr_occurences(grEcorrstack, *E)
occur_DF = pd.DataFrame(occur_counts)
occur_DF["method"] = corr_m
Ekin_occur_lst.append(occur_DF)
Ekin_occur = (
pd.concat(Ekin_occur_lst, sort=True)
.drop_duplicates(subset=["val", "pH", "E_RHE"])
.sort_values(by="val")
)
Ekin_occur.query("ocurrc == 2")
grEtop = pd.concat(
[
Ekin_occur.query("ocurrc == 2").head(25),
Ekin_occur.query("ocurrc == 2").tail(25),
],
sort=False,
)
for pH, grppH in Ekin_occur.query("ocurrc == 2").groupby("pH"):
grEtop = pd.concat(
[grppH.query("ocurrc == 2").head(10), grppH.query("ocurrc == 2").tail(10)],
sort=False,
)
PPDEISbest_overall_pH = EODD.joinpath(
"MergeEIS_ORR_top_corrs_{0}_overall_{1}_{2}".format(
"both", pH, merge_Model_EEC_set
)
)
PPDEISbest_overall_pH.mkdir(parents=True, exist_ok=True)
for n, row in grEtop.iterrows():
# n,row
grE = Ekin_grp.get_group(tuple(row[["pH", "E_RHE"]]))
plot_pd_SampleIDs(
grE,
row.corr_idx[0],
row.corr_idx[1],
row.val,
PPDEISbest_overall_pH.joinpath(str(pH)),
corr_method=row["method"],
)
""" From the correlations in pH 13 it seems that nDL strongly correlates with the TSb,j_diff lim, so seems relating
to the exchange current density
ORR_pars.plot(x='TSb_l',y='E_onset',kind='scatter',xlim=(0,1),c='pH',colormap='viridis')
"""
PPDEISbest = EODD.joinpath("MergeEIS_ORR_top_corrs_{0}".format(corr_method))
PPDEISbest.mkdir(parents=True, exist_ok=True)
for E, grE in Ekin_grp:
E, grE
grEoccur = Ekin_occur.query(
"ocurrc == 2 & pH == @E[0] & E_RHE == @E[1] "
).sort_values(by=["val"])
rct_slice = [i for i in grEoccur.corr_idx.values if "Rct" in i]
rct_occur = grEoccur.loc[grEoccur.corr_idx.isin(rct_slice)]
grEtop = pd.concat([grEoccur.head(5), grEoccur.tail(5)], sort=False)
for n, tr in grEtop.iterrows():
# Gas_set,pH_set,Erhe_set,corr_val = tr[1].Gas,tr[1].pH,tr[1].E_RHE,tr[1][corr_method]
plot_pd_SampleIDs(
grE,
tr.corr_idx[0],
tr.corr_idx[1],
tr.val,
PPDEISbest.joinpath(f"{E[0]}_{E[1]}"),
)
plt.close()
for n, | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""Master Thesis Work
Copyright (c) 2018, <NAME>
All rights reserved.
The master thesis work has been carried out at the School of Engineering in Jönköping in
the subject area “Sentiment Analysis using Multi-label Machine Learning Techniques”.
The work is a part of the two-years Master of Science programme, Software Product
Engineering. The author takes full responsibility for opinions, conclusions and
findings presented.
Paper title:
Mining Comparative Opinion using Multi-label Machine Learning Techniques
Paper sub-title:
A case study to identify comparative opinions, based on product aspects, and their
sentiment classification, in online customer reviews..
Paper within:
Software Product Engineering Master’s Program
Scope:
30 credits (second cycle)
Author:
<NAME>
Supervisor:
<NAME>
Examinar:
<NAME>
In:
JÖNKÖPING December 2018
"""
# About this module
"""
Module name:
The name of this module is: "MLSAA: Multi-label Sentiment Analysis Application"
This module demonstrates the software developed for the multi-label sentiment
analysis application used in the master thesis work. The module contains the
required functions by the application. It has the functions to prepare the
source and development datasets. Also, the functions to preprocess the
labeled dataset, and run the experiments on the classification model.
The main functions discussed in the thesis are listed below:
read_source: Reads the reviews from the source dataset.
prepare_development: Create the development dataset from the source.
preprocess: Preprocess the text in the labeled dataset.
transform_labels: Transforms the labels into a multi-label format.
classify: Run the classification model which includes the multi-label
classification technique, machine learning classifier, dataset sampling,
features selection, and evaluation functions.
Example:
$ python
>>> import mlsaa
>>> mlsaa.classify(d="dataset_name", ct=1, pt=1, cl=1)
"""
# Importing general required modules
import pandas as pd
import numpy as np
import random
import pymysql
import re
import unicodedata
import warnings
import os.path
import sys
from enum import Enum
import time
import matplotlib.pyplot as plt
import itertools
# Import the module for handling contractions
from pycontractions import Contractions
# Importing all required nltk modules
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from nltk.corpus import sentiwordnet as swn
from nltk import sent_tokenize, word_tokenize, pos_tag
from nltk.sentiment.util import mark_negation
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Importing all required scikit-learn modules
from sklearn.base import TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import BernoulliNB
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
from sklearn.metrics import hamming_loss as hamming_loss_score
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
# Importing all required scikit-multilearn modules
from skmultilearn.problem_transform import LabelPowerset, BinaryRelevance, ClassifierChain
from skmultilearn.adapt import MLkNN
from skmultilearn.ensemble import RakelD
# A module variable for the classification model
classification_model = None
"""Attributes:
classification_model (classifier): as a module level variable to apply prediction on new data
if needed using predict function after training.
"""
# The function contains the code to sample the labeled dataset after preprocessing,
# train and test the classification model, and evaluate it.
def classify(dataset_name = "preprocessed_labeled", ct = 1, pt = 1, cl = 1,
ts = 0.25, fo = 10, bi = True, mf = 5000, minn = 1, maxn = 3,
se = True, co = True, ab = True, dt = False, cf = False):
"""The function contains the code to sample the labeled dataset after preprocessing,
train and test the classfication model, and evaluating it.
Args:
dataset_name (str): The name of the dataset to be classified.
ct (int): The number of the classification technique as in Table 4 in the paper.
pt (int): The number of the problem transformation method as in Table 5 in the paper.
cl (int): The number of the classifier as in Table 6 in the paper.
ts (int): The test dataset size (IV2 in Table 14 in the paper).
fo (int): The number of folds for the cross validations (IV5 in Table 14 in the paper).
bi (bool): The features as Bag of Words (BoW) with the occurrences of words as binary.
Value is True by default. If set to False the count of occurrences will be given
(IV6 in Table 14 in the paper).
mf (int): Maximum number of features (IV7 in Table 14 in the paper).
minn (int): Minimum number of words in the ngram (IV8 in Table 14 in the paper).
maxn (int): Maximum number of words in the ngram (IV9 in Table 14 in the paper).
se (bool): The sentiment label. True by default for the multi-label problem.
Can be set to False if binary classification is needed on other labels only.
co (bool): The comparative label. True by default for the multi-label problem.
Can be set to False if binary classification is needed on other labels only.
ab (bool): The aspect-based label. True by default for the multi-label problem.
Can be set to False if binary classification is needed on other labels only.
dt (bool): The report details in each cross validation fold. Set to True if all
metrics needed. It was used when doing binary classification mostly (Not tested
with multi-label).
cf (bool): Prints the confusion matrix if true (used for evaluation purposes with
binary classification).
Returns:
None
"""
# Initialize the classification model
global classification_model
# Get the number / count of labels to determine if it is single or multi-label problem
labels_count = 0
multi_label = False
# Variables to set the labels names, classes names and classification type for reporting
labels_names = []
classes_names = []
classification_type = "Binary"
if se:
labels_count +=1
labels_names.append("L1 - Sentiment")
classes_names.append("C1 - Positive")
classes_names.append("C2 - Negative")
if co:
labels_count +=1
labels_names.append("L2 - Comparative")
classes_names.append("C3 - Comparative")
classes_names.append("C4 - Non-comparative")
if ab:
labels_count +=1
labels_names.append("L3 - Aspect-based")
classes_names.append("C5 - Aspect-based")
classes_names.append("C6 - Non-aspect-based")
if labels_count > 1:
multi_label = True
classification_type = "Multi-label"
# Execut only if there is at least one label to classify,
# i.e. at least one of labels paramters (se, co, ab) is True
if labels_count > 0:
# Get the reviews text array from the preprocessed labeled dataset
reviews = get_reviews(dataset_name=dataset_name)
# Get labels array based on required labels to be predicted and the number of labels
# If it is multi-label problem, the function returns a binarized labels
# If not multi label, the fucnction returns a list of one list for one label
labels = get_labels(dataset_name=dataset_name, sentiment=se,
comparative=co, aspects_based=ab)
# A variable for the classifier name used when reporting at the end of the function
classifier_name = ""
# A variable for the classfier used in the classification model, set based on cl arg
classifier = None
# A variable for tfidf transfomer when needed with some classifiers such as kNN
tfidf_tranformer = None
# Select classifier based on the given arguments
if cl == 1:
classifier = BernoulliNB()
classifier_name = "CL1 - NB - Naive Bayes"
elif cl == 2:
classifier = LinearSVC()
classifier_name = "CL2 - SVM - Support Vector Machine"
elif cl == 3:
classifier = LogisticRegression()
classifier_name = "CL3 - MaxEnt - Maximum Entropy"
elif cl == 4:
classifier = KNeighborsClassifier(n_neighbors=100)
classifier_name = "CL4 - kNN - k-Nearest Neighbors"
tfidf_tranformer = TfidfTransformer()
elif cl == 5:
classifier = OneVsRestClassifier(DecisionTreeClassifier(max_depth=12))
classifier_name = "CL5 - DT - Decision Trees"
elif cl == 6:
classifier = OneVsRestClassifier(RandomForestClassifier(n_estimators=300, max_depth=15))
classifier_name = "CL6 - RF - Random Forest"
elif cl == 7:
classifier = MLkNN(k=150, s=0.5)
tfidf_tranformer = TfidfTransformer(use_idf=True)
classifier_name = "CL7 - MLkNN - Multi-label kNN"
else:
print("Please select a valid classifier number for cl.")
print("It can only be from 1-7 as in Table 6 in the paper.")
return
# Check the techniques and methods based on ct and pt args
technique_name = ""
method_name = ""
if multi_label:
if ct == 1: # Problem Transformtion technique
technique_name = "CT1 - Problem Transformtion"
if pt == 1:
classifier = BinaryRelevance(classifier) # Binary Relevance method
method_name = "PT1 - Binary Relevance"
elif pt == 2:
classifier = LabelPowerset(classifier) # Label Powerset method
method_name = "PT2 - Label Powerset"
elif pt == 3:
classifier = ClassifierChain(classifier) # Classifier Chain method
method_name = "PT3 - Classifier Chain"
elif not (cl == 4 or cl == 5 or cl == 6):
method_name = "N/A"
print("Please select a valid method number for pt with the selected classifier.")
print ("It can only be from 1-3 as in Table 5 in the paper")
return
elif ct == 2:
technique_name = "CT2 - Algorithm Adaptation"
if not cl == 7:
print("Classifier number 7, CL7 - MLkNN, can be used with this technique only.")
return
elif ct == 3: # Ensemble (RAKEL) technique
technique_name = "CT3 - Ensemble (RAKEL)"
classifier = RakelD(
base_classifier=classifier,
base_classifier_require_dense=[False, False],
labelset_size=3 # As suggested by the original paper
)
else:
print("Please select a | |
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, "./activations")
sys.path.insert(1, "./utils")
from relu import *
from sigmoid import *
from softmax import *
"""
TODO: check if softmax can be included in linear activation or it must be directly combined with cross entropy
TODO: Use the general cross-entropy to compute the cost
TODO: Update forward and back propagation using hyper parameters for activation functions
TODO: Reduce backpropagation(), considering all layers in the same way
"""
def linear_forward(A, W, b):
    """Compute the pre-activation Z = W·A + b for one layer.

    Arguments:
    A -- activations from the previous layer, shape (n_prev, n_examples)
    W -- weight matrix, shape (n_curr, n_prev)
    b -- bias column vector, shape (n_curr, 1)

    Returns:
    Z -- pre-activation values, shape (n_curr, n_examples)
    cache -- tuple (A, W, b) stored for the backward pass
    """
    Z = W @ A + b
    # Sanity-check the broadcasted result before handing it on.
    assert Z.shape == (W.shape[0], A.shape[1])
    return Z, (A, W, b)
def linear_activation_forward(A_prev, W, b, activation):
    """Forward propagation for the LINEAR->ACTIVATION layer.

    Arguments:
    A_prev -- activations from the previous layer, shape (n_prev, n_examples)
    W -- weight matrix, shape (n_curr, n_prev)
    b -- bias vector, shape (n_curr, 1)
    activation -- one of "sigmoid", "relu", "softmax" or "linear"

    Returns:
    A -- post-activation value, shape (n_curr, n_examples)
    cache -- (linear_cache, activation_cache) for the backward pass

    Raises:
    ValueError -- if `activation` is not a recognized name.
    """
    # The linear step is identical for every activation, so compute it once
    # (the original duplicated this call in every branch).
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)
    elif activation == "softmax":
        A, activation_cache = softmax(Z)
    elif activation == "linear":
        # No activation function (useful for Word2Vec): identity pass-through.
        A = Z
        activation_cache = Z
    else:
        # BUG FIX: the original used independent `if`s with no final else,
        # so an unknown activation fell through and crashed later with a
        # confusing NameError; fail fast with a clear message instead.
        raise ValueError(f"Unknown activation: {activation!r}")
    assert A.shape == (W.shape[0], A_prev.shape[1])
    cache = (linear_cache, activation_cache)
    return A, cache
def forward_propagation(X, parameters, hyper_parameters):
    """Run the full forward pass through all layers of the network.

    Extends forward propagation for the
    [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation.

    Arguments:
    X -- input data, shape (input size, number of examples)
    parameters -- dict of "W1", "b1", ..., "WL", "bL" arrays
    hyper_parameters -- dict holding per-layer "activations" names

    Returns:
    AL -- activation of the last layer
    caches -- list with one forward cache per layer
    """
    caches = []
    activations = hyper_parameters["activations"]
    # parameters holds one (W, b) pair per layer.
    n_layers = len(parameters) // 2
    A = X
    for layer in range(1, n_layers + 1):
        A, cache = linear_activation_forward(
            A,
            parameters["W" + str(layer)],
            parameters["b" + str(layer)],
            activations[layer],
        )
        caches.append(cache)
    return A, caches
def compute_loss(AL, Y):
    """Total cross-entropy loss over all samples.

    Cross entropy measures the distance between the predicted
    distribution AL and the true (one-hot) distribution Y; this
    generalizes the logistic-regression loss to multiclass problems.

    Arguments:
    AL -- predicted probabilities, shape (n_classes, n_examples)
    Y -- one-hot true labels, same shape as AL

    Returns:
    loss -- scalar total cross-entropy loss
    """
    elementwise = -Y * np.log(AL)
    return np.squeeze(np.sum(elementwise))
def compute_cost(AL, Y):
    """Average of the cross-entropy loss contribution per sample.

    Arguments:
    AL -- predicted probabilities, shape (n_classes, n_examples)
    Y -- one-hot true labels, same shape as AL

    Returns:
    cost -- scalar mean cross-entropy cost
    """
    n_samples = AL.shape[1]
    # compute_loss already returns a scalar; the outer sum is a no-op kept
    # for exact parity with the previous implementation.
    cost = (1. / n_samples) * np.sum(compute_loss(AL, Y))
    # Ensure a plain scalar shape (e.g. turns [[17]] into 17).
    return np.squeeze(cost)
def linear_backward(dZ, cache):
    """Backward pass for the linear portion of a single layer.

    Arguments:
    dZ -- gradient of the cost w.r.t. the linear output Z, shape (n_curr, m)
    cache -- tuple (A_prev, W, b) stored during the forward pass

    Returns:
    dA_prev -- gradient w.r.t. the previous activations, same shape as A_prev
    dW -- gradient w.r.t. W, same shape as W
    db -- gradient w.r.t. b, same shape as b
    """
    A_prev, W, b = cache
    n_samples = A_prev.shape[1]
    scale = 1 / n_samples
    dW = scale * (dZ @ A_prev.T)
    db = scale * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = W.T @ dZ
    # Gradients must mirror the shapes of the forward-pass quantities.
    assert dA_prev.shape == A_prev.shape
    assert dW.shape == W.shape
    assert db.shape == b.shape
    return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
    """Backward propagation for the LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for the current layer
    cache -- (linear_cache, activation_cache) from the forward pass
    activation -- one of "sigmoid", "relu", "softmax" or "linear"

    Returns:
    dA_prev -- gradient w.r.t. the previous activations, same shape as A_prev
    dW -- gradient w.r.t. W (current layer), same shape as W
    db -- gradient w.r.t. b (current layer), same shape as b

    Raises:
    ValueError -- if `activation` is not a recognized name.
    """
    linear_cache, activation_cache = cache
    if activation == "relu":
        dZ = relu_grad(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_grad(dA, activation_cache)
    elif activation == "softmax":
        dZ = softmax_grad(dA, activation_cache)
        # Collapse the non-zero entries of the softmax gradient back into a
        # column vector before the linear step. NOTE(review): this assumes
        # exactly one non-zero entry per row (n_rows non-zeros total) —
        # confirm against softmax_grad's contract.
        rows, cols = np.nonzero(dZ)
        dZ = dZ[rows, cols].reshape([dZ.shape[0], 1])
    elif activation == "linear":
        # Identity activation: the gradient passes through unchanged.
        dZ = dA
    else:
        # BUG FIX: the original elif chain had no final else, so an unknown
        # activation crashed later with UnboundLocalError on dZ; fail fast
        # with a clear message instead.
        raise ValueError(f"Unknown activation: {activation!r}")
    return linear_backward(dZ, linear_cache)
def backpropagation(AL, Y, caches, hyper_parameters):
"""
Implement the backward propagation
Arguments:
AL -- probability vector, output of the forward propagation
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing every cache of linear_activation_forward() with the activation layer
hyper_parameters -- hyper parameters of the networks (in this case I need activation functions)
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1] # the number of samples at the output layer
Y = Y.reshape(AL.shape) # after this line, Y has the same shape of AL
# The first step of the backpropagation changes according to the activation function of the last layer
if (hyper_parameters["activations"][L] == "sigmoid"):
# Compute the derivative of the cross entropy cost for logistic regression:
# np.multiply(-np.log(AL), Y) + np.multiply(-np.log(1 - AL), 1 - Y)
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
elif(hyper_parameters["activations"][L] == "softmax"):
# Compute the derivative of the cross entropy cost for a multilabel classifier.
# Y * np.log(AL)
# You obtain a vector like [0,0,0,1/AL,0] because all elements of vector Y are 0s except 1
dAL = - np.divide(Y, AL)
current_cache = caches[-1]
grads["dA" + str(L)], grads["dW" | |
kwargs: The argname=argval items that the functions should draw from.
:return:
>>> def add(a, b: float = 0.0) -> float:
... return a + b
>>> def mult(x: float, y=1):
... return x * y
>>> def formula1(w, /, x: float, y=1, *, z: int = 1):
... return ((w + x) * y) ** z
>>> commands = extract_commands(
... (add, mult, formula1), a=1, b=2, c=3, d=4, e=5, w=6, x=7
... )
>>> for command in commands:
... print(
... f"Calling {command.func.__name__} with "
... f"args={command.args} and kwargs={command.kwargs}"
... )
... print(command())
...
Calling add with args=() and kwargs={'a': 1, 'b': 2}
3
Calling mult with args=() and kwargs={'x': 7}
7
Calling formula1 with args=(6,) and kwargs={'x': 7}
13
"""
extract = partial(
extract_arguments,
what_to_do_with_remainding=what_to_do_with_remainding,
include_all_when_var_keywords_in_params=False,
assert_no_missing_position_only_args=True,
)
if callable(funcs):
funcs = [funcs]
for func in funcs:
func_args, func_kwargs = extract(func, **kwargs)
yield mk_command(func, *func_args, **func_kwargs)
def commands_dict(
    funcs,
    *,
    mk_command: Callable[[Callable, tuple, dict], Any] = Command,
    what_to_do_with_remainding='ignore',
    **kwargs,
):
    """Make a ``{func: command}`` dict with one ready-to-call command per function.

    Each command is built by extracting from ``kwargs`` only those arguments
    that the corresponding function actually accepts.

    :param funcs: a single callable, or an iterable of callables
    :param mk_command: factory turning ``(func, args, kwargs)`` into a command
    :param kwargs: the pool of argname=argval items the commands may draw from
    :return: dict mapping each function to its command

    >>> def add(a, b: float = 0.0) -> float:
    ...     return a + b
    >>> def mult(x: float, y=1):
    ...     return x * y
    >>> def formula1(w, /, x: float, y=1, *, z: int = 1):
    ...     return ((w + x) * y) ** z
    >>> d = commands_dict((add, mult, formula1), a=1, b=2, c=3, d=4, e=5, w=6, x=7)
    >>> d[add]()
    3
    >>> d[mult]()
    7
    >>> d[formula1]()
    13
    """
    if callable(funcs):
        funcs = [funcs]
    commands = extract_commands(
        funcs,
        what_to_do_with_remainding=what_to_do_with_remainding,
        mk_command=mk_command,
        **kwargs,
    )
    return {func: cmd for func, cmd in zip(funcs, commands)}
class Param(Parameter):
    """``inspect.Parameter`` with shorthand kind aliases and a default kind.

    The kind defaults to POSITIONAL_OR_KEYWORD, so a Param can be built from
    a name alone, and the two-letter aliases (PK, PO, KO, VP, VK) keep
    hand-written signatures terse.

    >>> list(map(Param, 'some quick arg params'.split()))
    [<Param "some">, <Param "quick">, <Param "arg">, <Param "params">]
    >>> from inspect import Signature
    >>> P = Param
    >>> Signature([P('x', P.PO), P('y', default=42, annotation=int), P('kw', P.KO)])
    <Signature (x, /, y: int = 42, *, kw)>
    """

    # Short aliases for the five Parameter kinds
    PK = Parameter.POSITIONAL_OR_KEYWORD
    PO = Parameter.POSITIONAL_ONLY
    KO = Parameter.KEYWORD_ONLY
    VP = Parameter.VAR_POSITIONAL
    VK = Parameter.VAR_KEYWORD

    def __init__(self, name, kind=PK, *, default=empty, annotation=empty):
        # Same contract as Parameter.__init__, only the kind gains a default.
        super().__init__(name, kind, default=default, annotation=annotation)
P = Param # shorthand alias, e.g. Signature([P('x', P.PO), P('y')])
def param_has_default_or_is_var_kind(p: Parameter) -> bool:
    """Return True if ``p`` has a default value or is of a variadic kind
    (``*args`` / ``**kwargs``) -- i.e. if a caller may omit it.

    Uses an identity check against the ``Parameter.empty`` sentinel (as
    ``inspect`` itself does): a ``!=`` comparison would invoke the default
    value's own ``__eq__``, which can raise or return a non-bool for
    array-like defaults.
    """
    return p.default is not Parameter.empty or p.kind in var_param_kinds
def parameter_to_dict(p: Parameter) -> dict:
    """Expose a Parameter's four defining fields as a plain dict."""
    return {
        'name': p.name,
        'kind': p.kind,
        'default': p.default,
        'annotation': p.annotation,
    }
# NOTE(review): same value as functools.WRAPPER_UPDATES; its use site is
# outside this view -- presumably fed to functools.update_wrapper. Confirm.
WRAPPER_UPDATES = ('__dict__',)
from typing import Callable
# A default signature of (*no_sig_args, **no_sig_kwargs)
DFLT_SIGNATURE = signature(lambda *no_sig_args, **no_sig_kwargs: ...)
# TODO: Might want to monkey-patch inspect._signature_from_callable to use sigs_for_sigless_builtin_name
def _robust_signature_of_callable(callable_obj: Callable) -> Signature:
r"""Get the signature of a Callable, returning a custom made one for those
builtins that don't have one
>>> _robust_signature_of_callable(
... _robust_signature_of_callable
... ) # has a normal signature
<Signature (callable_obj: Callable) -> inspect.Signature>
>>> s = _robust_signature_of_callable(print) # has one that this module provides
>>> assert isinstance(s, Signature)
>>> # Will be: <Signature (*value, sep=' ', end='\n', file=<_io.TextIOWrapper
name='<stdout>' mode='w' encoding='utf-8'>, flush=False)>
>>> _robust_signature_of_callable(
... zip
... ) # doesn't have one, so will return a blanket one
<Signature (*no_sig_args, **no_sig_kwargs)>
"""
try:
return signature(callable_obj)
except ValueError:
# if isinstance(callable_obj, partial):
# callable_obj = callable_obj.func
obj_name = getattr(callable_obj, '__name__', None)
if obj_name in sigs_for_sigless_builtin_name:
return sigs_for_sigless_builtin_name[obj_name] or DFLT_SIGNATURE
else:
raise
# TODO: See other signature operating functions below in this module:
# Do we need them now that we have Sig?
# Do we want to keep them and have Sig use them?
class Sig(Signature, Mapping):
"""A subclass of inspect.Signature that has a lot of extra api sugar,
such as
- making a signature for a variety of input types (callable,
iterable of callables, parameter lists, strings, etc.)
- has a dict-like interface
- signature merging (with operator interfaces)
- quick access to signature data
- positional/keyword argument mapping.
# Positional/Keyword argument mapping
In python, arguments can be positional (args) or keyword (kwargs).
... sometimes both, sometimes a single one is imposed.
... and you have variadic versions of both.
... and you can have defaults or not.
... and all these different kinds have a particular order they must be in.
It's is mess really. The flexibility is nice -- but still; a mess.
You only really feel the mess if you try to do some meta-programming with your
functions.
Then, methods like `normalize_kind` can help you out, since you can enforce, and
then assume, some stable interface to your functions.
Two of the base methods for dealing with positional (args) and keyword (kwargs)
inputs are:
- `kwargs_from_args_and_kwargs`: Map some args/kwargs input to a keyword-only
expression of the inputs. This is useful if you need to do some processing
based on the argument names.
- `args_and_kwargs_from_kwargs`: Translate a fully keyword expression of some
inputs into an (args, kwargs) pair that can be used to call the function.
(Remember, your function can have constraints, so you may need to do this.
The usual pattern of use of these methods is to use `kwargs_from_args_and_kwargs`
to map all the inputs to their corresponding name, do what needs to be done with
that (example, validation, transformation, decoration...) and then map back to an
(args, kwargs) pair than can actually be used to call the function.
Examples of methods and functions using these:
`call_forgivingly`, `tuple_the_args`, `extract_kwargs`, `extract_args_and_kwargs`,
`source_kwargs`, and `source_args_and_kwargs`.
# Making a signature
You can construct a `Sig` object from a callable,
>>> def f(w, /, x: float = 1, y=1, *, z: int = 1):
... ...
>>> Sig(f)
<Sig (w, /, x: float = 1, y=1, *, z: int = 1)>
but also from any "ParamsAble" object. Such as...
an iterable of Parameter instances, strings, tuples, or dicts:
>>> Sig(
... [
... "a",
... ("b", Parameter.empty, int),
... ("c", 2),
... ("d", 1.0, float),
... dict(name="special", kind=Parameter.KEYWORD_ONLY, default=0),
... ]
... )
<Sig (a, b: int, c=2, d: float = 1.0, *, special=0)>
>>>
>>> Sig(
... [
... "a",
... "b",
... dict(name="args", kind=Parameter.VAR_POSITIONAL),
... dict(name="kwargs", kind=Parameter.VAR_KEYWORD),
... ]
... )
<Sig (a, b, *args, **kwargs)>
The parameters of a signature are like a matrix whose rows are the parameters,
and the 4 columns are their properties: name, kind, default, and annotation
(the two laste ones being optional).
You get a row view when doing `Sig(...).parameters.values()`,
but what if you want a column-view?
Here's how:
>>> def f(w, /, x: float = 1, y=2, *, z: int = 3):
... ...
>>>
>>> s = Sig(f)
>>> s.kinds # doctest: +NORMALIZE_WHITESPACE
{'w': <_ParameterKind.POSITIONAL_ONLY: 0>,
'x': <_ParameterKind.POSITIONAL_OR_KEYWORD: 1>,
'y': <_ParameterKind.POSITIONAL_OR_KEYWORD: 1>,
'z': <_ParameterKind.KEYWORD_ONLY: 3>}
>>> s.annotations
{'x': <class 'float'>, 'z': <class 'int'>}
>>> assert (
... s.annotations == f.__annotations__
... ) # same as what you get in `__annotations__`
>>>
>>> s.defaults
{'x': 1, 'y': 2, 'z': 3}
>>> # Note that it's not the same as you get in __defaults__ though:
>>> assert (
... s.defaults != f.__defaults__ == (1, 2)
... ) # not 3, since __kwdefaults__ has that!
We can sum (i.e. merge) and subtract (i.e. remove arguments) Sig instances.
Also, Sig instance is callable. It has the effect of inserting it's signature in
the input
(in `__signature__`, but also inserting the resulting `__defaults__` and
`__kwdefaults__`).
One of the intents is to be able to do things like:
>>> import inspect
>>> def f(w, /, x: float = 1, y=1, *, z: int = 1):
... ...
>>> def g(i, w, /, j=2):
... ...
...
>>>
>>> @Sig.from_objs(f, g, ["a", ("b", 3.14), ("c", 42, int)])
... def some_func(*args, **kwargs):
... ...
>>> inspect.signature(some_func)
<Signature (w, i, /, a, x: float = 1, y=1, j=2, b=3.14, c: int = 42, *, | |
0.00000000035 * math.cos(3.09037355598 + 77211.44104153418 * self.t)
Z0 += 0.00000000035 * math.cos(5.86095139046 + 18207.81398823521 * self.t)
Z0 += 0.00000000038 * math.cos(1.94104879109 + 76044.9523205358 * self.t)
Z0 += 0.00000000032 * math.cos(1.45277855585 + 78270.82297172339 * self.t)
Z0 += 0.00000000044 * math.cos(4.36195482299 + 52643.7712735028 * self.t)
Z0 += 0.00000000043 * math.cos(0.13191769323 + 1581.959348283 * self.t)
Z0 += 0.00000000032 * math.cos(4.70833495807 + 71582.48457132299 * self.t)
Z0 += 0.00000000037 * math.cos(3.75248447021 + 27140.17152476259 * self.t)
Z0 += 0.00000000042 * math.cos(2.34341217670 + 84546.78527471398 * self.t)
Z0 += 0.00000000031 * math.cos(2.09939558148 + 2648.454825473 * self.t)
Z0 += 0.00000000033 * math.cos(4.22087782315 + 111122.32316754239 * self.t)
Z0 += 0.00000000032 * math.cos(5.93849066304 + 51742.09454527159 * self.t)
Z0 += 0.00000000037 * math.cos(2.22778724531 + 25973.46385288896 * self.t)
Z0 += 0.00000000031 * math.cos(0.06282672228 + 536.8045120954 * self.t)
Z0 += 0.00000000030 * math.cos(0.17325551658 + 224.3447957019 * self.t)
Z0 += 0.00000000030 * math.cos(4.46570213013 + 81706.28436968799 * self.t)
Z0 += 0.00000000029 * math.cos(4.99680928394 + 24491.71669928959 * self.t)
Z0 += 0.00000000035 * math.cos(5.04036468235 + 52182.4348420484 * self.t)
Z0 += 0.00000000033 * math.cos(4.30956969733 + 14477.35118320 * self.t)
Z0 += 0.00000000039 * math.cos(3.99296635763 + 78690.30761559859 * self.t)
Z0 += 0.00000000028 * math.cos(3.41723065188 + 7880.08915333899 * self.t)
Z0 += 0.00000000039 * math.cos(0.38970280197 + 114564.89811250778 * self.t)
Z0 += 0.00000000033 * math.cos(3.85407359005 + 143961.26714946238 * self.t)
Z0 += 0.00000000029 * math.cos(2.14304937049 + 150866.08680029298 * self.t)
Z0 += 0.00000000028 * math.cos(4.95187621332 + 956.2891559706 * self.t)
Z0 += 0.00000000036 * math.cos(4.89757043872 + 26202.34243025941 * self.t)
Z0 += 0.00000000030 * math.cos(0.55025093927 + 33967.99229491319 * self.t)
Z0 += 0.00000000028 * math.cos(1.52043005918 + 40565.2543247742 * self.t)
Z0 += 0.00000000027 * math.cos(5.29884427476 + 1162.4747044078 * self.t)
Z0 += 0.00000000027 * math.cos(3.44440418351 + 141762.17980617538 * self.t)
Z0 += 0.00000000035 * math.cos(2.50821332798 + 72936.23331633979 * self.t)
Z0 += 0.00000000028 * math.cos(0.83354394743 + 42153.969003049 * self.t)
Z0 += 0.00000000036 * math.cos(4.02142487381 + 79852.78232000639 * self.t)
Z0 += 0.00000000026 * math.cos(1.15352991801 + 51322.60990139639 * self.t)
Z0 += 0.00000000026 * math.cos(0.13577439886 + 22747.2907148744 * self.t)
Z0 += 0.00000000027 * math.cos(4.82379477303 + 110.2063212194 * self.t)
Z0 += 0.00000000025 * math.cos(0.71097564554 + 50483.640613646 * self.t)
Z0 += 0.00000000024 * math.cos(2.92248010137 + 121335.60871375339 * self.t)
Z0 += 0.00000000026 * math.cos(4.56151347419 + 183724.70054311278 * self.t)
Z0 += 0.00000000024 * math.cos(2.16545180970 + 32769.1279949738 * self.t)
Z0 += 0.00000000031 * math.cos(2.33336639968 + 51756.3216392732 * self.t)
Z0 += 0.00000000032 * math.cos(2.93016714121 + 181555.94006083018 * self.t)
Z0 += 0.00000000027 * math.cos(4.32935860229 + 124156.43985787958 * self.t)
Z0 += 0.00000000023 * math.cos(6.16949223424 + 39743.7636327506 * self.t)
Z0 += 0.00000000023 * math.cos(2.77333133719 + 155997.72788435058 * self.t)
Z0 += 0.00000000022 * math.cos(2.90047805068 + 25863.55834587229 * self.t)
Z0 += 0.00000000023 * math.cos(0.26296960044 + 103711.71527998279 * self.t)
Z0 += 0.00000000022 * math.cos(0.49237419293 + 23439.44831610119 * self.t)
Z0 += 0.00000000027 * math.cos(2.77269830479 + 24505.94379329119 * self.t)
Z0 += 0.00000000022 * math.cos(0.39938514656 + 26164.1692128498 * self.t)
Z0 += 0.00000000022 * math.cos(0.07820939757 + 78187.44335344699 * self.t)
Z0 += 0.00000000022 * math.cos(3.71330627735 + 129483.91596626239 * self.t)
Z0 += 0.00000000023 * math.cos(5.63887355161 + 522.5774180938 * self.t)
Z0 += 0.00000000021 * math.cos(3.36821177399 + 1375.7737998458 * self.t)
Z0 += 0.00000000029 * math.cos(5.62929042847 + 44181.27784112419 * self.t)
Z0 += 0.00000000022 * math.cos(4.35422573947 + 26720.68688088739 * self.t)
Z0 += 0.00000000021 * math.cos(5.12497732226 + 11610.5519583742 * self.t)
Z0 += 0.00000000025 * math.cos(5.36835518856 + 26013.1215430069 * self.t)
Z0 += 0.00000000024 * math.cos(5.55813303813 + 130969.20667296558 * self.t)
Z0 += 0.00000000020 * math.cos(4.79424075731 + 25780.3455206046 * self.t)
Z0 += 0.00000000020 * math.cos(3.82740705898 + 24925.4284371664 * self.t)
Z0 += 0.00000000020 * math.cos(4.26878389727 + 25131.61398560359 * self.t)
Z0 += 0.00000000021 * math.cos(5.10505150441 + 25977.69682035479 * self.t)
Z0 += 0.00000000020 * math.cos(3.32855955978 + 131498.89763806018 * self.t)
Z0 += 0.00000000020 * math.cos(1.29805086237 + 467.9649903544 * self.t)
Z0 += 0.00000000023 * math.cos(2.06377601353 + 100909.03762133139 * self.t)
Z0 += 0.00000000027 * math.cos(4.46165349883 + 316.3918696566 * self.t)
Z0 += 0.00000000019 * math.cos(2.33132135274 + 26395.46076254379 * self.t)
Z0 += 0.00000000019 * math.cos(0.35229627877 + 78417.48823520739 * self.t)
Z0 += 0.00000000019 * math.cos(2.86176933014 + 50579.61984086379 * self.t)
Z0 += 0.00000000018 * math.cos(4.33826105585 + 104371.28232719658 * self.t)
Z0 += 0.00000000022 * math.cos(1.69381062404 + 106570.36967048359 * self.t)
Z0 += 0.00000000021 * math.cos(0.89581658307 + 339142.74084046457 * self.t)
Z0 += 0.00000000025 * math.cos(5.95532312645 + 146314.13330323418 * self.t)
Z0 += 0.00000000020 * math.cos(0.11725114676 + 52815.7035694624 * self.t)
Z0 += 0.00000000018 * math.cos(1.66756826028 + 23888.81579828719 * self.t)
Z0 += 0.00000000022 * math.cos(4.37150668135 + 1478.8665740644 * self.t)
Z0 += 0.00000000017 * math.cos(4.39063105551 + 60055.89543648739 * self.t)
Z0 += 0.00000000017 * math.cos(3.17668084118 + 70269.18098269838 * self.t)
Z0 += 0.00000000017 * math.cos(1.74029229152 + 104138.31347085879 * self.t)
Z0 += 0.00000000017 * math.cos(5.24411288462 + 102232.84870591838 * self.t)
Z0 += 0.00000000019 * math.cos(0.11691865573 + 78109.93061423779 * self.t)
Z0 += 0.00000000017 * math.cos(3.28697052824 + 25565.3257234804 * self.t)
Z0 += 0.00000000019 * math.cos(0.84289230962 + 188276.65404017158 * self.t)
Z0 += 0.00000000018 * math.cos(0.47252163890 + 27999.1026247914 * self.t)
Z0 += 0.00000000016 * math.cos(2.81320924670 + 52101.02468458109 * self.t)
Z0 += 0.00000000016 * math.cos(4.20710393311 + 134991.46920492979 * self.t)
Z0 += 0.00000000019 * math.cos(0.54267379272 + 50593.84693486539 * self.t)
Z0 += 0.00000000015 * math.cos(5.78613478653 + 24712.1293417284 * self.t)
Z0 += 0.00000000018 * math.cos(2.23136091809 + 104331.94280539699 * self.t)
Z0 += 0.00000000016 * math.cos(3.30957717580 + 97112.93697469679 * self.t)
Z0 += 0.00000000016 * math.cos(1.02655918243 + 26162.6847401415 * self.t)
Z0 += 0.00000000015 * math.cos(0.51501957594 + 13362.4497067992 * self.t)
Z0 += 0.00000000015 * math.cos(5.44457668379 + 28286.9904848612 * self.t)
Z0 += 0.00000000015 * math.cos(3.33697839260 + 206.1855484372 * self.t)
Z0 += 0.00000000015 * math.cos(3.09319808895 + 25984.8103673556 * self.t)
Z0 += 0.00000000019 * math.cos(5.95214817336 + 29428.515568274 * self.t)
Z0 += 0.00000000017 * math.cos(1.60411390680 + 26507.38778544939 * self.t)
Z0 += 0.00000000016 * math.cos(3.62113927445 + 26610.48055966799 * self.t)
Z0 += 0.00000000018 * math.cos(4.55509949626 + 51543.0225438352 * self.t)
Z0 += 0.00000000016 * math.cos(4.95762784074 + 25771.5112719176 * self.t)
Z0 += 0.00000000014 * math.cos(0.15853597470 + 647.0108333148 * self.t)
Z0 += 0.00000000016 * math.cos(3.53658275362 + 25551.09862947879 * self.t)
Z0 += 0.00000000013 * math.cos(2.28786159405 + 12725.453434775 * self.t)
Z0 += 0.00000000016 * math.cos(3.05451392033 + 26190.99591579279 * self.t)
Z0 += 0.00000000013 * math.cos(5.81922389967 + 157483.01859105378 * self.t)
Z0 += 0.00000000013 * math.cos(4.03160498458 + 24356.7807886416 * self.t)
Z0 += 0.00000000013 * math.cos(4.49878848868 + 29416.03879785439 * self.t)
Z0 += 0.00000000016 * math.cos(1.14380482430 + 26404.2950112308 * self.t)
Z0 += 0.00000000016 * math.cos(4.77704247519 + 24609.0365675098 * self.t)
Z0 += 0.00000000013 * math.cos(1.33944288754 + 52808.59002246159 * self.t)
Z0 += 0.00000000015 * math.cos(1.50513017421 + 181505.94343892598 * self.t)
Z0 += 0.00000000012 * math.cos(0.92918175452 + 104347.73123093879 * self.t)
Z0 += 0.00000000014 * math.cos(2.44451494167 + 26421.7590823436 * self.t)
Z0 += 0.00000000012 * math.cos(1.66798607577 + 51013.33157874059 * self.t)
Z0 += 0.00000000012 * math.cos(4.24892890325 + 52225.8029050526 * self.t)
Z0 += 0.00000000014 * math.cos(4.68967753702 + 25754.0472008048 * self.t)
Z0 += 0.00000000013 * math.cos(0.63063491269 + 51639.00177105299 * self.t)
Z0 += 0.00000000013 * math.cos(0.51980975221 + 51951.46148744649 * self.t)
Z0 += 0.00000000013 * math.cos(0.11999669275 + 26294.08869001139 * self.t)
Z0 += 0.00000000012 * math.cos(5.63990253843 + 104355.49390165479 * self.t)
Z0 += 0.00000000013 * math.cos(0.37271479520 + 74923.09699802278 * self.t)
Z0 += 0.00000000015 * math.cos(2.19958896111 + 22759.76748529401 * self.t)
Z0 += 0.00000000013 * math.cos(1.47842980598 + 54294.57014352679 * self.t)
Z0 += 0.00000000012 * math.cos(3.10508217703 + 27819.0254945068 * self.t)
Z0 += 0.00000000011 * math.cos(2.42209220629 + 97580.90196505119 * self.t)
Z0 += 0.00000000013 * math.cos(2.58513127775 + 116917.76426627958 * self.t)
Z0 += 0.00000000011 * math.cos(4.76885794954 + 49527.35145767539 * self.t)
Z0 += 0.00000000011 * math.cos(3.92593978045 + 26521.614879451 * self.t)
Z0 += 0.00000000011 * math.cos(2.89444277066 + 119116.85160956658 * self.t)
Z0 += 0.00000000012 * math.cos(5.58737020094 + 25881.717593137 * self.t)
Z0 += 0.00000000013 * math.cos(5.59537220560 + 156100.82065856918 * self.t)
Z0 += 0.00000000012 * math.cos(2.01299993075 + 51219.51712717779 * self.t)
Z0 += 0.00000000011 * math.cos(0.99253734697 + 25455.119402261 * self.t)
Z0 += 0.00000000014 * math.cos(5.68122574937 + 154938.34595416138 * self.t)
Z0 += 0.00000000015 * math.cos(5.27886514517 + 178063.36849396059 * self.t)
Z0 += 0.00000000014 * math.cos(6.12654802167 + 52278.89905736699 * self.t)
Z0 += 0.00000000011 * math.cos(0.06908636848 + 52072.71350892979 * self.t)
Z0 += 0.00000000011 * math.cos(0.80982228740 + 104564.91166173479 * self.t)
Z0 += 0.00000000013 * math.cos(3.96505351243 + 68241.87214462319 * self.t)
Z0 += 0.00000000011 * math.cos(2.65559265085 + 52125.80966124419 * self.t)
Z0 += 0.00000000012 * math.cos(6.26057108211 + 24176.703658357 * self.t)
Z0 += 0.00000000012 * | |
'hp': 2100,
'sp': 6000,
'maxCargo': 3,
'maxCrew': 8,
'maxCannons': 14,
'maxBroadsides': 20,
'rammingPower': 900,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
EITC_TYRANT: {
'setShipClass': EITC_TYRANT,
'modelClass': WARSHIPL3,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Square, 3),
'mastConfig2': (Masts.Main_Square, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Multi, 3),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L3] * 14,
'leftBroadsides': [
Cannons.L2] * 10,
'rightBroadsides': [
Cannons.L2] * 10,
'broadsideAmmo': InventoryType.CannonExplosive,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': 0,
'hp': 4200,
'sp': 6000,
'maxCargo': 5,
'maxCrew': 8,
'maxCannons': 14,
'maxBroadsides': 20,
'rammingPower': 900,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
EITC_SENTINEL: {
'setShipClass': EITC_SENTINEL,
'modelClass': MERCHANTL1,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Square, 2),
'mastConfig2': (Masts.Main_Square, 1),
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L1] * 4,
'leftBroadsides': [
Cannons.L2] * 5,
'rightBroadsides': [
Cannons.L2] * 5,
'broadsideAmmo': InventoryType.CannonFirebrand,
'cannonAmmo': InventoryType.CannonChainShot,
'prow': Prows.Lady,
'hp': 1400,
'sp': 4000,
'maxCargo': 2,
'maxCrew': 6,
'maxCannons': 4,
'maxBroadsides': 10,
'rammingPower': 150,
'acceleration': 1.0 * defaultAcceleration,
'maxSpeed': 0.69999999999999996 * defaultMaxSpeed,
'reverseAcceleration': 0.59999999999999998 * defaultReverseAcceleration,
'maxReverseSpeed': 0.59999999999999998 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
EITC_IRONWALL: {
'setShipClass': EITC_IRONWALL,
'modelClass': MERCHANTL2,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Square, 1),
'mastConfig2': (Masts.Main_Square, 2),
'mastConfig3': (Masts.Main_Square, 1),
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L1] * 6,
'leftBroadsides': [
Cannons.L3] * 7,
'rightBroadsides': [
Cannons.L3] * 7,
'broadsideAmmo': InventoryType.CannonRoundShot,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': Prows.Lady,
'hp': 1800,
'sp': 5000,
'maxCargo': 3,
'maxCrew': 10,
'maxCannons': 8,
'maxBroadsides': 18,
'rammingPower': 300,
'acceleration': 1.0 * defaultAcceleration,
'maxSpeed': 0.69999999999999996 * defaultMaxSpeed,
'reverseAcceleration': 0.59999999999999998 * defaultReverseAcceleration,
'maxReverseSpeed': 0.59999999999999998 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
EITC_OGRE: {
'setShipClass': EITC_OGRE,
'modelClass': MERCHANTL3,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Square, 2),
'mastConfig2': (Masts.Main_Square, 2),
'mastConfig3': (Masts.Main_Square, 2),
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L1] * 8,
'leftBroadsides': [
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0,
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2],
'rightBroadsides': [
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
0,
0,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2,
Cannons.L2],
'broadsideAmmo': InventoryType.CannonFirebrand,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': Prows.Lady,
'hp': 1800,
'sp': 5500,
'maxCargo': 3,
'maxCrew': 14,
'maxCannons': 10,
'maxBroadsides': 24,
'rammingPower': 600,
'acceleration': 1.0 * defaultAcceleration,
'maxSpeed': 0.69999999999999996 * defaultMaxSpeed,
'reverseAcceleration': 0.59999999999999998 * defaultReverseAcceleration,
'maxReverseSpeed': 0.59999999999999998 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
EITC_BEHEMOTH: {
'setShipClass': EITC_BEHEMOTH,
'modelClass': MERCHANTL3,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Square, 3),
'mastConfig2': (Masts.Main_Square, 3),
'mastConfig3': (Masts.Main_Square, 3),
'foremastConfig': (Masts.Fore_Multi, 2),
'aftmastConfig': 0,
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L1] * 10,
'leftBroadsides': [
Cannons.L2] * 12,
'rightBroadsides': [
Cannons.L2] * 12,
'broadsideAmmo': InventoryType.CannonFirebrand,
'cannonAmmo': InventoryType.CannonExplosive,
'prow': Prows.Lady,
'hp': 1800,
'sp': 6000,
'maxCargo': 3,
'maxCrew': 14,
'maxCannons': 10,
'maxBroadsides': 24,
'rammingPower': 600,
'acceleration': 1.0 * defaultAcceleration,
'maxSpeed': 0.69999999999999996 * defaultMaxSpeed,
'reverseAcceleration': 0.59999999999999998 * defaultReverseAcceleration,
'maxReverseSpeed': 0.59999999999999998 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
EITC_SEA_VIPER: {
'setShipClass': EITC_SEA_VIPER,
'modelClass': INTERCEPTORL1,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Tri, 2),
'mastConfig2': 0,
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Tri, 1),
'aftmastConfig': 0,
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L1] * 2,
'leftBroadsides': [
Cannons.L1] * 3,
'rightBroadsides': [
Cannons.L1] * 3,
'broadsideAmmo': InventoryType.CannonChainShot,
'cannonAmmo': InventoryType.CannonRoundShot,
'prow': Prows.Lady,
'hp': 1000,
'sp': 3000,
'maxCargo': 1,
'maxCrew': 4,
'maxCannons': 2,
'maxBroadsides': 6,
'rammingPower': 75,
'acceleration': 1.2 * defaultAcceleration,
'maxSpeed': 0.90000000000000002 * defaultMaxSpeed,
'reverseAcceleration': 0.80000000000000004 * defaultReverseAcceleration,
'maxReverseSpeed': 0.80000000000000004 * defaultMaxReverseAcceleration,
'turn': 0.80000000000000004 * defaultTurn,
'maxTurn': 0.80000000000000004 * defaultMaxTurn },
EITC_BLOODHOUND: {
'setShipClass': EITC_BLOODHOUND,
'modelClass': INTERCEPTORL2,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Tri, 2),
'mastConfig2': 0,
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Tri, 1),
'aftmastConfig': 0,
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L1] * 6,
'leftBroadsides': [
Cannons.L1] * 5,
'rightBroadsides': [
Cannons.L1] * 5,
'broadsideAmmo': InventoryType.CannonFirebrand,
'cannonAmmo': InventoryType.CannonChainShot,
'prow': Prows.Lady,
'hp': 1200,
'sp': 3500,
'maxCargo': 2,
'maxCrew': 8,
'maxCannons': 6,
'maxBroadsides': 10,
'rammingPower': 225,
'acceleration': 1.2 * defaultAcceleration,
'maxSpeed': 0.90000000000000002 * defaultMaxSpeed,
'reverseAcceleration': 0.80000000000000004 * defaultReverseAcceleration,
'maxReverseSpeed': 0.80000000000000004 * defaultMaxReverseAcceleration,
'turn': 0.80000000000000004 * defaultTurn,
'maxTurn': 0.80000000000000004 * defaultMaxTurn },
EITC_BARRACUDA: {
'setShipClass': EITC_BARRACUDA,
'modelClass': INTERCEPTORL3,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Tri, 2),
'mastConfig2': 0,
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Tri, 1),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L1] * 6,
'leftBroadsides': [
Cannons.L1] * 7,
'rightBroadsides': [
Cannons.L1] * 7,
'broadsideAmmo': InventoryType.CannonFirebrand,
'cannonAmmo': InventoryType.CannonChainShot,
'prow': Prows.Lady,
'hp': 1200,
'sp': 4000,
'maxCargo': 2,
'maxCrew': 3,
'maxCannons': 8,
'maxBroadsides': 14,
'rammingPower': 450,
'acceleration': 1.2 * defaultAcceleration,
'maxSpeed': 0.90000000000000002 * defaultMaxSpeed,
'reverseAcceleration': 0.80000000000000004 * defaultReverseAcceleration,
'maxReverseSpeed': 0.80000000000000004 * defaultMaxReverseAcceleration,
'turn': 0.80000000000000004 * defaultTurn,
'maxTurn': 0.80000000000000004 * defaultMaxTurn },
EITC_CORSAIR: {
'setShipClass': EITC_CORSAIR,
'modelClass': INTERCEPTORL3,
'defaultStyle': Styles.EITC,
'mastConfig1': (Masts.Main_Tri, 2),
'mastConfig2': 0,
'mastConfig3': 0,
'foremastConfig': (Masts.Fore_Tri, 1),
'aftmastConfig': (Masts.Aft_Tri, 1),
'sailLogo': Logos.EITC,
'cannons': [
Cannons.L1] * 8,
'leftBroadsides': [
Cannons.L1] * 7,
'rightBroadsides': [
Cannons.L1] * 7,
'broadsideAmmo': InventoryType.CannonExplosive,
'cannonAmmo': InventoryType.CannonFirebrand,
'prow': Prows.Lady,
'hp': 1200,
'sp': 4000,
'maxCargo': 2,
'maxCrew': 3,
'maxCannons': 8,
'maxBroadsides': 14,
'rammingPower': 450,
'acceleration': 1.2 * defaultAcceleration,
'maxSpeed': 0.90000000000000002 * defaultMaxSpeed,
'reverseAcceleration': 0.80000000000000004 * defaultReverseAcceleration,
'maxReverseSpeed': 0.80000000000000004 * defaultMaxReverseAcceleration,
'turn': 0.80000000000000004 * defaultTurn,
'maxTurn': 0.80000000000000004 * defaultMaxTurn },
SKEL_PHANTOM: {
'setShipClass': SKEL_PHANTOM,
'modelClass': SKEL_WARSHIPL3,
'defaultStyle': Styles.Undead,
'mastConfig1': (Masts.Skel_Main_A, 3),
'mastConfig2': (Masts.Skel_Main_B, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Skel_Fore, 2),
'aftmastConfig': (Masts.Skel_Aft, 2),
'sailLogo': 0,
'cannons': [
Cannons.Skel_L3] * 6,
'leftBroadsides': [
0,
Cannons.Skel_L2,
Cannons.Skel_L2,
Cannons.Skel_L2,
Cannons.Skel_L2,
Cannons.Skel_L2,
0],
'rightBroadsides': [
0,
Cannons.Skel_L2,
Cannons.Skel_L2,
Cannons.Skel_L2,
Cannons.Skel_L2,
Cannons.Skel_L2,
0],
'broadsideAmmo': InventoryType.CannonThunderbolt,
'cannonAmmo': InventoryType.CannonChainShot,
'prow': 0,
'hp': 2500,
'sp': 6000,
'maxCargo': 2,
'maxCrew': 8,
'maxCannons': 8,
'maxBroadsides': 14,
'rammingPower': 600,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
SKEL_REVENANT: {
'setShipClass': SKEL_REVENANT,
'modelClass': SKEL_WARSHIPL3,
'defaultStyle': Styles.Undead,
'mastConfig1': (Masts.Skel_Main_A, 3),
'mastConfig2': (Masts.Skel_Main_B, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Skel_Fore, 2),
'aftmastConfig': (Masts.Skel_Aft, 2),
'sailLogo': 0,
'cannons': [
Cannons.Skel_L3] * 6,
'leftBroadsides': [
Cannons.Skel_L2] * 6,
'rightBroadsides': [
Cannons.Skel_L2] * 6,
'broadsideAmmo': InventoryType.CannonFury,
'cannonAmmo': InventoryType.CannonRoundShot,
'prow': 0,
'hp': 2500,
'sp': 6000,
'maxCargo': 2,
'maxCrew': 8,
'maxCannons': 8,
'maxBroadsides': 14,
'rammingPower': 600,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
SKEL_STORM_REAPER: {
'setShipClass': SKEL_STORM_REAPER,
'modelClass': SKEL_WARSHIPL3,
'defaultStyle': Styles.Undead,
'mastConfig1': (Masts.Skel_Main_A, 3),
'mastConfig2': (Masts.Skel_Main_B, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Skel_Fore, 2),
'aftmastConfig': (Masts.Skel_Aft, 2),
'sailLogo': 0,
'cannons': [
Cannons.Skel_L3] * 6,
'leftBroadsides': [
Cannons.Skel_L2] * 7,
'rightBroadsides': [
Cannons.Skel_L2] * 7,
'broadsideAmmo': InventoryType.CannonThunderbolt,
'cannonAmmo': InventoryType.CannonThunderbolt,
'prow': 0,
'hp': 2500,
'sp': 6000,
'maxCargo': 3,
'maxCrew': 8,
'maxCannons': 8,
'maxBroadsides': 14,
'rammingPower': 600,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
SKEL_BLACK_HARBINGER: {
'setShipClass': SKEL_BLACK_HARBINGER,
'modelClass': SKEL_WARSHIPL3,
'defaultStyle': Styles.Undead,
'mastConfig1': (Masts.Skel_Main_A, 3),
'mastConfig2': (Masts.Skel_Main_B, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Skel_Fore, 2),
'aftmastConfig': (Masts.Skel_Aft, 2),
'sailLogo': 0,
'cannons': [
Cannons.Skel_L3] * 6,
'leftBroadsides': [
Cannons.Skel_L2] * 7,
'rightBroadsides': [
Cannons.Skel_L2] * 7,
'broadsideAmmo': InventoryType.CannonFury,
'cannonAmmo': InventoryType.CannonFury,
'prow': 0,
'hp': 2500,
'sp': 6000,
'maxCargo': 3,
'maxCrew': 8,
'maxCannons': 8,
'maxBroadsides': 14,
'rammingPower': 600,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
SKEL_DEATH_OMEN: {
'setShipClass': SKEL_DEATH_OMEN,
'modelClass': SKEL_WARSHIPL3,
'defaultStyle': Styles.Undead,
'mastConfig1': (Masts.Skel_Main_A, 3),
'mastConfig2': (Masts.Skel_Main_B, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Skel_Fore, 2),
'aftmastConfig': (Masts.Skel_Aft, 2),
'sailLogo': 0,
'cannons': [
Cannons.Skel_L3] * 6,
'leftBroadsides': [
Cannons.Skel_L2] * 7,
'rightBroadsides': [
Cannons.Skel_L2] * 7,
'broadsideAmmo': InventoryType.CannonFury,
'cannonAmmo': InventoryType.CannonThunderbolt,
'prow': 0,
'hp': 2500,
'sp': 6000,
'maxCargo': 3,
'maxCrew': 8,
'maxCannons': 8,
'maxBroadsides': 14,
'rammingPower': 600,
'acceleration': 1.1000000000000001 * defaultAcceleration,
'maxSpeed': 0.80000000000000004 * defaultMaxSpeed,
'reverseAcceleration': 0.69999999999999996 * defaultReverseAcceleration,
'maxReverseSpeed': 0.69999999999999996 * defaultMaxReverseAcceleration,
'turn': 0.59999999999999998 * defaultTurn,
'maxTurn': 0.59999999999999998 * defaultMaxTurn },
JOLLY_ROGER: {
'setShipClass': JOLLY_ROGER,
'modelClass': SKEL_WARSHIPL3,
'defaultStyle': Styles.Undead,
'mastConfig1': (Masts.Skel_Main_A, 3),
'mastConfig2': (Masts.Skel_Main_B, 3),
'mastConfig3': 0,
'foremastConfig': (Masts.Skel_Fore, 2),
'aftmastConfig': (Masts.Skel_Aft, 2),
'sailLogo': 0,
'cannons': [
Cannons.Skel_L3] * 6,
'leftBroadsides': [
Cannons.Skel_L2] * 7,
'rightBroadsides': [
Cannons.Skel_L2] * 7,
'broadsideAmmo': InventoryType.CannonThunderbolt,
'cannonAmmo': InventoryType.CannonExplosive,
'prow': 0,
'hp': | |
the boot device persistently
:raises: Any exception from set_boot_device except IPMIFailure
(setting of boot device using ipmi is expected to fail).
"""
try:
manager_utils.node_set_boot_device(task, device,
persistent=persistent)
except exception.IPMIFailure:
with excutils.save_and_reraise_exception() as ctxt:
if get_boot_mode_for_deploy(task.node) == 'uefi':
ctxt.reraise = False
LOG.warning("ipmitool is unable to set boot device while "
"the node %s is in UEFI boot mode. Please set "
"the boot device manually.", task.node.uuid)
def is_secure_boot_requested(node):
    """Tell whether UEFI secure boot was requested for the deploy.

    The answer is taken from the 'secure_boot' entry of the node's
    instance_info capabilities.

    :param node: a single Node.
    :raises: InvalidParameterValue if the capabilities string is not a
        dictionary or is malformed.
    :returns: True if secure_boot is requested.
    """
    caps = parse_instance_info_capabilities(node)
    return caps.get('secure_boot', 'false').lower() == 'true'
def is_trusted_boot_requested(node):
    """Tell whether trusted boot was requested for the deploy.

    The answer is taken from the 'trusted_boot' entry of the node's
    instance_info capabilities.

    :param node: a single Node.
    :raises: InvalidParameterValue if the capabilities string is not a
        dictionary or is malformed.
    :returns: True if trusted_boot is requested.
    """
    caps = parse_instance_info_capabilities(node)
    return caps.get('trusted_boot', 'false').lower() == 'true'
def get_disk_label(node):
    """Return the disk label requested for deploy, if any.

    :param node: a single Node.
    :raises: InvalidParameterValue if the capabilities string is not a
        dictionary or is malformed.
    :returns: the disk label or None if no disk label was specified.
    """
    return parse_instance_info_capabilities(node).get('disk_label')
def get_boot_mode_for_deploy(node):
    """Return the boot mode that would be used for deploy.

    Resolution order:

    1. 'uefi' when 'secure_boot' is 'true' in instance_info capabilities;
    2. 'bios' when 'trusted_boot' is 'true' in instance_info capabilities;
    3. the 'boot_mode' capability from the node's properties, if set;
    4. 'deploy_boot_mode' from the node's instance_info;
    5. None when none of the above yields a value.

    :param node: an ironic node object.
    :returns: 'bios', 'uefi' or None
    """
    if is_secure_boot_requested(node):
        LOG.debug('Deploy boot mode is uefi for %s.', node.uuid)
        return 'uefi'

    if is_trusted_boot_requested(node):
        # TODO(lintan) Trusted boot also supports uefi, but at the moment,
        # it should only boot with bios.
        LOG.debug('Deploy boot mode is bios for %s.', node.uuid)
        return 'bios'

    boot_mode = driver_utils.get_node_capability(node, 'boot_mode')
    if boot_mode is None:
        boot_mode = node.instance_info.get('deploy_boot_mode')

    LOG.debug('Deploy boot mode is %(boot_mode)s for %(node)s.',
              {'boot_mode': boot_mode, 'node': node.uuid})
    if boot_mode:
        return boot_mode.lower()
    return boot_mode
def get_pxe_boot_file(node):
    """Return the PXE boot file name requested for deploy.

    An architecture-specific boot file is preferred; when no entry exists
    for the node's cpu_arch, fall back to the BIOS or UEFI boot file
    according to the deploy boot mode.

    :param node: A single Node.
    :returns: The PXE boot file name.
    """
    arch = node.properties.get('cpu_arch')
    by_arch = CONF.pxe.pxe_bootfile_name_by_arch.get(arch)
    if by_arch is not None:
        return by_arch
    if get_boot_mode_for_deploy(node) == 'uefi':
        return CONF.pxe.uefi_pxe_bootfile_name
    return CONF.pxe.pxe_bootfile_name
def get_pxe_config_template(node):
    """Return the PXE config template file name requested for deploy.

    An architecture-specific template is preferred; when no entry exists
    for the node's cpu_arch, fall back to the BIOS or UEFI template
    according to the deploy boot mode.

    :param node: A single Node.
    :returns: The PXE config template file name.
    """
    arch = node.properties.get('cpu_arch')
    by_arch = CONF.pxe.pxe_config_template_by_arch.get(arch)
    if by_arch is not None:
        return by_arch
    if get_boot_mode_for_deploy(node) == 'uefi':
        return CONF.pxe.uefi_pxe_config_template
    return CONF.pxe.pxe_config_template
def validate_capabilities(node):
    """Validate that the supported capabilities have valid values.

    For every capability listed in SUPPORTED_CAPABILITIES, check its value
    (when present) both in the node's 'properties/capabilities' and in
    'instance_info/capabilities'.

    The node can have a capability as part of the 'properties' or
    'instance_info' or both, and the actual value does not need to be the
    same in the two places.

    :param node: an ironic node object.
    :raises: InvalidParameterValue, if a capability is not set to a
        valid value.
    """
    exp_str = _("The parameter '%(capability)s' from %(field)s has an "
                "invalid value: '%(value)s'. Acceptable values are: "
                "%(valid_values)s.")

    def _check_value(capability_name, value, valid_values, field):
        # An absent/empty value is acceptable; only reject wrong values.
        if value and (value not in valid_values):
            raise exception.InvalidParameterValue(
                exp_str %
                {'capability': capability_name, 'field': field,
                 'value': value, 'valid_values': ', '.join(valid_values)})

    # Parse the instance_info capabilities once, not once per iteration.
    instance_capabilities = parse_instance_info_capabilities(node)

    for capability_name, valid_values in SUPPORTED_CAPABILITIES.items():
        # Validate capability_name in node's properties/capabilities
        _check_value(capability_name,
                     driver_utils.get_node_capability(node, capability_name),
                     valid_values, "properties/capabilities")
        # Validate capability_name in node's instance_info['capabilities']
        _check_value(capability_name,
                     instance_capabilities.get(capability_name),
                     valid_values, "instance_info['capabilities']")
def validate_image_properties(ctx, deploy_info, properties):
    """Check that the image exists and exposes the required properties.

    For Glance images, verify that the image is present in Glance and that
    either its metadata or ``deploy_info`` contains every property listed
    in ``properties``. For non-Glance images, the image service metadata
    and ``deploy_info`` are consulted the same way.

    :param ctx: security context
    :param deploy_info: the deploy_info to be validated
    :param properties: the list of image meta-properties to be validated.
    :raises: InvalidParameterValue if:
        * connection to glance failed;
        * authorization for accessing image failed;
        * HEAD request to image URL failed or returned response code != 200;
        * HEAD request response does not contain Content-Length header;
        * the protocol specified in image URL is not supported.
    :raises: MissingParameterValue if the image doesn't contain
        the mentioned properties.
    """
    image_href = deploy_info['image_source']
    try:
        img_service = image_service.get_image_service(image_href, context=ctx)
        image_props = img_service.show(image_href)['properties']
    except (exception.GlanceConnectionFailed,
            exception.ImageNotAuthorized,
            exception.Invalid):
        raise exception.InvalidParameterValue(_(
            "Failed to connect to Glance to get the properties "
            "of the image %s") % image_href)
    except exception.ImageNotFound:
        raise exception.InvalidParameterValue(_(
            "Image %s can not be found.") % image_href)
    except exception.ImageRefValidationFailed as e:
        raise exception.InvalidParameterValue(e)

    # A property may be supplied either by deploy_info or the image itself.
    missing = [prop for prop in properties
               if not (deploy_info.get(prop) or image_props.get(prop))]
    if missing:
        raise exception.MissingParameterValue(_(
            "Image %(image)s is missing the following properties: "
            "%(properties)s") % {'image': image_href,
                                 'properties': ', '.join(missing)})
def get_default_boot_option():
    """Return the configured default boot option, or 'netboot' if unset."""
    configured = CONF.deploy.default_boot_option
    return configured if configured else 'netboot'
def get_boot_option(node):
    """Return the boot option requested for this node.

    :param node: A single Node.
    :raises: InvalidParameterValue if the capabilities string is not a
        dict or is malformed.
    :returns: A string representing the boot option type. Defaults to
        'netboot'.
    """
    capabilities = parse_instance_info_capabilities(node)
    if 'boot_option' in capabilities:
        return capabilities['boot_option'].lower()
    return get_default_boot_option().lower()
def build_agent_options(node):
    """Assemble the options passed to the agent ramdisk.

    :param node: an ironic node object
    :returns: a dictionary containing the parameters to be passed to
        agent ramdisk.
    """
    return {
        'ipa-api-url': get_ironic_api_url(),
        # NOTE: The below entry is a temporary workaround for bug/1433812
        'coreos.configdrive': 0,
    }
def prepare_inband_cleaning(task, manage_boot=True):
    """Prepare the node to boot into the agent for in-band cleaning.

    The steps performed are:

    1. Set up the cleaning ports for the bare metal node and record the
       clean parameters in the node's driver_internal_info.
    2. When ``manage_boot`` is True, call the boot interface's
       'prepare_ramdisk' to set up booting of the agent ramdisk.
    3. Reboot the bare metal node.

    :param task: a TaskManager object containing the node
    :param manage_boot: If this is set to True, this method calls the
        'prepare_ramdisk' method of boot interface to boot the agent
        ramdisk. If False, it skips preparing the boot agent ramdisk using
        boot interface, and assumes that the environment is setup to
        automatically boot agent ramdisk every time bare metal node is
        rebooted.
    :returns: states.CLEANWAIT to signify an asynchronous prepare.
    :raises: NetworkError, NodeCleaningFailure if the previous cleaning ports
        cannot be removed or if new cleaning ports cannot be created.
    :raises: InvalidParameterValue if cleaning network UUID config option has
        an invalid value.
    """
    task.driver.network.add_cleaning_network(task)

    # Record the config parameters IPA needs in driver_internal_info.
    agent_add_clean_params(task)

    if manage_boot:
        task.driver.boot.prepare_ramdisk(
            task, build_agent_options(task.node))

    manager_utils.node_power_action(task, states.REBOOT)

    # The conductor now waits for the agent to boot.
    return states.CLEANWAIT
def tear_down_inband_cleaning(task, manage_boot=True):
"""Tears down the environment setup for in-band cleaning.
This method does the following:
1. Powers off the bare metal node.
2. If 'manage_boot' parameter is | |
from pypy.rpython.memory.gctransform.transform import GCTransformer
from pypy.rpython.memory.gctransform.support import find_gc_ptrs_in_type, \
get_rtti, ll_call_destructor, type_contains_pyobjs, var_ispyobj
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython import rmodel
from pypy.rpython.memory import gctypelayout
from pypy.rpython.memory.gc import marksweep
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rlib.rarithmetic import ovfcheck
from pypy.rlib import rstack
from pypy.rlib.debug import ll_assert
from pypy.translator.backendopt import graphanalyze
from pypy.translator.backendopt.support import var_needsgc
from pypy.annotation import model as annmodel
from pypy.rpython import annlowlevel
from pypy.rpython.rbuiltin import gen_cast
from pypy.rpython.memory.gctypelayout import ll_weakref_deref, WEAKREF
from pypy.rpython.memory.gctypelayout import convert_weakref_to, WEAKREFPTR
from pypy.rpython.memory.gctransform.log import log
from pypy.tool.sourcetools import func_with_new_name
from pypy.rpython.lltypesystem.lloperation import llop, LL_OPERATIONS
import sys
class CollectAnalyzer(graphanalyze.GraphAnalyzer):
    """Graph analyzer deciding which operations/calls may trigger a GC
    collection (so the transformer knows where roots must be saved)."""

    def analyze_direct_call(self, graph, seen=None):
        try:
            target = graph.func
            if target is rstack.stack_check:
                # stack checks can only unwind when translating stackless
                return self.translator.config.translation.stackless
            if target._gctransformer_hint_cannot_collect_:
                # function explicitly hinted as never collecting
                return False
        except AttributeError:
            # graph has no .func, or the hint attribute is absent
            pass
        return graphanalyze.GraphAnalyzer.analyze_direct_call(
            self, graph, seen)

    def operation_is_true(self, op):
        if op.opname in ('malloc', 'malloc_varsize'):
            # only gc-flavored mallocs without 'nocollect' may collect
            mflags = op.args[1].value
            if mflags['flavor'] != 'gc':
                return False
            return not mflags.get('nocollect', False)
        if op.opname not in LL_OPERATIONS:
            return False
        return LL_OPERATIONS[op.opname].canunwindgc
def find_initializing_stores(collect_analyzer, graph):
    """Find store operations that initialize a freshly malloc'ed object.

    Looks for blocks ending in ``malloc`` + ``ptr_nonzero`` used as the
    exit switch, then follows the success link and records every
    gc-pointer store into the fresh object (or an alias of it), stopping
    at the first operation that may collect.

    :param collect_analyzer: analyzer telling whether an operation may
        trigger a collection.
    :param graph: the flow graph to scan.
    :returns: a dict mapping each qualifying store operation to True.
    """
    from pypy.objspace.flow.model import mkentrymap
    entrymap = mkentrymap(graph)
    # a bit of a hackish analysis: if a block contains a malloc and check that
    # the result is not zero, then the block following the True link will
    # usually initialize the newly allocated object
    result = {}
    def find_in_block(block, mallocvars):
        # Walk a block, propagating the set of variables known to alias the
        # fresh allocation, and record gc-pointer stores into them.
        for i, op in enumerate(block.operations):
            if op.opname in ("cast_pointer", "same_as"):
                # casts/copies create new aliases of the fresh object
                if op.args[0] in mallocvars:
                    mallocvars[op.result] = True
            elif op.opname in ("setfield", "setarrayitem", "setinteriorfield"):
                TYPE = op.args[-1].concretetype
                if (op.args[0] in mallocvars and
                    isinstance(TYPE, lltype.Ptr) and
                    TYPE.TO._gckind == "gc"):
                    result[op] = True
            else:
                if collect_analyzer.analyze(op):
                    # possible collection point: stop, the "fresh" object
                    # may no longer be considered freshly allocated
                    return
        for exit in block.exits:
            if len(entrymap[exit.target]) != 1:
                # target is reachable from elsewhere too: unsafe to propagate
                continue
            newmallocvars = {}
            for i, var in enumerate(exit.args):
                if var in mallocvars:
                    newmallocvars[exit.target.inputargs[i]] = True
            if newmallocvars:
                find_in_block(exit.target, newmallocvars)
    mallocnum = 0
    blockset = set(graph.iterblocks())
    while blockset:
        block = blockset.pop()
        if len(block.operations) < 2:
            continue
        # look for the pattern: malloc followed by a ptr_nonzero check
        # that drives the block's exit switch
        mallocop = block.operations[-2]
        checkop = block.operations[-1]
        if not (mallocop.opname == "malloc" and
                checkop.opname == "ptr_nonzero" and
                mallocop.result is checkop.args[0] and
                block.exitswitch is checkop.result):
            continue
        # types with a destructor are excluded
        rtti = get_rtti(mallocop.args[0].value)
        if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
            continue
        exits = [exit for exit in block.exits if exit.llexitcase]
        if len(exits) != 1:
            continue
        exit = exits[0]
        if len(entrymap[exit.target]) != 1:
            continue
        try:
            index = exit.args.index(mallocop.result)
        except ValueError:
            continue
        target = exit.target
        mallocvars = {target.inputargs[index]: True}
        mallocnum += 1
        find_in_block(target, mallocvars)
    #if result:
    #    print "found %s initializing stores in %s" % (len(result), graph.name)
    return result
class FrameworkGCTransformer(GCTransformer):
use_stackless = False
root_stack_depth = 163840
    def __init__(self, translator):
        """Build the transformer for *translator*.

        Chooses the GC implementation (from class attributes when testing,
        otherwise from the translation config), creates the layout builder
        and the GCData structure, and annotates every GC entry point
        (setup, the malloc variants, collect, id, write barrier,
        statistics, the experimental x_* operations and the thread hooks)
        as mix-level helpers whose function pointer constants are stored
        on ``self``.
        """
        from pypy.rpython.memory.gc.base import choose_gc_from_config
        super(FrameworkGCTransformer, self).__init__(translator, inline=True)
        if hasattr(self, 'GC_PARAMS'):
            # for tests: the GC choice can be specified as class attributes
            from pypy.rpython.memory.gc.marksweep import MarkSweepGC
            GCClass = getattr(self, 'GCClass', MarkSweepGC)
            GC_PARAMS = self.GC_PARAMS
        else:
            # for regular translation: pick the GC from the config
            GCClass, GC_PARAMS = choose_gc_from_config(translator.config)
        self.layoutbuilder = TransformerLayoutBuilder(self)
        self.get_type_id = self.layoutbuilder.get_type_id
        # set up a dummy table, to be overwritten with the real one in finish()
        type_info_table = lltype._ptr(
            lltype.Ptr(gctypelayout.GCData.TYPE_INFO_TABLE),
            "delayed!type_info_table", solid=True)
        gcdata = gctypelayout.GCData(type_info_table)
        # initialize the following fields with a random non-NULL address,
        # to make the annotator happy.  The fields are patched in finish()
        # to point to a real array.
        foo = lltype.malloc(lltype.FixedSizeArray(llmemory.Address, 1),
                            immortal=True, zero=True)
        a_random_address = llmemory.cast_ptr_to_adr(foo)
        gcdata.static_root_start = a_random_address      # patched in finish()
        gcdata.static_root_nongcend = a_random_address   # patched in finish()
        gcdata.static_root_end = a_random_address        # patched in finish()
        self.gcdata = gcdata
        self.malloc_fnptr_cache = {}
        # instantiate the chosen GC and hook it up to a root walker
        gcdata.gc = GCClass(translator.config.translation, **GC_PARAMS)
        root_walker = self.build_root_walker()
        gcdata.set_query_functions(gcdata.gc)
        gcdata.gc.set_root_walker(root_walker)
        self.num_pushs = 0
        self.write_barrier_calls = 0

        def frameworkgc_setup():
            # run-time initialization code
            root_walker.setup_root_walker()
            gcdata.gc.setup()

        bk = self.translator.annotator.bookkeeper

        # the point of this little dance is to not annotate
        # self.gcdata.static_root_xyz as constants. XXX is it still needed??
        data_classdef = bk.getuniqueclassdef(gctypelayout.GCData)
        data_classdef.generalize_attr(
            'static_root_start',
            annmodel.SomeAddress())
        data_classdef.generalize_attr(
            'static_root_nongcend',
            annmodel.SomeAddress())
        data_classdef.generalize_attr(
            'static_root_end',
            annmodel.SomeAddress())

        annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper)

        def getfn(ll_function, args_s, s_result, inline=False,
                  minimal_transform=True):
            # annotate a helper and return it as a function pointer constant
            graph = annhelper.getgraph(ll_function, args_s, s_result)
            if minimal_transform:
                self.need_minimal_transform(graph)
            if inline:
                self.graphs_to_inline[graph] = True
            return annhelper.graph2const(graph)

        self.frameworkgc_setup_ptr = getfn(frameworkgc_setup, [],
                                           annmodel.s_None)
        # shadow-stack push/pop helpers, only when the root walker keeps
        # a root stack
        if root_walker.need_root_stack:
            self.incr_stack_ptr = getfn(root_walker.incr_stack,
                                        [annmodel.SomeInteger()],
                                        annmodel.SomeAddress(),
                                        inline = True)
            self.decr_stack_ptr = getfn(root_walker.decr_stack,
                                        [annmodel.SomeInteger()],
                                        annmodel.SomeAddress(),
                                        inline = True)
        else:
            self.incr_stack_ptr = None
            self.decr_stack_ptr = None
        self.weakref_deref_ptr = self.inittime_helper(
            ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)

        classdef = bk.getuniqueclassdef(GCClass)
        s_gc = annmodel.SomeInstance(classdef)
        s_gcref = annmodel.SomePtr(llmemory.GCREF)

        # malloc entry points: the *_clear variant always exists; the
        # non-clearing variant is optional on the GC class
        malloc_fixedsize_clear_meth = GCClass.malloc_fixedsize_clear.im_func
        self.malloc_fixedsize_clear_ptr = getfn(
            malloc_fixedsize_clear_meth,
            [s_gc, annmodel.SomeInteger(nonneg=True),
             annmodel.SomeInteger(nonneg=True),
             annmodel.SomeBool(), annmodel.SomeBool(),
             annmodel.SomeBool()], s_gcref,
            inline = False)
        if hasattr(GCClass, 'malloc_fixedsize'):
            malloc_fixedsize_meth = GCClass.malloc_fixedsize.im_func
            self.malloc_fixedsize_ptr = getfn(
                malloc_fixedsize_meth,
                [s_gc, annmodel.SomeInteger(nonneg=True),
                 annmodel.SomeInteger(nonneg=True),
                 annmodel.SomeBool(), annmodel.SomeBool(),
                 annmodel.SomeBool()], s_gcref,
                inline = False)
        else:
            malloc_fixedsize_meth = None
            self.malloc_fixedsize_ptr = self.malloc_fixedsize_clear_ptr
##         self.malloc_varsize_ptr = getfn(
##             GCClass.malloc_varsize.im_func,
##             [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)]
##             + [annmodel.SomeBool(), annmodel.SomeBool()], s_gcref)
        self.malloc_varsize_clear_ptr = getfn(
            GCClass.malloc_varsize_clear.im_func,
            [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)]
            + [annmodel.SomeBool(), annmodel.SomeBool()], s_gcref)
        self.collect_ptr = getfn(GCClass.collect.im_func,
            [s_gc], annmodel.s_None)
        self.can_move_ptr = getfn(GCClass.can_move.im_func,
                                  [s_gc, annmodel.SomeAddress()],
                                  annmodel.SomeBool())

        # in some GCs we can inline the common case of
        # malloc_fixedsize(typeid, size, True, False, False)
        if getattr(GCClass, 'inline_simple_malloc', False):
            # make a copy of this function so that it gets annotated
            # independently and the constants are folded inside
            if malloc_fixedsize_meth is None:
                malloc_fast_meth = malloc_fixedsize_clear_meth
                self.malloc_fast_is_clearing = True
            else:
                malloc_fast_meth = malloc_fixedsize_meth
                self.malloc_fast_is_clearing = False
            malloc_fast = func_with_new_name(
                malloc_fast_meth,
                "malloc_fast")
            s_False = annmodel.SomeBool(); s_False.const = False
            s_True = annmodel.SomeBool(); s_True .const = True
            self.malloc_fast_ptr = getfn(
                malloc_fast,
                [s_gc, annmodel.SomeInteger(nonneg=True),
                 annmodel.SomeInteger(nonneg=True),
                 s_True, s_False,
                 s_False], s_gcref,
                inline = True)
        else:
            self.malloc_fast_ptr = None

        # in some GCs we can also inline the common case of
        # malloc_varsize(typeid, length, (3 constant sizes), True, False)
        if getattr(GCClass, 'inline_simple_malloc_varsize', False):
            # make a copy of this function so that it gets annotated
            # independently and the constants are folded inside
            malloc_varsize_clear_fast = func_with_new_name(
                GCClass.malloc_varsize_clear.im_func,
                "malloc_varsize_clear_fast")
            s_False = annmodel.SomeBool(); s_False.const = False
            s_True = annmodel.SomeBool(); s_True .const = True
            self.malloc_varsize_clear_fast_ptr = getfn(
                malloc_varsize_clear_fast,
                [s_gc, annmodel.SomeInteger(nonneg=True),
                 annmodel.SomeInteger(nonneg=True),
                 annmodel.SomeInteger(nonneg=True),
                 annmodel.SomeInteger(nonneg=True),
                 annmodel.SomeInteger(nonneg=True),
                 s_True, s_False], s_gcref,
                inline = True)
        else:
            self.malloc_varsize_clear_fast_ptr = None

        # optional GC features: each is probed with getattr and the
        # corresponding pointer is None when unsupported
        if getattr(GCClass, 'malloc_varsize_nonmovable', False):
            malloc_nonmovable = func_with_new_name(
                GCClass.malloc_varsize_nonmovable.im_func,
                "malloc_varsize_nonmovable")
            self.malloc_varsize_nonmovable_ptr = getfn(
                malloc_nonmovable,
                [s_gc, annmodel.SomeInteger(nonneg=True),
                 annmodel.SomeInteger(nonneg=True)], s_gcref)
        else:
            self.malloc_varsize_nonmovable_ptr = None

        if getattr(GCClass, 'malloc_varsize_resizable', False):
            malloc_resizable = func_with_new_name(
                GCClass.malloc_varsize_resizable.im_func,
                "malloc_varsize_resizable")
            self.malloc_varsize_resizable_ptr = getfn(
                malloc_resizable,
                [s_gc, annmodel.SomeInteger(nonneg=True),
                 annmodel.SomeInteger(nonneg=True)], s_gcref)
        else:
            self.malloc_varsize_resizable_ptr = None

        if getattr(GCClass, 'realloc', False):
            self.realloc_ptr = getfn(
                GCClass.realloc.im_func,
                [s_gc, s_gcref] +
                [annmodel.SomeInteger(nonneg=True)] * 4 +
                [annmodel.SomeBool()],
                s_gcref)

        if GCClass.moving_gc:
            self.id_ptr = getfn(GCClass.id.im_func,
                                [s_gc, s_gcref], annmodel.SomeInteger(),
                                inline = False,
                                minimal_transform = False)
        else:
            self.id_ptr = None

        self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func,
                                           [s_gc,
                                            annmodel.SomeInteger(nonneg=True)],
                                           annmodel.s_None)

        if GCClass.needs_write_barrier:
            self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func,
                                           [s_gc,
                                            annmodel.SomeAddress(),
                                            annmodel.SomeAddress()],
                                           annmodel.s_None,
                                           inline=True)
        else:
            self.write_barrier_ptr = None
        self.statistics_ptr = getfn(GCClass.statistics.im_func,
                                    [s_gc, annmodel.SomeInteger()],
                                    annmodel.SomeInteger())

        # experimental gc_x_* operations
        s_x_pool = annmodel.SomePtr(marksweep.X_POOL_PTR)
        s_x_clone = annmodel.SomePtr(marksweep.X_CLONE_PTR)
        # the x_*() methods use some regular mallocs that must be
        # transformed in the normal way
        self.x_swap_pool_ptr = getfn(GCClass.x_swap_pool.im_func,
                                     [s_gc, s_x_pool],
                                     s_x_pool,
                                     minimal_transform = False)
        self.x_clone_ptr = getfn(GCClass.x_clone.im_func,
                                 [s_gc, s_x_clone],
                                 annmodel.s_None,
                                 minimal_transform = False)

        # thread support
        if translator.config.translation.thread:
            if not hasattr(root_walker, "need_thread_support"):
                raise Exception("%s does not support threads" % (
                    root_walker.__class__.__name__,))
            root_walker.need_thread_support()
            self.thread_prepare_ptr = getfn(root_walker.thread_prepare,
                                            [], annmodel.s_None)
            self.thread_run_ptr = getfn(root_walker.thread_run,
                                        [], annmodel.s_None,
                                        inline=True)
            self.thread_die_ptr = getfn(root_walker.thread_die,
                                        [], annmodel.s_None)

        annhelper.finish()   # at this point, annotate all mix-level helpers
        annhelper.backend_optimize()

        self.collect_analyzer = CollectAnalyzer(self.translator)
        self.collect_analyzer.analyze_all()

        s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass)
        r_gc = self.translator.rtyper.getrepr(s_gc)
        self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc)
        self.malloc_zero_filled = GCClass.malloc_zero_filled

        # record the GC header layout as ('_name', TYPE) pairs; used by
        # gc_fields()/gc_field_values_for() below
        HDR = self._gc_HDR = self.gcdata.gc.gcheaderbuilder.HDR
        self._gc_fields = fields = []
        for fldname in HDR._names:
            FLDTYPE = getattr(HDR, fldname)
            fields.append(('_' + fldname, FLDTYPE))
    def build_root_walker(self):
        """Factory hook for the root walker; subclasses may override.

        The default implementation uses a shadow stack.
        """
        return ShadowStackRootWalker(self)
    def consider_constant(self, TYPE, value):
        """Register a prebuilt constant with the layout builder."""
        self.layoutbuilder.consider_constant(TYPE, value, self.gcdata.gc)
#def get_type_id(self, TYPE):
# this method is attached to the instance and redirects to
# layoutbuilder.get_type_id().
    def finalizer_funcptr_for_type(self, TYPE):
        """Delegate to the layout builder for the finalizer of *TYPE*."""
        return self.layoutbuilder.finalizer_funcptr_for_type(TYPE)
    def gc_fields(self):
        """Return the ('_name', TYPE) pairs describing the GC header."""
        return self._gc_fields
def gc_field_values_for(self, obj):
hdr = self.gcdata.gc.gcheaderbuilder.header_of_object(obj)
HDR = self._gc_HDR
return [getattr(hdr, fldname) for fldname in HDR._names]
def finish_tables(self):
table = self.layoutbuilder.flatten_table()
log.info("assigned %s typeids" % (len(table), ))
log.info("added %s push/pop stack root instructions" % (
self.num_pushs, ))
if self.write_barrier_ptr:
log.info("inserted %s write barrier calls" % (
self.write_barrier_calls, ))
# replace the type_info_table pointer in gcdata -- at this point,
# the database is in principle complete, so it has already seen
# the delayed pointer. We need to force it to consider the new
# array now.
self.gcdata.type_info_table._become(table)
# XXX because we call inputconst already in replace_malloc, we can't
# modify the instance, we have to modify the 'rtyped instance'
| |
###############################################################################
# Copyright 2018 The AnPyLar Team. All Rights Reserved.
# Use of this source code is governed by an MIT-style license that
# can be found in the LICENSE file at http://anpylar.com/mit-license
###############################################################################
from browser import document, window
import browser.ajax
from . import binding
from . import html
from .observable import Observable
from .promise import Promise
from .service import Service
from . import stacks
from . import utils
from .modbase import _MetaMod, _ModBase
__all__ = ['Component', 'ComponentInline']

# Monotonic counters starting at 1. _COMPCOUNT numbers each Component
# subclass (used to make auto-generated selectors unique); _CIDCOUNT is
# presumably a per-instance counter -- its consumer is not in this chunk.
_COMPCOUNT = utils.count(1)
_CIDCOUNT = utils.count(1)
class _MetaComponent(_MetaMod):
    """Metaclass for components.

    On class creation it assigns a unique component id and derives an html
    selector from the class name when none is declared.  On instantiation
    (``__call__``) it wires the new component into the current rendering
    context: enclosing html node, module, parent, services and auto-set
    keyword attributes.
    """

    def __init__(cls, name, bases, dct, **kwds):
        # Must be done here, because in new the factory is not there ... only
        # the class dict. This is a "particularity" of brython
        super().__init__(name, bases, dct, **kwds)
        cid = str(next(_COMPCOUNT))
        setattr(cls, '_cid', cid)

        selector = dct.get('selector', None)
        if not selector:
            # derive a dash-separated selector from the CamelCase class
            # name, e.g. MyComponent -> my-component-<cid>
            autosel = []
            lastlower = False
            for x in name:
                if x.isupper():
                    if lastlower:
                        autosel.append('-')
                    autosel.append(x.lower())
                    lastlower = False
                else:
                    autosel.append(x)
                    lastlower = x.islower()

            # Add counter to make unique (same class name in diff module)
            autosel.append('-')
            autosel.append(cid)
            dct['selector'] = selector = ''.join(autosel)
            setattr(cls, 'selector', selector)

        # register the selector as a renderable html tag for this class
        html._customize_tag(selector, dotag=True, component=cls)

    def __call__(cls, *args, **kwargs):
        # Create a component instance bound to the current rendering stack.
        htmlnode = stacks.htmlnodes[-1]
        if htmlnode._comp is not None:
            # rendered inside another component, send tagout which will
            # piggyback on this
            tag = cls._tagout(_compargs=args, _compkwargs=kwargs)
            return tag._comp

        # scan for kwargs that meet class attributes
        autokwargs = {k: v for k, v in kwargs.items() if hasattr(cls, k)}
        for k in autokwargs:
            kwargs.pop(k)

        self = cls.__new__(cls, *args, **kwargs)  # create
        self._children = []

        # Find enclosing nodes, module and parent component
        self._htmlnode = htmlnode
        self._module = stacks.modules[-1]
        self._parent = htmlnode._elparent._comp
        parent_module = self._parent._module
        if self._module != parent_module:
            # crossed a module boundary: reparent under the module itself
            self._parent = self._module

        self._parent._children.append(self)

        for k, v in autokwargs.items():  # set the values in the instance
            setattr(self, k, v)  # before going to init

        # choose the namespace under which services will be placed
        if not self.service_ns:
            service_ns = self
        else:
            class Service_PlaceHolder:
                pass  # simple attribute placeholder

            service_ns = Service_PlaceHolder()
            if self.service_ns is True:  # specific check for True
                self._s = service_ns
            else:
                setattr(self, self.service_ns, service_ns)

        # Instantiate and place services under the selected namespace
        for name, service in self.services.items():
            if issubclass(service, (Service,)):
                s = service(self, self._module)
            else:
                # not a Service subclass: instantiate bare and attach the
                # module/parent attributes manually
                s = service()
                s._module = self._module
                s._parent = self

            setattr(service_ns, name, s)

        # cache key for the style/html payloads; defaults to the fully
        # qualified class name
        _cachename = self.cachename
        if not _cachename:
            _cachename = '{}.{}'.format(self.__class__.__module__,
                                        self.__class__.__name__)

        self._cachename_style = '{}.{}'.format(_cachename, 'style')
        self._cachename_html = '{}.{}'.format(_cachename, 'html')

        self.__init__(*args, **kwargs)
        return self
class Component(_ModBase, metaclass=_MetaComponent):
'''A *Component* controls the appearance and elements inside a patch of the
screen
It can render the elements programatically or directly with html content
Attributes:
- ``bindings ({})``:
A dictionary containing the name and default value of attributes for
the class which will also automatically add bound ``Observables``
The observables will have a ``_`` (underscore) character appended.
Setting the value of the attribute will trigger the observable and
therefore any actions/subscriptions associated with it. Example:
bindings = {'myattr': 'myvalue'}
will create:
- An attribute ``myattr`` which defaults to ``myvalue``
- An observable ``myattr_``
- ``services ({})``:
A dictionary containing the name and service class for services defined
for this and any child component of it
services = {'myservice': MyService}
- ``service_ns (False)``:
If ``False``, services will be added as direct attributes of the
instance with the name specified in the ``services`` declaration.
If ``service_ns`` is:
- ``True``: the declared ``services`` will be reachable under the
attribute ``self._s.myservice``
This is meant to separate the services from the rest of the
attributes.
- A ``string``, the declared ``services`` will be reachable under
the attribute ``self.{value of the string}.myservice``. I.e.: for
a value of ``services_here``, then a service would be reachable
as::
self.services_here.myservice
- ``router``
Attribute which points to the router instance in charge of the
application
- ``route``
Attribute which contains the current active route snapshot
- ``selector (None)``
The selector is the name of the html tag under which the component is
rendered and controls elements.
If ``None``, the name will be automatically derived from the name of
the class
- ``htmlpath (True)``
Path to a file containing the html content.
If ``True`` the name of the file will be derived automatically from the
class name, i.e.: *MyComponent* -> *my_component.html*. In this case
the file has to be placed at the same level in the file hierarchy as
the python module in which the component class is defined.
To derive the name: underscores will be placed in the upper/lower-case
boundaries, everything will be lowercased and the extension ``.html``
will be appended.
If it contains a *name* (string), this will be used to fetch the file
from the server (or from a virtual file system if the app is delivered
as a package)
This takes precedence over the ``render`` method.
After loading, the ``render`` method will be called with the node under
which the content has been loaded
- ``htmlsheet (None)``
**This takes precedence over ``htmlpath``.**
If not ``None``, this will then contain html content in text format,
which will be used to render the patch
If ``None``, the name will be automatically derived from the name of
the class.
After loading, the ``render`` method will be called with the node under
which the content has been loaded
- ``stylepath (True)``
Path to a file containing the style sheet.
If ``True`` the name of the file will be derived automatically from the
class name, i.e.: *MyComponent* -> *mycomponent.css*. In this case the
file has to be placed at the same level in the file hierarchy as the
python module in which the component class is defined
To derive the name: underscores will be placed in the upper/lower-case
boundaries, everything will be lowercased and the extension ``.css``
will be appended.
If it contains a *name* (string), this will be used to fetch the file
from the server (or from a virtual file system if the app is delivered
as a package)
This takes precedence over the ``stlyer`` method.
After loading, the ``html`` method will be called with the node under
which the content has been loaded
- ``stylesheet (None)``
If not ``None``, this will then contain a style sheet in text format,
which will be used to control the styles of the elements rendered by
the component
**This takes precedence over ``stylepath``**
- ``cachesheets (True)``
If ``True``, loaded html content and style sheets will be cached,
rather than fetched again
- ``cacheable (True)``
If ``True``, the component will not be destroyed and recreated each
time. Setting it to ``False`` forces destruction and recreation
'''
cacheable = True # kept as in between routing or re-created
cachesheets = True # keep internal cache of the fetched stylepath
_styled = set() # Flag for style delivered to the head
cachename = None
selector = None # selector tag to apply if any
htmlpath = True # name or autoname (True) of htmlpath
htmlsheet = None # html template
stylesheet = None # name or autoname (True) of stylepath
stylepath = True # name or autoname (True) of stylepath
service_ns = False
services = {} # name:service of services at component level
_parent = None # parent component or module
_module = None # module in which the component lives
_cid = 0 # component id
def __getattr__(self, name):
    """Resolve missing attributes by delegating up the component chain.

    Dunder lookups are never delegated; anything else is forwarded to
    ``self._parent`` so attributes bubble up toward the module. Raises
    ``AttributeError`` when nothing in the hierarchy provides ``name``.
    """
    if name.startswith('__'):
        # keep special-method lookups local (this raises AttributeError,
        # since object defines no __getattr__)
    	return super().__getattr__(name)
    # During __init__ attributes may be sought before the parent link is
    # final; swallow the parent's AttributeError and raise our own below.
    resolved = False
    try:
        value = getattr(self._parent, name)
        resolved = True
    except AttributeError:
        pass
    if resolved:
        return value
    message = '{} not found in {} nor in its services or hierarchy'.format(
        name, self.__class__.__name__)
    raise AttributeError(message)
def __setattr__(self, name, value):
# Attribute not found using the regular methods
if name[-1] == '_':
if isinstance(value, Observable):
super().__setattr__(name, value) # special: pointer creation
return
name1 = name[:-1]
if name1 in self.bindings: # see if it belongs to bindings
setattr(self, name1, value) | |
""" --- --- --- """
datafpath = Paths.ppr_sims + d3class.sim + '/' + __rootoutdir__
figname = "{}_rl{}.png".format(v_n, rl)
o_plot = PLOT_MANY_TASKS()
o_plot.gen_set["figdir"] = datafpath
o_plot.gen_set["type"] = "cartesian"
o_plot.gen_set["figsize"] = (4.2, 8.0) # <->, |] # to match hists with (8.5, 2.7)
o_plot.gen_set["figname"] = figname
o_plot.gen_set["sharex"] = False
o_plot.gen_set["sharey"] = False
o_plot.gen_set["subplots_adjust_h"] = -0.3
o_plot.gen_set["subplots_adjust_w"] = 0.2
o_plot.set_plot_dics = []
# for it, t in zip(d3class.list_iterations, d3class.times): # zip([346112],[0.020]):# #
if not os.path.isdir(datafpath + str(it) + '/' + figdir):
os.mkdir(datafpath + str(it) + '/' + figdir)
# tr = (t - tmerg) * 1e3 # ms
if not os.path.isfile(datafpath + str(it) + '/' + "profile.xy.h5") \
or not os.path.isfile(datafpath + str(it) + '/' + "profile.xz.h5"):
Printcolor.yellow(
"Required data ia missing: {}".format(datafpath + str(it) + '/' + "profile.xy(or yz).h5"))
continue
fpath = datafpath + str(it) + '/' + figdir + figname
t = d3class.get_time_for_it(it, "profiles", "prof")
try:
if (os.path.isfile(fpath) and rewritefigs) or not os.path.isfile(fpath):
if os.path.isfile(fpath): os.remove(fpath)
print_colored_string(
["task:", "plot prof slice", "it:", "{}".format(it), "rl:", "{:d}".format(rl), "v_ns:", v_n, ":", "plotting"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "green"])
# ---------- PLOTTING -------------
if v_n in ["velx", "vely", "velz", "vphi", "vr", "ang_mom_flux"]:
print("\t\tUsing 2 colobars for v_n:{}".format(v_n))
# make separate plotting >0 and <0 with log scales
o_plot.gen_set["figdir"] = datafpath + str(it) + '/' + figdir
def_dic_xz['cmap'] = 'Reds'
def_dic_xz["mask"] = "negative"
def_dic_xz['cbar'] = {'location': 'right .04 0.00', 'label': v_n.replace('_', '\_') + r"$<0$",
'labelsize': 14,
'fontsize': 14}
def_dic_xz["it"] = int(it)
def_dic_xz["title"]["text"] = r'$t:{:.1f}$ [ms]'.format(float(t))
n_def_dic_xz = def_dic_xz.copy() # copy.deepcopy(def_dic_xz)
def_dic_xz['data'] = d3class
o_plot.set_plot_dics.append(def_dic_xz)
n_def_dic_xz['data'] = d3class
n_def_dic_xz['cmap'] = 'Blues'
n_def_dic_xz["mask"] = "positive"
n_def_dic_xz['cbar'] = {}
n_def_dic_xz["it"] = int(it)
n_def_dic_xz["title"]["text"] = r'$t:{:.1f}$ [ms]'.format(float(t*1e3))
o_plot.set_plot_dics.append(n_def_dic_xz)
# --- ---
def_dic_xy["it"] = int(it)
def_dic_xy['cmap'] = 'Blues'
def_dic_xy['mask'] = "positive"
def_dic_xy['cbar'] = {'location': 'right .04 .0', 'label': v_n.replace('_', '\_') + r"$>0$",
# 'fmt': '%.1e',
'labelsize': 14,
'fontsize': 14}
# n_def_dic_xy = copy.deepcopy(def_dic_xy)
n_def_dic_xy = def_dic_xy.copy()
def_dic_xy['data'] = d3class
o_plot.set_plot_dics.append(def_dic_xy)
n_def_dic_xy['data'] = d3class
n_def_dic_xy['cbar'] = {}
n_def_dic_xy['cmap'] = 'Reds'
n_def_dic_xy['mask'] = "negative"
o_plot.set_plot_dics.append(n_def_dic_xy)
for dic in o_plot.set_plot_dics:
if not 'cbar' in dic.keys():
raise IOError("dic:{} no cbar".format(dic))
# ---- ----
o_plot.main()
# del(o_plot.set_plot_dics)
o_plot.figure.clear()
n_def_dic_xy = {}
n_def_dic_xz = {}
else:
def_dic_xz['data'] = d3class
def_dic_xz['cbar']['label'] = v_n.replace('_', '\_')
def_dic_xz['cbar']['location'] = 'right .04 -.36'
def_dic_xz["it"] = int(it)
def_dic_xz["title"]["text"] = r'$t:{:.1f}$ [ms]'.format(float(t*1e3))
o_plot.gen_set["figdir"] = datafpath + str(it) + '/' + figdir
o_plot.set_plot_dics.append(def_dic_xz)
def_dic_xy['data'] = d3class
def_dic_xy["it"] = int(it)
# rho_dic_xy["title"]["text"] = r'$t-t_{merg}:$' + r'${:.2f}ms$'.format(float(tr))
# o_plot.gen_set["figname"] = # 7 digit output
o_plot.set_plot_dics.append(def_dic_xy)
o_plot.main()
# del(o_plot.set_plot_dics)
o_plot.figure.clear()
def_dic_xy = {}
def_dic_xz = {}
# ------------------------
else:
print_colored_string(
["task:", "plot prof slice", "it:", "{}".format(it), "rl:", "{:d}".format(rl), "v_ns:", v_n, ":", "skipping"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "blue"])
except KeyboardInterrupt:
exit(1)
except IOError:
print_colored_string(
["task:", "plot prof slice", "it:", "{}".format(it), "rl:", "{:d}".format(rl), "v_ns:", v_n,
":", "IOError"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"])
except ValueError:
print_colored_string(
["task:", "plot prof slice", "it:", "{}".format(it), "rl:", "{:d}".format(rl), "v_ns:", v_n,
":", "ValueError"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"])
except:
print_colored_string(
["task:", "plot prof slice", "it:", "{}".format(it), "rl:", "{:d}".format(rl), "v_ns:", v_n, ":", "failed"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"])
v_n = None
rl = None
it = None
sys.stdout.flush()
i = i + 1
# exit(1)
def plot_d3_corr(d3histclass, rewrite=False):
iterations = select_number(glob_its, d3histclass.list_iterations)
v_ns = select_string(glob_v_ns, __d3corrs__, for_all="all")
for it in iterations:
for vn1vn2 in v_ns:
default_dic = { # relies on the "get_res_corr(self, it, v_n): " method of data object
'task': 'corr2d', 'ptype': 'cartesian',
'data': d3histclass,
'position': (1, 1),
'v_n_x': 'ang_mom_flux', 'v_n_y': 'dens_unb_bern', 'v_n': Labels.labels("mass"), 'normalize': True,
'xmin': None, 'xmax': None, 'ymin': None, 'ymax': None, 'vmin': 1e-7, 'vmax': 1e-3,
'xscale': 'log', 'yscale': 'log',
'mask_below': None, 'mask_above': None, 'cmap': 'inferno_r', 'norm': 'log', 'todo': None,
'cbar': {'location': 'right .03 .0', 'label': r'mass',
'labelsize': 14,
'fontsize': 14},
'title': {"text": r'$t-t_{merg}:$' + r'${:.1f}$'.format(0), 'fontsize': 14},
'fontsize': 14,
'labelsize': 14,
'minorticks': True,
'fancyticks': True,
'sharey': False,
'sharex': False,
}
if vn1vn2 == "rho_r":
v_n_x = 'rho'
v_n_y = 'r'
# default_dic['v_n_x'] = 'rho'
# default_dic['v_n_y'] = 'r'
default_dic['xmin'] = 1e-9
default_dic['xmax'] = 2e-5
default_dic['ymin'] = 0
default_dic['ymax'] = 250
default_dic['yscale'] = None
elif vn1vn2 == "rho_Ye":
v_n_x = 'rho'
v_n_y = 'Ye'
# default_dic['v_n_x'] = 'rho'
# default_dic['v_n_y'] = 'Ye'
default_dic['xmin'] = 1e-9
default_dic['xmax'] = 2e-5
default_dic['ymin'] = 0.01
default_dic['ymax'] = 0.5
default_dic['yscale'] = None
elif vn1vn2 == "r_Ye":
v_n_x = 'r'
v_n_y = 'Ye'
# default_dic['v_n_x'] = 'rho'
# default_dic['v_n_y'] = 'Ye'
default_dic['xmin'] = 0
default_dic['xmax'] = 100
default_dic['xscale'] = None
default_dic['ymin'] = 0.01
default_dic['ymax'] = 0.5
default_dic['yscale'] = None
elif vn1vn2 == "temp_Ye":
v_n_x = 'temp'
v_n_y = 'Ye'
# default_dic['v_n_x'] = 'temp'
# default_dic['v_n_y'] = 'Ye'
default_dic['xmin'] = 1e-2
default_dic['xmax'] = 1e2
default_dic['ymin'] = 0.01
default_dic['ymax'] = 0.5
default_dic['yscale'] = None
elif vn1vn2 == "Ye_entr":
v_n_x = 'Ye'
v_n_y = 'entr'
# default_dic['v_n_x'] = 'temp'
# default_dic['v_n_y'] = 'Ye'
default_dic['ymin'] = 0
default_dic['ymax'] = 50
default_dic['xmin'] = 0.01
default_dic['xmax'] = 0.5
default_dic['yscale'] = None
default_dic['xscale'] = None
elif vn1vn2 == "rho_temp":
v_n_x = 'rho'
v_n_y = 'temp'
# default_dic['v_n_x'] = 'rho'
# default_dic['v_n_y'] = 'theta'
default_dic['xmin'] = 1e-9
default_dic['xmax'] = 2e-5
default_dic['ymin'] = 1e-2
default_dic['ymax'] = 1e2
#default_dic['yscale'] = None
elif vn1vn2 == "rho_theta":
v_n_x = 'rho'
v_n_y = 'theta'
# default_dic['v_n_x'] = 'rho'
# default_dic['v_n_y'] = 'theta'
default_dic['xmin'] = 1e-9
default_dic['xmax'] = 2e-5
default_dic['ymin'] = 0
default_dic['ymax'] = 1.7
default_dic['yscale'] = None
elif vn1vn2 == "velz_theta":
v_n_x = 'velz'
v_n_y = 'theta'
# default_dic['v_n_x'] = 'velz'
# default_dic['v_n_y'] = 'theta'
default_dic['xmin'] = -.5
default_dic['xmax'] = .5
default_dic['ymin'] = 0
default_dic['ymax'] = 90.
default_dic['yscale'] = None
default_dic['xscale'] = None
elif vn1vn2 == "velz_Ye":
v_n_x = 'velz'
v_n_y = 'Ye'
# default_dic['v_n_x'] = 'velz'
# default_dic['v_n_y'] = 'Ye'
default_dic['xmin'] = -.5
default_dic['xmax'] = .5
default_dic['ymin'] = 0.01
default_dic['ymax'] = 0.5
default_dic['yscale'] = None
default_dic['xscale'] = None
elif vn1vn2 == "rho_ang_mom":
v_n_x = 'rho'
v_n_y = 'ang_mom'
# default_dic['v_n_x'] = 'rho'
# default_dic['v_n_y'] = 'ang_mom'
default_dic['xmin'] = 1e-9
default_dic['xmax'] = 2e-5
default_dic['ymin'] = 1e-9
default_dic['ymax'] = 1e-3
elif vn1vn2 == "theta_dens_unb_bern":
v_n_x = 'theta'
v_n_y = 'dens_unb_bern'
# default_dic['v_n_x'] = 'theta'
default_dic['xmin'] = 0.
default_dic['xmax'] = 90.
default_dic['xscale'] = None
# default_dic['v_n_y'] = 'dens_unb_bern'
default_dic['ymin'] = 1e-9
default_dic['ymax'] = 2e-6
elif vn1vn2 == "velz_dens_unb_bern":
v_n_x = 'velz'
v_n_y = 'dens_unb_bern'
# default_dic['v_n_x'] = 'velz'
default_dic['xmin'] = -.5
default_dic['xmax'] = .5
default_dic['xscale'] = None
# default_dic['v_n_y'] = 'dens_unb_bern'
default_dic['ymin'] = 1e-9
default_dic['ymax'] = 2e-6
elif vn1vn2 == "rho_ang_mom_flux":
v_n_x = 'rho'
v_n_y = 'ang_mom_flux'
# default_dic['v_n_x'] = 'rho'
# default_dic['v_n_y'] = 'ang_mom_flux'
default_dic['xmin'] = 1e-9
default_dic['xmax'] = 2e-5
default_dic['ymin'] = 1e-9
default_dic['ymax'] = 8e-5
elif vn1vn2 == "rho_dens_unb_bern":
v_n_x = 'rho'
v_n_y = 'dens_unb_bern'
# default_dic['v_n_x'] = 'rho'
# default_dic['v_n_y'] = 'dens_unb_bern'
default_dic['xmin'] = 1e-9
default_dic['xmax'] = 2e-5
default_dic['ymin'] = 1e-9
default_dic['ymax'] = 2e-6
elif vn1vn2 == "Ye_dens_unb_bern":
v_n_x = 'Ye'
v_n_y = 'dens_unb_bern'
# default_dic['v_n_x'] = 'Ye'
default_dic['xmin'] = 0.01
default_dic['xmax'] = 0.5
default_dic['xscale'] = None
# default_dic['v_n_y'] = 'dens_unb_bern'
default_dic['ymin'] = 1e-9
default_dic['ymax'] = 2e-6
default_dic['yscale'] = "log"
elif vn1vn2 == "ang_mom_flux_theta":
v_n_x = 'ang_mom_flux'
v_n_y = 'theta'
# default_dic['v_n_x'] = 'ang_mom_flux'
# default_dic['v_n_y'] = 'theta'
default_dic['xmin'] = 1e-9
default_dic['xmax'] = 8e-5
default_dic['ymin'] = 0
default_dic['ymax'] = 1.7
default_dic['yscale'] = None
elif vn1vn2 == "ang_mom_flux_dens_unb_bern":
v_n_x = 'ang_mom_flux'
v_n_y = 'dens_unb_bern'
default_dic['xmin'] = 1e-11
default_dic['xmax'] = 1e-7
default_dic['ymin'] = 1e-11
default_dic['ymax'] = 1e-7
elif vn1vn2 == "inv_ang_mom_flux_dens_unb_bern":
v_n_x = 'inv_ang_mom_flux'
v_n_y = 'dens_unb_bern'
default_dic['xmin'] = 1e-11
default_dic['xmax'] = 1e-7
default_dic['ymin'] = 1e-11
default_dic['ymax'] = 1e-7
# default_dic['v_n_x'] = 'inv_ang_mom_flux'
elif vn1vn2 == "hu_0_ang_mom":
v_n_x = 'hu_0'
v_n_y = 'ang_mom'
default_dic["xscale"] = None
default_dic['xmin'] = -1.2
default_dic['xmax'] = -0.8
default_dic['ymin'] = 1e-9
default_dic['ymax'] = 1e-3
elif vn1vn2 == "hu_0_ang_mom_flux":
v_n_x = 'hu_0'
v_n_y = 'ang_mom_flux'
default_dic["xscale"] = None
default_dic['xmin'] = -1.2
default_dic['xmax'] = -0.8
default_dic['ymin'] = 1e-11
default_dic['ymax'] = 1e-7
elif vn1vn2 == "hu_0_Ye":
v_n_x = 'hu_0'
v_n_y | |
# Copyright 2021 Phasecraft Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import datetime
import logging
import time
from collections import defaultdict
from itertools import chain
import cirq
import uncertainties
import cirq_google as cg
import numpy as np
import pandas as pd
import uncertainties.unumpy as unp
from cirq.google import optimized_for_sycamore
from uncertainties import ufloat
import fhvqe.circuit
import fhvqe.error_mitigation
from fhvqe.circuit import (ansatz, ansatz_multilayer_circuit,
ansatz_multilayer_circuit_merge,
ansatz_multistep, prepH, prepV, prepV2wrap)
from fhvqe.tools import map_site_to_JW
# Module-wide logger for this experiment module.
module_logger = logging.getLogger("fhvqe.experiment")

# pairs: qubit pairs combined during analysis; prep: preparation circuit(s);
# analysis: callable turning raw results into an energy contribution.
Measurements = collections.namedtuple("Measurements", "pairs prep analysis")
Circuits = collections.namedtuple("Circuits", "device, initial, final, ansatz, type, analysis")
# subbatch describes a sub-grouping within a single batch, for example when
# the measurement is part of the same gradient evaluation.
# descriptor is an extra descriptor of the measurement/circuit, for example
# the sign -1/+1/0, meaning the datapoint is -1 delta away from some
# parameters / +1 delta away / exactly at the parameters (each giving a
# different set of theta).
# type is the measurement type of the circuit.
# batchiteration is the iterator over different thetas (for a single theta
# there are multiple circuits, one per measurement that has to be taken).
Descriptions = collections.namedtuple("Descriptions", "subbatch, descriptor, type, analysis, batchiteration")

# Cache of measured values — presumably filled in by the experiment
# routines later in this module; verify before relying on its schema.
measured_values = {}
def start_logger(logfile="fhvqe.log"):
    """Initialize and return the logger for the fhvqe module.

    Configures the singleton ``'fhvqe'`` logger with a DEBUG-level file
    handler writing to *logfile* and an ERROR-level console handler.

    Args:
        logfile -- path of the log file (default ``"fhvqe.log"``)

    Returns:
        The configured ``logging.Logger`` instance.
    """
    logger = logging.getLogger('fhvqe')
    logger.setLevel(logging.DEBUG)
    # Bug fix: the original appended fresh handlers on every call, so
    # repeated calls made each record be emitted multiple times. If the
    # logger is already configured, return it untouched.
    if logger.handlers:
        return logger
    # file handler which logs even debug messages
    fh = logging.FileHandler(logfile)
    fh.setLevel(logging.DEBUG)
    # console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    # one shared formatter for both handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
def analyze(measurement_type, pairs, nh, nv, t=1, U=2, **kwargs):
    """Return a function which analyzes a numpy array of measurement results.

    The returned function expects ``results`` indexed as
    ``results[qubit]`` -> per-shot outcomes for that qubit, and returns the
    corresponding energy contribution.

    Args:
        measurement_type -- onsite, horizontal or vertical measurement
        pairs -- pairs of qubits which should be combined for measurement
        nh -- number of horizontal sites
        nv -- number of vertical sites
        t -- hopping interaction parameter
        U -- on-site interaction parameter

    Returns:
        Function for analysis of results

    Raises:
        ValueError: if ``measurement_type`` is not recognized (the original
        silently returned ``None``, deferring the failure to the caller).
    """
    if __debug__:
        module_logger.debug(f"Preparing analysis function for {measurement_type}")

    def _parity_indices(q1, q2):
        """Indices of the qubits lying strictly between q1 and q2, whose
        outcomes determine the parity correction."""
        return range(q1 + 1, q2)

    def analyzeO(results):
        """Analyze onsite measurement results: U * sum over pairs of the
        mean joint outcome."""
        sum_tot = 0
        for (q1, q2) in pairs:
            sum_tot += np.mean(results[q1] * results[q2])
        return U * sum_tot

    def analyzeH(results):
        """Analyze horizontal measurement results: -t * sum over pairs of
        the mean outcome difference."""
        sum_tot = 0
        for (q1, q2) in pairs:
            sum_tot += np.mean(results[q2] - results[q1])
        return -t * sum_tot

    def analyzeV(results):
        """Analyze vertical measurement results, applying the parity
        correction from the qubits between each pair."""
        sum_tot = 0
        for (q1, q2) in pairs:
            res = results[q2] - results[q1]
            parity = 0
            for q in _parity_indices(q1, q2):
                parity += results[q]
            # map even/odd parity sum to +1/-1
            parity = 1 - (parity % 2) * 2
            sum_tot += np.mean(res * parity)
        return -t * sum_tot

    if measurement_type == "onsite":
        return analyzeO
    if measurement_type == "vert":
        return analyzeV
    if measurement_type in ["horiz", "vert0", "vert1"]:
        return analyzeH
    raise ValueError(f"Unknown measurement type: {measurement_type!r}")
def analyze_exact(measurement_type, pairs, nh, nv, t=1, U=2, **kwargs):
    """Return a function which analyzes exact results for a measurement type.

    Unlike :func:`analyze` (which averages sampled shots), the functions
    returned here index ``results`` by big-endian basis-state integers and
    sum ``np.abs(results[i])**2`` — i.e. ``results`` is expected to hold
    amplitudes/probabilities per basis state (presumably a statevector —
    TODO confirm with callers).

    Args:
        measurement_type -- onsite, horizontal or vertical measurement
        pairs -- pairs of qubits which should be combined for measurement
        nh -- number of horizontal sites
        nv -- number of vertical sites
        t -- hopping interaction parameter
        U -- on-site interaction parameter

    Returns:
        Function for analysis of results (implicitly ``None`` for an
        unrecognized measurement type).
    """
    if __debug__:
        module_logger.debug(f"Preparing exact analysis function for {measurement_type}")
    def _parity_indices(q1, q2, nh):
        """Internal function giving parity for qubits q1 to q2."""
        index_list = range(q1+1, q2)
        return index_list
    # Enumerate every assignment of the remaining total_len - 2 qubits;
    # the two qubits of each pair are spliced in with fixed values below.
    # NOTE(review): exponential in qubit count — only viable for small
    # lattices.
    total_len = nh*nv*2
    import itertools
    lst = list(itertools.product([0, 1], repeat = total_len - 2))
    if measurement_type == "onsite":
        # basis states where both qubits of a pair read 1
        all_indices = []
        for (q1, q2) in pairs:
            all_indices += [cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(1,)+element[q2-1:]) for element in lst]
    if measurement_type in ["horiz"]:
        # states contributing +1 (q1=0, q2=1) vs -1 (q1=1, q2=0)
        all_indices_plus = []
        all_indices_minus = []
        for (q1, q2) in pairs:
            all_indices_plus += [cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]) for element in lst]
            all_indices_minus += [cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]) for element in lst]
    if measurement_type in ["vert0", "vert1", "vert"]:
        # like horizontal, but the sign flips with the parity of the
        # qubits lying between q1 and q2 in the ordering
        all_indices_plus = []
        all_indices_minus = []
        for (q1, q2) in pairs:
            parity = 1
            for element in lst:
                # parity of the bits strictly between the pair -> +1/-1
                parity = 1 - (sum(element[q1:q2-1]) % 2) * 2
                if parity == 1:
                    all_indices_plus.append(cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]))
                    all_indices_minus.append(cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]))
                else:
                    all_indices_minus.append(cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]))
                    all_indices_plus.append(cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]))
    def analyzeO(results):
        """Analyzes onsite measurement results.
        """
        # total probability weight of doubly-occupied configurations
        sum_tot = np.sum(np.abs(results[all_indices])**2)
        # print(f'Onsite: {U * sum_tot}')
        return U * sum_tot
    def analyzeH(results):
        """Analyzes horizontal measurement results."""
        sum_tot = np.sum(np.abs(results[all_indices_plus])**2) - np.sum(np.abs(results[all_indices_minus])**2)
        # print(f'Horiz: {-t * sum_tot}')
        return -t * sum_tot
    def analyzeV(results):
        """Analyzes vertical measurement results (applies parity corrections)."""
        #TODO: fix this one... parity needs to be handled..
        # NOTE(review): identical to analyzeH; the parity split happens in
        # the index construction above — confirm this is sufficient.
        sum_tot = np.sum(np.abs(results[all_indices_plus])**2) - np.sum(np.abs(results[all_indices_minus])**2)
        return -t * sum_tot
    if measurement_type == "onsite":
        return analyzeO
    if measurement_type == "vert":
        return analyzeV
    if measurement_type in ["horiz", "vert0", "vert1"]:
        return analyzeH
def analyze_exact_mgd(measurement_type, pairs, nh, nv, t=1, U=2, **kwargs):
    """Return an exact analysis function that carries uncertainty estimates.

    Identical index construction to :func:`analyze_exact`, but the returned
    functions wrap the sum in a :class:`uncertainties.ufloat`, with the
    standard deviation taken as ``sqrt(number of indexed entries)`` —
    NOTE(review): looks like a Poisson-style error estimate; confirm.

    Args:
        measurement_type -- onsite, horizontal or vertical measurement
        pairs -- pairs of qubits which should be combined for measurement
        nh -- number of horizontal sites
        nv -- number of vertical sites
        t -- hopping interaction parameter
        U -- on-site interaction parameter

    Returns:
        Function for analysis of results (implicitly ``None`` for an
        unrecognized measurement type).
    """
    if __debug__:
        module_logger.debug(f"Preparing exact analysis function for {measurement_type}")
    def _parity_indices(q1, q2, nh):
        """Internal function giving parity for qubits q1 to q2."""
        index_list = range(q1+1, q2)
        return index_list
    print(f"analyze_exact_mgd {measurement_type}")
    # Enumerate every assignment of the remaining total_len - 2 qubits;
    # exponential in qubit count — only viable for small lattices.
    total_len = nh*nv*2
    import itertools
    lst = list(itertools.product([0, 1], repeat = total_len - 2))
    if measurement_type == "onsite":
        # basis states where both qubits of a pair read 1
        all_indices = []
        for (q1, q2) in pairs:
            all_indices += [cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(1,)+element[q2-1:]) for element in lst]
    if measurement_type in ["horiz"]:
        # states contributing +1 (q1=0, q2=1) vs -1 (q1=1, q2=0)
        all_indices_plus = []
        all_indices_minus = []
        for (q1, q2) in pairs:
            all_indices_plus += [cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]) for element in lst]
            all_indices_minus += [cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]) for element in lst]
    if measurement_type in ["vert0", "vert1", "vert"]:
        # like horizontal, but the sign flips with the parity of the
        # qubits lying between q1 and q2 in the ordering
        all_indices_plus = []
        all_indices_minus = []
        for (q1, q2) in pairs:
            parity = 1
            for element in lst:
                # parity of the bits strictly between the pair -> +1/-1
                parity = 1 - (sum(element[q1:q2-1]) % 2) * 2
                if parity == 1:
                    all_indices_plus.append(cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]))
                    all_indices_minus.append(cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]))
                else:
                    all_indices_minus.append(cirq.big_endian_bits_to_int(element[:q1]+(0,)+element[q1:q2-1]+(1,)+element[q2-1:]))
                    all_indices_plus.append(cirq.big_endian_bits_to_int(element[:q1]+(1,)+element[q1:q2-1]+(0,)+element[q2-1:]))
    def analyzeO(results):
        """Analyzes onsite measurement results.
        """
        # value = total weight of doubly-occupied configurations;
        # std dev = sqrt(number of contributing indices)
        sum_tot = ufloat(np.sum(np.abs(results[all_indices])**2), np.sqrt(len(results[all_indices])))
        # print(f'Onsite: {U * sum_tot}')
        return U * sum_tot
    def analyzeH(results):
        """Analyzes horizontal measurement results."""
        sum_tot = ufloat(np.sum(np.abs(results[all_indices_plus])**2) - np.sum(np.abs(results[all_indices_minus])**2), np.sqrt(len(results[all_indices_plus])) )
        # print(f'Horiz: {-t * sum_tot}')
        return -t * sum_tot
    def analyzeV(results):
        """Analyzes vertical measurement results (applies parity corrections)."""
        #TODO: fix this one... parity needs to be handled..
        # NOTE(review): identical to analyzeH; the parity split happens in
        # the index construction above — confirm this is sufficient.
        sum_tot = ufloat(np.sum(np.abs(results[all_indices_plus])**2) - np.sum(np.abs(results[all_indices_minus])**2), np.sqrt(len(results[all_indices_plus])) )
        return -t * sum_tot
    if measurement_type == "onsite":
        return analyzeO
    if measurement_type == "vert":
        return analyzeV
    if measurement_type in ["horiz", "vert0", "vert1"]:
        return analyzeH
def analyze_mgd(measurement_type, pairs, nh, nv, t=1, U=2, **kwargs):
"""Returns function which analyzes a numpy array of measurement results.
Function it returns assumes results are in numpy array of shape ??
Args:
measurement_type -- onsite, horizontal or vertical measurement
pairs -- pairs of qubits which should be combined for measurement
nh -- number of horizontal sites
nv -- number of vertical sites
t -- hopping interaction parameter
U -- on-site interaction parameter
Returns:
Function for analysis of results
"""
if __debug__:
module_logger.debug(f"Preparing analysis function for {measurement_type}")
def _parity_indices(q1, q2, nh):
"""Internal function giving parity for qubits q1 to q2."""
index_list = range(q1+1, q2)
return index_list
def analyzeO(results):
"""Analyzes onsite measurement results.
"""
sum_tot = ufloat(0.,0.)
for (q1, q2) in pairs:
res = np.mean(results[q1] * results[q2])
std = np.std(results[q1] * | |
})
self.__dhcpv6_relay_forw_sent = t
if hasattr(self, '_set'):
self._set()
def _unset_dhcpv6_relay_forw_sent(self):
    # Reset the dhcpv6-relay-forw-sent leaf to a fresh, unset
    # YANGDynClass counter64 (auto-generated by pyangbind).
    self.__dhcpv6_relay_forw_sent = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="dhcpv6-relay-forw-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='yang:counter64', is_config=False)
total_dropped = __builtin__.property(_get_total_dropped)
invalid_opcode = __builtin__.property(_get_invalid_opcode)
invalid_options = __builtin__.property(_get_invalid_options)
dhcpv6_solicit_received = __builtin__.property(_get_dhcpv6_solicit_received)
dhcpv6_decline_received = __builtin__.property(_get_dhcpv6_decline_received)
dhcpv6_request_received = __builtin__.property(_get_dhcpv6_request_received)
dhcpv6_release_received = __builtin__.property(_get_dhcpv6_release_received)
dhcpv6_confirm_received = __builtin__.property(_get_dhcpv6_confirm_received)
dhcpv6_rebind_received = __builtin__.property(_get_dhcpv6_rebind_received)
dhcpv6_info_request_received = __builtin__.property(_get_dhcpv6_info_request_received)
dhcpv6_relay_reply_received = __builtin__.property(_get_dhcpv6_relay_reply_received)
dhcpv6_adverstise_sent = __builtin__.property(_get_dhcpv6_adverstise_sent)
dhcpv6_reply_sent = __builtin__.property(_get_dhcpv6_reply_sent)
dhcpv6_reconfigure_sent = __builtin__.property(_get_dhcpv6_reconfigure_sent)
dhcpv6_relay_forw_sent = __builtin__.property(_get_dhcpv6_relay_forw_sent)
_pyangbind_elements = OrderedDict([('total_dropped', total_dropped), ('invalid_opcode', invalid_opcode), ('invalid_options', invalid_options), ('dhcpv6_solicit_received', dhcpv6_solicit_received), ('dhcpv6_decline_received', dhcpv6_decline_received), ('dhcpv6_request_received', dhcpv6_request_received), ('dhcpv6_release_received', dhcpv6_release_received), ('dhcpv6_confirm_received', dhcpv6_confirm_received), ('dhcpv6_rebind_received', dhcpv6_rebind_received), ('dhcpv6_info_request_received', dhcpv6_info_request_received), ('dhcpv6_relay_reply_received', dhcpv6_relay_reply_received), ('dhcpv6_adverstise_sent', dhcpv6_adverstise_sent), ('dhcpv6_reply_sent', dhcpv6_reply_sent), ('dhcpv6_reconfigure_sent', dhcpv6_reconfigure_sent), ('dhcpv6_relay_forw_sent', dhcpv6_relay_forw_sent), ])
class yc_state_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcpv6/interfaces/interface/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Operational state data for relay agent interfaces.
    """
    # NOTE: generated code -- change the YANG model and regenerate rather
    # than editing by hand. Leaf storage uses double-underscore (mangled)
    # attributes; public read access goes through the properties at the
    # bottom of the class.
    __slots__ = ('_path_helper', '_extmethods', '__id','__enable','__helper_address','__counters',)

    _yang_name = 'state'
    _yang_namespace = 'http://openconfig.net/yang/relay-agent'

    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Each leaf/container is wrapped in YANGDynClass, which attaches the
        # YANG metadata (type, namespace, config/state flag) to the value.
        self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='oc-if:interface-id', is_config=False)
        self.__enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)
        self.__helper_address = YANGDynClass(unique=True, base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'})), is_leaf=False, yang_name="helper-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='inet:ipv6-address', is_config=False)
        self.__counters = YANGDynClass(base=yc_counters_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=False)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construct from an object that exposes all of this
            # container's elements (e.g. another binding instance).
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were explicitly set on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise return
        # this container's absolute YANG path.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['relay-agent', 'dhcpv6', 'interfaces', 'interface', 'state']

    def _get_id(self):
        """
        Getter method for id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/state/id (oc-if:interface-id)

        YANG Description: Name of the interface on which relay agent is active
        """
        return self.__id

    def _set_id(self, v, load=False):
        """
        Setter method for id, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/state/id (oc-if:interface-id)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_id is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_id() directly.

        YANG Description: Name of the interface on which relay agent is active
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='oc-if:interface-id', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """id must be of a type compatible with oc-if:interface-id""",
                'defined-type': "oc-if:interface-id",
                'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='oc-if:interface-id', is_config=False)""",
            })
        self.__id = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_id(self):
        # Reset the leaf to a fresh, unset YANGDynClass wrapper.
        self.__id = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='oc-if:interface-id', is_config=False)

    def _get_enable(self):
        """
        Getter method for enable, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/state/enable (boolean)

        YANG Description: Enables the relay agent on the referenced interface.
        At least one helper address should also be configured
        for forwarding requested.
        """
        return self.__enable

    def _set_enable(self, v, load=False):
        """
        Setter method for enable, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/state/enable (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_enable is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_enable() directly.

        YANG Description: Enables the relay agent on the referenced interface.
        At least one helper address should also be configured
        for forwarding requested.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """enable must be of a type compatible with boolean""",
                'defined-type': "boolean",
                'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)""",
            })
        self.__enable = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_enable(self):
        self.__enable = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enable", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='boolean', is_config=False)

    def _get_helper_address(self):
        """
        Getter method for helper_address, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/state/helper_address (inet:ipv6-address)

        YANG Description: List of IPv6 addresses of DHCP servers to which the
        relay agent should forward DHCPv6 requests. The relay agent
        is expected to forward DHCPv4/BOOTP requests to all listed
        server addresses when DHCPv6 relaying is enabled globally, or
        on the interface.
        """
        return self.__helper_address

    def _set_helper_address(self, v, load=False):
        """
        Setter method for helper_address, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/state/helper_address (inet:ipv6-address)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_helper_address is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_helper_address() directly.

        YANG Description: List of IPv6 addresses of DHCP servers to which the
        relay agent should forward DHCPv6 requests. The relay agent
        is expected to forward DHCPv4/BOOTP requests to all listed
        server addresses when DHCPv6 relaying is enabled globally, or
        on the interface.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,unique=True, base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'})), is_leaf=False, yang_name="helper-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='inet:ipv6-address', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """helper_address must be of a type compatible with inet:ipv6-address""",
                'defined-type': "inet:ipv6-address",
                'generated-type': """YANGDynClass(unique=True, base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'})), is_leaf=False, yang_name="helper-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='inet:ipv6-address', is_config=False)""",
            })
        self.__helper_address = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_helper_address(self):
        self.__helper_address = YANGDynClass(unique=True, base=TypedListType(allowed_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'})), is_leaf=False, yang_name="helper-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='inet:ipv6-address', is_config=False)

    def _get_counters(self):
        """
        Getter method for counters, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/state/counters (container)

        YANG Description: Counters and statistics for relay agent operation.
        """
        return self.__counters

    def _set_counters(self, v, load=False):
        """
        Setter method for counters, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/state/counters (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_counters is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_counters() directly.

        YANG Description: Counters and statistics for relay agent operation.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=yc_counters_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """counters must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_counters_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=False)""",
            })
        self.__counters = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_counters(self):
        self.__counters = YANGDynClass(base=yc_counters_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_state_counters, is_container='container', yang_name="counters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='container', is_config=False)

    # Read-only public accessors (state container: config false). Note that
    # `id` intentionally shadows the builtin here -- the name comes from the
    # YANG model and is required for the generated API.
    id = __builtin__.property(_get_id)
    enable = __builtin__.property(_get_enable)
    helper_address = __builtin__.property(_get_helper_address)
    counters = __builtin__.property(_get_counters)

    _pyangbind_elements = OrderedDict([('id', id), ('enable', enable), ('helper_address', helper_address), ('counters', counters), ])
class yc_config_openconfig_relay_agent__relay_agent_dhcpv6_interfaces_interface_interface_ref_config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-relay-agent - based on the path /relay-agent/dhcpv6/interfaces/interface/interface-ref/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configured reference to interface / subinterface
"""
__slots__ = ('_path_helper', '_extmethods', '__interface','__subinterface',)
_yang_name = 'config'
_yang_namespace = 'http://openconfig.net/yang/relay-agent'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='leafref', is_config=True)
self.__subinterface = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/relay-agent', defining_module='openconfig-relay-agent', yang_type='leafref', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['relay-agent', 'dhcpv6', 'interfaces', 'interface', 'interface-ref', 'config']
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /relay_agent/dhcpv6/interfaces/interface/interface_ref/config/interface (leafref)
YANG Description: Reference to a base interface. If a reference | |
@staticmethod
def get_points_single(featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid([y_range, x_range])
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
def get_targets(self, gt_bboxes_list, gt_labels_list,
                feat_dims, img_metas):
    """Gets targets for each output type.

    This method is also responsible for splitting the targets up into the
    the separate feature levels, i.e. figures out in which feature level to
    detect each object.

    The output returns labels_list, bboxes, and energy_preds. The bboxes
    will first be split based on max edge size. Then the energy_preds for
    each feature level will be calculated. Finally, the labels_list assigned
    to non-zero energy areas within each bounding box. All other areas will
    contain no label.

    Args:
        gt_bboxes_list (list): A list of tensors containing the ground
            truth bounding boxes.
        gt_labels_list (list): A list of tensors containing the ground
            truth labels_list of each bounding box.
        feat_dims (list): A list of 2-tuples where each element is the
            (h, w)
        img_metas (list): The img_metas as returned from the data loader.

    Returns:
        tuple: A tuple of bboxes, labels_list, energy_preds, and masks.
            This will be split first by image then by feature level,
            i.e. labels_list will be a list n lists, where n is the number
            of images, and each of those lists will have s elements, where s
            is the number of feature levels/heads. Each of those lists will
            then hold n tensors, n being the number of feature levels, and
            each tensor being the labels_list that are assigned to that
            feature level.

            bboxes will have shape (h, w, 4)
            labels will have shape (h, w)
            energy will have shape (h, w)
            masks will have shape (h, w) and is a boolean tensor
    """
    assert len(gt_bboxes_list) == len(gt_labels_list)
    # Sort gt_bboxes_list by object max edge
    split_bboxes = self.split_bboxes(gt_bboxes_list, gt_labels_list)
    # Calculate energy_preds for image for each feature level
    gt_bboxes = []
    gt_energy = []
    gt_labels = []
    gt_masks = []
    for i, bboxes in enumerate(split_bboxes):  # per image
        image_energy = []
        image_classes = []
        image_bboxes = []
        image_masks = []
        for j, feat_level_bboxes in enumerate(bboxes):  # per feature level
            img_shape = img_metas[i]['pad_shape']
            # get_energy_single returns a torch.return_types.max:
            # .values is the per-cell energy, .indices the index of the
            # box (within feat_level_bboxes) that produced that energy.
            feature_energy = self.get_energy_single(feat_dims[j],
                                                    img_shape,
                                                    feat_level_bboxes)
            image_energy.append(feature_energy.values)
            # Using the image_energy, create a mask of background areas.
            # This is first made with an int tensor before being
            # converted to a bool since where_cuda does not implement bool.
            feature_mask = torch.where(
                feature_energy.values > 0,
                torch.tensor(1, dtype=torch.int8,
                             device=feature_energy.values.device),
                torch.tensor(0, dtype=torch.int8,
                             device=feature_energy.values.device)
            ).bool()
            image_masks.append(feature_mask)
            # Then, using feature_energy.indices, get the class for each
            # grid cell that isn't background Entire area of non-zero
            # energy within a single bounding box should have the same
            # label.
            # (The class label is stored as the last column of
            # feat_level_bboxes, as built by split_bboxes.)
            feature_classes = torch.zeros_like(feature_mask,
                                               dtype=torch.float)
            feature_classes[feature_mask] = (
                feat_level_bboxes[feature_energy.indices[feature_mask]]
                [:, -1]
            )
            image_classes.append(feature_classes)
            # Finally, also assign bounding box values
            feature_bboxes = torch.zeros([feat_dims[j][0],
                                          feat_dims[j][1],
                                          4],
                                         dtype=torch.float,
                                         device=feat_level_bboxes.device)
            feature_bboxes[feature_mask] = (
                feat_level_bboxes[feature_energy.indices[feature_mask]]
                [:, 0:4]
            )
            image_bboxes.append(feature_bboxes)
        gt_energy.append(image_energy)
        gt_labels.append(image_classes)
        gt_bboxes.append(image_bboxes)
        gt_masks.append(image_masks)
    return gt_bboxes, gt_labels, gt_energy, gt_masks
def split_bboxes(self, bbox_list, labels_list):
    """Splits bboxes based on max edge length.

    Args:
        bbox_list (list): The list of bounding boxes to be sorted. The
            list contains b tensors, where b is the batch size. Each
            tensor must be in the shape (n, 4) where n is the number of
            bounding boxes.
        labels_list (list): The list of ground truth labels associated with
            the bounding boxes. The list contains b tensors, where b is
            the batch size. Each tensor must be in the shape (n) where n
            is the equivalent to the number of bounding boxes.

    Returns:
        list: A list of length b, each element being a list of length s,
            where s is the number of heads used in the network. Each of
            these lists contains an (n, 5) tensor, which represents the
            bounding boxes with dim 5 being the class.
    """
    if self.assign == "all":
        # Every feature level receives every box (label appended as a
        # 5th column).
        # NOTE(review): this branch only reads bbox_list[0]/labels_list[0]
        # and returns a single-image list -- looks like it assumes batch
        # size 1; confirm against callers.
        out_list = []
        for min_length, max_length in self.regress_ranges:
            out_list.append(torch.cat([bbox_list[0], labels_list[0].to(dtype=torch.float).unsqueeze(1)], dim=1))
        out_list = [out_list]
    else:
        # max_indices is a 2 dim tensor, where dim 0 is the sorted indices
        # and dim 1 is the max_edge value
        max_indices = self.sort_bboxes(bbox_list, self.assign)
        # TODO: Figure out what to do with background class.
        # Then move them to the appropriate level based on strides. The edge
        # size for each level should be [prev_regress_range, regress_range).
        # e.g. If we have ranges((-1, 4), (4, 8), (8, INF)), then we have edge
        # sizes [-1, 4), [4, 8), [8, INF)
        split_inds = []
        for max_index in max_indices:
            indices = []
            for min_length, max_length in self.regress_ranges:
                # Last position (in the edge-sorted order) whose edge
                # statistic falls inside this level's range; None when no
                # box lands on this level.
                val = max_index[1]
                val = (min_length <= val) * (val < max_length)
                val = val.nonzero()
                if val.nelement() != 0:
                    val = val[-1].item()
                else:
                    val = None
                indices.append(val)
            # indices is now the indices of the elements as split by max_edge,
            # split properly into each feature level.
            #
            # We now split the actual bboxes into the values
            end = 0
            temp_inds = []
            for i in range(len(indices)):
                if indices[i] is None:
                    temp_inds.append(
                        torch.empty(0, device=max_indices[0][1].device)
                    )
                    continue
                start = end
                end = indices[i] + 1
                temp_inds.append(max_index[0][start:end])
            split_inds.append(temp_inds)
        # split_bbox_ind is appended an s length list, where each element
        # contains all the indices that belong to that feature level.
        out_list = []
        for i in range(len(bbox_list)):  # Iterate through each image
            temp_list = []
            for inds in split_inds[i]:  # Iterate through head
                # Grab bboxes with the given indices, then the labels
                bbox = bbox_list[i][inds.to(dtype=torch.long)]
                labels = labels_list[i][inds.to(dtype=torch.long)].to(
                    dtype=torch.float)
                # Labels must be unsqueezed to allow concatenation
                temp_list.append(
                    torch.cat((bbox, labels.unsqueeze(1)), dim=1)
                )
            out_list.append(temp_list)
    return out_list
@staticmethod
def sort_bboxes(bbox_list, assign="max_edge"):
"""Sorts bboxes based on size.
Args:
bbox_list (list): The list of bounding boxes to be sorted. The
bounding boxes must be tensors in the shape (n, 4).
Returns:
list: A list of (2, n) tensors, where n is the number of bboxes
and 2 being the indice and max edge length of the
corresponding tensor.
"""
out_list = []
for bboxes in bbox_list:
edges = torch.cat((bboxes[:, 2] - bboxes[:, 0],
bboxes[:, 3] - bboxes[:, 1]))
# Split to a 2-dim array, dim 0 being the x length and dim 1
# being the y length
edges = edges.reshape(2, bboxes.shape[0])
# Then transpose it to associate both x and y with the same
# value. This is done simply because it is conceptually easier to
# understand.
edges = edges.transpose(0, 1)
if assign == "max_edge":
# Get the max, then get the sorted indices.
proc_edges = edges.max(1).values
elif assign == "min_edge":
proc_edges = edges.min(1).values
elif assign == "avg_edge":
proc_edges = edges.mean(1)
else:
raise ValueError("unknown assign parameter")
sorted_inds = proc_edges.argsort()
# Concatenate them and add them to the out_list
out_list.append(torch.cat((sorted_inds.to(dtype=torch.float),
proc_edges[sorted_inds]))
.reshape(2, bboxes.shape[0]))
return out_list
def get_energy_single(self, feat_dim, img_size, bboxes):
"""Gets energy for a single feature level based on deep watershed.
Args:
feat_dim (tuple): A 2-tuple containing the height and width of the
current feature level. (h, w)
img_size (tuple): A 2-tuple containg the size of the image. Used
for scaling the bboxes to the feature level dimensions. (h, w)
bboxes (torch.Tensor): A tensor of the bboxes that belong the this
feature level with shape (n, 4).
Notes:
The energy targets are calculated as:
E_max * argmax_{c \in C}[1 - \sqrt{dist to bbox center} / r]
for every position in the grid.
- r is a hyperparameter we would like to minimize.
- E_max is self.max_energy
Returns:
torch.return_types.max: The max energy values and the bounding
box they belong to.
"""
type_dict = {'dtype': bboxes.dtype, 'device': bboxes.device}
if not bboxes.shape[0] == 0:
# First create an n dimensional tensor, where n is the number of
# bboxes
energy_layers = [
torch.zeros([feat_dim[0], feat_dim[1]], **type_dict)
for _ in range(bboxes.shape[0])]
zero_tensor = torch.tensor(0., **type_dict)
# Now cast each bbox to each cell in the energy layer that it covers
# First bounds of grid squares that have | |
# reordering_generators/c_generator.py
"""
Reordering generator for C source code.
This is an ANTLR generated parse tree listener, adapted to
walk a C parse tree, randomly introduce multi scale reorderings
and regenerate the source code with these reorderings.
"""
import random
from antlr4 import ParseTreeWalker
from antlr4.tree.Tree import TerminalNodeImpl
from parsers.C.CListener import CListener
from parsers.C.CParser import CParser
class CGenerator(CListener):
    """
    Parse Tree Listener for the C language.
    Enter- and exit functions generated by ANTLR.
    """
    # Reordering granularities. MODE selects which one a walk applies.
    MODES = {
        "SUB_STATEMENT": 0,
        "STATEMENTS": 1,
        "FUNCTIONS": 2,
        "CONDITIONALS": 3
    }
    MODE = MODES["STATEMENTS"]

    # Context types whose children may be shuffled in SUB_STATEMENT mode.
    SMALL_REORDERED_TYPES = [
        CParser.ParameterListContext,  # Function parameters
        CParser.ArgumentExpressionListContext,  # Arguments in function call
        CParser.MultiplicativeExpressionContext,  # *, /, %
        CParser.AdditiveExpressionContext,  # +, -
    ]
    # Statement kinds treated as atomic, reorderable units in STATEMENTS mode.
    SMALL_STATEMENTS = [
        CParser.ExpressionStatementContext,
        CParser.DeclarationContext,
        CParser.JumpStatementContext
    ]
    # Context types shuffled at file scope (FUNCTIONS mode).
    TOP_LEVEL_REORDERED_TYPES = [
        CParser.TranslationUnitContext
    ]
    def __init__(self, tree, file_name):
        """
        Args:
            tree: Root of the ANTLR parse tree to reorder.
            file_name: Path of the input source file; only its basename is
                used when building the output path.
        """
        super().__init__()
        self.tree = tree
        self.hashed_tree = None
        self.current = None
        self.sorted_trees = {}
        self.sub_tree_sizes = []
        # NOTE(review): machine-specific absolute output directory; should be
        # made configurable before running outside the original author's setup.
        self.out_file = '/home/philo/Documents/uva/Jaar_3/thesis/CRDS/synthetic_data/reordered_statements/C/Graphics/' + file_name.split('/')[-1]
        # Count of reorderings actually performed; written into the output.
        self.reorderings_executed = 0
def start(self):
walker = ParseTreeWalker()
walker.walk(self, self.tree)
def is_function(self, ctx):
is_function = False
filtered = [c for c in ctx.children if type(c) != TerminalNodeImpl]
while len(filtered) > 0:
c_ctx = filtered[0]
if type(c_ctx) == CParser.FunctionDefinitionContext:
is_function = True
break
filtered = [c for c in c_ctx.children if type(c) != TerminalNodeImpl]
return is_function
def is_small_stmt(self, ctx):
is_small_stmt = False
filtered = [c for c in ctx.children if type(c) != TerminalNodeImpl]
while len(filtered) == 1:
c_ctx = filtered[0]
if type(c_ctx) in self.SMALL_STATEMENTS:
is_small_stmt = True
break
filtered = [c for c in c_ctx.children if type(c) != TerminalNodeImpl]
return is_small_stmt
def is_stmt_in_blockitem(self, ctx):
if type(ctx) != CParser.BlockItemContext:
return False
filtered = [c for c in ctx.children if type(c) != TerminalNodeImpl]
statement = filtered[0]
return type(statement) == CParser.StatementContext
def is_case_stmt(self, ctx):
if type(ctx) != CParser.BlockItemContext:
return False
filtered = [c for c in ctx.children if type(c) != TerminalNodeImpl]
statement = filtered[0]
if type(statement) != CParser.StatementContext:
return False
filtered = [c for c in statement.children if type(c) != TerminalNodeImpl]
return type(filtered[0]) == CParser.LabeledStatementContext
def shuffle_children(self, ctx):
"""
Shuffle the children of a Parser context node.
We need to leave TerminalNodeImpl types in the same place
(those are commas, brackets etc.)
"""
reorder = []
indices = []
cases = {}
curr_case = None
is_switch_case = False
for i, child in enumerate(ctx.children):
if type(child) != TerminalNodeImpl:
if ((self.MODE == self.MODES["FUNCTIONS"] and not self.is_function(child)) or
(self.MODE == self.MODES["STATEMENTS"] and not self.is_small_stmt(child))):
continue
elif self.MODE == self.MODES["CONDITIONALS"]:
if type(ctx) == CParser.BlockItemListContext:
if not self.is_stmt_in_blockitem(child):
continue
if self.is_case_stmt(child):
is_switch_case = True
cases[i] = []
curr_case = i
indices.append(i)
elif is_switch_case:
cases[curr_case].append(i)
continue
reorder.append(child)
indices.append(i)
if is_switch_case:
old_indices = list(indices)
if len(indices) < 2:
return
while True:
if indices != old_indices:
break
random.shuffle(indices)
new_children = []
for i in indices:
new_children.append(ctx.children[i])
stmts = [ctx.children[j] for j in cases[i]]
new_children.extend(stmts)
ctx.children = list(new_children)
self.reorderings_executed += 1
else:
old_order = list(reorder)
reordered = False
if len(reorder) < 2:
return
while True:
for i, c in enumerate(reorder):
if id(c) != id(old_order[i]):
reordered = True
break
if reordered:
break
random.shuffle(reorder)
self.reorderings_executed += 1
for j, child in enumerate(reorder):
index = indices[j]
ctx.children[index] = child
def switch_if_else(self, ctx):
if type(ctx) != CParser.SelectionStatementContext:
return
children = [child for child in ctx.children if type(child) != TerminalNodeImpl]
if len(children) != 3:
return
if type(children[0]) != CParser.ExpressionContext:
print("IF WITHOUT CONDITIONAL??")
return
tmp = list(ctx.children)
ctx.children[4] = tmp[6]
ctx.children[6] = tmp[4]
self.reorderings_executed += 1
    def enter_rule(self, ctx):
        """Shared enter hook for every rule; currently a no-op."""
        pass
    def exit_rule(self, ctx):
        """
        If the node is of a type that needs
        reordering, reorder its children.

        Dispatches on the class-level MODE. Note the elif chain: the final
        TOP_LEVEL_REORDERED_TYPES branch can only be reached in modes not
        matched above (i.e. FUNCTIONS), since STATEMENTS, CONDITIONALS and
        SUB_STATEMENT are consumed by the earlier branches.
        """
        if self.MODE == self.MODES['STATEMENTS']:
            self.shuffle_children(ctx)
        elif self.MODE == self.MODES["CONDITIONALS"]:
            if type(ctx) == CParser.BlockItemListContext:
                self.shuffle_children(ctx)
            elif type(ctx) == CParser.SelectionStatementContext:
                self.switch_if_else(ctx)
        elif self.MODE == self.MODES['SUB_STATEMENT']:
            if type(ctx) in self.SMALL_REORDERED_TYPES:
                self.shuffle_children(ctx)
        elif type(ctx) in self.TOP_LEVEL_REORDERED_TYPES:
            self.shuffle_children(ctx)
    def enterCompilationUnit(self, ctx:CParser.CompilationUnitContext):
        """Compilation Unit subtree, this is the root node."""
        self.enter_rule(ctx)
    def exitCompilationUnit(self, ctx:CParser.CompilationUnitContext):
        """Finish the walk and write the reordered source to self.out_file."""
        self.exit_rule(ctx)
        with open(self.out_file, 'w+') as f:
            f.write(f'// REORDERINGS EXECUTED: {self.reorderings_executed}\n\n')
            # getText() regenerates source text from the (now reordered)
            # tree; original whitespace is not preserved.
            f.write(ctx.getText())
    # --------------------------------------------------------------------
    # Below are all the enter- and exit methods for every ctx type.
    # Each simply forwards to the shared enter_rule/exit_rule hooks,
    # which decide whether that node's children get reordered.
    # --------------------------------------------------------------------

    # CParser#primaryExpression
    def enterPrimaryExpression(self, ctx:CParser.PrimaryExpressionContext):
        self.enter_rule(ctx)

    def exitPrimaryExpression(self, ctx:CParser.PrimaryExpressionContext):
        self.exit_rule(ctx)

    # CParser#genericSelection
    def enterGenericSelection(self, ctx:CParser.GenericSelectionContext):
        self.enter_rule(ctx)

    def exitGenericSelection(self, ctx:CParser.GenericSelectionContext):
        self.exit_rule(ctx)

    # CParser#genericAssocList
    def enterGenericAssocList(self, ctx:CParser.GenericAssocListContext):
        self.enter_rule(ctx)

    def exitGenericAssocList(self, ctx:CParser.GenericAssocListContext):
        self.exit_rule(ctx)

    # CParser#genericAssociation
    def enterGenericAssociation(self, ctx:CParser.GenericAssociationContext):
        self.enter_rule(ctx)

    def exitGenericAssociation(self, ctx:CParser.GenericAssociationContext):
        self.exit_rule(ctx)

    # CParser#postfixExpression
    def enterPostfixExpression(self, ctx:CParser.PostfixExpressionContext):
        self.enter_rule(ctx)

    def exitPostfixExpression(self, ctx:CParser.PostfixExpressionContext):
        self.exit_rule(ctx)

    # CParser#argumentExpressionList
    def enterArgumentExpressionList(self, ctx:CParser.ArgumentExpressionListContext):
        self.enter_rule(ctx)

    def exitArgumentExpressionList(self, ctx:CParser.ArgumentExpressionListContext):
        self.exit_rule(ctx)

    # CParser#unaryExpression
    def enterUnaryExpression(self, ctx:CParser.UnaryExpressionContext):
        self.enter_rule(ctx)

    def exitUnaryExpression(self, ctx:CParser.UnaryExpressionContext):
        self.exit_rule(ctx)

    # CParser#unaryOperator
    def enterUnaryOperator(self, ctx:CParser.UnaryOperatorContext):
        self.enter_rule(ctx)

    def exitUnaryOperator(self, ctx:CParser.UnaryOperatorContext):
        self.exit_rule(ctx)

    # CParser#castExpression
    def enterCastExpression(self, ctx:CParser.CastExpressionContext):
        self.enter_rule(ctx)

    def exitCastExpression(self, ctx:CParser.CastExpressionContext):
        self.exit_rule(ctx)

    # CParser#multiplicativeExpression
    def enterMultiplicativeExpression(self, ctx:CParser.MultiplicativeExpressionContext):
        self.enter_rule(ctx)

    def exitMultiplicativeExpression(self, ctx:CParser.MultiplicativeExpressionContext):
        self.exit_rule(ctx)

    # CParser#additiveExpression
    def enterAdditiveExpression(self, ctx:CParser.AdditiveExpressionContext):
        self.enter_rule(ctx)

    def exitAdditiveExpression(self, ctx:CParser.AdditiveExpressionContext):
        self.exit_rule(ctx)

    # CParser#shiftExpression
    def enterShiftExpression(self, ctx:CParser.ShiftExpressionContext):
        self.enter_rule(ctx)

    def exitShiftExpression(self, ctx:CParser.ShiftExpressionContext):
        self.exit_rule(ctx)

    # CParser#relationalExpression
    def enterRelationalExpression(self, ctx:CParser.RelationalExpressionContext):
        self.enter_rule(ctx)

    def exitRelationalExpression(self, ctx:CParser.RelationalExpressionContext):
        self.exit_rule(ctx)

    # CParser#equalityExpression
    def enterEqualityExpression(self, ctx:CParser.EqualityExpressionContext):
        self.enter_rule(ctx)

    def exitEqualityExpression(self, ctx:CParser.EqualityExpressionContext):
        self.exit_rule(ctx)
# Enter a parse tree produced by CParser#andExpression.
def enterAndExpression(self, ctx:CParser.AndExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#andExpression.
def exitAndExpression(self, ctx:CParser.AndExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#exclusiveOrExpression.
def enterExclusiveOrExpression(self, ctx:CParser.ExclusiveOrExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#exclusiveOrExpression.
def exitExclusiveOrExpression(self, ctx:CParser.ExclusiveOrExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#inclusiveOrExpression.
def enterInclusiveOrExpression(self, ctx:CParser.InclusiveOrExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#inclusiveOrExpression.
def exitInclusiveOrExpression(self, ctx:CParser.InclusiveOrExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#logicalAndExpression.
def enterLogicalAndExpression(self, ctx:CParser.LogicalAndExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#logicalAndExpression.
def exitLogicalAndExpression(self, ctx:CParser.LogicalAndExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#logicalOrExpression.
def enterLogicalOrExpression(self, ctx:CParser.LogicalOrExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#logicalOrExpression.
def exitLogicalOrExpression(self, ctx:CParser.LogicalOrExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#conditionalExpression.
def enterConditionalExpression(self, ctx:CParser.ConditionalExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#conditionalExpression.
def exitConditionalExpression(self, ctx:CParser.ConditionalExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#assignmentExpression.
def enterAssignmentExpression(self, ctx:CParser.AssignmentExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#assignmentExpression.
def exitAssignmentExpression(self, ctx:CParser.AssignmentExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#assignmentOperator.
def enterAssignmentOperator(self, ctx:CParser.AssignmentOperatorContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#assignmentOperator.
def exitAssignmentOperator(self, ctx:CParser.AssignmentOperatorContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#expression.
def enterExpression(self, ctx:CParser.ExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#expression.
def exitExpression(self, ctx:CParser.ExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#constantExpression.
def enterConstantExpression(self, ctx:CParser.ConstantExpressionContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#constantExpression.
def exitConstantExpression(self, ctx:CParser.ConstantExpressionContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#declaration.
def enterDeclaration(self, ctx:CParser.DeclarationContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#declaration.
def exitDeclaration(self, ctx:CParser.DeclarationContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#declarationSpecifiers.
def enterDeclarationSpecifiers(self, ctx:CParser.DeclarationSpecifiersContext):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#declarationSpecifiers.
def exitDeclarationSpecifiers(self, ctx:CParser.DeclarationSpecifiersContext):
self.exit_rule(ctx)
# Enter a parse tree produced by CParser#declarationSpecifiers2.
def enterDeclarationSpecifiers2(self, ctx:CParser.DeclarationSpecifiers2Context):
self.enter_rule(ctx)
# Exit a parse tree produced by CParser#declarationSpecifiers2.
def exitDeclarationSpecifiers2(self, ctx:CParser.DeclarationSpecifiers2Context):
self.exit_rule(ctx)
# | |
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
The MapReduce API allows to write the components of a MapReduce application.
The basic MapReduce components (:class:`Mapper`, :class:`Reducer`,
:class:`RecordReader`, etc.) are provided as abstract classes that
must be subclassed by the developer, providing implementations for all
methods called by the framework.
"""
import json
from abc import abstractmethod
from pydoop.utils.py3compat import ABC
class PydoopError(Exception):
    """Exception type for Pydoop-specific errors."""
    pass
class Counter(object):
    """
    An interface to the Hadoop counters infrastructure.
    Counter objects are instantiated and directly manipulated by the
    framework; users get and update them via the :class:`Context`
    interface.
    """
    def __init__(self, counter_id):
        # Framework-assigned identifier for this counter.
        self.id = counter_id
    def get_id(self):
        """Return the framework-assigned ID of this counter."""
        return self.id
class JobConf(dict):
    """
    Configuration properties assigned to this job.
    JobConf objects are instantiated by the framework and support the
    same interface as dictionaries, plus a few methods that perform
    automatic type conversion::
        >>> jc['a']
        '1'
        >>> jc.get_int('a')
        1
    """
    def get_int(self, key, default=None):
        """
        Same as :meth:`dict.get`, but the value is converted to an int.
        """
        value = self.get(key, default)
        return None if value is None else int(value)
    def get_float(self, key, default=None):
        """
        Same as :meth:`dict.get`, but the value is converted to a float.
        """
        value = self.get(key, default)
        return None if value is None else float(value)
    def get_bool(self, key, default=None):
        """
        Same as :meth:`dict.get`, but the value is converted to a bool.
        The boolean value is considered, respectively, :obj:`True` or
        :obj:`False` if the string is equal, ignoring case, to
        ``'true'`` or ``'false'``.
        """
        v = self.get(key, default)
        # v == default means the key was missing: return the default as-is.
        if v != default:
            v = v.strip().lower()
            if v == 'true':
                v = True
            elif v == 'false':
                v = False
            elif default is None:
                # No fallback available: an unrecognized string is an error.
                raise RuntimeError("invalid bool string: %s" % v)
            else:
                # Unrecognized string, but the caller supplied a fallback.
                v = default
        return v
    def get_json(self, key, default=None):
        """
        Same as :meth:`dict.get`, but the value is parsed as JSON.
        """
        value = self.get(key, default)
        return None if value is None else json.loads(value)
class Context(ABC):
    """
    Context objects are used for communication between the framework
    and the Mapreduce application. These objects are instantiated by the
    framework and passed to user methods as parameters::
        class Mapper(api.Mapper):
            def map(self, context):
                key, value = context.key, context.value
                ...
                context.emit(new_key, new_value)
    """
    @property
    def job_conf(self):
        """
        MapReduce job configuration as a :class:`JobConf` object.
        """
        return self.get_job_conf()
    @abstractmethod
    def get_job_conf(self):
        """Return the job configuration (see :attr:`job_conf`)."""
        pass
    @property
    def key(self):
        """
        Input key.
        """
        return self.get_input_key()
    @abstractmethod
    def get_input_key(self):
        """Return the current input key (see :attr:`key`)."""
        pass
    @property
    def value(self):
        """
        Input value.
        """
        return self.get_input_value()
    @abstractmethod
    def get_input_value(self):
        """Return the current input value (see :attr:`value`)."""
        pass
    @abstractmethod
    def emit(self, key, value):
        """
        Emit a key, value pair to the framework.
        """
        pass
    @abstractmethod
    def progress(self):
        """Report progress to the framework."""
        pass
    @abstractmethod
    def set_status(self, status):
        """
        Set the current status.
        :type status: str
        :param status: a description of the current status
        """
        pass
    @abstractmethod
    def get_counter(self, group, name):
        """
        Get a :class:`Counter` from the framework.
        :type group: str
        :param group: counter group name
        :type name: str
        :param name: counter name
        The counter can be updated via :meth:`increment_counter`.
        """
        pass
    @abstractmethod
    def increment_counter(self, counter, amount):
        """
        Update a :class:`Counter` by the specified amount.
        """
        pass
class MapContext(Context):
    """
    The context given to the mapper.
    """
    @property
    def input_split(self):
        """\
        The current input split as an :class:`~.pipes.InputSplit` object.
        """
        return self.get_input_split()
    @abstractmethod
    def get_input_split(self, raw=False):
        """\
        Get the current input split.
        If ``raw`` is :obj:`False` (the default), return an
        :class:`~.pipes.InputSplit` object; if it's :obj:`True`, return
        a byte string (the unserialized split as sent via the downlink).
        """
        pass
    @property
    def input_key_class(self):
        """
        Return the type of the input key.
        """
        return self.get_input_key_class()
    @abstractmethod
    def get_input_key_class(self):
        """Return the type of the input key (see :attr:`input_key_class`)."""
        pass
    @property
    def input_value_class(self):
        """
        Return the type of the input value.
        """
        return self.get_input_value_class()
    @abstractmethod
    def get_input_value_class(self):
        """
        Return the type of the input value.
        """
        pass
class ReduceContext(Context):
    """
    The context given to the reducer.
    """
    @property
    def values(self):
        """
        Input values for the current key.
        """
        return self.get_input_values()
    @abstractmethod
    def get_input_values(self):
        """Return the input values for the current key (see :attr:`values`)."""
        pass
    @abstractmethod
    def next_value(self):
        """
        Return :obj:`True` if there is another value that can be processed.
        """
        pass
class Closable(ABC):
    """
    Base class for components that may perform cleanup in :meth:`close`.
    """
    def close(self):
        """
        Called after the object has finished its job.
        Overriding this method is **not** required.
        """
        pass
class Mapper(Closable):
    """
    Maps input key/value pairs to a set of intermediate key/value pairs.
    """
    def __init__(self, context=None):
        # Default the context to None for consistency with Reducer,
        # RecordReader and RecordWriter; backward compatible, since all
        # existing callers that pass a context behave exactly as before.
        self.context = context
    @abstractmethod
    def map(self, context):
        """
        Called once for each key/value pair in the input
        split. Applications must override this, emitting an output
        key/value pair through the context.
        :type context: :class:`MapContext`
        :param context: the context object passed by the
          framework, used to get the input key/value pair and emit the
          output key/value pair.
        """
        assert isinstance(context, MapContext)
class Reducer(Closable):
    """
    Reduces a set of intermediate values which share a key to a
    (possibly) smaller set of values.
    """
    def __init__(self, context=None):
        # The context is optional at construction time; the framework
        # also passes one to :meth:`reduce` on every call.
        self.context = context
    @abstractmethod
    def reduce(self, context):
        """
        Called once for each key. Applications must override this, emitting
        an output key/value pair through the context.
        :type context: :class:`ReduceContext`
        :param context: the context object passed by
          the framework, used to get the input key and corresponding
          set of values and emit the output key/value pair.
        """
        assert isinstance(context, ReduceContext)
class Partitioner(ABC):
    r"""
    Controls the partitioning of intermediate keys output by the
    :class:`Mapper`\ . The key (or a subset of it) is used to derive the
    partition, typically by a hash function. The total number of
    partitions is the same as the number of reduce tasks for the
    job. Hence this controls which of the *m* reduce tasks the
    intermediate key (and hence the record) is sent to for reduction.
    """
    def __init__(self, context=None):
        # Default the context to None for consistency with Reducer,
        # RecordReader and RecordWriter (backward compatible).
        self.context = context
    @abstractmethod
    def partition(self, key, num_of_reduces):
        r"""
        Get the partition number for ``key`` given the total number of
        partitions, i.e., the number of reduce tasks for the
        job. Applications must override this.
        :type key: str
        :param key: the key of the key/value pair being dispatched.
        :type num_of_reduces: int
        :param num_of_reduces: the total number of reduces.
        :rtype: int
        :return: the partition number for ``key``\ .
        """
        assert isinstance(key, str)
        assert isinstance(num_of_reduces, int)
class RecordReader(Closable):
    r"""
    Breaks the data into key/value pairs for input to the :class:`Mapper`\ .
    """
    def __init__(self, context=None):
        # The context is optional at construction time.
        self.context = context
    def __iter__(self):
        # A record reader is its own iterator: ``next``/``__next__``
        # yield successive key/value pairs.
        return self
    @abstractmethod
    def next(self):
        r"""
        Called by the framework to provide a key/value pair to the
        :class:`Mapper`\ . Applications must override this, making
        sure it raises :exc:`~exceptions.StopIteration` when there are no more
        records to process.
        :rtype: tuple
        :return: a tuple of two elements. They are, respectively, the
          key and the value (as strings)
        """
        raise StopIteration
    def __next__(self):
        # Python 3 iterator protocol: delegate to the Python 2 style
        # ``next`` method that subclasses implement.
        return self.next()
    @abstractmethod
    def get_progress(self):
        """
        The current progress of the record reader through its data.
        :rtype: float
        :return: the fraction of data read up to now, as a float between 0
          and 1.
        """
        pass
class RecordWriter(Closable):
    """
    Writes the output key/value pairs to an output file.
    """
    def __init__(self, context=None):
        # The context is optional at construction time.
        self.context = context
    @abstractmethod
    def emit(self, key, value):
        """
        Writes a key/value pair. Applications must override this.
        :type key: str
        :param key: a final output key
        :type value: str
        :param value: a final output value
        """
        pass
class Factory(ABC):
"""
Creates MapReduce application components.
The classes to use for each component must be specified as arguments
to the constructor.
"""
    @abstractmethod
    def create_mapper(self, context):
        """Create the :class:`Mapper` for this task. Must be overridden."""
        assert isinstance(context, MapContext)
    @abstractmethod
    def create_reducer(self, context):
        """Create the :class:`Reducer` for this task. Must be overridden."""
        assert isinstance(context, ReduceContext)
    def create_combiner(self, context):
        """
        Create a combiner object.
        Return the new combiner or :obj:`None`, if one is not needed.
        """
        assert isinstance(context, MapContext)
        # Default behavior: no combiner.
        return None
    def create_partitioner(self, context):
        """
        Create a partitioner object.
        Return the new partitioner or :obj:`None`, if the default partitioner
        should be used.
        """
        assert isinstance(context, MapContext)
        # Default behavior: use the framework's default partitioner.
        return None
def create_record_reader(self, context):
"""
Create a record reader object.
Return the new record reader or :obj:`None`, if the Java record
reader should be used.
"""
assert isinstance(context, MapContext)
| |
count(self):
return MemoryPersistenceHandler.count(self)
    @_checkDumpException
    def reset(self, status):
        # Delegate to the in-memory implementation.
        # NOTE(review): _checkDumpException is defined elsewhere — it
        # presumably translates dump-related failures; confirm.
        return MemoryPersistenceHandler.reset(self, status)
    def shutdown(self):
        # Cancel the timer, then persist state one last time.
        # NOTE(review): assumes self.timer is a threading.Timer scheduling
        # periodic _dump() calls — defined elsewhere; confirm.
        self.timer.cancel()
        self._dump()
class RolloverFilePersistenceHandler(FilePersistenceHandler):
    """Load and dump resources from/to files respecting limits of file size and/or number of resources per file.
    This handler uses multiple instances of :class:`FilePersistenceHandler` to allow insertion of new resources respecting limits specified by the user. It is also capable of reading and updating resources from multiple files.
    The rollover handler leaves the low level details of persistence for the file handlers attached to each file, taking care of the coordination necessary to maintain consistency between them and also of the verification of limits established.
    When inserting new resources, every time the file size limit and/or number of resources per file limit is reached rollover handler opens a new file and assigns a new instance of :class:`FilePersistenceHandler` to handle it. All resources, however, are maintained in memory. So, as in the case of :class:`FilePersistenceHandler`, this handler is not well suited for large datasets that cannot be completely fitted in memory.
    .. note::
        This handler was inspired by Python's :class:`python:logging.handlers.RotatingFileHandler` class.
    """
    def __init__(self, configurationsDictionary):
        # Keep a pristine copy of the configuration: each rollover file
        # gets its own FilePersistenceHandler built from it (_addHandler).
        self.originalConfig = deepcopy(configurationsDictionary)
        MemoryPersistenceHandler.__init__(self, configurationsDictionary)
        self._setFileHandler()
        self.fileHandlersList = []
        self.nextSuffixNumber = 1
        self.insertHandlerIndex = 0
        # -1 marks a disabled threshold counter (see insert()).
        self.insertSize = -1
        self.insertAmount = -1
        # Iterate over old rollover files to get file names and max suffix number already used
        fileNamesList = [self.config["filename"]]
        for name in glob.iglob(self.config["filename"] + ".*"):
            # Raw string: "\." was an invalid escape sequence (deprecated
            # since Python 3.6). Only purely numeric suffixes count as
            # rollover files.
            if re.search(r"\.[0-9]+$", name):
                fileNamesList.append(name)
                suffixNumber = int(name.rsplit(".", 1)[1])
                if (suffixNumber >= self.nextSuffixNumber): self.nextSuffixNumber = suffixNumber + 1
        # Initialize file persistence handlers
        for fileName in fileNamesList: self._addHandler(fileName)
        # Get initial file size and amount
        if (self.config["sizethreshold"]): self.insertSize = os.path.getsize(self.config["filename"])
        if (self.config["amountthreshold"]): self.insertAmount = len(self.fileHandlersList[self.insertHandlerIndex].resources)
    def _extractConfig(self, configurationsDictionary):
        # Thresholds default to 0 (disabled), but at least one must be set.
        FilePersistenceHandler._extractConfig(self, configurationsDictionary)
        if ("sizethreshold" not in self.config): self.config["sizethreshold"] = 0
        else: self.config["sizethreshold"] = int(self.config["sizethreshold"])
        if ("amountthreshold" not in self.config): self.config["amountthreshold"] = 0
        else: self.config["amountthreshold"] = int(self.config["amountthreshold"])
        if (self.config["sizethreshold"] < 0): raise ValueError("Parameter 'sizethreshold' must be zero or greater.")
        if (self.config["amountthreshold"] < 0): raise ValueError("Parameter 'amountthreshold' must be zero or greater.")
        if (self.config["sizethreshold"] == 0) and (self.config["amountthreshold"] == 0):
            raise ValueError("Parameters 'sizethreshold' and 'amountthreshold' cannot be zero at the same time.")
    def _addHandler(self, fileName):
        # Build a dedicated FilePersistenceHandler for fileName, checking
        # for resource IDs duplicated across rollover files.
        config = deepcopy(self.originalConfig)
        config["filename"] = fileName
        config["filetype"] = self.config["filetype"]
        handler = FilePersistenceHandler(config)
        if (self.config["uniqueresourceid"]):
            duplicated = set(handler.IDsHash).intersection(self.IDsHash)
            if (not duplicated): self.IDsHash.update(dict.fromkeys(handler.IDsHash, len(self.fileHandlersList)))
            else:
                details = ["%s ['%s']" % (resourceID, self.fileHandlersList[self.IDsHash[resourceID]].config["filename"]) for resourceID in duplicated]
                raise KeyError("Duplicated ID(s) found in '%s': %s" % (fileName, ", ".join(details)))
        self.fileHandlersList.append(handler)
    def select(self):
        # Ask each file handler in turn; the composite key identifies both
        # the handler and the resource within it.
        for handlerKey, handler in enumerate(self.fileHandlersList):
            (resourceKey, resourceID, resourceInfo) = handler.select()
            if (resourceID): return ((handlerKey, resourceKey), resourceID, resourceInfo)
        return (None, None, None)
    def update(self, keyPair, status, resourceInfo):
        # keyPair = (handler index, resource key within that handler).
        self.fileHandlersList[keyPair[0]].update(keyPair[1], status, resourceInfo)
    def insert(self, resourcesList):
        for resourceID, resourceInfo in resourcesList:
            # An already-known ID is routed to the handler that owns it.
            if (self.config["uniqueresourceid"]) and (resourceID in self.IDsHash):
                handler = self.fileHandlersList[self.IDsHash[resourceID]]
                handler.insert([(resourceID, resourceInfo)])
                continue
            # NOTE(review): self.insertLock is expected to be created by a
            # base class (it is not set in this class) — confirm.
            with self.insertLock:
                handler = self.fileHandlersList[self.insertHandlerIndex]
                # Change insert handler if size or amount thresholds were exceeded. If there is no more
                # handlers in the list, open a new file and instantiate a new handler to take care of it
                while ((self.insertSize >= self.config["sizethreshold"]) or
                       (self.insertAmount >= self.config["amountthreshold"])):
                    self.insertHandlerIndex += 1
                    if (self.insertHandlerIndex >= len(self.fileHandlersList)):
                        newFileName = "%s.%d" % (self.config["filename"], self.nextSuffixNumber)
                        with open(newFileName, "w") as file: self.fileHandler.dump([], file, self.fileColumns)
                        self._addHandler(newFileName)
                        self.nextSuffixNumber += 1
                    handler = self.fileHandlersList[self.insertHandlerIndex]
                    if (self.config["sizethreshold"]): self.insertSize = os.path.getsize(handler.config["filename"])
                    if (self.config["amountthreshold"]): self.insertAmount = len(handler.resources)
                handler.insert([(resourceID, resourceInfo)])
                if (self.config["uniqueresourceid"]): self.IDsHash[resourceID] = self.insertHandlerIndex
                # Track thresholds incrementally to avoid re-stat'ing the
                # file on every insert.
                if (self.config["sizethreshold"]):
                    self.insertSize += len(self.fileHandler.unparse(handler.resources[-1], self.fileColumns))
                if (self.config["amountthreshold"]):
                    self.insertAmount += 1
    def count(self):
        # Element-wise sum of the per-file counts.
        # NOTE(review): six buckets — presumably one per resource status
        # defined by the base handler; confirm.
        counts = [0] * 6
        for handler in self.fileHandlersList:
            counts = [x + y for x, y in zip(counts, handler.count())]
        return counts
    def reset(self, status):
        # Reset every file; per-handler return values are discarded.
        for handler in self.fileHandlersList: handler.reset(status)
    def shutdown(self):
        for handler in self.fileHandlersList: handler.shutdown()
class MySQLPersistenceHandler(BasePersistenceHandler):
"""Store and retrieve resources to/from a MySQL database.
The table must already exist in the database and must contain at least three columns: a primary key column, a resource ID column and a status column.
.. note::
This handler uses `MySQL Connector/Python <http://dev.mysql.com/doc/connector-python/en/index.html>`_ to interact with MySQL databases.
"""
    def __init__(self, configurationsDictionary):
        """Read table metadata and start the select cache thread."""
        BasePersistenceHandler.__init__(self, configurationsDictionary)
        self.echo = common.EchoHandler(self.config["echo"])
        # Thread-local storage for per-thread MySQL connections (see setup()).
        self.local = threading.local()
        self.selectCacheThreadExceptionEvent = threading.Event()
        self.selectNoResourcesEvent = threading.Event()
        self.selectWaitCondition = threading.Condition()
        # Get column names
        # LIMIT 0 returns no rows but still exposes the column metadata.
        query = "SELECT * FROM " + self.config["table"] + " LIMIT 0"
        connection = mysql.connector.connect(**self.config["connargs"])
        cursor = connection.cursor()
        cursor.execute(query)
        cursor.fetchall()
        self.colNames = cursor.column_names
        cursor.close()
        connection.close()
        # Everything except key/ID/status columns is treated as resource info.
        self.excludedColNames = (self.config["primarykeycolumn"], self.config["resourceidcolumn"], self.config["statuscolumn"])
        self.infoColNames = [name for name in self.colNames if (name not in self.excludedColNames)]
        # Start select cache thread
        self.resourcesQueue = Queue.Queue()
        t = threading.Thread(target = self._selectCacheThread)
        t.daemon = True
        t.start()
        # Block until the cache thread signals its first fill attempt.
        with self.selectWaitCondition: self.selectWaitCondition.wait()
    def _extractConfig(self, configurationsDictionary):
        # 'selectcachesize' is mandatory; values <= 0 mean no LIMIT clause
        # is applied when filling the cache (see _selectCacheQuery).
        BasePersistenceHandler._extractConfig(self, configurationsDictionary)
        if ("selectcachesize" not in self.config): raise KeyError("Parameter 'selectcachesize' must be specified.")
        else: self.config["selectcachesize"] = int(self.config["selectcachesize"])
        if ("onduplicateupdate" not in self.config): self.config["onduplicateupdate"] = False
        else: self.config["onduplicateupdate"] = common.str2bool(self.config["onduplicateupdate"])
    def _selectCacheQuery(self):
        # Fetch primary keys of AVAILABLE resources in key order.
        # Table/column names are spliced from configuration (assumed
        # trusted); only the status value is parameterized.
        query = "SELECT " + self.config["primarykeycolumn"] + " FROM " + self.config["table"] + " WHERE " + self.config["statuscolumn"] + " = %s ORDER BY " + self.config["primarykeycolumn"]
        if (self.config["selectcachesize"] > 0): query += " LIMIT %d" % self.config["selectcachesize"]
        # A dedicated autocommit connection is used: this runs in the
        # cache thread, not in a worker thread set up via setup().
        connection = mysql.connector.connect(**self.config["connargs"])
        connection.autocommit = True
        cursor = connection.cursor()
        cursor.execute(query, (self.status.AVAILABLE,))
        resourcesKeys = cursor.fetchall()
        cursor.close()
        connection.close()
        return resourcesKeys
def _selectCacheThread(self):
try:
previouslyEmpty = False
while True:
if not previouslyEmpty: self.echo.out("[Table: %s] Select cache empty. Querying database..." % self.config["table"])
resourcesKeys = self._selectCacheQuery()
if resourcesKeys:
if previouslyEmpty: self.echo.out("[Table: %s] New resources available now." % self.config["table"])
self.selectNoResourcesEvent.clear()
previouslyEmpty = False
self.echo.out("[Table: %s] Filling select cache with resources keys..." % self.config["table"])
for key in resourcesKeys: self.resourcesQueue.put(key[0])
self.echo.out("[Table: %s] Select cache filled." % self.config["table"])
with self.selectWaitCondition: self.selectWaitCondition.notify()
self.resourcesQueue.join()
else:
if not previouslyEmpty: self.echo.out("[Table: %s] No available resources found." % self.config["table"])
self.selectNoResourcesEvent.set()
previouslyEmpty = True
with self.selectWaitCondition:
self.selectWaitCondition.notify()
self.selectWaitCondition.wait()
except:
self.selectCacheThreadExceptionEvent.set()
self.echo.out("[Table: %s] Exception while trying to fill select cache." % self.config["table"], "EXCEPTION")
    def setup(self):
        # Open a dedicated autocommit connection stored in thread-local
        # storage — presumably called once per worker thread before
        # select()/update(); confirm against caller.
        self.local.connection = mysql.connector.connect(**self.config["connargs"])
        self.local.connection.autocommit = True
    def select(self):
        """Pick an available resource, mark it INPROGRESS and return
        (primary key, resource ID, info dict); (None, None, None) when
        nothing is available."""
        # Try to get resource key from select cache
        while True:
            try:
                resourceKey = self.resourcesQueue.get_nowait()
            except Queue.Empty:
                if self.selectCacheThreadExceptionEvent.is_set():
                    raise RuntimeError("Exception in select cache thread. Execution of MySQLPersistenceHandler aborted.")
                elif self.selectNoResourcesEvent.is_set():
                    # Wake the cache thread (it waits on the condition) and
                    # report that there is nothing to do.
                    with self.selectWaitCondition: self.selectWaitCondition.notify()
                    return (None, None, None)
                # Neither event set: the cache thread is refilling the
                # queue — retry (busy-wait).
            else: break
        # Fetch resource information and mark it as being processed
        cursor = self.local.connection.cursor(dictionary = True)
        query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s WHERE " + self.config["primarykeycolumn"] + " = %s"
        cursor.execute(query, (self.status.INPROGRESS, resourceKey))
        # task_done() lets the cache thread's join() return once every
        # cached key has been claimed.
        self.resourcesQueue.task_done()
        query = "SELECT * FROM " + self.config["table"] + " WHERE " + self.config["primarykeycolumn"] + " = %s"
        cursor.execute(query, (resourceKey,))
        resource = cursor.fetchone()
        cursor.close()
        return (resource[self.config["primarykeycolumn"]],
                resource[self.config["resourceidcolumn"]],
                {k: resource[k] for k in self.infoColNames})
    def update(self, resourceKey, status, resourceInfo):
        """Set the status (and optionally extra info columns) of the
        resource identified by its primary key."""
        cursor = self.local.connection.cursor()
        if (not resourceInfo):
            query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s WHERE " + self.config["primarykeycolumn"] + " = %s"
            cursor.execute(query, (status, resourceKey))
        else:
            # Key/ID/status columns cannot be overwritten through info.
            info = {k: resourceInfo[k] for k in resourceInfo if (k not in self.excludedColNames)}
            # NOTE(review): column names taken from resourceInfo keys are
            # spliced directly into the SQL text (values are parameterized).
            # Safe only if callers never pass untrusted keys — confirm
            # upstream validation.
            query = "UPDATE " + self.config["table"] + " SET " + self.config["statuscolumn"] + " = %s, " + " = %s, ".join(info.keys()) + " = %s WHERE " + self.config["primarykeycolumn"] + " = %s"
            cursor.execute(query, (status,) + tuple(info.values()) + (resourceKey,))
        cursor.close()
def insert(self, resourcesList):
# The method cursor.executemany() is optimized for multiple inserts, batching all data into a single INSERT INTO
# statement. This method would be the best to use here but unfortunately it does not parse the DEFAULT keyword
# correctly. This way, the alternative is to pre-build the query and send it to | |
refresh token ({}) was not valid.'.format(current_user.name, character_id, refresh_token))
characterCard = get_character_card(character_id, preston, access_token)
if characterCard is None:
return redirect(url_for('esi_parser.index'))
return render_template('esi_parser/audit_skills.html',
character_id=character_id, client_id=client_id, client_secret=client_secret, refresh_token=refresh_token, scopes=scopes,
character=characterCard)
@Application.route('/audit/wallet/<int:character_id>/<client_id>/<client_secret>/<refresh_token>/<scopes>')
@login_required
@needs_permission('parse_esi', 'ESI Audit')
def audit_wallet(character_id, client_id, client_secret, refresh_token, scopes):
    """Audit a character's wallet.
    Args:
        character_id (int): ID of the character.
        client_id (str): Client ID of the SSO that was used to retrieve the refresh token.
        client_secret (str): Client secret of the SSO that was used to retrieve the refresh token.
        refresh_token (str): Refresh token of the character.
        scopes (str): Scopes that the refresh token provides access to.
    Returns:
        str: redirect to the appropriate url.
    """
    # NOTE(review): the client secret and refresh token travel in the URL
    # path, so they can end up in server logs and browser history —
    # consider POST bodies or server-side storage.
    # Make preston instance.
    preston = Preston(
        user_agent=EveAPI['user_agent'],
        client_id=client_id,
        client_secret=client_secret,
        scope=scopes,
        refresh_token=refresh_token
    )
    # Get access token.
    access_token = preston._get_access_from_refresh()[0]
    if access_token is None:
        flash('Refresh token ({}) could not get an access token.'.format(refresh_token), 'danger')
        current_app.logger.error('{} tried to parse ESI for character with ID {} but the refresh token ({}) was not valid.'.format(current_user.name, character_id, refresh_token))
    # NOTE(review): execution intentionally (?) continues with
    # access_token=None after the error above — same pattern as the other
    # audit views; confirm get_character_card tolerates a None token.
    characterCard = get_character_card(character_id, preston, access_token)
    if characterCard is None:
        return redirect(url_for('esi_parser.index'))
    return render_template('esi_parser/audit_wallet.html',
                           character_id=character_id, client_id=client_id, client_secret=client_secret, refresh_token=refresh_token, scopes=scopes,
                           character=characterCard)
@Application.route('/audit/onepage/<int:character_id>/<client_id>/<client_secret>/<refresh_token>/<scopes>')
@login_required
@needs_permission('parse_esi', 'ESI Audit')
def audit_onepage(character_id, client_id, client_secret, refresh_token, scopes):
    """Views a member with ID.
    Args:
        character_id (int): ID of the character.
        client_id (str): Client ID of the SSO that was used to retrieve the refresh token.
        client_secret (str): Client secret of the SSO that was used to retrieve the refresh token.
        refresh_token (str): Refresh token of the character.
        scopes (str): Scopes that the refresh token provides access to.
    Returns:
        str: redirect to the appropriate url.
    """
    # NOTE(review): secrets embedded in the URL path — see audit_wallet.
    # Make preston instance.
    preston = Preston(
        user_agent=EveAPI['user_agent'],
        client_id=client_id,
        client_secret=client_secret,
        scope=scopes,
        refresh_token=refresh_token
    )
    # Get access token.
    access_token = preston._get_access_from_refresh()[0]
    if access_token is None:
        flash('Refresh token ({}) could not get an access token.'.format(refresh_token), 'danger')
        current_app.logger.error('{} tried to parse ESI for character with ID {} but the refresh token ({}) was not valid.'.format(current_user.name, character_id, refresh_token))
    # NOTE(review): continues with access_token=None after a failed
    # refresh (same pattern as the sibling audit views); the helpers below
    # return None on failure, which triggers the redirects — confirm this
    # fall-through is intended.
    characterCard = get_character_card(character_id, preston, access_token)
    if characterCard is None:
        return redirect(url_for('esi_parser.index'))
    characterContacts = get_contacts(character_id, preston, access_token)
    if characterContacts is None:
        return redirect(url_for('esi_parser.index'))
    characterMails = get_mails(character_id, preston, access_token)
    if characterMails is None:
        return redirect(url_for('esi_parser.index'))
    return render_template('esi_parser/audit_onepage.html',
                           character_id=character_id, client_id=client_id, client_secret=client_secret, refresh_token=refresh_token, scopes=scopes,
                           character=characterCard, character_contacts=characterContacts, character_mails=characterMails)
def get_character_card(character_id, preston, access_token):
    """Get all the info for the character card.

    Merges public character / corporation / alliance data with the
    scope-protected wallet balance and skills into one dict.

    Args:
        character_id (int): ID of the character.
        preston (preston): Preston object to make scope-required ESI calls.
        access_token (str): Access token for the scope-required ESI calls.

    Returns:
        json: Character card information, or None on an ESI error
            (an error message is flashed to the user in that case).
    """
    # Get character (public endpoint, no scope needed).
    characterPayload = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/?datasource=tranquility".format(str(character_id)))
    if characterPayload.status_code != 200:
        flash('There was an error ({}) when trying to retrieve character with ID {}'.format(str(characterPayload.status_code), str(character_id)), 'danger')
        return None
    characterJSON = characterPayload.json()
    characterJSON['portrait'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/portrait/?datasource=tranquility".format(str(character_id))).json()
    # Get corporation.
    corporationPayload = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(str(characterJSON['corporation_id'])))
    if corporationPayload.status_code != 200:
        flash('There was an error ({}) when trying to retrieve corporation with ID {}'.format(str(corporationPayload.status_code), str(characterJSON['corporation_id'])), 'danger')
        return None
    characterJSON['corporation'] = corporationPayload.json()
    characterJSON['corporation']['logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/icons/?datasource=tranquility".format(
        str(characterJSON['corporation_id']))).json()
    # Get alliance (only present when the corporation is in one).
    if 'alliance_id' in characterJSON:
        alliancePayload = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/?datasource=tranquility".format(str(characterJSON['alliance_id'])))
        if alliancePayload.status_code != 200:
            flash('There was an error ({}) when trying to retrieve alliance with ID {}'.format(str(alliancePayload.status_code), str(characterJSON['alliance_id'])), 'danger')
            return None
        characterJSON['alliance'] = alliancePayload.json()
        characterJSON['alliance']['logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/icons/?datasource=tranquility".format(
            str(characterJSON['alliance_id']))).json()
    # Get wallet (scope-protected; None when the token lacks the scope).
    walletIsk = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-wallet.read_character_wallet.v1'],
                                                               "https://esi.tech.ccp.is/latest/characters/{}/wallet/?datasource=tranquility&token={}".format(
                                                                   str(character_id), access_token))
    walletIskJSON = None
    if walletIsk is not None:
        walletIskJSON = walletIsk.json()
    # NOTE(review): the code below treats any non-float JSON as an error
    # payload -- presumably the wallet endpoint returns a bare float
    # balance on success; confirm against the ESI spec.
    if walletIskJSON is not None and type(walletIskJSON) is not float:
        flash('There was an error ({}) when trying to retrieve wallet for character.'.format(str(walletIsk.status_code)), 'danger')
        return None
    else:
        # May be None when the scope is missing.
        characterJSON['wallet_isk'] = walletIskJSON
    # Get skillpoints
    characterSkills = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-skills.read_skills.v1'],
                                                                     "https://esi.tech.ccp.is/latest/characters/{}/skills/?datasource=tranquility&token={}".format(
                                                                         str(character_id), access_token))
    characterSkillsJSON = None
    if characterSkills is not None:
        characterSkillsJSON = characterSkills.json()
    if characterSkillsJSON is not None and 'error' in characterSkillsJSON:
        flash('There was an error ({}) when trying to retrieve skills.'.format(str(characterSkills.status_code)), 'danger')
        return None
    else:
        # May be None when the scope is missing.
        characterJSON['skills'] = characterSkillsJSON
    return characterJSON
def get_contacts(character_id, preston, access_token):
    """Get all the contacts information.

    Fetches the character's contact list (scope
    ``esi-characters.read_contacts.v1``), resolves each contact's
    name/portrait plus corporation/alliance details and history, attaches
    label names, and sorts by standing (descending) then by name.

    Args:
        character_id (int): ID of the character.
        preston (preston): Preston object to make scope-required ESI calls.
        access_token (str): Access token for the scope-required ESI calls.

    Returns:
        json: Contacts information; ``{'has_scope': False}`` when the
            token lacks the contacts scope; ``None`` on an ESI error.
    """
    # Get raw contact data.
    characterContacts = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-characters.read_contacts.v1'],
                                                                       "https://esi.tech.ccp.is/latest/characters/{}/contacts/?datasource=tranquility&token={}".format(
                                                                           str(character_id), access_token))
    if characterContacts is None:
        # Token does not grant the contacts scope.
        return {'has_scope': False}
    characterContactsJSON = characterContacts.json()
    if characterContactsJSON is not None and 'error' in characterContactsJSON:
        flash('There was an error ({}) when trying to retrieve contacts.'.format(str(characterContacts.status_code)), 'danger')
        return None
    characterContactLabels = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-characters.read_contacts.v1'],
                                                                            "https://esi.tech.ccp.is/latest/characters/{}/contacts/labels/?datasource=tranquility&token={}".format(
                                                                                str(character_id), access_token))
    characterContactLabelsJSON = characterContactLabels.json()
    if characterContactLabelsJSON is not None and 'error' in characterContactLabelsJSON:
        flash('There was an error ({}) when trying to retrieve contact labels.'.format(str(characterContactLabels.status_code)), 'danger')
        return None
    # Link characters, corporations, images and labels to contacts.
    for contact in characterContactsJSON:
        # Name.
        if contact['contact_type'] == 'character':
            # Get character.
            character = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/?datasource=tranquility".format(str(contact['contact_id']))).json()
            contact['character'] = character
            # Get character corp.
            contact['character']['corporation_name'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(
                str(character['corporation_id']))).json()['name']
            # Get character corp logo.
            contact['character']['corporation_logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/icons/?datasource=tranquility".format(
                str(character['corporation_id']))).json()['px128x128']
            # Get character alliance if applicable.
            if 'alliance_id' in character:
                contact['character']['alliance_name'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/?datasource=tranquility".format(
                    str(character['alliance_id']))).json()['name']
                # Get character alliance logo.
                contact['character']['alliance_logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/icons/?datasource=tranquility".format(
                    str(character['alliance_id']))).json()['px128x128']
            # Get corporation history.
            corpHistory = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/corporationhistory/?datasource=tranquility".format(str(contact['contact_id']))).json()
            for index, corp in enumerate(corpHistory):
                # Name.
                corp['name'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(
                    str(corp['corporation_id']))).json()['name']
                # Logo.
                corp['logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/icons/?datasource=tranquility".format(
                    str(corp['corporation_id']))).json()['px128x128']
                # Leave date: end of a stint is the start of the previous
                # (more recent) history entry.
                if index > 0:
                    corp['end_date'] = corpHistory[index - 1]['start_date']
            contact['character']['corporation_history'] = corpHistory
            # Get contact name / image.
            contact['contact_name'] = character['name']
            contact['contact_image'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/portrait/?datasource=tranquility".format(
                str(contact['contact_id']))).json()['px128x128']
        elif contact['contact_type'] == 'corporation':
            # Get corporation.
            corporation = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(str(contact['contact_id']))).json()
            contact['corporation'] = corporation
            # Get corporation alliance.
            if 'alliance_id' in corporation:
                contact['corporation']['alliance_name'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/?datasource=tranquility".format(
                    str(corporation['alliance_id']))).json()['name']
                # Get corporation alliance logo.
                contact['corporation']['alliance_logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/icons/?datasource=tranquility".format(
                    str(corporation['alliance_id']))).json()['px128x128']
            # Get alliance history.
            allianceHistory = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/alliancehistory/?datasource=tranquility".format(str(contact['contact_id']))).json()
            for index, alliance in enumerate(allianceHistory):
                allianceJSON = None
                # History entries without an alliance_id represent periods
                # outside any alliance.
                if 'alliance_id' in alliance:
                    allianceJSON = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/?datasource=tranquility".format(
                        str(alliance['alliance_id']))).json()
                    allianceJSON['alliance_id'] = alliance['alliance_id']
                # Name.
                if allianceJSON:
                    alliance['name'] = allianceJSON['name']
                else:
                    alliance['name'] = "No alliance"
                # Logo (only when there was an alliance for this period).
                if allianceJSON:
                    alliance['logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/icons/?datasource=tranquility".format(
                        str(alliance['alliance_id']))).json()['px128x128']
                # Leave date.
                if index > 0:
                    alliance['end_date'] = allianceHistory[index - 1]['start_date']
            contact['corporation']['alliance_history'] = allianceHistory
            # Get contact name / image.
            contact['contact_name'] = corporation['name']
            contact['contact_image'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/icons/?datasource=tranquility".format(
                str(contact['contact_id']))).json()['px128x128']
        elif contact['contact_type'] == 'alliance':
            # Get alliance.
            alliance = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/?datasource=tranquility".format(str(contact['contact_id']))).json()
            contact['alliance'] = alliance
            # Exec corp.
            if 'executor_corporation_id' in alliance:
                # Name.
                contact['alliance']['executor_corporation_name'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(
                    str(alliance['executor_corporation_id']))).json()['name']
                # Logo.
                contact['alliance']['executor_corporation_logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/icons/?datasource=tranquility".format(
                    str(alliance['executor_corporation_id']))).json()['px128x128']
            # Alliance members.
            allianceMembers = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/corporations/?datasource=tranquility".format(
                str(contact['contact_id']))).json()
            allianceMemberList = []
            for member in allianceMembers:
                # Corporation info.
                memberJSON = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(
                    str(member))).json()
                # ID.
                memberJSON['corporation_id'] = member
                # Logo.
                memberJSON['corporation_logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/icons/?datasource=tranquility".format(
                    str(member))).json()['px128x128']
                # Get corp.
                allianceMemberList.append(memberJSON)
            contact['alliance']['members'] = allianceMemberList
            contact['contact_name'] = alliance['name']
            contact['contact_image'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/icons/?datasource=tranquility".format(
                str(contact['contact_id']))).json()['px128x128']
        elif contact['contact_type'] == 'faction':
            # Faction name resolution is not implemented yet; placeholder.
            contact['contact_name'] = "FACTION NAMES NOT IMPLEMENTED"
            contact['contact_image'] = "#"
        # Labels.
        if 'label_id' in contact:
            for label in characterContactLabelsJSON:
                if label['label_id'] == contact['label_id']:
                    contact['label_name'] = label['label_name']
    # Sort contacts by name.
    characterContactsJSON = sorted(characterContactsJSON, key=lambda k: k['contact_name'])
    # Sort contacts by standings (stable sort keeps name order within
    # equal standings).
    characterContactsJSON = sorted(characterContactsJSON, key=lambda k: k['standing'], reverse=True)
    return characterContactsJSON
def get_mails(character_id, preston, access_token):
"""Get all the mail information.
Args:
character_id (int): ID of the character.
preston (preston): Preston object to make scope-required ESI calls.
access_token (str): Access token for the scope-required ESI calls.
Returns:
json: Mail information.
"""
# Get mail endpoint.
characterMails = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-mail.read_mail.v1'],
"https://esi.tech.ccp.is/latest/characters/{}/mail/?datasource=tranquility&token={}".format(
str(character_id), access_token))
if characterMails is None:
return {'has_scope': False}
characterMailsJSON = characterMails.json()
if characterMailsJSON is not None and 'error' in characterMailsJSON:
flash('There was an error ({}) when trying to retrieve mails.'.format(str(characterMails.status_code)), 'danger')
return None
# Get mailing lists.
characterMailingLists = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-mail.read_mail.v1'],
"https://esi.tech.ccp.is/latest/characters/{}/mail/lists/?datasource=tranquility&token={}".format(
str(character_id), access_token))
characterMailingListsJSON = characterMailingLists.json()
if characterMailingListsJSON is not None and 'error' in characterMailingListsJSON:
flash('There was an error ({}) when trying to retrieve mail labels.'.format(str(characterMailingLists.status_code)), 'danger')
return None
for mail in characterMailsJSON:
mail['mail'] = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-mail.read_mail.v1'],
"https://esi.tech.ccp.is/latest/characters/{}/mail/{}/?datasource=tranquility&token={}".format(
str(character_id), str(mail['mail_id']), access_token)).json()
# Convert body to be easily showed in html, but first save raw body.
mail['mail']['raw_body'] = mail['mail']['body']
mailBody = mail['mail']['body'].replace('<br>', '\n')
mailBody = SharedInfo['util'].remove_html_tags(mailBody)
mail['mail']['body'] = Markup(mailBody.replace('\n', '<br>'))
# Get sender name.
mail['mail']['from_name'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/?datasource=tranquility".format(
str(mail['mail']['from']))).json()['name']
# Get recipients.
for recipient in mail['mail']['recipients']:
recipient['recipient_name'] = recipient['recipient_id']
# Determine type.
if recipient['recipient_type'] == 'character':
# Get character name.
recipient['recipient_name'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/?datasource=tranquility".format(
str(recipient['recipient_id']))).json()['name']
elif recipient['recipient_type'] == 'corporation':
# Get corporation name.
recipient['recipient_name'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(
str(recipient['recipient_id']))).json()['name']
elif recipient['recipient_type'] == 'alliance':
# Get alliance name.
recipient['recipient_name'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/?datasource=tranquility".format(
| |
import warnings
import logging
import pandas as pd
from functools import partial
from collections import defaultdict
from dae.utils.helpers import str2bool
from dae.variants.attributes import Role, Sex, Status
from dae.backends.raw.loader import CLILoader, CLIArgument
from dae.pedigrees.family import FamiliesData, Person, PEDIGREE_COLUMN_NAMES
from dae.pedigrees.family_role_builder import FamilyRoleBuilder
from dae.pedigrees.layout import Layout
logger = logging.getLogger(__name__)
# Canonical pedigree columns that every loaded pedigree dataframe must
# contain; `flexible_pedigree_read` raises ValueError when any is missing.
PED_COLUMNS_REQUIRED = (
    PEDIGREE_COLUMN_NAMES["family"],
    PEDIGREE_COLUMN_NAMES["person"],
    PEDIGREE_COLUMN_NAMES["mother"],
    PEDIGREE_COLUMN_NAMES["father"],
    PEDIGREE_COLUMN_NAMES["sex"],
    PEDIGREE_COLUMN_NAMES["status"],
)
class FamiliesLoader(CLILoader):
    def __init__(self, families_filename, **params):
        """Initialize the loader.

        Args:
            families_filename: path to the families file.
            **params: loader options; ``ped_file_format`` selects between
                the ``"pedigree"`` (default) and ``"simple"`` formats.
        """
        super().__init__(params=params)
        self.filename = families_filename
        # TODO FIXME Params should be able to accept namedtuple instances
        # self.params["ped_sep"] = ped_sep
        self.file_format = self.params.get("ped_file_format", "pedigree")
@staticmethod
def load_pedigree_file(pedigree_filename, pedigree_format={}):
pedigree_format["ped_no_role"] = str2bool(
pedigree_format.get("ped_no_role", False)
)
pedigree_format["ped_no_header"] = str2bool(
pedigree_format.get("ped_no_header", False)
)
ped_df = FamiliesLoader.flexible_pedigree_read(
pedigree_filename, **pedigree_format
)
families = FamiliesData.from_pedigree_df(ped_df)
FamiliesLoader._build_families_layouts(families, pedigree_format)
FamiliesLoader._build_families_roles(families, pedigree_format)
return families
@staticmethod
def _build_families_layouts(families, pedigree_format):
ped_layout_mode = pedigree_format.get("ped_layout_mode", "load")
if ped_layout_mode == "generate":
for family in families.values():
logger.debug(
f"building layout for family: {family.family_id}; "
f"{family}")
layouts = Layout.from_family(family)
for layout in layouts:
layout.apply_to_family(family)
elif ped_layout_mode == "load":
pass
else:
raise ValueError(
f"unexpected `--ped-layout-mode` option value "
f"`{ped_layout_mode}`"
)
@staticmethod
def _build_families_roles(families, pedigree_format):
has_unknown_roles = any(
[
p.role is None # or p.role == Role.unknown
for p in families.persons.values()
]
)
if has_unknown_roles or pedigree_format.get("ped_no_role"):
for family in families.values():
logger.debug(f"building family roles: {family.family_id}")
role_build = FamilyRoleBuilder(family)
role_build.build_roles()
families._ped_df = None
# @staticmethod
# def load_simple_families_file(families_filename):
# ped_df = FamiliesLoader.load_simple_family_file(families_filename)
# return FamiliesData.from_pedigree_df(ped_df)
def load(self):
if self.file_format == "simple":
return self.load_simple_families_file(self.filename)
else:
assert self.file_format == "pedigree"
return self.load_pedigree_file(
self.filename, pedigree_format=self.params
)
@classmethod
def _arguments(cls):
arguments = []
arguments.append(CLIArgument(
"families",
value_type=str,
metavar="<families filename>",
help_text="families filename in pedigree or simple family format",
))
arguments.append(CLIArgument(
"--ped-family",
default_value="familyId",
help_text="specify the name of the column in the pedigree"
" file that holds the ID of the family the person belongs to"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-person",
default_value="personId",
help_text="specify the name of the column in the pedigree"
" file that holds the person's ID [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-mom",
default_value="momId",
help_text="specify the name of the column in the pedigree"
" file that holds the ID of the person's mother"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-dad",
default_value="dadId",
help_text="specify the name of the column in the pedigree"
" file that holds the ID of the person's father"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-sex",
default_value="sex",
help_text="specify the name of the column in the pedigree"
" file that holds the sex of the person [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-status",
default_value="status",
help_text="specify the name of the column in the pedigree"
" file that holds the status of the person"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-role",
default_value="role",
help_text="specify the name of the column in the pedigree"
" file that holds the role of the person"
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-no-role",
action="store_true",
default_value=False,
help_text="indicates that the provided pedigree file has no role "
"column. "
"If this argument is provided, the import tool will guess the "
"roles "
'of individuals and write them in a "role" column.',
))
arguments.append(CLIArgument(
"--ped-proband",
default_value=None,
help_text="specify the name of the column in the pedigree"
" file that specifies persons with role `proband`;"
" this columns is used only when"
" option `--ped-no-role` is specified. [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-no-header",
action="store_true",
default_value=False,
help_text="indicates that the provided pedigree"
" file has no header. The pedigree column arguments"
" will accept indices if this argument is given."
" [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-file-format",
default_value="pedigree",
help_text="Families file format. It should `pedigree` or `simple`"
"for simple family format [default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-layout-mode",
default_value="load",
help_text="Layout mode specifies how pedigrees "
"drawing of each family is handled."
" Available options are `generate` and `load`. When "
"layout mode option is set to generate the loader"
"tryes to generate a layout for the family pedigree. "
"When `load` is specified, the loader tries to load the layout "
"from the layout column of the pedigree. "
"[default: %(default)s]",
))
arguments.append(CLIArgument(
"--ped-sep",
default_value="\t",
raw=True,
help_text="Families file field separator [default: `\\t`]",
))
return arguments
    @classmethod
    def parse_cli_arguments(cls, argv):
        """Extract families-loader parameters from parsed CLI arguments.

        Args:
            argv: argparse-style namespace carrying the attributes
                declared in :meth:`_arguments`.

        Returns:
            tuple: ``(filename, params)`` where ``params`` is the dict of
            pedigree-format options to pass to the loader.
        """
        filename = argv.families
        # NOTE(review): the parent implementation is invoked for its side
        # effects; its return value is ignored here -- confirm intended.
        super().parse_cli_arguments(argv, use_defaults=False)
        # Options forwarded verbatim (or as indices, see below).
        ped_ped_args = [
            "ped_family",
            "ped_person",
            "ped_mom",
            "ped_dad",
            "ped_sex",
            "ped_status",
            "ped_role",
            "ped_file_format",
            "ped_sep",
            "ped_proband",
            "ped_layout_mode",
        ]
        # Column-name options that instead hold integer column indices
        # when the pedigree file has no header line.
        columns = set(
            [
                "ped_family",
                "ped_person",
                "ped_mom",
                "ped_dad",
                "ped_sex",
                "ped_status",
                "ped_role",
                "ped_proband",
            ]
        )
        assert argv.ped_file_format in ("simple", "pedigree")
        assert argv.ped_layout_mode in ("generate", "load")
        res = {}
        res["ped_no_header"] = str2bool(argv.ped_no_header)
        res["ped_no_role"] = str2bool(argv.ped_no_role)
        for col in ped_ped_args:
            ped_value = getattr(argv, col)
            if not res["ped_no_header"] or col not in columns:
                # With a header (or for non-column options) pass through.
                res[col] = ped_value
            elif ped_value is not None and col in columns:
                # Headerless file: column options are positional indices.
                res[col] = int(ped_value)
        return filename, res
@staticmethod
def produce_header_from_indices(
ped_family=None,
ped_person=None,
ped_mom=None,
ped_dad=None,
ped_sex=None,
ped_status=None,
ped_role=None,
ped_proband=None,
ped_layout=None,
ped_generated=None,
ped_not_sequenced=None,
ped_sample_id=None,
):
header = (
(ped_family, PEDIGREE_COLUMN_NAMES["family"]),
(ped_person, PEDIGREE_COLUMN_NAMES["person"]),
(ped_mom, PEDIGREE_COLUMN_NAMES["mother"]),
(ped_dad, PEDIGREE_COLUMN_NAMES["father"]),
(ped_sex, PEDIGREE_COLUMN_NAMES["sex"]),
(ped_status, PEDIGREE_COLUMN_NAMES["status"]),
(ped_role, PEDIGREE_COLUMN_NAMES["role"]),
(ped_proband, PEDIGREE_COLUMN_NAMES["proband"]),
(ped_layout, PEDIGREE_COLUMN_NAMES["layout"]),
(ped_generated, PEDIGREE_COLUMN_NAMES["generated"]),
(ped_not_sequenced, PEDIGREE_COLUMN_NAMES["not_sequenced"]),
(ped_sample_id, PEDIGREE_COLUMN_NAMES["sample id"]),
)
header = tuple(filter(lambda col: type(col[0]) is int, header))
for col in header:
assert type(col[0]) is int, col[0]
header = tuple(sorted(header, key=lambda col: col[0]))
return zip(*header)
    @staticmethod
    def flexible_pedigree_read(
        pedigree_filepath,
        ped_sep="\t",
        ped_no_header=False,
        ped_family="familyId",
        ped_person="personId",
        ped_mom="momId",
        ped_dad="dadId",
        ped_sex="sex",
        ped_status="status",
        ped_role="role",
        ped_proband="proband",
        ped_layout="layout",
        ped_generated="generated",
        ped_not_sequenced="not_sequenced",
        ped_sample_id="sampleId",
        ped_no_role=False,
        **kwargs,
    ):
        """Read a pedigree file into a normalized pandas dataframe.

        The ``ped_*`` arguments name the columns in the input file (or,
        when ``ped_no_header`` is true, give their integer positions).
        Enum-valued columns are converted via ``Role``/``Sex``/``Status``,
        sample IDs are filled in for sequenced persons, and all columns
        are renamed to the canonical ``PEDIGREE_COLUMN_NAMES``.

        Raises:
            ValueError: when any required pedigree column is missing.
        """
        # Accept string values (e.g. from CLI/config) for boolean options.
        if type(ped_no_role) == str:
            ped_no_role = str2bool(ped_no_role)
        if type(ped_no_header) == str:
            ped_no_header = str2bool(ped_no_header)
        read_csv_func = partial(
            pd.read_csv,
            sep=ped_sep,
            index_col=False,
            skipinitialspace=True,
            converters={
                ped_role: Role.from_name,
                ped_sex: Sex.from_name,
                ped_status: Status.from_name,
                ped_generated: lambda v: str2bool(v),
                ped_not_sequenced: lambda v: str2bool(v),
                ped_proband: lambda v: str2bool(v),
            },
            dtype=str,
            comment="#",
            encoding="utf-8",
        )
        # Suppress only pandas' converter-vs-dtype warning; re-emit any
        # other warnings captured during parsing afterwards.
        with warnings.catch_warnings(record=True) as ws:
            warnings.filterwarnings(
                "ignore",
                category=pd.errors.ParserWarning,
                message="Both a converter and dtype were specified",
            )
            if ped_no_header:
                # Headerless file: the ped_* arguments are column indices;
                # synthesize a header and switch to the canonical names.
                _, file_header = FamiliesLoader.produce_header_from_indices(
                    ped_family=ped_family,
                    ped_person=ped_person,
                    ped_mom=ped_mom,
                    ped_dad=ped_dad,
                    ped_sex=ped_sex,
                    ped_status=ped_status,
                    ped_role=ped_role,
                    ped_proband=ped_proband,
                    ped_layout=ped_layout,
                    ped_generated=ped_generated,
                    ped_not_sequenced=ped_not_sequenced,
                    ped_sample_id=ped_sample_id,
                )
                ped_family = PEDIGREE_COLUMN_NAMES["family"]
                ped_person = PEDIGREE_COLUMN_NAMES["person"]
                ped_mom = PEDIGREE_COLUMN_NAMES["mother"]
                ped_dad = PEDIGREE_COLUMN_NAMES["father"]
                ped_sex = PEDIGREE_COLUMN_NAMES["sex"]
                ped_status = PEDIGREE_COLUMN_NAMES["status"]
                ped_role = PEDIGREE_COLUMN_NAMES["role"]
                ped_proband = PEDIGREE_COLUMN_NAMES["proband"]
                ped_layout = PEDIGREE_COLUMN_NAMES["layout"]
                ped_generated = PEDIGREE_COLUMN_NAMES["generated"]
                ped_not_sequenced = PEDIGREE_COLUMN_NAMES["not_sequenced"]
                ped_sample_id = PEDIGREE_COLUMN_NAMES["sample id"]
                ped_df = read_csv_func(
                    pedigree_filepath, header=None, names=file_header
                )
            else:
                ped_df = read_csv_func(pedigree_filepath)
        for w in ws:
            warnings.showwarning(w.message, w.category, w.filename, w.lineno)
        if ped_sample_id in ped_df:
            if ped_generated in ped_df or ped_not_sequenced in ped_df:
                # Generated / not-sequenced persons get no sample ID.
                def fill_sample_id(r):
                    if not pd.isna(r.sampleId):
                        return r.sampleId
                    else:
                        if r.generated or r.not_sequenced:
                            return None
                        else:
                            return r.personId
            else:
                def fill_sample_id(r):
                    if not pd.isna(r.sampleId):
                        return r.sampleId
                    else:
                        return r.personId
            sample_ids = ped_df.apply(
                lambda r: fill_sample_id(r), axis=1, result_type="reduce",
            )
            ped_df[ped_sample_id] = sample_ids
        else:
            # No sample column at all: default the sample ID to person ID.
            sample_ids = pd.Series(data=ped_df[ped_person].values)
            ped_df[ped_sample_id] = sample_ids
        # Normalize falsy flag values to None.
        if ped_generated in ped_df:
            ped_df[ped_generated] = ped_df[ped_generated].apply(
                lambda v: v if v else None
            )
        if ped_not_sequenced in ped_df:
            ped_df[ped_not_sequenced] = ped_df[ped_not_sequenced].apply(
                lambda v: v if v else None
            )
        ped_df = ped_df.rename(
            columns={
                ped_family: PEDIGREE_COLUMN_NAMES["family"],
                ped_person: PEDIGREE_COLUMN_NAMES["person"],
                ped_mom: PEDIGREE_COLUMN_NAMES["mother"],
                ped_dad: PEDIGREE_COLUMN_NAMES["father"],
                ped_sex: PEDIGREE_COLUMN_NAMES["sex"],
                ped_status: PEDIGREE_COLUMN_NAMES["status"],
                ped_role: PEDIGREE_COLUMN_NAMES["role"],
                ped_proband: PEDIGREE_COLUMN_NAMES["proband"],
                ped_sample_id: PEDIGREE_COLUMN_NAMES["sample id"],
            }
        )
        if not set(PED_COLUMNS_REQUIRED) <= set(ped_df.columns):
            missing_columns = set(PED_COLUMNS_REQUIRED).difference(
                set(ped_df.columns)
            )
            missing_columns = ", ".join(missing_columns)
            print(f"pedigree file missing columns {missing_columns}")
            raise ValueError(
                f"pedigree file missing columns {missing_columns}"
            )
        return ped_df
    @staticmethod
    def load_simple_families_file(infile, ped_sep="\t"):
        """Load a "simple" families file into a FamiliesData instance.

        The simple format has one person per row (``familyId``,
        ``personId``, ``role``, ``sex``/``gender``); parental links are
        reconstructed from mom/dad roles within each family, and status
        is derived from the role (probands affected, others unaffected).
        """
        fam_df = pd.read_csv(
            infile,
            sep=ped_sep,
            index_col=False,
            skipinitialspace=True,
            converters={
                "role": lambda r: Role.from_name(r),
                "gender": lambda s: Sex.from_name(s),
                "sex": lambda s: Sex.from_name(s),
            },
            dtype={"familyId": str, "personId": str},
            comment="#",
        )
        fam_df = fam_df.rename(
            columns={
                "gender": "sex",
                "personId": "person_id",
                "familyId": "family_id",
                "momId": "mom_id",
                "dadId": "dad_id",
                "sampleId": "sample_id",
            },
        )
        # Status: probands are affected (2), everybody else unaffected (1).
        fam_df["status"] = pd.Series(index=fam_df.index, data=1)
        fam_df.loc[fam_df.role == Role.prb, "status"] = 2
        fam_df["status"] = fam_df.status.apply(lambda s: Status.from_value(s))
        # Parental IDs are reset and re-derived from roles below.
        fam_df["mom_id"] = pd.Series(index=fam_df.index, data="0")
        fam_df["dad_id"] = pd.Series(index=fam_df.index, data="0")
        if "sample_id" not in fam_df.columns:
            sample_ids = pd.Series(data=fam_df["person_id"].values)
            fam_df["sample_id"] = sample_ids
        # Group rows per family.
        families = defaultdict(list)
        for rec in fam_df.to_dict(orient="records"):
            families[rec["family_id"]].append(rec)
        result = defaultdict(list)
        for fam_id, members in families.items():
            mom_id = None
            dad_id = None
            children = []
            for member in members:
                role = member["role"]
                if role == Role.mom:
                    mom_id = member["person_id"]
                elif role == Role.dad:
                    dad_id = member["person_id"]
                else:
                    # Only prb/sib are accepted besides mom/dad.
                    assert role in set([Role.prb, Role.sib])
                    children.append(member)
            # Wire every child to the family's mom/dad (may stay None).
            for child in children:
                child["mom_id"] = mom_id
                child["dad_id"] = dad_id
            result[fam_id] = [Person(**member) for member in members]
        return FamiliesData.from_family_persons(result)
@staticmethod
def save_pedigree(families, filename):
df = families.ped_df.copy()
df = df.rename(
columns={
"person_id": "personId",
"family_id": "familyId",
"mom_id": "momId",
"dad_id": "dadId",
"sample_id": "sampleId",
}
)
df.sex = df.sex.apply(lambda v: v.name)
df.role = df.role.apply(lambda v: v.name)
df.status = df.status.apply(lambda v: v.name)
df.to_csv(filename, index=False, sep="\t")
@staticmethod
def | |
import unittest
from zope.component import getSiteManager
class TestDummyRootFactory(unittest.TestCase):
    """Exercise pyramid.testing.DummyRootFactory."""

    def _makeOne(self, environ):
        from pyramid.testing import DummyRootFactory
        return DummyRootFactory(environ)

    def test_it(self):
        # Matchdict entries become attributes on the factory.
        matchdict = {'a': 1}
        factory = self._makeOne({'bfg.routes.matchdict': matchdict})
        self.assertEqual(factory.a, 1)
class TestDummySecurityPolicy(unittest.TestCase):
    """Exercise pyramid.testing.DummySecurityPolicy."""

    def _getTargetClass(self):
        from pyramid.testing import DummySecurityPolicy
        return DummySecurityPolicy

    def _makeOne(self, userid=None, groupids=(), permissive=True):
        return self._getTargetClass()(userid, groupids, permissive)

    def test_authenticated_userid(self):
        policy = self._makeOne('user')
        self.assertEqual(policy.authenticated_userid(None), 'user')

    def test_unauthenticated_userid(self):
        policy = self._makeOne('user')
        self.assertEqual(policy.unauthenticated_userid(None), 'user')

    def test_effective_principals_userid(self):
        from pyramid.security import Authenticated
        from pyramid.security import Everyone
        policy = self._makeOne('user', ('group1',))
        expected = [Everyone, Authenticated, 'user', 'group1']
        self.assertEqual(policy.effective_principals(None), expected)

    def test_effective_principals_nouserid(self):
        from pyramid.security import Everyone
        policy = self._makeOne()
        self.assertEqual(policy.effective_principals(None), [Everyone])

    def test_permits(self):
        # The dummy policy is permissive by default.
        self.assertEqual(self._makeOne().permits(None, None, None), True)

    def test_principals_allowed_by_permission(self):
        from pyramid.security import Authenticated
        from pyramid.security import Everyone
        policy = self._makeOne('user', ('group1',))
        expected = [Everyone, Authenticated, 'user', 'group1']
        self.assertEqual(
            policy.principals_allowed_by_permission(None, None), expected)

    def test_forget(self):
        self.assertEqual(self._makeOne().forget(None), [])

    def test_remember(self):
        self.assertEqual(self._makeOne().remember(None, None), [])
class TestDummyResource(unittest.TestCase):
    """Exercise pyramid.testing.DummyResource."""

    def _getTargetClass(self):
        from pyramid.testing import DummyResource
        return DummyResource

    def _makeOne(self, name=None, parent=None, **kw):
        return self._getTargetClass()(name, parent, **kw)

    def test__setitem__and__getitem__and__delitem__and__contains__and_get(self):
        class Child:
            pass
        child = Child()
        res = self._makeOne()
        res['abc'] = child
        # Containment wires up location attributes on the child.
        self.assertEqual(child.__name__, 'abc')
        self.assertEqual(child.__parent__, res)
        self.assertEqual(res['abc'], child)
        self.assertEqual(res.get('abc'), child)
        self.assertRaises(KeyError, res.__getitem__, 'none')
        self.assertTrue('abc' in res)
        del res['abc']
        self.assertFalse('abc' in res)
        self.assertEqual(res.get('abc', 'foo'), 'foo')
        self.assertEqual(res.get('abc'), None)

    def test_extra_params(self):
        res = self._makeOne(foo=1)
        self.assertEqual(res.foo, 1)

    def test_clone(self):
        res = self._makeOne('name', 'parent', foo=1, bar=2)
        clone = res.clone('name2', 'parent2', bar=1)
        # Overridden values win; the rest is copied from the original.
        self.assertEqual(clone.bar, 1)
        self.assertEqual(clone.__name__, 'name2')
        self.assertEqual(clone.__parent__, 'parent2')
        self.assertEqual(clone.foo, 1)

    def test_keys_items_values_len(self):
        class Child:
            pass
        res = self._makeOne()
        res['abc'] = Child()
        res['def'] = Child()
        as_list = list
        self.assertEqual(as_list(res.values()), as_list(res.subs.values()))
        self.assertEqual(as_list(res.items()), as_list(res.subs.items()))
        self.assertEqual(as_list(res.keys()), as_list(res.subs.keys()))
        self.assertEqual(len(res), 2)

    def test_nonzero(self):
        self.assertEqual(self._makeOne().__nonzero__(), True)

    def test_bool(self):
        self.assertEqual(self._makeOne().__bool__(), True)

    def test_ctor_with__provides__(self):
        res = self._makeOne(__provides__=IDummy)
        self.assertTrue(IDummy.providedBy(res))
class TestDummyRequest(unittest.TestCase):
    """Tests for pyramid.testing.DummyRequest."""

    def _getTargetClass(self):
        from pyramid.testing import DummyRequest
        return DummyRequest

    def _makeOne(self, *arg, **kw):
        return self._getTargetClass()(*arg, **kw)

    def test_params(self):
        req = self._makeOne(
            params={'say': 'Hello'},
            environ={'PATH_INFO': '/foo'},
            headers={'X-Foo': 'YUP'},
        )
        self.assertEqual(req.params['say'], 'Hello')
        self.assertEqual(req.GET['say'], 'Hello')
        self.assertEqual(req.POST['say'], 'Hello')
        self.assertEqual(req.headers['X-Foo'], 'YUP')
        self.assertEqual(req.environ['PATH_INFO'], '/foo')

    def test_defaults(self):
        from pyramid.threadlocal import get_current_registry
        from pyramid.testing import DummySession
        req = self._makeOne()
        self.assertEqual(req.method, 'GET')
        self.assertEqual(req.application_url, 'http://example.com')
        self.assertEqual(req.host_url, 'http://example.com')
        self.assertEqual(req.path_url, 'http://example.com')
        self.assertEqual(req.url, 'http://example.com')
        self.assertEqual(req.host, 'example.com:80')
        self.assertEqual(req.content_length, 0)
        self.assertIsNone(req.environ.get('PATH_INFO'))
        self.assertIsNone(req.headers.get('X-Foo'))
        self.assertIsNone(req.params.get('foo'))
        self.assertIsNone(req.GET.get('foo'))
        self.assertIsNone(req.POST.get('foo'))
        self.assertIsNone(req.cookies.get('type'))
        self.assertEqual(req.path, '/')
        self.assertEqual(req.path_info, '/')
        self.assertEqual(req.script_name, '')
        self.assertEqual(req.path_qs, '')
        self.assertEqual(req.view_name, '')
        self.assertEqual(req.subpath, ())
        self.assertIsNone(req.context)
        self.assertIsNone(req.root)
        self.assertIsNone(req.virtual_root)
        self.assertEqual(req.virtual_root_path, ())
        self.assertEqual(req.registry, get_current_registry())
        self.assertEqual(req.session.__class__, DummySession)

    def test_params_explicit(self):
        req = self._makeOne(params={'foo': 'bar'})
        self.assertEqual(req.params['foo'], 'bar')
        self.assertEqual(req.GET['foo'], 'bar')
        self.assertEqual(req.POST['foo'], 'bar')

    def test_environ_explicit(self):
        req = self._makeOne(environ={'PATH_INFO': '/foo'})
        self.assertEqual(req.environ['PATH_INFO'], '/foo')

    def test_headers_explicit(self):
        req = self._makeOne(headers={'X-Foo': 'YUP'})
        self.assertEqual(req.headers['X-Foo'], 'YUP')

    def test_path_explicit(self):
        req = self._makeOne(path='/abc')
        self.assertEqual(req.path, '/abc')

    def test_cookies_explicit(self):
        req = self._makeOne(cookies={'type': 'gingersnap'})
        self.assertEqual(req.cookies['type'], 'gingersnap')

    def test_post_explicit(self):
        body = {'foo': 'bar', 'baz': 'qux'}
        req = self._makeOne(post=body)
        self.assertEqual(req.method, 'POST')
        self.assertEqual(req.POST, body)
        # N.B.: Unlike a normal request, passing 'post' should *not* put
        # explicit POST data into params: doing so masks a possible
        # XSS bug in the app.  Tests for apps which don't care about
        # the distinction should just use 'params'.
        self.assertEqual(req.params, {})

    def test_post_empty_shadows_params(self):
        req = self._makeOne(params={'foo': 'bar'}, post={})
        self.assertEqual(req.method, 'POST')
        self.assertEqual(req.params.get('foo'), 'bar')
        self.assertIsNone(req.POST.get('foo'))

    def test_kwargs(self):
        req = self._makeOne(water=1)
        self.assertEqual(req.water, 1)

    def test_add_response_callback(self):
        req = self._makeOne()
        req.add_response_callback(1)
        self.assertEqual(list(req.response_callbacks), [1])

    def test_registry_is_config_registry_when_setup_is_called_after_ctor(self):
        # see https://github.com/Pylons/pyramid/issues/165
        from pyramid.registry import Registry
        from pyramid.config import Configurator
        req = self._makeOne()
        try:
            registry = Registry('this_test')
            config = Configurator(registry=registry)
            config.begin()
            self.assertIs(req.registry, registry)
        finally:
            config.end()

    def test_set_registry(self):
        req = self._makeOne()
        req.registry = 'abc'
        self.assertEqual(req.registry, 'abc')

    def test_del_registry(self):
        # see https://github.com/Pylons/pyramid/issues/165
        from pyramid.registry import Registry
        from pyramid.config import Configurator
        req = self._makeOne()
        req.registry = 'abc'
        self.assertEqual(req.registry, 'abc')
        del req.registry
        try:
            registry = Registry('this_test')
            config = Configurator(registry=registry)
            config.begin()
            self.assertIs(req.registry, registry)
        finally:
            config.end()

    def test_response_with_responsefactory(self):
        from pyramid.registry import Registry
        from pyramid.interfaces import IResponseFactory
        registry = Registry('this_test')

        class ResponseFactory(object):
            pass

        registry.registerUtility(
            lambda r: ResponseFactory(), IResponseFactory
        )
        req = self._makeOne()
        req.registry = registry
        resp = req.response
        self.assertEqual(resp.__class__, ResponseFactory)
        self.assertIs(req.response, resp)  # reified

    def test_response_without_responsefactory(self):
        from pyramid.registry import Registry
        from pyramid.response import Response
        registry = Registry('this_test')
        req = self._makeOne()
        req.registry = registry
        resp = req.response
        self.assertEqual(resp.__class__, Response)
        self.assertIs(req.response, resp)  # reified
class TestDummyTemplateRenderer(unittest.TestCase):
    """Tests for pyramid.testing.DummyTemplateRenderer."""

    def _getTargetClass(self):
        from pyramid.testing import DummyTemplateRenderer
        return DummyTemplateRenderer

    def _makeOne(self, string_response=''):
        return self._getTargetClass()(string_response=string_response)

    def test_implementation(self):
        renderer = self._makeOne()
        impl = renderer.implementation()
        impl(a=1, b=2)
        received = renderer._implementation._received
        self.assertEqual(received['a'], 1)
        self.assertEqual(received['b'], 2)

    def test_getattr(self):
        renderer = self._makeOne()
        renderer({'a': 1})
        self.assertEqual(renderer.a, 1)
        self.assertRaises(AttributeError, renderer.__getattr__, 'b')

    def test_assert_(self):
        renderer = self._makeOne()
        renderer({'a': 1, 'b': 2})
        self.assertRaises(AssertionError, renderer.assert_, c=1)
        self.assertRaises(AssertionError, renderer.assert_, b=3)
        self.assertTrue(renderer.assert_(a=1, b=2))

    def test_nondefault_string_response(self):
        renderer = self._makeOne('abc')
        rendered = renderer({'a': 1, 'b': 2})
        self.assertEqual(rendered, 'abc')
class Test_setUp(unittest.TestCase):
    """Tests for pyramid.testing.setUp."""

    def _callFUT(self, **kw):
        from pyramid.testing import setUp
        return setUp(**kw)

    def tearDown(self):
        from pyramid.threadlocal import manager
        manager.clear()
        getSiteManager.reset()

    def _assertSMHook(self, hook):
        # Unhooking returns the previously-installed hook.
        result = getSiteManager.sethook(None)
        self.assertEqual(result, hook)

    def test_it_defaults(self):
        from pyramid.threadlocal import manager
        from pyramid.threadlocal import get_current_registry
        from pyramid.registry import Registry
        previous = True
        manager.push(previous)
        config = self._callFUT()
        current = manager.get()
        self.assertIsNot(current, previous)
        self.assertEqual(config.registry, current['registry'])
        self.assertEqual(current['registry'].__class__, Registry)
        self.assertIsNone(current['request'])
        self.assertEqual(config.package.__name__, 'pyramid.tests')
        self._assertSMHook(get_current_registry)

    def test_it_with_registry(self):
        from pyramid.registry import Registry
        from pyramid.threadlocal import manager
        registry = Registry()
        self._callFUT(registry=registry)
        self.assertEqual(manager.get()['registry'], registry)

    def test_it_with_request(self):
        from pyramid.threadlocal import manager
        request = object()
        self._callFUT(request=request)
        self.assertEqual(manager.get()['request'], request)

    def test_it_with_package(self):
        config = self._callFUT(package='pyramid')
        self.assertEqual(config.package.__name__, 'pyramid')

    def test_it_with_hook_zca_false(self):
        from pyramid.registry import Registry
        registry = Registry()
        self._callFUT(registry=registry, hook_zca=False)
        self.assertFalse(getSiteManager() is registry)

    def test_it_with_settings_passed_explicit_registry(self):
        from pyramid.registry import Registry
        registry = Registry()
        self._callFUT(registry=registry, hook_zca=False, settings=dict(a=1))
        self.assertEqual(registry.settings['a'], 1)

    def test_it_with_settings_passed_implicit_registry(self):
        config = self._callFUT(hook_zca=False, settings=dict(a=1))
        self.assertEqual(config.registry.settings['a'], 1)
class Test_cleanUp(Test_setUp):
    """cleanUp is an alias for setUp; rerun the whole Test_setUp suite against it."""

    def _callFUT(self, *arg, **kw):
        from pyramid.testing import cleanUp
        return cleanUp(*arg, **kw)
class Test_tearDown(unittest.TestCase):
    """Tests for pyramid.testing.tearDown."""

    def _callFUT(self, **kw):
        from pyramid.testing import tearDown
        return tearDown(**kw)

    def tearDown(self):
        from pyramid.threadlocal import manager
        manager.clear()
        getSiteManager.reset()

    def _assertSMHook(self, hook):
        result = getSiteManager.sethook(None)
        self.assertEqual(result, hook)

    def _setSMHook(self, hook):
        getSiteManager.sethook(hook)

    def test_defaults(self):
        from pyramid.threadlocal import manager
        registry = DummyRegistry()
        pushed = {'registry': registry}

        def hook(*arg):
            return None

        try:
            self._setSMHook(hook)
            manager.push(pushed)
            self._callFUT()
            self.assertNotEqual(manager.get(), pushed)
            # tearDown re-initializes the registry a second time.
            self.assertEqual(registry.inited, 2)
        finally:
            self.assertNotEqual(getSiteManager.sethook(None), hook)

    def test_registry_cannot_be_inited(self):
        from pyramid.threadlocal import manager
        registry = DummyRegistry()

        def raiseit(name):
            raise TypeError

        registry.__init__ = raiseit
        pushed = {'registry': registry}
        try:
            manager.push(pushed)
            self._callFUT()  # doesn't blow up
            self.assertNotEqual(manager.get(), pushed)
            self.assertEqual(registry.inited, 1)
        finally:
            manager.clear()

    def test_unhook_zc_false(self):
        def hook(*arg):
            return None

        try:
            self._setSMHook(hook)
            self._callFUT(unhook_zca=False)
        finally:
            # The hook must still be installed afterwards.
            self._assertSMHook(hook)
class TestDummyRendererFactory(unittest.TestCase):
    """Tests for pyramid.testing.DummyRendererFactory."""

    def _makeOne(self, name, factory):
        from pyramid.testing import DummyRendererFactory
        return DummyRendererFactory(name, factory)

    def test_add_no_colon(self):
        factory = self._makeOne('name', None)
        factory.add('spec', 'renderer')
        self.assertEqual(factory.renderers['spec'], 'renderer')

    def test_add_with_colon(self):
        factory = self._makeOne('name', None)
        factory.add('spec:spec2', 'renderer')
        # Registered under both the full spec and the post-colon part.
        self.assertEqual(factory.renderers['spec:spec2'], 'renderer')
        self.assertEqual(factory.renderers['spec2'], 'renderer')

    def test_call(self):
        factory = self._makeOne('name', None)
        factory.renderers['spec'] = 'renderer'
        info = DummyRendererInfo({'name': 'spec'})
        self.assertEqual(factory(info), 'renderer')

    def test_call2(self):
        factory = self._makeOne('name', None)
        factory.renderers['spec'] = 'renderer'
        info = DummyRendererInfo({'name': 'spec:spec'})
        self.assertEqual(factory(info), 'renderer')

    def test_call3(self):
        def fallback(spec):
            return 'renderer'
        factory = self._makeOne('name', fallback)
        info = DummyRendererInfo({'name': 'spec'})
        self.assertEqual(factory(info), 'renderer')

    def test_call_miss(self):
        factory = self._makeOne('name', None)
        info = DummyRendererInfo({'name': 'spec'})
        self.assertRaises(KeyError, factory, info)
class TestMockTemplate(unittest.TestCase):
    """Tests for pyramid.testing.MockTemplate."""

    def _makeOne(self, response):
        from pyramid.testing import MockTemplate
        return MockTemplate(response)

    def test_getattr(self):
        template = self._makeOne(None)
        # Attribute access returns the template itself.
        self.assertEqual(template.foo, template)

    def test_getitem(self):
        template = self._makeOne(None)
        # Item access also returns the template itself.
        self.assertEqual(template['foo'], template)

    def test_call(self):
        template = self._makeOne('123')
        self.assertEqual(template(), '123')
class Test_skip_on(unittest.TestCase):
    """Tests for the pyramid.testing.skip_on decorator."""

    def setUp(self):
        # Pretend the current OS is named 'wrong' for the test's duration.
        from pyramid.testing import skip_on
        self.os_name = skip_on.os_name
        skip_on.os_name = 'wrong'

    def tearDown(self):
        from pyramid.testing import skip_on
        skip_on.os_name = self.os_name

    def _callFUT(self, *platforms):
        from pyramid.testing import skip_on
        return skip_on(*platforms)

    def test_wrong_platform(self):
        def stub():
            return True
        decorated = self._callFUT('wrong')(stub)
        # Skipped on a matching platform: the wrapper returns None.
        self.assertIsNone(decorated())

    def test_ok_platform(self):
        def stub():
            return True
        decorated = self._callFUT('ok')(stub)
        # Not skipped: the original function runs.
        self.assertEqual(decorated(), True)
class TestDummySession(unittest.TestCase):
    """Tests for pyramid.testing.DummySession."""

    def _makeOne(self):
        from pyramid.testing import DummySession
        return DummySession()

    def test_instance_conforms(self):
        from zope.interface.verify import verifyObject
        from pyramid.interfaces import ISession
        verifyObject(ISession, self._makeOne())

    def test_changed(self):
        session = self._makeOne()
        self.assertIsNone(session.changed())

    def test_invalidate(self):
        session = self._makeOne()
        session['a'] = 1
        self.assertIsNone(session.invalidate())
        self.assertNotIn('a', session)

    def test_flash_default(self):
        session = self._makeOne()
        session.flash('msg1')
        session.flash('msg2')
        self.assertEqual(session['_f_'], ['msg1', 'msg2'])

    def test_flash_mixed(self):
        session = self._makeOne()
        session.flash('warn1', 'warn')
        session.flash('warn2', 'warn')
        session.flash('err1', 'error')
        session.flash('err2', 'error')
        self.assertEqual(session['_f_warn'], ['warn1', 'warn2'])

    def test_pop_flash_default_queue(self):
        session = self._makeOne()
        messages = ['one', 'two']
        session['_f_'] = messages
        self.assertEqual(session.pop_flash(), messages)
        self.assertIsNone(session.get('_f_'))

    def test_pop_flash_nodefault_queue(self):
        session = self._makeOne()
        messages = ['one', 'two']
        session['_f_error'] = messages
        self.assertEqual(session.pop_flash('error'), messages)
        self.assertIsNone(session.get('_f_error'))
| |
unique_qcomponents,
chip_name)
if table_name == 'junction':
self.chip_info[chip_name]['junction'] = deepcopy(table)
else:
# For every chip, and layer, separate the "subtract" and "no_subtract" elements and gather bounds.
# dict_bounds[chip_name] = list_bounds
self.gather_subtract_elements_and_bounds(
chip_name, table_name, table, all_table_subtracts,
all_table_no_subtracts)
# If list of QComponents provided, use the bounding_box_scale(x and y),
# otherwise use self._chips
scaled_max_bound, max_bound = self.scale_max_bounds(
chip_name, self.dict_bounds[chip_name]['gather'])
if highlight_qcomponents:
self.dict_bounds[chip_name]['for_subtract'] = scaled_max_bound
else:
chip_box, status = self.design.get_x_y_for_chip(chip_name)
if status == 0:
self.dict_bounds[chip_name]['for_subtract'] = chip_box
else:
self.dict_bounds[chip_name]['for_subtract'] = max_bound
self.logger.warning(
f'design.get_x_y_for_chip() did NOT return a good code for chip={chip_name},'
f'for ground subtraction-box using the size calculated from QGeometry, ({max_bound}) will be used. '
)
if is_true(self.options.ground_plane):
self.handle_ground_plane(chip_name, all_table_subtracts,
all_table_no_subtracts)
return 0
def handle_ground_plane(self, chip_name: str, all_table_subtracts: list,
                        all_table_no_subtracts: list):
    """Place all the subtract geometries for one chip into
    self.chip_info[chip_name][chip_layer]['all_subtract_true'].

    For a LINESTRING within a table that has a value for fillet, check if any
    segment is shorter than the fillet radius.  If so, break the LINESTRING so
    that shorter segments do not get fillet'ed and longer segments do, add the
    multiple LINESTRINGs back to the table, and remove the "bad" LINESTRING.
    Then use qgeometry_to_gds() to convert the QGeometry elements to gdspy
    elements; they are placed in
    self.chip_info[chip_name][chip_layer]['q_subtract_true'].

    Args:
        chip_name (str): Chip_name that is being processed.
        all_table_subtracts (list): Add to self.chip_info by layer number.
        all_table_no_subtracts (list): Add to self.chip_info by layer number.
    """
    # Option controlling whether short segments are split off before fillet.
    fix_short_segments = self.parse_value(
        self.options.short_segments_to_not_fillet)
    all_layers = self.design.qgeometry.get_all_unique_layers(chip_name)
    for chip_layer in all_layers:
        # NOTE(review): the two empty-list initializations below are
        # immediately overwritten by the deepcopies that follow.
        copy_subtract = []
        copy_no_subtract = []
        # Deep-copy so per-layer row drops don't mutate the caller's tables.
        copy_subtract = deepcopy(all_table_subtracts)
        copy_no_subtract = deepcopy(all_table_no_subtracts)
        # Keep only rows belonging to the current layer.
        for item in copy_subtract:
            item.drop(item.index[item['layer'] != chip_layer], inplace=True)
        for item_no in copy_no_subtract:
            item_no.drop(item_no.index[item_no['layer'] != chip_layer],
                         inplace=True)
        # Collapse the per-table lists into single GeoDataFrames per layer.
        self.chip_info[chip_name][chip_layer][
            'all_subtract_true'] = geopandas.GeoDataFrame(
                pd.concat(copy_subtract, ignore_index=False))
        self.chip_info[chip_name][chip_layer][
            'all_subtract_false'] = geopandas.GeoDataFrame(
                pd.concat(copy_no_subtract, ignore_index=False))
        self.chip_info[chip_name][chip_layer][
            'all_subtract_true'].reset_index(inplace=True)
        self.chip_info[chip_name][chip_layer][
            'all_subtract_false'].reset_index(inplace=True)
        if is_true(fix_short_segments):
            # Split LineStrings with segments shorter than the fillet radius.
            self.fix_short_segments_within_table(chip_name, chip_layer,
                                                 'all_subtract_true')
            self.fix_short_segments_within_table(chip_name, chip_layer,
                                                 'all_subtract_false')
        # Convert each QGeometry row into a gdspy element.
        self.chip_info[chip_name][chip_layer][
            'q_subtract_true'] = self.chip_info[chip_name][chip_layer][
                'all_subtract_true'].apply(self.qgeometry_to_gds, axis=1)
        self.chip_info[chip_name][chip_layer][
            'q_subtract_false'] = self.chip_info[chip_name][chip_layer][
                'all_subtract_false'].apply(self.qgeometry_to_gds, axis=1)
# Handling Fillet issues.
def fix_short_segments_within_table(self, chip_name: str, chip_layer: int,
                                    all_sub_true_or_false: str):
    """Update self.chip_info geopandas.GeoDataFrame.

    Iterates through the rows to examine each LineString.  If a segment is
    shorter than the criteria based on default_options (see check_length),
    the row is removed and replaced by shorter LineStrings; the too-short
    pieces get fillet=NaN so they are not fillet'ed.

    Args:
        chip_name (str): The name of chip.
        chip_layer (int): The layer within the chip to be evaluated.
        all_sub_true_or_false (str): To be used within self.chip_info:
            'all_subtract_true' or 'all_subtract_false'.
    """
    df = self.chip_info[chip_name][chip_layer][all_sub_true_or_false]
    # Only rows that request a fillet can have problematic short segments.
    # (Unary `-` on a boolean Series raises TypeError in modern pandas;
    # use notnull() instead of `-isnull()`.)
    df_fillet = df[df['fillet'].notnull()]

    if not df_fillet.empty:
        # Don't edit the table while iterating through the rows.
        # Save info in dict and then edit the table.
        edit_index = dict()
        for index, row in df_fillet.iterrows():
            status, all_shapelys = self.check_length(
                row.geometry, row.fillet)
            if status > 0:
                edit_index[index] = all_shapelys

        df_copy = self.chip_info[chip_name][chip_layer][
            all_sub_true_or_false].copy(deep=True)
        replacement_rows = []
        for del_key, the_shapes in edit_index.items():
            # Copy row "del_key" once per replacement shape, swapping in the
            # shorter LineString and its (possibly NaN) fillet value.
            orig_row = df_copy.loc[del_key].copy(deep=True)
            df_copy = df_copy.drop(index=del_key)
            for _new_row, short_shape in the_shapes.items():
                replacement = orig_row.copy(deep=True)
                replacement['geometry'] = short_shape['line']
                replacement['fillet'] = short_shape['fillet']
                # Keep the original index label (the Series name) so any
                # later del_key can still be located within df_copy.
                replacement_rows.append(replacement.to_frame().T)
        if replacement_rows:
            # DataFrame.append() was removed in pandas 2.0; pd.concat is the
            # supported equivalent and likewise preserves index labels.
            df_copy = pd.concat([df_copy] + replacement_rows)

        self.chip_info[chip_name][chip_layer][
            all_sub_true_or_false] = df_copy.copy(deep=True)
def check_length(self, a_shapely: shapely.geometry.LineString,
                 a_fillet: float) -> Tuple[int, Dict]:
    """Determine if a_shapely has short segments based on scaled fillet value.

    Use check_short_segments_by_scaling_fillet to determine the criteria for
    flagging a segment.  Return Tuple with flagged segments.

    The "status" returned is an int:
        * -1: Method needs to update the return code.
        * 0: No issues, no short segments found.
        * int: The number of shapelys returned.  The new shapelys should
          replace the one provided in a_shapely.

    The "shorter_lines" returned is a dict:
        key: Using the index values from list(a_shapely.coords)
        value: dict() for each new, shorter, LineString with
            key 'fillet': float from before, or NaN to denote no fillet, and
            key 'line': the shorter LineString.

    Args:
        a_shapely (shapely.geometry.LineString): A shapely object that needs
            to be evaluated.
        a_fillet (float): From component developer.

    Returns:
        Tuple[int, Dict]:
            int: Number of short segments that should not have fillet.
            Dict: Key: Index into a_shapely, Value: dict with fillet and
            shorter LineString.
    """
    # Holds all of the index of when a segment is too short.
    idx_bad_fillet = list()
    status = -1  # Initalize to meaningless value.
    coords = list(a_shapely.coords)
    len_coords = len(coords)

    # Populated by identify_vertex_not_to_fillet with:
    #   'reduced_idx': (start, stop) index pairs of too-short runs, and
    #   'midpoints':   per-segment midpoints used to stitch pieces together.
    all_idx_bad_fillet = dict()
    self.identify_vertex_not_to_fillet(coords, a_fillet, all_idx_bad_fillet)

    shorter_lines = dict()

    idx_bad_fillet = sorted(all_idx_bad_fillet['reduced_idx'])
    status = len(idx_bad_fillet)

    if status:
        midpoints = all_idx_bad_fillet['midpoints']
        no_fillet_vertices = list()
        fillet_vertices = list()

        # Gather the no-fillet segments.  Each too-short run becomes its own
        # LineString with fillet=NaN, extended to the adjacent midpoints so
        # the pieces join seamlessly.
        for idx, (start, stop) in enumerate(idx_bad_fillet):
            no_fillet_vertices.clear()
            if idx == 0 and start == 0:
                # The first segment.
                if stop == len_coords - 1:
                    # Every vertex should not be fillet'd
                    no_fillet_vertices = coords[start:len_coords]
                    shorter_lines[stop] = dict({
                        'line': LineString(no_fillet_vertices),
                        'fillet': float('NaN')
                    })
                else:
                    no_fillet_vertices = coords[start:stop + 1]
                    no_fillet_vertices.append(midpoints[stop])
                    shorter_lines[stop] = dict({
                        'line': LineString(no_fillet_vertices),
                        'fillet': float('NaN')
                    })
            elif idx == status - 1 and stop == len_coords - 1:
                # The last segment
                no_fillet_vertices = coords[start:stop + 1]
                no_fillet_vertices.insert(0, midpoints[start - 1])
                shorter_lines[stop] = dict({
                    'line': LineString(no_fillet_vertices),
                    'fillet': float('NaN')
                })
            else:
                # Segment in between first and last segment.
                no_fillet_vertices = coords[start:stop + 1]
                no_fillet_vertices.insert(0, midpoints[start - 1])
                no_fillet_vertices.append(midpoints[stop])
                shorter_lines[stop] = dict({
                    'line': LineString(no_fillet_vertices),
                    'fillet': float('NaN')
                })

        # Gather the fillet segments: the stretches between too-short runs
        # keep the original fillet value.
        at_vertex = 0
        for idx, (start, stop) in enumerate(idx_bad_fillet):
            fillet_vertices.clear()
            if idx == 0 and start == 0:
                pass  # just update at_vertex
            if idx == 0 and start == 1:
                init_tuple = coords[0]
                fillet_vertices = [init_tuple, midpoints[start - 1]]
                shorter_lines[start] = dict({
                    'line': LineString(fillet_vertices),
                    'fillet': a_fillet
                })
            if idx == 0 and start > 1:
                fillet_vertices = coords[0:start]
                fillet_vertices.append(midpoints[start - 1])
                shorter_lines[start] = dict({
                    'line': LineString(fillet_vertices),
                    'fillet': a_fillet
                })
            if idx == status - 1 and stop != len_coords - 1:
                # Extra segment after the last no-fillet.
                fillet_vertices.clear()
                fillet_vertices = coords[stop + 1:len_coords]
                fillet_vertices.insert(0, midpoints[stop])
                shorter_lines[len_coords] = dict({
                    'line': LineString(fillet_vertices),
                    'fillet': a_fillet
                })
            # NOTE(review): the two elif branches below appear unreachable —
            # each condition implies the `if` condition just above, which is
            # checked first.  Left as-is pending confirmation.
            elif idx == status - 1 and start == 0 and stop != len_coords - 1:
                # At last tuple, and start at first index, and the stop is
                # not last index of coords.
                fillet_vertices = coords[stop + 1:len_coords]
                fillet_vertices.insert(0, midpoints[stop])
                shorter_lines[start] = dict({
                    'line': LineString(fillet_vertices),
                    'fillet': a_fillet
                })
            elif idx == status - 1 and stop != len_coords - 1:
                # At last tuple, and the stop is not last index of coords.
                fillet_vertices = coords[at_vertex + 1:start]
                fillet_vertices.insert(0, midpoints[at_vertex])
                fillet_vertices.append(midpoints[start - 1])
                shorter_lines[start] = dict({
                    'line': LineString(fillet_vertices),
                    'fillet': a_fillet
                })
                # Extra segment after the last no-fillet.
                fillet_vertices.clear()
                fillet_vertices = coords[stop + 1:len_coords]
                fillet_vertices.insert(0, midpoints[stop])
                shorter_lines[len_coords] = dict({
                    'line': LineString(fillet_vertices),
                    'fillet': a_fillet
                })
            else:
                if (start - at_vertex) > 1:
                    fillet_vertices = coords[at_vertex + 1:start]
                    fillet_vertices.insert(0, midpoints[at_vertex])
                    fillet_vertices.append(midpoints[start - 1])
                    shorter_lines[start] = dict({
                        'line': LineString(fillet_vertices),
                        'fillet': a_fillet
                    })
            at_vertex = stop  # Need to update for every loop.
    else:
        # No short segments.
        shorter_lines[len_coords - 1] = a_shapely
    return status, shorter_lines
def identify_vertex_not_to_fillet(self, coords: list, a_fillet: float,
all_idx_bad_fillet: dict):
"""Use coords to denote segments that are too short. In particular,
when fillet'd, they will cause the appearance of incorrect fillet when graphed.
Args:
coords (list): User provide a list of tuples. The tuple is (x,y) location for a vertex.
The list represents a LineString.
a_fillet (float): The value provided by component developer.
all_idx_bad_fillet (dict): An empty dict which will be populated by this method.
Dictionary:
Key 'reduced_idx' will hold list of tuples. The tuples correspond to index for list named "coords".
Key 'midpoints' will hold list of tuples. The index of a tuple corresponds to two index within coords.
For example, a index in midpoints is x, that coresponds midpoint of segment x-1 to x.
"""
# Depreciated since there is no longer a scale factor given to QCheckLength.
# fillet_scale_factor = self.parse_value(
# self.options.check_short_segments_by_scaling_fillet)
precision = float(self.parse_value(self.options.precision))
# For now, DO NOT allow the user of GDS to provide the precision.
# user_precision | |
from pygrank.algorithms.utils import MethodHasher, call, ensure_used_args, remove_used_args
from pygrank.core.signals import GraphSignal, to_signal, NodeRanking
from pygrank.core import backend, GraphSignalGraph, GraphSignalData
from typing import Union, Optional
class Postprocessor(NodeRanking):
    """Base class for node rank postprocessors wrapping an optional base ranker."""

    def __init__(self, ranker: NodeRanking = None):
        self.ranker = ranker

    def transform(self, ranks: GraphSignal, *args, **kwargs):
        outcome = call(self._transform, kwargs, [ranks])
        return to_signal(ranks, outcome)

    def rank(self, *args, **kwargs):
        base_ranks = self.ranker.rank(*args, **kwargs)
        leftover_kwargs = remove_used_args(self.ranker.rank, kwargs)
        return to_signal(base_ranks,
                         call(self._transform, leftover_kwargs, [base_ranks]))

    def _transform(self, ranks: GraphSignal, **kwargs):
        # Subclasses must provide the actual transformation.
        raise Exception(f"_transform method not implemented for the class {self.__class__.__name__}")

    def _reference(self):
        return self.__class__.__name__

    def references(self):
        own_ref = self._reference()
        if self.ranker is None:
            return [own_ref]
        collected = self.ranker.references()
        if own_ref is not None and len(own_ref) > 0:
            collected.append(own_ref)
        return collected
class Tautology(Postprocessor):
    """Identity postprocessor: returns ranks unchanged.

    Serves as a baseline against which other postprocessors or graph filters
    can be compared.
    """

    def __init__(self, ranker: NodeRanking = None):
        """Initializes the Tautology postprocessor with a base ranker.

        Args:
            ranker: The base ranker instance. If None (default), this works as a base ranker that returns
                a copy of personalization signals as-are or a conversion of backend primitives into signals.
        """
        super().__init__(ranker)

    def transform(self, ranks: GraphSignal, *args, **kwargs) -> GraphSignal:
        return ranks

    def rank(self,
             graph: GraphSignalGraph = None,
             personalization: GraphSignalData = None,
             *args, **kwargs) -> GraphSignal:
        # With no base ranker, act as a signal constructor/converter.
        if self.ranker is None:
            return to_signal(graph, personalization)
        return self.ranker.rank(graph, personalization, *args, **kwargs)

    def _reference(self):
        return "" if self.ranker is not None else "tautology"
class MabsMaintain(Postprocessor):
    """Forces node ranking posteriors to have the same mean absolute value as prior inputs."""

    def __init__(self, ranker):
        """Initializes the postprocessor with a base ranker instance.

        Args:
            ranker: Optional. The base ranker instance. If None (default), a Tautology() ranker is created.
        """
        super().__init__(ranker if ranker is not None else Tautology())

    def rank(self, graph=None, personalization=None, *args, **kwargs):
        personalization = to_signal(graph, personalization)
        prior_norm = backend.sum(backend.abs(personalization.np))
        ranks = self.ranker(graph, personalization, *args, **kwargs)
        if prior_norm != 0:
            # Rescale posteriors so their absolute sum matches the prior's.
            posterior_norm = backend.sum(backend.abs(ranks.np))
            ranks.np = ranks.np * prior_norm / posterior_norm
        return ranks

    def _reference(self):
        return "mabs preservation"
class Normalize(Postprocessor):
    """Normalizes ranks by dividing with their maximal value, their sum, or by rescaling them to [0,1]."""

    def __init__(self,
                 ranker: Optional[Union[NodeRanking, str]] = None,
                 method: Optional[Union[NodeRanking, str]] = "max"):
        """ Initializes the class with a base ranker instance. Args are automatically filled in and
        re-ordered if at least one is provided.

        Args:
            ranker: Optional. The base ranker instance. A Tautology() ranker is created if None (default) was specified.
            method: Optional. Divide ranks either by their "max" (default) or by their "sum" or make them lie in the
                "range" [0,1] by subtracting their min before dividing by their max.

        Example:
            >>> import pygrank as pg
            >>> graph, personalization, algorithm = ...
            >>> algorithm = pg.Normalize(algorithm, "range")  # linearly rescales ranks to [0, 1]
            >>> ranks = algorithm.rank(graph, personalization)

        Example (same outcome, simpler one-liner):
            >>> import pygrank as pg
            >>> graph, personalization, algorithm = ...
            >>> ranks = pg.Normalize("range").transform(algorithm.rank(graph, personalization))
        """
        # Accept arguments in either (ranker, method) or (method, ranker) order.
        if ranker is not None and not callable(getattr(ranker, "rank", None)):
            ranker, method = method, ranker
        if not callable(getattr(ranker, "rank", None)):
            ranker = None
        super().__init__(Tautology() if ranker is None else ranker)
        self.method = method

    def _transform(self, ranks: GraphSignal, **kwargs):
        ensure_used_args(kwargs)
        min_rank = 0
        if self.method == "range":
            max_rank = float(backend.max(ranks.np))
            min_rank = float(backend.min(ranks.np))
        elif self.method == "max":
            max_rank = float(backend.max(ranks.np))
        elif self.method == "sum":
            max_rank = float(backend.sum(ranks.np))
        else:
            # Error message previously omitted the supported "range" option.
            raise Exception("Can only normalize towards max, sum or range")
        if min_rank == max_rank:
            # Degenerate case (e.g. constant signal): avoid division by zero.
            return ranks
        return (ranks.np - min_rank) / (max_rank - min_rank)

    def _reference(self):
        if self.method == "range":
            return "[0,1] " + self.method + " normalization"
        return self.method + " normalization"
class Ordinals(Postprocessor):
    """Converts ranking outcome to ordinal numbers.

    The highest rank becomes 1, the second highest 2, and so on.
    """

    def __init__(self, ranker=None):
        """ Initializes the class with a base ranker instance.

        Args:
            ranker: Optional. The base ranker instance. A Tautology() ranker is created if None (default) was specified.

        Example:
            >>> import pygrank as pg
            >>> graph, personalization, algorithm = ...
            >>> algorithm = pg.Ordinals(algorithm)
            >>> ranks = algorithm.rank(graph, personalization)

        Example (same outcome, simpler one-liner):
            >>> import pygrank as pg
            >>> graph, personalization, algorithm = ...
            >>> ranks = pg.Ordinals().transform(algorithm.rank(graph, personalization))
        """
        super().__init__(Tautology() if ranker is None else ranker)

    def _transform(self, ranks: GraphSignal, **kwargs):
        ensure_used_args(kwargs)
        ordered_nodes = sorted(ranks, key=ranks.get, reverse=True)
        return {node: position for position, node in enumerate(ordered_nodes, start=1)}

    def _reference(self):
        return "ordinal conversion"
class Transformer(Postprocessor):
    """Applies an element-by-element transformation on a graph signal based on a given expression."""

    def __init__(self, ranker=None, expr=backend.exp):
        """ Initializes the class with a base ranker instance. Args are automatically filled in and
        re-ordered if at least one is provided.

        Args:
            ranker: Optional. The base ranker instance. A Tautology() ranker is created if None (default) was specified.
            expr: Optional. A lambda expression to apply on each element. The transformer will automatically try to
                apply it on the backend array representation of the graph signal first, so prefer pygrank's backend
                functions for faster computations. For example, backend.exp (default) should be preferred instead of
                math.exp, because the former can directly parse numpy arrays, tensors, etc.

        Example:
            >>> import pygrank as pg
            >>> graph, personalization, algorithm = ...
            >>> r1 = pg.Normalize(algorithm, "sum").rank(graph, personalization)
            >>> r2 = pg.Transformer(algorithm, lambda x: x/pg.sum(x)).rank(graph, personalization)
            >>> print(pg.Mabs(r1)(r2))
        """
        # Accept arguments in either (ranker, expr) or (expr, ranker) order.
        if ranker is not None and not callable(getattr(ranker, "rank", None)):
            ranker, expr = expr, ranker
        if not callable(getattr(ranker, "rank", None)):
            ranker = None
        super().__init__(Tautology() if ranker is None else ranker)
        self.expr = expr

    def _transform(self, ranks: GraphSignal, **kwargs):
        ensure_used_args(kwargs)
        try:
            # Fast path: apply the expression directly to the backend array.
            return self.expr(ranks.np)
        except Exception:
            # Previously a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Fall back to element-wise
            # application for expressions that cannot handle arrays.
            return {v: self.expr(ranks[v]) for v in ranks}

    def _reference(self):
        return "element-by-element " + self.expr.__name__
class Threshold(Postprocessor):
    """Converts ranking outcome to binary values based on a threshold value."""

    def __init__(self,
                 ranker: Union[str, float, NodeRanking] = None,
                 threshold: Union[str, float, NodeRanking] = "gap"):
        """Initializes the Threshold postprocessing scheme. Args are automatically filled in and
        re-ordered if at least one is provided.

        Args:
            ranker: Optional. The base ranker instance. A Tautology() ranker is created if None (default) was specified.
            threshold: Optional. The minimum numeric value required to output rank 1 instead of 0. If "gap" (default)
                then its value is automatically determined based on the maximal percentage increase between consecutive
                ranks.

        Example:
            >>> import pygrank as pg
            >>> graph, personalization, algorithm = ...
            >>> algorithm = pg.Threshold(algorithm, 0.5)  # sets ranks >= 0.5 to 1 and lower ones to 0
            >>> ranks = algorithm.rank(graph, personalization)

        Example (same outcome):
            >>> import pygrank as pg
            >>> graph, personalization, algorithm = ...
            >>> ranks = pg.Threshold(0.5).transform(algorithm.rank(graph, personalization))
        """
        # Arguments may arrive in either order; a ranker is recognized by a callable .rank attribute.
        if ranker is not None and not callable(getattr(ranker, "rank", None)):
            ranker, threshold = threshold, ranker
            if not callable(getattr(ranker, "rank", None)):
                ranker = None
        super().__init__(Tautology() if ranker is None else ranker)
        self.threshold = threshold

    def _transform(self,
                   ranks: GraphSignal,
                   **kwargs):
        ensure_used_args(kwargs)
        threshold = self.threshold
        if threshold == "gap":
            # TODO maybe enable ranks = {v: ranks[v] / ranks.graph.degree(v) for v in ranks} with a postprocessor
            # Locate the largest relative drop between consecutive scores (in
            # descending order) and cut right below it.
            best_drop = 0
            threshold = 0
            previous = 0
            for node in sorted(ranks, key=ranks.get, reverse=True):
                current = ranks[node]
                if previous > 0:
                    drop = (previous - current) / previous
                    if drop > best_drop:
                        best_drop = drop
                        threshold = current
                previous = current
        return {node: 1 if ranks[node] >= threshold else 0 for node in ranks}

    def _reference(self):
        return str(self.threshold) + " threshold"
class Sweep(Postprocessor):
"""
Applies a sweep procedure that divides personalized node ranks by corresponding non-personalized ones.
"""
def __init__(self,
ranker: NodeRanking,
uniform_ranker: NodeRanking = None):
"""
Initializes the sweep procedure.
Args:
ranker: The base ranker instance.
uniform_ranker: Optional. The ranker instance used to perform non-personalized ranking. If None (default)
the base ranker is used.
Example:
>>> import pygrank as pg
>>> graph, personalization, algorithm = ...
>>> algorithm = pg.Sweep(algorithm) # divides node scores by uniform ranker'personalization non-personalized outcome
>>> ranks = algorithm.rank(graph, personalization
Example with different rankers:
>>> import pygrank as pg
>>> graph, personalization, algorithm, uniform_ranker = ...
>>> algorithm = pg.Sweep(algorithm, uniform_ranker=uniform_ranker)
>>> ranks = algorithm.rank(graph, personalization)
Example (same outcome):
>>> import pygrank as pg
>>> graph, personalization, uniform_ranker, algorithm = ...
>>> ranks = pg.Threshold(uniform_ranker).transform(algorithm.rank(graph, personalization))
"""
super().__init__(ranker)
self.uniform_ranker = ranker if uniform_ranker is | |
"""Actions for linking object code produced by compilation"""
load(":private/packages.bzl", "expose_packages", "pkg_info_to_ghc_args")
load("@bazel_skylib//lib:paths.bzl", "paths")
load(
":private/path_utils.bzl",
"get_lib_name",
"is_shared_library",
"is_static_library",
"ln",
)
load(":private/pkg_id.bzl", "pkg_id")
load(":private/providers.bzl", "get_mangled_libs")
load(":private/set.bzl", "set")
load(":private/list.bzl", "list")
# tests in /tests/unit_tests/BUILD
def parent_dir_path(path):
    """Returns the parent directory of a path as a list of segments.

    For a relative path with just a file, ["."] is returned.
    The path is not normalized.

    foo => .
    foo/ => foo
    foo/bar => foo
    foo/bar/baz => foo/bar
    foo/../bar => foo/..

    Args:
      path: a path string

    Returns:
      A path list of the form `["foo", "bar"]`
    """
    dirname = paths.dirname(path)

    # paths.dirname yields "" when there is no parent directory;
    # represent that case with the identity path ".".
    return ["."] if dirname == "" else dirname.split("/")
def __check_dots(target, path):
    # After stripping any leading "..", no further ".." segments may remain
    # in the split path; abort the analysis with an explanatory error if so.
    if ".." not in path:
        return
    fail("the short_path of target {} (which is {}) contains more dots than loading `../`. We can’t handle that.".format(target, target.short_path))
# skylark doesn’t allow nested defs, which is a mystery.
def _get_target_parent_dir(target):
    """Get the parent dir and handle leading short_path dots,
    which signify that the target is in an external repository.

    Args:
      target: a target, .short_path is used

    Returns:
      (is_external, parent_dir)
      `is_external`: Bool whether the path points to an external repository
      `parent_dir`: The parent directory, either up to the runfiles toplevel,
        or up to the external repository toplevel.
    """
    parent_dir = parent_dir_path(target.short_path)
    is_external = parent_dir[0] == ".."

    # A leading ".." marks a target in an external repository; drop it before
    # verifying that no other ".." segments are left.
    if is_external:
        parent_dir = parent_dir[1:]
    __check_dots(target, parent_dir)
    return (is_external, parent_dir)
# tests in /tests/unit_tests/BUILD
def create_rpath_entry(
        binary,
        dependency,
        keep_filename,
        comes_from_haskell_cc_import,
        prefix = ""):
    """Return a (relative) path that points from `binary` to `dependency`
    while not leaving the current bazel runpath, taking into account weird
    corner cases of `.short_path` concerning external repositories.
    The resulting entry should be able to be inserted into rpath or similar.

    runpath/foo/a.so to runfile/bar/b.so => ../bar
    with `keep_filename=True`:
      runpath/foo/a.so to runfile/bar/b.so => ../bar/b.so
    with `prefix="$ORIGIN"`:
      runpath/foo/a.so to runfile/bar/b.so => $ORIGIN/../bar/b.so

    Args:
      binary: target of current binary
      dependency: target of dependency to relatively point to
      keep_filename: whether to point to the filename or its parent dir
      comes_from_haskell_cc_import: if dependency is a haskell_cc_import
      prefix: string path prefix to add before the relative path

    Returns:
      relative path string
    """

    # we need to use different paths if the dependency was imported
    # via the `haskell_cc_import` rule.
    # XXX: remove once `haskell_cc_import` is removed.
    if comes_from_haskell_cc_import:
        # backup the full path to the binary
        # This abuses the fact that the linker resolves symlinks,
        # meaning the `binary` path is not actually the path in the
        # runfiles folder, rather the one in the bazel execpath.
        bin_backup = [".."] * len(parent_dir_path(binary.path))

        # append the full path of our dependency
        path_segments = bin_backup + parent_dir_path(dependency.path)
    else:
        (bin_is_external, bin_parent_dir) = _get_target_parent_dir(binary)
        (dep_is_external, dep_parent_dir) = _get_target_parent_dir(dependency)

        # backup through parent directories of the binary,
        # to the runfiles directory
        bin_backup = [".."] * len(bin_parent_dir)

        # external repositories live in `target.runfiles/external`,
        # while the internal repository lives in `target.runfiles`.
        # The `.short_path`s of external repositories are strange,
        # they start with `../`, but you cannot just append that in
        # order to find the correct runpath. Instead you have to use
        # the following logic to construct the correct runpaths:
        if bin_is_external:
            if dep_is_external:
                # stay in `external`
                path_segments = bin_backup
            else:
                # backup out of `external`
                path_segments = [".."] + bin_backup
        elif dep_is_external:
            # go into `external`
            path_segments = bin_backup + ["external"]
        else:
            # no special external traversal
            path_segments = bin_backup

        # then add the parent dir to our dependency
        path_segments.extend(dep_parent_dir)

    # optionally add the filename
    if keep_filename:
        path_segments.append(
            paths.basename(dependency.short_path),
        )

    # normalize for good measure and create the final path
    path = paths.normalize("/".join(path_segments))

    # and add the prefix if applicable
    if prefix == "":
        return path
    else:
        return prefix + "/" + path
def _merge_parameter_files(hs, file1, file2):
    """Concatenate two GHC parameter files into a single new one.

    Args:
      hs: Haskell context.
      file1: The first parameter file.
      file2: The second parameter file.

    Returns:
      File: A new parameter file containing the parameters of both input files.
        The file name is based on the file names of the input files. The file
        is located next to the first input file.
    """
    merged = hs.actions.declare_file(
        file1.basename + ".and." + file2.basename,
        sibling = file1,
    )
    hs.actions.run_shell(
        inputs = [file1, file2],
        outputs = [merged],
        command = """
            cat {file1} {file2} > {out}
        """.format(
            file1 = file1.path,
            file2 = file2.path,
            out = merged.path,
        ),
    )
    return merged
def _darwin_create_extra_linker_flags_file(hs, cc, objects_dir, executable, dynamic, solibs):
    """Write additional linker flags required on MacOS to a parameter file.

    Args:
      hs: Haskell context.
      cc: CcInteropInfo, information about C dependencies.
      objects_dir: Directory storing object files.
        Used to determine output file location.
      executable: The executable being built.
      dynamic: Bool: Whether to link dynamically or statically.
      solibs: List of dynamic library dependencies.

    Returns:
      File: Parameter file with additional linker flags. To be passed to GHC.
    """

    # On Darwin GHC will pass the dead_strip_dylibs flag to the linker. This
    # flag will remove any shared library loads from the binary's header that
    # are not directly resolving undefined symbols in the binary. I.e. any
    # indirect shared library dependencies will be removed. This conflicts with
    # Bazel's builtin cc rules, which assume that the final binary will load
    # all transitive shared library dependencies. In particular shared libraries
    # produced by Bazel's cc rules never load shared libraries themselves. This
    # causes missing symbols at runtime on MacOS, see #170.
    #
    # The following work-around applies the `-u` flag to the linker for any
    # symbol that is undefined in any transitive shared library dependency.
    # This forces the linker to resolve these undefined symbols in all
    # transitive shared library dependencies and keep the corresponding load
    # commands in the binary's header.
    #
    # Unfortunately, this prohibits elimination of any truly redundant shared
    # library dependencies. Furthermore, the transitive closure of shared
    # library dependencies can be large, so this makes it more likely to exceed
    # the MACH-O header size limit on MacOS.
    #
    # This is a horrendous hack, but it seems to be forced on us by how Bazel
    # builds dynamic cc libraries.
    suffix = ".dynamic.linker_flags" if dynamic else ".static.linker_flags"
    linker_flags_file = hs.actions.declare_file(
        executable.basename + suffix,
        sibling = objects_dir,
    )

    # Emit one `-optl-Wl,-u,<symbol>` linker flag per undefined symbol
    # reported by `nm -u` for each shared library dependency.
    hs.actions.run_shell(
        inputs = solibs,
        outputs = [linker_flags_file],
        command = """
        touch {out}
        for lib in {solibs}; do
            {nm} -u "$lib" | sed 's/^/-optl-Wl,-u,/' >> {out}
        done
        """.format(
            nm = cc.tools.nm,
            solibs = " ".join(["\"" + l.path + "\"" for l in solibs]),
            out = linker_flags_file.path,
        ),
    )
    return linker_flags_file
def _fix_darwin_linker_paths(hs, inp, out, external_libraries):
"""Postprocess a macOS binary to make shared library references relative.
On macOS, in order to simulate the linker "rpath" behavior and make the
binary load shared libraries from relative paths, (or dynamic libraries
load other libraries) we need to postprocess it with install_name_tool.
(This is what the Bazel-provided `cc_wrapper.sh` does for cc rules.)
For details: https://blogs.oracle.com/dipol/entry/dynamic_libraries_rpath_and_mac
Args:
hs: Haskell context.
inp: An input file.
out: An output file.
external_libraries: List of C libraries that inp depends on.
These can be plain File for haskell_cc_import dependencies, or
struct(lib, mangled_lib) for regular cc_library dependencies.
"""
hs.actions.run_shell(
inputs = [inp],
outputs = [out],
mnemonic = "HaskellFixupLoaderPath",
progress_message = "Fixing install paths for {0}".format(out.basename),
command = " &&\n ".join(
[
"cp {} {}".format(inp.path, out.path),
"chmod +w {}".format(out.path),
# Patch the "install name" or "library identifaction name".
# The "install name" informs targets that link against `out`
# where `out` can be found during runtime. Here we update this
# "install name" to the new filename of the fixed binary.
# Refer to the Oracle blog post linked above for details.
"/usr/bin/install_name_tool -id @rpath/{} {}".format(
out.basename,
out.path,
),
] +
[
# Make external library references relative to rpath instead of
# relative to the working directory at link time.
# Handles cc_library dependencies.
"/usr/bin/install_name_tool -change {} {} {}".format(
f.lib.path,
paths.join("@rpath", f.mangled_lib.basename),
out.path,
| |
[None, 210, 310],
}
dataset_in = test_helpers.build_dataset({region_as: metrics_as, region_sf: metrics_sf})
dataset_in.write_to_dataset_pointer(pointer)
# Compare written file with a string literal so a test fails if something changes in how the
# file is written. The literal contains spaces to align the columns in the source.
assert pointer.path_wide_dates().read_text() == (
" location_id,variable,demographic_bucket,provenance,2020-04-03,2020-04-02,2020-04-01\n"
"iso1:us#iso2:us-as, cases, all, , 300, 200, 100\n"
"iso1:us#iso2:us-as,icu_beds, all, pt_src1, 4, 2, 0\n"
"iso1:us#iso2:us-ca#fips:06075,cases, all, , 310, 210,\n"
"iso1:us#iso2:us-ca#fips:06075,deaths, all, pt_src2, , 2, 1\n"
).replace(" ", "")
dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)
test_helpers.assert_dataset_like(dataset_read, dataset_in)
# Check that a file without the demographic_bucket column (as written before
# https://github.com/covid-projections/covid-data-model/pull/1021) can be read.
pointer.path_wide_dates().write_text(
" location_id,variable,provenance,2020-04-03,2020-04-02,2020-04-01\n"
" iso1:us#iso2:us-as, cases, , 300, 200, 100\n"
" iso1:us#iso2:us-as,icu_beds, pt_src1, 4, 2, 0\n"
"iso1:us#iso2:us-ca#fips:06075, cases, , 310, 210,\n"
"iso1:us#iso2:us-ca#fips:06075, deaths, pt_src2, , 2, 1\n".replace(
" ", ""
)
)
dataset_without_bucket_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)
test_helpers.assert_dataset_like(dataset_without_bucket_read, dataset_in)
def test_write_read_wide_dates_csv_with_annotation(tmpdir):
    """Round-trips a dataset with annotation tags through the dataset-pointer files."""
    pointer = _make_dataset_pointer(tmpdir)

    region = Region.from_state("AS")
    metrics = {
        CommonFields.ICU_BEDS: TimeseriesLiteral(
            [0, 2, 4],
            annotation=[
                test_helpers.make_tag(date="2020-04-01"),
                test_helpers.make_tag(TagType.ZSCORE_OUTLIER, date="2020-04-02"),
            ],
        ),
        CommonFields.CASES: [100, 200, 300],
    }
    dataset_in = test_helpers.build_dataset({region: metrics})

    dataset_in.write_to_dataset_pointer(pointer)

    dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)

    test_helpers.assert_dataset_like(dataset_read, dataset_in)
def test_write_read_dataset_pointer_with_provenance_list(tmpdir):
    """Round-trips a dataset whose provenance is a list of values (not a single string)."""
    pointer = _make_dataset_pointer(tmpdir)

    dataset_in = test_helpers.build_default_region_dataset(
        {
            CommonFields.ICU_BEDS: TimeseriesLiteral(
                [0, 2, 4],
                annotation=[
                    test_helpers.make_tag(date="2020-04-01"),
                    test_helpers.make_tag(date="2020-04-02"),
                ],
                provenance=["prov1", "prov2"],
            ),
            CommonFields.CASES: [100, 200, 300],
        }
    )

    dataset_in.write_to_dataset_pointer(pointer)

    dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)

    test_helpers.assert_dataset_like(dataset_read, dataset_in)
def test_write_read_wide_with_buckets(tmpdir):
    """Round-trips a dataset containing demographic buckets and a tagged source."""
    pointer = _make_dataset_pointer(tmpdir)

    all_bucket = DemographicBucket("all")
    age_20s = DemographicBucket("age:20-29")
    age_30s = DemographicBucket("age:30-39")
    region_as = Region.from_state("AS")
    region_sf = Region.from_fips("06075")

    metrics_as = {
        CommonFields.ICU_BEDS: TimeseriesLiteral(
            [0, 2, 4],
            annotation=[
                test_helpers.make_tag(date="2020-04-01"),
                test_helpers.make_tag(TagType.ZSCORE_OUTLIER, date="2020-04-02"),
            ],
        ),
        CommonFields.CASES: [100, 200, 300],
    }
    metrics_sf = {
        CommonFields.CASES: {
            age_20s: TimeseriesLiteral([3, 4, 5], source=taglib.Source(type="MySource")),
            age_30s: [4, 5, 6],
            all_bucket: [1, 2, 3],
        }
    }
    dataset_in = test_helpers.build_dataset({region_as: metrics_as, region_sf: metrics_sf})

    dataset_in.write_to_dataset_pointer(pointer)

    dataset_read = timeseries.MultiRegionDataset.read_from_pointer(pointer)

    test_helpers.assert_dataset_like(dataset_read, dataset_in)
def test_timeseries_drop_stale_timeseries_entire_region():
    """drop_stale_timeseries removes every timeseries of a region whose data ends before the cutoff."""
    ds_in = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m1,m2\n"
            "iso1:us#cbsa:10100,2020-04-02,,,,2\n"
            "iso1:us#cbsa:10100,2020-04-03,,,,3\n"
            "iso1:us#cbsa:10100,,,,,3\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
            "iso1:us#fips:97111,,Bar County,county,4,\n"
        )
    )

    ds_out = ds_in.drop_stale_timeseries(pd.to_datetime("2020-04-04"))

    # The CBSA timeseries ends 2020-04-03 so it is dropped; its latest row is kept.
    ds_expected = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m1,m2\n"
            "iso1:us#cbsa:10100,,,,,3\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
            "iso1:us#fips:97111,,Bar County,county,4,\n"
        )
    )
    test_helpers.assert_dataset_like(ds_out, ds_expected)
def test_timeseries_drop_stale_timeseries_one_metric():
    """drop_stale_timeseries removes a single stale metric and its provenance, keeping the rest."""
    csv_in = (
        "location_id,date,county,aggregate_level,m1,m2\n"
        "iso1:us#cbsa:10100,2020-04-02,,,11,2\n"
        "iso1:us#cbsa:10100,2020-04-03,,,,3\n"
        "iso1:us#cbsa:10100,,,,,3\n"
        "iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
        "iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
        "iso1:us#fips:97111,,Bar County,county,4,\n"
    )
    ds_in = timeseries.MultiRegionDataset.from_csv(io.StringIO(csv_in)).add_provenance_csv(
        io.StringIO(
            "location_id,variable,provenance\n"
            "iso1:us#cbsa:10100,m1,m1-10100prov\n"
            "iso1:us#cbsa:10100,m2,m2-10100prov\n"
            "iso1:us#fips:97111,m1,m1-97111prov\n"
        )
    )

    ds_out = ds_in.drop_stale_timeseries(pd.to_datetime("2020-04-03"))

    # The only timeseries that is stale with cutoff of 4/3 is the CBSA m1. The expected
    # dataset is the same as the input with "11" removed from the timeseries and
    # corresponding provenance removed.
    ds_expected = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(csv_in.replace(",11,", ",,"))
    ).add_provenance_csv(
        io.StringIO(
            "location_id,variable,provenance\n"
            "iso1:us#cbsa:10100,m2,m2-10100prov\n"
            "iso1:us#fips:97111,m1,m1-97111prov\n"
        )
    )
    test_helpers.assert_dataset_like(ds_out, ds_expected)
def test_timeseries_drop_stale_timeseries_with_tag():
    """Dropping a stale timeseries also removes the tags attached to it."""
    region = Region.from_state("TX")
    ts_recent = TimeseriesLiteral([100, 200, 300, 400], annotation=[test_helpers.make_tag()])
    ts_stale = TimeseriesLiteral([100, 200, None, None], annotation=[test_helpers.make_tag()])

    dataset_in = test_helpers.build_dataset(
        {region: {CommonFields.CASES: ts_recent, CommonFields.DEATHS: ts_stale}}
    )

    dataset_out = dataset_in.drop_stale_timeseries(pd.to_datetime("2020-04-03"))

    # Only the tag of the recent CASES timeseries survives.
    assert len(dataset_out.tag) == 1
    # drop_stale_timeseries preserves the empty DEATHS column so add it to the expected dataset.
    dataset_expected = test_helpers.build_dataset(
        {region: {CommonFields.CASES: ts_recent}}, timeseries_columns=[CommonFields.DEATHS]
    )
    test_helpers.assert_dataset_like(dataset_out, dataset_expected)
def test_append_region_and_get_regions_subset_with_tag():
    """append_regions and get_regions_subset both carry tags along with the timeseries."""
    tx = Region.from_state("TX")
    sf = Region.from_fips("06075")
    ts_with_tag = TimeseriesLiteral([100, 200, 300, 400], annotation=[test_helpers.make_tag()])

    dataset_tx = test_helpers.build_dataset({tx: {CommonFields.CASES: ts_with_tag}})
    dataset_sf = test_helpers.build_dataset({sf: {CommonFields.CASES: ts_with_tag}})

    dataset_appended = dataset_tx.append_regions(dataset_sf)
    assert len(dataset_appended.tag) == 2
    dataset_both = test_helpers.build_dataset(
        {tx: {CommonFields.CASES: ts_with_tag}, sf: {CommonFields.CASES: ts_with_tag}}
    )
    test_helpers.assert_dataset_like(dataset_appended, dataset_both)

    subset = dataset_both.get_regions_subset([tx])
    assert len(subset.tag) == 1
    test_helpers.assert_dataset_like(subset, dataset_tx)
def test_one_region_annotations():
    """annotations_all_bucket returns each region's tags, via get_one_region and iter_one_regions."""
    region_tx = Region.from_state("TX")
    region_sf = Region.from_fips("06075")
    values = [100, 200, 300, 400]
    tag1 = test_helpers.make_tag(date="2020-04-01")
    tag2a = test_helpers.make_tag(date="2020-04-02")
    tag2b = test_helpers.make_tag(date="2020-04-03")

    dataset_tx_and_sf = test_helpers.build_dataset(
        {
            region_tx: {CommonFields.CASES: (TimeseriesLiteral(values, annotation=[tag1]))},
            region_sf: {CommonFields.CASES: (TimeseriesLiteral(values, annotation=[tag2a, tag2b]))},
        }
    )

    # get_one_region and iter_one_regions use separate code to split up the tags. Test both of them.
    one_region_tx = dataset_tx_and_sf.get_one_region(region_tx)
    assert one_region_tx.annotations_all_bucket(CommonFields.CASES) == [tag1]
    one_region_sf = dataset_tx_and_sf.get_one_region(region_sf)
    assert one_region_sf.annotations_all_bucket(CommonFields.CASES) == [
        tag2a,
        tag2b,
    ]
    # Annotations are not sources, so sources_all_bucket stays empty.
    assert set(one_region_sf.sources_all_bucket(CommonFields.CASES)) == set()

    assert {
        region: one_region_dataset.annotations_all_bucket(CommonFields.CASES)
        for region, one_region_dataset in dataset_tx_and_sf.iter_one_regions()
    } == {region_sf: [tag2a, tag2b], region_tx: [tag1],}
def test_one_region_empty_annotations():
    """A dataset built without tags yields empty annotations, sources and provenance."""
    one_region = test_helpers.build_one_region_dataset({CommonFields.CASES: [100, 200, 300]})

    assert one_region.annotations_all_bucket(CommonFields.CASES) == []
    assert one_region.source_url == {}
    assert one_region.provenance == {}
    for field in (CommonFields.ICU_BEDS, CommonFields.CASES):
        assert set(one_region.sources_all_bucket(field)) == set()
def test_one_region_tag_objects_series():
    """tag_objects_series exposes one row per tag, including provenance turned into ProvenanceTag."""
    values = [100, 200]
    tag1 = test_helpers.make_tag(TagType.ZSCORE_OUTLIER, date="2020-04-01")
    tag2a = test_helpers.make_tag(date="2020-04-02")
    tag2b = test_helpers.make_tag(date="2020-04-03")

    one_region = test_helpers.build_one_region_dataset(
        {
            CommonFields.CASES: TimeseriesLiteral(values, annotation=[tag1]),
            CommonFields.ICU_BEDS: TimeseriesLiteral(values, provenance="prov1"),
            CommonFields.DEATHS: TimeseriesLiteral(values, annotation=[tag2a, tag2b]),
        }
    )

    assert isinstance(one_region.tag_objects_series, pd.Series)
    # The objects series must share the index of the raw tag frame.
    assert one_region.tag.index.equals(one_region.tag_objects_series.index)
    assert set(one_region.tag_objects_series.reset_index().itertuples(index=False)) == {
        (CommonFields.CASES, DemographicBucket.ALL, tag1.tag_type, tag1),
        (
            CommonFields.ICU_BEDS,
            DemographicBucket.ALL,
            "provenance",
            taglib.ProvenanceTag(source="prov1"),
        ),
        (CommonFields.DEATHS, DemographicBucket.ALL, tag2a.tag_type, tag2a),
        (CommonFields.DEATHS, DemographicBucket.ALL, tag2b.tag_type, tag2b),
    }
def test_one_region_tag_objects_series_empty():
    """tag_objects_series stays an (empty) pd.Series when the region has no tags."""
    dataset = test_helpers.build_one_region_dataset({CommonFields.CASES: [1, 2, 3]})

    assert dataset.tag.empty
    objects = dataset.tag_objects_series
    assert isinstance(objects, pd.Series)
    assert objects.empty
def test_timeseries_tag_objects_series():
    """Multi-region tag_objects_series includes the location_id level and Source objects."""
    values = [100, 200]
    tag1 = test_helpers.make_tag(TagType.ZSCORE_OUTLIER, date="2020-04-01")
    tag2a = test_helpers.make_tag(date="2020-04-02")
    tag2b = test_helpers.make_tag(date="2020-04-03")
    url_str = UrlStr("http://foo.com/1")
    source_obj = taglib.Source("source_with_url", url=url_str)

    ds = test_helpers.build_default_region_dataset(
        {
            CommonFields.CASES: TimeseriesLiteral(values, annotation=[tag1]),
            CommonFields.ICU_BEDS: TimeseriesLiteral(values, source=source_obj),
            CommonFields.DEATHS: TimeseriesLiteral(values, annotation=[tag2a, tag2b]),
            # TOTAL_TESTS has no tags at all.
            CommonFields.TOTAL_TESTS: values,
        }
    )

    assert isinstance(ds.tag_objects_series, pd.Series)
    # The objects series must share the index of the raw tag frame.
    assert ds.tag.index.equals(ds.tag_objects_series.index)
    location_id = test_helpers.DEFAULT_REGION.location_id
    assert set(ds.tag_objects_series.reset_index().itertuples(index=False)) == {
        (location_id, CommonFields.CASES, DemographicBucket.ALL, tag1.tag_type, tag1),
        (location_id, CommonFields.ICU_BEDS, DemographicBucket.ALL, TagType.SOURCE, source_obj),
        (location_id, CommonFields.DEATHS, DemographicBucket.ALL, tag2a.tag_type, tag2a),
        (location_id, CommonFields.DEATHS, DemographicBucket.ALL, tag2b.tag_type, tag2b),
    }
def test_timeseries_latest_values():
    """Latest values come from the most recent timeseries row, or an explicit date-less row."""
    dataset = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m1,m2\n"
            "iso1:us#cbsa:10100,2020-04-02,,,,2\n"
            "iso1:us#cbsa:10100,2020-04-03,,,10,3\n"
            "iso1:us#cbsa:10100,2020-04-04,,,,1\n"
            "iso1:us#cbsa:10100,,,,,4\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
            "iso1:us#fips:97111,,Bar County,county,5,\n"
        )
    )

    # Check bulk access via _timeseries_latest_values
    expected = pd.read_csv(
        io.StringIO("location_id,m1,m2\n" "iso1:us#cbsa:10100,10,1\n" "iso1:us#fips:97111,4,\n")
    )
    latest_from_timeseries = dataset._timeseries_latest_values().reset_index()
    pd.testing.assert_frame_equal(
        latest_from_timeseries, expected, check_like=True, check_dtype=False
    )

    # Check access to timeseries latests values via get_one_region
    region_10100 = dataset.get_one_region(Region.from_cbsa_code("10100"))
    assert region_10100.latest == {
        "aggregate_level": "cbsa",
        "county": None,
        "country": "USA",
        "fips": "10100",
        "state": None,
        "m1": 10,  # Derived from timeseries
        "m2": 4,  # Explicitly in recent values
    }
    region_97111 = dataset.get_one_region(Region.from_fips("97111"))
    assert region_97111.latest == {
        "aggregate_level": "county",
        "county": "Bar County",
        "country": "USA",
        "fips": "97111",
        "state": "ZZ",
        "m1": 5,
        "m2": None,
    }
def test_timeseries_latest_values_copied_to_static():
    """latest_in_static copies latest timeseries values into static, refusing to overwrite."""
    dataset = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,t1,s1\n"
            "iso1:us#cbsa:10100,2020-04-02,,,,2\n"
            "iso1:us#cbsa:10100,2020-04-03,,,10,3\n"
            "iso1:us#cbsa:10100,2020-04-04,,,,1\n"
            "iso1:us#cbsa:10100,,,,,4\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
            "iso1:us#fips:97111,,Bar County,county,,\n"
        )
    )

    # Check access to latest values as copied to static
    t1 = FieldName("t1")
    s1 = FieldName("s1")
    dataset_t1_latest_in_static = dataset.latest_in_static(t1)
    assert dataset_t1_latest_in_static.static.loc["iso1:us#cbsa:10100", t1] == 10
    assert dataset_t1_latest_in_static.static.loc["iso1:us#fips:97111", t1] == 4

    # Trying to copy the latest values of s1 fails because s1 already has a real value in static.
    # See also longer comment where the ValueError is raised.
    with pytest.raises(ValueError):
        dataset.latest_in_static(s1)
def test_join_columns():
    """join_columns merges disjoint metric columns and their provenance; it is symmetric."""
    ts_1 = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m1\n"
            "iso1:us#cbsa:10100,2020-04-02,,,\n"
            "iso1:us#cbsa:10100,2020-04-03,,,\n"
            "iso1:us#cbsa:10100,,,,\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,2\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,4\n"
            "iso1:us#fips:97111,,Bar County,county,4\n"
        )
    ).add_provenance_csv(
        io.StringIO(
            "location_id,variable,provenance\n"
            "iso1:us#cbsa:10100,m1,ts110100prov\n"
            "iso1:us#fips:97111,m1,ts197111prov\n"
        )
    )
    ts_2 = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m2\n"
            "iso1:us#cbsa:10100,2020-04-02,,,2\n"
            "iso1:us#cbsa:10100,2020-04-03,,,3\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,\n"
        )
    ).add_provenance_csv(
        io.StringIO(
            "location_id,variable,provenance\n"
            "iso1:us#cbsa:10100,m2,ts110100prov\n"
            "iso1:us#fips:97111,m2,ts197111prov\n"
        )
    )
    ts_expected = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m1,m2\n"
            "iso1:us#cbsa:10100,2020-04-02,,,,2\n"
            "iso1:us#cbsa:10100,2020-04-03,,,,3\n"
            "iso1:us#cbsa:10100,,,,,\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
            "iso1:us#fips:97111,,Bar County,county,4,\n"
        )
    ).add_provenance_csv(
        io.StringIO(
            "location_id,variable,provenance\n"
            "iso1:us#cbsa:10100,m1,ts110100prov\n"
            "iso1:us#cbsa:10100,m2,ts110100prov\n"
            "iso1:us#fips:97111,m1,ts197111prov\n"
            "iso1:us#fips:97111,m2,ts197111prov\n"
        )
    )
    # Join must produce the same result regardless of operand order.
    ts_joined = ts_1.join_columns(ts_2)
    test_helpers.assert_dataset_like(ts_joined, ts_expected, drop_na_latest=True)
    ts_joined = ts_2.join_columns(ts_1)
    test_helpers.assert_dataset_like(ts_joined, ts_expected, drop_na_latest=True)

    with pytest.raises(ValueError):
        # Raises because the same column is in both datasets
        ts_2.join_columns(ts_2)

# geo attributes, such as aggregation level and county name, generally appear in geo-data.csv
# instead of MultiRegionDataset so they don't need special handling in join_columns.
def test_join_columns_missing_regions():
    """join_columns leaves NA in the joined column for regions absent from the other dataset."""
    ts_1 = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m1\n"
            "iso1:us#cbsa:10100,2020-04-02,,,\n"
            "iso1:us#cbsa:10100,2020-04-03,,,\n"
            "iso1:us#cbsa:10100,,,,\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,2\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,4\n"
            "iso1:us#fips:97111,,Bar County,county,4\n"
        )
    )
    ts_2 = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m2\n" "iso1:us#cbsa:10100,2020-04-02,,,2\n"
        )
    )
    ts_expected = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m1,m2\n"
            "iso1:us#cbsa:10100,2020-04-02,,,,2\n"
            "iso1:us#cbsa:10100,2020-04-03,,,,\n"
            "iso1:us#cbsa:10100,,,,,\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,2,\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,4,\n"
            "iso1:us#fips:97111,,Bar County,county,4,\n"
        )
    )
    ts_joined = ts_1.join_columns(ts_2)
    test_helpers.assert_dataset_like(ts_joined, ts_expected, drop_na_latest=True)
def test_join_columns_with_buckets():
    """join_columns merges metrics that carry demographic buckets."""
    m1, m2 = FieldName("m1"), FieldName("m2")
    age20s = DemographicBucket("age:20-29")
    data_m1 = {m1: {age20s: [1, 2, 3]}}
    data_m2 = {m2: {age20s: [4, 5, 6], DemographicBucket.ALL: [7, 8, 9]}}

    ds_1 = test_helpers.build_default_region_dataset(data_m1)
    ds_2 = test_helpers.build_default_region_dataset(data_m2)

    # Joining a dataset with itself duplicates a column and must fail.
    with pytest.raises(ValueError):
        ds_1.join_columns(ds_1)

    expected = test_helpers.build_default_region_dataset({**data_m1, **data_m2})
    test_helpers.assert_dataset_like(ds_1.join_columns(ds_2), expected)
def test_join_columns_with_static():
    """join_columns combines static values and is symmetric in its operands."""
    m1, m2 = FieldName("m1"), FieldName("m2")
    ds_1 = test_helpers.build_default_region_dataset({}, static={m1: 1})
    ds_2 = test_helpers.build_default_region_dataset({}, static={m2: 2})

    # Joining a dataset with itself duplicates a column and must fail.
    with pytest.raises(ValueError):
        ds_1.join_columns(ds_1)

    expected = test_helpers.build_default_region_dataset({}, static={m1: 1, m2: 2})
    for joined in (ds_1.join_columns(ds_2), ds_2.join_columns(ds_1)):
        test_helpers.assert_dataset_like(joined, expected)
def test_iter_one_region():
    """iter_one_regions yields every region and agrees with get_one_region for each of them."""
    ts = timeseries.MultiRegionDataset.from_csv(
        io.StringIO(
            "location_id,date,county,aggregate_level,m1\n"
            "iso1:us#cbsa:10100,2020-04-02,,,\n"
            "iso1:us#cbsa:10100,2020-04-03,,,\n"
            "iso1:us#cbsa:10100,,,,\n"
            "iso1:us#fips:97111,2020-04-02,Bar County,county,2\n"
            "iso1:us#fips:97111,2020-04-04,Bar County,county,4\n"
            "iso1:us#fips:97111,,Bar County,county,4\n"
            # 97222 does not have a row of latest data to make sure it still works
            "iso1:us#fips:97222,2020-04-02,No Recent County,county,3\n"
            "iso1:us#fips:97222,2020-04-04,No Recent County,county,5\n"
        )
    )
    assert {region.location_id for region, _ in ts.iter_one_regions()} == {
        "iso1:us#cbsa:10100",
        "iso1:us#fips:97111",
        "iso1:us#fips:97222",
    }
    for it_region, it_one_region in ts.iter_one_regions():
        one_region = ts.get_one_region(it_region)
        # Compare with NA filled so NaN != NaN doesn't break the equality check.
        assert (one_region.data.fillna("") == it_one_region.data.fillna("")).all(axis=None)
        assert one_region.latest == it_one_region.latest
        assert one_region.provenance == it_one_region.provenance
        assert one_region.region == it_region
        assert one_region.region == it_one_region.region
def test_drop_regions_without_population():
cbsa_with_pop = | |
from jinja2 import Template
from shutil import which
from time import gmtime, strftime
import argparse
import fnmatch
import functools
import getpass
import logging
import operator
import os
import pwd
import re
import shlex
import signal
import socket
import subprocess
import sys
import time
"""
TODO:
- More optional parameters
- GPU utility
- Merge error and output files
- The log out/err should contain the actual script that was started
- Calculate fairshare and priority
- Optimize jupyter usage
- Add the option for -exc and -inc for this format tbm[0-5]
- The NODE_SET is probably outdated
- Put all constants in a config file
- Clean up Jinja2 templates
Calculate fairshare = (0.50 * CPU + 0.25 * Mem[GB] + 2 * GPU) * walltime[sec]
Calculate priority = 40,000,000 * QoS + 20,000,000 * FairShare
"""
# Exit quietly on Ctrl-C instead of printing a KeyboardInterrupt traceback.
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))

# Known cluster node names (see the module TODO: probably outdated).
NODE_SET = set(
    [
        "tbm1",
        "tbm2",
        "tbm3",
        "tbm4",
        "insy1",
        "insy2",
        "insy3",
        "insy4",
        "insy5",
        "insy6",
        "insy7",
        "insy8",
        "insy11",
        "insy12",
        "insy13",
        "insy14",
        "insy15",
        "grs1",
        "grs2",
        "grs3",
        "grs4",
        "100plus",
        "ew1",
        "ewi2",
        "wis1",
    ]
)

# Per-QoS walltime ceilings (H:MM:SS) and CPU ceilings used by sanity_checks().
TIME_DICT = {"short": "4:00:00", "long": "168:00:00", "infinite": "99999:00:00"}
CPU_DICT = {"short": 600, "long": 240, "infinite": 32}

# Defaults for the generated #SBATCH directives.
DEFAULT_TASKS = 1
DEFAULT_CORES = 1
DEFAULT_PARTITION = "general"
DEFAULT_TIME = "1:00:00"
DEFAULT_QOS = "short"
DEFAULT_MEMORY = 1000  # MB
DEFAULT_JOB_NAME = "myjob"
# NOTE(review): these two look like placeholder paths — confirm against the
# target cluster before relying on them.
DEFAULT_WORKDIR = "/../../user/"
DEFAULT_JUPYTER_LOG = "/../../jupyter/logs"

# Hard limits enforced by the argparse validators below.
MAX_TASKS = 600
MAX_CPUS = 600
MAX_HOURS = 168
MAX_MEMORY = 750000  # MB
MIN_PORT = 8889

# Header stamped into every generated script (timestamp evaluated once, at import).
PREAMBLE = f"""# Created by job script generator for SLURM\n# {strftime("%a %b %d %H:%M:%S UTC %Y", gmtime())}\n"""
# Shell boilerplate run before/after the user's payload in every job script:
# refuses to run outside sbatch and echoes the allocated node list.
TO_EXECUTE_BEFORE = 'if [ "x$SLURM_JOB_ID" == "x" ]; then\n\techo "You need to submit your job to the queuing system with sbatch"\n\texit 1\nfi\n\nif [ X"$SLURM_STEP_ID" = "X" -a X"$SLURM_PROCID" = "X"0 ]; then\n\techo "Submitted batch job $SLURM_JOB_ID on the node(s): $SLURM_NODELIST"\nfi\n\ndate -u'
TO_EXECUTE_AFTER = "date -u"
def run_job(sbatch_job, dur_sleep=0.05):
    """Submit a generated job script to SLURM by piping it into sbatch.

    Args:
        sbatch_job (str): full text of the batch script to submit.
        dur_sleep (float): pause after submission so repeated calls do not
            hammer the scheduler.

    Raises:
        subprocess.CalledProcessError: if sbatch exits non-zero.
    """
    # Feed the script straight to sbatch's stdin instead of the previous
    # `echo | sbatch` pipeline: one process fewer and no quoting pitfalls.
    output = subprocess.run(
        ["sbatch"],
        input=sbatch_job,
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    ).stdout
    print(output, end="")
    time.sleep(dur_sleep)
def kill_job(sbatch_job, message=None, dur_sleep=0.05):
    """Cancel a SLURM job via scancel and print a confirmation message.

    Args:
        sbatch_job: job id (or scancel-compatible selector) to cancel.
        message: text to print; defaults to "Cancelling <job>".
        dur_sleep: pause after cancelling to avoid hammering the scheduler.
    """
    text = f"Cancelling {sbatch_job}" if message is None else message
    proc = subprocess.Popen(("scancel", sbatch_job), stdout=subprocess.PIPE)
    print(text)
    time.sleep(dur_sleep)
    proc.wait()
def count_prefix_queue(prefix):
    """Count the current user's queued SLURM jobs whose name matches *prefix*.

    Args:
        prefix (str): fnmatch-style glob pattern matched against job names.

    Returns:
        int: number of matching jobs in the user's queue.
    """
    if which("squeue") is None:
        # NOTE(review): `parser` is a global created only under __main__;
        # calling this from an import context would raise NameError here.
        parser.error(
            "The command `squeue` is not found on this system, cannot cancel jobs!"
        )
    user_name = pwd.getpwuid(os.getuid()).pw_name
    exec_str = (
        f'squeue -h -o "%.18i %.9P %.255j %.30u %.2t %.10M %R" -u {user_name}'
    )
    p = subprocess.Popen(
        shlex.split(exec_str), shell=False, bufsize=4096, stdout=subprocess.PIPE
    )
    out, err = p.communicate()
    jobs = [job.split() for job in out.decode("utf-8").strip().split("\n")]
    n = 0
    for job in jobs:
        if job:  # an empty queue yields one empty row; skip it
            job_id, partition, job_name, job_user_name, job_status, job_time, job_node = (
                job
            )
            # squeue was already filtered with -u; this guards against drift.
            assert user_name == job_user_name
            if fnmatch.fnmatch(job_name, prefix):
                n += 1
    return n
def make_generic(cvalue, func_xs, comp_xs, typecast, message):
    """Build an argparse ``type=`` validator from a few composable pieces.

    Args:
        cvalue: optional reference value compared against the parsed input.
        func_xs: optional list of one-argument transforms applied, in order,
            to the typecast input before validation.
        comp_xs: validators. When ``cvalue`` is None they are chained
            (each fed the previous result) and a truthy final value fails;
            otherwise each is called as ``f(parsed, cvalue)`` and any truthy
            result fails.
        typecast: callable converting the raw CLI string (e.g. ``int``).
        message: ``str.format`` template receiving (parsed, cvalue) on error.

    Returns:
        A callable ``str -> typecast(str)`` raising
        ``argparse.ArgumentTypeError`` on invalid input.
    """
    def check_generic(value):
        parsed = typecast(value)
        if func_xs:
            for transform in func_xs:
                parsed = transform(parsed)
        if cvalue is None:
            failed = functools.reduce(lambda acc, fn: fn(acc), comp_xs, parsed)
        else:
            failed = any(fn(parsed, cvalue) for fn in comp_xs)
        if failed:
            raise argparse.ArgumentTypeError(message.format(parsed, cvalue))
        return typecast(value)

    return check_generic
def check_time(value, max_hours=None):
    """Validate an ``H:MM:SS`` walltime string for argparse.

    Bug fix: the old regex only matched three-digit hours of the form
    ``1[0-6][0-8]``, wrongly rejecting valid times such as ``109:00:00``
    or ``159:00:00``. The format check is now a simple pattern and the
    numeric bound is enforced explicitly.

    Args:
        value (str): time as ``H:MM:SS`` (1-3 hour digits).
        max_hours (int | None): upper bound; defaults to the module-level
            ``MAX_HOURS`` (kept as a parameter for testability).

    Returns:
        str: the validated value, unchanged.

    Raises:
        argparse.ArgumentTypeError: on bad format or out-of-range time.
    """
    if max_hours is None:
        max_hours = MAX_HOURS
    if not re.match(r"^\d{1,3}:[0-5][0-9]:[0-5][0-9]$", value):
        raise argparse.ArgumentTypeError(
            f"Invalid time provided: {value}, example format: 12:30:15, should be between 0 to {max_hours} hours"
        )
    hours, minutes, seconds = map(int, value.split(":"))
    # The maximum is exactly max_hours:00:00; anything beyond fails.
    if hours > max_hours or (hours == max_hours and (minutes != 0 or seconds != 0)):
        raise argparse.ArgumentTypeError(
            f"{value} invalid time provided, should be between 0 to {max_hours} hours"
        )
    return value
def check_output(value):
    """Validate an output-file name and prefix it with the SLURM job id.

    Bug fix: the old check used ``re.search(".out$", ...)`` with an
    unescaped dot, so names like ``"xout"`` were accepted and mangled to
    ``"%j-.out"``. The name must now literally end with ``.out``.

    Args:
        value (str): desired output file name, or "" to keep the default.

    Returns:
        str: ``"%j-<stem>.out"`` (``%j`` is expanded by SLURM to the job
        id), or the falsy input unchanged.

    Raises:
        argparse.ArgumentTypeError: if the name does not end with ``.out``.
    """
    if value:
        if not value.endswith(".out"):
            raise argparse.ArgumentTypeError(
                f"Invalid output file specified, should end with .out! {value}"
            )
        # Strip the extension, then rebuild as "%j-<stem>.out".
        value = "".join(["%j-", ".".join(value.split(".")[:-1]), ".out"])
    return value
def check_error(value):
    """Validate an error-file name and prefix it with the SLURM job id.

    Bug fix: the old check used ``re.search(".err$", ...)`` with an
    unescaped dot, so names like ``"xerr"`` were accepted and mangled to
    ``"%j-.err"``. The name must now literally end with ``.err``.

    Args:
        value (str): desired error file name, or "" to keep the default.

    Returns:
        str: ``"%j-<stem>.err"`` (``%j`` is expanded by SLURM to the job
        id), or the falsy input unchanged.

    Raises:
        argparse.ArgumentTypeError: if the name does not end with ``.err``.
    """
    if value:
        if not value.endswith(".err"):
            raise argparse.ArgumentTypeError(
                f"Invalid error file specified, should end with .err! {value}"
            )
        # Strip the extension, then rebuild as "%j-<stem>.err".
        value = "".join(["%j-", ".".join(value.split(".")[:-1]), ".err"])
    return value
def check_workdir(value):
    """Return *value* as a directory path with a trailing separator.

    Raises argparse.ArgumentTypeError when the directory does not exist.
    os.path is used deliberately: pathlib strips trailing slashes.
    """
    if not os.path.isdir(value):
        raise argparse.ArgumentTypeError(
            f"Invalid/nonexistant working directory given! {value}"
        )
    return os.path.join(value, "")
def calc_fairshare(cpu, mem, time, gpu=0):
    """Estimate the fairshare cost of a job from its resource request.

    TODO: Update fairshare function
    """
    weighted = 0.50 * cpu + 0.125 * (mem / 1000.0) + 10 * gpu
    return int(weighted * time_to_seconds(time))


def time_to_seconds(td):
    """Convert an ``H:MM:SS`` string into total seconds."""
    hours, minutes, seconds = (int(part) for part in td.split(":"))
    return 3600 * hours + 60 * minutes + seconds


def compare_time(t1, t2):
    """Return True when duration *t1* is strictly longer than *t2*."""
    return time_to_seconds(t1) > time_to_seconds(t2)
def sanity_checks(parser, args):
    """Validate parsed arguments against QoS limits and normalize them in place.

    Args:
        parser (argparse.ArgumentParser): used to report fatal errors.
        args (argparse.Namespace): parsed arguments; `exclude`, `include`
            and `script` are rewritten in place to their SLURM-ready forms.
    """
    if compare_time(args.time, TIME_DICT[args.qos]):
        parser.error(
            f"Maximum time for specified qos='{args.qos}' is limited to {TIME_DICT[args.qos]}, attempted to allocate {args.time}"
        )
    if args.cpus > CPU_DICT[args.qos]:
        parser.error(
            f"CPU usage of qos '{args.qos}' is limited to {CPU_DICT[args.qos]}, attempted to allocate {args.cpus}"
        )
    # TODO: Clean up
    # SLURM expects comma-separated node lists.
    if args.exclude:
        args.exclude = ",".join(args.exclude)
    if args.include:
        args.include = ",".join(args.include)
    # argparse delivers "\n" as the two literal characters backslash-n;
    # turn them into real newlines so multi-line scripts work.
    if vars(args).get("script"):
        args.script = "\n".join(args.script.split("\\n"))
    fairshare = calc_fairshare(args.cpus, args.mem, args.time)
    # Bug fix: resolve the logger here instead of relying on the global
    # `logger`, which only exists when the module runs as __main__.
    logging.getLogger(__name__).warning(f"Fairshare usage: {fairshare}")
def make_job(
    partition=DEFAULT_PARTITION,
    qos=DEFAULT_QOS,
    time=DEFAULT_TIME,
    ntasks=DEFAULT_TASKS,
    cpus=DEFAULT_CORES,
    mem=DEFAULT_MEMORY,
    jobname=DEFAULT_JOB_NAME,
    workdir=DEFAULT_WORKDIR,
    output="",
    error="",
    incerror=None,
    script="",
    constraints="",
    include="",
    exclude="",
):
    """Render a complete sbatch batch script for a generic SLURM job.

    The parameters map 1:1 onto #SBATCH directives; `script` is the user
    payload inserted between the pre/post boilerplate. Returns the rendered
    script as a string.
    """
    execute_before = TO_EXECUTE_BEFORE
    execute_after = TO_EXECUTE_AFTER
    # Bug fix: `preamble` must be a local so template.render(locals()) can
    # see it; previously {{ preamble }} silently rendered as empty (Jinja2's
    # default Undefined prints nothing).
    preamble = PREAMBLE
    template = Template(
        "#!/bin/sh\n\
{{ preamble }}\n\
#SBATCH --partition={{ partition }}\n\
#SBATCH --qos={{ qos }}\n\
#SBATCH --time={{ time }}\n\
#SBATCH --ntasks={{ ntasks }}\n\
#SBATCH --cpus-per-task={{ cpus }}\n\
#SBATCH --mem={{ mem }}\n\
#SBATCH --job-name={{ jobname }}\n\
#SBATCH --chdir={{ workdir }}\n\
{% if constraints %}#SBATCH --constraint={{constraints}}\n{% endif %}\
{% if include %}#SBATCH --nodelist={{include}}\n{% endif %}\
{% if exclude %}#SBATCH --exclude={{exclude}}\n{% endif %}\
#SBATCH --output={% if output %}{{output}}{% else %}%j-{{jobname}}.out {% endif %}\n\
{% if error %}#SBATCH --error={{error}}\n{% elif incerror %}#SBATCH --error=%j-{{jobname}}.err\n {% endif %}\
\n\
{{ execute_before }}\
\n{{ script }}\n\
{{ execute_after }}"
    )
    return template.render(locals())
def make_jupyter_job(
    partition=DEFAULT_PARTITION,
    qos=DEFAULT_QOS,
    time=DEFAULT_TIME,
    ntasks=DEFAULT_TASKS,
    cpus=DEFAULT_CORES,
    mem=DEFAULT_MEMORY,
    jobname=DEFAULT_JOB_NAME,
    workdir=DEFAULT_WORKDIR,
    port=8889,
    output="",
    error="",
    incerror=None,
    constraints="",
    include="",
    exclude="",
):
    """Render an sbatch script that starts a JupyterLab server on a node.

    The rendered script prints the ssh-tunnel command a user needs to reach
    the notebook, then launches jupyter-lab on `port`. Returns the rendered
    script as a string.
    """
    execute_before = TO_EXECUTE_BEFORE
    execute_after = TO_EXECUTE_AFTER
    jupyter_log = DEFAULT_JUPYTER_LOG
    user = getpass.getuser()
    # Bug fix: `preamble` must be a local so template.render(locals()) can
    # see it; previously {{ preamble }} silently rendered as empty.
    preamble = PREAMBLE
    template = Template(
        """#!/bin/sh\n\
{{ preamble }}\n\
#SBATCH --partition={{ partition }}\n\
#SBATCH --qos={{ qos }}\n\
#SBATCH --time={{ time }}\n\
#SBATCH --ntasks={{ ntasks }}\n\
#SBATCH --cpus-per-task={{ cpus }}\n\
#SBATCH --mem={{ mem }}\n\
#SBATCH --job-name={{ jobname }}\n\
#SBATCH --chdir={{ workdir }}\n\
{% if constraints %}#SBATCH --constraint={{constraints}}\n{% endif %}\
{% if include %}#SBATCH --nodelist={{include}}\n{% endif %}\
{% if exclude %}#SBATCH --exclude={{exclude}}\n{% endif %}\
#SBATCH --output={% if output %}{{output}}{% else %}{{jupyter_log}}%j-{{jobname}}.out {% endif %}\n\
{% if error %}#SBATCH --error={{error}}\n{% elif incerror %}#SBATCH --error=%j-{{jobname}}.err\n {% endif %}\
\n\
if [ "x$SLURM_JOB_ID" == "x" ]; then\n\
\techo "You need to submit your job to the queuing system with sbatch"\n\
\texit 1\nfi\n\n\
if [ X"$SLURM_STEP_ID" = "X" -a X"$SLURM_PROCID" = "X"0 ]; then\n\
\techo "Submitted batch job $SLURM_JOB_ID on the node(s): $SLURM_NODELIST"\n\
fi\n\n\
XDG_RUNTIME_DIR=""\n\
node=$(hostname)\n\
echo -e "\
Command to create ssh tunnel:\n\
ssh -N -f -L {{port}}:${node}:{{port}} {{user}}@login<EMAIL> -o ProxyCommand='ssh -W %h:%p {{user}}@linux-<EMAIL>ion.<EMAIL>.nl'\n\
Use a Browser on your local machine to go to:\n\
http://localhost:{{port}} (prefix w/ https:// if using password)"\n\
date -u\n\
jupyter-lab --no-browser --port={{port}} --ip=0.0.0.0\n\
{{ execute_after }}"""
        # Bug fix above: was {{ TO_EXECUTE_AFTER }}, which is not in
        # locals() and therefore rendered as an empty string.
    )
    return template.render(locals())
if __name__ == "__main__":
logging.basicConfig(stream=sys.stderr, format="%(levelname)s: %(message)s")
logger = logging.getLogger(__name__)
def cmd_gen(args):
    """Handler for the generic-job subcommand: validate args, render the
    batch script, then either submit it (-sbatch) or print it to stdout."""
    sanity_checks(parser, args)
    job_script = make_job(
        partition=args.partition,
        qos=args.qos,
        time=args.time,
        ntasks=args.ntasks,
        cpus=args.cpus,
        mem=args.mem,
        jobname=args.jobname,
        workdir=args.workdir,
        output=args.output,
        error=args.error,
        incerror=args.incerror,
        script=args.script,
        constraints=args.constraints,
        include=args.include,
        exclude=args.exclude,
    )
    if args.sbatch:
        run_job(job_script)
    else:
        print(job_script)
def cmd_jupyter(args):
    """Handler for the jupyter subcommand: validate args, render a
    JupyterLab batch script, then submit (-sbatch) or print it.

    Note: unlike cmd_gen, no `output`/`error` are forwarded, so the
    template's jupyter_log defaults apply.
    """
    sanity_checks(parser, args)
    job_script = make_jupyter_job(
        partition=args.partition,
        qos=args.qos,
        time=args.time,
        ntasks=args.ntasks,
        cpus=args.cpus,
        mem=args.mem,
        jobname=args.jobname,
        workdir=args.workdir,
        port=args.port,
        incerror=args.incerror,
        constraints=args.constraints,
        include=args.include,
        exclude=args.exclude,
    )
    if args.sbatch:
        run_job(job_script)
    else:
        print(job_script)
def cmd_cancel(args):
    """Handler for the cancel subcommand: scancel every queued job of the
    current user whose name matches the glob pattern in args.name."""
    if which("squeue") is None:
        parser.error(
            "The command `squeue` is not found on this system, cannot cancel jobs!"
        )
    user_name = pwd.getpwuid(os.getuid()).pw_name
    exec_str = (
        f'squeue -h -o "%.18i %.9P %.255j %.30u %.2t %.10M %R" -u {user_name}'
    )
    p = subprocess.Popen(
        shlex.split(exec_str), shell=False, bufsize=4096, stdout=subprocess.PIPE
    )
    out, err = p.communicate()
    jobs = [job.split() for job in out.decode("utf-8").strip().split("\n")]
    for job in jobs:
        # Bug fix: an empty queue produces one empty row, which used to
        # crash the 7-field unpack below (count_prefix_queue already had
        # this guard).
        if not job:
            continue
        job_id, partition, job_name, job_user_name, job_status, job_time, job_node = (
            job
        )
        # squeue was already filtered with -u; this guards against drift.
        assert user_name == job_user_name
        if args.name:
            if fnmatch.fnmatch(job_name, args.name):
                kill_job(
                    job_id, message=f"Cancelling {job_name} with job id {job_id}"
                )
check_memory = make_generic(
MAX_MEMORY,
None,
[operator.ge, lambda *x: x[0] <= 0],
int,
'"{}" invalid amount of memory provided, should be between 1 to {} MB',
)
check_cpus = make_generic(
MAX_CPUS,
None,
[operator.gt, lambda *x: x[0] <= 0],
int,
'"{}" is | |
<gh_stars>1-10
# External Dependencies
import ldap
# Internal Dependencies
import datetime
import re
class activedirectory:
    # Helper for managing Active Directory accounts over LDAPS
    # (password changes/resets, account status, password policies).
    # NOTE: this module is written for Python 2 (old-style except syntax,
    # `unicode`); it will not run unmodified on Python 3.
    # User configurable
    # Which account states will you allow to change their own password?
    # Any combination of:
    # ['acct_pwd_expired', 'acct_expired', 'acct_disabled', 'acct_locked']
    can_change_pwd_states = ['acct_pwd_expired']
    # Internal
    # NOTE(review): these are class-level mutable dicts, shared by every
    # instance of activedirectory.
    domain_pwd_policy = {}  # domain default policy, filled by get_pwd_policies()
    granular_pwd_policy = {} # keys are policy DNs
def __init__(self, host, base, bind_dn, bind_pwd):
    """Connect to `host` over LDAPS and bind with the given credentials,
    then pre-load the domain password policies."""
    # Disable certificate verification and referral chasing (typical for AD),
    # and force protocol version 3.
    ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, 0)
    ldap.set_option(ldap.OPT_REFERRALS, 0)
    ldap.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
    self.conn = None
    self.host = host
    self.uri = "ldaps://%s" % (host)
    self.base = base
    self.bind_dn = bind_dn
    try:
        self.conn = ldap.initialize(self.uri)
        self.conn.simple_bind_s(bind_dn, bind_pwd)
        # NOTE(review): returning from __init__ cannot signal failure to the
        # caller -- the instance is still created, just without policies
        # loaded, when the bind account is not an admin.
        if not self.is_admin(bind_dn):
            return None
        self.get_pwd_policies()
    except ldap.LDAPError, e:
        # ldap_error appears to be a custom exception factory defined
        # elsewhere in this class/module.
        raise self.ldap_error(e)
def user_authn_pwd_verify(self, user, user_pwd):
    """Return True when `user_pwd` is correct for `user`, even if the
    account is in a state (per can_change_pwd_states) that only allows a
    password change; False for any other authentication failure."""
    # Attempt to bind but only throw an exception if the password is incorrect or the account
    # is in a state that would preclude changing the password.
    try:
        self.user_authn(user, user_pwd)
    except (self.authn_failure_time, self.authn_failure_workstation, \
        (self.authn_failure_pwd_expired_natural if 'acct_pwd_expired' in self.can_change_pwd_states else None),
        (self.authn_failure_pwd_expired_admin if 'acct_pwd_expired' in self.can_change_pwd_states else None),
        (self.authn_failure_acct_disabled if 'acct_disabled' in self.can_change_pwd_states else None),
        (self.authn_failure_acct_expired if 'acct_expired' in self.can_change_pwd_states else None),
        (self.authn_failure_acct_locked if 'acct_locked' in self.can_change_pwd_states else None)):
        # NOTE(review): None entries in the except tuple (for disallowed
        # states) look fragile -- confirm Python 2 tolerates them here.
        return True
    except Exception, e:
        return False
    return True
def user_authn(self, user, user_pwd):
    # Look up DN for user, bind using current_pwd.
    # Return true on success, exception on failure.
    try:
        status = self.get_user_status(user)
        bind_dn = status['user_dn']
        # Bind on a fresh connection so the admin connection is untouched.
        user_conn = ldap.initialize(self.uri)
        user_conn.simple_bind_s(bind_dn, user_pwd)
    except ldap.INVALID_CREDENTIALS, e:
        # Translate AD's sub-error codes into specific exceptions
        # (parse_invalid_credentials is defined elsewhere in the class).
        raise self.parse_invalid_credentials(e, bind_dn)
    except ldap.LDAPError, e:
        raise self.ldap_error(e)
    return True
def change_pwd(self, user, current_pwd, new_pwd):
    """Change `user`'s password using their own credentials, enforcing the
    applicable AD policy (length, complexity, username/displayname, history)."""
    # Change user's account using their own creds
    # This forces adherence to length/complexity/history
    # They must exist, not be priv'd, and can change pwd per can_change_pwd_states
    status = self.get_user_status(user)
    user_dn = status['user_dn']
    if self.is_admin(user_dn):
        raise self.user_protected(user)
    if not status['acct_can_change_pwd']:
        raise self.user_cannot_change_pwd(user, status, self.can_change_pwd_states)
    # The new password must respect policy
    if not len(new_pwd) >= status['acct_pwd_policy']['pwd_length_min']:
        msg = 'New password for %s must be at least %d characters, submitted password has only %d.' % (user, status['acct_pwd_policy']['pwd_length_min'], len(new_pwd))
        raise self.pwd_vette_failure(user, new_pwd, msg, status)
    # Check Complexity - 3of4 and username/displayname check
    if status['acct_pwd_policy']['pwd_complexity_enforced']:
        # Named groups record which character classes were found.
        patterns = [r'.*(?P<digit>[0-9]).*', r'.*(?P<lowercase>[a-z]).*', r'.*(?P<uppercase>[A-Z]).*', r'.*(?P<special>[~!@#$%^&*_\-+=`|\\(){}\[\]:;"\'<>,.?/]).*']
        matches = []
        for pattern in patterns:
            match = re.match(pattern, new_pwd)
            if match and match.groupdict() and match.groupdict().keys():
                matches.append(match.groupdict().keys()[0])
        if len(matches) < 3:
            msg = 'New password for %s must contain 3 of 4 character types (lowercase, uppercase, digit, special), only found %s.' % (user, (', ').join(matches))
            raise self.pwd_vette_failure(user, new_pwd, msg, status)
        # The new password must not contain user's username
        if status['user_id'].lower() in new_pwd.lower():
            msg = 'New password for %s must not contain their username.' % (user)
            raise self.pwd_vette_failure(user, new_pwd, msg, status)
        # The new password must not contain word from displayname
        for e in status['user_displayname_tokenized']:
            if len(e) > 2 and e.lower() in new_pwd.lower():
                msg = 'New password for %s must not contain a word longer than 2 characters from your name in our system (%s), found %s.' % (user, (', ').join(status['user_displayname_tokenized']), e)
                raise self.pwd_vette_failure(user, new_pwd, msg, status)
    # Encode password and attempt change. If server is unwilling, history is likely fault.
    # AD requires the old/new passwords quoted and UTF-16LE encoded.
    current_pwd = unicode('\"' + current_pwd + '\"').encode('utf-16-le')
    new_pwd = unicode('\"' + new_pwd + '\"').encode('utf-16-le')
    # DELETE old + ADD new in one operation = user-initiated change
    # (subject to policy), as opposed to an admin REPLACE.
    pass_mod = [(ldap.MOD_DELETE, 'unicodePwd', [current_pwd]), (ldap.MOD_ADD, 'unicodePwd', [new_pwd])]
    try:
        self.conn.modify_s(user_dn, pass_mod)
    except ldap.CONSTRAINT_VIOLATION, e:
        # If the exceptions's 'info' field begins with:
        # 00000056 - Current passwords do not match
        # 0000052D - New password violates length/complexity/history
        msg = e[0]['desc']
        if e[0]['info'].startswith('00000056'):
            # Incorrect current password.
            raise self.authn_failure(user, self.uri)
        elif e[0]['info'].startswith('0000052D'):
            msg = 'New password for %s must not match any of the past %d passwords.' % (user, status['acct_pwd_policy']['pwd_history_depth'])
            raise self.pwd_vette_failure(user, new_pwd, msg, status)
        # NOTE(review): any other CONSTRAINT_VIOLATION sub-code is silently
        # swallowed here (msg is assigned but unused) -- confirm intent.
    except ldap.LDAPError, e:
        raise self.ldap_error(e)
def set_pwd(self, user, new_pwd):
    """Administratively reset `user`'s password using the privileged bind."""
    # Change the user's password using priv'd creds
    # They must exist, not be priv'd
    status = self.get_user_status(user)
    user_dn = status['user_dn']
    if self.is_admin(user_dn):
        raise self.user_protected(user)
    # Even priv'd user must respect min password length.
    if not len(new_pwd) >= status['acct_pwd_policy']['pwd_length_min']:
        msg = 'New password for %s must be at least %d characters, submitted password has only %d.' % (user, status['acct_pwd_policy']['pwd_length_min'], len(new_pwd))
        raise self.pwd_vette_failure(user, new_pwd, msg, status)
    # AD requires the new password quoted and UTF-16LE encoded.
    # A single REPLACE (vs DELETE+ADD) is the admin-reset form.
    new_pwd = unicode('\"' + new_pwd + '\"', "iso-8859-1").encode('utf-16-le')
    pass_mod = [((ldap.MOD_REPLACE, 'unicodePwd', [new_pwd]))]
    try:
        self.conn.modify_s(user_dn, pass_mod)
    except ldap.LDAPError, e:
        raise self.ldap_error(e)
def force_change_pwd(self, user):
    """Expire `user`'s password so they must change it at next logon."""
    # They must exist, not be priv'd
    status = self.get_user_status(user)
    user_dn = status['user_dn']
    if self.is_admin(user_dn):
        raise self.user_protected(user)
    # Only meaningful when the account's password actually expires.
    if status['acct_pwd_expiry_enabled']:
        # pwdLastSet = 0 marks the password as expired in AD.
        mod = [(ldap.MOD_REPLACE, 'pwdLastSet', [0])]
        try:
            self.conn.modify_s(user_dn, mod)
        except ldap.LDAPError, e:
            raise self.ldap_error(e)
def get_user_status(self, user):
    """Look up `user` by sAMAccountName and return a dict describing the
    account: DN, display name, lock/disable/expiry state, the password
    policy that applies, and whether a self-service password change is
    allowed (per can_change_pwd_states)."""
    user_base = "CN=Users,%s" % (self.base)
    user_filter = "(sAMAccountName=%s)" % (user)
    user_scope = ldap.SCOPE_SUBTREE
    status_attribs = ['pwdLastSet', 'accountExpires', 'userAccountControl', 'memberOf', 'msDS-User-Account-Control-Computed', 'msDS-UserPasswordExpiryTimeComputed', 'msDS-ResultantPSO', 'lockoutTime', 'sAMAccountName', 'displayName']
    user_status = {'user_dn':'', 'user_id':'', 'user_displayname':'', 'acct_pwd_expiry_enabled':'', 'acct_pwd_expiry':'', 'acct_pwd_last_set':'', 'acct_pwd_expired':'', 'acct_pwd_policy':'', 'acct_disabled':'', 'acct_locked':'', 'acct_locked_expiry':'', 'acct_expired':'', 'acct_expiry':'', 'acct_can_change_pwd':'', 'acct_bad_states':[]}
    bad_states = ['acct_locked', 'acct_disabled', 'acct_expired', 'acct_pwd_expired']
    try:
        # search for user
        results = self.conn.search_s(user_base, user_scope, user_filter, status_attribs)
    except ldap.LDAPError, e:
        raise self.ldap_error(e)
    if len(results) != 1: # sAMAccountName must be unique
        raise self.user_not_found(user)
    result = results[0]
    user_dn = result[0]
    user_attribs = result[1]
    uac = int(user_attribs['userAccountControl'][0])
    uac_live = int(user_attribs['msDS-User-Account-Control-Computed'][0])
    s = user_status
    s['user_dn'] = user_dn
    s['user_id'] = user_attribs['sAMAccountName'][0]
    s['user_displayname'] = user_attribs['displayName'][0]
    # AD complexity will not allow a word longer than 2 characters as part of displayName
    s['user_displayname_tokenized'] = [a for a in re.split('[,.\-_ #\t]+', s['user_displayname']) if len(a) > 2]
    # uac_live (msDS-User-Account-Control-Computed) contains current locked, pwd_expired status
    s['acct_locked'] = (1 if (uac_live & 0x00000010) else 0)       # UF_LOCKOUT
    s['acct_disabled'] = (1 if (uac & 0x00000002) else 0)          # UF_ACCOUNTDISABLE
    s['acct_expiry'] = self.ad_time_to_unix(user_attribs['accountExpires'][0])
    # accountExpires == 0 means "never expires".
    s['acct_expired'] = (0 if datetime.datetime.fromtimestamp(s['acct_expiry']) > datetime.datetime.now() or s['acct_expiry'] == 0 else 1)
    s['acct_pwd_last_set'] = self.ad_time_to_unix(user_attribs['pwdLastSet'][0])
    s['acct_pwd_expiry_enabled'] = (0 if (uac & 0x00010000) else 1)  # UF_DONT_EXPIRE_PASSWD
    # For password expiration need to determine which policy, if any, applies to this user.
    # msDS-ResultantPSO will be present in Server 2008+ and if the user has a PSO applied.
    # If not present, use the domain default.
    if 'msDS-ResultantPSO' in user_attribs and user_attribs['msDS-ResultantPSO'][0] in self.granular_pwd_policy:
        s['acct_pwd_policy'] = self.granular_pwd_policy[user_attribs['msDS-ResultantPSO'][0]]
    else:
        s['acct_pwd_policy'] = self.domain_pwd_policy
    # If account is locked, expiry comes from lockoutTime + policy lockout ttl.
    # lockoutTime is only reset to 0 on next successful login.
    s['acct_locked_expiry'] = (self.ad_time_to_unix(user_attribs['lockoutTime'][0]) + s['acct_pwd_policy']['pwd_lockout_ttl'] if s['acct_locked'] else 0)
    # msDS-UserPasswordExpiryTimeComputed is when a password expires. If never it is very high.
    s['acct_pwd_expired'] = (1 if (uac_live & 0x00800000) else 0)  # UF_PASSWORD_EXPIRED
    s['acct_pwd_expiry'] = self.ad_time_to_unix(user_attribs['msDS-UserPasswordExpiryTimeComputed'][0])
    for state in bad_states:
        if s[state]:
            s['acct_bad_states'].append(state)
    # If there is something in s['acct_bad_states'] not in self.can_change_pwd_states, they can't change pwd.
    s['acct_can_change_pwd'] = (0 if (len(set(s['acct_bad_states']) - set(self.can_change_pwd_states)) != 0) else 1)
    return s
def get_pwd_policies(self):
    """Load the domain default password policy and (Server 2008r2+) any
    fine-grained PSO policies into the class-level policy dicts."""
    default_policy_container = self.base
    default_policy_attribs = ['maxPwdAge', 'minPwdLength', 'pwdHistoryLength', 'pwdProperties', 'lockoutThreshold', 'lockOutObservationWindow', 'lockoutDuration']
    # Maps raw AD attribute names to the normalized keys used internally.
    default_policy_map = {'maxPwdAge':'pwd_ttl', 'minPwdLength':'pwd_length_min', 'pwdHistoryLength':'pwd_history_depth', 'pwdProperties':'pwd_complexity_enforced', 'lockoutThreshold':'pwd_lockout_threshold', 'lockOutObservationWindow':'pwd_lockout_window', 'lockoutDuration':'pwd_lockout_ttl'}
    granular_policy_container = 'CN=Password Settings Container,CN=System,%s' % (self.base)
    granular_policy_filter = '(objectClass=msDS-PasswordSettings)'
    granular_policy_attribs = ['msDS-LockoutDuration', 'msDS-LockoutObservationWindow', 'msDS-PasswordSettingsPrecedence', 'msDS-MaximumPasswordAge', 'msDS-LockoutThreshold', 'msDS-MinimumPasswordLength', 'msDS-PasswordComplexityEnabled', 'msDS-PasswordHistoryLength']
    # NOTE(review): the 'pwd_<PASSWORD>' value below looks like a redaction
    # artifact; it should presumably be 'pwd_length_min' -- confirm against
    # the original source.
    granular_policy_map = {'msDS-MaximumPasswordAge':'pwd_ttl', 'msDS-MinimumPasswordLength':'pwd_<PASSWORD>', 'msDS-PasswordComplexityEnabled':'pwd_complexity_enforced', 'msDS-PasswordHistoryLength':'pwd_history_depth', 'msDS-LockoutThreshold':'pwd_lockout_threshold', 'msDS-LockoutObservationWindow':'pwd_lockout_window', 'msDS-LockoutDuration':'pwd_lockout_ttl','msDS-PasswordSettingsPrecedence':'pwd_policy_priority'}
    if not self.conn:
        return None
    try:
        # Load domain-wide policy.
        results = self.conn.search_s(default_policy_container, ldap.SCOPE_BASE)
    except ldap.LDAPError, e:
        raise self.ldap_error(e)
    dpp = dict([(default_policy_map[k], results[0][1][k][0]) for k in default_policy_map.keys()])
    dpp["pwd_policy_priority"] = 0 # 0 Indicates don't use it in priority calculations
    self.domain_pwd_policy = self.sanitize_pwd_policy(dpp)
    # Server 2008r2 only. Per-group policies in CN=Password Settings Container,CN=System
    results = self.conn.search_s(granular_policy_container, ldap.SCOPE_ONELEVEL, granular_policy_filter, granular_policy_attribs)
    for policy in results:
        gpp = dict([(granular_policy_map[k], policy[1][k][0]) for k in granular_policy_map.keys()])
        self.granular_pwd_policy[policy[0]] = self.sanitize_pwd_policy(gpp)
        self.granular_pwd_policy[policy[0]]['pwd_policy_dn'] = policy[0]
def sanitize_pwd_policy(self, pwd_policy):
    """Normalize a raw policy dict: cast counts to int, booleans to 0/1,
    and AD 100ns intervals to seconds. Returns None when any expected
    key is missing."""
    valid_policy_entries = ['pwd_ttl', 'pwd_length_min', 'pwd_history_depth', 'pwd_complexity_enforced', 'pwd_lockout_threshold', 'pwd_lockout_window', 'pwd_lockout_ttl', 'pwd_policy_priority']
    # All expected keys must be present, otherwise bail out.
    if len(set(valid_policy_entries) - set(pwd_policy.keys())) != 0:
        return None
    pwd_policy['pwd_history_depth'] = int(pwd_policy['pwd_history_depth'])
    pwd_policy['pwd_length_min'] = int(pwd_policy['pwd_length_min'])
    # Domain policy stores complexity as a pwdProperties bitmask (bit 0);
    # granular PSOs store it as the strings 'TRUE'/'FALSE'.
    pwd_policy['pwd_complexity_enforced'] = (int(pwd_policy['pwd_complexity_enforced']) & 0x1 if pwd_policy['pwd_complexity_enforced'] not in ['TRUE', 'FALSE'] else int({'TRUE':1, 'FALSE':0}[pwd_policy['pwd_complexity_enforced']]))
    pwd_policy['pwd_ttl'] = self.ad_time_to_seconds(pwd_policy['pwd_ttl'])
    pwd_policy['pwd_lockout_ttl'] = self.ad_time_to_seconds(pwd_policy['pwd_lockout_ttl'])
    pwd_policy['pwd_lockout_window'] = self.ad_time_to_seconds(pwd_policy['pwd_lockout_window'])
    pwd_policy['pwd_lockout_threshold'] = int(pwd_policy['pwd_lockout_threshold'])
    pwd_policy['pwd_policy_priority'] = int(pwd_policy['pwd_policy_priority'])
    return pwd_policy
def is_admin(self, search_dn, admin = 0):
# Recursively look at what groups search_dn is a member of.
# If we find a search_dn is a member of the builtin Administrators group, return true.
if not self.conn:
return None
try:
results = self.conn.search_s(search_dn, ldap.SCOPE_BASE, '(memberOf=*)', ['memberOf'])
except ldap.LDAPError, e:
raise self.ldap_error(e)
if not results:
return 0
if ('CN=Administrators,CN=Builtin,'+self.base).lower() in [g.lower() for g in results[0][1]['memberOf']]:
| |
np.nan
tcr_f = np.nan
elif model == 'mri_esm2_0':
mrun_f = 'r1i1p1f1'
doi_f = '10.2151/jmsj.2019-051'
atmos_f = 'MRI-AGCM3.5 (TL159; 320 x 160 longitude/latitude; 80 levels; top level 0.01 hPa)'
surface_f = ' HAL 1.0'
ocean_f = 'MRI.COM4.4 (tripolar primarily 0.5 deg latitude/1 deg longitude with meridional refinement down to 0.3 deg within 10 degrees north and south of the equator; 360 x 364 longitude/latitude; 61 levels; top grid cell 0-2 m)'
seaice_f = 'MRI.COM4.4'
aerosols_f = 'MASINGAR mk2r4 (TL95; 192 x 96 longitude/latitude; 80 levels; top level 0.01 hPa)'
chemistry_f = 'MRI-CCM2.1 (T42; 128 x 64 longitude/latitude; 80 levels; top level 0.01 hPa)'
obgc_f = 'MRI.COM4.4'
landice_f = 'none'
coupler_f = 'Scup'
complex_f = '2222112210' #122220 for r1i2p1f1, provided by <NAME>
addinfo_f = ''
family_f = 'esm'
cmip_f = 6
calendar_f = 'gregorian'
rgb_f = '#B5651D'
marker_f = 'v'
latres_atm_f = 160
lonres_atm_f = 320
lev_atm_f = 80
latres_oc_f = 364 #from metadata in file
lonres_oc_f = 360
lev_oc_f = 61
ecs_f = 3.2
tcr_f = 1.6
ecs_f = np.nan
tcr_f = np.nan
elif model == 'noresm1_m':
mrun_f = 'r1i1p1'
doi_f = '10.5194/gmd-6-687-2013'
atmos_f = 'CAM-Oslo (CAM4-Oslo-noresm-ver1_cmip5-r112, f19L26)'
surface_f = 'CLM (CLM4-noresm-ver1_cmip5-r112)'
ocean_f = 'MICOM (MICOM-noresm-ver1_cmip5-r112, gx1v6L53)'
seaice_f = 'CICE (CICE4-noresm-ver1_cmip5-r112)'
aerosols_f = 'CAM-Oslo (CAM4-Oslo-noresm-ver1_cmip5-r112, f19L26)'
chemistry_f = 'none'
obgc_f = 'none'
landice_f = 'none'
coupler_f = 'CPL7'
complex_f = '2222122000' #confirmed by <NAME>
addinfo_f = 'based on the Community Climate System Model version 4 (CCSM4); in the NorESM experiments discussed in this study, the carbon–nitrogen (CN) cycle option of CLM4 is enabled (Thornton et al., 2007; Gent et al., 2011). Within the land component the carbon and nitrogen are prognostic variables, while carbon and nitrogen fluxes are diagnostically determined and do not influence other model components.'
family_f = 'gcm'
cmip_f = 5
rgb_f = '#FFB6C1'
marker_f = 'P'
latres_atm_f = 96
lonres_atm_f = 144
lev_atm_f = 26
latres_oc_f = 320
lonres_oc_f = 384
lev_oc_f = 53 #70 in oceanfile
ecs_f = 2.8
tcr_f = 1.4
ecs_f = 2.8
tcr_f = 1.4
elif model == 'noresm2_lm':
mrun_f = 'r1i1p1f1'
doi_f = '10.5194/gmd-6-687-2013'
atmos_f = 'CAM-OSLO (2 degree resolution; 144 x 96; 32 levels; top level 3 mb), based on CAM6'
surface_f = 'CLM5'
ocean_f = 'BLOM, based on MICOM (1 degree resolution; 360 x 384; 70 levels; top grid cell minimum 0-2.5 m [native model uses hybrid density and generic upper-layer coordinate interpolated to z-level for contributed data])'
seaice_f = 'CICE5'
aerosols_f = 'OsloAero6'
chemistry_f = 'OsloChemSimp'
obgc_f = 'iHAMOCC'
landice_f = 'none' #WARNING: while 10.5194/gmd-2019-378 states CISM is not activated, it is indicated in the source_id attribute of the models' nc files. These information are in conflict.
coupler_f = 'CPL7'
complex_f = '2222122120' #confirmed by <NAME>
addinfo_f = ''
family_f = 'esm'
cmip_f = 6
calendar_f = '365_day'
rgb_f = '#FFB6C1'
marker_f = 'v'
latres_atm_f = 96
lonres_atm_f = 144
lev_atm_f = 32
latres_oc_f = 360 #from array in file
lonres_oc_f = 384
lev_oc_f = 53 #or 70?
ecs_f = 2.5
tcr_f = 1.5
co2_f = 'concentrations' #concentrations or emissions
elif model == 'noresm2_mm':
mrun_f = 'r1i1p1f1'
doi_f = '10.5194/gmd-6-687-2013'
atmos_f = 'CAM-OSLO (1 degree resolution; 288 x 192; 32 levels; top level 3 mb), based on CAM6'
surface_f = 'CLM5'
ocean_f = 'BLOM, based on MICOM (1 degree resolution; 360 x 384; 70 levels; top grid cell minimum 0-2.5 m [native model uses hybrid density and generic upper-layer coordinate interpolated to z-level for contributed data])'
seaice_f = 'CICE5'
aerosols_f = 'OsloAero6'
chemistry_f = 'OsloChemSimp'
obgc_f = 'iHAMOCC'
landice_f = 'none' #WARNING: while 10.5194/gmd-2019-378 states CISM is not activated, it is indicated in the source_id attribute of the models' nc files. These information are in conflict.
coupler_f = 'CPL7'
complex_f = '2222122120' #confirmed by <NAME>
addinfo_f = 'based on CESM2.1, differs from noresm2_lm only in terms of the AGCMs horizontal resolution'
family_f = 'esm'
cmip_f = 6
calendar_f = '365_day'
rgb_f = '#FFB6C1'
marker_f = '<'
latres_atm_f = 192
lonres_atm_f = 288
lev_atm_f = 32
latres_oc_f = 360
lonres_oc_f = 384
lev_oc_f = 53 # or 70?
ecs_f = np.nan
tcr_f = np.nan
co2_f = 'concentrations'
elif model == 'nesm3': #v3
mrun_f = 'r1i1p1f1'
doi_f = '10.5194/gmd-11-2975-2018'
atmos_f = 'ECHAM v6.3 (T63; 192 x 96)'
surface_f = 'JSBACH v3.1'
ocean_f = 'NEMO v3.4 (NEMO v3.4, tripolar primarily 1deg; 384 x 362 longitude/latitude; 46 levels; top grid cell 0-6 m)'
seaice_f = 'CICE4.1'
aerosols_f = 'none'
chemistry_f = 'none'
obgc_f = 'none'
landice_f = 'none'
coupler_f = 'OASIS3-MCT3.0'
complex_f = '2222221000' #provided by Dr. <NAME>
addinfo_f = ''
family_f = 'gcm'
cmip_f = 6
calendar_f = 'gregorian'
rgb_f = 'white'
marker_f = '>'
latres_atm_f = 96
lonres_atm_f = 192
lev_atm_f = 47
latres_oc_f = 292
lonres_oc_f = 362
lev_oc_f = 46
ecs_f = 4.7
tcr_f = 2.7
elif model == 'sam0_unicon': #v2017
mrun_f = 'r1i1p1f1'
doi_f = '10.1175/JCLI-D-18-0796.1'
atmos_f = 'CAM5.3 with UNICON (1deg; 288 x 192 longitude/latitude; 30 levels; top level ~2 hPa)'
surface_f = 'CLM4.0'
ocean_f = 'POP2 (Displaced Pole; 320 x 384 longitude/latitude; 60 levels; top grid cell 0-10 m)'
seaice_f = 'CICE4.0'
aerosols_f = 'MAM3'
chemistry_f = 'none'
obgc_f = 'none'
landice_f = 'none'
coupler_f = 'not specified in reference article or source_id attribute but should be CPL'
complex_f = '2222222000' #treatment of tbgc is unclear from the reference article, but normally taken into account by CLM4, needs to be confirmed by the corresponding model development team yet
addinfo_f = 'based on CESM1. Mainly, the parametrization schemes of CAM5.3 have been modified.'
family_f = 'esm'
cmip_f = 6
rgb_f = 'white'
marker_f = '^'
latres_atm_f = 192
lonres_atm_f = 288
lev_atm_f = 30
latres_oc_f = 384 #from metadata in file
lonres_oc_f = 320
lev_oc_f = 60
ecs_f = 3.7
tcr_f = 2.3
elif model == 'kiost_esm':
fullname_f = 'KIOST-ESM (2018)'
mrun_f = 'r1i1p1f1'
doi_f = '10.1007/s12601-021-00001-7'
atmos_f = 'GFDL-AM2.0 (cubed sphere (C48); 192 x 96 longitude/latitude; 32 vertical levels; top level 2 hPa)'
surface_f = 'NCAR-CLM4'
ocean_f = 'GFDL-MOM5.0 (tripolar - nominal 1.0 deg; 360 x 200 longitude/latitude; 52 levels; top grid cell 0-2 m; NK mixed layer scheme)'
seaice_f = 'GFDL-SIS'
aerosols_f = 'none'
chemistry_f = 'Simple carbon aerosol model (emission type)'
obgc_f = 'TOPAZ2'
landice_f = 'none'
coupler_f = ''
complex_f = '2222221120' #provided by <NAME>
addinfo_f = ' based on a low-resolution version of the Geophysical Fluid Dynamics Laboratory Climate Model version 2.5. he main changes made to the base model include using new cumulus convection and ocean mixed layer parameterization schemes, which improve the model fidelity significantly. In addition, the KIOST-ESM adopts dynamic vegetation and new soil respiration schemes in its land model component.'
family_f = 'esm'
cmip_f = 6
rgb_f = 'white'
marker_f = 's'
latres_atm_f = 96
lonres_atm_f = 192
lev_atm_f = 32
latres_oc_f = 200
lonres_oc_f = 360
lev_oc_f = 52
ecs_f = np.nan
tcr_f = np.nan
elif model == 'taiesm1':
fullname_f = 'TaiESM 1.0 (2018), based on CAM5.3'
mrun_f = 'r1i1p1f1'
doi_f = '10.5194/gmd-13-3887-2020'
atmos_f = 'TaiAM1 (0.9x1.25 degree; 288 x 192 longitude/latitude; 30 levels; top level ~2 hPa)'
surface_f = 'CLM4.0 (same grid as atmos)'
ocean_f = 'POP2 (320x384 longitude/latitude; 60 levels; top grid cell 0-10 m)'
seaice_f = 'CICE4'
aerosols_f = 'SNAP (same grid as atmos)'
chemistry_f = 'none'
obgc_f = 'none'
landice_f = 'none'
coupler_f = ''
complex_f = '2222222000' #confirmed by <NAME>: "Most numbers are correct except the "vegetation" that I am not quite sure about. We did not turn on dynamic vegetation in our land model (CLM4). The fraction of land use is given by the input file but not simulated. However, the biomass can evolve because we turned on the carbon-nitrogen process. Therefore, maybe "vegetation" should be 1 (but it depends on how you define interactive for vegetation)."
addinfo_f = 'based on CESM1.2.2, | |
= f'Book{book_index}'
self.course.textbooks = self.books
self.course.pdf_textbooks = self.books
self.course.html_textbooks = self.books
def test_pdf_textbook_tabs(self):
"""
Test that all textbooks tab links generating correctly.
"""
type_to_reverse_name = {'textbook': 'book', 'pdftextbook': 'pdf_book', 'htmltextbook': 'html_book'}
self.addCleanup(set_current_request, None)
course_tab_list = get_course_tab_list(self.user, self.course)
num_of_textbooks_found = 0
for tab in course_tab_list:
# Verify links of all textbook type tabs.
if tab.type == 'single_textbook':
book_type, book_index = tab.tab_id.split("/", 1)
expected_link = reverse(
type_to_reverse_name[book_type],
args=[str(self.course.id), book_index]
)
tab_link = tab.link_func(self.course, reverse)
assert tab_link == expected_link
num_of_textbooks_found += 1
assert num_of_textbooks_found == self.num_textbooks
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_TEXTBOOK": False})
def test_textbooks_disabled(self):
tab = xmodule_tabs.CourseTab.load('textbooks')
assert not tab.is_enabled(self.course, self.user)
class TabListTestCase(TabTestCase):
    """Base class for Test cases involving tab lists."""
    def setUp(self):
        """Build shared fixtures: tab configurations that should fail validation
        (``self.invalid_tabs``) and ones that should pass (``self.valid_tabs``)."""
        super().setUp()
        # invalid tabs
        self.invalid_tabs = [
            # less than 2 tabs
            [{'type': CoursewareTab.type}],
            # missing course_info
            [{'type': CoursewareTab.type}, {'type': 'discussion', 'name': 'fake_name'}],
            [{'type': 'unknown_type'}],
            # incorrect order
            [{'type': 'discussion', 'name': 'fake_name'},
             {'type': CourseInfoTab.type, 'name': 'fake_name'}, {'type': CoursewareTab.type}],
        ]
        # tab types that should appear only once
        unique_tab_types = [
            CoursewareTab.type,
            CourseInfoTab.type,
            'textbooks',
            'pdf_textbooks',
            'html_textbooks',
        ]
        # Each of these becomes an invalid configuration because the unique
        # tab type is duplicated.
        for unique_tab_type in unique_tab_types:
            self.invalid_tabs.append([
                {'type': CoursewareTab.type},
                {'type': CourseInfoTab.type, 'name': 'fake_name'},
                # add the unique tab multiple times
                {'type': unique_tab_type},
                {'type': unique_tab_type},
            ])
        # valid tabs
        self.valid_tabs = [
            # any empty list is valid because a default list of tabs will be
            # generated to replace the empty list.
            [],
            # all valid tabs
            [
                {'type': CoursewareTab.type},
                {'type': CourseInfoTab.type, 'name': 'fake_name'},
                {'type': DatesTab.type},  # Add this even though we filter it out, for testing purposes
                {'type': 'discussion', 'name': 'fake_name'},
                {'type': ExternalLinkCourseTab.type, 'name': 'fake_name', 'link': 'fake_link'},
                {'type': ExternalLinkCourseTab.type, 'name': 'fake_name', 'link': 'fake_link'},
                {'type': 'textbooks'},
                {'type': 'pdf_textbooks'},
                {'type': 'html_textbooks'},
                {'type': ProgressTab.type, 'name': 'fake_name'},
                {'type': xmodule_tabs.StaticTab.type, 'name': 'fake_name', 'url_slug': 'schlug'},
                {'type': 'syllabus'},
            ],
            # with external discussion
            [
                {'type': CoursewareTab.type},
                {'type': CourseInfoTab.type, 'name': 'fake_name'},
                {'type': ExternalDiscussionCourseTab.type, 'name': 'fake_name', 'link': 'fake_link'}
            ],
        ]
        # NOTE: subclasses index valid_tabs[1] (the "all valid tabs" entry) here,
        # so keep the ordering of self.valid_tabs stable.
        self.all_valid_tab_list = xmodule_tabs.CourseTabList().from_json(self.valid_tabs[1])
class ValidateTabsTestCase(TabListTestCase):
    """Exercises CourseTabList.from_json validation on good and bad inputs."""
    def test_validate_tabs(self):
        """Invalid configurations raise; valid ones round-trip with the same length."""
        tabs = xmodule_tabs.CourseTabList()
        for bad_config in self.invalid_tabs:
            with pytest.raises(xmodule_tabs.InvalidTabsException):
                tabs.from_json(bad_config)
        for good_config in self.valid_tabs:
            parsed = tabs.from_json(good_config)
            assert len(parsed) == len(good_config)
    def test_invalid_tab_type(self):
        """
        An unrecognized tab type is dropped instead of making the whole
        tab list undisplayable.
        """
        tabs = xmodule_tabs.CourseTabList()
        config = [
            {'type': CoursewareTab.type},
            {'type': CourseInfoTab.type, 'name': 'fake_name'},
            {'type': 'no_such_type'},
        ]
        # The unknown entry is filtered out, leaving the two known tabs.
        assert len(tabs.from_json(config)) == 2
class CourseTabListTestCase(TabListTestCase):
    """Testing the generator method for iterating through displayable tabs"""
    def setUp(self):
        """Install the request-cleanup hook on top of the fixture setup."""
        super().setUp()
        self.addCleanup(set_current_request, None)
    def has_tab(self, tab_list, tab_type):
        """ Searches the given tab_list for a given tab_type; returns True on first match. """
        for tab in tab_list:
            if tab.type == tab_type:
                return True
        return False
    def test_initialize_default_without_syllabus(self):
        """No syllabus tab is generated when the course reports none present."""
        self.course.tabs = []
        self.course.syllabus_present = False
        xmodule_tabs.CourseTabList.initialize_default(self.course)
        assert not self.has_tab(self.course.tabs, 'syllabus')
    def test_initialize_default_with_syllabus(self):
        """A syllabus tab is generated when the course reports one present."""
        self.course.tabs = []
        self.course.syllabus_present = True
        xmodule_tabs.CourseTabList.initialize_default(self.course)
        assert self.has_tab(self.course.tabs, 'syllabus')
    def test_initialize_default_with_external_link(self):
        """A configured discussion_link replaces the internal discussion tab."""
        self.course.tabs = []
        self.course.discussion_link = "other_discussion_link"
        xmodule_tabs.CourseTabList.initialize_default(self.course)
        assert self.has_tab(self.course.tabs, 'external_discussion')
        assert not self.has_tab(self.course.tabs, 'discussion')
    def test_initialize_default_without_external_link(self):
        """Without a discussion_link, the internal discussion tab is used."""
        self.course.tabs = []
        self.course.discussion_link = ""
        xmodule_tabs.CourseTabList.initialize_default(self.course)
        assert not self.has_tab(self.course.tabs, 'external_discussion')
        assert self.has_tab(self.course.tabs, 'discussion')
    @patch.dict("django.conf.settings.FEATURES", {
        "ENABLE_TEXTBOOK": True,
        "ENABLE_DISCUSSION_SERVICE": True,
        "ENABLE_EDXNOTES": True,
    })
    def test_iterate_displayable(self):
        """iterate_displayable yields the enabled tabs, expanding collections when asked."""
        self.course.hide_progress_tab = False
        # create 1 book per textbook type
        self.set_up_books(1)
        # initialize the course tabs to a list of all valid tabs
        self.course.tabs = self.all_valid_tab_list
        # enumerate the tabs with no user
        expected = [tab.type for tab in
                    xmodule_tabs.CourseTabList.iterate_displayable(self.course, inline_collections=False)]
        actual = [tab.type for tab in self.course.tabs if tab.is_enabled(self.course, user=None)]
        assert actual == expected
        # enumerate the tabs with a staff user
        user = UserFactory(is_staff=True)
        CourseEnrollment.enroll(user, self.course.id)
        # NOTE: this relies on iterate_displayable yielding tabs in the same
        # order (and count) as self.course.tabs so index i lines up.
        for i, tab in enumerate(xmodule_tabs.CourseTabList.iterate_displayable(self.course, user=user)):
            if getattr(tab, 'is_collection_item', False):
                # a collection item was found as a result of a collection tab
                assert getattr(self.course.tabs[i], 'is_collection', False)
            else:
                # all other tabs must match the expected type
                assert tab.type == self.course.tabs[i].type
        # test including non-empty collections
        assert {'type': 'html_textbooks'} in\
            list(xmodule_tabs.CourseTabList.iterate_displayable(self.course, inline_collections=False))
        # test not including empty collections
        self.course.html_textbooks = []
        assert {'type': 'html_textbooks'} not in\
            list(xmodule_tabs.CourseTabList.iterate_displayable(self.course, inline_collections=False))
    def test_get_tab_by_methods(self):
        """Tests the get_tab methods in CourseTabList"""
        self.course.tabs = self.all_valid_tab_list
        for tab in self.course.tabs:
            # get tab by type
            assert xmodule_tabs.CourseTabList.get_tab_by_type(self.course.tabs, tab.type) == tab
            # get tab by id
            assert xmodule_tabs.CourseTabList.get_tab_by_id(self.course.tabs, tab.tab_id) == tab
    def test_course_tabs_staff_only(self):
        """
        Tests the static tabs that available only for instructor
        """
        self.course.tabs.append(xmodule_tabs.CourseTab.load('static_tab', name='Static Tab Free',
                                                            url_slug='extra_tab_1',
                                                            course_staff_only=False))
        self.course.tabs.append(xmodule_tabs.CourseTab.load('static_tab', name='Static Tab Instructors Only',
                                                            url_slug='extra_tab_2',
                                                            course_staff_only=True))
        self.course.save()
        # A plain enrolled learner must not see the staff-only static tab.
        user = self.create_mock_user(is_staff=False, is_enrolled=True)
        self.addCleanup(set_current_request, None)
        course_tab_list = get_course_tab_list(user, self.course)
        name_list = [x.name for x in course_tab_list]
        assert 'Static Tab Free' in name_list
        assert 'Static Tab Instructors Only' not in name_list
        # Login as member of staff
        self.client.logout()
        staff_user = StaffFactory(course_key=self.course.id)
        self.client.login(username=staff_user.username, password='<PASSWORD>')
        course_tab_list_staff = get_course_tab_list(staff_user, self.course)
        name_list_staff = [x.name for x in course_tab_list_staff]
        assert 'Static Tab Free' in name_list_staff
        assert 'Static Tab Instructors Only' in name_list_staff
class ProgressTestCase(TabTestCase):
    """Test cases for the progress tab."""
    def check_progress_tab(self):
        """Validate construction, link, and id of the progress tab; return the tab."""
        return self.check_tab(
            tab_class=ProgressTab,
            dict_tab={'type': ProgressTab.type, 'name': 'same'},
            expected_link=self.reverse('progress', args=[str(self.course.id)]),
            expected_tab_id=ProgressTab.type,
            invalid_dict_tab=None,
        )
    @patch('common.djangoapps.student.models.CourseEnrollment.is_enrolled')
    def test_progress(self, is_enrolled):
        """The tab displays normally, and stops displaying once the course hides it."""
        is_enrolled.return_value = True
        # Visible case: progress tab not hidden on the course.
        self.course.hide_progress_tab = False
        progress_tab = self.check_progress_tab()
        self.check_can_display_results(
            progress_tab, for_staff_only=True, for_enrolled_users_only=True
        )
        # Hidden case: the tab still constructs, but must not display.
        self.course.hide_progress_tab = True
        self.check_progress_tab()
        self.check_can_display_results(
            progress_tab, for_staff_only=True, for_enrolled_users_only=True, expected_value=False
        )
class StaticTabTestCase(TabTestCase):
    """Test cases for the static tab."""
    def test_static_tab(self):
        """A static tab resolves to its slug-based link and tab id."""
        slug = 'schmug'
        static_tab = self.check_tab(
            tab_class=xmodule_tabs.StaticTab,
            dict_tab={'type': xmodule_tabs.StaticTab.type, 'name': 'same', 'url_slug': slug},
            expected_link=self.reverse('static_tab', args=[str(self.course.id), slug]),
            expected_tab_id='static_tab_schmug',
            invalid_dict_tab=self.fake_dict_tab,
        )
        self.check_can_display_results(static_tab)
        self.check_get_and_set_method_for_key(static_tab, 'url_slug')
class CourseInfoTabTestCase(TabTestCase):
    """Test cases for the course info tab."""
    def setUp(self):  # lint-amnesty, pylint: disable=super-method-not-called
        # Deliberately skips super().setUp(); only a mock user and the
        # request-cleanup hook are needed here.
        self.user = self.create_mock_user()
        self.addCleanup(set_current_request, None)
    @override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=True)
    def test_default_tab(self):
        """With the unified course tab disabled, course_info appears second."""
        tabs = get_course_tab_list(self.user, self.course)
        # So I know this means course_info is not the first tab, but it is going to be
        # retired soon (https://openedx.atlassian.net/browse/TNL-7061) and also it has
        # a lower priority than courseware so seems odd that it would ever be first.
        # As such, I feel comfortable updating this test so it passes until it is removed
        # as part of the linked ticket
        second_tab = tabs[1]
        assert second_tab.type == 'course_info'
    @override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=False)
    def test_default_tab_for_new_course_experience(self):
        """The unified course experience hides course_info; courseware leads."""
        tabs = get_course_tab_list(self.user, self.course)
        first_tab = tabs[0]
        assert first_tab.type == 'courseware'
    # TODO: LEARNER-611 - remove once course_info is removed.
    @override_waffle_flag(DISABLE_UNIFIED_COURSE_TAB_FLAG, active=False)
    def test_default_tab_for_displayable(self):
        """iterate_displayable still yields course_info first."""
        displayable = xmodule_tabs.CourseTabList.iterate_displayable(self.course, self.user)
        for position, tab in enumerate(displayable):
            if position == 0:
                assert tab.type == 'course_info'
class DiscussionLinkTestCase(TabTestCase):
    """Test cases for the discussion link tab."""
    def setUp(self):
        super().setUp()
        # Tab list containing a discussion tab, and one without.
        self.tabs_with_discussion = [
            xmodule_tabs.CourseTab.load('discussion'),
        ]
        self.tabs_without_discussion = [
        ]
    @staticmethod
    def _reverse(course):
        """Return a reverse() stand-in that only resolves the forum link for *course*."""
        def reverse_discussion_link(viewname, args):
            """reverse lookup for discussion link; None for anything else"""
            matches = viewname == "forum_form_discussion" and args == [str(course.id)]
            return "default_discussion_link" if matches else None
        return reverse_discussion_link
    def check_discussion(
            self, tab_list,
            expected_discussion_link,
            expected_can_display_value,
            discussion_link_in_course="",
            is_staff=True,
            is_enrolled=True,
    ):
        """Assert whether the discussion tab exists, is enabled, and links as expected."""
        self.course.tabs = tab_list
        self.course.discussion_link = discussion_link_in_course
        discussion_tab = xmodule_tabs.CourseTabList.get_discussion(self.course)
        user = self.create_mock_user(is_staff=is_staff, is_enrolled=is_enrolled)
        with patch('common.djangoapps.student.models.CourseEnrollment.is_enrolled') as check_is_enrolled:
            check_is_enrolled.return_value = is_enrolled
            # Short-circuits: link_func is only reached when the tab exists
            # and is enabled for this user.
            can_display = (
                discussion_tab is not None and
                self.is_tab_enabled(discussion_tab, self.course, user) and
                discussion_tab.link_func(self.course, self._reverse(self.course)) == expected_discussion_link
            )
            assert can_display == expected_can_display_value
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": False})
    def test_explicit_discussion_link(self):
        """Test that setting discussion_link overrides everything else"""
        self.check_discussion(
            tab_list=self.tabs_with_discussion,
            discussion_link_in_course="other_discussion_link",
            expected_discussion_link="other_discussion_link",
            expected_can_display_value=True,
        )
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": False})
    def test_discussions_disabled(self):
        """Test that other cases return None with discussions disabled"""
        for configured_tabs in [[], self.tabs_with_discussion, self.tabs_without_discussion]:
            self.check_discussion(
                tab_list=configured_tabs,
                expected_discussion_link=not None,
                expected_can_display_value=False,
            )
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def test_tabs_with_discussion(self):
        """Test a course with a discussion tab configured"""
        self.check_discussion(
            tab_list=self.tabs_with_discussion,
            expected_discussion_link="default_discussion_link",
            expected_can_display_value=True,
        )
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def test_tabs_without_discussion(self):
        """Test a course with tabs configured but without a discussion tab"""
        self.check_discussion(
            tab_list=self.tabs_without_discussion,
            expected_discussion_link=not None,
            expected_can_display_value=False,
        )
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def test_tabs_enrolled_or_staff(self):
        """Either enrollment or staff status alone is enough to display the tab."""
        for enrolled, staff in [(True, False), (False, True)]:
            self.check_discussion(
                tab_list=self.tabs_with_discussion,
                expected_discussion_link="default_discussion_link",
                expected_can_display_value=True,
                is_enrolled=enrolled,
                is_staff=staff
            )
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def test_tabs_not_enrolled_or_staff(self):
        """A user who is neither enrolled nor staff must not see the tab."""
        self.check_discussion(
            tab_list=self.tabs_with_discussion,
            expected_discussion_link="default_discussion_link",
            expected_can_display_value=False,
            is_enrolled=False,
            is_staff=False
        )
class DatesTabTestCase(TabListTestCase):
"""Test cases for dates tab"""
@patch('common.djangoapps.student.models.CourseEnrollment.is_enrolled')
def test_dates_tab_disabled_if_unenrolled(self, is_enrolled):
tab = DatesTab({'type': DatesTab.type, 'name': 'dates'})
is_enrolled.return_value = False
unenrolled_user = self.create_mock_user(is_staff=False, is_enrolled=False)
assert not self.is_tab_enabled(tab, self.course, unenrolled_user)
staff_user = self.create_mock_user(is_staff=True, is_enrolled=False)
assert self.is_tab_enabled(tab, self.course, staff_user)
is_enrolled.return_value = True
enrolled_user = self.create_mock_user(is_staff=False, is_enrolled=True)
assert self.is_tab_enabled(tab, self.course, enrolled_user)
def test_singular_dates_tab(self):
"""Test cases for making sure no persisted dates tab is surfaced"""
user = self.create_mock_user()
self.course.tabs = self.all_valid_tab_list
self.course.save()
# Verify that there is a dates tab in the modulestore
has_dates_tab | |
E501
collection_formats = {}
path_params = {}
if 'anti_malware_id' in params:
path_params['antiMalwareID'] = params['anti_malware_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/antimalwareconfigurations/{antiMalwareID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AntiMalwareConfiguration', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_anti_malwares(self, api_version, **kwargs): # noqa: E501
"""List Anti-Malware Configurations # noqa: E501
Lists all anti-malware configurations. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_anti_malwares(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:return: AntiMalwareConfigurations
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_anti_malwares_with_http_info(api_version, **kwargs) # noqa: E501
else:
(data) = self.list_anti_malwares_with_http_info(api_version, **kwargs) # noqa: E501
return data
    def list_anti_malwares_with_http_info(self, api_version, **kwargs):  # noqa: E501
        """List Anti-Malware Configurations  # noqa: E501
        Lists all anti-malware configurations.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_anti_malwares_with_http_info(api_version, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str api_version: The version of the api being called. (required)
        :return: AntiMalwareConfigurations
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unrecognized keyword argument is supplied.
        :raises ValueError: if api_version is missing or None.
        """
        # Keyword arguments accepted by this endpoint; anything else raises.
        all_params = ['api_version']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Generated-code idiom: snapshot *all* locals (self, api_version,
        # kwargs, all_params) into one dict, then fold validated kwargs in.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_anti_malwares" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'api_version' is set
        if ('api_version' not in params or
                params['api_version'] is None):
            raise ValueError("Missing the required parameter `api_version` when calling `list_anti_malwares`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        # The API version travels as a request header, not in the path/query.
        header_params = {}
        if 'api_version' in params:
            header_params['api-version'] = params['api_version']  # noqa: E501
        form_params = []
        local_var_files = {}
        # GET request: no body is sent.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['DefaultAuthentication']  # noqa: E501
        # Delegate transport, (de)serialization, and async handling to the
        # shared api_client.
        return self.api_client.call_api(
            '/antimalwareconfigurations', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AntiMalwareConfigurations',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def modify_anti_malware(self, anti_malware_id, anti_malware_configuration, api_version, **kwargs): # noqa: E501
"""Modify an Anti-Malware Configuration # noqa: E501
Modify an anti-malware configuration by ID. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_anti_malware(anti_malware_id, anti_malware_configuration, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int anti_malware_id: The ID number of the anti-malware configuration to modify. (required)
:param AntiMalwareConfiguration anti_malware_configuration: The settings of the anti-malware configuration to modify. (required)
:param str api_version: The version of the api being called. (required)
:return: AntiMalwareConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_anti_malware_with_http_info(anti_malware_id, anti_malware_configuration, api_version, **kwargs) # noqa: E501
else:
(data) = self.modify_anti_malware_with_http_info(anti_malware_id, anti_malware_configuration, api_version, **kwargs) # noqa: E501
return data
    def modify_anti_malware_with_http_info(self, anti_malware_id, anti_malware_configuration, api_version, **kwargs):  # noqa: E501
        """Modify an Anti-Malware Configuration  # noqa: E501
        Modify an anti-malware configuration by ID. Any unset elements will be left unchanged.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.modify_anti_malware_with_http_info(anti_malware_id, anti_malware_configuration, api_version, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int anti_malware_id: The ID number of the anti-malware configuration to modify. (required)
        :param AntiMalwareConfiguration anti_malware_configuration: The settings of the anti-malware configuration to modify. (required)
        :param str api_version: The version of the api being called. (required)
        :return: AntiMalwareConfiguration
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: if an unrecognized keyword argument is supplied.
        :raises ValueError: if a required parameter is missing or invalid.
        """
        # Keyword arguments accepted by this endpoint; anything else raises.
        all_params = ['anti_malware_id', 'anti_malware_configuration', 'api_version']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Generated-code idiom: snapshot *all* locals into one dict, then
        # fold validated kwargs in.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method modify_anti_malware" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'anti_malware_id' is set
        if ('anti_malware_id' not in params or
                params['anti_malware_id'] is None):
            raise ValueError("Missing the required parameter `anti_malware_id` when calling `modify_anti_malware`")  # noqa: E501
        # verify the required parameter 'anti_malware_configuration' is set
        if ('anti_malware_configuration' not in params or
                params['anti_malware_configuration'] is None):
            raise ValueError("Missing the required parameter `anti_malware_configuration` when calling `modify_anti_malware`")  # noqa: E501
        # verify the required parameter 'api_version' is set
        if ('api_version' not in params or
                params['api_version'] is None):
            raise ValueError("Missing the required parameter `api_version` when calling `modify_anti_malware`")  # noqa: E501
        # NOTE(review): re.search only requires *some* digit anywhere in the
        # string, so e.g. "abc1" passes; presumably the generator intended a
        # full match against /\d+/ — confirm against the API spec.
        if 'anti_malware_id' in params and not re.search('\\d+', str(params['anti_malware_id'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `anti_malware_id` when calling `modify_anti_malware`, must conform to the pattern `/\\d+/`")  # noqa: E501
        collection_formats = {}
        # The ID is substituted into the {antiMalwareID} path template below.
        path_params = {}
        if 'anti_malware_id' in params:
            path_params['antiMalwareID'] = params['anti_malware_id']  # noqa: E501
        query_params = []
        # The API version travels as a request header.
        header_params = {}
        if 'api_version' in params:
            header_params['api-version'] = params['api_version']  # noqa: E501
        form_params = []
        local_var_files = {}
        # The configuration object becomes the JSON request body.
        body_params = None
        if 'anti_malware_configuration' in params:
            body_params = params['anti_malware_configuration']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['DefaultAuthentication']  # noqa: E501
        # Delegate transport, (de)serialization, and async handling to the
        # shared api_client.
        return self.api_client.call_api(
            '/antimalwareconfigurations/{antiMalwareID}', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='AntiMalwareConfiguration',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def search_anti_malwares(self, api_version, **kwargs): # noqa: E501
"""Search Anti-Malware Configurations # noqa: E501
Search for anti-malware configurations using optional filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_anti_malwares(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:param SearchFilter search_filter: A collection of options used to filter the search results.
:return: AntiMalwareConfigurations
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_anti_malwares_with_http_info(api_version, **kwargs) # noqa: E501
else:
(data) = self.search_anti_malwares_with_http_info(api_version, **kwargs) # noqa: E501
return data
def search_anti_malwares_with_http_info(self, api_version, **kwargs): # noqa: E501
"""Search Anti-Malware Configurations # noqa: E501
Search for anti-malware configurations using optional filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_anti_malwares_with_http_info(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:param SearchFilter search_filter: A collection of options used to filter the search results.
:return: AntiMalwareConfigurations
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_version', 'search_filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_anti_malwares" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `search_anti_malwares`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'search_filter' in params:
body_params = params['search_filter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings | |
now
self.fluxBoundaryConditions=fluxBoundaryConditionsDict
self.advectiveFluxBoundaryConditionsSetterDict=advectiveFluxBoundaryConditionsSetterDict
self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict
#determine whether the stabilization term is nonlinear
self.stabilizationIsNonlinear = False
#cek come back
if self.stabilization is not None:
for ci in range(self.nc):
if coefficients.mass.has_key(ci):
for flag in coefficients.mass[ci].values():
if flag == 'nonlinear':
self.stabilizationIsNonlinear=True
if coefficients.advection.has_key(ci):
for flag in coefficients.advection[ci].values():
if flag == 'nonlinear':
self.stabilizationIsNonlinear=True
if coefficients.diffusion.has_key(ci):
for diffusionDict in coefficients.diffusion[ci].values():
for flag in diffusionDict.values():
if flag != 'constant':
self.stabilizationIsNonlinear=True
if coefficients.potential.has_key(ci):
for flag in coefficients.potential[ci].values():
if flag == 'nonlinear':
self.stabilizationIsNonlinear=True
if coefficients.reaction.has_key(ci):
for flag in coefficients.reaction[ci].values():
if flag == 'nonlinear':
self.stabilizationIsNonlinear=True
if coefficients.hamiltonian.has_key(ci):
for flag in coefficients.hamiltonian[ci].values():
if flag == 'nonlinear':
self.stabilizationIsNonlinear=True
#determine if we need element boundary storage
self.elementBoundaryIntegrals = {}
for ci in range(self.nc):
self.elementBoundaryIntegrals[ci] = ((self.conservativeFlux is not None) or
(numericalFluxType is not None) or
(self.fluxBoundaryConditions[ci] == 'outFlow') or
(self.fluxBoundaryConditions[ci] == 'mixedFlow') or
(self.fluxBoundaryConditions[ci] == 'setFlow'))
#
#calculate some dimensions
#
self.nSpace_global = self.u[0].femSpace.nSpace_global #assume same space dim for all variables
self.nDOF_trial_element = [u_j.femSpace.max_nDOF_element for u_j in self.u.values()]
self.nDOF_phi_trial_element = [phi_k.femSpace.max_nDOF_element for phi_k in self.phi.values()]
self.n_phi_ip_element = [phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in self.phi.values()]
self.nDOF_test_element = [femSpace.max_nDOF_element for femSpace in self.testSpace.values()]
self.nFreeDOF_global = [dc.nFreeDOF_global for dc in self.dirichletConditions.values()]
self.nVDOF_element = sum(self.nDOF_trial_element)
self.nFreeVDOF_global = sum(self.nFreeDOF_global)
#
NonlinearEquation.__init__(self,self.nFreeVDOF_global)
#
#build the quadrature point dictionaries from the input (this
#is just for convenience so that the input doesn't have to be
#complete)
#
elementQuadratureDict={}
elemQuadIsDict = isinstance(elementQuadrature,dict)
if elemQuadIsDict: #set terms manually
for I in self.coefficients.elementIntegralKeys:
if elementQuadrature.has_key(I):
elementQuadratureDict[I] = elementQuadrature[I]
else:
elementQuadratureDict[I] = elementQuadrature['default']
else:
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[I] = elementQuadrature
if self.stabilization is not None:
for I in self.coefficients.elementIntegralKeys:
if elemQuadIsDict:
if elementQuadrature.has_key(I):
elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature[I]
else:
elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature['default']
else:
elementQuadratureDict[('stab',)+I[1:]] = elementQuadrature
if self.shockCapturing is not None:
for ci in self.shockCapturing.components:
if elemQuadIsDict:
if elementQuadrature.has_key(('numDiff',ci,ci)):
elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature[('numDiff',ci,ci)]
else:
elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature['default']
else:
elementQuadratureDict[('numDiff',ci,ci)] = elementQuadrature
if massLumping:
for ci in self.coefficients.mass.keys():
elementQuadratureDict[('m',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)
if reactionLumping:
for ci in self.coefficients.mass.keys():
elementQuadratureDict[('r',ci)] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)
for I in self.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global,1)
elementBoundaryQuadratureDict={}
if isinstance(elementBoundaryQuadrature,dict): #set terms manually
for I in self.coefficients.elementBoundaryIntegralKeys:
if elementBoundaryQuadrature.has_key(I):
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature[I]
else:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature['default']
else:
for I in self.coefficients.elementBoundaryIntegralKeys:
elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature
#
# find the union of all element quadrature points and
# build a quadrature rule for each integral that has a
# weight at each point in the union
#mwf include tag telling me which indices are which quadrature rule?
(self.elementQuadraturePoints,self.elementQuadratureWeights,
self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)
self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]
self.nQuadraturePoints_global = self.nQuadraturePoints_element*self.mesh.nElements_global
#
#Repeat the same thing for the element boundary quadrature
#
(self.elementBoundaryQuadraturePoints,
self.elementBoundaryQuadratureWeights,
self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)
self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[0]
self.nElementBoundaryQuadraturePoints_global = (self.mesh.nElements_global*
self.mesh.nElementBoundaries_element*
self.nElementBoundaryQuadraturePoints_elementBoundary)
# if isinstance(self.u[0].femSpace,C0_AffineLinearOnSimplexWithNodalBasis):
# print self.nQuadraturePoints_element
# if self.nSpace_global == 3:
# assert(self.nQuadraturePoints_element == 5)
# elif self.nSpace_global == 2:
# assert(self.nQuadraturePoints_element == 6)
# elif self.nSpace_global == 1:
# assert(self.nQuadraturePoints_element == 3)
#
# print self.nElementBoundaryQuadraturePoints_elementBoundary
# if self.nSpace_global == 3:
# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)
# elif self.nSpace_global == 2:
# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)
# elif self.nSpace_global == 1:
# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 1)
#
#simplified allocations for test==trial and also check if space is mixed or not
#
self.q={}
self.ebq={}
self.ebq_global={}
self.ebqe={}
self.phi_ip={}
#mesh
#self.q['x'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,3),'d')
self.ebqe['x'] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,3),'d')
self.q[('u',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
self.q[('grad(u)',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')
self.q[('m_last',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
self.q[('m_tmp',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
self.q[('m',0)] = self.q[('u',0)]#for time integration by VBDF and probably FLCBDF
#needed by PsiTCtte
self.q[('mt',0)] = numpy.zeros(self.q[('u',0)].shape,'d')
self.q[('dH',0,0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')
self.q[('dH_sge',0,0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element,self.nSpace_global),'d')
self.q[('cfl',0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
self.q[('numDiff',0,0)] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')
self.ebqe[('u',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
self.ebqe[('grad(u)',0)] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary,self.nSpace_global),'d')
self.points_elementBoundaryQuadrature= set()
self.scalars_elementBoundaryQuadrature= set([('u',ci) for ci in range(self.nc)])
self.vectors_elementBoundaryQuadrature= set()
self.tensors_elementBoundaryQuadrature= set()
self.inflowBoundaryBC = {}
self.inflowBoundaryBC_values = {}
self.inflowFlux = {}
for cj in range(self.nc):
self.inflowBoundaryBC[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,),'i')
self.inflowBoundaryBC_values[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nDOF_trial_element[cj]),'d')
self.inflowFlux[cj] = numpy.zeros((self.mesh.nExteriorElementBoundaries_global,self.nElementBoundaryQuadraturePoints_elementBoundary),'d')
self.internalNodes = set(range(self.mesh.nNodes_global))
#identify the internal nodes this is ought to be in mesh
##\todo move this to mesh
for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
eN_global = self.mesh.elementBoundaryElementsArray[ebN,0]
ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[ebN,0]
for i in range(self.mesh.nNodes_element):
if i != ebN_element:
I = self.mesh.elementNodesArray[eN_global,i]
self.internalNodes -= set([I])
self.nNodes_internal = len(self.internalNodes)
self.internalNodesArray=numpy.zeros((self.nNodes_internal,),'i')
for nI,n in enumerate(self.internalNodes):
self.internalNodesArray[nI]=n
#
del self.internalNodes
self.internalNodes = None
logEvent("Updating local to global mappings",2)
self.updateLocal2Global()
logEvent("Building time integration object",2)
logEvent(memory("inflowBC, internalNodes,updateLocal2Global","OneLevelTransport"),level=4)
#mwf for interpolating subgrid error for gradients etc
if self.stabilization and self.stabilization.usesGradientStabilization:
self.timeIntegration = TimeIntegrationClass(self,integrateInterpolationPoints=True)
else:
self.timeIntegration = TimeIntegrationClass(self)
if options is not None:
self.timeIntegration.setFromOptions(options)
logEvent(memory("TimeIntegration","OneLevelTransport"),level=4)
logEvent("Calculating numerical quadrature formulas",2)
self.calculateQuadrature()
self.setupFieldStrides()
comm = Comm.get()
self.comm=comm
if comm.size() > 1:
assert numericalFluxType is not None and numericalFluxType.useWeakDirichletConditions,"You must use a numerical flux to apply weak boundary conditions for parallel runs"
logEvent(memory("stride+offset","OneLevelTransport"),level=4)
if numericalFluxType is not None:
if options is None or options.periodicDirichletConditions is None:
self.numericalFlux = numericalFluxType(self,
dofBoundaryConditionsSetterDict,
advectiveFluxBoundaryConditionsSetterDict,
diffusiveFluxBoundaryConditionsSetterDictDict)
else:
self.numericalFlux = numericalFluxType(self,
dofBoundaryConditionsSetterDict,
advectiveFluxBoundaryConditionsSetterDict,
diffusiveFluxBoundaryConditionsSetterDictDict,
options.periodicDirichletConditions)
else:
self.numericalFlux = None
#set penalty terms
#cek todo move into numerical flux initialization
if self.ebq_global.has_key('penalty'):
for ebN in range(self.mesh.nElementBoundaries_global):
for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
self.ebq_global['penalty'][ebN,k] = self.numericalFlux.penalty_constant/(self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)
#penalty term
#cek move to Numerical flux initialization
if self.ebqe.has_key('penalty'):
for ebNE in range(self.mesh.nExteriorElementBoundaries_global):
ebN = self.mesh.exteriorElementBoundariesArray[ebNE]
for k in range(self.nElementBoundaryQuadraturePoints_elementBoundary):
self.ebqe['penalty'][ebNE,k] = self.numericalFlux.penalty_constant/self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power
logEvent(memory("numericalFlux","OneLevelTransport"),level=4)
self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray
#use post processing tools to get conservative fluxes, None by default
from proteus import PostProcessingTools
self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(self)
logEvent(memory("velocity postprocessor","OneLevelTransport"),level=4)
#helper for writing out data storage
from proteus import Archiver
self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()
self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()
#TODO get rid of this
for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():
self.ebqe[('advectiveFlux_bc_flag',ci)] = numpy.zeros(self.ebqe[('advectiveFlux_bc',ci)].shape,'i')
for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():
if self.coefficients.advection.has_key(ci):
self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)
self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1
#reduced quad
if hasattr(self.numericalFlux,'setDirichletValues'):
self.numericalFlux.setDirichletValues(self.ebqe)
if not hasattr(self.numericalFlux,'isDOFBoundary'):
self.numericalFlux.isDOFBoundary = {0:numpy.zeros(self.ebqe[('u',0)].shape,'i')}
if not hasattr(self.numericalFlux,'ebqe'):
self.numericalFlux.ebqe = {('u',0):numpy.zeros(self.ebqe[('u',0)].shape,'d')}
#TODO how to handle redistancing calls for calculateCoefficients,calculateElementResidual etc
self.globalResidualDummy = None
#create storage for enforcing weak dirichlet boundary conditions around zero level set contour
self.freezeLevelSet=1#True
self.u_dof_last = numpy.zeros(self.u[0].dof.shape,'d')
self.weakDirichletConditionFlags = numpy.zeros(self.u[0].dof.shape,'i')
self.dofFlag_element = numpy.zeros((self.nDOF_trial_element[0],),'i')
#allow Newton solves for redistancing
if self.timeIntegration.__class__ == TimeIntegration.NoIntegration:
self.timeIntegration.m_tmp = {0:numpy.zeros(self.q[('m_tmp',0)].shape,'d')}
self.timeIntegration.m_last = {0:numpy.zeros(self.q[('m_tmp',0)].shape,'d')}
compKernelFlag=0
if self.coefficients.useConstantH:
self.elementDiameter = self.mesh.elementDiametersArray.copy()
self.elementDiameter[:] = max(self.mesh.elementDiametersArray)
else:
self.elementDiameter = self.mesh.elementDiametersArray
self.rdls = cRDLS_base(self.nSpace_global,
self.nQuadraturePoints_element,
self.u[0].femSpace.elementMaps.localFunctionSpace.dim,
self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,
self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,
self.nElementBoundaryQuadraturePoints_elementBoundary,
compKernelFlag)
def calculateCoefficients(self):
    # Intentional no-op: coefficient evaluation is performed inside the
    # compiled kernel (self.rdls.calculateResidual) rather than in Python.
    pass
def calculateElementResidual(self):
    # Intentional no-op: element residual assembly happens in the compiled
    # kernel invoked from getResidual, so the Python element loop is bypassed.
    pass
def getResidual(self, u, r):
    """
    Evaluate the global nonlinear residual for the redistancing equation.

    Loads the degree-of-freedom vector ``u`` into the finite element
    spaces, applies Dirichlet boundary values, selects the time
    integration coefficients, and delegates element/boundary assembly to
    the compiled RDLS kernel (``self.rdls.calculateResidual``).

    :param u: global degree-of-freedom vector (flat array)
    :param r: global residual vector; overwritten in place
    """
    r.fill(0.0)
    # Load the unknowns into the finite element dof
    self.timeIntegration.calculateCoefs()
    self.timeIntegration.calculateU(u)
    self.setUnknowns(self.timeIntegration.u)
    # cek can put in logic to skip if BC's don't depend on t or u
    # Dirichlet boundary conditions
    if hasattr(self.numericalFlux, 'setDirichletValues'):
        self.numericalFlux.setDirichletValues(self.ebqe)
    # for now force time integration
    useTimeIntegration = 1
    if self.timeIntegration.__class__ == TimeIntegration.NoIntegration or not self.timeTerm:
        useTimeIntegration = 0
    if useTimeIntegration:
        alpha_bdf = self.timeIntegration.alpha_bdf
        beta_bdf = self.timeIntegration.beta_bdf
    else:
        # steady/pseudo-transient path: reuse dt and the last mass term
        alpha_bdf = self.timeIntegration.dt
        beta_bdf = self.timeIntegration.m_last
    # Delegate all element and element-boundary loops to the compiled kernel.
    # Argument order is fixed by the C++ API -- do not reorder.
    self.rdls.calculateResidual(#element
        self.u[0].femSpace.elementMaps.psi,
        self.u[0].femSpace.elementMaps.grad_psi,
        self.mesh.nodeArray,
        self.mesh.elementNodesArray,
        self.elementQuadratureWeights[('u',0)],
        self.u[0].femSpace.psi,
        self.u[0].femSpace.grad_psi,
        self.u[0].femSpace.psi,
        self.u[0].femSpace.grad_psi,
        #element boundary
        self.u[0].femSpace.elementMaps.psi_trace,
        self.u[0].femSpace.elementMaps.grad_psi_trace,
        self.elementBoundaryQuadratureWeights[('u',0)],
        self.u[0].femSpace.psi_trace,
        self.u[0].femSpace.grad_psi_trace,
        self.u[0].femSpace.psi_trace,
        self.u[0].femSpace.grad_psi_trace,
        self.u[0].femSpace.elementMaps.boundaryNormals,
        self.u[0].femSpace.elementMaps.boundaryJacobians,
        #physics
        self.mesh.nElements_global,
        self.coefficients.useMetrics,
        alpha_bdf,
        self.coefficients.epsFact,
        self.coefficients.backgroundDiffusionFactor,
        self.coefficients.weakDirichletFactor,
        self.freezeLevelSet,
        useTimeIntegration,
        self.shockCapturing.lag,
        self.stabilization.lag,#0 nothing lagged
                               #1 dH lagged in tau calc
                               #2 dH lagged in tau and pdeResidual, Lstar*w calculations
        self.shockCapturing.shockCapturingFactor,
        self.u[0].femSpace.dofMap.l2g,
        self.elementDiameter,#self.mesh.elementDiametersArray,
        self.mesh.nodeDiametersArray,
        self.u[0].dof,
        self.coefficients.q_u0,
        self.timeIntegration.m_tmp[0],
        self.q[('u',0)],
        self.q[('grad(u)',0)],
        self.q[('dH',0,0)],
        self.u_dof_last,
        beta_bdf[0],
        self.q[('dH_sge',0,0)],
        self.q[('cfl',0)],
        self.shockCapturing.numDiff[0],
        self.shockCapturing.numDiff_last[0],
        self.weakDirichletConditionFlags,
        self.offset[0],self.stride[0],
        r,
        self.mesh.nExteriorElementBoundaries_global,
        self.mesh.exteriorElementBoundariesArray,
        self.mesh.elementBoundaryElementsArray,
        self.mesh.elementBoundaryLocalElementBoundariesArray,
        self.coefficients.ebqe_u0,
        self.numericalFlux.isDOFBoundary[0],
        self.numericalFlux.ebqe[('u',0)],
        self.ebqe[('u',0)],
        self.ebqe[('grad(u)',0)] )
    if self.stabilization:
        self.stabilization.accumulateSubgridMassHistory(self.q)
    logEvent("Global residual", level=9, data=r)
    # mwf decide if this is reasonable for keeping solver statistics
    self.nonlinear_function_evaluations += 1
    if self.globalResidualDummy is None:
        self.globalResidualDummy = numpy.zeros(r.shape, 'd')
def getJacobian(self, jacobian):
    """
    Assemble the global Jacobian, dispatching to the optimized kernel
    unless the module-level debugRDLS flag forces the generic
    OneLevelTransport implementation.
    """
    if debugRDLS:
        OneLevelTransport.getJacobian(self, jacobian)
    else:
        return self.getJacobianNew(jacobian)
def getJacobianNew(self, jacobian):
    """
    Assemble the global Jacobian via the compiled RDLS kernel.

    Zeroes the CSR storage, selects the time integration coefficients
    (mirroring getResidual), and delegates assembly to
    ``self.rdls.calculateJacobian``.

    :param jacobian: CSR Jacobian matrix; overwritten in place
    """
    cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,
                                   jacobian)
    # for now force time integration
    useTimeIntegration = 1
    if self.timeIntegration.__class__ == TimeIntegration.NoIntegration or not self.timeTerm:
        useTimeIntegration = 0
    if useTimeIntegration:
        alpha_bdf = self.timeIntegration.alpha_bdf
        beta_bdf = self.timeIntegration.beta_bdf
    else:
        # steady/pseudo-transient path: reuse dt and the last mass term
        alpha_bdf = self.timeIntegration.dt
        beta_bdf = self.timeIntegration.m_last
    # Argument order is fixed by the C++ API -- do not reorder.
    self.rdls.calculateJacobian(#element
        self.u[0].femSpace.elementMaps.psi,
        self.u[0].femSpace.elementMaps.grad_psi,
        self.mesh.nodeArray,
        self.mesh.elementNodesArray,
        self.elementQuadratureWeights[('u',0)],
        self.u[0].femSpace.psi,
        self.u[0].femSpace.grad_psi,
        self.u[0].femSpace.psi,
        self.u[0].femSpace.grad_psi,
        #element boundary
        self.u[0].femSpace.elementMaps.psi_trace,
        self.u[0].femSpace.elementMaps.grad_psi_trace,
        self.elementBoundaryQuadratureWeights[('u',0)],
        self.u[0].femSpace.psi_trace,
        self.u[0].femSpace.grad_psi_trace,
        self.u[0].femSpace.psi_trace,
        self.u[0].femSpace.grad_psi_trace,
        self.u[0].femSpace.elementMaps.boundaryNormals,
        self.u[0].femSpace.elementMaps.boundaryJacobians,
        self.mesh.nElements_global,
        self.coefficients.useMetrics,
        alpha_bdf,
        self.coefficients.epsFact,
        self.coefficients.backgroundDiffusionFactor,
        self.coefficients.weakDirichletFactor,
        self.freezeLevelSet,
        useTimeIntegration,
        self.shockCapturing.lag,
        self.stabilization.lag,
        self.shockCapturing.shockCapturingFactor,
        self.u[0].femSpace.dofMap.l2g,
        self.elementDiameter,#self.mesh.elementDiametersArray,
        self.mesh.nodeDiametersArray,
        self.u[0].dof,
        self.u_dof_last,
        self.coefficients.q_u0,
        beta_bdf[0],
        self.q[('dH_sge',0,0)],
        self.q[('cfl',0)],
        self.shockCapturing.numDiff[0],
        self.shockCapturing.numDiff_last[0],
        self.weakDirichletConditionFlags,
        self.csrRowIndeces[(0,0)],self.csrColumnOffsets[(0,0)],
        jacobian,
        self.mesh.nExteriorElementBoundaries_global,
        self.mesh.exteriorElementBoundariesArray,
        self.mesh.elementBoundaryElementsArray,
        self.mesh.elementBoundaryLocalElementBoundariesArray,
        self.coefficients.ebqe_u0,
        self.numericalFlux.isDOFBoundary[0],
        self.numericalFlux.ebqe[('u',0)],
        self.csrColumnOffsets_eb[(0,0)])
    logEvent("Jacobian ", level=10, data=jacobian)
    #mwf decide if this is
di=None, # Diameter,
epsilon = 0.0006,
):
#Check types and converto to np.ndarray
assert isinstance(pressure,(int,float,np.ndarray,np.float64,np.int64))
pressure = np.atleast_1d(pressure)
assert isinstance(temperature,(int,float,np.ndarray,np.float64,np.int64))
temperature = np.atleast_1d(temperature)
assert isinstance(liquid_rate,(int,float,np.ndarray,np.float64,np.int64))
liquid_rate = np.atleast_1d(liquid_rate)
assert isinstance(gas_rate,(int,float,np.ndarray,np.float64,np.int64))
gas_rate = np.atleast_1d(gas_rate)
assert isinstance(ten_liquid,(int,float,np.ndarray,np.float64,np.int64))
ten_liquid = np.atleast_1d(ten_liquid)
assert isinstance(rho_liquid,(int,float,np.ndarray,np.float64,np.int64))
rho_liquid = np.atleast_1d(rho_liquid)
assert isinstance(rho_gas,(int,float,np.ndarray,np.float64,np.int64))
rho_gas = np.atleast_1d(rho_gas)
assert isinstance(mu_liquid,(int,float,np.ndarray,np.float64,np.int64))
mu_liquid = np.atleast_1d(mu_liquid)
assert isinstance(mu_gas,(int,float,np.ndarray,np.float64,np.int64))
mu_gas = np.atleast_1d(mu_gas)
assert isinstance(z,(int,float,np.ndarray,np.float64,np.int64))
z = np.atleast_1d(z)
assert isinstance(di,(int,float,np.ndarray,np.float64,np.int64))
di = np.atleast_1d(di)
assert isinstance(epsilon,(int,float,np.ndarray,np.float64,np.int64))
epsilon = np.atleast_1d(epsilon)
area = np.power((di*0.5)/12,2)*np.pi
usl = (liquid_rate * 5.615)/(area * 86400)
usg = (4*gas_rate*1000*z*(460+temperature)*14.7)/(86400*pressure*520*np.pi*np.power(di/12,2))
#Total velocity
um = usl + usg
#Lambda liquid
lambda_l = usl / um
# Rho m
rho_m = lambda_l*rho_liquid + (1 - lambda_l) * rho_gas
#Calculate N
n1 = (np.power(rho_m,2)*np.power(um,4))/(32.172*6.85e-5*ten_liquid*(rho_liquid-rho_gas))
n2 = (32.172 * np.power(di/12,2)*(rho_liquid-rho_gas))/(ten_liquid*6.85e-5)
#N3
rv = usl / usg
n3 = 0.0814 * (1 - 0.0554 * np.log(1+((730*rv)/(rv + 1))))
#Liquid Holdup
fl = -2.314 * np.power(n1*(1+(205/n2)),n3)
yl = 1 - (1-lambda_l)*(1 - np.exp(fl))
#Rho avg
rho_avg = yl*rho_liquid + (1-yl)*rho_gas
#potential energy
ppe = rho_avg / 144
# Absolute Roughness
k = epsilon * (di/12)
ko = (0.285*ten_liquid)/(rho_m * np.power(um,2))
if rv >= 0.007:
ke = ko
else:
ke = k + rv*((ko - k)/0.007)
epsilon_relative = ke / (di/12)
#Friction Factor
nre = np.power(10,7)
ff = np.power((1/(-4*np.log10((epsilon_relative/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon_relative,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)
#ppf
ppf = (2*ff*rho_m*np.power(um,2))/(32.172 * (di/12) * 144)
pressure_gradient = ppe + ppf
return pressure_gradient
def two_phase_pressure_profile(
    depth = None,
    thp = None,
    liquid_rate = None,
    oil_rate = None,
    gas_rate = None,
    glr = None,
    gor = None,
    bsw = None,
    oil_obj = None,
    gas_obj = None,
    water_obj = None,
    epsilon=0.0006,
    surface_temperature=80,
    temperature_gradient=1,
    di=2.99,
    tol=0.02,
    max_iter = 20,
    method = 'hagedorn_brown',
    min_glr = 10
):
    """
    March a flowing-pressure profile down a well using a two-phase
    vertical-flow correlation.

    Starting from the tubing-head pressure ``thp`` at ``depth[0]``, the
    pressure gradient at each depth step is solved by fixed-point iteration:
    PVT properties are interpolated at a guessed pressure, the gradient is
    re-evaluated with the selected correlation, and the guess is updated
    until the relative change is below ``tol`` or ``max_iter`` is reached.

    :param depth: 1-D array of measured depths [ft], shallowest first
    :param thp: tubing-head pressure [psi] (scalar)
    :param liquid_rate: liquid rate [bbl/d]; one of liquid_rate/oil_rate required
    :param oil_rate: oil rate [bbl/d]
    :param gas_rate: gas rate [kscfd]; one of gas_rate/gor/glr required
    :param glr: gas-liquid ratio [scf/bbl]
    :param gor: gas-oil ratio [scf/bbl]
    :param bsw: water cut, fraction (scalar)
    :param oil_obj, gas_obj, water_obj: fluid objects with interpolatable pvt tables
    :param epsilon: pipe roughness
    :param surface_temperature: surface temperature [F]
    :param temperature_gradient: temperature gradient [F/100 ft]
    :param di: tubing inner diameter [in], scalar or per-depth array
    :param tol: relative tolerance on the pressure gradient
    :param max_iter: maximum fixed-point iterations per depth step
    :param method: 'hagedorn_brown' or 'gray' two-phase correlation
    :param min_glr: free-GLR threshold [scf/bbl] below which the flow is
        treated as single-phase liquid
    :return: (DataFrame indexed by depth with pressure/gradient/free gas/
        temperature/iterations columns, bottom-hole flowing pressure pwf)
    :raises ValueError: if ``method`` is not a supported correlation name
    """
    # Assert the right types and shapes for input
    assert isinstance(depth, (np.ndarray,pd.Series,list))
    depth = np.atleast_1d(depth)
    assert depth.ndim == 1
    assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'
    thp = np.atleast_1d(thp)
    assert thp.shape == (1,)
    if oil_rate is not None:
        assert isinstance(oil_rate, (int,np.int64,np.float64,float,np.ndarray)), f'{type(oil_rate)} not accepted'
        oil_rate = np.atleast_1d(oil_rate)
        assert oil_rate.shape == (1,)
    if liquid_rate is not None:
        assert isinstance(liquid_rate, (int,np.int64,np.float64,float,np.ndarray)), f'{type(liquid_rate)} not accepted'
        liquid_rate = np.atleast_1d(liquid_rate)
        assert liquid_rate.shape == (1,)
    assert any([oil_rate is not None,liquid_rate is not None])
    if gas_rate is not None:
        assert isinstance(gas_rate, (int,np.int64,np.float64,float,np.ndarray)), f'{type(gas_rate)} not accepted'
        gas_rate = np.atleast_1d(gas_rate)
        assert gas_rate.shape == (1,)
    if gor is not None:
        assert isinstance(gor, (int,np.int64,np.float64,float,np.ndarray)), f'{type(gor)} not accepted'
        gor = np.atleast_1d(gor)
        assert gor.shape == (1,)
    if glr is not None:
        assert isinstance(glr, (int,np.int64,np.float64,float,np.ndarray)), f'{type(glr)} not accepted'
        glr = np.atleast_1d(glr)
        assert glr.shape == (1,)
    assert any([gas_rate is not None,gor is not None,glr is not None])
    assert isinstance(bsw, (int,np.int64,np.float64,float,np.ndarray)), f'{type(bsw)} not accepted'
    bsw = np.atleast_1d(bsw)
    assert bsw.shape == (1,)
    assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None
    assert isinstance(oil_obj,Oil) and oil_obj.pvt is not None
    assert isinstance(water_obj,Water) and water_obj.pvt is not None
    if isinstance(di,(np.ndarray,pd.Series,list)):
        di = np.atleast_1d(di)
        assert di.shape == depth.shape
    elif isinstance(di,(int,float)):
        di = np.full(depth.shape,di)
    assert isinstance(epsilon, (int,np.int64,np.float64,float,np.ndarray)), f'{type(epsilon)} not accepted'
    epsilon = np.atleast_1d(epsilon)
    assert epsilon.shape == (1,)
    assert isinstance(surface_temperature,(int,float,np.ndarray))
    surface_temperature = np.atleast_1d(surface_temperature)
    assert isinstance(temperature_gradient,(int,float,np.ndarray))
    temperature_gradient = np.atleast_1d(temperature_gradient)

    # Derive the missing rates from water cut and gas ratios
    if liquid_rate is None:
        liquid_rate = oil_rate / (1-bsw)
    else:
        oil_rate = liquid_rate*(1-bsw)
    if gas_rate is None:
        if gor is None:
            gas_rate = glr * liquid_rate * 1e-3
        else:
            gas_rate = gor * oil_rate * 1e-3

    pressure_profile = np.zeros(depth.shape)
    pressure_profile[0] = thp
    pressure_gradient = np.zeros(depth.shape)
    iterations = np.zeros(depth.shape)
    free_gas_rate = np.zeros(depth.shape)
    glr = np.zeros(depth.shape)  # per-depth free GLR (shadows the input, already consumed)
    temperature_profile = np.abs(depth[0] - depth) * (temperature_gradient/100) + surface_temperature

    # Initial liquid density at surface conditions -> starting gradient
    rho_oil_i = oil_obj.pvt.interpolate(thp,property = 'rhoo').iloc[0,0]
    rho_water_i = water_obj.pvt.interpolate(thp,property = 'rhow').iloc[0,0]
    rho_l = rho_oil_i * (1-bsw) + rho_water_i * bsw
    pressure_gradient[0] = rho_l * (0.433/62.4)

    for i in range(1,depth.shape[0]):
        err = tol + 0.01  # guarantees at least one iteration
        it = 0
        grad_guess = pressure_gradient[i-1]
        while err >= tol and it <= max_iter:
            p_guess = grad_guess * np.abs(depth[i] - depth[i-1]) + pressure_profile[i-1]
            # Interpolate pvt properties at the guessed pressure
            gas_pvt_guess = gas_obj.pvt.interpolate(p_guess)
            oil_pvt_guess = oil_obj.pvt.interpolate(p_guess)
            water_pvt_guess = water_obj.pvt.interpolate(p_guess)
            # Volume-weighted liquid-phase properties
            ten_liquid = oil_pvt_guess['tension'].iloc[0] * (1-bsw) + water_pvt_guess['tension'].iloc[0] * bsw
            rho_liquid = oil_pvt_guess['rhoo'].iloc[0] * (1-bsw) + water_pvt_guess['rhow'].iloc[0] * bsw
            mu_liquid = oil_pvt_guess['muo'].iloc[0] * (1-bsw) + water_pvt_guess['muw'].iloc[0] * bsw
            # Real-gas density from the gas law (28.97 = air MW, 10.73 = R in field units)
            rho_gas = (28.97 * gas_obj.sg * p_guess)/(gas_pvt_guess['z'].iloc[0]*10.73*(temperature_profile[i]+460))
            mu_gas = gas_pvt_guess['mug'].iloc[0]
            z = gas_pvt_guess['z'].iloc[0]
            # Free gas = total gas minus gas dissolved in the oil (rs)
            free_gas = gas_rate - (oil_pvt_guess['rs'].iloc[0]*oil_rate*1e-3)
            free_gas = 0 if free_gas < 0 else free_gas
            glr_ratio = free_gas*1e3 / liquid_rate
            # Bug fix: use the min_glr parameter (was a hard-coded 10)
            if glr_ratio > min_glr:
                if method == 'hagedorn_brown':
                    grad_new = hb_correlation(
                        pressure=p_guess,
                        temperature=temperature_profile[i],
                        liquid_rate = liquid_rate,
                        gas_rate = free_gas,
                        ten_liquid = ten_liquid,
                        rho_liquid = rho_liquid,
                        rho_gas = rho_gas,
                        mu_liquid = mu_liquid,
                        mu_gas = mu_gas,
                        z = z,
                        di = di[i],
                        epsilon = epsilon,
                    )
                #elif method == 'beggs_brill':
                #    grad_new = bb_correlation()
                elif method == 'gray':
                    grad_new = gray_correlation(
                        pressure=p_guess, #Pressure [psi]
                        temperature=temperature_profile[i], #Temperature [F]
                        liquid_rate=liquid_rate, # Liquid Flow [bbl/d]
                        gas_rate=free_gas, # gas flow [kscfd]
                        ten_liquid=ten_liquid, #Surface tension dyne/cm2
                        rho_liquid=rho_liquid, # density lb/ft3
                        rho_gas=rho_gas, # density lb/ft3
                        mu_liquid=mu_liquid, # Viscosity [cp]
                        mu_gas=mu_gas, # Viscosity [cp]
                        z=z, # Gas compressibility Factor
                        di=di[i], # Diameter,
                        epsilon = epsilon
                    )
                else:
                    # Previously an unknown method crashed with NameError
                    raise ValueError(f"method '{method}' is not a supported correlation")
            else:
                # Negligible free gas: fall back to single-phase liquid hydraulics
                df, _ = one_phase_pressure_profile(
                    p1=p_guess,
                    ge=rho_liquid /62.4,
                    epsilon=epsilon,
                    md=[depth[i], depth[i-1]],
                    tvd=[depth[i], depth[i-1]],
                    d = [di[i], di[i]],
                    rate = liquid_rate,
                    mu=mu_liquid
                )
                grad_new = df['gradient'].iloc[-1]
            err = abs(grad_guess-grad_new)/grad_new
            grad_guess = grad_new
            it += 1
        pressure_gradient[i] = grad_new
        pressure_profile[i] = p_guess
        free_gas_rate[i] = free_gas
        glr[i] = glr_ratio
        iterations[i] = it

    df_dict = {
        'pressure':pressure_profile,
        'pressure_gradient': pressure_gradient,
        'free_gas_rate': free_gas_rate,
        'temperature': temperature_profile,
        'iterations': iterations,
        'grl': glr  # NOTE(review): 'grl' looks like a typo for 'glr'; kept so existing callers don't break
    }
    df = pd.DataFrame(df_dict, index = depth)
    pwf = pressure_profile[-1]
    return df, pwf
def two_phase_upward_pressure(
    depth = None,
    pwf = None,
    liquid_rate = None,
    oil_rate = None,
    gas_rate = None,
    glr = None,
    gor = None,
    bsw = None,
    oil_obj = None,
    gas_obj = None,
    water_obj = None,
    epsilon=0.0006,
    surface_temperature=80,
    temperature_gradient=1,
    di=2.99,
    tol=0.02,
    max_iter = 20,
    method = 'hagedorn_brown',
    guess=None,
    grad_guess = (0.41, 0.38)  # tuple: avoids the mutable-default pitfall
):
    """
    Solve for the tubing-head pressure that reproduces a target bottom-hole
    flowing pressure ``pwf``.

    Wraps :func:`two_phase_pressure_profile` in a secant root find on the
    residual ``pwf - pwf(thp)``.

    :param depth: 1-D array of measured depths [ft], shallowest first
    :param pwf: target bottom-hole flowing pressure [psi]
    :param guess: optional pair of initial thp guesses; if None, two guesses
        are built from ``grad_guess`` average pressure gradients [psi/ft]
    :param grad_guess: assumed average gradients used to seed the solver
    (remaining parameters are forwarded unchanged to
    two_phase_pressure_profile)
    :return: the solved tubing-head pressure [psi]
    """
    if guess is None:
        # Seed the secant method with thp estimates from assumed gradients
        grad = np.atleast_1d(grad_guess)
        delta_h = np.abs(depth[-1] - depth[0])
        guess = pwf - grad * delta_h
    else:
        assert isinstance(guess,(list,np.ndarray))
        guess = np.atleast_1d(guess)

    def solve(x):
        # Residual: target pwf minus the pwf computed with thp = x
        _, _pwf = two_phase_pressure_profile(
            depth = depth,
            thp = x,
            liquid_rate = liquid_rate,
            oil_rate = oil_rate,
            gas_rate = gas_rate,
            glr = glr,
            gor = gor,
            bsw = bsw,
            oil_obj = oil_obj,
            gas_obj = gas_obj,
            water_obj = water_obj,
            epsilon=epsilon,
            surface_temperature=surface_temperature,
            temperature_gradient=temperature_gradient,
            di=di,
            tol=tol,
            max_iter = max_iter,
            method = method
        )
        return pwf - _pwf

    # x0/x1 seeds select scipy's secant iteration
    sol = root_scalar(solve, x0=guess[0], x1=guess[1])
    return sol.root
def two_phase_outflow_curve(
depth = None,
thp = None,
liquid_rate = None,
oil_rate = None,
gas_rate = None,
glr = None,
gor = None,
bsw = None,
oil_obj = None,
gas_obj = None,
water_obj = None,
epsilon=0.0006,
surface_temperature=80,
temperature_gradient=1,
di=2.99,
tol=0.02,
max_iter = 20,
method = 'hagedorn_brown',
use_gas = False,
operating_point = None,
op_n = 30
):
# Assert the right types and shapes for input
assert isinstance(depth, (np.ndarray,pd.Series,list))
depth = np.atleast_1d(depth)
assert depth.ndim == 1
assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'
thp = np.atleast_1d(thp)
assert thp.ndim == 1
if oil_rate is not None:
assert isinstance(oil_rate, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'
oil_rate = np.atleast_1d(oil_rate)
assert oil_rate.ndim == 1
if liquid_rate is not None:
assert isinstance(liquid_rate, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'
liquid_rate = np.atleast_1d(liquid_rate)
assert liquid_rate.ndim == 1
assert any([oil_rate is not None,liquid_rate is not None])
if gas_rate is not None:
assert isinstance(gas_rate, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'
gas_rate = np.atleast_1d(gas_rate)
assert gas_rate.ndim == 1
if gor is not None:
assert isinstance(gor, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'
gor = np.atleast_1d(gor)
assert gor.ndim == 1
if glr is not None:
assert isinstance(glr, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'
glr = np.atleast_1d(glr)
assert glr.ndim == 1
assert any([gas_rate is not None,gor is not None,glr is not None])
assert isinstance(bsw, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'
bsw = np.atleast_1d(bsw)
assert bsw.ndim == 1
assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None
assert isinstance(oil_obj,Oil) and oil_obj.pvt is not None
assert isinstance(water_obj,Water) and water_obj.pvt is not None
if isinstance(di,(np.ndarray,list)):
di = np.atleast_2d(di)
assert di.shape[0] == depth.shape[0]
elif isinstance(di,(int,float)):
di = np.full((depth.shape[0],1),di)
assert isinstance(epsilon, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'
epsilon = np.atleast_1d(epsilon)
assert epsilon.shape | |
"writeonly" pipes may only send packets from client to service.
See ``MemberDefinition_Direction`` constants for possible return values.
:rtype: int
"""
return self.__innerpipe.Direction()
@property
def RequestPacketAck(self):
    """
    Get or set whether this pipe endpoint requests packet acks.

    Receiving endpoints generate packet acks to inform the sender that a
    packet has been received; the ack carries the packet index (the
    packet's sequence number). PipeBroadcaster uses packet acks for
    flow control.

    :rtype: bool
    """
    return self.__innerpipe.GetRequestPacketAck()

@RequestPacketAck.setter
def RequestPacketAck(self, value):
    self.__innerpipe.SetRequestPacketAck(value)
@property
def IgnoreReceived(self):
    """
    Get or set whether incoming packets are ignored.

    When True the endpoint discards incoming packets instead of adding
    them to the receive queue.

    :rtype: bool
    """
    return self.__innerpipe.GetIgnoreReceived()

@IgnoreReceived.setter
def IgnoreReceived(self, value):
    self.__innerpipe.SetIgnoreReceived(value)
def AsyncClose(self, handler, timeout=2):
    """
    Asynchronously close the pipe endpoint.

    Same as Close() but completes asynchronously. If ``handler`` is None,
    an awaitable future is returned instead.

    :param handler: Completion callback, invoked with a possible exception
    :type handler: Callable[[Exception],None]
    :param timeout: Timeout in seconds, or -1 for no timeout
    :type timeout: float
    """
    close_args = (adjust_timeout(timeout),)
    return async_call(self.__innerpipe.AsyncClose, close_args, AsyncVoidReturnDirectorImpl, handler)
def AsyncSendPacket(self, packet, handler):
    """
    Asynchronously send a packet to the peer endpoint.

    Same as SendPacket() but completes asynchronously. If ``handler`` is
    None, an awaitable future is returned instead.

    :param packet: The packet to send
    :param handler: Callback receiving the sent packet number or an exception
    :type handler: Callable[[Exception],None]
    """
    element = PackMessageElement(packet, self.__type, self.__obj, self.__innerpipe.GetNode())
    return async_call(self.__innerpipe.AsyncSendPacket, (element,), AsyncUInt32ReturnDirectorImpl, handler)
def ReceivePacket(self):
    """
    Remove and return the next packet from the receive queue.

    Throws an InvalidOperationException when the queue is empty; use
    ReceivePacketWait() to block until a packet arrives.

    :return: The received packet
    """
    element = self.__innerpipe.ReceivePacket()
    return UnpackMessageElement(element, self.__type, self.__obj, self.__innerpipe.GetNode())
def PeekNextPacket(self):
    """
    Return the next packet in the receive queue without removing it.

    Throws an InvalidOperationException when the receive queue is empty.

    :return: The next packet in the receive queue
    """
    element = self.__innerpipe.PeekNextPacket()
    return UnpackMessageElement(element, self.__type, self.__obj, self.__innerpipe.GetNode())
def TryReceivePacketWait(self, timeout=RobotRaconteurPython.RR_TIMEOUT_INFINITE, peek=False):
    """
    Try to receive a packet, returning a success flag instead of raising.

    :param timeout: Timeout in seconds. NOTE(review): currently ignored
        (see TODO below) -- the call does not block.
    :type timeout: float
    :param peek: If True, leave the packet in the receive queue
    :type peek: bool
    :return: Tuple of (success flag, unpacked packet)
    """
    #TODO: Add timeout back
    m=RobotRaconteurPython.MessageElement()
    r=self.__innerpipe.TryReceivePacket(m, peek)
    # NOTE(review): the element is unpacked even when r is False --
    # presumably UnpackMessageElement tolerates an empty element; confirm.
    return (r, UnpackMessageElement(m,self.__type,self.__obj,self.__innerpipe.GetNode()))
@property
def PacketReceivedEvent(self):
    """
    Event hook fired when packets are received by this endpoint.

    Add handlers with ``+=``::

        def my_handler(ep):
            # Receive packets
            while ep.Available > 0:
                packet = ep.ReceivePacket()
                # Do something with packet

        my_endpoint.PacketReceivedEvent += my_handler

    Handler signature: ``Callable[[RobotRaconteur.PipeEndpoint],None]``
    """
    return self._PacketReceivedEvent

@PacketReceivedEvent.setter
def PacketReceivedEvent(self, evt):
    # Only "+=" on the existing hook (which rebinds the same object) is
    # permitted; replacing the hook outright is an error.
    if evt is not self._PacketReceivedEvent:
        raise RuntimeError("Invalid operation")
@property
def PacketAckReceivedEvent(self):
    """
    Event hook fired when packet acks are received by this endpoint.

    Add handlers with ``+=``::

        def my_ack_handler(ep, packet_num):
            # Do something with packet_num info
            pass

        my_endpoint.PacketAckReceivedEvent += my_ack_handler

    Handler signature: ``Callable[[RobotRaconteur.PipeEndpoint,T],None]``
    """
    return self._PacketAckReceivedEvent

@PacketAckReceivedEvent.setter
def PacketAckReceivedEvent(self, evt):
    # Only "+=" on the existing hook (which rebinds the same object) is
    # permitted; replacing the hook outright is an error.
    if evt is not self._PacketAckReceivedEvent:
        raise RuntimeError("Invalid operation")
def GetNode(self):
return self.__innerpipe.GetNode()
class PipeEndpointDirector(RobotRaconteurPython.WrappedPipeEndpointDirector):
    """Director that forwards native pipe endpoint events (close, packet
    received, packet-ack received) to the wrapping Python ``PipeEndpoint``'s
    callback and event hooks."""
    def __init__(self,endpoint):
        self.__endpoint=endpoint
        super(PipeEndpointDirector, self).__init__()
    def PipeEndpointClosedCallback(self):
        # Forward only if the user registered a close callback
        # (fixed ``not x is None`` to the idiomatic ``x is not None``)
        if (self.__endpoint.PipeEndpointClosedCallback is not None):
            self.__endpoint.PipeEndpointClosedCallback(self.__endpoint)
    def PacketReceivedEvent(self):
        # Fire the EventHook; handlers receive the wrapping endpoint
        self.__endpoint.PacketReceivedEvent.fire(self.__endpoint)
    def PacketAckReceivedEvent(self,packetnum):
        self.__endpoint.PacketAckReceivedEvent.fire(self.__endpoint,packetnum)
class PipeAsyncConnectHandlerImpl(RobotRaconteurPython.AsyncPipeEndpointReturnDirector):
    """Director that completes an AsyncConnect call: wraps the returned native
    endpoint in a Python ``PipeEndpoint`` and invokes the user handler."""
    def __init__(self,handler,innerpipe,obj):
        # handler: user completion callback (endpoint, exception)
        # innerpipe/obj: needed to wrap the native endpoint on success
        super(PipeAsyncConnectHandlerImpl,self).__init__()
        self._handler=handler
        self.__innerpipe=innerpipe
        self.__obj=obj
    def handler(self, innerendpoint, error_info):
        """Called by the native layer when the connect attempt finishes."""
        if (error_info.error_code!=0):
            # Connect failed: translate the native error info into a Python
            # exception and report it to the user handler.
            err=RobotRaconteurPythonError.RobotRaconteurExceptionUtil.ErrorInfoToException(error_info)
            self._handler(None,err)
            return
        try:
            outerendpoint=PipeEndpoint(innerendpoint,self.__innerpipe.Type,self.__obj)
            director=PipeEndpointDirector(outerendpoint)
            innerendpoint.SetRRDirector(director,0)
            # __disown__: ownership of the director transfers to the native layer
            director.__disown__()
        except Exception as err2:
            self._handler(None, err2)
            return
        self._handler(outerendpoint, None)
class Pipe(object):
    """
    Pipe()

    "pipe" member type interface

    The Pipe class implements the "pipe" member type. Pipes are declared in service definition files
    using the "pipe" keyword within object declarations. Pipes provide reliable packet streaming between
    clients and services. They work by creating pipe endpoint pairs (peers), with one endpoint in the client,
    and one in the service. Packets are transmitted between endpoint pairs. Packets sent by one endpoint are received
    by the other, where they are placed in a receive queue. Received packets can then be retrieved from the receive queue.

    Pipe endpoints are created by the client using the Connect() or AsyncConnect() functions. Services receive
    incoming connection requests through a callback function. This callback is configured using the PipeConnectCallback
    property. Services may also use the PipeBroadcaster class to automate managing pipe endpoint lifecycles and
    sending packets to all connected client endpoints. If the PipeConnectCallback function is used, the service
    is responsible for keeping track of endpoints as they connect and disconnect. See PipeEndpoint for details
    on sending and receiving packets.

    Pipe endpoints are *indexed*, meaning that more than one endpoint pair can be created between the client and the service.

    Pipes may be *unreliable*, meaning that packets may arrive out of order or be dropped. Use IsUnreliable to check for
    unreliable pipes. The member modifier `unreliable` is used to specify that a pipe should be unreliable.

    Pipes may be declared *readonly* or *writeonly*. If neither is specified, the pipe is assumed to be full duplex. *readonly*
    pipes may only send packets from service to client. *writeonly* pipes may only send packets from client to service. Use
    Direction to determine the direction of the pipe.

    The PipeBroadcaster is often used to simplify the use of Pipes. See PipeBroadcaster for more information.

    This class is instantiated by the node. It should not be instantiated by the user.
    """
    __slots__ = ["_innerpipe", "_obj","__weakref__"]
    def __init__(self,innerpipe,obj=None):
        # innerpipe: wrapped native pipe; obj: optional owning service object
        self._innerpipe=innerpipe
        self._obj=obj
    def AsyncConnect(self,*args):
        """
        AsyncConnect(index,handler,timeout=-1)

        Asynchronously connect a pipe endpoint.

        Same as Connect(), but returns asynchronously.

        Only valid on clients. Will throw InvalidOperationException on the service side.
        If ``handler`` is None, returns an awaitable future.

        :param index: The index of the pipe endpoint, or -1 to automatically select an index
        :type index: int
        :param handler: A handler function to receive the connected endpoint, or an exception
        :type handler: Callable[[PipeEndpoint,Exception],None]
        :param timeout: Timeout in seconds, or -1 for no timeout
        """
        # The leading index argument is optional; dispatch on whether the
        # first positional argument is a number (index) or the handler.
        if (isinstance(args[0], numbers.Number)):
            index=args[0]
            handler=args[1]
            if (len(args)>=3):
                timeout=args[2]
            else:
                timeout=RobotRaconteurPython.RR_TIMEOUT_INFINITE
        else:
            # No index supplied: -1 means auto-select an index
            index=-1
            handler=args[0]
            if (len(args)>=2):
                timeout=args[1]
            else:
                timeout=RobotRaconteurPython.RR_TIMEOUT_INFINITE
        return async_call(self._innerpipe.AsyncConnect,(index,adjust_timeout(timeout)),PipeAsyncConnectHandlerImpl,handler,directorargs=(self._innerpipe,self._obj))
    @property
    def MemberName(self):
        """
        Get the member name of the pipe

        :rtype: str
        """
        return self._innerpipe.GetMemberName()
    @property
    def Direction(self):
        """
        The direction of the pipe

        Pipes may be declared "readonly" or "writeonly" in the service definition file. (If neither
        is specified, the pipe is assumed to be full duplex.) "readonly" pipes may only send packets from
        service to client. "writeonly" pipes may only send packets from client to service.

        See ``MemberDefinition_Direction`` constants for possible return values.

        :rtype: int
        """
        return self._innerpipe.Direction()
class WireConnection(object):
"""
WireConnection()
Wire connection used to transmit "most recent" values
Wire connections are used to transmit "most recent" values between connected
wire members. See Wire for more information on wire members.
Wire connections are created by clients using the Wire.Connect() or Wire.AsyncConnect()
functions. Services receive incoming wire connection requests through a
callback function specified using the Wire.WireConnectCallback property. Services
may also use the WireBroadcaster class to automate managing wire connection lifecycles and
sending values to all connected clients, or use WireUnicastReceiver to receive an incoming
value from the most recently connected client.
Wire connections are used to transmit "most recent" values between clients and services. Connection
the wire creates a connection pair, one in the client, and one in the service. Each wire connection
object has an InValue and an OutValue. Setting the OutValue of one will cause the specified value to
be transmitted to the InValue of the peer. See Wire for more information.
Values can optionally be specified to have a finite lifespan using InValueLifespan and
OutValueLifespan. Lifespans can be used to prevent using old values that have
not been recently updated.
This class is instantiated by the Wire class. It should not be instantiated
by the user.
"""
__slots__ = ["__innerwire", "__type", "WireConnectionClosedCallback", "_WireValueChanged", "__obj","__weakref__"]
    def __init__(self,innerwire, type, obj=None):
        # innerwire: wrapped native wire connection
        # type: Robot Raconteur type of the wire's value
        # obj: optional owning object (may be None)
        self.__innerwire=innerwire
        self.__type=type
        # user-assignable close callback: Callable[[WireConnection],None]
        self.WireConnectionClosedCallback=None
        # event hook fired when a new InValue arrives
        self._WireValueChanged=EventHook()
        self.__obj=obj
@property
def Endpoint(self):
"""
Get the Robot Raconteur node Endpoint ID
Gets the endpoint associated with the ClientContext or ServerEndpoint
associated with the wire connection.
:rtype: int
"""
return | |
import itertools
def sort_unique(edges):
    """Deduplicate ``edges`` ignoring orientation, and return them as a
    sorted tuple with each edge's endpoints in ascending order
    (``coo_a < coo_b``).
    """
    unoriented = {frozenset(edge) for edge in edges}
    canonical = (tuple(sorted(pair)) for pair in unoriented)
    return tuple(sorted(canonical))
# ----------------------------------- 2D ------------------------------------ #
def check_2d(coo, Lx, Ly, cyclic):
    """Validate ``coo`` for a maybe cyclic 2D lattice: return the wrapped
    coordinate, or ``None`` if it lies outside an open boundary.
    """
    x, y = coo
    inbounds = (0 <= x < Lx) and (0 <= y < Ly)
    if cyclic or inbounds:
        return (x % Lx, y % Ly)
    return None
def edges_2d_square(Lx, Ly, cyclic=False, cells=None):
    """Return the graph edges of a finite 2D square lattice. The nodes
    (sites) are labelled like ``(i, j)``.

    Parameters
    ----------
    Lx : int
        The number of cells along the x-direction.
    Ly : int
        The number of cells along the y-direction.
    cyclic : bool, optional
        Whether to use periodic boundary conditions.
    cells : list, optional
        A list of cells to use. If not given the cells used are
        ``itertools.product(range(Lx), range(Ly))``.

    Returns
    -------
    edges : list[((int, int), (int, int))]
    """
    if cells is None:
        cells = itertools.product(range(Lx), range(Ly))

    bonds = []
    for x, y in cells:
        # only 'up' and 'right' neighbours: the other two directions are
        # generated when the neighbouring cell itself is visited
        for neighbour in ((x, y + 1), (x + 1, y)):
            wrapped = check_2d(neighbour, Lx, Ly, cyclic)
            if wrapped:
                bonds.append(((x, y), wrapped))

    return sort_unique(bonds)
def edges_2d_hexagonal(Lx, Ly, cyclic=False, cells=None):
    """Return the graph edges of a finite 2D hexagonal lattice. There are two
    sites per cell, and note the cells do not form a square tiling. The nodes
    (sites) are labelled like ``(i, j, s)`` for ``s`` in ``'AB'``.

    Parameters
    ----------
    Lx : int
        The number of cells along the x-direction.
    Ly : int
        The number of cells along the y-direction.
    cyclic : bool, optional
        Whether to use periodic boundary conditions.
    cells : list, optional
        A list of cells to use. If not given the cells used are
        ``itertools.product(range(Lx), range(Ly))``.

    Returns
    -------
    edges : list[((int, int, str), (int, int, str))]
    """
    if cells is None:
        cells = itertools.product(range(Lx), range(Ly))

    # per-sublattice neighbour offsets: (dx, dy, target sublattice)
    neighbour_offsets = {
        'A': ((0, 0, 'B'), (0, -1, 'B'), (-1, 0, 'B')),
        'B': ((0, 0, 'A'), (0, 1, 'A'), (1, 0, 'A')),
    }

    bonds = []
    for x, y in cells:
        for sub, offsets in neighbour_offsets.items():
            for dx, dy, tgt in offsets:
                wrapped = check_2d((x + dx, y + dy), Lx, Ly, cyclic)
                if wrapped:
                    bonds.append(((x, y, sub), (*wrapped, tgt)))

    return sort_unique(bonds)
def edges_2d_triangular(Lx, Ly, cyclic=False, cells=None):
    """Return the graph edges of a finite 2D triangular lattice. There is a
    single site per cell, and note the cells do not form a square tiling.
    The nodes (sites) are labelled like ``(i, j)``.

    Parameters
    ----------
    Lx : int
        The number of cells along the x-direction.
    Ly : int
        The number of cells along the y-direction.
    cyclic : bool, optional
        Whether to use periodic boundary conditions.
    cells : list, optional
        A list of cells to use. If not given the cells used are
        ``itertools.product(range(Lx), range(Ly))``.

    Returns
    -------
    edges : list[((int, int), (int, int))]
    """
    if cells is None:
        cells = itertools.product(range(Lx), range(Ly))
    edges = []
    for i, j in cells:
        # three of the six triangular neighbours; the remaining three are
        # produced when the neighbouring cells are visited
        for coob in [(i, j + 1), (i + 1, j), (i + 1, j - 1)]:
            coob = check_2d(coob, Lx, Ly, cyclic)
            if coob:
                edges.append(((i, j), coob))
    return sort_unique(edges)
def edges_2d_triangular_rectangular(Lx, Ly, cyclic=False, cells=None):
    """Return the graph edges of a finite 2D triangular lattice tiled in a
    rectangular geometry. There are two sites per rectangular cell. The nodes
    (sites) are labelled like ``(i, j, s)`` for ``s`` in ``'AB'``.

    Parameters
    ----------
    Lx : int
        The number of cells along the x-direction.
    Ly : int
        The number of cells along the y-direction.
    cyclic : bool, optional
        Whether to use periodic boundary conditions.
    cells : list, optional
        A list of cells to use. If not given the cells used are
        ``itertools.product(range(Lx), range(Ly))``.

    Returns
    -------
    edges : list[((int, int, s), (int, int, s))]
    """
    if cells is None:
        cells = itertools.product(range(Lx), range(Ly))

    # per-sublattice neighbour offsets: (dx, dy, target sublattice)
    neighbour_offsets = {
        'A': ((0, 0, 'B'), (0, -1, 'B'), (0, 1, 'A')),
        'B': ((1, 0, 'A'), (0, 1, 'B'), (1, 1, 'A')),
    }

    bonds = []
    for x, y in cells:
        for sub, offsets in neighbour_offsets.items():
            for dx, dy, tgt in offsets:
                wrapped = check_2d((x + dx, y + dy), Lx, Ly, cyclic)
                if wrapped:
                    bonds.append(((x, y, sub), (*wrapped, tgt)))

    return sort_unique(bonds)
def edges_2d_kagome(Lx, Ly, cyclic=False, cells=None):
    """Return the graph edges of a finite 2D kagome lattice. There are
    three sites per cell, and note the cells do not form a square tiling. The
    nodes (sites) are labelled like ``(i, j, s)`` for ``s`` in ``'ABC'``.

    Parameters
    ----------
    Lx : int
        The number of cells along the x-direction.
    Ly : int
        The number of cells along the y-direction.
    cyclic : bool, optional
        Whether to use periodic boundary conditions.
    cells : list, optional
        A list of cells to use. If not given the cells used are
        ``itertools.product(range(Lx), range(Ly))``.

    Returns
    -------
    edges : list[((int, int, str), (int, int, str))]
    """
    if cells is None:
        cells = itertools.product(range(Lx), range(Ly))

    # per-sublattice neighbour offsets: (dx, dy, target sublattice)
    neighbour_offsets = {
        'A': ((0, 0, 'B'), (0, -1, 'B'), (0, 0, 'C'), (-1, 0, 'C')),
        'B': ((0, 0, 'C'), (-1, 1, 'C'), (0, 0, 'A'), (0, 1, 'A')),
        'C': ((0, 0, 'A'), (1, 0, 'A'), (0, 0, 'B'), (1, -1, 'B')),
    }

    bonds = []
    for x, y in cells:
        for sub, offsets in neighbour_offsets.items():
            for dx, dy, tgt in offsets:
                wrapped = check_2d((x + dx, y + dy), Lx, Ly, cyclic)
                if wrapped:
                    bonds.append(((x, y, sub), (*wrapped, tgt)))

    return sort_unique(bonds)
# ----------------------------------- 3D ------------------------------------ #
def check_3d(coo, Lx, Ly, Lz, cyclic):
    """Validate ``coo`` for a maybe cyclic 3D lattice: return the wrapped
    coordinate, or ``None`` if it lies outside an open boundary.
    """
    x, y, z = coo
    inbounds = (0 <= x < Lx) and (0 <= y < Ly) and (0 <= z < Lz)
    if cyclic or inbounds:
        return (x % Lx, y % Ly, z % Lz)
    return None
def edges_3d_cubic(Lx, Ly, Lz, cyclic=False, cells=None):
    """Return the graph edges of a finite 3D cubic lattice. The nodes
    (sites) are labelled like ``(i, j, k)``.

    Parameters
    ----------
    Lx : int
        The number of cells along the x-direction.
    Ly : int
        The number of cells along the y-direction.
    Lz : int
        The number of cells along the z-direction.
    cyclic : bool, optional
        Whether to use periodic boundary conditions.
    cells : list, optional
        A list of cells to use. If not given the cells used are
        ``itertools.product(range(Lx), range(Ly), range(Lz))``.

    Returns
    -------
    edges : list[((int, int, int), (int, int, int))]
    """
    if cells is None:
        cells = itertools.product(range(Lx), range(Ly), range(Lz))

    bonds = []
    for x, y, z in cells:
        # +z, +y and +x neighbours only; the opposite directions are
        # produced when the neighbouring cells are visited
        for neighbour in ((x, y, z + 1), (x, y + 1, z), (x + 1, y, z)):
            wrapped = check_3d(neighbour, Lx, Ly, Lz, cyclic)
            if wrapped:
                bonds.append(((x, y, z), wrapped))

    return sort_unique(bonds)
def edges_3d_pyrochlore(Lx, Ly, Lz, cyclic=False, cells=None):
"""Return the graph edges of a finite 3D pyorchlore lattice. There are
four sites per cell, and note the cells do not form a cubic tiling. The
nodes (sites) are labelled like ``(i, j, k, s)`` for ``s`` in ``'ABCD'``.
Parameters
----------
Lx : int
The number of cells along the x-direction.
Ly : int
The number of cells along the y-direction.
Lz : int
The number of cells along the z-direction.
cyclic : bool, optional
Whether to use periodic boundary conditions.
cells : list, optional
A list of cells to use. If not given the cells used are
``itertools.product(range(Lx), range(Ly), range(Lz))``.
Returns
-------
edges : list[((int, int, int, str), (int, int, int, str))]
| |
<reponame>sahilPereira/2d-driving-simulator
import os
import pygame
from math import tan, radians, degrees, copysign, ceil
from pygame.math import Vector2
import stackelbergPlayer as SCP
from random import randrange
import random
import math
import numpy
import pandas as pd
import pickle
from argparse import ArgumentParser
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
YELLOW = (255, 255, 0)
RED = (255, 0, 0)
GREY = (210, 210 ,210)
PURPLE = (255, 0, 255)
WIDTH = 1900
HEIGHT = 240
NUM_LANES = 3
LANE_WIDTH = int(HEIGHT/NUM_LANES)
ACTION_RESET_TIME = 0.25 # time till next action
NGSIM_RESET_TIME = 0.1
ppu = 32
car_lane_ratio = 3.7/1.8
CAR_HEIGHT = int((HEIGHT/3.0)/car_lane_ratio)
CAR_WIDTH = int(CAR_HEIGHT*2)
# lane center positions
LANE_1_C = (LANE_WIDTH * 1 - (LANE_WIDTH/2))/ppu
LANE_2_C = (LANE_WIDTH * 2 - (LANE_WIDTH/2))/ppu
LANE_3_C = (LANE_WIDTH * 3 - (LANE_WIDTH/2))/ppu
NEW_LANES = [LANE_1_C, LANE_2_C, LANE_3_C]
class Car(pygame.sprite.Sprite):
    """Agent vehicle sprite with simple bicycle-style kinematics and
    discrete driving modes (accelerate / decelerate / maintain / lane
    change).  Positions and velocities are in world units (pixels / ppu);
    the sprite ``rect`` is kept in pixel coordinates for collisions."""
    def __init__(self, id, x, y, vel_x=0.0, vel_y=0.0, lane_id=1, color=RED, angle=0.0, length=4, max_steering=30, max_acceleration=5.0):
        """Create a car at world position (x, y) in lane ``lane_id``."""
        # init the sprite object
        super().__init__()
        self.id = id
        self.image = pygame.Surface([CAR_WIDTH, CAR_HEIGHT])
        self.image.fill(WHITE)
        self.image.set_colorkey(WHITE)
        # Draw the car (a rectangle!)
        pygame.draw.rect(self.image, color, [0, 0, CAR_WIDTH, CAR_HEIGHT])
        # Fetch the rectangle object that has the dimensions of the image.
        self.rect = self.image.get_rect()
        self.position = Vector2(x, y)
        self.velocity = Vector2(vel_x, vel_y)
        self.angle = angle
        # length is used as the turning-geometry denominator in update()
        self.length = length
        self.max_acceleration = max_acceleration
        self.max_steering = max_steering
        self.max_velocity = 20.0
        self.brake_deceleration = 10.0
        self.free_deceleration = 2.0
        self.acceleration = 0.0
        self.steering = 0.0
        self.angular_velocity = 0.0
        self.lane_id = lane_id
        # mutually exclusive action-mode flags driven by the controller
        self.left_mode, self.right_mode, self.do_accelerate, self.do_decelerate, self.do_maintain = False, False, False, False, False
        self.cruise_vel = 0.0
    def simCopy(self):
        """Return a detached copy of this car for forward simulation."""
        sim_car = Car(self.id, self.position.x, self.position.y, self.velocity.x, self.velocity.y, self.lane_id)
        # dynamic controls
        sim_car.acceleration = self.acceleration
        sim_car.steering = self.steering
        # action controls
        sim_car.left_mode = self.left_mode
        sim_car.right_mode = self.right_mode
        sim_car.do_accelerate = self.do_accelerate
        sim_car.do_decelerate = self.do_decelerate
        sim_car.do_maintain = self.do_maintain
        sim_car.cruise_vel = self.cruise_vel
        return sim_car
    def updateNgsim(self, dt):
        """Advance one ``dt`` step in NGSIM-replay mode: steering is
        integrated directly (no lane snapping, no road-edge clamping, no
        leader-relative scrolling)."""
        if self.do_accelerate:
            self.accelerate(dt)
        elif self.do_decelerate:
            self.decelerate(dt)
        elif self.do_maintain:
            self.maintain(dt)
        self.velocity += (self.acceleration * dt, 0)
        # reverse is allowed here (cf. update(), which clamps at 0)
        self.velocity.x = max(-self.max_velocity, min(self.velocity.x, self.max_velocity))
        # trigger movement
        if self.left_mode:
            self.steering += 30.0 * dt
        elif self.right_mode:
            self.steering -= 30.0 * dt
        if self.steering:
            turning_radius = self.length / tan(radians(self.steering))
            self.angular_velocity = self.velocity.x / turning_radius
        else:
            self.angular_velocity = 0
        self.position += self.velocity.rotate(-self.angle) * dt
        # self.angle += degrees(angular_velocity) * dt
    def update(self, dt, s_leader):
        """Advance one ``dt`` step relative to the scene leader ``s_leader``:
        the leader is pinned at x=10 and every other car scrolls backwards
        by the leader's velocity."""
        if self.do_accelerate:
            self.accelerate(dt)
        elif self.do_decelerate:
            self.decelerate(dt)
        elif self.do_maintain:
            self.maintain(dt)
        self.velocity += (self.acceleration * dt, 0)
        # forward-only: longitudinal velocity clamped to [0, max_velocity]
        self.velocity.x = max(0.0, min(self.velocity.x, self.max_velocity))
        # trigger movement
        new_lane_pos = (LANE_WIDTH * self.lane_id - (LANE_WIDTH/2))/ppu
        # print(new_lane_pos)
        if self.left_mode:
            self.moveLeft(dt, new_lane_pos)
        elif self.right_mode:
            self.moveRight(dt, new_lane_pos)
        if self.steering:
            turning_radius = self.length / tan(radians(self.steering))
            self.angular_velocity = self.velocity.x / turning_radius
        else:
            self.angular_velocity = 0
        self.position += self.velocity.rotate(-self.angle) * dt
        self.position.y -= degrees(self.angular_velocity) * dt * dt
        if self.id == s_leader.id:
            self.position.x = 10
        else:
            self.position.x -= s_leader.velocity.x * dt
        # prevent the car from leaving the road
        if self.position.y < int((CAR_HEIGHT/2)/ppu):
            self.position.y = max(self.position.y, int((CAR_HEIGHT/2)/ppu))
        elif self.position.y > int((HEIGHT - int(CAR_HEIGHT/2))/ppu):
            # NOTE(review): the bound below divides CAR_HEIGHT/2 by ppu twice,
            # unlike the elif condition above -- looks inconsistent; confirm
            # which expression is the intended lower road edge.
            self.position.y = min(self.position.y, int((HEIGHT - int((CAR_HEIGHT/2)/ppu))/ppu))
        # update rect for collision detection
        self.rect.x = self.position.x * ppu - self.rect.width / 2
        self.rect.y = self.position.y * ppu - self.rect.height / 2
    def setCruiseVel(self, cruise_vel):
        """Set the target velocity used by maintain()."""
        self.cruise_vel = cruise_vel
    def moveLeft(self, dt, new_lane_pos):
        """Steer left until the target lane centre ``new_lane_pos`` is reached,
        then zero the steering and leave left mode."""
        self.steering += 30.0 * dt
        if self.position.y <= new_lane_pos:
            self.steering = 0
            self.left_mode = False
    def moveRight(self, dt, new_lane_pos):
        """Steer right until the target lane centre ``new_lane_pos`` is reached,
        then zero the steering and leave right mode."""
        self.steering -= 30.0 * dt
        if self.position.y >= new_lane_pos:
            self.steering = 0
            self.right_mode = False
    def accelerate(self, dt):
        """Ramp acceleration up towards max_acceleration."""
        # the longitudinal velocity should never be less than 0
        if self.acceleration < 0.0:
            self.acceleration = 0.0
        else:
            self.acceleration += 1 * dt
        # NOTE(review): float equality -- the accumulated acceleration may
        # step past max_acceleration without ever comparing equal, so this
        # mode may not terminate as intended; confirm (a >= comparison with
        # a clamp looks like the intent).
        if self.acceleration == self.max_acceleration:
            self.do_accelerate = False
    def maintain(self, dt):
        """Accelerate or brake towards ``cruise_vel`` and hold it once
        reached (comparison done on ceil'd integer velocities)."""
        vel_ceil = ceil(self.velocity.x)
        cruise_vel_ceil = ceil(self.cruise_vel)
        # check if car needs to speed up or slow down and accelerate accordingly
        is_speed = True if vel_ceil <= cruise_vel_ceil else False
        self.acceleration = self.max_acceleration if is_speed else -self.max_acceleration
        # speed up or slow down until the car reaches cruise velocity
        is_cruise_speed = is_speed and vel_ceil >= cruise_vel_ceil
        is_cruise_speed |= (not is_speed) and vel_ceil <= cruise_vel_ceil
        if is_cruise_speed:
            self.velocity.x = self.cruise_vel
            self.acceleration = 0.0
            self.do_maintain = False
    def decelerate(self, dt):
        """Ramp acceleration down (braking); mode ends when the car stops."""
        if self.acceleration > 0.0:
            self.acceleration = -self.max_acceleration #0.0
        else:
            self.acceleration -= 1 * dt
        if self.velocity.x == 0.0:
            self.do_decelerate = False
class Obstacle(pygame.sprite.Sprite):
    """Non-agent vehicle sprite. Unlike ``Car`` it has no discrete action
    modes; each frame it simply integrates its current acceleration and
    steering."""
    def __init__(self, id, x, y, vel_x=0.0, vel_y=0.0, lane_id=1, color=RED, angle=0.0, length=4, max_steering=30, max_acceleration=5.0):
        """Create an obstacle at world position (x, y) in lane ``lane_id``."""
        # init the sprite object
        super().__init__()
        self.id = id
        self.image = pygame.Surface([CAR_WIDTH, CAR_HEIGHT])
        self.image.fill(WHITE)
        self.image.set_colorkey(WHITE)
        # Draw the car (a rectangle!)
        pygame.draw.rect(self.image, color, [0, 0, CAR_WIDTH, CAR_HEIGHT])
        # Fetch the rectangle object that has the dimensions of the image.
        self.rect = self.image.get_rect()
        self.position = Vector2(x, y)
        self.velocity = Vector2(vel_x, vel_y)
        self.angle = angle
        self.length = length
        self.max_acceleration = max_acceleration
        self.max_steering = max_steering
        self.max_velocity = 20
        self.brake_deceleration = 10
        # self.free_deceleration = 2
        self.lane_id = lane_id
        self.acceleration = 0.0
        self.steering = 0.0
    def simCopy(self):
        """Return a detached copy of this obstacle for forward simulation."""
        sim_car = Obstacle(self.id, self.position.x, self.position.y, self.velocity.x, self.velocity.y, self.lane_id, color=YELLOW)
        # dynamic controls
        sim_car.acceleration = self.acceleration
        sim_car.steering = self.steering
        return sim_car
    def update(self, dt, s_leader):
        """Advance one ``dt`` step relative to the scene leader (the world
        scrolls backwards by the leader's velocity)."""
        self.velocity += (self.acceleration * dt, 0)
        self.velocity.x = max(-self.max_velocity, min(self.velocity.x, self.max_velocity))
        if self.steering:
            turning_radius = self.length / tan(radians(self.steering))
            angular_velocity = self.velocity.x / turning_radius
        else:
            angular_velocity = 0
        self.position += self.velocity.rotate(-self.angle) * dt
        self.position.x -= s_leader.velocity.x * dt
        self.angle += degrees(angular_velocity) * dt
        # update rect for collision detection
        self.rect.x = self.position.x * ppu - self.rect.width / 2
        self.rect.y = self.position.y * ppu - self.rect.height / 2
class Game:
    def __init__(self):
        """Initialise pygame, the display surface and the game clock."""
        pygame.init()
        pygame.display.set_caption("Car tutorial")
        width = WIDTH
        height = HEIGHT
        self.screen = pygame.display.set_mode((width, height))
        self.clock = pygame.time.Clock()
        # target frame rate for clock.tick
        self.ticks = 60
        self.exit = False
    def displayScore(self, steering, car_angle):
        """Render the two collision counters in the HUD.

        NOTE(review): parameter names ``steering``/``car_angle`` look stale --
        the rendered labels treat them as obstacle/agent collision counts.
        Confirm with callers before renaming.
        """
        font = pygame.font.SysFont(None, 25)
        text = font.render("Collision [obstacle]: "+str(steering), True, WHITE)
        text_angle = font.render("Collision [agent]: "+str(car_angle), True, WHITE)
        self.screen.blit(text, (0,0))
        self.screen.blit(text_angle, (0,25))
    def displayPos(self, position):
        """Render a velocity readout in the HUD.

        NOTE(review): the parameter is named ``position`` but the label
        renders it as 'Velocity' -- the name looks stale.
        """
        font = pygame.font.SysFont(None, 25)
        # text = font.render("X: "+str(position.x)+", Y: "+str(position.y), True, WHITE)
        text = font.render("Velocity: "+str(position), True, WHITE)
        self.screen.blit(text, (0,50))
def displayAction(self, action):
font = pygame.font.SysFont(None, 25)
text = font.render("Action: "+str(action), True, WHITE)
self.screen.blit(text, (0,75))
    def updateSprites(self, vehicles):
        """Blit every vehicle sprite at its world position, rotated by its
        heading angle (world units are converted to pixels via ppu)."""
        for auto in vehicles:
            rotated = pygame.transform.rotate(auto.image, auto.angle)
            rect = rotated.get_rect()
            self.screen.blit(rotated, auto.position * ppu - (rect.width / 2, rect.height / 2))
    def manualControl(self, car, all_obstacles):
        """Map keyboard input to driving modes for ``car``.

        UP accelerates, DOWN engages cruise/maintain, SPACE brakes;
        LEFT/RIGHT trigger lane changes. Acceleration and steering are
        clamped to the car's limits afterwards.
        """
        # User input
        pressed = pygame.key.get_pressed()
        if pressed[pygame.K_UP] and not car.do_accelerate:
            self.accelerate(car)
        elif pressed[pygame.K_DOWN] and not car.do_maintain:
            self.maintain(car, all_obstacles)
        elif pressed[pygame.K_SPACE] and not car.do_decelerate:
            self.decelerate(car)
        # clamp longitudinal acceleration
        car.acceleration = max(-car.max_acceleration, min(car.acceleration, car.max_acceleration))
        if pressed[pygame.K_RIGHT] and not car.right_mode:
            self.turn_right(car)
        elif pressed[pygame.K_LEFT] and not car.left_mode:
            self.turn_left(car)
        # clamp steering angle
        car.steering = max(-car.max_steering, min(car.steering, car.max_steering))
def stackelbergControl(self, controller, reference_car, all_agents, all_obstacles):
# Step 1. select players to execute action at this instance
players = controller.pickLeadersAndFollowers(all_agents, all_obstacles)
# Step 2. iterate over the set of players and execute their actions
for leader in players:
# Step 3: select actions for all players from step 2 sequentially
selected_action = controller.selectAction(leader, all_obstacles)
# select action using Stackelberg game
# selected_action = controller.selectStackelbergAction(leader, all_obstacles, reference_car)
self.executeAction(selected_action, leader, all_obstacles)
# Note that every player acts as a leader when selecting their actions
return selected_action
    # execute the given action for the specified leader
    def executeAction(self, selected_action, leader, all_obstacles):
        """Translate a discrete SCP action into the leader's driving modes,
        then clamp its acceleration and steering to the allowed ranges."""
        if (selected_action == SCP.Action.ACCELERATE) and not leader.do_accelerate:
            self.accelerate(leader)
        elif (selected_action == SCP.Action.DECELERATE) and not leader.do_decelerate:
            self.decelerate(leader)
        elif (selected_action == SCP.Action.MAINTAIN) and not leader.do_maintain:
            self.maintain(leader, all_obstacles)
        leader.acceleration = max(-leader.max_acceleration, min(leader.acceleration, leader.max_acceleration))
        if (selected_action == SCP.Action.RIGHT) and not leader.right_mode:
            self.turn_right(leader)
        elif (selected_action == SCP.Action.LEFT) and not leader.left_mode:
            self.turn_left(leader)
        leader.steering = max(-leader.max_steering, min(leader.steering, leader.max_steering))
        return
# these booleans are required to ensure the action is executed over a period of time
def accelerate(self, car):
car.do_accelerate = True
car.do_decelerate = False
car.do_maintain = False
    def maintain(self, car, all_obstacles):
        """Engage cruise mode: match the velocity of the closest obstacle
        ahead in the car's lane, or hold the current speed if the lane ahead
        is clear."""
        # only execute this function when required
        if car.do_maintain:
            return
        forward_obstacle = None
        for obstacle in all_obstacles:
            if obstacle == car:
                continue
            # obstacle in the same lane
            if obstacle.lane_id == car.lane_id:
                # obstacle has to be ahead the ego vehicle
                if obstacle.position.x > car.position.x:
                    if not forward_obstacle:
                        forward_obstacle = obstacle
                    # obstacle closest to the front
                    elif obstacle.position.x < forward_obstacle.position.x:
                        forward_obstacle = obstacle
        # no obstacle ahead: cruise at the current velocity
        obstacle_velx = forward_obstacle.velocity.x if forward_obstacle else car.velocity.x
        car.setCruiseVel(obstacle_velx)
        car.do_maintain = True
        car.do_accelerate = False
        car.do_decelerate = False
def decelerate(self, car):
car.do_decelerate = True
car.do_accelerate = False
car.do_maintain = False
def turn_right(self, car):
car.lane_id = min(car.lane_id + | |
Raises:
None
Examples:
.. code-block:: python
import paddle
from paddle.nn import Conv1D
import numpy as np
x = np.array([[[4, 8, 1, 9],
[7, 2, 0, 9],
[6, 9, 2, 6]]]).astype(np.float32)
w=np.array(
[[[9, 3, 4],
[0, 0, 7],
[2, 5, 6]],
[[0, 3, 4],
[2, 9, 7],
[5, 6, 8]]]).astype(np.float32)
x_t = paddle.to_tensor(x)
conv = Conv1D(3, 2, 3)
conv.weight.set_value(w)
y_t = conv(x_t)
print(y_t)
# [[[133. 238.]
# [160. 211.]]]
"""
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 padding_mode='zeros',
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        """See the class docstring for parameter semantics."""
        # Positional False, 1: presumably the transposed flag and the number
        # of spatial dims (mirrors Conv1DTranspose passing True, 1) -- confirm
        # against the _ConvNd signature.
        super(Conv1D, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            False,
            1,
            stride=stride,
            padding=padding,
            padding_mode=padding_mode,
            dilation=dilation,
            groups=groups,
            weight_attr=weight_attr,
            bias_attr=bias_attr,
            data_format=data_format)
def forward(self, x):
padding = 0
if self._padding_mode != "zeros":
x = F.pad(x,
self._reversed_padding_repeated_twice,
mode=self._padding_mode,
data_format=self._data_format)
else:
padding = self._padding
out = F.conv1d(
x,
self.weight,
bias=self.bias,
padding=padding,
stride=self._stride,
dilation=self._dilation,
groups=self._groups,
data_format=self._data_format)
return out
class Conv1DTranspose(_ConvNd):
    r"""
    This interface is used to construct a callable object of the ``Conv1DTranspose`` class.
    For more details, refer to code examples.
    The 1-D convolution transpose layer calculates the output based on the input,
    filter, and dilation, stride, padding. Input(Input) and output(Output)
    are in 'NCL' format or 'NLC' where N is batch size, C is the number of channels,
    L is the length of the feature. The details of convolution transpose
    layer, please refer to the following explanation and references
    `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
    If bias attribution and activation type are provided, bias is added to
    the output of the convolution, and the corresponding activation function
    is applied to the final result.
    For each input :math:`X`, the equation is:
    .. math::
        Out = \sigma (W \\ast X + b)
    Where:
    * :math:`X`: Input value, a 3-D Tensor with 'NCL' format or 'NLC' format.
    * :math:`W`: Kernel value, a 3-D Tensor with 'MCK' format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, a 3-D Tensor with data format 'NCL' or 'NLC', the shape of :math:`Out` and :math:`X` may be different.
    Example:
        - Input:
          Input shape: :math:`(N, C_{in}, L_{in})`
          Filter shape: :math:`(C_{in}, C_{out}, L_f)`
        - Output:
          Output shape: :math:`(N, C_{out}, L_{out})`
    Where
    .. math::
        L^\prime_{out} &= (L_{in} - 1) * stride - pad_top - pad_bottom + dilation * (L_f - 1) + 1 \\\\
        L_{out} &\in [ L^\prime_{out}, L^\prime_{out} + stride ]
    Note:
        The conv1d_transpose can be seen as the backward of the conv1d. For conv1d,
        when stride > 1, conv1d maps multiple input shape to the same output shape,
        so for conv1d_transpose, when stride > 1, input shape maps multiple output shape.
        If output_size is None, :math:`L_{out} = L^\prime_{out}`;
        else, the :math:`L_{out}` of the output size must between :math:`L^\prime_{out}`
        and :math:`L^\prime_{out} + stride`. conv1d_transpose can compute the kernel size automatically.
    Args:
        in_channels(int): The number of channels in the input image.
        out_channels(int): The number of the filter. It is as same as the output
            feature map.
        kernel_size(int|tuple|list, optional): The filter size. If kernel_size is a tuple,
            it must contain one integers, (kernel_size). None if
            use output size to calculate kernel_size. Default: None. kernel_size and
            output_size should not be None at the same time.
        stride(int|tuple|list, optional): The stride size. It means the stride in transposed convolution.
            If stride is a tuple, it must contain one integer, (stride_size).
            Default: stride = 1.
        padding(int|list|str|tuple, optional): The padding size. The padding argument effectively adds
            `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a
            string, either 'VALID' or 'SAME' supported, which is the padding algorithm.
            If `padding` is a tuple or list, it could be in two forms:
            `[pad]` or `[pad_left, pad_right]`. Default: padding = 0.
        output_padding(int|list|tuple, optional): The count of zeros to be added to tail of each dimension.
            If it is a tuple, it must contain one integer. Default: 0.
        groups(int, optional): The groups number of the Conv2D transpose layer. Inspired by
            grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
            when group=2, the first half of the filters is only connected to the
            first half of the input channels, while the second half of the
            filters is only connected to the second half of the input channels.
            Default: groups = 1.
        bias(bool, optional): Whether to use bias. Default: True.
        dilation(int|tuple|list, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a tuple, it must contain one integer, (dilation_size).
            Default: dilation = 1.
        weight_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv1d_transpose. If it is set to None or one attribute of ParamAttr, conv1d_transpose
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv1d_transpose.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv1d_transpose
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
    Attribute:
        **weight** (Parameter): the learnable weights of filters of this layer.
        **bias** (Parameter or None): the learnable bias of this layer.
    Shape:
        - x(Tensor): 3-D tensor with shape (batch, in_channels, length) when data_format is "NCL" or shape (batch, length, in_channels) when data_format is "NLC".
        - output_size(int|tuple|list, optional): The output image size. If output size is a tuple, it must contain one integer, (feature_length). None if use kernel_size, padding, output_padding and stride to calculate output_size. If output_size and kernel_size are specified at the same time, They should follow the formula above. Default: None. output_size and kernel_size should not be None at the same time.
        - output(Tensor): 3-D tensor with same shape as input x.
    Examples:
       .. code-block:: python
          import paddle
          from paddle.nn import Conv1DTranspose
          import numpy as np
          # shape: (1, 2, 4)
          x=np.array([[[4, 0, 9, 7],
                       [8, 0, 9, 2]]]).astype(np.float32)
          # shape: (2, 1, 2)
          y=np.array([[[7, 0]],
                      [[4, 2]]]).astype(np.float32)
          x_t = paddle.to_tensor(x)
          conv = Conv1DTranspose(2, 1, 2)
          conv.weight.set_value(y)
          y_t = conv(x_t)
          print(y_t)
          # [[[60. 16. 99. 75.  4.]]]
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 output_padding=0,
                 groups=1,
                 dilation=1,
                 weight_attr=None,
                 bias_attr=None,
                 data_format="NCL"):
        """See the class docstring for parameter semantics."""
        # Positional True, 1: presumably the transposed flag and the number
        # of spatial dims -- confirm against the _ConvNd signature.
        super(Conv1DTranspose, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            True,
            1,
            stride=stride,
            padding=padding,
            dilation=dilation,
            output_padding=output_padding,
            groups=groups,
            weight_attr=weight_attr,
            bias_attr=bias_attr,
            data_format=data_format)
    def forward(self, x, output_size=None):
        """Apply the transposed 1D convolution to ``x``.

        :param x: input tensor in NCL/NLC layout (see class docs).
        :param output_size: optional target output length; constrains the
            otherwise ambiguous output shape (see class docs).
        """
        out = F.conv1d_transpose(
            x,
            self.weight,
            bias=self.bias,
            output_size=output_size,
            output_padding=self.output_padding,
            padding=self._padding,
            stride=self._stride,
            dilation=self._dilation,
            groups=self._groups,
            data_format=self._data_format)
        return out
class Conv2D(_ConvNd):
r"""
This interface is used to construct a callable object of the ``Conv2D`` class.
For more details, refer to code examples.
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW format, where N is batch size, C is the number of
the feature map, H is the height of the feature map, and W is the width of the feature map.
Filter's shape is [MCHW] , where M is the number of output feature map,
C is the number of input feature map, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input feature map divided by the groups.
Please refer to UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
for more details.
If bias attribution and activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
applied to the final result.
For each input :math:`X`, the equation is:
.. math::
Out = \sigma (W \\ast X + b)
Where:
* :math:`X`: Input value, a ``Tensor`` with NCHW format.
* :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
Parameters:
in_channels(int): The number | |
'Discussion'),
('passed_swc_demo', 'SWC Demo'),
]
try:
return [name for field, name in fields if not getattr(self, field)]
except AttributeError as e:
raise Exception('Did you forget to call '
'annotate_with_instructor_eligibility()?') from e
def get_missing_dc_instructor_requirements(self):
"""Returns set of requirements' names (list of strings) that are not
passed yet by the trainee and are mandatory to become DC Instructor."""
fields = [
('passed_training', 'Training'),
('passed_dc_homework', 'DC Homework'),
('passed_discussion', 'Discussion'),
('passed_dc_demo', 'DC Demo'),
]
try:
return [name for field, name in fields if not getattr(self, field)]
except AttributeError as e:
raise Exception('Did you forget to call '
'annotate_with_instructor_eligibility()?') from e
def get_training_tasks(self):
"""Returns Tasks related to Instuctor Training events at which this
person was trained."""
return Task.objects.filter(person=self,
role__name='learner',
event__tags__name='TTT')
def clean(self):
"""This will be called by the ModelForm.is_valid(). No saving to the
database."""
# lowercase the email
self.email = self.email.lower() if self.email else None
    def save(self, *args, **kwargs):
        """Normalize fields and persist this Person.

        Side effect: when the GitHub username changed, every UserSocialAuth
        row for this user is deleted so stale OAuth links can't be reused.
        Empty strings on nullable unique fields are saved as None to avoid
        UNIQUE-constraint collisions on ''.
        """
        # If GitHub username has changed, clear UserSocialAuth table for this
        # person.
        if self.pk is not None:
            # Fetch the currently-stored row for comparison (one extra query).
            orig = Person.objects.get(pk=self.pk)
            github_username_has_changed = orig.github != self.github
            if github_username_has_changed:
                UserSocialAuth.objects.filter(user=self).delete()
        # save empty string as NULL to the database - otherwise there are
        # issues with UNIQUE constraint failing
        self.personal = self.personal.strip()
        if self.family is not None:
            self.family = self.family.strip()
        self.middle = self.middle.strip()
        self.email = self.email.strip() if self.email else None
        self.gender = self.gender or None
        self.airport = self.airport or None
        self.github = self.github or None
        self.twitter = self.twitter or None
        super().save(*args, **kwargs)
def is_admin(user):
    """Return True when ``user`` is a superuser or belongs to one of the
    administrative groups; missing or anonymous users are never admins."""
    if user is None or user.is_anonymous():
        return False
    if user.is_superuser:
        return True
    admin_groups = (Q(name='administrators') |
                    Q(name='steering committee') |
                    Q(name='invoicing') |
                    Q(name='trainers'))
    return user.groups.filter(admin_groups).exists()
class ProfileUpdateRequest(ActiveMixin, CreatedUpdatedMixin, models.Model):
    """A self-submitted request to update a person's profile data.

    Mirrors many ``Person`` fields; the submitted values are held here
    (presumably until reviewed via ``profileupdaterequest_details`` —
    confirm against the corresponding view/workflow).
    """
    personal = models.CharField(
        max_length=STR_LONG,
        verbose_name='Personal (first) name',
        blank=False,
    )
    middle = models.CharField(
        max_length=STR_LONG,
        verbose_name='Middle name',
        blank=True,
    )
    family = models.CharField(
        max_length=STR_LONG,
        verbose_name='Family (last) name',
        blank=False,
    )
    email = models.EmailField(
        verbose_name='Email address',
        blank=False,
    )
    affiliation = models.CharField(
        max_length=STR_LONG,
        help_text='What university, company, lab, or other organization are '
                  'you affiliated with (if any)?',
        blank=False,
    )
    # Stored as the raw 3-letter code; not a FK to Airport.
    airport_iata = models.CharField(
        max_length=3,
        verbose_name='Nearest major airport',
        help_text='Please use its 3-letter IATA code '
                  '(<a href="http://www.airportcodes.aero/" target="_blank">'
                  'http://www.airportcodes.aero/</a>) to tell us where you\'re located.',
        blank=False, null=False,
    )
    # The empty-string choice routes users to `occupation_other` below.
    OCCUPATION_CHOICES = (
        ('undisclosed', 'Prefer not to say'),
        ('undergrad', 'Undergraduate student'),
        ('grad', 'Graduate student'),
        ('postdoc', 'Post-doctoral researcher'),
        ('faculty', 'Faculty'),
        ('research', 'Research staff (including research programmer)'),
        ('support', 'Support staff (including technical support)'),
        ('librarian', 'Librarian/archivist'),
        ('commerce', 'Commercial software developer '),
        ('', 'Other (enter below)'),
    )
    occupation = models.CharField(
        max_length=STR_MED,
        choices=OCCUPATION_CHOICES,
        verbose_name='What is your current occupation/career stage?',
        help_text='Please choose the one that best describes you.',
        null=False, blank=True, default='undisclosed',
    )
    occupation_other = models.CharField(
        max_length=STR_LONG,
        verbose_name='Other occupation/career stage',
        blank=True, default='',
    )
    github = models.CharField(
        max_length=STR_LONG,
        verbose_name='GitHub username',
        help_text='Please put only a single username here.',
        validators=[github_username_validator],
        blank=True, default='',
    )
    twitter = models.CharField(
        max_length=STR_LONG,
        verbose_name='Twitter username',
        blank=True, default='',
    )
    orcid = models.CharField(
        max_length=STR_LONG,
        verbose_name='ORCID ID',
        blank=True, default='',
    )
    website = models.CharField(
        max_length=STR_LONG,
        verbose_name='Personal website',
        default='', blank=True,
    )
    # Choice values reuse the single-character codes defined on Person.
    GENDER_CHOICES = (
        (Person.UNDISCLOSED, 'Prefer not to say'),
        (Person.FEMALE, 'Female'),
        (Person.MALE, 'Male'),
        (Person.OTHER, 'Other (enter below)'),
    )
    gender = models.CharField(
        max_length=1,
        choices=GENDER_CHOICES,
        null=False, blank=False, default=Person.UNDISCLOSED,
    )
    gender_other = models.CharField(
        max_length=STR_LONG,
        verbose_name='Other gender',
        blank=True, default='',
    )
    domains = models.ManyToManyField(
        'KnowledgeDomain',
        verbose_name='Areas of expertise',
        help_text='Please check all that apply.',
        limit_choices_to=~Q(name__startswith='Don\'t know yet'),
        blank=True,
    )
    domains_other = models.CharField(
        max_length=STR_LONGEST,
        verbose_name='Other areas of expertise',
        blank=True, default='',
    )
    languages = models.ManyToManyField(
        'Language',
        verbose_name='Languages you can teach in',
        blank=True,
    )
    lessons = models.ManyToManyField(
        'Lesson',
        verbose_name='Topic and lessons you\'re comfortable teaching',
        help_text='Please mark ALL that apply.',
        blank=False,
    )
    lessons_other = models.CharField(
        max_length=STR_LONGEST,
        verbose_name='Other topics/lessons you\'re comfortable teaching',
        help_text='Please include lesson URLs.',
        blank=True, default='',
    )
    notes = models.TextField(
        default="",
        blank=True)
    def save(self, *args, **kwargs):
        """Save nullable char fields as empty strings."""
        self.personal = self.personal.strip()
        self.family = self.family.strip()
        self.email = self.email.strip()
        self.gender = self.gender or ''
        self.occupation = self.occupation or ''
        return super().save(*args, **kwargs)
    def get_absolute_url(self):
        """Canonical admin URL for reviewing this request."""
        return reverse('profileupdaterequest_details', args=[self.pk])
    def __str__(self):
        return "{personal} {family} <{email}> (from {affiliation})".format(
            personal=self.personal, family=self.family, email=self.email,
            affiliation=self.affiliation,
        )
#------------------------------------------------------------
class TagQuerySet(models.query.QuerySet):
    """Custom queryset for Tag with carpentry-related helpers."""

    def carpentries(self):
        """Return the core carpentry tags (SWC, DC, LC) ordered by id.

        Filters ``self`` rather than ``Tag.objects`` so the method remains
        chainable with other queryset filters — the whole point of defining
        custom filters on the QuerySet (see the note above EventQuerySet).
        The previous ``Tag.objects.filter(...)`` silently discarded any
        filtering already applied to the queryset.
        """
        return self.filter(name__in=['SWC', 'DC', 'LC']).order_by('id')
class Tag(models.Model):
    """Label for grouping events."""

    # How many entries select widgets should show before scrolling.
    ITEMS_VISIBLE_IN_SELECT_WIDGET = 10

    # Expose the custom queryset helpers through the default manager.
    objects = TagQuerySet.as_manager()

    name = models.CharField(max_length=STR_MED, unique=True)
    details = models.CharField(max_length=STR_LONG)

    def __str__(self):
        return self.name
#------------------------------------------------------------
class Language(models.Model):
    """A language tag.
    https://tools.ietf.org/html/rfc5646
    """
    # Human-readable English description of the tag.
    name = models.CharField(
        max_length=STR_MED,
        help_text='Description of this language tag in English')
    # Only the primary-language subtag is stored, not a full BCP 47 tag.
    subtag = models.CharField(
        max_length=STR_SHORT,
        help_text=
        'Primary language subtag. '
        'https://tools.ietf.org/html/rfc5646#section-2.2.1')
    def __str__(self):
        return self.name
#------------------------------------------------------------
# In order to make our custom filters chainable, we have to
# define them on the QuerySet, not the Manager - see
# http://www.dabapps.com/blog/higher-level-query-api-django-orm/
class EventQuerySet(models.query.QuerySet):
    '''Handles finding past, ongoing and upcoming events'''
    # All methods return querysets, so they can be chained freely.
    def not_cancelled(self):
        """Exclude cancelled events."""
        return self.exclude(tags__name='cancelled')
    def active(self):
        """Exclude inactive events (stalled, completed or cancelled)."""
        return self.exclude(tags__name='stalled').exclude(completed=True) \
                   .not_cancelled()
    def past_events(self):
        '''Return past events.
        Past events are those which started before today, and
        which either ended before today or whose end is NULL
        '''
        # All events that started before today
        queryset = self.filter(start__lt=datetime.date.today())
        # Of those events, only those that also ended before today
        # or where the end date is NULL
        ended_before_today = models.Q(end__lt=datetime.date.today())
        end_is_null = models.Q(end__isnull=True)
        queryset = queryset.filter(ended_before_today | end_is_null)
        return queryset
    def upcoming_events(self):
        """Return published upcoming events.
        Upcoming events are published events (see `published_events` below)
        that start after today."""
        queryset = self.published_events() \
                       .filter(start__gt=datetime.date.today()) \
                       .order_by('start')
        return queryset
    def ongoing_events(self):
        """Return ongoing events.
        Ongoing events are published events (see `published_events` below)
        that are currently taking place (ie. start today or before and end
        today or later)."""
        # All events that start before or on today, and finish after or on
        # today.
        queryset = self.published_events() \
                       .filter(start__lte=datetime.date.today()) \
                       .filter(end__gte=datetime.date.today()) \
                       .order_by('start')
        return queryset
    def unpublished_conditional(self):
        """Return conditional for events without: start OR country OR venue OR
        url OR are marked as 'cancelled' (ie. unpublished events). This will be
        used in `self.published_events`, too."""
        # Any one missing attribute is enough to make an event unpublished.
        unknown_start = Q(start__isnull=True)
        no_country = Q(country__isnull=True)
        no_venue = Q(venue__exact='')
        no_address = Q(address__exact='')
        no_latitude = Q(latitude__isnull=True)
        no_longitude = Q(longitude__isnull=True)
        no_url = Q(url__isnull=True)
        return (
            unknown_start | no_country | no_venue | no_address | no_latitude |
            no_longitude | no_url
        )
    def unpublished_events(self):
        """Return events considered as unpublished (see
        `unpublished_conditional` above)."""
        conditional = self.unpublished_conditional()
        return self.not_cancelled().filter(conditional) \
                   .order_by('slug', 'id').distinct()
    def published_events(self):
        """Return events considered as published (see `unpublished_conditional`
        above)."""
        conditional = self.unpublished_conditional()
        return self.not_cancelled().exclude(conditional) \
                   .order_by('-start', 'id').distinct()
    def uninvoiced_events(self):
        '''Return a queryset for events that have not yet been invoiced.
        These are marked as uninvoiced, and have occurred.
        Events are sorted oldest first.'''
        return self.not_cancelled().past_events() \
                   .filter(invoice_status='not-invoiced') \
                   .order_by('start')
    def metadata_changed(self):
        """Return events for which remote metatags have been updated."""
        return self.filter(metadata_changed=True)
    def ttt(self):
        """Return only TTT (Train-the-Trainer) events."""
        return self.filter(tags__name='TTT').distinct()
@reversion.register
class Event(AssignmentMixin, models.Model):
'''Represent a single event.'''
REPO_REGEX = re.compile(r'https?://github\.com/(?P<name>[^/]+)/'
r'(?P<repo>[^/]+)/?')
REPO_FORMAT = 'https://github.com/{name}/{repo}'
WEBSITE_REGEX = re.compile(r'https?://(?P<name>[^.]+)\.github\.'
r'(io|com)/(?P<repo>[^/]+)/?')
WEBSITE_FORMAT = 'https://{name}.github.io/{repo}/'
PUBLISHED_HELP_TEXT = 'Required in order for this event to be "published".'
host = models.ForeignKey(Organization, on_delete=models.PROTECT,
help_text='Organization hosting the event.')
tags = models.ManyToManyField(
Tag,
help_text='<ul><li><i>stalled</i> — for events with lost contact with '
'the host or TTT events that aren\'t running.</li>'
'<li><i>unresponsive</i> – for events whose hosts and/or '
'organizers aren\'t going to send us attendance data.</li>'
'<li><i>cancelled</i> — for events that were supposed to '
'happen, but due to some circumstances got cancelled.</li>'
'</ul>',
)
administrator = models.ForeignKey(
Organization, related_name='administrator', null=True, blank=True,
on_delete=models.PROTECT,
help_text='Organization responsible for administrative work.'
)
sponsors = models.ManyToManyField(
Organization, related_name='sponsored_events', blank=True,
through=Sponsorship,
)
start = models.DateField(
null=True, blank=True,
help_text=PUBLISHED_HELP_TEXT,
)
end = models.DateField(null=True, blank=True)
slug = models.SlugField(
max_length=STR_LONG, unique=True,
help_text='Use <code>YYYY-MM-DD-location</code> format, where '
'<code>location</code> is either an organization, or city, '
'or both. If the specific date is unknown, use '
'<code>xx</code> instead, for example: <code>2016-12-xx'
'-Krakow</code> means that the event is supposed to run '
'sometime in December 2016 in Kraków. Use only latin '
'characters.',
)
language = models.ForeignKey(
Language, on_delete=models.SET_NULL,
null=True, blank=True,
help_text='Human language of instruction during the workshop.'
)
url = models.CharField(
max_length=STR_LONG, unique=True, null=True, blank=True,
validators=[RegexValidator(REPO_REGEX, inverse_match=True)],
help_text=PUBLISHED_HELP_TEXT +
'<br />Use link to the event\'s <b>website</b>, ' +
'not repository.',
)
reg_key = models.CharField(max_length=STR_REG_KEY, blank=True, verbose_name="Eventbrite key")
attendance = models.PositiveIntegerField(null=True, blank=True)
admin_fee = models.DecimalField(max_digits=6, decimal_places=2, null=True, blank=True, validators=[MinValueValidator(0)])
INVOICED_CHOICES = (
('unknown', 'Unknown'),
('invoiced', | |
self.last_buf = self.connection.buf
# Set up a normal response for the next request
self.connection = test_urllib.fakehttp(
b'HTTP/1.1 200 OK\r\n'
b'Content-Length: 3\r\n'
b'\r\n'
b'123'
)
return result
handler = Handler()
opener = urllib.request.build_opener(handler)
tests = (
(b'/p\xC3\xA5-dansk/', b'/p%C3%A5-dansk/'),
(b'/spaced%20path/', b'/spaced%20path/'),
(b'/spaced path/', b'/spaced%20path/'),
(b'/?p\xC3\xA5-dansk', b'/?p%C3%A5-dansk'),
)
for [location, result] in tests:
with self.subTest(repr(location)):
handler.connection = test_urllib.fakehttp(
b'HTTP/1.1 302 Redirect\r\n'
b'Location: ' + location + b'\r\n'
b'\r\n'
)
response = opener.open('http://example.com/')
expected = b'GET ' + result + b' '
request = handler.last_buf
self.assertTrue(request.startswith(expected), repr(request))
def test_proxy(self):
u = "proxy.example.com:3128"
for d in dict(http=u), dict(HTTP=u):
o = OpenerDirector()
ph = urllib.request.ProxyHandler(d)
o.add_handler(ph)
meth_spec = [
[("http_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://acme.example.com/")
self.assertEqual(req.host, "acme.example.com")
o.open(req)
self.assertEqual(req.host, u)
self.assertEqual([(handlers[0], "http_open")],
[tup[0:2] for tup in o.calls])
def test_proxy_no_proxy(self):
os.environ['no_proxy'] = 'python.org'
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com"))
o.add_handler(ph)
req = Request("http://www.perl.org/")
self.assertEqual(req.host, "www.perl.org")
o.open(req)
self.assertEqual(req.host, "proxy.example.com")
req = Request("http://www.python.org")
self.assertEqual(req.host, "www.python.org")
o.open(req)
self.assertEqual(req.host, "www.python.org")
del os.environ['no_proxy']
def test_proxy_no_proxy_all(self):
os.environ['no_proxy'] = '*'
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com"))
o.add_handler(ph)
req = Request("http://www.python.org")
self.assertEqual(req.host, "www.python.org")
o.open(req)
self.assertEqual(req.host, "www.python.org")
del os.environ['no_proxy']
def test_proxy_https(self):
o = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(https="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("https_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("https://www.example.com/")
self.assertEqual(req.host, "www.example.com")
o.open(req)
self.assertEqual(req.host, "proxy.example.com:3128")
self.assertEqual([(handlers[0], "https_open")],
[tup[0:2] for tup in o.calls])
    def test_proxy_https_proxy_authorization(self):
        """Proxy-Authorization must travel on the CONNECT tunnel request,
        not on the tunneled HTTPS request itself."""
        o = OpenerDirector()
        ph = urllib.request.ProxyHandler(dict(https='proxy.example.com:3128'))
        o.add_handler(ph)
        https_handler = MockHTTPSHandler()
        o.add_handler(https_handler)
        req = Request("https://www.example.com/")
        req.add_header("Proxy-Authorization", "FooBar")
        req.add_header("User-Agent", "Grail")
        self.assertEqual(req.host, "www.example.com")
        # No tunnel is set up until the request is opened.
        self.assertIsNone(req._tunnel_host)
        o.open(req)
        # Verify Proxy-Authorization gets tunneled to request.
        # httpsconn req_headers do not have the Proxy-Authorization header but
        # the req will have.
        self.assertNotIn(("Proxy-Authorization", "FooBar"),
                         https_handler.httpconn.req_headers)
        self.assertIn(("User-Agent", "Grail"),
                      https_handler.httpconn.req_headers)
        self.assertIsNotNone(req._tunnel_host)
        self.assertEqual(req.host, "proxy.example.com:3128")
        self.assertEqual(req.get_header("Proxy-authorization"), "FooBar")
@unittest.skipUnless(sys.platform == 'darwin', "only relevant for OSX")
def test_osx_proxy_bypass(self):
bypass = {
'exclude_simple': False,
'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.10',
'10.0/16']
}
# Check hosts that should trigger the proxy bypass
for host in ('foo.bar', 'www.bar.com', '127.0.0.1', '10.10.0.1',
'10.0.0.1'):
self.assertTrue(_proxy_bypass_macosx_sysconf(host, bypass),
'expected bypass of %s to be True' % host)
# Check hosts that should not trigger the proxy bypass
for host in ('abc.foo.bar', 'bar.com', '127.0.0.2', '10.11.0.1',
'notinbypass'):
self.assertFalse(_proxy_bypass_macosx_sysconf(host, bypass),
'expected bypass of %s to be False' % host)
# Check the exclude_simple flag
bypass = {'exclude_simple': True, 'exceptions': []}
self.assertTrue(_proxy_bypass_macosx_sysconf('test', bypass))
def test_basic_auth(self, quote_char='"'):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' %
(quote_char, realm, quote_char))
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
    def test_basic_auth_with_single_quoted_realm(self):
        """Same flow as test_basic_auth, with the realm quoted by "'"."""
        self.test_basic_auth(quote_char="'")
    def test_basic_auth_with_unquoted_realm(self):
        """An unquoted realm still authenticates, but emits a UserWarning."""
        opener = OpenerDirector()
        password_manager = MockPasswordManager()
        auth_handler = urllib.request.HTTPBasicAuthHandler(password_manager)
        realm = "ACME Widget Store"
        # Note: realm deliberately NOT wrapped in quotes here.
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: Basic realm=%s\r\n\r\n' % realm)
        opener.add_handler(auth_handler)
        opener.add_handler(http_handler)
        with self.assertWarns(UserWarning):
            self._test_basic_auth(opener, auth_handler, "Authorization",
                                  realm, http_handler, password_manager,
                                  "http://acme.example.com/protected",
                                  "http://acme.example.com/protected",
                                  )
def test_proxy_basic_auth(self):
opener = OpenerDirector()
ph = urllib.request.ProxyHandler(dict(http="proxy.example.com:3128"))
opener.add_handler(ph)
password_manager = MockPasswordManager()
auth_handler = urllib.request.ProxyBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
realm, http_handler, password_manager,
"http://acme.example.com:3128/protected",
"proxy.example.com:3128",
)
    def test_basic_and_digest_auth_handlers(self):
        """Digest is tried before Basic, and a failing digest handler does
        not block the basic handler."""
        # HTTPDigestAuthHandler raised an exception if it couldn't handle a 40*
        # response (http://python.org/sf/1479302), where it should instead
        # return None to allow another handler (especially
        # HTTPBasicAuthHandler) to handle the response.
        # Also (http://python.org/sf/14797027, RFC 2617 section 1.2), we must
        # try digest first (since it's the strongest auth scheme), so we record
        # order of calls here to check digest comes first:
        class RecordingOpenerDirector(OpenerDirector):
            # Collects a trace of which auth scheme handled each 401.
            def __init__(self):
                OpenerDirector.__init__(self)
                self.recorded = []
            def record(self, info):
                self.recorded.append(info)
        class TestDigestAuthHandler(urllib.request.HTTPDigestAuthHandler):
            def http_error_401(self, *args, **kwds):
                self.parent.record("digest")
                urllib.request.HTTPDigestAuthHandler.http_error_401(self,
                                                                    *args, **kwds)
        class TestBasicAuthHandler(urllib.request.HTTPBasicAuthHandler):
            def http_error_401(self, *args, **kwds):
                self.parent.record("basic")
                urllib.request.HTTPBasicAuthHandler.http_error_401(self,
                                                                   *args, **kwds)
        opener = RecordingOpenerDirector()
        password_manager = MockPasswordManager()
        digest_handler = TestDigestAuthHandler(password_manager)
        basic_handler = TestBasicAuthHandler(password_manager)
        realm = "ACME Networks"
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
        opener.add_handler(basic_handler)
        opener.add_handler(digest_handler)
        opener.add_handler(http_handler)
        # check basic auth isn't blocked by digest handler failing
        self._test_basic_auth(opener, basic_handler, "Authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com/protected",
                              "http://acme.example.com/protected",
                              )
        # check digest was tried before basic (twice, because
        # _test_basic_auth called .open() twice)
        self.assertEqual(opener.recorded, ["digest", "basic"]*2)
def test_unsupported_auth_digest_handler(self):
opener = OpenerDirector()
# While using DigestAuthHandler
digest_auth_handler = urllib.request.HTTPDigestAuthHandler(None)
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Kerberos\r\n\r\n')
opener.add_handler(digest_auth_handler)
opener.add_handler(http_handler)
self.assertRaises(ValueError, opener.open, "http://www.example.com")
def test_unsupported_auth_basic_handler(self):
# While using BasicAuthHandler
opener = OpenerDirector()
basic_auth_handler = urllib.request.HTTPBasicAuthHandler(None)
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: NTLM\r\n\r\n')
opener.add_handler(basic_auth_handler)
opener.add_handler(http_handler)
self.assertRaises(ValueError, opener.open, "http://www.example.com")
def _test_basic_auth(self, opener, auth_handler, auth_header,
realm, http_handler, password_manager,
request_url, protected_url):
import base64
user, password = "<PASSWORD>", "<PASSWORD>"
# .add_password() fed through to password manager
auth_handler.add_password(realm, request_url, user, password)
self.assertEqual(realm, password_manager.realm)
self.assertEqual(request_url, password_manager.url)
self.assertEqual(user, password_manager.user)
self.assertEqual(password, password_manager.password)
opener.open(request_url)
# should have asked the password manager for the username/password
self.assertEqual(password_manager.target_realm, realm)
self.assertEqual(password_manager.target_url, protected_url)
# expect one request without authorization, then one with
self.assertEqual(len(http_handler.requests), 2)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
userpass = bytes('%s:%s' % (user, password), "ascii")
auth_hdr_value = ('Basic ' +
base64.encodebytes(userpass).strip().decode())
self.assertEqual(http_handler.requests[1].get_header(auth_header),
auth_hdr_value)
self.assertEqual(http_handler.requests[1].unredirected_hdrs[auth_header],
auth_hdr_value)
# if the password manager can't find a password, the handler won't
# handle the HTTP auth error
password_manager.user = password_manager.password = <PASSWORD>
http_handler.reset()
opener.open(request_url)
self.assertEqual(len(http_handler.requests), 1)
self.assertFalse(http_handler.requests[0].has_header(auth_header))
def test_basic_prior_auth_auto_send(self):
# Assume already authenticated if is_authenticated=True
# for APIs like Github that don't return 401
user, password = "<PASSWORD>", "<PASSWORD>"
request_url = "http://acme.example.com/protected"
http_handler = MockHTTPHandlerCheckAuth(200)
pwd_manager = HTTPPasswordMgrWithPriorAuth()
auth_prior_handler = HTTPBasicAuthHandler(pwd_manager)
auth_prior_handler.add_password(
None, request_url, user, password, is_authenticated=True)
is_auth = pwd_manager.is_authenticated(request_url)
self.assertTrue(is_auth)
opener = OpenerDirector()
opener.add_handler(auth_prior_handler)
opener.add_handler(http_handler)
opener.open(request_url)
# expect request to be sent with auth header
self.assertTrue(http_handler.has_auth_header)
def test_basic_prior_auth_send_after_first_success(self):
# Auto send auth header after authentication is successful once
user, password = '<PASSWORD>', '<PASSWORD>'
request_url = 'http://acme.example.com/protected'
realm = 'ACME'
pwd_manager = HTTPPasswordMgrWithPriorAuth()
auth_prior_handler = HTTPBasicAuthHandler(pwd_manager)
auth_prior_handler.add_password(realm, request_url, user, password)
is_auth = pwd_manager.is_authenticated(request_url)
self.assertFalse(is_auth)
opener = OpenerDirector()
opener.add_handler(auth_prior_handler)
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % None)
opener.add_handler(http_handler)
opener.open(request_url)
is_auth = pwd_manager.is_authenticated(request_url)
self.assertTrue(is_auth)
http_handler = MockHTTPHandlerCheckAuth(200)
self.assertFalse(http_handler.has_auth_header)
opener = OpenerDirector()
opener.add_handler(auth_prior_handler)
opener.add_handler(http_handler)
# After getting 200 from MockHTTPHandler
# Next request sends header in the first request
opener.open(request_url)
# expect request to be sent with auth header
self.assertTrue(http_handler.has_auth_header)
def test_http_closed(self):
"""Test the connection is cleaned up when the response is closed"""
for (transfer, data) in (
("Connection: close", b"data"),
("Transfer-Encoding: chunked", b"4\r\ndata\r\n0\r\n\r\n"),
("Content-Length: 4", b"data"),
):
header = "HTTP/1.1 200 OK\r\n{}\r\n\r\n".format(transfer)
conn = test_urllib.fakehttp(header.encode() + data)
handler = urllib.request.AbstractHTTPHandler()
req = Request("http://dummy/")
req.timeout = None
with handler.do_open(conn, req) as resp:
resp.read()
self.assertTrue(conn.fakesock.closed,
"Connection not closed with {!r}".format(transfer))
def test_invalid_closed(self):
"""Test the connection is cleaned up after an invalid response"""
conn = test_urllib.fakehttp(b"")
handler = urllib.request.AbstractHTTPHandler()
req = Request("http://dummy/")
req.timeout = None
with self.assertRaises(http.client.BadStatusLine):
handler.do_open(conn, req)
self.assertTrue(conn.fakesock.closed, "Connection not closed")
class MiscTests(unittest.TestCase):
def opener_has_handler(self, opener, handler_class):
self.assertTrue(any(h.__class__ == handler_class
for h in opener.handlers))
    def test_build_opener(self):
        """build_opener accepts classes and instances, keeps defaults, and
        lets subclasses of default handlers replace those defaults."""
        class MyHTTPHandler(urllib.request.HTTPHandler):
            pass
        class FooHandler(urllib.request.BaseHandler):
            def foo_open(self):
                pass
        class BarHandler(urllib.request.BaseHandler):
            def bar_open(self):
                pass
        build_opener = urllib.request.build_opener
        o = build_opener(FooHandler, BarHandler)
        self.opener_has_handler(o, FooHandler)
        self.opener_has_handler(o, BarHandler)
        # can take a mix of classes and instances
        o = build_opener(FooHandler, BarHandler())
        self.opener_has_handler(o, FooHandler)
        self.opener_has_handler(o, BarHandler)
        # subclasses of default handlers override default handlers
        o = build_opener(MyHTTPHandler)
        self.opener_has_handler(o, MyHTTPHandler)
        # a particular case of overriding: default handlers can be passed
        # in explicitly
        o = build_opener()
        self.opener_has_handler(o, urllib.request.HTTPHandler)
        o = build_opener(urllib.request.HTTPHandler)
        self.opener_has_handler(o, urllib.request.HTTPHandler)
        o = build_opener(urllib.request.HTTPHandler())
        self.opener_has_handler(o, urllib.request.HTTPHandler)
        # Issue2670: multiple handlers sharing the same base class
        class MyOtherHTTPHandler(urllib.request.HTTPHandler):
            pass
        o = build_opener(MyHTTPHandler, MyOtherHTTPHandler)
        self.opener_has_handler(o, MyHTTPHandler)
        self.opener_has_handler(o, MyOtherHTTPHandler)
    @unittest.skipUnless(support.is_resource_enabled('network'),
                         'test requires network access')
    def test_issue16464(self):
        """opener.open(request, data) must update request.data and the
        Content-length header on reuse of the same Request (issue 16464)."""
        with support.transient_internet("http://www.example.com/"):
            opener = urllib.request.build_opener()
            request = urllib.request.Request("http://www.example.com/")
            self.assertEqual(None, request.data)
            opener.open(request, "1".encode("us-ascii"))
            self.assertEqual(b"1", request.data)
            self.assertEqual("1", request.get_header("Content-length"))
            opener.open(request, "1234567890".encode("us-ascii"))
            self.assertEqual(b"1234567890", request.data)
            self.assertEqual("10", request.get_header("Content-length"))
def test_HTTPError_interface(self):
"""
Issue 13211 reveals that HTTPError didn't implement the URLError
interface even though HTTPError is a subclass of URLError.
"""
msg = 'something bad happened'
url = code = fp = None
hdrs = 'Content-Length: 42'
err = urllib.error.HTTPError(url, code, msg, hdrs, fp)
self.assertTrue(hasattr(err, 'reason'))
self.assertEqual(err.reason, 'something bad happened')
self.assertTrue(hasattr(err, 'headers'))
self.assertEqual(err.headers, 'Content-Length: 42')
expected_errmsg = 'HTTP Error %s: %s' % (err.code, err.msg)
self.assertEqual(str(err), expected_errmsg)
expected_errmsg = '<HTTPError %s: %r>' % (err.code, err.msg)
self.assertEqual(repr(err), expected_errmsg)
def test_parse_proxy(self):
parse_proxy_test_cases = [
('proxy.example.com',
(None, None, None, 'proxy.example.com')),
('proxy.example.com:3128',
(None, None, None, 'proxy.example.com:3128')),
('proxy.example.com', (None, None, None, | |
#! /usr/bin/env python
#
"""
Matplotlib_user_interface.py is an interactive plotting tool using matplotlib.
This is intended to be a front-end for a number of MATPLOTLIB functions similar
in general usage to "xmgrace" (although not intending to duplicate all the
xmgrace functionality...). From the interface one is able to read in data and
interactively change the plot properties (axes, symbols, lines, and so on).
** NOTE: for this program to work properly it needs to be in a directory
listed in the $PATH environment variable, and also in the $PYTHON_PATH
environment variable. These allow the code to be called from the command
line and from inside python, respectively.
** Note: this code uses python 3, it will not work with python 2.
Various of the functions provided here are not available in the normal
.plot() interface of matplotlib.
The code uses the more general matplotlib functions and not the pyplot
interface for this purpose.
The code needs matplotlib and numpy to function. Numpy loadtxt is used to
read in data.
The code uses TKinter for the widget generation. It needs the "Tk" backend.
One can change the backend right at the start of the code if another such is
needed.
The code also includes some simple FITS image display functionality to have
this in the same file for reference, but that is independent of the main
plotting functionality.
The code assumes that the "times new roman" font is installed along with the
default matplotlib fonts. If this is not the case, one will get a fall-back
font instead (usually "DejaVu Sans"). One can install the Times New Roman
font if this is not already on the system. Due to preferences of the author
the Times New Roman font is the default. If one wishes to change this, search
for 'times new roman' and replace the string by the default that is wanted,
say 'sans-serif' for example. There are commented out lines to make
'sans-serif' the default font, which can be uncommented and used to replace
the instances of 'times new roman' font as the default, should that be needed.
************************************************************
Use from inside Python:
While the code was written to read data from files as a stand-alone
interface, one can also call the routines from inside python and add sets
from the python prompt. The commands needed are as follows, assuming that
data arrays "xvalues" and "yvalues" already exist within the Python session:
>>>> import matplotlib_user_interface as mui
>>>> root, myplot = mui.startup()
>>>> myplot.add_set(xvalues, yvalues)
(repeat for multiple sets, if needed)
>>>> myplot.show_plot()
If one wishes to do all the steps manually the following somewhat longer
set of commands can be used:
(1) import the required packages tkinter and matplotlib_user_interface
>>> import tkinter as Tk
>>> import matplotlib_user_interface as mui
The next step defines a window in tkinter to use for the plotting:
>>> root = Tk.Tk()
This produces a window with nothing in it.
(2) Generate x and y data as numpy arrays as needed
Assume that "xvalues" and "yvalues" hold the data values for a plot. These
need to be numpy arrays and should not be lists.
One simple example would be
>>>> xvalues = numpy.arange(1, 101, 1)
>>>> yvalues = numpy.log10(xvalues * xvalues)
(3) Define the plotting object
>>> myplot = mui.PlotGUI(root)
This fills in the buttons/functions in the window. The functionality is
then available.
(4) Add a set or sets to the plot. Any number of sets up a limit (currently
100) can be added. See the add_set subroutine doc string for the possible
use of error bars in the set. The value self.max_sets determines the maximum
number of sets that can be handled.
>>> myplot.add_set(xvalues, yvalues)
(5) Tell the code to plot or replot. This call can be used as many times
as required.
>>> myplot.show_plot()
The add_set routine takes as input the x and y data values and makes a new
set. Hence one can mix reading in values and generating values inside python
as needed by following the above set of steps. There are additional
parameter values one can pass to this routine, for error values in the data
points.
When running in a script rather than at the python interpreter, one needs
to add
root.mainloop()
after making the call to show_plot with the sets, to keep the window in place.
Otherwise the window will disappear after the show_plot call if nothing else
is being done in the script.
************************************************************
Using the image display functionality
In a similar way one can use the image display functionality from the python
command line. There is a utility routine "showimage" for this purpose.
>>> from astropy.io import fits
>>>> imagefilename = 'test.fits'
>>> image = fits.getdata(imagefilename)
>>> import matplotlib_user_interface as mui
>>> root, myplot = mui.showimage(image, imagefilename, dpi=300)
The file name is optional. The number of dots per inch is also optional.
The default number of dots per inch is 100.
If one wishes to make the calls explicitly they are (parameter self.indpi is
the number of dots per inch):
>>>> root = Tk.Tk()
>>>> plotobject = mui.ImageGUI(root)
>>>> plotobject.image = image
>>>> plotobject.imagefilename = 'some_name.fits'
>>>> plotobject.indpi = 300
>>>> plotobject.make_image_window()
As above, if done in a script one may need to add a call
>>>> root.mainloop()
to keep the window active.
"""
import math
from copy import deepcopy
import sys
import os
import bisect
import tkinter as Tk
import tkinter.ttk
import tkinter.filedialog
import tkinter.simpledialog
import tkinter.messagebox
from tkinter.colorchooser import askcolor
from tkinter.scrolledtext import ScrolledText
import numpy
from numpy.polynomial import polynomial, legendre, laguerre, chebyshev
from scipy.interpolate import UnivariateSpline
import matplotlib
import matplotlib.lines as mlines
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.colors import LogNorm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle, Ellipse, FancyArrow
from matplotlib.ticker import MultipleLocator
import astropy.io.fits as fits
import general_utilities
import object_utilities
import label_utilities
import make_plot
import edit_objects
import save_and_restore_plot
import fits_image_display
import histogram_utilities
import data_set_utilities
import data_set_operations
import plot_flag_utilities
import window_creation
import plot_controls
import non_linear_fitting
# The following are "global" variables with line/marker information from
# matplotlib. These are used, but not changed, in the code in more than one
# place hence I am using single variables here for the values.
matplotlib.use('TkAgg')
# define a background colour for windows
BGCOL = '#F8F8FF'
def startup():
    """Create the plotting window and return its Tk root and GUI object.

    Convenience wrapper for interactive use from the Python prompt::

        >>> import matplotlib_user_interface as mui
        >>> root, myplot = mui.startup()
        >>> myplot.add_set(xvalues, yvalues)
        >>> myplot.show_plot()

    Returns
    -------
    window : Tk.Tk
        The Tkinter root window hosting the plot tool.
    gui : PlotGUI
        The plot GUI object used for all subsequent plotting calls.
    """
    window = Tk.Tk()
    window.title("Plotting Tool")
    gui = PlotGUI(window)
    return window, gui
class PlotGUI(Tk.Frame):
"""
PlotGUI is the object class for the plotting functions.
The PlotGUI class is the object for the plotting window and all assocated
variables. Typical usage on the python command line would be
root = Tk.Tk()
plotobject = PlotGUI(root)
The plotobject would then be used for all the plot functionality.
Parameters
----------
Tk.Frame : A Tkinter root or Toplevel variable that is parent to the
GUI window.
"""
def __init__(self, parent=None, **args):
"""
Initialize variables for the class and start the widget.
This routine sets a few variables for the interface and calls the
main widget function.
Parameters
----------
parent An optional parameter giving the parent TK root window
name. If it is None the window is not created. This
allows one to use the code non-interactively,
although right now that option has not been developed.
**args A possible list of additional arguments. This is
currently not used.
Returns
-------
No value is returned by this routine.
"""
if sys.version_info[0] == 2:
tkinter.messagebox.showinfo(
"Error",
"The code requires Python version 3.")
return
# The following are the default colours and symbols for 10 sets. If
# more than 10 sets are read in, the values repeat but the symbol sizes
# are changed.
self.colourset = ['black', 'blue', 'forestgreen', 'orange', 'red',
'cyan', 'lime', 'brown', 'violet', 'grey',
'select']
self.altcolourset = ['none', 'black', 'blue', 'forestgreen', 'orange',
'red', 'cyan', 'lime', 'brown', 'violet',
'grey', 'select']
self.markerset = ['o', 's', 'v', '^', '<', '>', 'p', '*', '+', 'x']
# This variable marks the number of active data sets.
self.nsets = 0
# The | |
<reponame>AndreaVoltan/MyKratos7.0
import bisect as bi
import numpy as np
import h5py
from KratosMultiphysics import *
from KratosMultiphysics.DEMApplication import *
from KratosMultiphysics.SwimmingDEMApplication import *
from DEM_procedures import KratosPrint as Say
import json
def DeleteDataSet(file_or_group, dset_name):
    """Remove dataset `dset_name` from an HDF5 file/group, if it exists."""
    if dset_name in file_or_group:
        # `del` dispatches to __delitem__, exactly as the h5py API expects
        del file_or_group[dset_name]
def CreateDataset(file_or_group, name, data):
    """Create dataset `name` holding `data`, replacing any previous one."""
    if name in file_or_group:
        del file_or_group[name]
    # infer the element dtype from the first entry; default to float when empty
    element_type = type(data[0]) if len(data) else float
    file_or_group.create_dataset(dtype=element_type, name=name, data=data)
def CreateGroup(file_or_group, name, overwrite_previous=True):
    """Return the group `name` under the file's root, creating it if needed.

    With `overwrite_previous` (the default) an existing group is deleted and
    recreated empty; otherwise the existing group is returned untouched.
    """
    if name not in file_or_group:
        return file_or_group.create_group(name)
    if not overwrite_previous:
        return file_or_group['/' + name]
    del file_or_group['/'][name]
    return file_or_group.create_group(name)
def WriteDataToFile(file_or_group, names, data):
    """Write each datum of `data` under its matching entry in `names`,
    replacing any datasets that already exist."""
    for dataset_name, values in zip(names, data):
        DeleteDataSet(file_or_group, dataset_name)
        file_or_group.create_dataset(name=dataset_name, data=values)
def Index():
    """Yield consecutive integers starting at zero, forever."""
    counter = 0
    while True:
        yield counter
        counter = counter + 1
class FluidHDF5Loader:
    """Stores fluid nodal fields into, or loads them from, an HDF5 file.

    Two modes are selected in ``__init__`` from the project parameters:

    * ``fluid_already_calculated`` is True: the file is opened read-only, the
      node-id permutation and the list of stored time steps are prepared, and
      ``LoadFluid`` interpolates nodal values in time between the two stored
      fluid steps that bracket the requested DEM time.
    * otherwise: the file is created and ``FillFluidDataStep`` appends one
      group per fluid time step with velocity (and optionally pressure and
      the full velocity gradient) datasets.
    """

    def __init__(self, fluid_model_part, particles_model_part, pp, main_path):
        """Prepare either the read path (pre-run fluid) or the write path.

        Parameters
        ----------
        fluid_model_part : Kratos ModelPart holding the fluid nodes.
        particles_model_part : Kratos ModelPart holding the DEM particles.
        pp : project-parameters object (exposes ``CFD_DEM`` settings).
        main_path : directory prefix for the HDF5 file path.
        """
        self.n_nodes = len(fluid_model_part.Nodes)
        self.shape = (self.n_nodes,)
        self.store_pressure = pp.CFD_DEM["store_fluid_pressure_option"].GetBool()
        self.store_gradient = pp.CFD_DEM["store_full_gradient_option"].GetBool()
        self.load_derivatives = pp.CFD_DEM["load_derivatives"].GetBool()
        self.there_are_more_steps_to_load = True
        self.main_path = main_path
        self.pp = pp
        self.fluid_model_part = fluid_model_part
        self.disperse_phase_model_part = particles_model_part
        # 3 velocity components, +1 for pressure, +9 for the velocity gradient
        number_of_variables = 3
        if pp.CFD_DEM["store_fluid_pressure_option"].GetBool():
            number_of_variables += 1
        if self.load_derivatives or self.store_gradient:
            number_of_variables += 9
        # (n_nodes, number_of_variables): one column per stored field component
        self.extended_shape = self.shape + (number_of_variables, )
        self.file_name = self.GetFileName()
        self.file_path = main_path + self.file_name
        if pp.CFD_DEM["fluid_already_calculated"].GetBool():
            # read mode: build the node-id permutation and the time-step index
            with h5py.File(self.file_path, 'r') as f:
                nodes_ids = np.array([node_id for node_id in f['nodes'][:, 0]])
                self.permutations = np.array(range(len(nodes_ids)))
                # permutation that reorders [0, 1, ..., n_nodes-1] so stored
                # rows line up with nodes sorted by increasing node id
                self.permutations = np.array([x for (y, x) in sorted(zip(nodes_ids, self.permutations))])
                self.CheckTimes(f)
            # buffers for the two stored fluid steps bracketing the DEM time
            self.data_array_past = np.zeros(self.extended_shape)
            self.data_array_future = np.zeros(self.extended_shape)
            self.time_index_past = - 1  # sentinel: nothing loaded yet
            self.time_index_future = - 1
            viscosity = 1e-6
            density = 1000. # BIG TODO: READ THIS FROM NODES!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
            for node in self.fluid_model_part.Nodes:
                node.SetSolutionStepValue(VISCOSITY, viscosity)
                node.SetSolutionStepValue(DENSITY, density)
        else:
            # write mode: create the file and store global attributes
            self.dtype = np.float64
            if pp.CFD_DEM["store_fluid_in_single_precision"].GetBool():
                self.dtype = np.float32
            self.compression_type = 'gzip'
            # take viscosity/density from the first node (assumed uniform —
            # TODO confirm this holds for the intended use cases)
            for node in self.fluid_model_part.Nodes:
                viscosity = node.GetSolutionStepValue(VISCOSITY)
                density = node.GetSolutionStepValue(DENSITY)
                break
            with h5py.File(self.file_path, 'w') as f:
                f.attrs['kinematic viscosity'] = viscosity
                f.attrs['time step'] = pp.Dt
                f.attrs['density'] = density
                f.attrs['solver type'] = pp.FluidSolverConfiguration.solver_type
                f.attrs['linear system solver type'] = pp.FluidSolverConfiguration.linear_solver_config.solver_type
                f.attrs['use orthogonal subscales'] = bool(pp.FluidSolverConfiguration.oss_switch)
                # one row per node: (Id, X, Y, Z)
                nodes = np.array([(node.Id, node.X, node.Y, node.Z) for node in fluid_model_part.Nodes])
                f.create_dataset(name = 'nodes', compression = self.compression_type, data = nodes, dtype = np.float64)
        self.last_time = 0.0
        # scratch buffer holding the time-interpolated nodal values
        self.current_data_array = np.zeros(self.extended_shape)

    def GetFileName(self):
        """Return the pre-run fluid HDF5 file name from the project parameters."""
        return self.pp.CFD_DEM.AddEmptyValue("prerun_fluid_file_name").GetString()

    def CheckTimes(self, hdf5_file):
        """Collect and sort the stored time-step group names and their times.

        Raises
        ------
        ValueError
            If the file stores fewer than two time steps (two are needed
            for interpolation).
        """
        # a top-level group is a time step iff it carries a 'time' attribute
        self.times_str = list([str(key) for key in hdf5_file.keys() if 'time' in hdf5_file['/' + key].attrs])
        self.times = np.array([float(hdf5_file[key].attrs['time']) for key in self.times_str])
        if len(self.times) < 2:
            raise ValueError("\nThere are only " + str(len(self.times)) + ' time steps stored in the hdf5 file. At least two are needed.\n')
        # sort the group names by their time value, then the times themselves
        self.times_str = np.array([x for (y, x) in sorted(zip(self.times, self.times_str))])
        self.times = sorted(self.times)
        # fluid step size, taken from the last two stored times
        self.dt = self.times[-1] - self.times[-2]

    def GetTimeIndicesAndWeights(self, current_time):
        """Return (index_past, alpha_past, index_future, alpha_future).

        The two indices bracket ``current_time`` in ``self.times``; the
        alphas are the linear interpolation weights. Past the last stored
        time, both indices collapse onto the last step and loading stops.
        """
        index_future = bi.bisect(self.times, current_time)
        index_past = max(0, index_future - 1)
        time_past = self.times[index_past]
        if index_future == len(self.times): # we are beyond the last time
            alpha_past = 0
            alpha_future = 1
            index_future = index_past
            self.there_are_more_steps_to_load = False
        else:
            alpha_future = max(0, (current_time - time_past) / self.dt)
            alpha_past = 1.0 - alpha_future
        return index_past, alpha_past, index_future, alpha_future

    def CanLoadMoreSteps(self):
        """Return False once the requested time has passed the last stored step."""
        return self.there_are_more_steps_to_load

    def FillUpSingleDataset(self, name, variable, variable_index_in_temp_array):
        """Write one nodal field (`variable`) into dataset `name` in the file,
        also caching it in column `variable_index_in_temp_array` of the
        scratch array."""
        with h5py.File(self.file_path, 'r+') as f:
            f.create_dataset(name, compression = self.compression_type, shape = self.shape, dtype = self.dtype)
            for i_node, node in enumerate(self.fluid_model_part.Nodes):
                self.current_data_array[i_node, variable_index_in_temp_array] = node.GetSolutionStepValue(variable)
            f[name][:] = self.current_data_array[:, variable_index_in_temp_array]

    def FillFluidDataStep(self):
        """Append one group (named after the current time) with the configured
        nodal fields; skipped if this time was already written."""
        time = self.fluid_model_part.ProcessInfo[TIME]
        name = str(time)
        with h5py.File(self.file_path) as f:
            f.create_group(name = name)
            f[name].attrs['time'] = time
        index = Index()
        if not self.last_time == time:
            self.FillUpSingleDataset(name + '/vx', VELOCITY_X, next(index))
            self.FillUpSingleDataset(name + '/vy', VELOCITY_Y, next(index))
            self.FillUpSingleDataset(name + '/vz', VELOCITY_Z, next(index))
            if self.store_pressure:
                self.FillUpSingleDataset(name + '/p', PRESSURE, next(index))
            if self.store_gradient:
                # nine components of the velocity gradient tensor
                self.FillUpSingleDataset(name + '/dvxx', VELOCITY_X_GRADIENT_X, next(index))
                self.FillUpSingleDataset(name + '/dvxy', VELOCITY_X_GRADIENT_Y, next(index))
                self.FillUpSingleDataset(name + '/dvxz', VELOCITY_X_GRADIENT_Z, next(index))
                self.FillUpSingleDataset(name + '/dvyx', VELOCITY_Y_GRADIENT_X, next(index))
                self.FillUpSingleDataset(name + '/dvyy', VELOCITY_Y_GRADIENT_Y, next(index))
                self.FillUpSingleDataset(name + '/dvyz', VELOCITY_Y_GRADIENT_Z, next(index))
                self.FillUpSingleDataset(name + '/dvzx', VELOCITY_Z_GRADIENT_X, next(index))
                self.FillUpSingleDataset(name + '/dvzy', VELOCITY_Z_GRADIENT_Y, next(index))
                self.FillUpSingleDataset(name + '/dvzz', VELOCITY_Z_GRADIENT_Z, next(index))
            self.last_time = time

    def ConvertComponent(self, f, component_name):
        """Read dataset `component_name` and reorder it with the node-id
        permutation computed in __init__.

        NOTE(review): all four branches currently read the dataset
        identically; the if/elif split looks like a leftover from
        per-component conversions — confirm before simplifying.
        """
        if '/vx' in component_name:
            read_values = f[component_name][:,]
        elif '/vy' in component_name:
            read_values = f[component_name][:,]
        elif '/vz' in component_name:
            read_values = f[component_name][:,]
        else:
            read_values = f[component_name][:,]
        return read_values[self.permutations]

    def UpdateFluidVariable(self,
                            name,
                            variable,
                            variable_index,
                            must_load_future_values_from_database,
                            alpha_past,
                            alpha_future):
        """Interpolate one field between the past/future buffers and write it
        onto the fluid nodes; optionally refresh the future buffer from file
        first."""
        if must_load_future_values_from_database:
            with h5py.File(self.file_path, 'r') as f:
                self.data_array_future[:, variable_index] = self.ConvertComponent(f, name)
        # linear interpolation in time between the two stored steps
        self.current_data_array[:, variable_index] = (
            alpha_past * self.data_array_past[:, variable_index]
            + alpha_future * self.data_array_future[:, variable_index])
        for i, node in enumerate(self.fluid_model_part.Nodes):
            node.SetSolutionStepValue(variable, self.current_data_array[i, variable_index])

    def GetDatasetName(self, time_index_future):
        """Return the HDF5 group name of the stored step at the given index."""
        return self.times_str[time_index_future]

    def LoadFluid(self, fluid_time):
        """Set the fluid nodal values at `fluid_time` by interpolating between
        the two stored fluid steps that bracket it."""
        Say('\nLoading fluid from hdf5 file...')
        # identify the two fluid time steps surrounding the current DEM step
        # and the corresponding interpolation weights
        time_index_past, alpha_past, time_index_future, alpha_future = self.GetTimeIndicesAndWeights(fluid_time)
        future_step_dataset_name = self.GetDatasetName(time_index_future)
        # the buffers must be refreshed whenever either bracketing step changed
        must_load_from_database = self.time_index_past != time_index_past or self.time_index_future != time_index_future
        if must_load_from_database: # the current time is not between the two already loaded time steps
            # the old future becomes the new past
            self.data_array_past, self.data_array_future = self.data_array_future, self.data_array_past
        index = Index()
        self.UpdateFluidVariable(future_step_dataset_name + '/vx', VELOCITY_X, next(index), must_load_from_database, alpha_past, alpha_future)
        self.UpdateFluidVariable(future_step_dataset_name + '/vy', VELOCITY_Y, next(index), must_load_from_database, alpha_past, alpha_future)
        self.UpdateFluidVariable(future_step_dataset_name + '/vz', VELOCITY_Z, next(index), must_load_from_database, alpha_past, alpha_future)
        if self.store_pressure:
            self.UpdateFluidVariable(future_step_dataset_name + '/p', PRESSURE, next(index), must_load_from_database, alpha_past, alpha_future)
        if self.load_derivatives:
            self.UpdateFluidVariable(future_step_dataset_name + '/dvxx', VELOCITY_X_GRADIENT_X, next(index), must_load_from_database, alpha_past, alpha_future)
            self.UpdateFluidVariable(future_step_dataset_name + '/dvxy', VELOCITY_X_GRADIENT_Y, next(index), must_load_from_database, alpha_past, alpha_future)
            self.UpdateFluidVariable(future_step_dataset_name + '/dvxz', VELOCITY_X_GRADIENT_Z, next(index), must_load_from_database, alpha_past, alpha_future)
            self.UpdateFluidVariable(future_step_dataset_name + '/dvyx', VELOCITY_Y_GRADIENT_X, next(index), must_load_from_database, alpha_past, alpha_future)
            self.UpdateFluidVariable(future_step_dataset_name + '/dvyy', VELOCITY_Y_GRADIENT_Y, next(index), must_load_from_database, alpha_past, alpha_future)
            self.UpdateFluidVariable(future_step_dataset_name + '/dvyz', VELOCITY_Y_GRADIENT_Z, next(index), must_load_from_database, alpha_past, alpha_future)
            self.UpdateFluidVariable(future_step_dataset_name + '/dvzx', VELOCITY_Z_GRADIENT_X, next(index), must_load_from_database, alpha_past, alpha_future)
            self.UpdateFluidVariable(future_step_dataset_name + '/dvzy', VELOCITY_Z_GRADIENT_Y, next(index), must_load_from_database, alpha_past, alpha_future)
            self.UpdateFluidVariable(future_step_dataset_name + '/dvzz', VELOCITY_Z_GRADIENT_Z, next(index), must_load_from_database, alpha_past, alpha_future)
        if self.time_index_past == - 1: # it is the first upload
            self.data_array_past[:] = self.data_array_future[:]
        self.time_index_past = time_index_past
        self.time_index_future = time_index_future
        Say('Finished loading fluid from hdf5 file.\n')
class ParticleHistoryLoader:
    def __init__(self, particles_model_part, particle_watcher, pp, main_path):
        """Set up paths and bookkeeping for recording particle histories.

        Parameters
        ----------
        particles_model_part : Kratos ModelPart holding the DEM particles.
        particle_watcher : object reporting newly created particles
            (see UpdateListOfAllParticles).
        pp : project-parameters object (exposes ``CFD_DEM`` settings).
        main_path : directory in which the HDF5 files are written.
        """
        self.pp = pp
        self.model_part = particles_model_part
        self.particle_watcher = particle_watcher
        self.main_path = main_path
        # registry of every particle observed, kept in one HDF5 file
        self.particles_list_file_name = self.main_path + '/all_particles.hdf5'
        self.prerun_fluid_file_name = pp.CFD_DEM.AddEmptyValue("prerun_fluid_file_name").GetString()
        self.CreateAllParticlesFileIfNecessary()
        # run identifier; assigned later when a snapshot is recorded
        self.run_code = None
    def CreateAllParticlesFileIfNecessary(self):
        """Initialize the all-particles HDF5 file from the current model part.

        Only runs when a full particle history watcher is configured (the
        option is not 'Empty'). Nodes flagged BLOCKED are excluded —
        presumably they are not real particles (TODO confirm). Initial
        times are recorded as 0.0 for all existing particles.
        """
        if not self.pp.CFD_DEM["full_particle_history_watcher"].GetString() == 'Empty':
            nodes = [node for node in self.model_part.Nodes if node.IsNot(BLOCKED)]
            Ids = np.array([node.Id for node in nodes], dtype = int)
            X0s = np.array([node.X0 for node in nodes])
            Y0s = np.array([node.Y0 for node in nodes])
            Z0s = np.array([node.Z0 for node in nodes])
            radii = np.array([node.GetSolutionStepValue(RADIUS) for node in nodes])
            times = np.array([0.0 for node in nodes])
            # 'w' truncates: the registry is rebuilt from scratch
            with h5py.File(self.particles_list_file_name, 'w') as f:
                WriteDataToFile(file_or_group = f,
                                names = ['Id', 'X0', 'Y0', 'Z0', 'RADIUS', 'TIME'],
                                data = [Ids, X0s, Y0s, Z0s, radii, times])
    def UpdateListOfAllParticles(self):
        """Append the particles newly reported by the watcher to the registry.

        The watcher fills the six lists in place; each stored dataset is
        then extended by concatenating the new values onto the old ones
        and rewritten.
        """
        Ids, X0s, Y0s, Z0s, radii, times = [], [], [], [], [], []
        self.particle_watcher.GetNewParticlesData(Ids, X0s, Y0s, Z0s, radii, times)
        names = ['Id', 'X0', 'Y0', 'Z0', 'RADIUS', 'TIME']
        data = [Ids, X0s, Y0s, Z0s, radii, times]
        new_data = []
        with h5py.File(self.particles_list_file_name) as f:
            for name, datum in zip(names, data):
                old_datum = f['/' + name][:]
                new_datum = np.concatenate((old_datum, datum))
                new_data.append(new_datum)
            WriteDataToFile(file_or_group = f, names = names, data = new_data)
# This function records the particles initial positions in an hdf5 file.
# It records both the particles that fall within an input bounding box
# and all the particles in the model part.
def RecordParticlesInBox(self, bounding_box = BoundingBoxRule()):
self.bounding_box = bounding_box
time = self.model_part.ProcessInfo[TIME]
def IsInside(node):
is_a_particle = node.IsNot(BLOCKED)
is_inside = self.bounding_box.CheckIfRuleIsMet(time, node.X, node.Y, node.Z)
return is_a_particle and is_inside
nodes_inside = [node for node in self.model_part.Nodes if IsInside(node)]
Ids_inside = np.array([node.Id for node in nodes_inside])
X0s_inside = np.array([node.X0 for node in nodes_inside])
Y0s_inside = np.array([node.Y0 for node in nodes_inside])
Z0s_inside = np.array([node.Z0 for node in nodes_inside])
radii_inside = np.array([node.GetSolutionStepValue(RADIUS) for node in nodes_inside])
if len(radii_inside):
mean_radius = sum(radii_inside) / len(radii_inside)
else:
mean_radius = 1.0
with h5py.File(self.main_path + '/particles_snapshots.hdf5') as f:
prerun_fluid_file_name = self.prerun_fluid_file_name.split('/')[- 1]
current_fluid = CreateGroup(f, prerun_fluid_file_name, overwrite_previous = False)
# snapshot_name = 't=' + str(round(time, 3)) + '_RADIUS=' + str(round(mean_radius, 4)) + '_in_box'
snapshot_name = str(len(current_fluid.items()) + 1)
self.run_code = prerun_fluid_file_name.strip('.hdf5') + '_' + snapshot_name
snapshot = CreateGroup(current_fluid, snapshot_name)
snapshot.attrs['time'] = time
snapshot.attrs['particles_nondimensional_radius'] = mean_radius
# storing the input parameters for this run, the one corresponding
# to the current pre-calculated fluid
for k, v in ((k, v) for k, v in json.loads(self.pp.CFD_DEM.WriteJsonString()).items() if | |
<gh_stars>1-10
import numpy as np
import torch
class BoxCoder(object):
    def __init__(self, weights1=(10., 10., 10., 10., 15.), weights2=(3., 3., 1.5, 1.5)):
        """Store the regression-target scaling weights.

        Parameters
        ----------
        weights1 : 5-tuple (wx, wy, ww, wh, wt) used by encode/decode/
            lossdecode/xywhdecode for rotated-box regression.
        weights2 : 4-tuple (wx, wy, wt1, wt2) used by landmarkencode/
            landmarkdecode for landmark (point + two angles) regression.
        """
        self.weights1 = weights1
        self.weights2 = weights2
def encode(self, ex_rois, gt_rois):
ex_widths = ex_rois[:, 2] - ex_rois[:, 0]
ex_heights = ex_rois[:, 3] - ex_rois[:, 1]
ex_widths = torch.clamp(ex_widths, min=1)
ex_heights = torch.clamp(ex_heights, min=1)
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
ex_thetas = ex_rois[:, 4]
gt_widths = gt_rois[:, 2] - gt_rois[:, 0]
gt_heights = gt_rois[:, 3] - gt_rois[:, 1]
gt_widths = torch.clamp(gt_widths, min=1)
gt_heights = torch.clamp(gt_heights, min=1)
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
gt_thetas = gt_rois[:, 4]
wx, wy, ww, wh, wt = self.weights1
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets_dt = wt * (torch.tan(gt_thetas / 180.0 * np.pi) - torch.tan(ex_thetas / 180.0 * np.pi))
targets = torch.stack(
(targets_dx, targets_dy, targets_dw, targets_dh, targets_dt), dim=1
)
return targets
def decode(self, boxes, deltas, mode='xywht'):
widths = boxes[:, :, 2] - boxes[:, :, 0]
heights = boxes[:, :, 3] - boxes[:, :, 1]
widths = torch.clamp(widths, min=1)
heights = torch.clamp(heights, min=1)
ctr_x = boxes[:, :, 0] + 0.5 * widths
ctr_y = boxes[:, :, 1] + 0.5 * heights
thetas = boxes[:, :, 4]
wx, wy, ww, wh, wt = self.weights1
dx = deltas[:, :, 0] / wx
dy = deltas[:, :, 1] / wy
dw = deltas[:, :, 2] / ww
dh = deltas[:, :, 3] / wh
dt = deltas[:, :, 4] / wt
pred_ctr_x = ctr_x if 'x' not in mode else ctr_x + dx * widths
pred_ctr_y = ctr_y if 'y' not in mode else ctr_y + dy * heights
pred_w = widths if 'w' not in mode else torch.exp(dw) * widths
pred_h = heights if 'h' not in mode else torch.exp(dh) * heights
pred_t = thetas if 't' not in mode else torch.atan(torch.tan(thetas / 180.0 * np.pi) + dt) / np.pi * 180.0
pred_boxes_x1 = pred_ctr_x - 0.5 * pred_w
pred_boxes_y1 = pred_ctr_y - 0.5 * pred_h
pred_boxes_x2 = pred_ctr_x + 0.5 * pred_w
pred_boxes_y2 = pred_ctr_y + 0.5 * pred_h
pred_boxes = torch.stack([
pred_boxes_x1,
pred_boxes_y1,
pred_boxes_x2,
pred_boxes_y2,
pred_t], dim=2
)
return pred_boxes
    def landmarkencode(self, ex_rois, gt_rois):
        """Compute landmark regression targets (point + two angle offsets).

        Parameters
        ----------
        ex_rois : (N, 5) anchors as (x1, y1, x2, y2, theta_degrees).
        gt_rois : (N, 4) ground truth; columns 0-1 look like a target point
            (cx, cy) and columns 2-3 like two target angles in degrees —
            TODO confirm against the data pipeline.

        Returns
        -------
        (N, 4) targets (dx, dy, dt1, dt2) scaled by ``weights2``.
        """
        ex_widths = ex_rois[:, 2] - ex_rois[:, 0]
        ex_heights = ex_rois[:, 3] - ex_rois[:, 1]
        ex_widths = torch.clamp(ex_widths, min=1)
        ex_heights = torch.clamp(ex_heights, min=1)
        ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
        ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
        # anchor angle, rotated by 90 deg for tall boxes, then wrapped into [0, 180)
        ex_theta = ex_rois[:, 4] * (ex_heights <= ex_widths) + (ex_rois[:, 4] + 90.0) * (ex_heights > ex_widths)
        ex_theta = ex_theta * (ex_theta >= 0) + (ex_theta + 180.0) * (ex_theta < 0)
        ex_theta = ex_theta * (ex_theta < 180.0) + (ex_theta - 180.0) * (ex_theta >= 180.0)
        gt_ctr_x = gt_rois[:, 0]
        gt_ctr_y = gt_rois[:, 1]
        # order the two gt angles (mid1 <= mid2), then swap when they span
        # more than 180 deg, and wrap each into [0, 180)
        mid1 = gt_rois[:, 2] * (gt_rois[:, 2] <= gt_rois[:, 3]) + gt_rois[:, 3] * (gt_rois[:, 2] > gt_rois[:, 3])
        mid2 = gt_rois[:, 3] * (gt_rois[:, 2] <= gt_rois[:, 3]) + gt_rois[:, 2] * (gt_rois[:, 2] > gt_rois[:, 3])
        gt_theta1 = mid1 * (abs(mid2 - mid1) <= 180.0) + mid2 * (abs(mid2 - mid1) > 180.0)
        gt_theta2 = mid2 * (abs(mid2 - mid1) <= 180.0) + mid1 * (abs(mid2 - mid1) > 180.0)
        gt_theta1 = gt_theta1 * (gt_theta1 >= 0) + (gt_theta1 + 180.0) * (gt_theta1 < 0)
        gt_theta1 = gt_theta1 * (gt_theta1 < 180.0) + (gt_theta1 - 180.0) * (gt_theta1 >= 180.0)
        gt_theta2 = gt_theta2 * (gt_theta2 >= 0) + (gt_theta2 + 180.0) * (gt_theta2 < 0)
        gt_theta2 = gt_theta2 * (gt_theta2 < 180.0) + (gt_theta2 - 180.0) * (gt_theta2 >= 180.0)
        delta_t1 = gt_theta1 - ex_theta
        delta_t2 = gt_theta2 - ex_theta
        # nudge deltas away from +/-90 deg, where tan() below would blow up
        delta_t1[torch.abs(delta_t1 - 90.0) <= 0.1] = delta_t1[torch.abs(delta_t1 - 90.0) <= 0.1] - 0.1
        delta_t2[torch.abs(delta_t2 - 90.0) <= 0.1] = delta_t2[torch.abs(delta_t2 - 90.0) <= 0.1] - 0.1
        delta_t1[torch.abs(delta_t1 + 90.0) <= 0.1] = delta_t1[torch.abs(delta_t1 + 90.0) <= 0.1] - 0.1
        delta_t2[torch.abs(delta_t2 + 90.0) <= 0.1] = delta_t2[torch.abs(delta_t2 + 90.0) <= 0.1] - 0.1
        wx, wy, wt1, wt2 = self.weights2
        targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
        targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
        targets_dt1 = wt1 * (torch.tan(delta_t1 / 180.0 * np.pi))
        targets_dt2 = wt2 * (torch.tan(delta_t2 / 180.0 * np.pi))
        targets = torch.stack(
            (targets_dx, targets_dy, targets_dt1, targets_dt2), dim=1
        )
        return targets
    def landmarkdecode(self, boxes, deltas):
        """Decode landmark deltas into a point and two angles per anchor.

        Parameters
        ----------
        boxes : (B, N, 5) anchors as (x1, y1, x2, y2, theta_degrees).
        deltas : (B, N, 4) predicted (dx, dy, dt1, dt2) scaled by ``weights2``.

        Returns
        -------
        (B, N, 4) predictions (x, y, theta1_degrees, theta2_degrees).
        """
        widths = boxes[:, :, 2] - boxes[:, :, 0]
        heights = boxes[:, :, 3] - boxes[:, :, 1]
        widths = torch.clamp(widths, min=1)
        heights = torch.clamp(heights, min=1)
        ctr_x = boxes[:, :, 0] + 0.5 * widths
        ctr_y = boxes[:, :, 1] + 0.5 * heights
        # anchor angle, rotated by 90 deg for tall boxes, wrapped into [0, 180)
        # (mirrors the normalization performed in landmarkencode)
        theta = boxes[:, :, 4] * (heights <= widths) + (boxes[:, :, 4] + 90.0) * (heights > widths)
        theta = theta * (theta >= 0) + (theta + 180.0) * (theta < 0)
        theta = theta * (theta < 180.0) + (theta - 180.0) * (theta >= 180.0)
        wx, wy, wt1, wt2 = self.weights2
        dx = deltas[:, :, 0] / wx
        dy = deltas[:, :, 1] / wy
        dt1 = deltas[:, :, 2] / wt1
        dt2 = deltas[:, :, 3] / wt2
        pred_x = ctr_x + dx * widths
        pred_y = ctr_y + dy * heights
        # reference direction from predicted point back to the anchor centre,
        # used to pick the angle branch (theta vs theta - 180)
        pred_standard = torch.atan2(ctr_y-pred_y, ctr_x-pred_x) / np.pi * 180.0
        theta = theta * (abs(theta - pred_standard) <= 90.0) + theta * (abs(theta - pred_standard) > 270.0) + (
            theta - 180.0) * (abs(theta - pred_standard) > 90.0) * (abs(theta - pred_standard) <= 270.0)
        # invert the tan() applied in landmarkencode, then fold into (-90, 90]
        pred_delta1 = torch.atan(dt1) / np.pi * 180.0
        pred_delta2 = torch.atan(dt2) / np.pi * 180.0
        pred_delta1 = pred_delta1 * (abs(pred_delta1) <= 90.0) + (pred_delta1 - 180.0) * (abs(pred_delta1) > 90.0)
        pred_delta2 = pred_delta2 * (abs(pred_delta2) <= 90.0) + (pred_delta2 - 180.0) * (abs(pred_delta2) > 90.0)
        pred_theta1 = theta + pred_delta1
        pred_theta2 = theta + pred_delta2
        pred_boxes = torch.stack([
            pred_x,
            pred_y,
            pred_theta1,
            pred_theta2], dim=2
        )
        return pred_boxes
def lossdecode(self, boxes, deltas):
widths = boxes[:, 2] - boxes[:, 0]
heights = boxes[:, 3] - boxes[:, 1]
widths = torch.clamp(widths, min=1)
heights = torch.clamp(heights, min=1)
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
thetas = boxes[:, 4]
wx, wy, ww, wh, wt = self.weights1
dx = deltas[:, 0] / wx
dy = deltas[:, 1] / wy
dw = deltas[:, 2] / ww
dh = deltas[:, 3] / wh
dt = deltas[:, 4] / wt
pred_ctr_x = ctr_x + dx * widths
pred_ctr_y = ctr_y + dy * heights
pred_w = torch.exp(dw) * widths
pred_h = torch.exp(dh) * heights
pred_t = torch.atan(torch.tan(thetas / 180.0 * np.pi) + dt) / np.pi * 180.0
pred_boxes_x1 = pred_ctr_x - 0.5 * pred_w
pred_boxes_y1 = pred_ctr_y - 0.5 * pred_h
pred_boxes_x2 = pred_ctr_x + 0.5 * pred_w
pred_boxes_y2 = pred_ctr_y + 0.5 * pred_h
pred_boxes = torch.stack([
pred_boxes_x1,
pred_boxes_y1,
pred_boxes_x2,
pred_boxes_y2,
pred_t], dim=1
)
return pred_boxes
    def xywhdecode(self, boxes, deltas):
        """Decode deltas for unbatched (N, 5) anchors into centre form.

        Returns (N, 5) predictions (cx, cy, w, h, t).

        NOTE(review): unlike decode()/lossdecode(), the angle here is left
        as torch.atan(...) output in radians — there is no ``/ np.pi * 180.0``
        conversion back to degrees. Confirm this asymmetry is intentional
        for the consumers of this method.
        """
        widths = boxes[:, 2] - boxes[:, 0]
        heights = boxes[:, 3] - boxes[:, 1]
        widths = torch.clamp(widths, min=1)
        heights = torch.clamp(heights, min=1)
        ctr_x = boxes[:, 0] + 0.5 * widths
        ctr_y = boxes[:, 1] + 0.5 * heights
        thetas = boxes[:, 4]
        wx, wy, ww, wh, wt = self.weights1
        # undo the target scaling applied at encode time
        dx = deltas[:, 0] / wx
        dy = deltas[:, 1] / wy
        dw = deltas[:, 2] / ww
        dh = deltas[:, 3] / wh
        dt = deltas[:, 4] / wt
        pred_ctr_x = ctr_x + dx * widths
        pred_ctr_y = ctr_y + dy * heights
        pred_w = torch.exp(dw) * widths
        pred_h = torch.exp(dh) * heights
        pred_t = torch.atan(torch.tan(thetas / 180.0 * np.pi) + dt)
        pred_xywhtboxes = torch.stack([
            pred_ctr_x,
            pred_ctr_y,
            pred_w,
            pred_h,
            pred_t], dim=1
        )
        return pred_xywhtboxes
def lmr5pangle(self, anchors, deltas):
wx, wy, ww, wh, wt = self.weights1
| |
import numpy as np
from confidenceMapUtil import FDRutil
import argparse, math, os, sys
from argparse import RawTextHelpFormatter
import time
def compute_padding_average(vol, mask):
    """Mean intensity of `vol` outside the (binarized) mask region."""
    binary_mask = (mask > 0.5).astype(np.int8)
    # masked_array hides voxels where the mask is nonzero, so the mean runs
    # over the padding (outside-mask) region only
    return np.mean(np.ma.masked_array(vol, binary_mask))
def pad_or_crop_volume(vol, dim_pad=None, pad_value=None, crop_volume=False):
    """Return `vol` padded (or centre-cropped) to the target shape `dim_pad`.

    Parameters
    ----------
    vol : 3-D numpy array.
    dim_pad : target shape (rounded to int); None returns `vol` unchanged.
    pad_value : constant used for padding; defaults to 0.
    crop_volume : force cropping; it is also forced automatically whenever
        any target dimension is not strictly larger than the input.

    Returns
    -------
    numpy array of shape `dim_pad` (or `vol` itself when `dim_pad` is None).

    Notes
    -----
    Fix: the original used `== None` comparisons (PEP 8 E711); identity
    checks with `is None` are the correct idiom and avoid surprises with
    array broadcasting on `dim_pad == None`.
    """
    if dim_pad is None:
        return vol
    dim_pad = np.round(np.array(dim_pad)).astype('int')
    if pad_value is None:
        pad_value = 0
    # if the target is not strictly larger in every dimension, crop instead
    if (dim_pad[0] <= vol.shape[0] or dim_pad[1] <= vol.shape[1]
            or dim_pad[2] <= vol.shape[2]):
        crop_volume = True
    if crop_volume:
        # centre crop, one axis at a time; the % 2 term keeps odd sizes exact
        crop_vol = vol[int(vol.shape[0]/2-dim_pad[0]/2):int(vol.shape[0]/2+dim_pad[0]/2+dim_pad[0]%2), :, :]
        crop_vol = crop_vol[:, int(vol.shape[1]/2-dim_pad[1]/2):int(vol.shape[1]/2+dim_pad[1]/2+dim_pad[1]%2), :]
        crop_vol = crop_vol[:, :, int(vol.shape[2]/2-dim_pad[2]/2):int(vol.shape[2]/2+dim_pad[2]/2+dim_pad[2]%2)]
        return crop_vol
    # centre pad with the constant pad_value, one axis at a time
    pad_vol = np.pad(vol, ((int(dim_pad[0]/2-vol.shape[0]/2), int(dim_pad[0]/2-vol.shape[0]/2+dim_pad[0]%2)), (0,0), (0,0) ), 'constant', constant_values=(pad_value,))
    pad_vol = np.pad(pad_vol, ((0,0), (int(dim_pad[1]/2-vol.shape[1]/2), int(dim_pad[1]/2-vol.shape[1]/2+dim_pad[1]%2) ), (0,0)), 'constant', constant_values=(pad_value,))
    pad_vol = np.pad(pad_vol, ((0,0), (0,0), (int(dim_pad[2]/2-vol.shape[2]/2), int(dim_pad[2]/2-vol.shape[2]/2+dim_pad[2]%2))), 'constant', constant_values=(pad_value,))
    return pad_vol
def check_for_window_bleeding(mask, wn):
    """Return True when the masked region lies closer than ``wn/2`` voxels
    to any edge of the volume, i.e. a sliding window of size *wn* centered
    on a masked voxel would "bleed" outside the map.
    """
    locs, _, map_shape = get_xyz_locs_and_indices_after_edge_cropping_and_masking(mask, 0)
    zs, ys, xs = locs.T
    nk, nj, ni = map_shape
    half = wn / 2

    bleeds_x = xs.min() < half or xs.max() > (ni - half)
    bleeds_y = ys.min() < half or ys.max() > (nj - half)
    bleeds_z = zs.min() < half or zs.max() > (nk - half)
    return bool(bleeds_x or bleeds_y or bleeds_z)
def get_xyz_locs_and_indices_after_edge_cropping_and_masking(mask, wn):
    """Return coordinates and flat indices of masked voxels that lie at least
    ``wn/2`` voxels away from every edge of the volume.

    Parameters
    ----------
    mask : ndarray
        3D mask; nonzero voxels are considered "inside".
    wn : int
        Window size; a border of ``wn/2`` voxels is excluded on every side.

    Returns
    -------
    tuple
        ``(locs, flat_indices, mask.shape)`` where ``locs`` is an (N, 3) array.
        NOTE: despite the name, columns are stacked in (k, j, i) i.e.
        (z, y, x) order, matching how callers unpack them.

    Fix: ``dtype=np.bool`` — a deprecated alias removed in NumPy 1.24, which
    raises AttributeError there — replaced with the builtin ``bool``;
    identical behavior on older NumPy versions.
    """
    mask = np.copy(mask)
    nk, nj, ni = mask.shape
    kk, jj, ii = np.indices(mask.shape)
    kk_flat = kk.ravel()
    jj_flat = jj.ravel()
    ii_flat = ii.ravel()

    mask_bin = np.array(mask.ravel(), dtype=bool)
    indices = np.arange(mask.size)
    masked_indices = indices[mask_bin]
    # Keep only voxels whose every coordinate is in [wn/2, dim - wn/2).
    cropped_indices = indices[(wn / 2 <= kk_flat) & (kk_flat < (nk - wn / 2)) &
                              (wn / 2 <= jj_flat) & (jj_flat < (nj - wn / 2)) &
                              (wn / 2 <= ii_flat) & (ii_flat < (ni - wn / 2))]
    cropp_n_mask_ind = np.intersect1d(masked_indices, cropped_indices)
    xyz_locs = np.column_stack((kk_flat[cropp_n_mask_ind], jj_flat[cropp_n_mask_ind], ii_flat[cropp_n_mask_ind]))
    return xyz_locs, cropp_n_mask_ind, mask.shape
def prepare_mask_and_maps_for_scaling(emmap, modelmap, apix, wn_locscale, windowSize, method, locResMap, noiseBox):
    """Derive the mask, window sizes and option defaults for a LocScale run.

    Builds a spherical mask for cubic volumes (all-ones otherwise), resolves
    default window sizes from the pixel size *apix*, and pads the input maps
    when the masked region would make sliding windows bleed over the edges.

    Returns a 10-tuple:
    (emmap, modelmap, mask, wn, wn_locscale, window_bleed_and_pad, method,
     locFilt, locResMap, boxCoord).
    """
    mask = np.zeros(emmap.shape);
    # Cubic volume: spherical mask of radius half the box size.
    if mask.shape[0] == mask.shape[1] and mask.shape[0] == mask.shape[2] and mask.shape[1] == mask.shape[2]:
        rad = (mask.shape[0] // 2) ;
        z,y,x = np.ogrid[-rad: rad+1, -rad: rad+1, -rad: rad+1];
        mask = (x**2+y**2+z**2 <= rad**2).astype(np.int_).astype(np.int8);
        # ogrid produces a (2*rad+1)^3 sphere; crop/pad it back to the map shape.
        mask = pad_or_crop_volume(mask,emmap.shape);
        mask = (mask > 0.5).astype(np.int8);
    else:
        # Non-cubic volume: start from an all-ones mask.
        mask += 1;
        mask = mask[0:mask.shape[0]-1, 0:mask.shape[1]-1, 0:mask.shape[2]-1];
        # NOTE(review): this assigns the padded *emmap*, not the trimmed mask,
        # so the two statements above are discarded — looks like a bug; confirm
        # whether `mask` was meant as the first argument here.
        mask = pad_or_crop_volume(emmap, (emmap.shape), pad_value=0);
    if wn_locscale is None:
        wn_locscale = int(round(7 * 3 * apix)); # set default window size to 7 times average resolution
    elif wn_locscale is not None:
        # Round a user-supplied window up to the next even number.
        wn_locscale = int(math.ceil(wn_locscale / 2.) * 2);
    #wn = wn_locscale;
    if windowSize is None:
        wn = wn_locscale;
    elif windowSize is not None:
        wn = int(math.ceil(windowSize / 2.) * 2);
    # Default FDR method is Benjamini-Yekutieli ('BY').
    if method is not None:
        method = method;
    else:
        method = 'BY';
    # boxCoord == 0 signals "use the default noise-box location" downstream.
    if noiseBox is not None:
        boxCoord = noiseBox;
    else:
        boxCoord = 0;
    window_bleed_and_pad = check_for_window_bleeding(mask, wn_locscale);
    if window_bleed_and_pad:
        # Enlarge each map by one window so sliding windows stay inside;
        # pad intensities with the mean outside-mask value of each map.
        pad_int_emmap = compute_padding_average(emmap, mask);
        pad_int_modmap = compute_padding_average(modelmap, mask);
        map_shape = [(emmap.shape[0] + wn_locscale), (emmap.shape[1] + wn_locscale), (emmap.shape[2] + wn_locscale)];
        emmap = pad_or_crop_volume(emmap, map_shape, pad_int_emmap);
        modelmap = pad_or_crop_volume(modelmap, map_shape, pad_int_modmap);
        mask = pad_or_crop_volume(mask, map_shape, 0);
        if locResMap is not None:
            # 100.0 acts as the "no resolution information" sentinel below.
            locResMap = pad_or_crop_volume(locResMap, map_shape, 100.0);
    #if wished so, do local filtration
    if locResMap is not None:
        # Clamp unknown/implausible local resolutions to the 100.0 sentinel.
        locResMap[locResMap == 0.0] = 100.0;
        locResMap[locResMap >= 100.0] = 100.0;
        locFilt = True;
    else:
        locFilt = False;
        locResMap = np.ones(emmap.shape);
    return emmap, modelmap, mask, wn, wn_locscale, window_bleed_and_pad, method, locFilt, locResMap, boxCoord;
def compute_radial_profile(volFFT, frequencyMap):
    """Average the Fourier amplitudes of *volFFT* over shells of equal
    spatial frequency.

    Shell boundaries come from ``np.fft.rfftfreq`` of the first map
    dimension; *frequencyMap* assigns every FFT voxel its frequency.

    Returns ``(radial_profile, frequencies)``.
    """
    amplitudes = np.real(np.abs(volFFT))
    frequencies = np.fft.rfftfreq(volFFT.shape[0])

    # Bin every voxel into its frequency shell (digitize is 1-based).
    shell_index = np.digitize(frequencyMap, frequencies) - 1
    flat_bins = shell_index.ravel()

    # Weighted bincount sums amplitudes per shell; plain bincount counts voxels.
    radial_profile = np.bincount(flat_bins, amplitudes.ravel()) / np.bincount(flat_bins)
    return radial_profile, frequencies
def compute_scale_factors(em_profile, ref_profile):
    """Return per-shell amplitude scale factors ``|ref_profile| / |em_profile|``.

    Shells where the division is undefined (zero or zero/zero) yield
    non-finite values and are set to 0.

    Fix: the original called ``np.seterr(divide='ignore', invalid='ignore')``,
    which permanently mutates the process-wide NumPy error state for all
    subsequent code. ``np.errstate`` scopes the suppression to this division
    only and restores the previous settings afterwards.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        scale_factor = np.divide(np.abs(ref_profile), np.abs(em_profile))
    scale_factor[~np.isfinite(scale_factor)] = 0  # handle division by zero
    return scale_factor
def set_radial_profile(volFFT, scaleFactors, frequencies, frequencyMap, shape):
    """Apply per-shell scale factors to a half-space FFT and invert it.

    The shell-wise *scaleFactors* (sampled at *frequencies*) are linearly
    interpolated onto every voxel's frequency from *frequencyMap*, the FFT
    is multiplied voxel-wise, and the result is inverse-transformed back to
    a real map of the given *shape*.

    Returns ``(scaledMap, scaledMapFFT)``.
    """
    voxel_scale = np.interp(frequencyMap, frequencies, scaleFactors)
    scaled_fft = np.multiply(voxel_scale, volFFT)
    scaled_map = np.fft.irfftn(scaled_fft, shape, norm='ortho')
    return np.real(scaled_map), scaled_fft
def calculate_scaled_map(emmap, modmap, mask, wn, wn_locscale, apix, locFilt, locResMap, boxCoord, ecdfBool, stepSize):
    """Slide a window of size *wn_locscale* over the map and scale each
    window's radial amplitude profile to the model map's profile (LocScale).

    A background noise window (default-centered, or at *boxCoord*) is scaled
    with the same factors to derive per-window noise statistics (mean,
    variance, optional ECDF). Returns
    ``(sharpened_map, sharpened_mean_vals, sharpened_var_vals, sharpened_ecdf_vals)``.

    NOTE(review): the *mask* parameter is accepted but never used in this
    function body — confirm whether masking was intended here.
    """
    sizeMap = emmap.shape
    sharpened_map = np.zeros(sizeMap);
    sharpened_mean_vals = np.zeros(sizeMap);
    sharpened_var_vals = np.zeros(sizeMap);
    sharpened_ecdf_vals = np.zeros(sizeMap);
    central_pix = int(round(wn_locscale / 2.0));
    center = np.array([0.5*sizeMap[0], 0.5*sizeMap[1], 0.5*sizeMap[2]]);
    #get the background noise sample
    if boxCoord == 0:
        noiseMap = emmap[int(center[0]-0.5*wn):(int(center[0]-0.5*wn) + wn), int(0.02*wn+wn_locscale/2.0):(int(0.02*wn+wn_locscale/2.0) + wn), (int(center[2]-0.5*wn)):(int((center[2]-0.5*wn) + wn))];
    else:
        noiseMap = emmap[int(boxCoord[0]-0.5*wn +wn_locscale/2.0):(int(boxCoord[0]-0.5*wn + wn_locscale/2.0) + wn), int(boxCoord[1]-0.5*wn+ wn_locscale/2.0):(int(boxCoord[1]-0.5*wn + wn_locscale/2.0) + wn), (int(boxCoord[2]-0.5*wn + wn_locscale/2.0)):(int((boxCoord[2]-0.5*wn + wn_locscale/2.0)+wn))];
    #prepare noise map for scaling
    frequencyMap_noise = FDRutil.calculate_frequency_map(noiseMap);
    noiseMapFFT = np.fft.rfftn(noiseMap, norm='ortho');
    # NOTE(review): noise_profile/frequencies_noise are computed but never
    # read below — dead computation unless kept deliberately for debugging.
    noise_profile, frequencies_noise = compute_radial_profile(noiseMapFFT, frequencyMap_noise);
    #prepare windows of particle for scaling
    frequencyMap_mapWindow = FDRutil.calculate_frequency_map(np.zeros((wn_locscale, wn_locscale, wn_locscale)));
    numSteps = len(range(0, sizeMap[0] - int(wn_locscale), stepSize))*len(range(0, sizeMap[1] - int(wn_locscale), stepSize))*len(range(0, sizeMap[2] - int(wn_locscale), stepSize));
    # NOTE(review): "Sart" is a typo for "Start" in this user-facing message.
    print("Sart LocScale. This might take a minute ...");
    counterSteps = 0;
    for k in range(0, sizeMap[0] - int(wn_locscale), stepSize):
        for j in range(0, sizeMap[1] - int(wn_locscale), stepSize):
            for i in range(0, sizeMap[2] - int(wn_locscale), stepSize):
                #print progress
                counterSteps = counterSteps + 1;
                progress = counterSteps/float(numSteps);
                # NOTE(review): int(numSteps/20.0) is 0 when numSteps < 20,
                # making this modulo raise ZeroDivisionError for small maps.
                if counterSteps%(int(numSteps/20.0)) == 0:
                    output = "%.1f" %(progress*100) + "% finished ..." ;
                    print(output);
                #crop windows
                emmap_wn = emmap[k: k + wn_locscale, j: j + wn_locscale, i: i + wn_locscale];
                modmap_wn = modmap[k: k + wn_locscale, j: j + wn_locscale, i: i + wn_locscale];
                #do sharpening of the sliding window
                emmap_wn_FFT = np.fft.rfftn(np.copy(emmap_wn), norm='ortho');
                modmap_wn_FFT = np.fft.rfftn(np.copy(modmap_wn), norm='ortho');
                em_profile, frequencies_map = compute_radial_profile(emmap_wn_FFT, frequencyMap_mapWindow);
                mod_profile, _ = compute_radial_profile(modmap_wn_FFT, frequencyMap_mapWindow);
                scale_factors = compute_scale_factors(em_profile, mod_profile);
                map_b_sharpened, map_b_sharpened_FFT = set_radial_profile(emmap_wn_FFT, scale_factors, frequencies_map, frequencyMap_mapWindow, emmap_wn.shape);
                #scale noise window with the interpolated scaling factors
                mapNoise_sharpened, mapNoise_sharpened_FFT = set_radial_profile(np.copy(noiseMapFFT), scale_factors, frequencies_map, frequencyMap_noise, noiseMap.shape);
                #local filtering routines
                if locFilt == True:
                    # Convert the window's local resolution to a normalized frequency.
                    tmpRes = round(apix/locResMap[k, j, i], 3);
                    mapNoise_sharpened = FDRutil.lowPassFilter(mapNoise_sharpened_FFT, frequencyMap_noise, tmpRes, noiseMap.shape);
                    map_b_sharpened = FDRutil.lowPassFilter(map_b_sharpened_FFT, frequencyMap_mapWindow, tmpRes, emmap_wn.shape);
                    #calculate noise statistics
                    map_noise_sharpened_data = mapNoise_sharpened;
                    if ecdfBool:
                        tmpECDF, sampleSort = FDRutil.estimateECDFFromMap(map_noise_sharpened_data, -1, -1);
                        # NOTE(review): here `ecdf` is a scalar (interp at the central
                        # pixel), but the write-back below indexes it with slices —
                        # locFilt=True together with ecdfBool=True looks like it would
                        # fail; confirm intended usage.
                        ecdf = np.interp(map_b_sharpened[central_pix, central_pix, central_pix], sampleSort, tmpECDF, left=0.0, right=1.0);
                    else:
                        ecdf = 0;
                    mean = np.mean(map_noise_sharpened_data);
                    var = np.var(map_noise_sharpened_data);
                    # Guard against degenerate noise windows.
                    if var < 0.5:
                        var = 0.5;
                        mean = 0.0;
                    # 100.0 is the "no local resolution" sentinel set upstream.
                    if tmpRes == round(apix/100.0, 3):
                        mean = 0.0;
                        var = 0.0;
                        ecdf = 0;
                else:
                    #calculate noise statistics
                    map_noise_sharpened_data = np.copy(mapNoise_sharpened);
                    if ecdfBool:
                        tmpECDF, sampleSort = FDRutil.estimateECDFFromMap(map_noise_sharpened_data, -1, -1);
                        ecdf = np.interp(map_b_sharpened, sampleSort, tmpECDF, left=0.0, right=1.0);
                    else:
                        ecdf = 0;
                    mean = np.mean(map_noise_sharpened_data);
                    var = np.var(map_noise_sharpened_data);
                    if var < 0.5:
                        var = 0.5;
                        mean = 0.0;
                #put values back into the the original maps
                # Only the central stepSize^3 cube of each window is written back.
                halfStep=int((wn_locscale/2.0) - (stepSize/2.0));
                sharpened_map[k + halfStep : k + halfStep + stepSize, j + halfStep : j + halfStep + stepSize, i + halfStep : i + halfStep + stepSize] = np.copy(map_b_sharpened[halfStep:halfStep+stepSize, halfStep:halfStep+stepSize, halfStep:halfStep+stepSize]);
                sharpened_mean_vals[k + halfStep : k + halfStep + stepSize, j + halfStep : j + halfStep + stepSize, i + halfStep : i + halfStep + stepSize] = mean;
                sharpened_var_vals[k + halfStep : k + halfStep + stepSize, j + halfStep : j + halfStep + stepSize, i + halfStep : i + halfStep + stepSize] = var;
                if ecdfBool:
                    sharpened_ecdf_vals[k + halfStep : k + halfStep + stepSize, j + halfStep : j + halfStep + stepSize, i + halfStep : i + halfStep + stepSize] = ecdf[halfStep:halfStep+stepSize, halfStep:halfStep+stepSize, halfStep:halfStep+stepSize];
                else:
                    sharpened_ecdf_vals[k + halfStep: k + halfStep + stepSize, j + halfStep: j + halfStep + stepSize,
                    i + halfStep: i + halfStep + stepSize] = 0.0;
    return sharpened_map, sharpened_mean_vals, sharpened_var_vals, sharpened_ecdf_vals;
def get_central_scaled_pixel_vals_after_scaling(emmap, modmap, masked_xyz_locs, wn, wn_locscale, apix, locFilt, locResMap, boxCoord, ecdfBool):
sharpened_vals = [];
sharpened_mean_vals = [];
sharpened_var_vals = [];
sharpened_ecdf_vals = [];
central_pix = int(round(wn_locscale / 2.0));
sizeMap = emmap.shape;
center = np.array([0.5*sizeMap[0], 0.5*sizeMap[1], 0.5*sizeMap[2]]);
#get the background noise sample
if boxCoord == 0:
noiseMap = emmap[int(center[0]-0.5*wn):(int(center[0]-0.5*wn) + wn), int(0.02*wn+wn_locscale):(int(0.02*wn+wn_locscale) + wn), (int(center[2]-0.5*wn)):(int((center[2]-0.5*wn) + wn))];
else:
noiseMap = emmap[int(boxCoord[0]-0.5*wn + wn_locscale):(int(boxCoord[0]-0.5*wn + wn_locscale) + wn), int(boxCoord[1]-0.5*wn+ wn_locscale):(int(boxCoord[1]-0.5*wn + wn_locscale) + | |
False
.. attribute:: device_description
Device Name
**type**\: str
**length:** 0..50
**config**\: False
.. attribute:: units
Units of variable being read
**type**\: str
**length:** 0..50
**config**\: False
.. attribute:: device_id
Identifier for this device
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: value
Current reading of sensor
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: alarm_type
Indicates threshold violation
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: data_type
Sensor data type enums
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: scale
Sensor scale enums
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: precision
Sensor precision range
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: status
Sensor operation state enums
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: age_time_stamp
Age of the sensor value; set to the current time if directly access the value from sensor
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: update_rate
Sensor value update rate;set to 0 if sensor value is updated and evaluated immediately
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: average
Average sensor value over time interval
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: minimum
Minimum Sensor value over time interval
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: maximum
Maximum Sensor value over time interval
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: interval
Time Interval over which sensor value is monitored
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
"""
# Tail of the ydk-gen generated ValueDetailedCli entity (the class statement
# and its docstring precede this chunk): YANG metadata, leaf registration,
# and the frozen-attribute guard.
_prefix = 'asr9k-sc-envmon-oper'
_revision = '2017-01-19'

def __init__(self):
    # Python 2/3 compatible chained constructor for the generated hierarchy.
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ValueDetailedCli, self).__init__()
    self.yang_name = "value-detailed-cli"
    self.yang_parent_name = "sensor-name-cli"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_classes = OrderedDict([])
    # Leaf registry: YANG leaf name -> (YLeaf wrapper, accepted Python types).
    self._leafs = OrderedDict([
        ('field_validity_bitmap', (YLeaf(YType.uint32, 'field-validity-bitmap'), ['int'])),
        ('device_description', (YLeaf(YType.str, 'device-description'), ['str'])),
        ('units', (YLeaf(YType.str, 'units'), ['str'])),
        ('device_id', (YLeaf(YType.uint32, 'device-id'), ['int'])),
        ('value', (YLeaf(YType.uint32, 'value'), ['int'])),
        ('alarm_type', (YLeaf(YType.uint32, 'alarm-type'), ['int'])),
        ('data_type', (YLeaf(YType.uint32, 'data-type'), ['int'])),
        ('scale', (YLeaf(YType.uint32, 'scale'), ['int'])),
        ('precision', (YLeaf(YType.uint32, 'precision'), ['int'])),
        ('status', (YLeaf(YType.uint32, 'status'), ['int'])),
        ('age_time_stamp', (YLeaf(YType.uint32, 'age-time-stamp'), ['int'])),
        ('update_rate', (YLeaf(YType.uint32, 'update-rate'), ['int'])),
        ('average', (YLeaf(YType.int32, 'average'), ['int'])),
        ('minimum', (YLeaf(YType.int32, 'minimum'), ['int'])),
        ('maximum', (YLeaf(YType.int32, 'maximum'), ['int'])),
        ('interval', (YLeaf(YType.int32, 'interval'), ['int'])),
    ])
    # All leafs start unset (None) until populated from the device.
    self.field_validity_bitmap = None
    self.device_description = None
    self.units = None
    self.device_id = None
    self.value = None
    self.alarm_type = None
    self.data_type = None
    self.scale = None
    self.precision = None
    self.status = None
    self.age_time_stamp = None
    self.update_rate = None
    self.average = None
    self.minimum = None
    self.maximum = None
    self.interval = None
    self._segment_path = lambda: "value-detailed-cli"
    self._is_frozen = True

def __setattr__(self, name, value):
    # Route through the YDK base helper so only registered leaf names
    # can be assigned once the instance is frozen.
    self._perform_setattr(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ValueDetailedCli, ['field_validity_bitmap', 'device_description', 'units', 'device_id', 'value', 'alarm_type', 'data_type', 'scale', 'precision', 'status', 'age_time_stamp', 'update_rate', 'average', 'minimum', 'maximum', 'interval'], name, value)

@staticmethod
def _meta_info():
    # Lazy import keeps the large meta table out of memory unless requested.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_oper as meta
    return meta._meta_table['EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ValueDetailedCli']['meta_info']
# ydk-gen generated container for the sensor threshold list; do not edit by hand.
class ThresholdClis(_Entity_):
    """
    The threshold information
    .. attribute:: threshold_cli
    Types of thresholds
    **type**\: list of  :py:class:`ThresholdCli <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_oper.EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli>`
    **config**\: False
    """
    # YANG module prefix / revision this entity was generated from.
    _prefix = 'asr9k-sc-envmon-oper'
    _revision = '2017-01-19'

    def __init__(self):
        # Python 2/3 compatible chained constructor.
        if sys.version_info > (3,):
            super().__init__()
        else:
            super(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis, self).__init__()
        self.yang_name = "threshold-clis"
        self.yang_parent_name = "sensor-name-cli"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Single child list: "threshold-cli" entries keyed by threshold type.
        self._child_classes = OrderedDict([("threshold-cli", ("threshold_cli", EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli))])
        self._leafs = OrderedDict()
        self.threshold_cli = YList(self)
        self._segment_path = lambda: "threshold-clis"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # No own leafs, so the allowed-attribute list is empty.
        self._perform_setattr(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis, [], name, value)

    class ThresholdCli(_Entity_):
        """
        Types of thresholds
        .. attribute:: type  (key)
        Threshold type
        **type**\: str
        **pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
        **config**\: False
        .. attribute:: value_detailed_cli
        Detailed sensor threshold information
        **type**\:  :py:class:`ValueDetailedCli <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_oper.EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli.ValueDetailedCli>`
        **config**\: False
        .. attribute:: trap_cli
        Threshold trap enable flag true\-ENABLE, false\-DISABLE
        **type**\: bool
        **config**\: False
        .. attribute:: value_brief_cli
        Threshold value for the sensor
        **type**\: str
        **pattern:** [0\-9a\-fA\-F]{1,8}
        **config**\: False
        """
        _prefix = 'asr9k-sc-envmon-oper'
        _revision = '2017-01-19'

        def __init__(self):
            if sys.version_info > (3,):
                super().__init__()
            else:
                super(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli, self).__init__()
            self.yang_name = "threshold-cli"
            self.yang_parent_name = "threshold-clis"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            # 'type' is the YANG list key for this entry.
            self.ylist_key_names = ['type']
            self._child_classes = OrderedDict([("value-detailed-cli", ("value_detailed_cli", EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli.ValueDetailedCli))])
            self._leafs = OrderedDict([
                ('type', (YLeaf(YType.str, 'type'), ['str'])),
                ('trap_cli', (YLeaf(YType.boolean, 'trap-cli'), ['bool'])),
                ('value_brief_cli', (YLeaf(YType.str, 'value-brief-cli'), ['str'])),
            ])
            self.type = None
            self.trap_cli = None
            self.value_brief_cli = None
            # Eagerly instantiate the singleton child container.
            self.value_detailed_cli = EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli.ValueDetailedCli()
            self.value_detailed_cli.parent = self
            self._children_name_map["value_detailed_cli"] = "value-detailed-cli"
            # Segment path embeds the list key, e.g. threshold-cli[type='minor'].
            self._segment_path = lambda: "threshold-cli" + "[type='" + str(self.type) + "']"
            self._is_frozen = True

        def __setattr__(self, name, value):
            self._perform_setattr(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli, ['type', 'trap_cli', 'value_brief_cli'], name, value)

        class ValueDetailedCli(_Entity_):
            """
            Detailed sensor threshold
            information
            .. attribute:: threshold_severity
            Indicates minor, major, critical severities
            **type**\: int
            **range:** 0..4294967295
            **config**\: False
            .. attribute:: threshold_relation
            Indicates relation between sensor value and threshold
            **type**\: int
            **range:** 0..4294967295
            **config**\: False
            .. attribute:: threshold_value
            Value of the configured threshold
            **type**\: int
            **range:** 0..4294967295
            **config**\: False
            .. attribute:: threshold_evaluation
            Indicates the result of the most recent evaluation of the thresholD
            **type**\: bool
            **config**\: False
            .. attribute:: threshold_notification_enabled
            Indicates whether or not a notification should result, in case of threshold violation
            **type**\: bool
            **config**\: False
            """
            _prefix = 'asr9k-sc-envmon-oper'
            _revision = '2017-01-19'

            def __init__(self):
                if sys.version_info > (3,):
                    super().__init__()
                else:
                    super(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli.ValueDetailedCli, self).__init__()
                self.yang_name = "value-detailed-cli"
                self.yang_parent_name = "threshold-cli"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('threshold_severity', (YLeaf(YType.uint32, 'threshold-severity'), ['int'])),
                    ('threshold_relation', (YLeaf(YType.uint32, 'threshold-relation'), ['int'])),
                    ('threshold_value', (YLeaf(YType.uint32, 'threshold-value'), ['int'])),
                    ('threshold_evaluation', (YLeaf(YType.boolean, 'threshold-evaluation'), ['bool'])),
                    ('threshold_notification_enabled', (YLeaf(YType.boolean, 'threshold-notification-enabled'), ['bool'])),
                ])
                self.threshold_severity = None
                self.threshold_relation = None
                self.threshold_value = None
                self.threshold_evaluation = None
                self.threshold_notification_enabled = None
                self._segment_path = lambda: "value-detailed-cli"
                self._is_frozen = True

            def __setattr__(self, name, value):
                self._perform_setattr(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli.ValueDetailedCli, ['threshold_severity', 'threshold_relation', 'threshold_value', 'threshold_evaluation', 'threshold_notification_enabled'], name, value)

            @staticmethod
            def _meta_info():
                # Lazy import of the bundle-wide meta table.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_oper as meta
                return meta._meta_table['EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli.ValueDetailedCli']['meta_info']

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_oper as meta
            return meta._meta_table['EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis.ThresholdCli']['meta_info']

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_oper as meta
        return meta._meta_table['EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli.ThresholdClis']['meta_info']
# Generated meta-accessors closing out the enclosing SensorNameCli,
# SensorNameClis, SensorTypeCli and SensorTypeClis classes (whose bodies
# begin earlier in the file). Each lazily imports the bundle meta table.
@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_oper as meta
    return meta._meta_table['EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis.SensorNameCli']['meta_info']

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_oper as meta
    return meta._meta_table['EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli.SensorNameClis']['meta_info']

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_oper as meta
    return meta._meta_table['EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis.SensorTypeCli']['meta_info']

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_sc_envmon_oper as meta
    return meta._meta_table['EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.SensorTypeClis']['meta_info']
class PowerCli(_Entity_):
"""
Module Power Draw
.. attribute:: power_bag_cli
Detailed power bag information
**type**\: :py:class:`PowerBagCli <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_sc_envmon_oper.EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.PowerCli.PowerBagCli>`
**config**\: False
"""
_prefix = 'asr9k-sc-envmon-oper'
_revision = '2017-01-19'
def __init__(self):
    # Python 2/3 compatible chained constructor for the generated PowerCli
    # container ("Module Power Draw").
    if sys.version_info > (3,):
        super().__init__()
    else:
        super(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.PowerCli, self).__init__()
    self.yang_name = "power-cli"
    self.yang_parent_name = "module-cli"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # Single child container holding the detailed power-bag readings.
    self._child_classes = OrderedDict([("power-bag-cli", ("power_bag_cli", EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.PowerCli.PowerBagCli))])
    self._leafs = OrderedDict()
    self.power_bag_cli = EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.PowerCli.PowerBagCli()
    self.power_bag_cli.parent = self
    self._children_name_map["power_bag_cli"] = "power-bag-cli"
    self._segment_path = lambda: "power-cli"
    self._is_frozen = True
def __setattr__(self, name, value):
    # Delegate to the YDK base helper; PowerCli has no leafs of its own,
    # so the allowed-attribute list is empty.
    self._perform_setattr(EnvironmentalMonitoringCli.RackClis.RackCli.SlotClis.SlotCli.ModuleClis.ModuleCli.PowerCli, [], name, value)
class PowerBagCli(_Entity_):
"""
Detailed power bag information
.. attribute:: power_value
Current Power Value of the Unit
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: power_max_value
Max Power Value of the Unit
**type**\: int
**range:** \-2147483648..2147483647
**config**\: False
.. attribute:: power_unit_multiplier
Unit Multiplier of Power
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_accuracy
Accuracy of the Power Value
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_measure_caliber
Measure Caliber
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_current_type
Current Type of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_origin
The Power Origin of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_admin_state
Admin Status of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_oper_state
Oper Status of the Unit
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: power_state_enter_reason
Enter Reason for the State
**type**\: str
**length:** 0..50
**config**\: | |
☆")
else:
if wait["AutoCancel"] == True:
if op.param3 in Bots:
pass
else:
cl.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
cl.cancelGroupInvitation(op.param1, [op.param3])
cl.sendText(op.param1, "Blacklist Detected")
else:
pass
if op.type == 13:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Creator:
if op.param3 in admin:
if op.param3 in Bots:
pass
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
cl.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
cl.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
if mid in op.param3:
if op.param2 in Creator:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Creator in op.param3:
if admin in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param2 not in Bots:
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
cl.inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
cl.inviteIntoGroup(op.param1,[op.param3])
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 11:
if wait["Qr"] == True:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
cl.kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in mid:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(op.param1,"สวัสดี " + cl.getContact(op.param2).displayName + "\nยินดีต้อนรับเข้าสู่กลุ่ม ☞ " + str(ginfo.name) + " ☜" + "\nเข้ามาแล้วอย่าลืมดูที่โน๊ตกลุ่มด้วยนะ\nอย่าลืมปิดเสียงแจ้งเตือนด้วยล่ะ ^_^")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "23701829",
"STKPKGID": "1740802",
"STKVER": "1" }
cl.sendMessage(d)
print "MEMBER JOIN TO GROUP"
if op.type == 19:
if wait["Sambutan"] == True:
if op.param2 in mid:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "คืออยังมันโหดแท้ว่ะ(|||゚д゚)")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "22832861",
"STKPKGID": "1705396",
"STKVER": "1" }
cl.sendMessage(d)
print "MEMBER KICK OUT FORM GROUP"
if op.type == 15:
if wait["Sambutan"] == True:
if op.param2 in mid:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(op.param1,"Goodbye.. " + cl.getContact(op.param2).displayName + "\nแล้วเจอกันใหม่นะ. . . (p′︵‵。) 🤗")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithURL(op.param1,image)
d = Message(to=op.param1, from_=None, text=None, contentType=7)
d.contentMetadata={
"STKID": "23701835",
"STKPKGID": "1740802",
"STKVER": "1" }
cl.sendAudio(msg.to,'tts.mp3')
cl.sendMessage(d)
print "MEMBER HAS LEFT THE GROUP"
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if op.type == 25:
msg = op.message
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment"])
if op.type == 26:
msg = op.message
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment"])
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to, "[ChatBOT] " + data['result']['response'].encode('utf-8'))
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
cl.findAndAddContactsByMid(invite)
cl.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
if op.type == 25:
msg = op.message
if msg.text in ["Bot on"]:
wait["Selfbot"] = True
cl.sendText(msg.to,"Selfbot Sudah On Kembali.")
if op.type == 25:
if wait["Selfbot"] == True:
msg = op.message
if op.type in [26,25]:
msg = op.message
if msg.contentType == 7:
if wait['stiker'] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
filler = "[Stiker Check] \nSTKID : %s\nSTKPKGID : %s \nSTKVER : %s\n =>> Link...\nline://shopdetail/%s"%(stk_id,pkg_id,stk_ver,pkg_id)
cl.sendText(msg.to, filler)
else:
pass
if op.type == 26:
msg = op.message
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['Tagvirus'] == True:
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention["MENTIONEES"]
for mention in mentionees:
if mention["M"] in mid:
msg.contentType = 13
msg.contentMetadata = {'mid': "JANDA'"}
cl.sendMessage(msg)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["kickMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["A<NAME>agi " + cName + "\nAku Kick Kamu! Sorry, Byee!!!"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.kickoutFromGroup(msg.to,[msg.from_])
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["ว่าไงคับน้องสาว? " + cName + "มีอะไรให้ผมรับใช้คับ😂😂",cName + " แทคทำไมมิทราบ? มีอิโรยก๊ะว่ามา",cName + " แทคบ่อยๆเดะจับทำเมียนะ -..-","หยุดแทคสักพัก" + cName + " แล้วมาพบรักที่หลังแชท😝😝","😎😎😎\nคับ มีไรคับ " + cName, "ยังไม่ว่าง เดี๋ยวมาตอบนะ " + cName, "ไม่อยู่ ไปทำธุระ " + cName + "มีไรทิ้งแชทไว้ที่แชท.สตนะ?", "อ่ะ เอาอีกแระ " + cName + "แทคตมอย??????????????????","ป๊าาาด " + cName + " คุณนายคับ จะแทคทำไมคับ!"]
balas1 = "รูปภาพคนแทค. . ."
ret_ = random.choice(balas)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.sendText(msg.to,balas1)
cl.sendImageWithURL(msg.to,image)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "23701825",
"STKPKGID": "1740802",
"STKVER": "1" }
cl.sendMessage(msg)
jawaban1 = ("มีอะไรครับ แทคแล้วไม่พูดจับรันนะ ห้าห้าห้าห้า")
tts = gTTS(text=jawaban1, lang='th')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
break
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["gift"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in | |
in range(n_types):
effort_searching_simulated[
type_idx, :
] = interpolate_2d_ordered_to_unordered(
hc_grid_reduced_h_a,
assets_grid_h_a,
policy_effort_searching[type_idx, :, :, period_idx],
hc_simulated[type_idx, :],
assets_simulated[type_idx, :],
)
effort_searching_loss_simulated[
type_idx, :
] = interpolate_2d_ordered_to_unordered(
hc_grid_reduced_h_a,
assets_grid_h_a,
policy_effort_searching_loss[type_idx, :, :, period_idx],
hc_simulated[type_idx, :],
assets_simulated[type_idx, :],
)
effort_searching_simulated = np.minimum(
np.maximum(effort_searching_simulated, 0.0), 1.0
)
effort_searching_loss_simulated = np.minimum(
np.maximum(effort_searching_loss_simulated, 0.0), 1.0
)
job_finding_probability_searching_simulated = job_finding_probability(
effort_searching_simulated
)
job_finding_probability_searching_loss_simulated = job_finding_probability(
effort_searching_loss_simulated
)
# compute search phase statistics
share_searching[:, period_idx] = np.mean(searching_all_simulated, axis=1)
job_finding_probability_searching_all_mean[
:, period_idx
] = conditional_mean(
(
job_finding_probability_searching_simulated * searching_simulated
+ job_finding_probability_searching_loss_simulated
* searching_loss_simulated
),
searching_all_simulated,
axis=1,
)
job_finding_probability_searching_mean[:, period_idx] = conditional_mean(
job_finding_probability_searching_simulated, searching_simulated, axis=1
)
job_finding_probability_searching_loss_mean[
:, period_idx
] = conditional_mean(
job_finding_probability_searching_loss_simulated,
searching_loss_simulated,
axis=1,
)
# generate transition events
job_finding_event_searching_simulated = (
job_finding_probability_searching_simulated
>= np.random.rand(n_types, n_simulations)
).astype(bool)
job_finding_event_searching_loss_simulated = (
job_finding_probability_searching_loss_simulated
>= np.random.rand(n_types, n_simulations)
).astype(bool)
# calculate average job finding rates
job_finding_rate_searching_all_mean[:, period_idx] = conditional_mean(
(
job_finding_event_searching_simulated * searching_simulated
+ job_finding_event_searching_loss_simulated
* searching_loss_simulated
),
searching_all_simulated,
axis=1,
)
job_finding_rate_searching_mean[:, period_idx] = conditional_mean(
job_finding_event_searching_simulated, searching_simulated, axis=1
)
job_finding_rate_searching_loss_mean[:, period_idx] = conditional_mean(
job_finding_event_searching_loss_simulated,
searching_loss_simulated,
axis=1,
)
# calculate unemployment duration statistics # todo: check timing
duration_unemployed_simulated_weeks = _get_duration_weeks(
(
job_finding_probability_searching_simulated * searching_simulated
+ job_finding_probability_searching_loss_simulated
* searching_loss_simulated
),
duration_unemployed_simulated,
)
duration_unemployed_weeks_mean[:, period_idx] = conditional_mean(
np.minimum(
duration_unemployed_simulated_weeks, 98
), # unemployment duration is capped at 98 weeks in the data
searching_all_simulated,
axis=1,
)
duration_unemployed_median[:, period_idx] = [
np.median(
duration_unemployed_simulated[i, searching_all_simulated[i, :]]
)
for i in range(n_types)
]
duration_unemployed_stdev[:, period_idx] = [
np.std(duration_unemployed_simulated[i, searching_all_simulated[i, :]])
for i in range(n_types)
]
# simulate transitions from search phase to consumption phase
# transitions of labor force status
employed_simulated = (
employed_simulated
+ searching_simulated * job_finding_event_searching_simulated
+ searching_loss_simulated * job_finding_event_searching_loss_simulated
).astype(bool)
unemployed_simulated = (
searching_simulated * (1 - job_finding_event_searching_simulated)
).astype(bool)
unemployed_loss_simulated = (
searching_loss_simulated
* (1 - job_finding_event_searching_loss_simulated)
).astype(bool)
nonemployed_simulated = (
unemployed_simulated + unemployed_loss_simulated
).astype(bool)
# simulate hc transition to consumption phase
hc_loss_simulated = np.full((n_types, n_simulations), np.nan)
pre_displacement_wage_simulated = np.full((n_types, n_simulations), np.nan)
new_wage_simulated = np.full((n_types, n_simulations), np.nan)
for type_idx in range(n_types):
hc_loss_simulated[type_idx, :] = (
(
hc_simulated[type_idx, :]
- _hc_after_loss_1_agent(
hc_simulated[type_idx, :],
wage_loss_factor_vector[type_idx, :],
wage_loss_reference_vector[type_idx, :],
period_idx,
)
)
* searching_loss_simulated[type_idx, :]
* job_finding_event_searching_loss_simulated[type_idx, :]
)
pre_displacement_wage_simulated[type_idx, :] = (
wage_level
* wage_hc_factor_interpolated_1_agent(
hc_simulated[type_idx, :], wage_hc_factor_vector[type_idx, :]
)
* searching_loss_simulated[type_idx, :]
* job_finding_event_searching_loss_simulated[type_idx, :]
)
new_wage_simulated[type_idx, :] = (
wage_level
* wage_hc_factor_interpolated_1_agent(
hc_simulated[type_idx, :] - hc_loss_simulated[type_idx, :],
wage_hc_factor_vector[type_idx, :],
)
* searching_loss_simulated[type_idx, :]
* job_finding_event_searching_loss_simulated[type_idx, :]
)
# calculate wage loss statistics
wage_loss_simulated = new_wage_simulated - pre_displacement_wage_simulated
wage_loss_median[:, period_idx] = np.array(
[
np.median(
wage_loss_simulated[
type_idx,
searching_loss_simulated[type_idx, :]
* job_finding_event_searching_loss_simulated[type_idx, :],
]
)
for type_idx in range(n_types)
]
)
# simulate hc loss upon reemployment
hc_simulated = hc_simulated - hc_loss_simulated
# transition of durations
duration_unemployed_simulated = (
duration_unemployed_simulated
+ searching_simulated * (1 - job_finding_event_searching_simulated)
+ searching_loss_simulated
* (1 - job_finding_event_searching_loss_simulated)
) # +1 for workers that remain unemployed
duration_unemployed_simulated = (
duration_unemployed_simulated * nonemployed_simulated
) # =0 for everyone else
# check for error in state simulation
if (
np.sum(
unemployed_simulated
+ unemployed_loss_simulated
+ employed_simulated
)
< n_simulations
):
warnings.warn(
f"ERROR! in transition from search phase "
f"to consumption phase in period {period_idx}"
)
# (ii) consumption phase
# simulate consumption
consumption_simulated = _simulate_consumption(
policy_consumption_employed,
policy_consumption_unemployed,
policy_consumption_unemployed_loss,
employed_simulated,
unemployed_simulated,
unemployed_loss_simulated,
hc_simulated,
assets_simulated,
period_idx,
)
# update wages
wage_hc_factor_simulated = np.array(
[
wage_hc_factor_interpolated_1_agent(
hc_simulated[i, :], wage_hc_factor_vector[i, :]
)
for i in range(n_types)
]
)
wage_hc_factor_pre_displacement_simulated = np.array(
[
wage_hc_factor_interpolated_1_agent(
hc_pre_displacement_simulated[i, :], wage_hc_factor_vector[i, :]
)
for i in range(n_types)
]
)
# simulate savings
savings_simulated = _simulate_savings(
employed_simulated,
nonemployed_simulated,
consumption_simulated,
assets_simulated,
wage_hc_factor_simulated,
tax_ss,
tax_ui_vector,
tax_income,
transfers_lumpsum,
ui_replacement_rate_vector,
ui_floor,
ui_cap,
period_idx,
)
# compute consumption phase statistics
# update pv of simulated consumption
pv_consumption_simulated += (
1 / (1 + interest_rate_raw) ** period_idx
) * consumption_simulated
# update aggregate variables
revenue_ss_simulated = (
np.repeat(tax_ss, n_simulations).reshape((n_types, n_simulations))
* wage_level
* wage_hc_factor_simulated
* employed_simulated
)
revenue_ui_simulated = (
np.repeat(tax_ui_vector[:, period_idx], n_simulations).reshape(
(n_types, n_simulations)
)
* wage_level
* wage_hc_factor_simulated
* employed_simulated
)
revenue_lumpsum_simulated = (
np.repeat(tax_income, n_simulations).reshape((n_types, n_simulations))
* wage_level
* wage_hc_factor_simulated
* employed_simulated
+ np.repeat(tax_income, n_simulations).reshape((n_types, n_simulations))
* interest_rate_raw
* assets_simulated
)
revenue_consumption_simulated = (
np.repeat(tax_consumption, n_simulations).reshape(
n_types, n_simulations
)
* consumption_simulated
)
cost_ui_simulated = (
simulate_ui_benefits(
wage_level * wage_hc_factor_pre_displacement_simulated,
ui_replacement_rate_vector,
ui_floor,
ui_cap,
period_idx,
)
* nonemployed_simulated
)
cost_lumpsum_simulated = np.repeat(
transfers_lumpsum, n_simulations
).reshape((n_types, n_simulations))
average_revenue_ss_simulated[:, period_idx] = (
np.sum(revenue_ss_simulated, axis=1) / n_simulations
)
average_revenue_ui_simulated[:, period_idx] = (
np.sum(revenue_ui_simulated, axis=1) / n_simulations
)
average_revenue_lumpsum_simulated[:, period_idx] = (
np.sum(revenue_lumpsum_simulated, axis=1) / n_simulations
)
average_revenue_consumption_simulated[:, period_idx] = (
np.sum(revenue_consumption_simulated, axis=1) / n_simulations
)
average_cost_ss_simulated[:, period_idx] = np.zeros(n_types) / n_simulations
average_cost_ui_simulated[:, period_idx] = (
np.sum(cost_ui_simulated, axis=1) / n_simulations
)
average_cost_lumpsum_simulated[:, period_idx] = (
np.sum(cost_lumpsum_simulated, axis=1) / n_simulations
)
average_cost_consumption_simulated[:, period_idx] = (
np.zeros(n_types) / n_simulations
)
average_cost_total_simulated[:, period_idx] = (
np.zeros(n_types) # no cost of social security during working age
+ np.sum(cost_ui_simulated, axis=1)
+ np.sum(cost_lumpsum_simulated, axis=1)
+ np.zeros(n_types) # consumption tax not used to finance anything
) / n_simulations
average_revenue_total_simulated[:, period_idx] = (
np.sum(revenue_ss_simulated, axis=1)
+ np.sum(revenue_ui_simulated, axis=1)
+ np.sum(revenue_lumpsum_simulated, axis=1)
+ np.sum(revenue_consumption_simulated, axis=1)
) / n_simulations
average_balance_total_simulated[:, period_idx] = (
average_revenue_total_simulated[:, period_idx]
- average_cost_total_simulated[:, period_idx]
)
# get statistics
(
share_employed[:, period_idx],
share_unemployed[:, period_idx],
share_unemployed_loss[:, period_idx],
share_nonemployed[:, period_idx],
log_consumption_employed_mean[:, period_idx],
log_consumption_nonemployed_mean[:, period_idx],
consumption_employed_mean[:, period_idx],
consumption_nonemployed_mean[:, period_idx],
wage_hc_factor_mean[:, period_idx],
wage_hc_factor_employed_mean[:, period_idx],
wage_hc_factor_unemployed_loss_mean[:, period_idx],
wage_hc_factor_nonemployed_mean[:, period_idx],
wage_hc_factor_displaced_mean[:, :, period_idx],
wage_hc_factor_nondisplaced_mean[:, :, period_idx],
wage_hc_factor_pre_displacement_mean[:, period_idx],
marginal_utility_nonemployed_mean[:, period_idx],
income_median[:, period_idx],
hc_mean[:, period_idx],
hc_employed_mean[:, period_idx],
hc_nonemployed_mean[:, period_idx],
ui_benefits_mean[:, period_idx],
ui_effective_replacement_rate[:, period_idx],
ui_share_floor_binding[:, period_idx],
ui_share_cap_binding[:, period_idx],
assets_mean[:, period_idx],
assets_nonemployed_mean[:, period_idx],
assets_distribution[:, :, period_idx],
assets_over_income_mean[:, period_idx],
distribution_hc_assets_nonemployed[:, :, :, period_idx],
log_assets_over_income_nonemployed_mean[:, period_idx],
) = _get_statistics_consumption_phase(
employed_simulated,
unemployed_simulated,
unemployed_loss_simulated,
nonemployed_simulated,
consumption_simulated,
wage_hc_factor_simulated,
wage_hc_factor_pre_displacement_simulated,
duration_unemployed_simulated,
duration_since_displacement_simulated,
hc_simulated,
assets_simulated,
tax_ui_vector,
ui_replacement_rate_vector,
ui_floor,
ui_cap,
period_idx,
)
# update simulated discounted value
pv_utility_simulated[
searching_all_simulated
] += discount_factor_compounded * leisure_utility_interpolated(
effort_searching_simulated[searching_all_simulated]
)
pv_utility_simulated[
employed_simulated
] += discount_factor_compounded * consumption_utility(
consumption_simulated[employed_simulated]
)
pv_utility_simulated[
nonemployed_simulated
] += discount_factor_compounded * consumption_utility(
np.maximum(
consumption_simulated[nonemployed_simulated],
consumption_min,
)
)
if np.any(np.isnan(pv_utility_simulated)):
warnings.warn("NaN in simulated discounted value at birth")
# simulate transition
# simulate transition events
hc_loss_event_simulated = (
np.repeat(hc_loss_probability, n_simulations).reshape(
n_types, n_simulations
)
>= np.random.rand(n_types, n_simulations)
).astype(bool)
job_loss_event_simulated = (
np.repeat(separation_rate_vector[:, period_idx], n_simulations).reshape(
n_types, n_simulations
)
>= np.random.rand(n_types, n_simulations)
).astype(bool)
# simulate experience transition
hc_simulated = (
hc_simulated
+ np.full((n_types, n_simulations), 1.0) * employed_simulated
) # increase experience of employed workers by 1
hc_pre_displacement_simulated = (
hc_pre_displacement_simulated * nonemployed_simulated
+ hc_simulated * employed_simulated
)
# update duration tracker # todo: resolve this abomination
duration_unemployed_simulated = (
duration_unemployed_simulated
+ np.logical_and(
duration_unemployed_simulated >= 1,
duration_unemployed_simulated <= 5,
)
)
duration_unemployed_simulated = (
duration_unemployed_simulated
- 10
* (duration_unemployed_simulated > 0)
* employed_simulated
* job_loss_event_simulated
)
duration_unemployed_simulated = (
duration_unemployed_simulated
+ (duration_unemployed_simulated == 0)
* employed_simulated
* job_loss_event_simulated
)
duration_unemployed_simulated = duration_unemployed_simulated - 6 * (
duration_unemployed_simulated > 5
)
duration_unemployed_simulated = np.maximum(duration_unemployed_simulated, 0)
duration_since_displacement_simulated += (
np.full((n_types, n_simulations), 1.0)
* employed_simulated
* (1 - job_loss_event_simulated)
) # +1 for all workers that are still employed
duration_since_displacement_simulated *= (
np.full((n_types, n_simulations), 1.0)
* employed_simulated
* (1 - job_loss_event_simulated)
) # =0 for everyone else
duration_since_displacement_simulated = np.minimum(
duration_since_displacement_simulated, 5
) # capped at 5
# simulate transitions in employment status
searching_simulated = (
unemployed_simulated * (1 - hc_loss_event_simulated)
+ employed_simulated * job_loss_event_simulated
).astype(bool)
searching_loss_simulated = (
unemployed_loss_simulated
+ unemployed_simulated * hc_loss_event_simulated
).astype(bool)
searching_all_simulated = (
searching_simulated + searching_loss_simulated
).astype(bool)
employed_simulated = (
employed_simulated * (1 - job_loss_event_simulated)
).astype(bool)
# update assets
assets_simulated = savings_simulated
# compound discount factor
discount_factor_compounded = discount_factor_compounded * discount_factor
# check for error in state simulation
if (
np.sum(
searching_simulated + searching_loss_simulated + employed_simulated
)
< n_simulations
):
warnings.warn(
f"ERROR! in transition from consumption phase "
f"in period {period_idx} to search phase in {period_idx + 1}"
)
# retirement period
# simulate one more transition to consumption phase
unemployed_simulated = searching_simulated
unemployed_loss_simulated = searching_loss_simulated
# hc loss materialises upon retirement
hc_loss_simulated = np.full((n_types, n_simulations), np.nan)
for type_idx in range(n_types):
hc_loss_simulated[type_idx, :] = (
hc_simulated[type_idx, :]
- _hc_after_loss_1_agent(
hc_simulated[type_idx, :],
wage_loss_factor_vector[type_idx, :],
wage_loss_reference_vector[type_idx, :],
period_idx + 1,
)
) * searching_loss_simulated[type_idx, :]
hc_simulated = (
| |
Screen size
:type size: Size
:raises ValueError: In case of unknown unit value
:return: rectangle in pixel units
:rtype: RectWithUnit
"""
if size is None:
return None
if not isinstance(size, Size) and not isinstance(size, tuple):
raise ValueError('Invalid parameter. Expected instance of Size or tuple')
w = size.width if isinstance(size, Size) else size[0]
h = size.height if isinstance(size, Size) else size[1]
if self.unit == self.RECT_UNIT_PX:
return RectWithUnit(self.left, self.top, self.right, self.bottom, self.unit)
elif self.unit == self.RECT_UNIT_PER:
return RectWithUnit(w * (self.left / 100.0), h * (self.top / 100.0), w * (self.right / 100.0), h * (self.bottom / 100.0), self.RECT_UNIT_PX)
elif self.unit == self.RECT_UNIT_HU:
pxinhu = h / 100.0
return RectWithUnit(self.left * pxinhu, self.top * pxinhu, self.right * pxinhu, self.bottom * pxinhu, self.RECT_UNIT_PX)
else:
raise ValueError('Unknown unit. unit: \"{}\"'.format(self.unit))
def transform(self, delta, relSize=None):
"""
Transforms rectangle to delta
:param delta: delta rectangle
:type delta: RectWithUnit
:param relSize: Screen size
:type relSize: Size
:raises ValueError: In case of delta is not instance of RectWithUnit or relSize is not set in case of this rectangle is not in pixel unit
:return: transformed rectangle
:rtype: RectWithUnit
"""
if delta is None:
return None
if not isinstance(delta, RectWithUnit):
raise ValueError(
'Invalid parameter. Expected instance of \"RectWithUnit\" class')
size = self.size()
deltaSize = delta.size()
res = None
if delta.unit == self.RECT_UNIT_PX:
if self.unit != self.RECT_UNIT_PX and relSize is None:
raise ValueError('Invalid parameter. Relative size is not specified')
res = (self if self.unit == self.RECT_UNIT_PX else self.toPx(relSize)) + delta
elif delta.unit == self.RECT_UNIT_PER:
res = RectWithUnit()
res.left = self.left + (size.width * (delta.left / 100.0))
res.top = self.top + (size.height * (delta.top / 100.0))
res.right = res.left + (size.width * (deltaSize.width / 100.0))
res.bottom = res.top + (size.height * (deltaSize.height / 100.0))
res.unit = self.unit
elif delta.unit == self.RECT_UNIT_HU:
huval = size.height / 100.0
res = RectWithUnit()
res.left = self.left + (delta.left * huval)
res.top = self.top + (delta.top * huval)
res.right = res.left + (size.width + (deltaSize.width * huval))
res.bottom = res.top + (size.height + (deltaSize.height * huval))
res.unit = self.unit
return res
# if self.unit == self.RECT_UNIT_PX:
# if delta.unit == self.RECT_UNIT_PX:
# return self + delta
# elif delta.unit == self.RECT_UNIT_PER:
# return RectWithUnit(self.left + (size.width * (delta.left / 100.0)), self.top + (size.height * (delta.top / 100.0)),
# self.left + (size.width * (deltaSize.width / 100.0)), self.top + (size.height * (deltaSize.height / 100.0)),
# self.RECT_UNIT_PX)
# elif delta.unit == self.RECT_UNIT_HU:
# pxinhu = size.height / 100.0
# return RectWithUnit(self.left + (delta.left * pxinhu), self.top + (delta.top * pxinhu),
# self.left + (size.width * (deltaSize.width * pxinhu)), self.top + (size.height * (deltaSize.height * pxinhu)), self.RECT_UNIT_PX)
# elif self.unit == self.RECT_UNIT_PER:
# if delta.unit == self.RECT_UNIT_PX:
# return self.toPx(relSize) + delta
# elif delta.unit == self.RECT_UNIT_PER:
# return RectWithUnit(self.left + (size.width * (delta.left / 100.0)), self.top + (size.height * (delta.top / 100.0)),
# self.left + (size.width * (deltaSize.width / 100.0)), self.top + (size.height * (deltaSize.height / 100.0)),
# self.RECT_UNIT_PER)
# elif delta.unit == self.RECT_UNIT_HU:
# perinhu = size.height / 100.0
# return RectWithUnit(self.left + (delta.left * pxinhu), self.top + (delta.top * pxinhu),
# self.left + (size.width * (deltaSize.width * pxinhu)), self.top + (size.height * (deltaSize.height * pxinhu)), self.RECT_UNIT_PER)
# elif self.unit == self.RECT_UNIT_HU:
# if delta.unit == self.RECT_UNIT_PX:
# if relSize is None:
# raise ValueError('Invalid parameter. Relative size is not specified')
# return self.toPx(relSize) + delta
# elif delta.unit == self.RECT_UNIT_PER:
# return RectWithUnit(self.left + (size.width * (delta.left / 100.0)), self.top + (size.height * (delta.top / 100.0)),
# self.left + (size.width * (deltaSize.width / 100.0)), self.top + (size.height * (deltaSize.height / 100.0)),
# self.RECT_UNIT_HU)
# elif delta.unit == self.RECT_UNIT_HU:
# perinhu = size.height / 100.0
# return RectWithUnit(self.left + (delta.left * pxinhu), self.top + (delta.top * pxinhu),
# self.left + (size.width * (deltaSize.width * pxinhu)), self.top + (size.height * (deltaSize.height * pxinhu)), self.RECT_UNIT_HU)
#
# return None
__rmul__ = __mul__
class Vector(object):
    """
    The Vector class defines the vector in two-dimensional space using
    floating point precision.

    :param x: coordinate on the x-axis
    :type x: float, optional
    :param y: coordinate on the y-axis
    :type y: float, optional
    """

    def __init__(self, x=0.0, y=0.0):
        super().__init__()
        self.x = x
        self.y = y

    @classmethod
    def fromPoint(cls, point: 'Point') -> 'Vector':
        """
        Returns Vector converted from point

        :raises ValueError: If the point does not exist
        :param point: Point in two-dimensional space
        :type point: Point
        """
        # 'Point' is kept as a string forward reference (matching the
        # 'Vector' annotations below) so the annotation is not evaluated
        # at class-creation time.
        if point is None:
            raise ValueError('point does not exist')
        return cls(point.x, point.y)

    @classmethod
    def fromPoints(cls, origin: 'Point', point: 'Point') -> 'Vector':
        """
        Returns Vector calculated from two points (pointing from *origin*
        to *point*)

        :raises ValueError: If origin or point does not exist
        :param origin: Starting point in two-dimensional space
        :type origin: Point
        :param point: Terminal point in two-dimensional space
        :type point: Point
        """
        if origin is None:
            raise ValueError('origin does not exist')
        if point is None:
            raise ValueError('point does not exist')
        return cls(point.x - origin.x, point.y - origin.y)

    @property
    def magnitude(self):
        """
        Returns the length of this vector

        :return: length of this vector
        :rtype: float
        """
        return math.sqrt(self.x * self.x + self.y * self.y)

    def normalize(self):
        """
        Makes this vector have a magnitude of 1.0

        :raises ZeroDivisionError: if this is a zero-length vector
        """
        # Reuse the magnitude property instead of recomputing the square
        # root; the previous math.fabs() wrapper was dead code, since
        # math.sqrt() never returns a negative value.
        length = self.magnitude
        self.x = self.x / length
        self.y = self.y / length

    def __str__(self):
        return '{{x: {0}, y: {1}}}'.format(self.x, self.y)

    def __add__(self, other: 'Vector'):
        return Vector(self.x + other.x, self.y + other.y)

    def __sub__(self, other: 'Vector'):
        return Vector(self.x - other.x, self.y - other.y)

    def __eq__(self, other: 'Vector'):
        # NOTE: defining __eq__ without __hash__ leaves Vector unhashable;
        # kept as-is to preserve existing behavior.
        if other is None:
            return False
        return self.x == other.x and self.y == other.y

    def __mul__(self, other):
        # NOTE(review): returning NotImplemented would be the idiomatic
        # rich-operator protocol; the explicit raise is kept to preserve
        # the observable behavior for non-numeric operands.
        if not isinstance(other, int) and not isinstance(other, float):
            raise NotImplementedError()
        return Vector(self.x * other, self.y * other)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, int) and not isinstance(other, float):
            raise NotImplementedError()
        return Vector(self.x / other, self.y / other)
class VectorWithUnit(Vector):
    """
    The VectorWithUnit class defines the vector in two-dimensional space
    with a particular unit using floating point precision.

    :param x: coordinate on the x-axis
    :type x: float, optional
    :param y: coordinate on the y-axis
    :type y: float, optional
    :param unit: coordinate unit
    :type unit: str, optional

    .. note::
        | *Coordinate Unit:*
        | 'px' - value in pixels
        | 'hu' - value in height units.
        | Ex. 1hu is `{height of the screen} / 100`
        | '%' - value in percentages.
        | Ex. 50% of the x is half of the width of the screen
    """

    # Supported coordinate units (see the class note above).
    VECTOR_UNIT_PX = 'px'
    VECTOR_UNIT_PER = '%'
    VECTOR_UNIT_HU = 'hu'

    def __init__(self, x=0.0, y=0.0, unit=VECTOR_UNIT_PX, **kwargs):
        # **kwargs is accepted and silently ignored — presumably to
        # tolerate keyword-based construction/deserialization with extra
        # fields; TODO confirm against callers.
        super().__init__(x, y)
        self.unit = unit
    @classmethod
    def fromPoint(cls, point: 'PointWithUnit') -> 'VectorWithUnit':
        """
        Returns VectorWithUnit converted from point, preserving the
        point's unit.

        :raises ValueError: If the point does not exist
        :param point: Point in two-dimensional space
        :type point: PointWithUnit
        """
        if point is None:
            raise ValueError('point does not exist')
        return cls(point.x, point.y, point.unit)
    @classmethod
    def fromPoints(cls, origin: 'PointWithUnit', point: 'PointWithUnit') -> 'VectorWithUnit':
        """
        Returns VectorWithUnit calculated from two points (pointing from
        *origin* to *point*); both points must use the same unit.

        :raises ValueError: If starting point or terminal point does not
            exist, also if unit of two points is not same
        :param origin: Starting point in two-dimensional space
        :type origin: PointWithUnit
        :param point: Terminal point in two-dimensional space
        :type point: PointWithUnit
        """
        if origin is None:
            raise ValueError('origin does not exist')
        if point is None:
            raise ValueError('point does not exist')
        # Mixing units (e.g. px and hu) would produce a meaningless delta.
        if origin.unit != point.unit:
            raise ValueError('units of points is not equal')
        return cls(point.x - origin.x, point.y - origin.y, origin.unit)
    def __str__(self):
        return '{{x: {0}, y: {1}, unit: {2}}}'.format(self.x, self.y, self.unit)

    def __add__(self, other: 'VectorWithUnit'):
        # Vectors can only be combined when expressed in the same unit.
        if self.unit != other.unit:
            raise ValueError('units of vectors is not equal')
        return VectorWithUnit(self.x + other.x, self.y + other.y, self.unit)

    def __sub__(self, other: 'VectorWithUnit'):
        if self.unit != other.unit:
            raise ValueError('units of vectors is not equal')
        return VectorWithUnit(self.x - other.x, self.y - other.y, self.unit)

    def __eq__(self, other: 'VectorWithUnit'):
        # Equality requires matching coordinates AND matching unit.
        if other is None:
            return False
        return self.x == other.x and self.y == other.y and self.unit == other.unit
def __mul__(self, other):
if not isinstance(other, int) and not | |
the cache entry,
:param sample: a sample to compare the stored value with,
:param value: new value for the given key,
:param key_hint: (optional) Ignite data type, for which the given key
should be converted,
:param sample_hint: (optional) Ignite data type, for whic
the given sample should be converted
:param value_hint: (optional) Ignite data type, for which the given value
should be converted,
:param binary: (optional) pass True to keep the value in binary form.
False by default,
:param query_id: (optional) a value generated by client and returned
as-is in response.query_id. When the parameter is omitted, a random
value is generated,
:return: API result data object. Contains zero status and a boolean
success code, or non-zero status and an error description if something
has gone wrong.
"""
    # Build the OP_CACHE_REPLACE_IF_EQUALS request: header fields followed
    # by key, the expected sample and the replacement value.
    query_struct = Query(
        OP_CACHE_REPLACE_IF_EQUALS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('sample', sample_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    result = query_struct.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,  # keep-binary flag
            'key': key,
            'sample': sample,
            'value': value,
        },
        response_config=[
            ('success', Bool),
        ],
    )
    # On success, unwrap the single boolean 'success' field so callers get
    # a plain bool in result.value.
    if result.status == 0:
        result.value = result.value['success']
    return result
def cache_clear(
        connection: 'Connection', cache: Union[str, int], binary=False,
        query_id=None,
) -> 'APIResult':
    """
    Clears the cache without notifying listeners or cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    # OP_CACHE_CLEAR takes only the cache ID and the flags byte.
    clear_query = Query(
        OP_CACHE_CLEAR,
        [
            ('hash_code', Int),
            ('flag', Byte),
        ],
        query_id=query_id,
    )
    params = {
        'hash_code': cache_id(cache),
        'flag': 1 if binary else 0,
    }
    return clear_query.perform(connection, query_params=params)
def cache_clear_key(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Clears the cache key without notifying listeners or cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry,
    :param key_hint: (optional) Ignite data type, for which the given key
     should be converted,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    # When no explicit hint is given, let Ignite infer the key type.
    clear_query = Query(
        OP_CACHE_CLEAR_KEY,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    params = {
        'hash_code': cache_id(cache),
        'flag': 1 if binary else 0,
        'key': key,
    }
    return clear_query.perform(connection, query_params=params)
def cache_clear_keys(
        connection: 'Connection', cache: Union[str, int], keys: list,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Clears the cache keys without notifying listeners or cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param keys: list of keys or tuples of (key, key_hint),
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned as-is
     in response.query_id. When the parameter is omitted, a random value
     is generated,
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    # AnyDataArray serializes the heterogeneous key collection, honoring
    # per-key (key, key_hint) tuples.
    clear_query = Query(
        OP_CACHE_CLEAR_KEYS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('keys', AnyDataArray()),
        ],
        query_id=query_id,
    )
    params = {
        'hash_code': cache_id(cache),
        'flag': 1 if binary else 0,
        'keys': keys,
    }
    return clear_query.perform(connection, query_params=params)
def cache_remove_key(
        connection: 'Connection', cache: Union[str, int], key,
        key_hint: object=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes the cache entry with the given key, notifying listeners and
    cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry,
    :param key_hint: (optional) Ignite data type, for which the given key
     should be converted,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a boolean
     success code, or non-zero status and an error description if something
     has gone wrong.
    """
    remove_query = Query(
        OP_CACHE_REMOVE_KEY,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    outcome = remove_query.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'key': key,
        },
        response_config=[
            ('success', Bool),
        ],
    )
    # On success, unwrap the boolean flag out of the response structure.
    if outcome.status == 0:
        outcome.value = outcome.value['success']
    return outcome
def cache_remove_if_equals(
        connection: 'Connection', cache: Union[str, int], key, sample,
        key_hint=None, sample_hint=None,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes an entry with a given key if provided value is equal to
    actual value, notifying listeners and cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry,
    :param sample: a sample to compare the stored value with,
    :param key_hint: (optional) Ignite data type, for which the given key
     should be converted,
    :param sample_hint: (optional) Ignite data type, for which
     the given sample should be converted,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned
     as-is in response.query_id. When the parameter is omitted, a random
     value is generated,
    :return: API result data object. Contains zero status and a boolean
     success code, or non-zero status and an error description if something
     has gone wrong.
    """
    remove_query = Query(
        OP_CACHE_REMOVE_IF_EQUALS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('sample', sample_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    outcome = remove_query.perform(
        connection,
        query_params={
            'hash_code': cache_id(cache),
            'flag': 1 if binary else 0,
            'key': key,
            'sample': sample,
        },
        response_config=[
            ('success', Bool),
        ],
    )
    # On success, unwrap the boolean flag out of the response structure.
    if outcome.status == 0:
        outcome.value = outcome.value['success']
    return outcome
def cache_remove_keys(
        connection: 'Connection', cache: Union[str, int], keys: Iterable,
        binary=False, query_id=None,
) -> 'APIResult':
    """
    Removes entries with given keys, notifying listeners and cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param keys: list of keys or tuples of (key, key_hint),
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned as-is
     in response.query_id. When the parameter is omitted, a random value
     is generated,
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    remove_query = Query(
        OP_CACHE_REMOVE_KEYS,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('keys', AnyDataArray()),
        ],
        query_id=query_id,
    )
    params = {
        'hash_code': cache_id(cache),
        'flag': 1 if binary else 0,
        'keys': keys,
    }
    return remove_query.perform(connection, query_params=params)
def cache_remove_all(
        connection: 'Connection', cache: Union[str, int], binary=False,
        query_id=None,
) -> 'APIResult':
    """
    Removes all entries from cache, notifying listeners and cache writers.

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned as-is
     in response.query_id. When the parameter is omitted, a random value
     is generated,
    :return: API result data object. Contains zero status on success,
     non-zero status and an error description otherwise.
    """
    # OP_CACHE_REMOVE_ALL takes only the cache ID and the flags byte.
    remove_query = Query(
        OP_CACHE_REMOVE_ALL,
        [
            ('hash_code', Int),
            ('flag', Byte),
        ],
        query_id=query_id,
    )
    params = {
        'hash_code': cache_id(cache),
        'flag': 1 if binary else 0,
    }
    return remove_query.perform(connection, query_params=params)
def cache_get_size(
connection: 'Connection', cache: Union[str, int], peek_modes=0,
binary=False, query_id=None,
) -> 'APIResult':
"""
Gets the number of entries in cache.
:param connection: connection to Ignite server,
:param cache: name or ID of the cache,
:param peek_modes: (optional) limit count to near cache partition
(PeekModes.NEAR), primary cache (PeekModes.PRIMARY), or backup cache
(PeekModes.BACKUP). Defaults to all cache partitions (PeekModes.ALL),
:param binary: (optional) pass True to keep the value in binary form.
False by | |
<reponame>celiafish/VisTrails
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: <EMAIL>
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
##############################################################################
# Transfer Function Widget for VTK
from PyQt4 import QtCore, QtGui
from vistrails.core.modules.vistrails_module import Module
from vistrails.core.modules.basic_modules import Constant
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.system import get_elementtree_library
from vistrails.core.utils.color import ColorByName
from vistrails.gui.modules.constant_configuration import ConstantWidgetMixin
import vtk
import math
import pickle
import copy
import StringIO
import unittest
ElementTree = get_elementtree_library()
from .identifiers import identifier as vtk_pkg_identifier
################################################################################
# etc
def clamp(v, mn, mx, eps=0.0):
    """Clamp v into [mn, mx].  An optional eps margin shrinks the open
    interval from both sides first, so values within eps of a bound snap
    to the exact bound."""
    lo = mn + eps
    hi = mx - eps
    if v < lo:
        return mn
    elif v > hi:
        return mx
    else:
        return v
# Because of a Qt bug see
# http://bugreports.qt.nokia.com/browse/QTBUG-17985
# We cannot set the scene from 0 to 1. In this case we will set it
# 4000 x 4000 with GLOBAL_SCALE. When the bug is fixed, just set it to 1.0
GLOBAL_SCALE = 4000.0
##############################################################################
# Transfer Function object
class TransferFunction(object):
    """Piecewise-linear transfer function for volume rendering.

    Knots are stored in self._pts as (scalar, opacity, (r, g, b)) tuples
    with scalar normalized to [0, 1]; the list is kept sorted in ascending
    scalar order by add_point().  The normalized scalars are mapped onto
    [self._min_range, self._max_range] when exported to VTK.
    """

    def __init__(self):
        self._min_range = 0.0
        self._max_range = 1.0
        self._pts = []  # sorted list of (scalar, opacity, (r, g, b))

    def set_range(self, mn, mx):
        """Set the scalar range the normalized knot positions map onto."""
        self._min_range = mn
        self._max_range = mx

    def set_on_vtk_volume_property(self, vtk_volume_property):
        """Install this transfer function's opacity and color maps on the
        given vtkVolumeProperty."""
        # Builds the opacity and color functions
        of = vtk.vtkPiecewiseFunction()
        cf = vtk.vtkColorTransferFunction()
        vp = vtk_volume_property
        for pt in self._pts:
            (scalar, opacity, color) = pt
            # Map normalized scalar to the tf range
            s = self._min_range + (self._max_range - self._min_range) * scalar
            of.AddPoint(s, opacity)
            cf.AddRGBPoint(s, color[0], color[1], color[2])
        vp.SetScalarOpacity(of)
        vp.SetColor(cf)

    def get_vtk_transfer_functions(self):
        """Return (opacity, color) VTK transfer function objects built
        from the current knots."""
        of = vtk.vtkPiecewiseFunction()
        cf = vtk.vtkColorTransferFunction()
        for pt in self._pts:
            (scalar, opacity, color) = pt
            # Map normalized scalar to the tf range
            s = self._min_range + (self._max_range - self._min_range) * scalar
            of.AddPoint(s, opacity)
            cf.AddRGBPoint(s, color[0], color[1], color[2])
        return (of, cf)

    def add_point(self, scalar, opacity, color):
        """Add a knot and keep the knot list sorted by scalar."""
        self._pts.append((scalar, opacity, color))
        self._pts.sort()

    def get_value(self, scalar):
        """get_value(scalar): returns the opacity and color
        linearly interpolated at the value. Useful for
        adding knots.

        Requires at least one knot to be present.
        """
        # _pts is sorted ascending (see add_point), so advance to the first
        # knot at or beyond 'scalar'.  (The previous scan condition used
        # '> scalar', which stopped immediately on an ascending list and
        # made the interpolation branch below unreachable.)
        ix = 0
        while ix < len(self._pts) and self._pts[ix][0] < scalar:
            ix += 1
        if ix == 0:
            # At or before the first knot: clamp to the first knot's value
            return (self._pts[0][1], self._pts[0][2])
        elif ix == len(self._pts):
            # Beyond the last knot: clamp to the last knot's value
            return (self._pts[-1][1], self._pts[-1][2])
        else:
            # Interpolate between knots ix-1 and ix; u is the fraction of
            # the way from knot ix-1 toward knot ix.
            u = ((scalar - self._pts[ix-1][0]) /
                 (self._pts[ix][0] - self._pts[ix-1][0]))
            do = self._pts[ix][1] - self._pts[ix-1][1]
            dr = self._pts[ix][2][0] - self._pts[ix-1][2][0]
            dg = self._pts[ix][2][1] - self._pts[ix-1][2][1]
            db = self._pts[ix][2][2] - self._pts[ix-1][2][2]
            return (self._pts[ix-1][1] + u * do,
                    (self._pts[ix-1][2][0] + u * dr,
                     self._pts[ix-1][2][1] + u * dg,
                     self._pts[ix-1][2][2] + u * db))

    def __copy__(self):
        result = TransferFunction()
        result._min_range = self._min_range
        result._max_range = self._max_range
        result._pts = copy.copy(self._pts)
        return result

    def __eq__(self, other):
        if type(other) != type(self):
            return False
        if self._min_range != other._min_range:
            return False
        if self._max_range != other._max_range:
            return False
        # zip() truncates to the shorter sequence, so without this length
        # check a function with extra trailing knots compared as equal.
        if len(self._pts) != len(other._pts):
            return False
        for my_pt, other_pt in zip(self._pts, other._pts):
            if my_pt != other_pt:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def serialize(self, node=None):
        """serialize(node: ElementTree.Element) -> str
        Convert this object to an XML representation in string format.
        """
        if node is None:
            node = ElementTree.Element('transfer_function')
        node.set('min_range', str(self._min_range))
        node.set('max_range', str(self._max_range))
        for pt in self._pts:
            ptNode = ElementTree.SubElement(node, 'point')
            ptNode.set('scalar', str(pt[0]))
            ptNode.set('opacity', str(pt[1]))
            color = pt[2]
            colorNode = ElementTree.SubElement(ptNode, 'color')
            colorNode.set('R', str(color[0]))
            colorNode.set('G', str(color[1]))
            colorNode.set('B', str(color[2]))
        return ElementTree.tostring(node)

    @staticmethod
    def parse(strNode):
        """parse(strNode: str) -> TransferFunction
        Parses a string representing a TransferFunction and returns a
        TransferFunction object
        """
        try:
            node = ElementTree.fromstring(strNode)
        except SyntaxError:
            # it was serialized using pickle (legacy hex-encoded format);
            # remap the old module path to the current package layout
            class FixUnpickler(pickle.Unpickler):
                def find_class(self, module, name):
                    if module == 'packages.vtk.tf_widget':
                        module = 'vistrails.packages.vtk.tf_widget'
                    return pickle.Unpickler.find_class(self, module, name)
            tf = FixUnpickler(StringIO.StringIO(strNode.decode('hex'))).load()
            tf._pts.sort()
            return tf
        if node.tag != 'transfer_function':
            return None
        # read attributes
        tf = TransferFunction()
        tf._min_range = float(node.get('min_range', "0.0"))
        tf._max_range = float(node.get('max_range', "1.0"))
        for ptNode in node.getchildren():
            if ptNode.tag == 'point':
                scalar = float(ptNode.get('scalar', '-1.0'))
                opacity = float(ptNode.get('opacity', '1.0'))
                for colorNode in ptNode.getchildren():
                    if colorNode.tag == 'color':
                        color = (float(colorNode.get('R', '0.0')),
                                 float(colorNode.get('G', '0.0')),
                                 float(colorNode.get('B', '0.0')))
                        break
                tf._pts.append((scalar, opacity, color))
        tf._pts.sort()
        return tf
##############################################################################
# Graphics Items
class TransferFunctionPoint(QtGui.QGraphicsEllipseItem):
    # A draggable knot of the transfer function.  Knots and connecting
    # lines form a doubly-linked list: self._left_line/_right_line point
    # to the line items on either side, and each line holds _point_left/
    # _point_right references back to its endpoint knots.  The end knots
    # have no left (resp. right) line and are pinned to x=0 (resp. x=1).
    # Pens used to draw the knot outline depending on selection state.
    selection_pens = { True: QtGui.QPen(QtGui.QBrush(
        QtGui.QColor(*(ColorByName.get_int('goldenrod_medium')))),GLOBAL_SCALE * 0.012),
                       False: QtGui.QPen() }
    def __init__(self, scalar, opacity, color, parent=None):
        QtGui.QGraphicsEllipseItem.__init__(self, parent)
        # scalar/opacity are normalized to [0, 1]; color components too.
        self._scalar = scalar
        self._opacity = opacity
        self._color = QtGui.QColor(color[0]*255,
                                   color[1]*255,
                                   color[2]*255)
        self.setPen(QtGui.QPen(QtGui.QColor(0,0,0)))
        self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)
        self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable)
        self.setFlag(QtGui.QGraphicsItem.ItemIsFocusable)
        if QtCore.QT_VERSION >= 0x40600:
            self.setFlag(QtGui.QGraphicsItem.ItemSendsGeometryChanges)
        self.setZValue(2.0)
        # Current view scale factors (updated by update_scale)
        self._sx = 1.0
        self._sy = 1.0
        # fixed scale: scene coordinates span [0, GLOBAL_SCALE] instead of
        # [0, 1] to work around the Qt bug mentioned at module level
        self._fsx = GLOBAL_SCALE
        self._fsy = GLOBAL_SCALE
        self._left_line = None
        self._right_line = None
        self._point = QtCore.QPointF(scalar * self._fsx, opacity * self._fsy)
        self.refresh()
        self.setToolTip("Double-click to change color\n"
                        "Right-click to remove point\n"
                        "Scalar: %.5f, Opacity: %.5f" % (self._scalar,
                                                         self._opacity))
    # This sets up the linked list of Lines
    def keyPressEvent(self, event):
        # Delete/Backspace removes the knot (end knots ignore it)
        if event.key() == QtCore.Qt.Key_Backspace or \
                event.key() == QtCore.Qt.Key_Delete:
            self.remove_self()
    def refresh(self):
        # Recompute the ellipse rect and scene position from the current
        # normalized scalar/opacity and scale factors.
        dx = self._fsx * 0.025 / self._sx
        dy = self._fsy * 0.025 / self._sy
        # this is the setup
        self.setBrush(QtGui.QBrush(self._color))
        self.setRect(-dx,
                     -dy,
                     2 * dx, 2 * dy)
        self.setPos(self._fsx * self._scalar,
                    self._fsy * self._opacity)
        self.update()
    def update_scale(self, sx, sy):
        # Keep knot size constant on screen when the view is rescaled
        self._sx = sx
        self._sy = sy
        self.refresh()
    def itemChange(self, change, value):
        if change == QtGui.QGraphicsItem.ItemSelectedChange:
            self.setPen(self.selection_pens[value])
        if change == QtGui.QGraphicsItem.ItemPositionChange:
            # moves point
            # value is now a QPointF, not a QPoint so no conversion needed
            pt = value
            # Clamp opacity to [0, 1] (in scene units)
            pt.setY(clamp(pt.y(), 0.0, 1.0 * self._fsy) )
            self._opacity = pt.y() / self._fsy
            self._point.setY(pt.y())
            if not self._left_line:
                # Leftmost knot is pinned to scalar 0
                pt.setX(0.0)
            elif not self._right_line:
                # Rightmost knot is pinned to scalar 1
                pt.setX(1.0 * self._fsx)
            else:
                assert self._left_line._point_right == self
                assert self._right_line._point_left == self
                # Interior knots may not cross their neighbors; eps keeps
                # a strict ordering so lines never degenerate
                pt.setX(clamp(pt.x(),
                              self._left_line._point_left._point.x(),
                              self._right_line._point_right._point.x(),
                              1e-6))
            self._point.setX(pt.x())
            self._scalar = pt.x() / self._fsx
            # Keep the adjacent lines and the filled polygon in sync
            if self._left_line:
                self._left_line.refresh()
            if self._right_line:
                self._right_line.refresh()
            if self.parentItem():
                self.parentItem()._tf_poly.setup()
            self.setToolTip("Double-click to change color\n"
                            "Right-click to remove point\n"
                            "Scalar: %.5f, Opacity: %.5f" % (self._scalar,
                                                             self._opacity))
            return QtGui.QGraphicsItem.itemChange(self, change, pt)
        return QtGui.QGraphicsItem.itemChange(self, change, value)
    def remove_self(self):
        if not self._left_line or not self._right_line:
            # Ignore, self is a corner node that can't be removed
            return
        # Removes the right line and self, re-ties data structure
        self._left_line._point_right = self._right_line._point_right
        self._left_line._point_right._left_line = self._left_line
        # be friends with garbage collector
        self._right_line._point_left = None
        self._right_line._point_right = None
        self.parentItem()._tf_poly.setup()
        self.scene().removeItem(self._right_line)
        self.scene().removeItem(self)
        self._left_line.refresh()
    def mouseDoubleClickEvent(self, event):
        # Double-click opens a color picker for this knot
        new_color = QtGui.QColorDialog.getColor(self._color)
        if not new_color.isValid():
            return
        self._color = new_color
        if self._left_line:
            self._left_line.refresh()
        if self._right_line:
            self._right_line.refresh()
        self.refresh()
        # sometimes the graphicsitem gets recreated, and we need to abort
        if self.parentItem():
            self.parentItem()._tf_poly.setup()
        QtGui.QGraphicsEllipseItem.mouseDoubleClickEvent(self, event)
    def mousePressEvent(self, event):
        # Right-click removes the knot; other buttons follow default
        # move/select behavior
        if event.button() == QtCore.Qt.RightButton:
            event.accept()
            self.remove_self()
        else:
            QtGui.QGraphicsEllipseItem.mousePressEvent(self, event)
    def paint(self, painter, option, widget=None):
        """ paint(painter: QPainter, option: QStyleOptionGraphicsItem,
                  widget: QWidget) -> None
        Peform painting of the point without the ugly default dashed-line black
        square
        """
        painter.setBrush(self.brush())
        painter.setPen(self.pen())
        painter.drawEllipse(self.rect())
    def add_self_to_transfer_function(self, tf):
        # Append this knot's normalized values to the given TransferFunction
        tf.add_point(self._scalar,
                     self._opacity,
                     (self._color.redF(),
                      self._color.greenF(),
                      self._color.blueF()))
class TransferFunctionPolygon(QtGui.QGraphicsPolygonItem):
def __init__(self, parent=None):
QtGui.QGraphicsPolygonItem.__init__(self, parent)
def setup(self):
# This inspects the scene, finds the left-most point, and
# then builds the polygon traversing the linked list structure
pt = self.parentItem().get_leftmost_point()
if not pt:
return
self.setZValue(1.25)
g = QtGui.QLinearGradient()
g.setStart(0.0, 0.5)
g.setFinalStop(1.0, 0.5)
g.setCoordinateMode(QtGui.QGradient.ObjectBoundingMode)
p = QtGui.QPen()
p.setStyle(QtCore.Qt.NoPen)
pts = [QtCore.QPointF(pt.x(), 0)]
self.setPen(p)
while 1:
c = QtGui.QColor(pt._color)
c.setAlphaF(pt._opacity)
g.setColorAt(pt._scalar, c)
pts.append(pt._point)
# | |
# Copyright (c) 2001-2013 WorldViz LLC.
# All rights reserved.
# Changed function removeSensor() so it will correctly report no active sensor after one has been removed
import viz
import math
import vizact
import vizmat
import viztask
import vizshape
# Default color for debug shapes
DEFAULT_DEBUG_COLOR = [1,0,1]
# Default color for activated debug shapes
DEFAULT_DEBUG_ACTIVE_COLOR = [0,1,0]
# Default priority for updating proximity sensors
DEFAULT_UPDATE_PRIORITY = viz.PRIORITY_LINKS + 1
# Proximity events
ENTER_PROXIMITY_EVENT = viz.getEventID('vizproximity_ENTER_PROXIMITY_EVENT')
EXIT_PROXIMITY_EVENT = viz.getEventID('vizproximity_EXIT_PROXIMITY_EVENT')
class ProximityEvent(viz.Event):
    """Object used for proximity enter/exit events"""
    def __init__(self,sensor,target,manager):
        """@args Sensor(), Target(), Manager()"""
        # Sensor whose boundary was crossed
        self.sensor = sensor
        # Target that entered or exited the sensor
        self.target = target
        # Manager that detected the crossing and dispatched this event
        self.manager = manager
class Source(object):
    """Base class for objects providing source position of proximity sensors/targets"""
    def getObject(self):
        # Null source: no underlying object
        return None
    def getMatrix(self):
        # Null source: identity transform
        return viz.Matrix()
    def getPosition(self):
        # Default derives the position from the full matrix; subclasses
        # may override with a cheaper direct query.
        return self.getMatrix().getPosition()
class MatrixSource(Source):
    """Use static matrix object as source"""
    def __init__(self,matrix):
        self._matrix = matrix
    def __repr__(self):
        return "{}(matrix={})".format(self.__class__.__name__,self._matrix)
    def getObject(self):
        return self._matrix
    def getMatrix(self):
        # Return a copy so callers cannot mutate the stored matrix
        return viz.Matrix(self._matrix)
    def getPosition(self):
        return self._matrix.getPosition()
class LinkableSource(Source):
    """Use linkable object as source"""
    def __init__(self,linkable,flag=0):
        """@arg linkable viz.VizLinkable()"""
        self._linkable = linkable
        # flag is forwarded to the linkable's getMatrix/getPosition
        # (e.g. viz.AVATAR_WORLD for bones)
        self._flag = flag
    def __repr__(self):
        return "{}(linkable={}, flag={})".format(self.__class__.__name__,self._linkable,self._flag)
    def getObject(self):
        return self._linkable
    def getMatrix(self):
        return self._linkable.getMatrix(self._flag)
    def getPosition(self):
        return self._linkable.getPosition(self._flag)
class NodeSource(Source):
    """Use node object as source"""
    def __init__(self,node, flag=viz.ABS_GLOBAL, name=''):
        """@arg node viz.VizNode()"""
        self._node = node
        # Coordinate frame flag and optional sub-node name forwarded to
        # node.getMatrix(); getPosition is inherited from Source and goes
        # through getMatrix().
        self._flag = flag
        self._name = name
    def __repr__(self):
        return "{}(node={}, flag={}, name={})".format(self.__class__.__name__,self._node,self._flag,repr(self._name))
    def getObject(self):
        return self._node
    def getMatrix(self):
        return self._node.getMatrix(self._flag,self._name)
def createSource(source):
    """Create Source object from standard Vizard object (e.g. node, view, tracker, bone, matrix, ...)

    Raises TypeError if the object cannot be adapted to a Source.
    NOTE: the isinstance checks below are ordered from most to least
    specific; do not reorder (e.g. a bone is also linkable).
    """
    # Use null source if None
    if source is None:
        return Source()
    # Check for Source objects
    if isinstance(source,Source):
        return source
    # Check for matrix objects
    if isinstance(source,viz.Matrix):
        return MatrixSource(source)
    # Check for node objects
    if isinstance(source,viz.VizNode):
        return NodeSource(source)
    # Check for bone objects
    if isinstance(source,viz.VizBone):
        return LinkableSource(source,viz.AVATAR_WORLD)
    # Check for any other linkable object
    if viz._getLinkableType(source) >= 0:
        return LinkableSource(source)
    raise TypeError, 'source is not a valid Source type'
class Shape(object):
    """Base class for objects providing shapes for proximity sensors"""
    def containsPoint(self,point):
        """Returns whether the point is inside the shape"""
        # Subclasses must implement the containment test
        raise NotImplementedError
    def createDebugNode(self,data):
        """Create and return node object for visualizing proximity shape"""
        # Default: shape has no debug visualization
        return None
class Sphere(Shape):
    """Spherical shape for proximity sensors"""
    def __init__(self,radius,center=(0.0,0.0,0.0)):
        self._radius = radius
        self._center = tuple(center)
    def __repr__(self):
        return "{}(radius={}, center={})".format(self.__class__.__name__,self._radius,self._center)
    def containsPoint(self,point):
        """Returns whether the point is inside the shape"""
        # Strictly inside: boundary points are not contained
        return vizmat.Distance(point,self._center) < self._radius
    def createDebugNode(self,data):
        """Create and return node object for visualizing proximity shape"""
        # Coarse wireframe sphere, offset to the shape's center
        s = vizshape.addSphere(radius=self._radius,slices=4,stacks=4,transform=viz.Matrix.translate(self._center))
        s.disable([viz.LIGHTING,viz.CULL_FACE])
        s.color(data.color)
        s.polyMode(viz.POLY_WIRE)
        return s
class Box(Shape):
    """Axis-aligned box shape for proximity sensors"""
    def __init__(self,size,center=(0.0,0.0,0.0)):
        self._size = tuple(size)
        self._center = tuple(center)
        # Pre-compute min/max extents per axis so containsPoint is a
        # cheap six-way comparison.
        hx = self._size[0] / 2.0
        hy = self._size[1] / 2.0
        hz = self._size[2] / 2.0
        cx, cy, cz = self._center
        self._extents = [ cx - hx, cx + hx
                        , cy - hy, cy + hy
                        , cz - hz, cz + hz ]
    def __repr__(self):
        return "{}(size={}, center={})".format(self.__class__.__name__,self._size,self._center)
    def containsPoint(self,point):
        """Returns whether the point is inside the shape"""
        xmin,xmax,ymin,ymax,zmin,zmax = self._extents
        if not (xmin < point[0] < xmax):
            return False
        if not (ymin < point[1] < ymax):
            return False
        return zmin < point[2] < zmax
    def createDebugNode(self,data):
        """Create and return node object for visualizing proximity shape"""
        # Wireframe box offset to the shape's center
        node = vizshape.addBox(size=self._size,transform=viz.Matrix.translate(self._center))
        node.disable([viz.LIGHTING,viz.CULL_FACE])
        node.color(data.color)
        node.polyMode(viz.POLY_WIRE)
        return node
class CircleArea(Shape):
    """2D circular area shape for proximity sensors"""
    def __init__(self,radius,center=(0.0,0.0)):
        self._radius = radius
        # Center on the XZ ground plane
        self._center = tuple(center[:2])
    def __repr__(self):
        return "{}(radius={}, center={})".format(self.__class__.__name__,self._radius,self._center)
    def containsPoint(self,point):
        """Returns whether the point is inside the shape"""
        # 2D test on the XZ plane; the Y component of point is ignored
        return vizmat.Distance((point[0],point[2]),self._center) < self._radius
    def createDebugNode(self,data):
        """Create and return node object for visualizing proximity shape"""
        viz.startLayer(viz.LINE_LOOP)
        # Add slices
        slices = 10
        dtheta = 2.0 * math.pi / slices
        radius = self._radius
        sin = math.sin
        cos = math.cos
        x = self._center[0]
        y = self._center[1]
        for i in range(slices+1):
            # Close the loop by reusing angle 0 for the final vertex
            if i == slices:
                theta = 0.0
            else:
                theta = i * dtheta
            viz.vertex( (sin(theta) * radius) + x , 0.0 , (cos(theta) * radius) + y )
        s = viz.endLayer()
        s.disable([viz.LIGHTING,viz.CULL_FACE])
        s.color(data.color)
        return s
class RectangleArea(Shape):
    """2D rectangle area shape for proximity sensors"""
    def __init__(self,size,center=(0.0,0.0)):
        self._size = tuple(size[:2])
        self._center = tuple(center[:2])
        # Pre-compute min/max extents on the XZ ground plane
        hx = self._size[0] / 2.0
        hz = self._size[1] / 2.0
        cx, cz = self._center
        self._extents = [ cx - hx, cx + hx
                        , cz - hz, cz + hz ]
    def __repr__(self):
        return "{}(size={}, center={})".format(self.__class__.__name__,self._size,self._center)
    def containsPoint(self,point):
        """Returns whether the point is inside the shape"""
        # 2D test on the XZ plane; the Y component of point is ignored
        xmin,xmax,zmin,zmax = self._extents
        if not (xmin < point[0] < xmax):
            return False
        return zmin < point[2] < zmax
    def createDebugNode(self,data):
        """Create and return node object for visualizing proximity shape"""
        xmin,xmax,zmin,zmax = self._extents
        viz.startLayer(viz.LINE_LOOP)
        # Trace the rectangle outline on the ground plane
        for vx,vz in ((xmin,zmin),(xmin,zmax),(xmax,zmax),(xmax,zmin)):
            viz.vertex((vx,0,vz))
        node = viz.endLayer()
        node.disable([viz.LIGHTING,viz.CULL_FACE])
        node.color(data.color)
        return node
class PolygonArea(Shape):
    """2D polygonal area shape for proximity sensors"""
    def __init__(self,points,offset=(0.0,0.0)):
        self._points = tuple( (float(p[0]),float(p[1])) for p in points )
        self._offset = tuple(offset[:2])
        # Shift all vertices by the offset once, up front
        dx = offset[0]
        dy = offset[1]
        self._verts = tuple( (px+dx,py+dy) for px,py in self._points )
    def __repr__(self):
        return "{}(points={}, offset={})".format(self.__class__.__name__,self._points,self._offset)
    def containsPoint(self,point):
        """Returns whether the point is inside the shape"""
        # 2D test on the XZ plane; the Y component of point is ignored
        return vizmat.pointInPolygon(self._verts,(point[0],point[2]))
    def createDebugNode(self,data):
        """Create and return node object for visualizing proximity shape"""
        viz.startLayer(viz.LINE_LOOP)
        # Trace the polygon outline on the ground plane
        for vx,vy in self._verts:
            viz.vertex((vx,0,vy))
        node = viz.endLayer()
        node.disable([viz.LIGHTING,viz.CULL_FACE])
        node.color(data.color)
        return node
class PathArea(PolygonArea):
    """2D path area specified through series of points and a radius distance from path

    The constructor expands the polyline into a closed polygon by walking
    the path once and emitting offset vertices 'radius' to the left and
    right of each segment, mitering the joints, then delegates containment
    testing to PolygonArea.
    """
    def __init__(self,points,radius,offset=(0.0,0.0)):
        self._radius = float(radius)
        self._path = tuple( (p[0],p[1]) for p in points )
        # Offset outlines accumulated along each side of the path
        leftSide = []
        rightSide = []
        numPoints = len(self._path)
        # Lift a 2D path point onto the XZ ground plane
        def vector(p):
            return (p[0],0.0,p[1])
        # Matrices that push a point sideways by +/- radius in local space
        leftMat = viz.Matrix.translate(-radius,0,0)
        rightMat = viz.Matrix.translate(radius,0,0)
        for x in range(numPoints-1):
            begin = vector(self._path[x])
            end = vector(self._path[x+1])
            # Add begin vertices
            if x == 0:
                # Start cap: step back by radius along the first segment
                mat = viz.Matrix.lookat(begin,end,[0,1,0])
                mat.preTrans((0,0,-radius))
                leftSide.append( (leftMat * mat).getPosition() )
                rightSide.append( (rightMat * mat).getPosition() )
            # And joint vertices
            elif x > 0:
                prev = vector(self._path[x-1])
                curr = vector(self._path[x])
                next = vector(self._path[x+1])
                # Get incoming/outgoing rotation matrix
                inMat = viz.Matrix.lookat(prev,curr,[0,1,0])
                outMat = viz.Matrix.lookat(curr,next,[0,1,0])
                # Get angle at joint
                angle = (inMat.inverse() * outMat).getEuler()[0]
                if angle > 0.0:
                    # Going to the right
                    # Add intersection point to right side
                    lineInMat = viz.Matrix(inMat)
                    lineInMat.preTrans((radius,0,0))
                    lineIn = viz.Line(begin=lineInMat.getPosition(),dir=lineInMat.getForward())
                    lineOutMat = viz.Matrix(outMat)
                    lineOutMat.preTrans((radius,0,0))
                    lineOut = viz.Line(begin=lineOutMat.getPosition(),dir=lineOutMat.getForward())
                    rightSide.append( lineIn.intersectLine(lineOut) )
                    # Add extra vertices to left side
                    inMat.setPosition(curr)
                    leftSide.append( (leftMat * inMat).getPosition() )
                    inMat.preEuler((angle * 0.5,0.0,0.0))
                    leftSide.append( (leftMat * inMat).getPosition() )
                    leftSide.append( (leftMat * outMat).getPosition() )
                elif angle < 0.0:
                    # Going to the left
                    # Add intersection point to left side
                    lineInMat = viz.Matrix(inMat)
                    lineInMat.preTrans((-radius,0,0))
                    lineIn = viz.Line(begin=lineInMat.getPosition(),dir=lineInMat.getForward())
                    lineOutMat = viz.Matrix(outMat)
                    lineOutMat.preTrans((-radius,0,0))
                    lineOut = viz.Line(begin=lineOutMat.getPosition(),dir=lineOutMat.getForward())
                    leftSide.append( lineIn.intersectLine(lineOut) )
                    # Add extra vertices to right side
                    inMat.setPosition(curr)
                    rightSide.append( (rightMat * inMat).getPosition() )
                    inMat.preEuler((angle * 0.5,0.0,0.0))
                    rightSide.append( (rightMat * inMat).getPosition() )
                    rightSide.append( (rightMat * outMat).getPosition() )
                else:
                    # No angle difference, skip vertices
                    pass
            # Add end vertices
            if (x+1) == (numPoints-1):
                # End cap: step forward by radius along the last segment
                mat = viz.Matrix.lookat(begin,end,[0,1,0])
                mat.setPosition(end)
                mat.preTrans((0,0,radius))
                leftSide.append( (leftMat * mat).getPosition() )
                rightSide.append( (rightMat * mat).getPosition() )
        # Stitch left side + reversed right side into one closed polygon
        verts = [ (x,z) for x,y,z in leftSide ]
        verts.extend((x,z) for x,y,z in reversed(rightSide))
        PolygonArea.__init__(self,verts,offset)
    def __repr__(self):
        return "{}(points={}, radius={}, offset={})".format(self.__class__.__name__,self._path,self._radius,self._offset)
class CompositeShape(Shape):
    """Shape composed of multiple sub-shapes; a point is contained if any
    sub-shape contains it."""
    def __init__(self,shapes):
        self._shapes = list(shapes)
    def __repr__(self):
        return "{}(shapes={})".format(self.__class__.__name__,self._shapes)
    def containsPoint(self,point):
        """Returns whether the point is inside the shape"""
        return any(shape.containsPoint(point) for shape in self._shapes)
    def createDebugNode(self,data):
        """Create and return node object for visualizing proximity shape"""
        # Parent each sub-shape's debug node under one group
        root = viz.addGroup()
        for child in self._shapes:
            node = child.createDebugNode(data)
            if node:
                node.setParent(root)
        return root
class Target(viz.Removable):
    """A proximity target is the object that is detected by proximity sensors.
    The targets position is determined by the specified source object."""
    def __init__(self,source):
        # Adapt whatever was passed in (node, matrix, linkable, ...)
        self._source = createSource(source)
        # Remove self when source object is removed
        if isinstance(self.getSourceObject(),viz.Removable):
            self.getSourceObject().addRemoveCallback(self._notifyRemoveCallbacks)
    def __repr__(self):
        return "{}(source={})".format(self.__class__.__name__,self._source)
    def getSource(self):
        """Return the source object of the target"""
        return self._source
    def getSourceObject(self):
        """Return the underlying object of the target source"""
        return self._source.getObject()
    def getPosition(self):
        """Get current position of target"""
        return self._source.getPosition()
class Sensor(viz.Removable):
	"""Detects when a proximity target enters its area.

	The area of the sensor is defined by the supplied shape; the sensor's
	position/orientation is driven by the supplied source object."""
	def __init__(self,shape,source):
		"""@arg shape Shape()"""
		self._shape = shape
		self._source = createSource(source)
		# Automatically remove this sensor when its source object is removed
		obj = self.getSourceObject()
		if isinstance(obj,viz.Removable):
			obj.addRemoveCallback(self._notifyRemoveCallbacks)
	def __repr__(self):
		return "{}(shape={}, source={})".format(self.__class__.__name__,self._shape,self._source)
	def getShape(self):
		"""Return the shape object of the sensor"""
		return self._shape
	def getSource(self):
		"""Return the source object of the sensor"""
		return self._source
	def getSourceObject(self):
		"""Return the underlying object of the sensor source"""
		return self._source.getObject()
	def containsPoint(self,point):
		"""Return whether the sensor contains the specified point"""
		# Express the point in the source's local reference frame,
		# then test it against the (local-space) shape
		world_to_local = self._source.getMatrix()
		world_to_local.invert()
		return self._shape.containsPoint(world_to_local.preMultVec(point))
def addBoundingBoxSensor(node,name='',scale=(1.0,1.0,1.0)):
	"""Utility function for creating a proximity sensor using the bounding box of a node.

	The box dimensions are the node's local bounding box scaled per-axis by *scale*."""
	bounds = node.getBoundingBox(viz.ABS_LOCAL,name)
	w,h,d = bounds.size
	box = Box(size=[w*scale[0],h*scale[1],d*scale[2]],center=bounds.center)
	return Sensor(box,node)
def addBoundingSphereSensor(node,name='',scale=1.0):
	"""Utility function for creating a proximity sensor using the bounding sphere of a node.

	The sphere radius is the node's local bounding sphere radius multiplied by *scale*."""
	bounds = node.getBoundingSphere(viz.ABS_LOCAL,name)
	sphere = Sphere(radius=scale*bounds.radius,center=bounds.center)
	return Sensor(sphere,node)
class Manager(viz.EventClass,viz.Removable):
"""Manages a collection of proximity sensor and targets and
automatically triggers events when a target enters/exits a sensor range"""
	def __init__(self, priority=DEFAULT_UPDATE_PRIORITY):
		"""Create a proximity manager that polls its sensors every frame.

		priority -- viz.UPDATE_EVENT callback priority for the per-frame poll.
		"""
		viz.EventClass.__init__(self)
		# Sensor -> per-sensor data (carries a .debug node, see update()).
		self._sensors = {}
		# Target -> set of sensors the target is currently inside (see update()).
		self._targets = {}
		# Debug visualization state; root node is created lazily by setDebug().
		self._debugRoot = None
		self._debugColor = list(DEFAULT_DEBUG_COLOR)
		self._debugActiveColor = list(DEFAULT_DEBUG_ACTIVE_COLOR)
		# Poll all targets against all sensors once per frame.
		self.callback(viz.UPDATE_EVENT,self._onUpdate,priority=priority)
	def _eventClassRemoved(self):
		"""Teardown hook invoked when the event class is unregistered.

		Notifies removal callbacks, drops all sensors/targets, and destroys
		the debug visualization root if one was created."""
		self._notifyRemoveCallbacks()
		self._sensors.clear()
		self._targets.clear()
		if self._debugRoot:
			self._debugRoot.remove()
			self._debugRoot = None
	def _onUpdate(self,e):
		"""Per-frame viz.UPDATE_EVENT callback; runs one proximity poll."""
		self.update()
	def remove(self):
		"""Remove the proximity manager"""
		# Unregistering the event class triggers _eventClassRemoved,
		# which performs the actual cleanup.
		self.unregister()
	def update(self):
		"""Update state of proximity sensors and trigger enter/exit events if needed.

		Tests every target position against every sensor, records state
		transitions, refreshes debug geometry, and only then fires the queued
		enter/exit events so handlers observe a fully consistent state.
		"""
		# Don't bother checking if no targets or sensors
		if not (self._targets and self._sensors):
			return
		# Set of active sensors for debugging purposes
		# (only tracked when debug visualization is enabled)
		activeSensors = None
		if self._debugRoot is not None:
			activeSensors = set()
		# Check if each target is in range of each sensor.
		# `active` is the set of sensors this target was inside last update;
		# enter/exit events are queued and sent after all state is updated.
		events = []
		for t,active in self._targets.iteritems():
			pos = t.getPosition()
			for s in self._sensors.iterkeys():
				isInside = s.containsPoint(pos)
				if isInside and activeSensors is not None:
					activeSensors.add(s)
				wasInside = s in active
				if wasInside != isInside:
					if isInside:
						active.add(s)
						events.append( (ENTER_PROXIMITY_EVENT, ProximityEvent(s,t,self)) )
					else:
						active.remove(s)
						events.append( (EXIT_PROXIMITY_EVENT, ProximityEvent(s,t,self)) )
		# Update debug nodes: follow the sensor's source transform and
		# re-color only when the active state actually changed.
		if self._debugRoot is not None:
			for sensor,data in self._sensors.iteritems():
				if data.debug:
					data.debug.setMatrix(sensor.getSource().getMatrix())
					isActive = sensor in activeSensors
					if isActive != data.debug.lastActive:
						data.debug.lastActive = isActive
						data.debug.color(self._debugActiveColor if isActive else self._debugColor)
		# Trigger events
		for id,e in events:
			viz.sendEvent(id,e)
	def _updateActiveDebugColor(self):
		"""Update color for active debug shapes.

		Re-applies the current active color to every debug node whose last
		known state is active; presumably called after the active debug
		color setting changes (confirm with callers)."""
		for sensor,data in self._sensors.iteritems():
			if data.debug and data.debug.lastActive:
				data.debug.color(self._debugActiveColor)
	def _createSensorDebugNode(self,sensor,data):
		"""Create debug node for sensor.

		Builds the shape's debug geometry under the debug root and stores it
		on the sensor's bookkeeping data, replacing any previous node.
		Shapes may return no debug node, in which case nothing is stored."""
		node = sensor.getShape().createDebugNode(data)
		if node:
			node.setParent(self._debugRoot)
			sensor_data = self._sensors[sensor]
			# Replace any existing debug node for this sensor
			if sensor_data.debug:
				sensor_data.debug.remove()
			sensor_data.debug = node
			# Track last applied active state so update() only re-colors on change
			sensor_data.debug.lastActive = False
	def setDebug(self,mode, parent = viz.WORLD, scene = viz.MainScene):
		"""Set whether debugging of proximity sensor shapes is enabled.

		mode -- boolean or viz.TOGGLE
		parent/scene -- where the lazily-created debug root node is placed
		"""
		if mode == viz.TOGGLE:
			mode = not self.getDebug()
		if mode and self._debugRoot is None:
			# Create debug root node and shapes for all current sensors
			self._debugRoot = viz.addGroup(parent=parent,scene=scene)
			# Debug geometry must not interfere with picking/intersection tests
			self._debugRoot.disable([viz.PICKING,viz.INTERSECTION])
			debugData = viz.Data(color=self._debugColor)
			for sensor in self._sensors.iterkeys():
				self._createSensorDebugNode(sensor,debugData)
		elif not mode and self._debugRoot is not None:
			# Remove debug root node and shapes for all current sensors
			self._debugRoot.remove()
			self._debugRoot = None
			for sensor,data in self._sensors.iteritems():
				data.debug = None
def getDebug(self):
"""Get whether debugging of proximity sensor shapes | |
"""tests/test_output_format.py.
Tests the output format handlers included with Hug
Copyright (C) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os
from collections import namedtuple
from datetime import datetime, timedelta
from decimal import Decimal
from io import BytesIO
from uuid import UUID
import numpy
import pytest
import hug
from .constants import BASE_DIRECTORY
def test_text():
    """Ensure that it's possible to output a Hug API method as text"""
    # BUG FIX: these comparisons previously discarded their results (no
    # `assert`), so the test could never fail. The text output format encodes
    # its result to utf8 bytes, so compare against the bytes form.
    assert hug.output_format.text("Hello World!") == b"Hello World!"
    assert hug.output_format.text(str(1)) == b"1"
def test_html(hug_api):
    """Ensure that it's possible to output a Hug API method as HTML"""
    # BUG FIX: these two comparisons previously discarded their results (no
    # `assert`). The html formatter encodes to utf8 bytes (as the b'test'
    # assertion below demonstrates), so compare against the bytes form.
    assert hug.output_format.html("<html>Hello World!</html>") == b"<html>Hello World!</html>"
    assert hug.output_format.html(str(1)) == b"1"
    with open(os.path.join(BASE_DIRECTORY, 'README.md'), 'rb') as html_file:
        assert hasattr(hug.output_format.html(html_file), 'read')

    class FakeHTMLWithRender():
        def render(self):
            return 'test'

    assert hug.output_format.html(FakeHTMLWithRender()) == b'test'

    @hug.get('/get/html', output=hug.output_format.html, api=hug_api)
    def get_html(**kwargs):
        """
        Returns command help document when no command is specified
        """
        with open(os.path.join(BASE_DIRECTORY, 'examples/document.html'), 'rb') as html_file:
            return html_file.read()

    assert '<html>' in hug.test.get(hug_api, '/get/html').data
def test_json():
    """Ensure that it's possible to output a Hug API method as JSON"""
    timestamp = datetime.now()
    day = timedelta(days=1)
    payload = {'text': 'text', 'datetime': timestamp, 'bytes': b'bytes', 'delta': day}
    serialized = hug.output_format.json(payload).decode('utf8')
    # Plain values survive, bytes/datetime/timedelta are converted to strings
    for expected in ('text', 'bytes', str(day.total_seconds()), timestamp.isoformat()):
        assert expected in serialized

    class NewObject(object):
        pass

    payload['non_serializable'] = NewObject()
    with pytest.raises(TypeError):
        hug.output_format.json(payload).decode('utf8')

    class NamedTupleObject(namedtuple('BaseTuple', ('name', 'value'))):
        pass

    def round_trip(value):
        """Serialize with the JSON output format, then parse it back in."""
        return hug.input_format.json(BytesIO(hug.output_format.json(value)))

    assert round_trip(NamedTupleObject('name', 'value')) == {'name': 'name', 'value': 'value'}
    assert round_trip(set((1, 2, 3, 3))) == [1, 2, 3]
    assert round_trip(number for number in range(1, 4)) == [1, 2, 3]
    assert round_trip([Decimal(1.5), Decimal("155.23"), Decimal("1234.25")]) == ["1.5", "155.23", "1234.25"]
    with open(os.path.join(BASE_DIRECTORY, 'README.md'), 'rb') as json_file:
        assert hasattr(hug.output_format.json(json_file), 'read')
    # Raw non-utf8 bytes are base64 encoded
    assert round_trip(b'\x9c') == 'nA=='

    class MyCrazyObject(object):
        pass

    @hug.output_format.json_convert(MyCrazyObject)
    def convert(instance):
        return 'Like anyone could convert this'

    assert round_trip(MyCrazyObject()) == 'Like anyone could convert this'
    assert round_trip({'data': ['Τη γλώσσα μου έδωσαν ελληνική']}) == \
        {'data': ['Τη γλώσσα μου έδωσαν ελληνική']}
def test_pretty_json():
    """Ensure that it's possible to output a Hug API method as prettified and indented JSON"""
    expected = '{\n    "text": "text"\n}'
    assert hug.output_format.pretty_json({'text': 'text'}).decode('utf8') == expected
def test_json_camelcase():
    """Ensure that it's possible to output a Hug API method as camelCased JSON"""
    source = {'under_score': 'values_can', 'be_converted': [{'to_camelcase': 'value'}, 'wont_be_convert']}
    output = hug.output_format.json_camelcase(source).decode('utf8')
    # Dictionary keys are camelCased at every nesting level, while values
    # (including bare strings inside lists) are left untouched.
    for token in ('underScore', 'values_can', 'beConverted', 'toCamelcase', 'value', 'wont_be_convert'):
        assert token in output
def test_image():
    """Ensure that it's possible to output images with hug"""
    logo_path = os.path.join(BASE_DIRECTORY, 'artwork', 'logo.png')
    # Both a file path and an open file object produce a readable stream
    assert hasattr(hug.output_format.png_image(logo_path, hug.Response()), 'read')
    with open(logo_path, 'rb') as image_file:
        assert hasattr(hug.output_format.png_image(image_file, hug.Response()), 'read')
    # A path that does not exist yields None
    assert hug.output_format.png_image('Not Existent', hug.Response()) is None

    class SavableImage(object):
        """Fake image exposing the save(to, format) protocol."""
        def save(self, to, format):
            to.write(b'test')

    assert hasattr(hug.output_format.png_image(SavableImage(), hug.Response()), 'read')

    class RenderableImage(object):
        """Fake image exposing the render() protocol."""
        def render(self):
            return 'test'

    assert hug.output_format.svg_xml_image(RenderableImage(), hug.Response()) == 'test'

    class FormatlessSavableImage(object):
        """Fake image whose save() takes no format argument."""
        def save(self, to):
            to.write(b'test')

    assert hasattr(hug.output_format.png_image(FormatlessSavableImage(), hug.Response()), 'read')
def test_file():
    """Ensure that it's possible to easily output files"""
    class FakeResponse(object):
        pass

    logo_path = os.path.join(BASE_DIRECTORY, 'artwork', 'logo.png')
    fake_response = FakeResponse()
    # Outputting by path yields a readable stream and sets the content type
    assert hasattr(hug.output_format.file(logo_path, fake_response), 'read')
    assert fake_response.content_type == 'image/png'
    with open(logo_path, 'rb') as image_file:
        # BUG FIX: this check previously discarded its result (no `assert`),
        # so the open-file case was never actually verified.
        assert hasattr(hug.output_format.file(image_file, fake_response), 'read')
    # Missing files and None inputs degrade gracefully
    assert not hasattr(hug.output_format.file('NON EXISTENT FILE', fake_response), 'read')
    assert hug.output_format.file(None, fake_response) == ''
def test_video():
    """Ensure that it's possible to output videos with hug"""
    gif_path = os.path.join(BASE_DIRECTORY, 'artwork', 'example.gif')
    # Both a file path and an open file object produce a readable stream
    assert hasattr(hug.output_format.mp4_video(gif_path, hug.Response()), 'read')
    with open(gif_path, 'rb') as image_file:
        assert hasattr(hug.output_format.mp4_video(image_file, hug.Response()), 'read')
    assert hug.output_format.mp4_video('Not Existent', hug.Response()) is None

    class FakeVideoWithSave():
        def save(self, to, format):
            to.write(b'test')

    assert hasattr(hug.output_format.mp4_video(FakeVideoWithSave(), hug.Response()), 'read')

    # BUG FIX: this class was previously also named FakeVideoWithSave, silently
    # shadowing the save-based fake above; renamed to describe its render-based
    # behavior (mirroring FakeImageWithRender in test_image).
    class FakeVideoWithRender():
        def render(self):
            return 'test'

    assert hug.output_format.avi_video(FakeVideoWithRender(), hug.Response()) == 'test'
def test_on_valid():
    """Test to ensure formats that use on_valid content types gracefully handle error dictionaries"""
    error_dict = {'errors': {'so': 'many'}}
    as_json = hug.output_format.json(error_dict)
    # Error dictionaries short-circuit to the JSON representation instead of
    # being handed to the media formatter.
    assert hug.output_format.mp4_video(error_dict, hug.Response()) == as_json
    assert hug.output_format.png_image(error_dict, hug.Response()) == as_json

    @hug.output_format.on_valid('image', hug.output_format.file)
    def my_output_format(data):
        raise ValueError('This should never be called')

    assert my_output_format(error_dict, hug.Response())
def test_on_content_type():
    """Ensure that it's possible to route the output type format by the requested content-type"""
    router = hug.output_format.on_content_type({'application/json': hug.output_format.json,
                                                'text/plain': hug.output_format.text})

    class FakeRequest(object):
        content_type = 'application/json'

    request, response = FakeRequest(), FakeRequest()
    parsed = hug.input_format.json(router(BytesIO(hug.output_format.json({'name': 'name'})), request, response))
    assert parsed == {'name': 'name'}

    request.content_type = 'text/plain'
    assert router('hi', request, response) == b'hi'

    # An unregistered content type is rejected
    request.content_type = 'undefined; always'
    with pytest.raises(hug.HTTPNotAcceptable):
        router('hi', request, response)
def test_accept():
    """Ensure that it's possible to route the output type format by the requests stated accept header"""
    router = hug.output_format.accept({'application/json': hug.output_format.json,
                                       'text/plain': hug.output_format.text})

    class FakeRequest(object):
        accept = 'application/json'

    request, response = FakeRequest(), FakeRequest()
    parsed = hug.input_format.json(router(BytesIO(hug.output_format.json({'name': 'name'})), request, response))
    assert parsed == {'name': 'name'}

    # Quality values (q=) decide which registered handler wins
    expectations = [
        ('text/plain', b'hi'),
        ('application/json, text/plain; q=0.5', b'"hi"'),
        ('text/plain; q=0.5, application/json', b'"hi"'),
        ('application/json;q=0.4,text/plain; q=0.5', b'hi'),
    ]
    for accept_header, expected in expectations:
        request.accept = accept_header
        assert router('hi', request, response) == expected

    # A wildcard accept may route to either handler
    request.accept = '*'
    assert router('hi', request, response) in [b'"hi"', b'hi']

    # An unknown type raises unless a default formatter was supplied
    request.accept = 'undefined; always'
    with pytest.raises(hug.HTTPNotAcceptable):
        router('hi', request, response)
    router = hug.output_format.accept({'application/json': hug.output_format.json,
                                       'text/plain': hug.output_format.text}, hug.output_format.json)
    assert router('hi', request, response) == b'"hi"'
def test_accept_with_http_errors():
    """Ensure that content type based output formats work for HTTP error responses"""
    formatter = hug.output_format.accept({'application/json': hug.output_format.json,
                                          'text/plain': hug.output_format.text},
                                         default=hug.output_format.json)
    api = hug.API('test_accept_with_http_errors')
    hug.default_output_format(api=api)(formatter)

    @hug.get('/500', api=api)
    def error_500():
        raise hug.HTTPInternalServerError('500 Internal Server Error',
                                          'This is an example')

    # The error body must still be rendered through the accept-based formatter
    response = hug.test.get(api, '/500')
    assert response.status == '500 Internal Server Error'
    assert response.data == {'errors': {'500 Internal Server Error': 'This is an example'}}
def test_suffix():
    """Ensure that it's possible to route the output type format by the suffix of the requested URL"""
    router = hug.output_format.suffix({'.js': hug.output_format.json, '.html': hug.output_format.text})

    class FakeRequest(object):
        path = 'endpoint.js'

    request, response = FakeRequest(), FakeRequest()
    parsed = hug.input_format.json(router(BytesIO(hug.output_format.json({'name': 'name'})), request, response))
    assert parsed == {'name': 'name'}

    request.path = 'endpoint.html'
    assert router('hi', request, response) == b'hi'

    # A suffix with no registered handler is rejected
    request.path = 'undefined.always'
    with pytest.raises(hug.HTTPNotAcceptable):
        router('hi', request, response)
def test_prefix():
    """Ensure that it's possible to route the output type format by the prefix of the requested URL"""
    router = hug.output_format.prefix({'js/': hug.output_format.json, 'html/': hug.output_format.text})

    class FakeRequest(object):
        path = 'js/endpoint'

    request, response = FakeRequest(), FakeRequest()
    parsed = hug.input_format.json(router(BytesIO(hug.output_format.json({'name': 'name'})), request, response))
    assert parsed == {'name': 'name'}

    request.path = 'html/endpoint'
    assert router('hi', request, response) == b'hi'

    # A prefix with no registered handler is rejected
    request.path = 'undefined.always'
    with pytest.raises(hug.HTTPNotAcceptable):
        router('hi', request, response)
def test_json_converter_numpy_types():
    """Ensure that numpy-specific data types (array, int, float) are properly supported in JSON output."""
    ex_int = numpy.int_(9)
    ex_np_array = numpy.array([1, 2, 3, 4, 5])
    ex_np_int_array = numpy.int_([5, 4, 3])
    # BUG FIX: numpy.float was a deprecated alias of the builtin float and was
    # removed in NumPy 1.24; use the concrete numpy.float64 scalar type.
    ex_np_float = numpy.float64(.5)

    # BUG FIX: compare with == instead of `is` -- identity checks on small
    # integers rely on CPython's small-int caching, an implementation detail.
    assert 9 == hug.output_format._json_converter(ex_int)
    assert [1, 2, 3, 4, 5] == hug.output_format._json_converter(ex_np_array)
    assert [5, 4, 3] == hug.output_format._json_converter(ex_np_int_array)
    assert .5 == hug.output_format._json_converter(ex_np_float)

    # Some type names are merely shorthands.
    # The following shorthands for built-in types are excluded: numpy.bool, numpy.int, numpy.float.
    # BUG FIX: numpy.bool8, numpy.float_, numpy.longfloat and numpy.unicode_
    # were removed in NumPy 2.0; use their canonical spellings instead.
    np_bool_types = [numpy.bool_]
    np_int_types = [numpy.int_, numpy.byte, numpy.ubyte, numpy.intc, numpy.uintc, numpy.intp, numpy.uintp, numpy.int8,
                    numpy.uint8, numpy.int16, numpy.uint16, numpy.int32, numpy.uint32, numpy.int64, numpy.uint64,
                    numpy.longlong, numpy.ulonglong, numpy.short, numpy.ushort]
    np_float_types = [numpy.float32, numpy.float64, numpy.half, numpy.single,
                      numpy.longdouble]
    np_unicode_types = [numpy.str_]
    np_bytes_types = [numpy.bytes_]
    for np_type in np_bool_types:
        assert True == hug.output_format._json_converter(np_type(True))
    for np_type in np_int_types:
        assert 1 == hug.output_format._json_converter(np_type(1))
    for np_type in np_float_types:
        assert .5 == hug.output_format._json_converter(np_type(.5))
    for np_type in np_unicode_types:
        assert "a" == hug.output_format._json_converter(np_type('a'))
    for np_type in np_bytes_types:
        assert "a" == hug.output_format._json_converter(np_type('a'))
def test_json_converter_uuid():
    """Ensure that uuid data type is properly supported in JSON output."""
    uuidstr = '8ae4d8c1-e2d7-5cd0-8407-6baf16dfbca4'
    # A UUID converts back to its canonical string form
    assert hug.output_format._json_converter(UUID(uuidstr)) == uuidstr
def test_output_format_with_no_docstring():
"""Ensure it is safe to use formatters with no docstring"""
@hug.format.content_type('test/fmt')
def | |
import math
class Reward:
    def __init__(self, verbose=False):
        """Initialize the reward object.

        verbose -- when True, reward_function uses a fixed test racing-point
                   index (and presumably extra diagnostics; confirm there).
        """
        # Index of the racing-line point closest to the car at the start of
        # the episode; set lazily inside reward_function (step 1).
        self.first_racingpoint_index = None
        self.verbose = verbose
def reward_function(self, params):
# Import package (needed for heading)
import math
################## HELPER FUNCTIONS ###################
def dist_2_points(x1, x2, y1, y2):
return abs(abs(x1-x2)**2 + abs(y1-y2)**2)**0.5
def closest_2_racing_points_index(racing_coords, car_coords):
# Calculate all distances to racing points
distances = []
for i in range(len(racing_coords)):
distance = dist_2_points(x1=racing_coords[i][0], x2=car_coords[0],
y1=racing_coords[i][1], y2=car_coords[1])
distances.append(distance)
# Get index of the closest racing point
closest_index = distances.index(min(distances))
# Get index of the second closest racing point
distances_no_closest = distances.copy()
distances_no_closest[closest_index] = 999
second_closest_index = distances_no_closest.index(
min(distances_no_closest))
return [closest_index, second_closest_index]
def dist_to_racing_line(closest_coords, second_closest_coords, car_coords):
# Calculate the distances between 2 closest racing points
a = abs(dist_2_points(x1=closest_coords[0],
x2=second_closest_coords[0],
y1=closest_coords[1],
y2=second_closest_coords[1]))
# Distances between car and closest and second closest racing point
b = abs(dist_2_points(x1=car_coords[0],
x2=closest_coords[0],
y1=car_coords[1],
y2=closest_coords[1]))
c = abs(dist_2_points(x1=car_coords[0],
x2=second_closest_coords[0],
y1=car_coords[1],
y2=second_closest_coords[1]))
# Calculate distance between car and racing line (goes through 2 closest racing points)
# try-except in case a=0 (rare bug in DeepRacer)
try:
distance = abs(-(a**4) + 2*(a**2)*(b**2) + 2*(a**2)*(c**2) -
(b**4) + 2*(b**2)*(c**2) - (c**4))**0.5 / (2*a)
except:
distance = b
return distance
# Calculate which one of the closest racing points is the next one and which one the previous one
def next_prev_racing_point(closest_coords, second_closest_coords, car_coords, heading):
# Virtually set the car more into the heading direction
heading_vector = [math.cos(math.radians(
heading)), math.sin(math.radians(heading))]
new_car_coords = [car_coords[0]+heading_vector[0],
car_coords[1]+heading_vector[1]]
# Calculate distance from new car coords to 2 closest racing points
distance_closest_coords_new = dist_2_points(x1=new_car_coords[0],
x2=closest_coords[0],
y1=new_car_coords[1],
y2=closest_coords[1])
distance_second_closest_coords_new = dist_2_points(x1=new_car_coords[0],
x2=second_closest_coords[0],
y1=new_car_coords[1],
y2=second_closest_coords[1])
if distance_closest_coords_new <= distance_second_closest_coords_new:
next_point_coords = closest_coords
prev_point_coords = second_closest_coords
else:
next_point_coords = second_closest_coords
prev_point_coords = closest_coords
return [next_point_coords, prev_point_coords]
def racing_direction_diff(closest_coords, second_closest_coords, car_coords, heading):
# Calculate the direction of the center line based on the closest waypoints
next_point, prev_point = next_prev_racing_point(closest_coords,
second_closest_coords,
car_coords,
heading)
# Calculate the direction in radius, arctan2(dy, dx), the result is (-pi, pi) in radians
track_direction = math.atan2(
next_point[1] - prev_point[1], next_point[0] - prev_point[0])
# Convert to degree
track_direction = math.degrees(track_direction)
# Calculate the difference between the track direction and the heading direction of the car
direction_diff = abs(track_direction - heading)
if direction_diff > 180:
direction_diff = 360 - direction_diff
return direction_diff
# Gives back indexes that lie between start and end index of a cyclical list
# (start index is included, end index is not)
def indexes_cyclical(start, end, array_len):
if end < start:
end += array_len
return [index % array_len for index in range(start, end)]
# Calculate how long car would take for entire lap, if it continued like it did until now
def projected_time(first_index, closest_index, step_count, times_list):
# Calculate how much time has passed since start
current_actual_time = (step_count-1) / 15
# Calculate which indexes were already passed
indexes_traveled = indexes_cyclical(first_index, closest_index, len(times_list))
# Calculate how much time should have passed if car would have followed optimals
current_expected_time = sum([times_list[i] for i in indexes_traveled])
# Calculate how long one entire lap takes if car follows optimals
total_expected_time = sum(times_list)
# Calculate how long car would take for entire lap, if it continued like it did until now
try:
projected_time = (current_actual_time/current_expected_time) * total_expected_time
except:
projected_time = 9999
return projected_time
#################### RACING LINE ######################
# Optimal racing line for the Spain track
# Each row: [x,y,speed,timeFromPreviousPoint]
racing_track = [[0.3312, 2.82902, 1.30028, 0.1125],
[0.33882, 2.68171, 1.30028, 0.11344],
[0.36236, 2.53659, 1.30028, 0.11307],
[0.40087, 2.39698, 1.30507, 0.11097],
[0.45275, 2.26508, 1.31176, 0.10805],
[0.51635, 2.142, 1.31921, 0.10502],
[0.59024, 2.02819, 1.33692, 0.10149],
[0.67311, 1.92365, 1.35733, 0.09828],
[0.76393, 1.82822, 1.38414, 0.09517],
[0.8618, 1.74165, 1.41847, 0.09212],
[0.96597, 1.66362, 1.46121, 0.08907],
[1.07578, 1.59378, 1.52061, 0.08558],
[1.19062, 1.53169, 1.59994, 0.08159],
[1.30986, 1.47678, 1.65938, 0.07912],
[1.43296, 1.42844, 1.62186, 0.08154],
[1.55936, 1.38595, 1.59191, 0.08376],
[1.68849, 1.34851, 1.56949, 0.08567],
[1.8198, 1.31519, 1.55379, 0.08719],
[1.9527, 1.28493, 1.5382, 0.08861],
[2.08658, 1.25661, 1.53483, 0.08916],
[2.22298, 1.22686, 1.53483, 0.09096],
[2.35884, 1.19553, 1.53483, 0.09084],
[2.49393, 1.16191, 1.53483, 0.0907],
[2.628, 1.12536, 1.53483, 0.09054],
[2.76085, 1.08528, 1.53483, 0.09041],
[2.89229, 1.04114, 1.53483, 0.09033],
[3.02211, 0.99244, 1.53483, 0.09034],
[3.15014, 0.93869, 1.53483, 0.09047],
[3.27618, 0.87939, 1.53483, 0.09075],
[3.40004, 0.81404, 1.53483, 0.09125],
[3.52152, 0.742, 1.53483, 0.09202],
[3.64034, 0.66262, 1.53483, 0.09311],
[3.76074, 0.59177, 1.53483, 0.09102],
[3.88233, 0.52963, 1.53483, 0.08897],
[4.00501, 0.47617, 1.53483, 0.08719],
[4.12874, 0.43135, 1.53483, 0.08574],
[4.25349, 0.39509, 1.53483, 0.08464],
[4.37923, 0.36738, 1.53483, 0.08389],
[4.50595, 0.3482, 1.53483, 0.0835],
[4.6336, 0.33768, 1.53483, 0.08345],
[4.76212, 0.33589, 1.54, 0.08346],
[4.89139, 0.34292, 1.54759, 0.08365],
[5.02124, 0.35891, 1.56493, 0.0836],
[5.15142, 0.38395, 1.58956, 0.08339],
[5.2816, 0.41807, 1.62477, 0.08283],
[5.4114, 0.4612, 1.65551, 0.08262],
[5.54035, 0.5133, 1.69861, 0.08188],
[5.66795, 0.57414, 1.74608, 0.08096],
[5.79366, 0.6434, 1.80192, 0.07965],
[5.91696, 0.7206, 1.8709, 0.07776],
[6.03741, 0.80512, 1.94201, 0.07577],
[6.15464, 0.89629, 2.02687, 0.07327],
[6.26838, 0.99337, 2.12691, 0.07031],
[6.37852, 1.09559, 2.23799, 0.06714],
[6.48505, 1.20221, 2.13052, 0.07074],
[6.58809, 1.31255, 1.96143, 0.07697],
[6.68784, 1.42601, 1.83003, 0.08255],
[6.78459, 1.54207, 1.72188, 0.08775],
[6.87867, 1.6603, 1.6428, 0.09198],
[6.97035, 1.78041, 1.57631, 0.09586],
[7.05971, 1.90227, 1.52045, 0.09938],
[7.1468, 2.02575, 1.47813, 0.10223],
[7.23169, 2.15076, 1.4419, 0.1048],
[7.31445, 2.27719, 1.4134, 0.10691],
[7.39504, 2.40502, 1.39004, 0.10871],
[7.47341, 2.53422, 1.37567, 0.10984],
[7.5495, 2.66476, 1.36693, 0.11054],
[7.62327, 2.79664, 1.36328, 0.11084],
[7.69465, 2.92982, 1.36328, 0.11084],
[7.76358, 3.06429, 1.36328, 0.11084],
[7.82997, 3.20002, 1.36328, 0.11084],
[7.89374, 3.33701, 1.36328, 0.11084],
[7.95479, 3.47524, 1.36328, 0.11084],
[8.01207, 3.61501, 1.36328, 0.1108],
[8.06441, 3.75637, 1.36328, 0.11057],
[8.11059, 3.89904, 1.36328, 0.11],
[8.14944, 4.04252, 1.36328, 0.10904],
[8.1799, 4.18616, 1.36328, 0.10771],
[8.20114, 4.32922, 1.36328, 0.10608],
[8.2126, 4.47094, 1.36328, 0.1043],
[8.21392, 4.6106, 1.36328, 0.10245],
[8.20493, 4.74752, 1.36328, 0.10065],
[8.18559, 4.88108, 1.36328, 0.09899],
[8.15597, 5.01071, 1.36328, 0.09753],
[8.1162, 5.13587, 1.36328, 0.09633],
[8.06644, 5.25605, 1.36328, 0.09541],
[8.00692, 5.37079, 1.36328, 0.09481],
[7.93789, 5.47964, 1.36328, 0.09455],
[7.85959, 5.58214, 1.36531, 0.09447],
[7.77232, 5.67786, 1.37127, 0.09446],
[7.67635, 5.76633, 1.38082, 0.09453],
[7.57197, 5.84706, 1.39066, 0.09488],
[7.45949, 5.91949, 1.40348, 0.09533],
[7.33925, 5.98304, 1.41835, 0.09589],
[7.2117, 6.03706, 1.437, 0.0964],
[7.07742, 6.0809, 1.45157, 0.09731],
[6.93719, 6.11382, 1.46888, 0.09806],
[6.79213, 6.13516, 1.48699, 0.0986],
[6.64383, 6.14441, 1.50369, 0.09881],
[6.49436, 6.14136, 1.51987, 0.09836],
[6.3459, 6.12623, 1.54239, 0.09675],
[6.20016, 6.09972, 1.56605, 0.09459],
[6.05822, 6.06266, 1.59331, 0.09207],
[5.92067, 6.01592, 1.63208, 0.08901],
[5.78774, 5.96039, 1.60394, 0.08982],
[5.65947, 5.89687, 1.56189, 0.09164],
[5.53577, 5.82614, 1.52974, 0.09315],
[5.41646, 5.74889, 1.5097, 0.09415],
[5.30127, 5.66582, 1.4917, 0.09521],
[5.18985, 5.57764, 1.48342, 0.09579],
[5.08179, 5.48502, 1.48113, 0.09609],
[4.97664, 5.38868, 1.48113, 0.09629],
[4.87391, 5.28929, 1.48113, 0.09651],
[4.77307, 5.18754, 1.48113, 0.09671],
[4.67361, 5.08412, 1.48113, 0.09688],
[4.57497, 4.97969, 1.48113, 0.09699],
[4.47987, 4.87962, 1.4601, 0.09455],
[4.38389, 4.78101, 1.4257, 0.09652],
[4.28641, 4.68486, 1.39322, 0.09827],
[4.18688, 4.59216, 1.3655, 0.09961],
[4.08481, 4.50378, 1.34119, 0.10067],
[3.97981, 4.4205, 1.32388, 0.10123],
[3.87157, 4.34304, 1.31182, 0.10146],
[3.75985, 4.27207, 1.30536, 0.10139],
[3.64453, 4.20816, 1.30188, 0.10128],
[3.52552, 4.15187, 1.30124, 0.10117],
[3.40284, 4.10371, 1.3, 0.10138],
[3.27657, 4.0642, 1.3, 0.10177],
[3.14689, 4.03382, 1.3, 0.10246],
[3.01402, 4.0131, 1.3, 0.10344],
[2.87828, 4.00255, 1.3, 0.10472],
[2.74013, 4.00268, 1.3, 0.10628],
[2.6001, 4.01393, 1.3, 0.10806],
[2.45888, 4.0368, 1.3, 0.11004],
[2.31735, 4.07166, 1.3, 0.11213],
[2.17655, 4.11882, 1.3, 0.11422],
[2.03352, 4.15406, 1.3, 0.11332],
[1.89032, 4.17652, 1.3, 0.1115],
[1.74811, 4.18603, 1.3, 0.10964],
[1.60798, 4.18259, 1.3, 0.10783],
[1.47095, 4.16632, 1.3, 0.10615],
[1.33799, 4.13745, 1.3, 0.10466],
[1.20995, 4.09635, 1.3, 0.10344],
[1.08765, 4.04344, 1.3, 0.10251],
[0.97178, 3.97921, 1.3, 0.1019],
[0.86304, 3.90417, 1.3, 0.10163],
[0.7621, 3.81879, 1.3, 0.1017],
[0.66966, 3.72352, 1.30028, 0.10209],
[0.58645, 3.61884, 1.30028, 0.10284],
[0.51326, 3.50525, 1.30028, 0.10392],
[0.45096, 3.38332, 1.30028, 0.10531],
[0.4005, 3.25369, 1.30028, 0.10698],
[0.36297, 3.11721, 1.30028, 0.10886],
[0.33953, 2.97506, 1.30028, 0.1108]]
################## INPUT PARAMETERS ###################
# Read all input parameters
all_wheels_on_track = params['all_wheels_on_track']
x = params['x']
y = params['y']
distance_from_center = params['distance_from_center']
is_left_of_center = params['is_left_of_center']
heading = params['heading']
progress = params['progress']
steps = params['steps']
speed = params['speed']
steering_angle = params['steering_angle']
track_width = params['track_width']
waypoints = params['waypoints']
closest_waypoints = params['closest_waypoints']
is_offtrack = params['is_offtrack']
############### OPTIMAL X,Y,SPEED,TIME ################
# Get closest indexes for racing line (and distances to all points on racing line)
closest_index, second_closest_index = closest_2_racing_points_index(
racing_track, [x, y])
# Get optimal [x, y, speed, time] for closest and second closest index
optimals = racing_track[closest_index]
optimals_second = racing_track[second_closest_index]
# Save first racingpoint of episode for later
if self.verbose == True:
self.first_racingpoint_index = 0 # this is just for testing purposes
if steps == 1:
self.first_racingpoint_index = closest_index
################ REWARD AND PUNISHMENT ################
## Define the default reward ##
reward = 1
## Reward if car goes close to optimal racing line ##
DISTANCE_MULTIPLE = 1
dist = dist_to_racing_line(optimals[0:2], optimals_second[0:2], [x, y])
distance_reward = max(1e-3, 1 - (dist/(track_width*0.5)))
reward += distance_reward * DISTANCE_MULTIPLE
## Reward if speed is close to optimal speed ##
SPEED_DIFF_NO_REWARD = 1
SPEED_MULTIPLE = 2
speed_diff = abs(optimals[2]-speed)
if speed_diff <= SPEED_DIFF_NO_REWARD:
# we use quadratic punishment (not linear) bc we're not as confident with the optimal speed
# so, we do not punish small deviations from optimal speed
speed_reward = (1 - (speed_diff/(SPEED_DIFF_NO_REWARD))**2)**2
else:
speed_reward = 0
reward += speed_reward * SPEED_MULTIPLE
# Reward if less steps
REWARD_PER_STEP_FOR_FASTEST_TIME = 1
STANDARD_TIME = 37
FASTEST_TIME = 27
times_list = [row[3] for row in racing_track]
projected_time = projected_time(self.first_racingpoint_index, closest_index, steps, times_list)
try:
steps_prediction = projected_time * 15 + 1
reward_prediction = max(1e-3, (-REWARD_PER_STEP_FOR_FASTEST_TIME*(FASTEST_TIME) /
(STANDARD_TIME-FASTEST_TIME))*(steps_prediction-(STANDARD_TIME*15+1)))
steps_reward = min(REWARD_PER_STEP_FOR_FASTEST_TIME, reward_prediction / steps_prediction)
except:
steps_reward | |
from tkinter import Tk
from tkinter import ttk
from tkinter import *
from tkinter import messagebox
from ttkthemes import ThemedTk
from PIL import ImageTk, Image
import PIL
from ..model.Author import *
from ..model.Book import *
from ..model.Reader import *
from ..model.Publisher import *
from ..sqlTools.BorrowTools import *
from ..sqlTools.ReaderTools import *
from ..sqlTools.AuthorTools import *
from ..sqlTools.BookTools import *
from ..sqlTools.PublisherTools import *
from ..frame.UpdateBook_Frame import *
from ..frame.UpdateReader_Frame import *
class Login_LibrarianFrame:
    """Librarian dashboard window.

    Shows a title bar with a logout button, a navigation column with buttons
    leading to the register/management frames, and a content area.  Every
    navigation action destroys this window and constructs the target frame,
    handing over the original login frame so the user can log out from there.

    Fixes: ``PIL.Image.ANTIALIAS`` (removed in Pillow 10) replaced by the
    equivalent ``PIL.Image.LANCZOS``; "Regsiter" typo in a button label.
    """

    def Logout(self):
        """Close this window and re-open the login screen."""
        self.CloseFrame()
        self.frame = self.LoginFrame
        self.frame.loginFrame()

    def CloseFrame(self):
        """Destroy the Tk root window of this frame."""
        self.root.destroy()

    def Open_Reader_RegisterFrame(self):
        """Switch to the reader-registration frame."""
        self.CloseFrame()
        self.frame = reader_RegisterFrame(self.LoginFrame)

    def Open_Book_RegisterFrame(self):
        """Switch to the book-registration frame."""
        self.CloseFrame()
        self.frame = book_RegisterFrame(self.LoginFrame)

    def Open_Reader_ManagementFrame(self):
        """Switch to the reader-management frame."""
        self.CloseFrame()
        self.frame = Reader_ManagementFrame(self.LoginFrame)

    def Open_Book_ManagementFrame(self):
        """Switch to the book-management frame."""
        self.CloseFrame()
        self.frame = Book_ManegementFrame(self.LoginFrame)

    def Open_Lending_ManegementFrame(self):
        """Switch to the lending-management frame."""
        self.CloseFrame()
        self.frame = Lending_ManagementFrame(self.LoginFrame)

    def __init__(self, LoginFrame):
        """Build and show the dashboard window (blocks in ``mainloop``).

        :param LoginFrame: login frame object, kept so ``Logout`` can return
            to it.
        """
        self.LoginFrame = LoginFrame
        self.root = ThemedTk(theme="equilux")
        # Window title and icon
        self.root.title("Library Management System")
        self.root.iconbitmap('src\\picture\\library.ico')
        # Screen resolution
        self.x = self.root.winfo_screenwidth()
        self.y = self.root.winfo_screenheight()
        # Window size: 65% of screen width, 81% of screen height
        self.x1 = self.x * (13/20)
        self.y1 = self.y * (0.81)
        # Top-left corner for the window
        self.x2 = self.x * (1.1/6)
        self.y2 = self.y * (1/12)
        self.root.geometry("%dx%d+%d+%d" % (self.x1, self.y1, self.x2, self.y2))
        self.root.resizable(False, False)
        # Integer copies used for widget sizing below
        self.x1 = int(self.x1)
        self.y1 = int(self.y1)
        self.x_nav = int(self.x1*0.3)
        self.y_nav = int(self.y1*0.8)
        self.style = ttk.Style()
        self.style.configure("Title.TLabel", foreground="snow")
        self.style.configure("Logout.TButton", font=("Cascadia Code SemiBold", 14))
        self.style.configure("Nav.TButton", font=("Cascadia Code SemiBold", 12))
        self.style.configure("Content.TFrame", foreground="black", background="LightSkyBlue2")
        self.style.configure("Nav.TFrame", foreground="black", background="SeaGreen1")
        self.title_frame = ttk.Frame(self.root)
        self.title_frame.place(relwidth=1, relheight=0.2)
        self.text_frame = ttk.Frame(self.title_frame)
        self.text_frame.place(relx=0.1, rely=0.5, relwidth=0.4, relheight=0.5)
        self.title_text = ttk.Label(self.text_frame, text="Library Management System", font=("Cascadia Code SemiBold", 18), style="Title.TLabel")
        self.title_text.place(relx=0.05, rely=0.4)
        self.logout_button = ttk.Button(self.title_frame, text="Logout", style="Logout.TButton", command=self.Logout)
        self.logout_button.place(relx=0.78, rely=0.58, relwidth=0.15)
        self.content_frame = ttk.Frame(self.root, style="Content.TFrame")
        self.content_frame.place(relx=0.3, rely=0.2, relwidth=1, relheight=0.8)
        self.nav_frame = ttk.Frame(self.root, style="Nav.TFrame")
        self.nav_frame.place(rely=0.2, relwidth=0.3, relheight=0.8)
        # Resize the navigation background image to the nav column size.
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
        self.Nav_image = PIL.Image.open("src\\picture\\Nav.jpg")
        self.Nav_image = self.Nav_image.resize((self.x_nav, self.y_nav), PIL.Image.LANCZOS)
        self.Nav_image = ImageTk.PhotoImage(self.Nav_image)
        # highlightthickness=0 removes the border of the Canvas
        self.Nav_label = Canvas(self.nav_frame, width=self.x_nav, height=self.y_nav, highlightthickness=0)
        self.Nav_label.pack()
        self.Nav_label.create_image(0, 0, anchor=NW, image=self.Nav_image)
        self.nav_button1 = ttk.Button(self.nav_frame, text="User Register",
                                      style="Nav.TButton", command=self.Open_Reader_RegisterFrame)
        self.nav_button1.place(relx=0.25, rely=0.05, relwidth=0.5)
        # "Regsiter" typo fixed in the button label
        self.nav_button2 = ttk.Button(
            self.nav_frame, text="Book Register", style="Nav.TButton", command=self.Open_Book_RegisterFrame)
        self.nav_button2.place(relx=0.25, rely=0.25, relwidth=0.5)
        self.nav_button3 = ttk.Button(self.nav_frame, text="User Manage",
                                      style="Nav.TButton", command=self.Open_Reader_ManagementFrame)
        self.nav_button3.place(relx=0.25, rely=0.45, relwidth=0.5)
        self.nav_button4 = ttk.Button(
            self.nav_frame, text="Book Manage", style="Nav.TButton", command=self.Open_Book_ManagementFrame)
        self.nav_button4.place(relx=0.25, rely=0.65, relwidth=0.5)
        self.nav_button5 = ttk.Button(self.nav_frame, text="Lending Manage",
                                      style="Nav.TButton", command=self.Open_Lending_ManegementFrame)
        self.nav_button5.place(relx=0.25, rely=0.85, relwidth=0.5)
        self.root.mainloop()
class reader_RegisterFrame:
    """Reader-registration window.

    Same chrome as the librarian dashboard (title bar, nav column) plus a
    form (id, name, position, sex, password) that inserts a new reader
    through ``ReaderTools``.

    Fixes: ``do_InsertReader`` previously compared the Entry *widgets* against
    ``None``/``""`` (a widget equals neither, so empty forms always passed
    validation); it now checks the entered text, matching
    ``book_RegisterFrame.do_InsertBook``.  Also replaces the Pillow-10-removed
    ``Image.ANTIALIAS`` with ``Image.LANCZOS`` and fixes the "Regsiter" typo.
    """

    def Logout(self):
        """Close this window and re-open the login screen."""
        self.CloseFrame()
        self.frame = self.LoginFrame
        self.frame.loginFrame()

    def CloseFrame(self):
        """Destroy the Tk root window of this frame."""
        self.root.destroy()

    def Open_Reader_RegisterFrame(self):
        """Switch to a fresh reader-registration frame."""
        self.CloseFrame()
        self.frame = reader_RegisterFrame(self.LoginFrame)

    def Open_Book_RegisterFrame(self):
        """Switch to the book-registration frame."""
        self.CloseFrame()
        self.frame = book_RegisterFrame(self.LoginFrame)

    def Open_Reader_ManagementFrame(self):
        """Switch to the reader-management frame."""
        self.CloseFrame()
        self.frame = Reader_ManagementFrame(self.LoginFrame)

    def Open_Book_ManagementFrame(self):
        """Switch to the book-management frame."""
        self.CloseFrame()
        self.frame = Book_ManegementFrame(self.LoginFrame)

    def Open_Lending_ManegementFrame(self):
        """Switch to the lending-management frame."""
        self.CloseFrame()
        self.frame = Lending_ManagementFrame(self.LoginFrame)

    def do_InsertReader(self):
        """Validate the form and insert a new reader via ``ReaderTools``.

        Shows an info box reporting success, failure, or missing input.
        """
        readerTools = ReaderTools()
        reader = Reader()
        # ttk.Entry.get() always returns a str, so "" is the only empty case.
        # (The original compared the widget objects, which never match.)
        if (self.idReaderEntry.get() != "" and self.nameReaderEntry.get() != ""
                and self.positionEntry.get() != "" and self.sexEntry.get() != ""
                and self.passwordEntry.get() != ""):
            reader.setIdReader(self.idReaderEntry.get())
            reader.setNameReader(self.nameReaderEntry.get())
            reader.setLevel(self.positionEntry.get())
            reader.setSex(self.sexEntry.get())
            reader.setPassword(self.passwordEntry.get())
            i = readerTools.addReader(reader)
            # addReader returns 1 on success (see matching check in do_InsertBook)
            if i == 1:
                messagebox.showinfo("Successfully Added", "Successfully Added the Reader")
                return
            else:
                messagebox.showinfo("Failed to Add", "Failed to Add the Reader")
                return
        else:
            messagebox.showinfo("Please Enter the Data", "Please Fill All the Entry provided")
            return

    def __init__(self, LoginFrame):
        """Build and show the registration window (blocks in ``mainloop``).

        :param LoginFrame: login frame object, kept so ``Logout`` can return
            to it.
        """
        self.LoginFrame = LoginFrame
        self.root = ThemedTk(theme="equilux")
        # Window title and icon
        self.root.title("Library Management System")
        self.root.iconbitmap('src\\picture\\library.ico')
        # Screen resolution
        self.x = self.root.winfo_screenwidth()
        self.y = self.root.winfo_screenheight()
        # Window size: 65% of screen width, 81% of screen height
        self.x1 = self.x * (13/20)
        self.y1 = self.y * (0.81)
        # Top-left corner for the window
        self.x2 = self.x * (1.1/6)
        self.y2 = self.y * (1/12)
        self.root.geometry("%dx%d+%d+%d" % (self.x1, self.y1, self.x2, self.y2))
        self.root.resizable(False, False)
        # Integer copies used for widget sizing below
        self.x1 = int(self.x1)
        self.y1 = int(self.y1)
        self.x_nav = int(self.x1*0.3)
        self.y_nav = int(self.y1*0.8)
        self.style = ttk.Style()
        self.style.configure("Title.TLabel", foreground="snow")
        self.style.configure("Logout.TButton", font=("Cascadia Code SemiBold", 14))
        self.style.configure("Nav.TButton", font=("Cascadia Code SemiBold", 12))
        self.style.configure("Content.TFrame", foreground="black", background="LightSkyBlue2")
        self.style.configure("Content.TLabel", foreground="black", background="LightSkyBlue2")
        self.style.configure("Nav.TFrame", foreground="black", background="SeaGreen1")
        self.title_frame = ttk.Frame(self.root)
        self.title_frame.place(relwidth=1, relheight=0.2)
        self.text_frame = ttk.Frame(self.title_frame)
        self.text_frame.place(relx=0.1, rely=0.5, relwidth=0.4, relheight=0.5)
        self.title_text = ttk.Label(self.text_frame, text="Library Management System", font=("Cascadia Code SemiBold", 18), style="Title.TLabel")
        self.title_text.place(relx=0.05, rely=0.4)
        self.logout_button = ttk.Button(self.title_frame, text="Logout", style="Logout.TButton", command=self.Logout)
        self.logout_button.place(relx=0.78, rely=0.58, relwidth=0.15)
        self.content_frame = ttk.Frame(self.root, style="Content.TFrame")
        self.content_frame.place(relx=0.3, rely=0.2, relwidth=1, relheight=0.8)
        self.nav_frame = ttk.Frame(self.root, style="Nav.TFrame")
        self.nav_frame.place(rely=0.2, relwidth=0.3, relheight=0.8)
        # Resize the navigation background image to the nav column size.
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
        self.Nav_image = PIL.Image.open("src\\picture\\Nav.jpg")
        self.Nav_image = self.Nav_image.resize((self.x_nav, self.y_nav), PIL.Image.LANCZOS)
        self.Nav_image = ImageTk.PhotoImage(self.Nav_image)
        # highlightthickness=0 removes the border of the Canvas
        self.Nav_label = Canvas(self.nav_frame, width=self.x_nav, height=self.y_nav, highlightthickness=0)
        self.Nav_label.pack()
        self.Nav_label.create_image(0, 0, anchor=NW, image=self.Nav_image)
        self.nav_button1 = ttk.Button(self.nav_frame, text="User Register",
                                      style="Nav.TButton", command=self.Open_Reader_RegisterFrame)
        self.nav_button1.place(relx=0.25, rely=0.05, relwidth=0.5)
        # "Regsiter" typo fixed in the button label
        self.nav_button2 = ttk.Button(
            self.nav_frame, text="Book Register", style="Nav.TButton", command=self.Open_Book_RegisterFrame)
        self.nav_button2.place(relx=0.25, rely=0.25, relwidth=0.5)
        self.nav_button3 = ttk.Button(self.nav_frame, text="User Manage",
                                      style="Nav.TButton", command=self.Open_Reader_ManagementFrame)
        self.nav_button3.place(relx=0.25, rely=0.45, relwidth=0.5)
        self.nav_button4 = ttk.Button(
            self.nav_frame, text="Book Manage", style="Nav.TButton", command=self.Open_Book_ManagementFrame)
        self.nav_button4.place(relx=0.25, rely=0.65, relwidth=0.5)
        self.nav_button5 = ttk.Button(self.nav_frame, text="Lending Manage",
                                      style="Nav.TButton", command=self.Open_Lending_ManegementFrame)
        self.nav_button5.place(relx=0.25, rely=0.85, relwidth=0.5)
        # Registration form: label/entry pairs, then the submit button.
        self.idReaderLabel = ttk.Label(self.content_frame, text="ID Reader :", font=("Cascadia Code SemiBold", 18), style="Content.TLabel")
        self.idReaderLabel.place(relx=0.2, rely=0.15)
        self.idReaderEntry = ttk.Entry(self.content_frame, font=("Cascadia Code", 12))
        self.idReaderEntry.place(relx=0.37, rely=0.16)
        self.nameReaderLabel = ttk.Label(self.content_frame, text="Name :", font=("Cascadia Code SemiBold", 18), style="Content.TLabel")
        self.nameReaderLabel.place(relx=0.272, rely=0.25)
        self.nameReaderEntry = ttk.Entry(self.content_frame, font=("Cascadia Code", 12))
        self.nameReaderEntry.place(relx=0.37, rely=0.26)
        self.positionLabel = ttk.Label(self.content_frame, text="Position :", font=("Cascadia Code SemiBold", 18), style="Content.TLabel")
        self.positionLabel.place(relx=0.215, rely=0.35)
        self.positionEntry = ttk.Entry(self.content_frame, font=("Cascadia Code", 12))
        self.positionEntry.place(relx=0.37, rely=0.36)
        self.sexLabel = ttk.Label(self.content_frame, text="Sex :", font=("Cascadia Code SemiBold", 18), style="Content.TLabel")
        self.sexLabel.place(relx=0.285, rely=0.45)
        self.sexEntry = ttk.Entry(self.content_frame, font=("Cascadia Code", 12))
        self.sexEntry.place(relx=0.37, rely=0.46)
        self.passwordLabel = ttk.Label(self.content_frame, text="Password :", font=("Cascadia Code SemiBold", 18), style="Content.TLabel")
        self.passwordLabel.place(relx=0.215, rely=0.55)
        self.passwordEntry = ttk.Entry(self.content_frame, font=("Cascadia Code", 12))
        self.passwordEntry.place(relx=0.37, rely=0.56)
        self.registerButton = ttk.Button(self.content_frame, text="Register", style="Nav.TButton", command=self.do_InsertReader)
        self.registerButton.place(relx=0.3, rely=0.7)
        self.root.mainloop()
class book_RegisterFrame :
def Logout(self):
self.CloseFrame()
self.frame = self.LoginFrame
self.frame.loginFrame()
    def CloseFrame(self):
        # Destroy the whole Tk root window; every navigation action closes the
        # current window before constructing the next frame.
        self.root.destroy()
def Open_Reader_RegisterFrame(self):
self.CloseFrame()
self.frame = reader_RegisterFrame(self.LoginFrame)
def Open_Book_RegisterFrame(self):
self.CloseFrame()
self.frame = book_RegisterFrame(self.LoginFrame)
def Open_Reader_ManagementFrame(self):
self.CloseFrame()
self.frame = Reader_ManagementFrame(self.LoginFrame)
def Open_Book_ManagementFrame(self):
self.CloseFrame()
self.frame = Book_ManegementFrame(self.LoginFrame)
def Open_Lending_ManegementFrame(self):
self.CloseFrame()
self.frame = Lending_ManagementFrame(self.LoginFrame)
def do_InsertBook(self):
bookTools = BookTools()
book = Book()
author = Author()
authorTools = AuthorTools()
publisher = Publisher()
publisherTools = PublisherTools()
if self.idBookEntry.get() != None and self.idBookEntry.get() != "" and self.nameBookEntry.get() != None and self.nameBookEntry.get() != "" and self.priceEntry.get() != None and self.priceEntry.get() != "" and self.typeEntry.get() != None and self.typeEntry.get() != "" and self.authorEntry.get() != None and self.authorEntry.get() != "" and self.publisherEntry.get() != None and self.publisherEntry.get() != "" and self.workplaceEntry.get() != None and self.workplaceEntry.get() != "" and self.addressEntry.get() != None and self.addressEntry.get() != "" :
book.setIdBook(self.idBookEntry.get())
book.setNameBook(self.nameBookEntry.get())
book.setPrice(self.priceEntry.get())
book.setType(self.typeEntry.get())
book.setAuthor(self.authorEntry.get())
book.setPublisher(self.publisherEntry.get())
author.setName(self.authorEntry.get())
author.setWorkplace(self.workplaceEntry.get())
publisher.setName(self.publisherEntry.get())
publisher.setAddress(self.addressEntry.get())
publisherTools.addPublisher(publisher)
authorTools.addAuthor(author)
i = bookTools.AddBook(book)
if i == 1 :
messagebox.showinfo("Successfully Added", "Successfully Added the Book")
return
else :
messagebox.showinfo("Failed to Add","Failed to Add the Book")
return
else :
messagebox.showinfo("Please Enter the Data","Please Fill All the Entry provided")
def __init__(self, LoginFrame):
self.LoginFrame = LoginFrame
self.root = ThemedTk(theme="equilux")
#Setting the Title
self.root.title("Library Management System")
#Setting the icon
self.root.iconbitmap('src\\picture\\library.ico')
#Get the screen resolution
self.x = self.root.winfo_screenwidth()
self.y = self.root.winfo_screenheight()
#Get the value for windows size
self.x1 = self.x * (13/20)
self.y1 = self.y * (0.81)
#Get the value for Starting point for windows
self.x2 = self.x * (1.1/6)
self.y2 = self.y * (1/12)
self.root.geometry("%dx%d+%d+%d" %
(self.x1, self.y1, self.x2, self.y2))
self.root.resizable(False, False)
#Easy for configure within attribute
self.x1 = int(self.x1)
self.y1 = int(self.y1)
self.x_nav = int(self.x1*0.3)
self.y_nav = int(self.y1*0.8)
#Setting the ttk Style
self.style = ttk.Style()
self.style.configure("Title.TLabel", foreground="snow")
self.style.configure("Logout.TButton", font=("Cascadia Code SemiBold", 14))
self.style.configure("Nav.TButton", font=("Cascadia Code SemiBold", 12))
self.style.configure("Content.TFrame", foreground="black", background="LightSkyBlue2")
self.style.configure("Content.TLabel", foreground="black", background="LightSkyBlue2")
self.style.configure("Nav.TFrame", foreground="black", background="SeaGreen1")
self.title_frame = ttk.Frame(self.root)
self.title_frame.place(relwidth=1, relheight=0.2)
self.text_frame = ttk.Frame(self.title_frame)
self.text_frame.place(relx=0.1, rely=0.5, relwidth=0.4, relheight=0.5)
self.title_text = ttk.Label(self.text_frame, text="Library Management System", font=(
"Cascadia Code SemiBold", 18), style="Title.TLabel")
self.title_text.place(relx=0.05, rely=0.4)
self.logout_button = ttk.Button(
self.title_frame, text="Logout", style="Logout.TButton", command=self.Logout)
self.logout_button.place(relx=0.78, rely=0.58, relwidth=0.15)
self.content_frame = ttk.Frame(self.root, style="Content.TFrame")
self.content_frame.place(relx=0.3, rely=0.2, relwidth=1, relheight=0.8)
self.nav_frame = ttk.Frame(self.root, style="Nav.TFrame")
self.nav_frame.place(rely=0.2, relwidth=0.3, relheight=0.8)
#Resize the Image
self.Nav_image = PIL.Image.open("src\\picture\\Nav.jpg")
self.Nav_image = self.Nav_image.resize((self.x_nav, self.y_nav), PIL.Image.ANTIALIAS)
self.Nav_image = ImageTk.PhotoImage(self.Nav_image)
# (highlightthickness = 0) is for remove the border for the Canvas
self.Nav_label = Canvas(self.nav_frame, width=self.x_nav, height=self.y_nav, highlightthickness=0)
self.Nav_label.pack()
self.Nav_label.create_image(0, 0, anchor=NW, image=self.Nav_image)
self.nav_button1 = ttk.Button(self.nav_frame, text="User Register", style="Nav.TButton", command=self.Open_Reader_RegisterFrame)
self.nav_button1.place(relx=0.25, rely=0.05, relwidth=0.5)
self.nav_button2 = ttk.Button(self.nav_frame, text="Book Regsiter", style="Nav.TButton", command=self.Open_Book_RegisterFrame)
self.nav_button2.place(relx=0.25, rely=0.25, relwidth=0.5)
self.nav_button3 = ttk.Button(self.nav_frame, text="User Manage", style="Nav.TButton", command=self.Open_Reader_ManagementFrame)
self.nav_button3.place(relx=0.25, rely=0.45, relwidth=0.5)
self.nav_button4 = ttk.Button(self.nav_frame, text="Book Manage", style="Nav.TButton", command=self.Open_Book_ManagementFrame)
self.nav_button4.place(relx=0.25, rely=0.65, relwidth=0.5)
self.nav_button5 = ttk.Button(self.nav_frame, text="Lending Manage", style="Nav.TButton", command=self.Open_Lending_ManegementFrame)
self.nav_button5.place(relx=0.25, rely=0.85, relwidth=0.5)
self.idBookLabel = ttk.Label(self.content_frame, text="Book ID :", font=("Cascadia Code SemiBold", 18), style="Content.TLabel")
self.idBookLabel.place(relx=0.23, rely=0.05)
self.idBookEntry = ttk.Entry(self.content_frame, | |
"blanks",
"stig",
"puncture",
"spank",
"utah",
"booing",
"abs",
"minimal",
"gals",
"flashes",
"andrei",
"requirements",
"greenhouse",
"marx",
"'ma",
"vermont",
"comprehend",
"canoe",
"baton",
"arsehole",
"graduating",
"partially",
"sensing",
"kinky",
"titans",
"microscope",
"intimidating",
"ren",
"babu",
"territories",
"vaguely",
"geese",
"hardship",
"karin",
"exceptions",
"jing",
"gramps",
"florrick",
"doubles",
"knitting",
"slowed",
"administrator",
"blacksmith",
"ctu",
"tolling",
"correspondence",
"fashionable",
"overlooked",
"siobhan",
"matron",
"arcade",
"gestures",
"lonnie",
"bord0",
"quits",
"outnumbered",
"lockhart",
"verbal",
"mc",
"terminate",
"hosts",
"rye",
"chirps",
"presidency",
"roberta",
"devote",
"30th",
"dumps",
"hallo",
"improvise",
"monique",
"pooh",
"starship",
"spades",
"fumes",
"cawing",
"aluminum",
"squeak",
"drafted",
"ssh",
"everest",
"michaels",
"commonwealth",
"candace",
"cappuccino",
"cucumber",
"inventor",
"folder",
"slamming",
"favours",
"peppers",
"appeals",
"walton",
"reasoning",
"adequate",
"snickers",
"glued",
"ifs",
"nets",
"hangar",
"godmother",
"empty-handed",
"sátur",
"forum",
"joys",
"foolishness",
"sybil",
"crest",
"playful",
"peers",
"horrors",
"grown-ups",
"bleach",
"disastrous",
"lineup",
"glamour",
"coronation",
"halo",
"sookie",
"highlight",
"ziva",
"ponies",
"errors",
"viv",
"alvarez",
"pluto",
"weights",
"arabia",
"imperative",
"trader",
"galileo",
"tainted",
"drugstore",
"lenses",
"obstruction",
"segment",
"envious",
"gertrude",
"twisting",
"nikolai",
"insured",
"overlook",
"icon",
"rollin",
"commentator",
"pitt",
"camilla",
"cerebral",
"qin",
"zelda",
"spoiling",
"tsar",
"frames",
"cushion",
"divers",
"hallowed",
"chute",
"cavity",
"reds",
"volcanoes",
"on.",
"angelina",
"sahib",
"strung",
"watts",
"40s",
"monopoly",
"bros",
"immigrant",
"back-up",
"apache",
"caffeine",
"squeezing",
"postponed",
"encounters",
"stuttering",
"rut",
"calmed",
"opener",
"baek",
"impotent",
"tabs",
"franchise",
"entity",
"slimy",
"hygiene",
"andi",
"containment",
"knickers",
"jumpy",
"pistols",
"irregular",
"lowly",
"controller",
"rung",
"hades",
"anarchy",
"wisely",
"blackmailed",
"dudley",
"dramatically",
"researching",
"borg",
"touchy",
"fay",
"seventy",
"m.o.",
"bullies",
"stealth",
"divya",
"divisions",
"yearbook",
"abdul",
"poll",
"blinds",
"sneezes",
"bigfoot",
"ninety",
"dharma",
"cramp",
"cockpit",
"truthful",
"lilith",
"ghastly",
"hangin",
"edwin",
"c.j.",
"whines",
"corrections",
"missionary",
"infinity",
"cola",
"penance",
"moist",
"sham",
"investor",
"founding",
"krystle",
"caldwell",
"orientation",
"captains",
"flop",
"columbo",
"jamming",
"yoo-hoo",
"crossroads",
"p.d.",
"infrared",
"pleading",
"sven",
"carroll",
"flushed",
"magda",
"gaming",
"gurgling",
"vinny",
"prairie",
"component",
"saudi",
"slapping",
"gases",
"aching",
"lau",
"abusive",
"vermin",
"bracelets",
"tyre",
"rift",
"wiring",
"gauge",
"robby",
"lifelong",
"abstract",
"pilgrimage",
"theoretically",
"mythology",
"johnnie",
"photographers",
"wilma",
"render",
"rocked",
"iq",
"licked",
"android",
"rhythmic",
"pianist",
"clover",
"unwanted",
"salvador",
"troupe",
"feat",
"primarily",
"preposterous",
"drying",
"unseen",
"raoul",
"vulture",
"constantine",
"outlet",
"git",
"pictured",
"functional",
"24th",
"nitrogen",
"cc",
"lest",
"right-hand",
"speechless",
"delusions",
"peeping",
"follow-up",
"refrain",
"jerking",
"engaging",
"accountable",
"piles",
"ch",
"intro",
"rescuing",
"orgy",
"zane",
"jaguar",
"prue",
"'she",
"concerts",
"schemes",
"abusing",
"fatigue",
"taj",
"charitable",
"lili",
"attracts",
"stirred",
"invaders",
"wench",
"kittens",
"riggs",
"gras",
"heartbroken",
"suffocating",
"youse",
"underage",
"amar",
"bugged",
"dimitri",
"'m--i",
"klinger",
"metropolitan",
"unions",
"worf",
"cds",
"hamster",
"omelet",
"humility",
"grad",
"obedience",
"grub",
"victorian",
"lassie",
"posed",
"conway",
"resentment",
"didi",
"locke",
"dat",
"booming",
"aunts",
"shay",
"brigadier",
"intrusion",
"hedgehog",
"recipes",
"tacky",
"'or",
"here.",
"hk",
"saviour",
"physicist",
"yonder",
"admiring",
"anyplace",
"forfeit",
"lullaby",
"oasis",
"unleash",
"avengers",
"indifferent",
"boxers",
"truthfully",
"organise",
"actresses",
"grin",
"arranging",
"voila",
"hydra",
"shutters",
"pearson",
"genesis",
"daria",
"energetic",
"postal",
"henrik",
"aloud",
"hmph",
"archive",
"nauseous",
"funniest",
"muse",
"ernst",
"constitutional",
"recital",
"covenant",
"cloudy",
"prestige",
"olivier",
"pencils",
"gunnar",
"helper",
"decks",
"astray",
"leland",
"that-that",
"ip",
"casing",
"cooing",
"ozzy",
"sterile",
"bagel",
"releases",
"recognizes",
"lorelai",
"intimidate",
"forgery",
"kenya",
"bred",
"dunn",
"vector",
"huck",
"nausea",
"sakura",
"detonate",
"lenin",
"inhuman",
"horsepower",
"violating",
"pascal",
"informer",
"hooting",
"plugged",
"oriental",
"about-",
"pretends",
"tram",
"reyes",
"spoils",
"chaplain",
"farce",
"parasites",
"whipping",
"cruising",
"violently",
"tile",
"ashtray",
"und",
"neighing",
"zones",
"donut",
"spoilt",
"nobility",
"decoration",
"sharpe",
"scoot",
"kelso",
"abdominal",
"updated",
"blueprints",
"dealings",
"pitched",
"resource",
"coalition",
"hoss",
"professors",
"espionage",
"dishwasher",
"minded",
"cheyenne",
"resulted",
"brewster",
"wrestler",
"cons",
"protesting",
"barbarians",
"loony",
"kangaroo",
"rite",
"dialect",
"counsellor",
"tsunami",
"locket",
"sorcerer",
"pilgrims",
"incorrect",
"parameters",
"risotto",
"unreliable",
"geniuses",
"agricultural",
"willard",
"marital",
"wilkes",
"scramble",
"hera",
"swears",
"starvation",
"'my",
"summons",
"aquarium",
"flavors",
"sanity",
"roxanne",
"caste",
"wedge",
"bedside",
"experimenting",
"truths",
"tribunal",
"emerson",
"darlene",
"automated",
"treachery",
"explorer",
"greens",
"dunk",
"amused",
"fishes",
"kappa",
"oleg",
"goblin",
"appreciates",
"lark",
"commissioned",
"reassuring",
"disclose",
"seaside",
"politely",
"preoccupied",
"incense",
"folly",
"pear",
"vikings",
"irs",
"lavender",
"graceful",
"stretcher",
"grande",
"simulation",
"lasers",
"groovy",
"discussions",
"basin",
"sacrificing",
"luncheon",
"disobey",
"snot",
"mil",
"sinful",
"yearning",
"gruesome",
"aide",
"i`m",
"shen",
"haha",
"masculine",
"mortimer",
"extras",
"hopper",
"guides",
"banjo",
"dina",
"camelot",
"blazing",
"dos",
"crowned",
"gonzalo",
"painkillers",
"rave",
"foley",
"silvia",
"sylvie",
"cover-up",
"cassette",
"sorting",
"misplaced",
"sprung",
"vs.",
"practising",
"prehistoric",
"howls",
"enterprises",
"designing",
"suspense",
"patti",
"japs",
"renounce",
"paints",
"relying",
"w.",
"n-no",
"rendered",
"herald",
"randomly",
"stylish",
"spleen",
"mugged",
"investing",
"protestant",
"circumstantial",
"smear",
"pantry",
"guiding",
"flavour",
"hollis",
"cursing",
"erection",
"gravel",
"artifact",
"raccoon",
"0h",
"classics",
"enlighten",
"enhance",
"delivers",
"spinach",
"binding",
"goldman",
"invalid",
"touring",
"johns",
"fashioned",
"gilmore",
"spilling",
"ledger",
"quieter",
"meadows",
"majestic",
"frequencies",
"zhou",
"historian",
"containers",
"marisa",
"diplomat",
"wrinkles",
"aryan",
"subtitling",
"para",
"nationals",
"natalia",
"rudolph",
"palestine",
"insulin",
"torpedoes",
"pagan",
"swimmer",
"disgraced",
"bygones",
"intellect",
"accommodate",
"posture",
"adjusting",
"giraffe",
"peer",
"petersburg",
"religions",
"hammering",
"audiences",
"ruben",
"gigs",
"damaging",
"colossal",
"renew",
"iceberg",
"vigilante",
"emile",
"dougie",
"biggie",
"pietro",
"chewed",
"cellular",
"indecent",
"dangerously",
"duane",
"bribed",
"rascals",
"chai",
"ramona",
"grammar",
"thrash",
"hutch",
"runners",
"pager",
"stumble",
"dispatcher",
"thankyou",
"iast",
"stump",
"wraps",
"gator",
"teammates",
"skye",
"weakest",
"disconnect",
"melts",
"morally",
"feud",
"apiece",
"theatrical",
"distinguish",
"cooperating",
"passions",
"strive",
"flourish",
"reeves",
"unnecessarily",
"janie",
"perceive",
"upstate",
"accuracy",
"cosmo",
"bitterness",
"fundraiser",
"reflects",
"insolent",
"strauss",
"circular",
"generated",
"rumble",
"pilgrim",
"squirrels",
"purge",
"xander",
"mingle",
"manufacture",
"firemen",
"casually",
"handler",
"hobbs",
"slain",
"phd",
"multiply",
"confinement",
"patches",
"divert",
"alfonso",
"protests",
"versions",
"ambrose",
"adriana",
"ia",
"mit",
"youthful",
"numbered",
"mustafa",
"herbal",
"kathryn",
"gunther",
"lawful",
"stacked",
"damian",
"reasonably",
"suzuki",
"myra",
"logged",
"prospects",
"mainstream",
"sykes",
"alimony",
"stating",
"budapest",
"nelly",
"awakened",
"curses",
"ore",
"slices",
"curl",
"uprising",
"cabaret",
"manufacturer",
"ration",
"boulder",
"manpower",
"badges",
"mush",
"recycling",
"disarm",
"elusive",
"snarls",
"chiu",
"flawless",
"considers",
"youre",
"harmful",
"horseback",
"m.e.",
"owens",
"admits",
"morales",
"helene",
"thankfully",
"recreate",
"travelers",
"navigate",
"pun",
"ethnic",
"voice-over",
"kono",
"esteemed",
"exterior",
"duffy",
"triad",
"licensed",
"gems",
"moira",
"mutants",
"testifying",
"dissolve",
"iraqi",
"robes",
"wildest",
"portfolio",
"tossing",
"pussycat",
"booby",
"nbc",
"simms",
"knack",
"lodged",
"cpr",
"lei",
"racism",
"nato",
"mayo",
"gillian",
"stingy",
"kaiser",
"shooters",
"napkins",
"continents",
"registry",
"fags",
"shah",
"blinking",
"capt.",
"duo",
"gymnastics",
"jag",
"bernadette",
"valiant",
"exploited",
"attachment",
"'for",
"trans",
"intruders",
"owning",
"mega",
"crispy",
"administrative",
"hawks",
"crave",
"qu",
"reviewed",
"persuasive",
"turmoil",
"promoting",
"shoving",
"bette",
"unworthy",
"elevators",
"fulfilling",
"ronny",
"rooting",
"irma",
"mcqueen",
"s.h.i.e.l.d.",
"stoop",
"ee",
"a.j.",
"greer",
"panicking",
"panels",
"demonstrated",
"myths",
"mattie",
"volleyball",
"cheats",
"chord",
"berkeley",
"oblige",
"radiant",
"anticipation",
"fibers",
"daleks",
"aroused",
"paralysis",
"ferocious",
"heinous",
"roulette",
"emergencies",
"olaf",
"enjoyable",
"torso",
"compassionate",
"softer",
"delegation",
"can`t",
"fangs",
"plaque",
"alonso",
"moi",
"o.j.",
"bowman",
"uterus",
"buggy",
"kristina",
"denies",
"tally",
"neighs",
"unusually",
"tibet",
"fez",
"charcoal",
"healer",
"weirdest",
"eel",
"recordings",
"tamil",
"naw",
"camouflage",
"seagulls",
"hub",
"shoulda",
"mailman",
"buckley",
"furry",
"pronto",
"sedan",
"kermit",
"amir",
"edible",
"trifle",
"delusion",
"foundations",
"calcutta",
"cubes",
"colon",
"crotch",
"tariq",
"costing",
"resisted",
"leila",
"snail",
"ursula",
"strippers",
"there-",
"vista",
"sorta",
"fiend",
"ultra",
"restrain",
"pinkie",
"voluntary",
"widely",
"uncommon",
"blackie",
"farrell",
"infiltrate",
"erika",
"sophomore",
"radios",
"ringo",
"medallion",
"eiffel",
"yamato",
"wilder",
"dyke",
"bailiff",
"peril",
"plaintiff",
"emilia",
"transmitted",
"bonnet",
"assassinated",
"emptiness",
"helmets",
"normandy",
"cheetah",
"clips",
"thicker",
"whiz",
"saga",
"bullock",
"binoculars",
"duckman",
"accord",
"diva",
"jeanette",
"birmingham",
"pick-up",
"bawk",
"sita",
"concierge",
"clams",
"questionable",
"volunteering",
"lunches",
"spooked",
"pinpoint",
"hereafter",
"brook",
"weakened",
"verses",
"think-",
"marshals",
"deposited",
"cleans",
"ironically",
"bodily",
"distances",
"hypnosis",
"secretive",
"dominion",
"civic",
"tori",
"bursts",
"uncertainty",
"streams",
"vortex",
"tights",
"sponsors",
"tickles",
"cello",
"herring",
"ab",
"disability",
"griffith",
"orchid",
"hostility",
"conflicts",
"pans",
"housekeeping",
"panthers",
"wonderland",
"ruiz",
"interrogated",
"andromeda",
"premium",
"broccoli",
"garfield",
"mussolini",
"pottery",
"shrapnel",
"planner",
"jacked",
"robyn",
"rounded",
"giuseppe",
"cactus",
"motherland",
"defendants",
"wilt",
"jia",
"executives",
"spoons",
"mnh-mnh",
"belinda",
"unbelievably",
"fairness",
"continuously",
"spices",
"organisms",
"regretted",
"monarch",
"narrative",
"roth",
"catcher",
"dictatorship",
"controversy",
"manu",
"alejandro",
"josef",
"dignified",
"bonded",
"tilt",
"beginnings",
"peaks",
"tissues",
"pretentious",
"dell",
"jeopardize",
"osborne",
"diversity",
"shabby",
"brownies",
"overdo",
"sharma",
"crunching",
"tel",
"marnie",
"separating",
"horrid",
"overrated",
"harmon",
"rethink",
"mastered",
"eww",
"reopen",
"camels",
"tux",
"denis",
"depended",
"tak",
"whine",
"axis",
"orchard",
"cherries",
"pleasing",
"unforgivable",
"craziest",
"rods",
"distressed",
"bubbling",
"'on",
"interact",
"manipulating",
"rewind",
"burgundy",
"blacked",
"trillion",
"onwards",
"reservoir",
"participation",
"caucasian",
"flushing",
"serbian",
"manhood",
"lao",
"matilda",
"exhale",
"dinah",
"in-laws",
"caramel",
"slander",
"identifying",
"drunkard",
"blooming",
"stocking",
"one-on-one",
"rinse",
"artwork",
"catholics",
"stormy",
"refresh",
"elisa",
"cor",
"floats",
"expanded",
"chilling",
"congo",
"afghan",
"backside",
"antarctica",
"sandals",
"investigative",
"wills",
"searches",
"yawns",
"collaboration",
"untouched",
"psyched",
"hussein",
"departments",
"tuxedo",
"mak",
"bummed",
"heinrich",
"finer",
"manure",
"him-",
"compatible",
"moor",
"keg",
"monsignor",
"peeled",
"dona",
"stings",
"seeker",
"watering",
"ids",
"malaria",
"aj",
"walters",
"sinatra",
"lending",
"dodgers",
"fortnight",
"reassure",
"braun",
"meddling",
"overthrow",
"postmortem",
"achievements",
"excellence",
"'ight",
"latch",
"coppers",
"raids",
"dickens",
"thing-",
"drifted",
"leeds",
"obsessive",
"upwards",
"lynette",
"oatmeal",
"midday",
"implants",
"unmarried",
"murat",
"sewers",
"bowie",
"mercenary",
"perks",
"outrun",
"copying",
"upload",
"freya",
"helga",
"shu",
"bumping",
"inland",
"yorkshire",
"humbly",
"injections",
"participating",
"python",
"laurent",
"prof.",
"cockroaches",
"puberty",
"technologies",
"crumble",
"toots",
"miki",
"roscoe",
"high-school",
"clot",
"exterminate",
"rake",
"cholesterol",
"deli",
"augustus",
"sublime",
"bottled",
"fung",
"charlene",
"the-the",
"demolition",
"methane",
"appendix",
"aches",
"ahoy",
"distractions",
"dashing",
"plank",
"medications",
"chevy",
"zurich",
"makers",
"invading",
"loner",
"comm",
| |
:param min: Lower boundary (inclusive) to check against.
:param max: Upper boundary (inclusive) to check against.
:param exclude_max: Exclude the upper boundary `max` if set to `true`. Defaults to `false`.
:return: `true` if `x` is between the specified bounds, otherwise `false`.
"""
return process('between', x=x, min=min, max=max, exclude_max=exclude_max)
def ceil(x) -> ProcessBuilder:
    """
    Round fractions up

    Delegates to the module's ``process`` helper with process id ``'ceil'``
    and the single argument ``x``, returning the resulting
    :class:`ProcessBuilder`.

    :param x: A number to round up.
    :return: The number rounded up.
    """
    return process('ceil', x=x)
def climatological_normal(data, period, climatology_period=UNSET) -> ProcessBuilder:
    """
    Compute climatology normals.

    :param data: A data cube with exactly one temporal dimension. The data cube must span at least the
        temporal interval specified in the parameter `climatology-period`. Seasonal periods may span two
        consecutive years, e.g. temporal winter that includes months December, January and February. If the
        required months before the actual climate period are available, the season is taken into account. If
        not available, the first season is not taken into account and the seasonal mean is based on one year
        less than the other seasonal normals. The incomplete season at the end of the last year is never taken
        into account.
    :param period: The time intervals to aggregate the average value for. The following pre-defined
        frequencies are supported: * `day`: Day of the year * `month`: Month of the year *
        `climatology-period`: The period specified in the `climatology-period`. * `season`: Three month
        periods of the calendar seasons (December - February, March - May, June - August, September -
        November). * `tropical-season`: Six month periods of the tropical seasons (November - April, May -
        October).
    :param climatology_period: The climatology period as closed temporal interval. The first element of the
        array is the first year to be fully included in the temporal interval. The second element is the last
        year to be fully included in the temporal interval. The default period is from 1981 until 2010 (both
        inclusive).
    :return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
        system and resolution) remain unchanged, except for the resolution and dimension labels of the
        temporal dimension. The temporal dimension has the following dimension labels: * `day`: `001` - `365`
        * `month`: `01` - `12` * `climatology-period`: `climatology-period` * `season`: `djf` (December -
        February), `mam` (March - May), `jja` (June - August), `son` (September - November) *
        `tropical-season`: `ndjfma` (November - April), `mjjaso` (May - October)
    """
    return process('climatological_normal', data=data, period=period, climatology_period=climatology_period)
def clip(x, min, max) -> ProcessBuilder:
    """
    Clip a value between a minimum and a maximum.

    NOTE: the parameter names `min`/`max` shadow the Python builtins, but they
    must match the openEO process specification and cannot be renamed.

    :param x: A number.
    :param min: Minimum value. If the value is lower than this value, the process will return the value of
        this parameter.
    :param max: Maximum value. If the value is greater than this value, the process will return the value of
        this parameter.
    :return: The value clipped to the specified range.
    """
    return process('clip', x=x, min=min, max=max)
def constant(x) -> ProcessBuilder:
    """
    Define a constant value.

    :param x: The value of the constant.
    :return: The value of the constant.
    """
    return process('constant', x=x)
def cos(x) -> ProcessBuilder:
    """
    Cosine.

    :param x: An angle in radians.
    :return: The computed cosine of `x`.
    """
    return process('cos', x=x)
def cosh(x) -> ProcessBuilder:
    """
    Hyperbolic cosine.

    :param x: An angle in radians.
    :return: The computed hyperbolic cosine of `x`.
    """
    return process('cosh', x=x)
def count(data, condition=UNSET, context=UNSET) -> ProcessBuilder:
    """
    Count the number of elements.

    :param data: An array with elements of any data type.
    :param condition: A condition consists of one or more processes, which in the end return a boolean
        value. It is evaluated against each element in the array. An element is counted only if the condition
        returns `true`. Defaults to count valid elements in a list (see ``is_valid()``). Setting this
        parameter to boolean `true` counts all elements in the list.
    :param context: Additional data to be passed to the condition.
    :return: The counted number of elements.
    """
    return process('count', data=data, condition=condition, context=context)
def create_raster_cube() -> ProcessBuilder:
    """
    Create an empty raster data cube.

    :return: An empty raster data cube with zero dimensions.
    """
    # Dropped the stray empty trailing-argument slot: `process('create_raster_cube', )`.
    return process('create_raster_cube')
def cummax(data, ignore_nodata=UNSET) -> ProcessBuilder:
    """
    Cumulative maxima.

    :param data: An array of numbers.
    :param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
        Setting this flag to `false` considers no-data values so that `null` is set for all the following
        elements.
    :return: An array with the computed cumulative maxima.
    """
    return process('cummax', data=data, ignore_nodata=ignore_nodata)
def cummin(data, ignore_nodata=UNSET) -> ProcessBuilder:
    """
    Cumulative minima.

    :param data: An array of numbers.
    :param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
        Setting this flag to `false` considers no-data values so that `null` is set for all the following
        elements.
    :return: An array with the computed cumulative minima.
    """
    return process('cummin', data=data, ignore_nodata=ignore_nodata)
def cumproduct(data, ignore_nodata=UNSET) -> ProcessBuilder:
    """
    Cumulative products.

    :param data: An array of numbers.
    :param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
        Setting this flag to `false` considers no-data values so that `null` is set for all the following
        elements.
    :return: An array with the computed cumulative products.
    """
    return process('cumproduct', data=data, ignore_nodata=ignore_nodata)
def cumsum(data, ignore_nodata=UNSET) -> ProcessBuilder:
    """
    Cumulative sums.

    :param data: An array of numbers.
    :param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default.
        Setting this flag to `false` considers no-data values so that `null` is set for all the following
        elements.
    :return: An array with the computed cumulative sums.
    """
    return process('cumsum', data=data, ignore_nodata=ignore_nodata)
def debug(data, code=UNSET, level=UNSET, message=UNSET) -> ProcessBuilder:
    """
    Publish debugging information.

    :param data: Data to publish.
    :param code: An identifier to help identify the log entry in a bunch of other log entries.
    :param level: The severity level of this message, defaults to `info`. Note that the level `error`
        forces the computation to be stopped!
    :param message: A message to send in addition to the data.
    :return: Returns the data as passed to the `data` parameter.
    """
    return process('debug', data=data, code=code, level=level, message=message)
def dimension_labels(data, dimension) -> ProcessBuilder:
    """
    Get the dimension labels.

    :param data: The data cube.
    :param dimension: The name of the dimension to get the labels for.
    :return: The labels as array.
    """
    return process('dimension_labels', data=data, dimension=dimension)
def divide(x, y) -> ProcessBuilder:
    """
    Division of two numbers.

    :param x: The dividend.
    :param y: The divisor.
    :return: The computed result.
    """
    return process('divide', x=x, y=y)
def drop_dimension(data, name) -> ProcessBuilder:
    """
    Remove a dimension.

    :param data: The data cube to drop a dimension from.
    :param name: Name of the dimension to drop.
    :return: A data cube without the specified dimension. The number of dimensions decreases by one, but
        the dimension properties (name, type, labels, reference system and resolution) for all other
        dimensions remain unchanged.
    """
    return process('drop_dimension', data=data, name=name)
def e() -> ProcessBuilder:
    """
    Euler's number (e).

    :return: The numerical value of Euler's number.
    """
    # Dropped the stray empty trailing-argument slot: `process('e', )`.
    return process('e')
def eq(x, y, delta=UNSET, case_sensitive=UNSET) -> ProcessBuilder:
    """
    Equal to comparison.

    :param x: First operand.
    :param y: Second operand.
    :param delta: Only applicable for comparing two numbers. If this optional parameter is set to a
        positive non-zero number the equality of two numbers is checked against a delta value. This is
        especially useful to circumvent problems with floating point inaccuracy in machine-based computation.
        This option is basically an alias for the following computation:
        `lte(abs(minus([x, y])), delta)`
    :param case_sensitive: Only applicable for comparing two strings. Case sensitive comparison can be
        disabled by setting this parameter to `false`.
    :return: Returns `true` if `x` is equal to `y`, `null` if any operand is `null`, otherwise `false`.
    """
    return process('eq', x=x, y=y, delta=delta, case_sensitive=case_sensitive)
def exp(p) -> ProcessBuilder:
    """
    Exponentiation to the base e.

    :param p: The numerical exponent.
    :return: The computed value for *e* raised to the power of `p`.
    """
    return process('exp', p=p)
def extrema(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Minimum and maximum values
:param data: An array of | |
<gh_stars>1-10
""" Module for network components such as buses, lines and transformers
"""
import collections
import warnings
import math
import numpy as np
from component_base import Branch, Component
from monsterexceptions import (
PsseBaseException, PsseWnddtException, PsseMacdatdException
)
from monsterpsspy import MonsterPssPy
from monsterexceptions import PsseLoddtException, PsseMacintException
from serviceenumsandcontants import ComponentStatus, ComponentTypeEnum
from component_busdetails import BusDetails
from component_errors import (
TwoWindingTransformerInitError, ThreeWindingTransformerInitError, LoadInitError,
MachineInitError
)
from sharedconstantsandenums import RATE_NAME
warnings.simplefilter('ignore')
class Line(Branch):
    """A transmission line or cable joining two buses.

    Parameters mirror the PSSE branch model; ``rate_a``/``rate_b``/``rate_c``
    are accepted for signature compatibility but are not stored here.
    """

    _component_type = ComponentTypeEnum.LineComponent

    def __init__(
        self, from_bus, to_bus, identificator, rx=None, length=None,
        rate_a=None, rate_b=None, rate_c=None, msl_component=False, msl_lines=None
    ):
        # Multi-section-line bookkeeping: members of a multi-section line are
        # never switched individually (see _change_status).
        self.msl_component = msl_component
        self.msl_lines = msl_lines
        self._validate_voltage_levels(from_bus, to_bus)
        super(Line, self).__init__(from_bus, to_bus, identificator, rx, length)

    @staticmethod
    def _validate_voltage_levels(from_bus, to_bus):
        # A base-voltage mismatch across a line is suspicious but not fatal,
        # so warn rather than raise.
        if from_bus.base_voltage != to_bus.base_voltage:
            message = 'Voltage at %s different from voltage at %s.' % (from_bus, to_bus)
            warnings.warn(message)

    def _change_status(self, from_status, to_status):
        """Switch the line to `to_status` in PSSE, if currently in `from_status`."""
        if self.status() != from_status or self.msl_component:
            return
        # Multi-section lines use a dedicated PSSE edit API.
        edit = (
            MonsterPssPy.multi_section_line_edit
            if self.msl_lines
            else MonsterPssPy.branch_data
        )
        edit(
            self.from_bus.number,
            self.to_bus.number,
            self.identificator,
            [to_status.get_index()]
        )

    def is_branch(self):
        """Lines connect two buses, so they are branches."""
        return True
class OverHeadLine(Line):
    """Main class for an overhead transmission line.

    The line is described according to the model in PSSE. Distinguished from
    ``Cable`` only by its component type, since overhead lines and cables
    have different failure statistics.

    Parameters
    ==========
    from_bus: Bus component
        the from bus component (not the bus number)
    to_bus: Bus component
        the to bus component (not the bus number)
    identificator: str
        the string id of the line
    length: double (optional)
        line length in km.
    rate_a, rate_b, rate_c: double (optional)
        rates as given in the PSSE model.

    Examples
    ========
    >>> frogner400 = component.Bus(51081, 'FROGNER4', 400)
    >>> aadal = component.Bus(51171, 'ADAL4', 400)
    >>> frogner_aadal = component.OverHeadLine(frogner400, aadal, '1')

    Raises
    ======
    Error if rates are not ordered.
    """
    _component_type = ComponentTypeEnum.OverHeadLineComponent
class Cable(Line):
    """Main class for a transmission cable.

    This class is similar to (overhead) Line, except for a different
    ``_component_type``. This is done as cables and overhead lines have
    different failure statistics.
    """
    _component_type = ComponentTypeEnum.CableComponent
class TwoWindingTransformer(Branch):
    """Main class for a two winding transformer.

    The transformer is described according to the model in PSSE.

    Parameters
    ==========
    from_bus: Bus component
        the from bus component (not the bus number)
    to_bus: Bus component
        the to bus component (not the bus number)
    name: str (optional)
        name of transformer
    metered_from: (optional, defaults to True)
        metered-end indicator forwarded to ``Branch.__init__``.
        NOTE(review): earlier docs described this as a Bus component, but the
        default is the boolean ``True`` — confirm the expected type against
        ``Branch.__init__``.

    Examples
    ========
    >>> from accc_pv.component import TwoWindingTransformer
    >>> frogner400 = component.Bus(51081, 'FROGNER4', 400)
    >>> frogner300 = component.Bus(51082, 'FROGNER3', 300)
    >>> f_trafo = TwoWindingTransformer(frogner400, frogner300, '1')

    Raises
    ======
    TwoWindingTransformerInitError
    """
    _component_type = ComponentTypeEnum.TwoWindingTransformerComponent

    def __init__(
        self, from_bus, to_bus, identificator, rx=None, rate_a=None,
        rate_b=None, rate_c=None, name=None, metered_from=True
    ):
        # Only `name` is validated here; rate_a/b/c are accepted for
        # signature compatibility but not stored on this class.
        if name is not None and not isinstance(name, str):
            raise TwoWindingTransformerInitError(
                'Two winding name must be a string!'
            )
        self.name = name
        self.metered_from = metered_from
        Branch.__init__(
            self, from_bus, to_bus, identificator, metered_from=metered_from, rx=rx
        )

    def _change_status(self, from_status, to_status):
        # Switch the transformer in/out of service via the PSSE two-winding
        # API, but only if it is currently in `from_status`.
        if self.status() == from_status:
            MonsterPssPy.two_winding_data(
                self.from_bus.number,
                self.to_bus.number,
                self.identificator,
                [to_status.get_index()],
                []
            )
class ThreeWindingTransformer(Branch):
    """Main class for a three winding transformer.

    The transformer is described according to the model in PSSE.

    Parameters
    ==========
    from_bus: Bus component
        the from bus component (not the bus number)
    to_bus: Bus component
        the to bus component (not the bus number)
    other_bus: Bus component
        tertiary component winding
    name: str (optional)
        name of transformer
    non_metered_end: Bus component (optional)
        the bus at the non-metered end; must be one of the three winding buses

    Examples
    ========
    >>> from accc_pv.component import ThreeWindingTransformer
    >>> f400 = component.Bus(51081, 'FROGNER4', 400)
    >>> f300 = component.Bus(51082, 'FROGNER3', 300)
    >>> fX = component.Bus(51085, 'FROGNERX', 66)
    >>> f_trafo = ThreeWindingTransformer(f400, f300, fX, '1')

    Raises
    ======
    ThreeWindingTransformerInitError
    """
    _component_type = ComponentTypeEnum.ThreeWindingTransformerComponent

    def __init__(
        self, from_bus, to_bus, other_bus, identificator,
        name=None, non_metered_end=None
    ):
        if not isinstance(other_bus, BusDetails):
            raise ThreeWindingTransformerInitError(
                'The other_bus must be of Bus-type.'
            )
        if name is not None and not isinstance(name, str):
            raise ThreeWindingTransformerInitError(
                'Transformer name must be a string.'
            )
        if non_metered_end is not None and \
                not isinstance(non_metered_end, BusDetails):
            raise ThreeWindingTransformerInitError(
                'non_metered_end must be a bus.'
            )
        if non_metered_end is not None and \
                non_metered_end not in (from_bus, to_bus, other_bus):
            raise ThreeWindingTransformerInitError(
                'non_metered_end must be one of the three defining buses.'
            )
        self.other_bus = other_bus
        self.three_winding_name = name
        self.non_metered_end = non_metered_end
        Branch.__init__(self, from_bus, to_bus, identificator)

    def __str__(self):
        """Return the string representation of a three winding transformer.

        Example output:
        'Three winding transformer Bus 51081 FROGNER4 400.0 - Bus 51082
        FROGNER3 300.0 - Bus 51085 FROGNERX 66.0 1'
        """
        return 'Three winding transformer %s - %s - %s %s' % (
            self.from_bus,
            self.to_bus,
            self.other_bus,
            self.identificator
        )

    def _make_screening_string(self):
        # BUG FIX: use the tertiary bus *number*, consistently with the from/to
        # buses; previously the whole Bus object repr was interpolated.
        self._screening_string = '{} THREEWINDING AT BUS {} TO BUS {} TO BUS {} CKT {}\n'.format(
            self.action_string['disconnect'], self.from_bus.number, self.to_bus.number,
            self.other_bus.number, self.identificator
        )

    def _relay_screening_string(self):
        # Same bus-number fix as in _make_screening_string.
        screening_string = '{} THREEWINDING AT BUS {} TO BUS {} TO BUS {} CKT {}\n'.format(
            self._relay_action(), self.from_bus.number, self.to_bus.number,
            self.other_bus.number, self.identificator
        )
        return screening_string

    def status(self):
        """Return the component status as reported by PSSE."""
        _status = MonsterPssPy.tr3int(
            self.from_bus.number,
            self.to_bus.number,
            self.other_bus.number,
            self.identificator,
            'STATUS'
        )
        return self._component_status(_status)

    def _change_status(self, from_status, to_status):
        """Switch the three winding transformer's service status in PSSE.

        Uses the three-winding impedance-change API, setting only the status
        field and leaving all other impedance fields untouched.

        Raises
        ======
        A psspy.PsseException is raised if the underlying API call fails.
        """
        if self.status() == from_status:
            MonsterPssPy.three_wnd_imped_chng(
                self.from_bus.number,
                self.to_bus.number,
                self.other_bus.number,
                self.identificator,
                [MonsterPssPy._i()] * 7 + [to_status.get_index()],
                []
            )

    def get_real_power(self):
        """Return the real power flow of the most heavily loaded winding."""
        buses = collections.deque(self.get_busnumbers())
        pct = -1
        # BUG FIX: default to the first winding so n_rotate is always bound;
        # previously a NameError was possible if no reading exceeded -1.
        n_rotate = 1
        for i in range(3):
            buses.rotate(1)
            pct_wnd = MonsterPssPy.wnddat(
                buses[0], buses[1], buses[2], self.identificator, 'PCTRTA'
            )
            if pct_wnd > pct:
                pct = pct_wnd
                n_rotate = i + 1
        # After three rotations the deque is back at its original orientation;
        # rotate to the winding that showed the highest percentage loading.
        buses.rotate(n_rotate)
        return MonsterPssPy.wnddt2(
            buses[0], buses[1], buses[2], self.identificator, 'FLOW'
        ).real

    def _flow(self, *branch_buses_and_id):
        # Returns the complex flow, or [] when PSSE reports error code 7
        # (winding not found / not applicable).
        try:
            s = MonsterPssPy.wnddt2(*branch_buses_and_id, string='FLOW')
        except PsseWnddtException as e:
            if e._ierr == 7:
                s = []
            else:
                raise
        return s

    def flow_amp_rate(self):
        """Return per-winding flows, with Q capped by the ampere rate."""
        buses = collections.deque(self.get_busnumbers(False))
        flow = []
        rate_a = self.get_rate()
        for i in range(3):
            branch_buses_and_id = [buses[0], buses[1], buses[2], self.identificator]
            # BUG FIX: unpack the argument list, as flow() does; previously the
            # list itself was passed as a single positional argument.
            s = self._flow(*branch_buses_and_id)
            if s:
                i_rate = MonsterPssPy.wnddat(*branch_buses_and_id, string='PCTRTA')
                s_rate = rate_a[i] * i_rate / 100.0
                if s_rate < abs(s.real):
                    flow.append(complex(s.real, 0))
                else:
                    flow.append(complex(s.real, math.sqrt(s_rate**2 - s.real**2)))
            buses.rotate(-1)
        return flow

    def flow(self):
        """Return the complex flow for each of the three windings."""
        buses = collections.deque(self.get_busnumbers(False))
        flow = []
        for i in range(3):
            branch_buses_and_id = [buses[0], buses[1], buses[2], self.identificator]
            s = self._flow(*branch_buses_and_id)
            flow.append(s)
            buses.rotate(-1)
        return flow

    def get_flow_direction(self):
        """Return the sign of the real-power flow for each winding."""
        return [np.sign(flow.real) for flow in self.flow()]

    def set_rate(self, file_path):
        """Cache the per-winding rate for `file_path`."""
        buses = collections.deque(self.get_busnumbers(False))
        self._rate[file_path] = []
        for i in range(3):
            self._rate[file_path].append(
                MonsterPssPy.wnddat(buses[0], buses[1], buses[2], self.identificator, RATE_NAME)
            )
            buses.rotate(-1)

    def set_rx(self):
        """Cache the per-winding R+jX impedance."""
        buses = collections.deque(self.get_busnumbers(False))
        self.rx = []
        for i in range(3):
            self.rx.append(
                MonsterPssPy.wnddt2(buses[0], buses[1], buses[2], self.identificator, 'RX')
            )
            buses.rotate(-1)
class Load(Component):
""" Main class for load components
Parameters
==========
bus: Bus component
identificator: str
"""
_component_type = ComponentTypeEnum.LoadComponent
action_string = {'connect': 'ADD', 'disconnect': 'REMOVE'}
def __init__(self, load_bus, load_id):
if not isinstance(load_bus, BusDetails):
raise LoadInitError('load_bus must be a Bus type.')
if not isinstance(load_id, str):
raise LoadInitError('load_id must be a String.')
self.from_bus = load_bus
self.identificator = load_id.strip()
super(type(self), self).__init__(activation_sign=-1.0)
    def get_pq(self):
        """Return the load's actual value from the case as complex MVA (P + jQ)."""
        return MonsterPssPy.loddt2(self.from_bus.number, self.identificator, 'MVA', 'ACT')
    def is_injection(self):
        """Return True: a load is an injection component, not a branch."""
        return True
def set_pq(self, cmplx_pq):
"""Set PQ value in case as actual MVA load
"""
try:
return MonsterPssPy.load_chng(
self.from_bus.number,
self.identificator,
realar1=cmplx_pq.real,
realar2=cmplx_pq.imag
)
except PsseBaseException:
raise
    def get_base_voltage(self):
        """Return base voltage of the load.

        Defined as the base voltage of the connected bus.
        """
        return self.from_bus.get_base_voltage()
def get_area(self):
""" Return the area of the load
Defined as the area of the conencted bus.
"""
area_list = list()
area_list.extend([self.from_bus.get_area()])
return list(set(area_list))
def get_zone(self):
""" Return the zone of the load
Defined as the zone of the conencted bus.
"""
zone_list = list()
zone_list.extend([self.from_bus.get_zone()])
return list(set(zone_list))
def __str__(self):
""" Return string representation of a Load object
Examples
========
>>> from accc_pv.component import Load, Bus
>>> f400 = component.Bus(51081, 'FROGNER4', 400)
>>> | |
return SeqBasedSymbolicStr(self.var)
    def __hash__(self):
        # Hash the string form. __str__ is invoked directly (not via str())
        # because str() would reject a non-`str` return value from __str__.
        return hash(self.__str__())
@staticmethod
def _concat_strings(
a: Union[str, "SeqBasedSymbolicStr"], b: Union[str, "SeqBasedSymbolicStr"]
) -> Union[str, "SeqBasedSymbolicStr"]:
assert not is_tracing()
# Assumes at least one argument is symbolic and not tracing
if isinstance(a, SeqBasedSymbolicStr) and isinstance(b, SeqBasedSymbolicStr):
return SeqBasedSymbolicStr(a.var + b.var)
elif isinstance(a, str) and isinstance(b, SeqBasedSymbolicStr):
return SeqBasedSymbolicStr(
SeqBasedSymbolicStr._coerce_to_smt_sort(a) + b.var
)
else:
assert isinstance(a, SeqBasedSymbolicStr)
assert isinstance(b, str)
return SeqBasedSymbolicStr(
a.var + SeqBasedSymbolicStr._coerce_to_smt_sort(b)
)
    def __add__(self, other):
        """Concatenate; stays symbolic when `other` is a str or same-flavor symbolic."""
        with NoTracing():
            if isinstance(other, (SeqBasedSymbolicStr, str)):
                return SeqBasedSymbolicStr._concat_strings(self, other)
            if isinstance(other, AnySymbolicStr):
                # A different symbolic-string flavor: let it handle the concat.
                return NotImplemented
            raise TypeError
    def __radd__(self, other):
        """Reflected concatenation: `other + self` with `self` on the right."""
        with NoTracing():
            if isinstance(other, (SeqBasedSymbolicStr, str)):
                return SeqBasedSymbolicStr._concat_strings(other, self)
            if isinstance(other, AnySymbolicStr):
                # A different symbolic-string flavor: let it handle the concat.
                return NotImplemented
            raise TypeError
    def __mul__(self, other):
        """Repeat the string, mirroring str.__mul__ semantics."""
        self.statespace  # NOTE(review): presumably asserts an active statespace — confirm
        if isinstance(other, Integral):
            if other <= 1:
                # Matches str: count of 1 returns self; <= 0 yields a concrete "".
                return self if other == 1 else ""
            # Note that in SymbolicInt, we attempt string multiplication via regex.
            # Z3 cannot do much with a symbolic regex, so we case-split on
            # the repetition count.
            return SeqBasedSymbolicStr(z3.Concat(*[self.var for _ in range(other)]))
        return NotImplemented

    __rmul__ = __mul__
    def __mod__(self, other):
        # %-formatting is not modeled symbolically: realize the argument and
        # delegate to concrete str formatting.
        return self.__str__() % realize(other)
    def __contains__(self, other):
        """Symbolic substring membership test via z3 Contains."""
        forced = force_to_smt_sort(other, SeqBasedSymbolicStr)
        return SymbolicBool(z3.Contains(self.var, forced))
    def __getitem__(self, i: Union[int, slice]):
        """Index or slice; both yield a (symbolic) string, like str.__getitem__."""
        with NoTracing():
            # Normalizes int vs slice and handles negative/out-of-range forms.
            idx_or_pair = process_slice_vs_symbolic_len(
                self.statespace, i, z3.Length(self.var)
            )
            if isinstance(idx_or_pair, tuple):
                # Slice case; z3 sequence Extract takes (seq, offset, length).
                (start, stop) = idx_or_pair
                smt_result = z3.Extract(self.var, start, stop - start)
            else:
                # Single index: re-wrap the element as a length-1 sequence.
                smt_result = z3.Unit(self.var[idx_or_pair])
            return SeqBasedSymbolicStr(smt_result)
    def endswith(self, substr):
        """Suffix test via z3 SuffixOf."""
        # NOTE(review): unlike sibling methods, there is no NoTracing() guard
        # here — confirm callers invoke this with tracing in the expected state.
        smt_substr = force_to_smt_sort(substr, SeqBasedSymbolicStr)
        return SymbolicBool(z3.SuffixOf(smt_substr, self.var))
    def find(self, substr, start=None, end=None):
        """Lowest index of `substr` within self[start:end], or -1 (like str.find)."""
        if not isinstance(substr, str):
            raise TypeError
        with NoTracing():
            space = self.statespace
            smt_my_len = z3.Length(self.var)
            if start is None and end is None:
                # Whole-string search; no slicing needed.
                smt_start = z3IntVal(0)
                smt_end = smt_my_len
                smt_str = self.var
                if len(substr) == 0:
                    return 0
            else:
                (smt_start, smt_end) = flip_slice_vs_symbolic_len(
                    space, slice(start, end, None), smt_my_len
                )
                if len(substr) == 0:
                    # Add oddity of CPython. We can find the empty string when over-slicing
                    # off the left side of the string, but not off the right:
                    #   ''.find('', 3, 4) == -1
                    #   ''.find('', -4, -3) == 0
                    if space.smt_fork(smt_start > smt_my_len):
                        return -1
                    elif space.smt_fork(smt_start > 0):
                        return SymbolicInt(smt_start)
                    else:
                        return 0
                (smt_start, smt_end) = clip_range_to_symbolic_len(
                    space, smt_start, smt_end, smt_my_len
                )
                smt_str = z3.SubString(self.var, smt_start, smt_end - smt_start)
            smt_sub = force_to_smt_sort(substr, SeqBasedSymbolicStr)
            # Fork on containment; index is relative to the slice, so add the
            # slice offset back in.
            if space.smt_fork(z3.Contains(smt_str, smt_sub)):
                return SymbolicInt(z3.IndexOf(smt_str, smt_sub, 0) + smt_start)
            else:
                return -1
    def partition(self, sep: str):
        """Split around the first occurrence of `sep`, like str.partition."""
        if not isinstance(sep, str):
            raise TypeError
        if len(sep) == 0:
            raise ValueError
        with NoTracing():
            space = context_statespace()
            smt_str = self.var
            smt_sep = force_to_smt_sort(sep, SeqBasedSymbolicStr)
            if space.smt_fork(z3.Contains(smt_str, smt_sep)):
                uniq = space.uniq()
                # Divide my contents into 4 concatenated parts:
                prefix = SeqBasedSymbolicStr(f"prefix{uniq}")
                match1 = SeqBasedSymbolicStr(
                    f"match1{uniq}"
                )  # the first character of the match
                match_tail = SeqBasedSymbolicStr(f"match_tail{uniq}")
                suffix = SeqBasedSymbolicStr(f"suffix{uniq}")
                space.add(z3.Length(match1.var) == 1)
                space.add(smt_sep == z3.Concat(match1.var, match_tail.var))
                space.add(smt_str == z3.Concat(prefix.var, smt_sep, suffix.var))
                # Ensure this is the *first* occurrence: no further match may
                # start within the tail of the separator or the suffix.
                space.add(
                    z3.Not(z3.Contains(z3.Concat(match_tail.var, suffix.var), smt_sep))
                )
                return (prefix, sep, suffix)
            else:
                return (self, "", "")
    def rfind(self, substr, start=None, end=None) -> Union[int, SymbolicInt]:
        """Highest index of `substr` within self[start:end], or -1 (like str.rfind)."""
        if not isinstance(substr, str):
            raise TypeError
        with NoTracing():
            space = self.statespace
            smt_my_len = z3.Length(self.var)
            if start is None and end is None:
                # Whole-string search; an empty pattern matches at the end.
                smt_start = z3IntVal(0)
                smt_end = smt_my_len
                smt_str = self.var
                if len(substr) == 0:
                    return SymbolicInt(smt_my_len)
            else:
                (smt_start, smt_end) = flip_slice_vs_symbolic_len(
                    space, slice(start, end, None), smt_my_len
                )
                if len(substr) == 0:
                    # Add oddity of CPython. We can find the empty string when over-slicing
                    # off the left side of the string, but not off the right:
                    #   ''.find('', 3, 4) == -1
                    #   ''.find('', -4, -3) == 0
                    if space.smt_fork(smt_start > smt_my_len):
                        return -1
                    elif space.smt_fork(smt_end < 0):
                        return 0
                    elif space.smt_fork(smt_end < smt_my_len):
                        return SymbolicInt(smt_end)
                    else:
                        return SymbolicInt(smt_my_len)
                (smt_start, smt_end) = clip_range_to_symbolic_len(
                    space, smt_start, smt_end, smt_my_len
                )
                smt_str = z3.SubString(self.var, smt_start, smt_end - smt_start)
            smt_sub = force_to_smt_sort(substr, SeqBasedSymbolicStr)
            if space.smt_fork(z3.Contains(smt_str, smt_sub)):
                uniq = space.uniq()
                # Divide my contents into 4 concatenated parts:
                prefix = SeqBasedSymbolicStr(f"prefix{uniq}")
                match1 = SeqBasedSymbolicStr(f"match1{uniq}")
                match_tail = SeqBasedSymbolicStr(f"match_tail{uniq}")
                suffix = SeqBasedSymbolicStr(f"suffix{uniq}")
                space.add(z3.Length(match1.var) == 1)
                space.add(smt_sub == z3.Concat(match1.var, match_tail.var))
                space.add(smt_str == z3.Concat(prefix.var, smt_sub, suffix.var))
                # Ensure this is the *last* occurrence: nothing after the
                # match's first character may contain the pattern.
                space.add(
                    z3.Not(z3.Contains(z3.Concat(match_tail.var, suffix.var), smt_sub))
                )
                return SymbolicInt(smt_start + z3.Length(prefix.var))
            else:
                return -1
def rpartition(self, sep: str):
result = self.rsplit(sep, maxsplit=1)
if len(result) == 1:
return ("", "", self)
elif len(result) == 2:
return (result[0], sep, result[1])
    def startswith(self, substr, start=None, end=None):
        """Prefix test, like str.startswith (also accepts a tuple of prefixes)."""
        if isinstance(substr, tuple):
            return any(self.startswith(s, start, end) for s in substr)
        smt_substr = force_to_smt_sort(substr, SeqBasedSymbolicStr)
        if start is not None or end is not None:
            # Bounded form delegates to a slice of self.
            # TODO: "".startswith("", 1) should be False, not True
            return self[start:end].startswith(substr)
        return SymbolicBool(z3.PrefixOf(smt_substr, self.var))
def buffer_to_byte_seq(obj: object) -> Optional[Sequence[int]]:
    """Best-effort view of `obj` as a sequence of byte values (ints 0-255).

    Returns None when `obj` is not a supported byte-oriented type. Note: a
    plain Sequence is returned unchanged, so callers can detect that case
    by identity with the argument.
    """
    if isinstance(obj, (bytes, bytearray)):
        return list(obj)
    elif isinstance(obj, (array, memoryview)):
        if isinstance(obj, memoryview):
            # Only flat, unsigned-byte-format memoryviews are supported.
            if obj.ndim > 1 or obj.format != "B":
                return None
        else:
            # array.array must hold unsigned bytes ("B" typecode).
            if obj.typecode != "B":
                return None
        return list(obj)
    elif isinstance(obj, SymbolicBytes):
        return obj.inner
    elif isinstance(obj, SymbolicByteArray):
        return obj.inner
    elif isinstance(obj, SymbolicMemoryView):
        return obj._sliced
    elif isinstance(obj, Sequence):
        return obj
    return None
# All concrete byte-oriented builtin/stdlib types, for isinstance checks.
_ALL_BYTES_TYPES = (bytes, bytearray, memoryview, array)
class BytesLike(collections.abc.ByteString, AbcString, CrossHairValue):
    """Shared behavior for symbolic bytes-like values.

    Reproduces CPython's cross-type equality/ordering quirks among bytes,
    bytearray, memoryview and array; subclasses supply the byte contents.
    """

    def __eq__(self, other) -> bool:
        # Equality works across all bytes-like types, by content.
        if not isinstance(other, _ALL_BYTES_TYPES):
            return False
        if len(self) != len(other):
            return False
        return list(self) == list(other)

    def _cmp_op(self, other, op) -> bool:
        # Shared ordered-comparison helper.
        # Surprisingly, none of (bytes, memoryview, array) are ordered-comparable with
        # the other types.
        # Even more surprisingly, bytearray is comparable with all types.
        other_type = type(other)
        if other_type == type(self) or other_type == bytearray:
            return op(tuple(self), tuple(other))
        else:
            raise TypeError

    def __lt__(self, other):
        return self._cmp_op(other, ops.lt)

    def __le__(self, other):
        return self._cmp_op(other, ops.le)

    def __gt__(self, other):
        return self._cmp_op(other, ops.gt)

    def __ge__(self, other):
        return self._cmp_op(other, ops.ge)

    def __bytes__(self) -> bytes:
        # Realize into concrete bytes, iterating with tracing paused.
        with NoTracing():
            return bytes(tracing_iter(self))

    def __radd__(self, left):
        # concrete + symbolic: wrap the concrete left side symbolically, then
        # delegate to the symbolic __add__ with tracing re-enabled.
        with NoTracing():
            if isinstance(left, bytes):
                left = SymbolicBytes(left)
            elif isinstance(left, bytearray):
                left = SymbolicByteArray(left)
            else:
                return NotImplemented
            with ResumedTracing():
                return left.__add__(self)

    def __repr__(self):
        return repr(realize(self))
def _bytes_data_prop(s):
    """Shared `.data` property body: realize `s.inner` into concrete bytes."""
    with NoTracing():
        return bytes(s.inner)
class SymbolicBytes(BytesLike):
    """Immutable symbolic `bytes`, backed by a sequence of (symbolic) ints."""

    def __init__(self, inner):
        with NoTracing():
            inner = buffer_to_byte_seq(inner)
        # NOTE(review): buffer_to_byte_seq may return None for unsupported
        # inputs; unlike SymbolicByteArray, no TypeError is raised here —
        # confirm callers only pass byte-like inputs.
        self.inner = inner

    # TODO: find all uses of str() in AbcString and check SymbolicBytes behavior for
    # those cases.
    # TODO: implement __str__
    data = property(_bytes_data_prop)

    def __ch_realize__(self):
        return bytes(tracing_iter(self.inner))

    def __ch_pytype__(self):
        return bytes

    def __len__(self):
        return self.inner.__len__()

    def __getitem__(self, i: Union[int, slice]):
        # Slices stay symbolic bytes; single indexing yields the int element.
        if isinstance(i, slice):
            return SymbolicBytes(self.inner.__getitem__(i))
        else:
            return self.inner.__getitem__(i)

    def __iter__(self):
        return self.inner.__iter__()

    def __copy__(self):
        return SymbolicBytes(self.inner)

    def __add__(self, other):
        with NoTracing():
            byte_seq = buffer_to_byte_seq(other)
            if byte_seq is other:
                # plain numeric sequences can't be added to byte-like objects
                raise TypeError
            if byte_seq is None:
                # Unknown right-hand type: realize both sides and add concretely.
                return self.__ch_realize__().__add__(realize(other))
            return SymbolicBytes(self.inner + byte_seq)

    def decode(self, encoding="utf-8", errors="strict"):
        # Route through codecs so registered codecs handle the symbolic value.
        return codecs.decode(self, encoding, errors=errors)
def make_byte_string(creator: SymbolicFactory):
    """Symbolic `bytes` factory: each byte is a bounded int in [0, 255]."""
    return SymbolicBytes(SymbolicBoundedIntTuple(0, 255, creator.varname))
class SymbolicByteArray(BytesLike, ShellMutableSequence):  # type: ignore
    """Mutable symbolic `bytearray` (unhashable, like the builtin)."""

    def __init__(self, byte_seq):
        assert not is_tracing()
        byte_seq = buffer_to_byte_seq(byte_seq)
        if byte_seq is None:
            raise TypeError
        super().__init__(byte_seq)

    __hash__ = None  # type: ignore

    data = property(_bytes_data_prop)

    def __ch_realize__(self):
        return bytearray(tracing_iter(self.inner))

    def __ch_pytype__(self):
        return bytearray

    def __len__(self):
        return self.inner.__len__()

    def __getitem__(self, key):
        # Slices stay symbolic bytearrays; single indexing yields the element.
        byte_seq_return = self.inner.__getitem__(key)
        if isinstance(key, slice):
            with NoTracing():
                return SymbolicByteArray(byte_seq_return)
        else:
            return byte_seq_return

    def _cmp_op(self, other, op) -> bool:
        # bytearray compares with every bytes-like type (unlike the others).
        if isinstance(other, _ALL_BYTES_TYPES):
            return op(tuple(self), tuple(other))
        else:
            raise TypeError

    def __add__(self, other):
        with NoTracing():
            byte_seq = buffer_to_byte_seq(other)
            if byte_seq is other:
                # plain numeric sequences can't be added to byte-like objects
                raise TypeError
            if byte_seq is None:
                raise TypeError
            with ResumedTracing():
                byte_seq = self.inner + byte_seq
            return SymbolicByteArray(byte_seq)

    def _spawn(self, items: Sequence) -> ShellMutableSequence:
        # Hook for ShellMutableSequence: keep derived sequences symbolic.
        return SymbolicByteArray(items)

    def decode(self, encoding="utf-8", errors="strict"):
        # Route through codecs so registered codecs handle the symbolic value.
        return codecs.decode(self, encoding, errors=errors)
class SymbolicMemoryView(BytesLike):
format = "B"
itemsize = 1
ndim = 1
strides = (1,)
suboffsets = ()
c_contiguous = True
f_contiguous = True
contiguous = True
def __init__(self, obj):
assert not is_tracing()
if not isinstance(obj, (_ALL_BYTES_TYPES, BytesLike)):
raise TypeError
objlen = obj.__len__()
self.obj = obj
self.nbytes = objlen
self.shape = (objlen,)
self.readonly = isinstance(obj, bytes)
self._sliced = SliceView(obj, 0, objlen)
def __ch_realize__(self):
sliced = self._sliced
obj, start, stop = self.obj, sliced.start, sliced.stop
self.obj = obj
return memoryview(realize(obj))[realize(start) : realize(stop)]
def __ch_pytype__(self):
return memoryview
def _cmp_op(self, other, op) -> bool:
# memoryview is the only bytes-like type that isn't ordered-comparable with
# instances of its own type. But it is comparable with bytearrays!
if isinstance(other, bytearray):
return op(tuple(self), tuple(other))
else:
raise TypeError
def __hash__(self):
return | |
# WARNING: This is an automatically generated file and will be overwritten
# by CellBlender on the next model export.
import mcell as m
# ---- Sphere2 ----
Sphere2_vertex_list = [
[0.000573527067899704, 1.1245698928833, -0.943746387958527],
[0.724180817604065, 0.598844587802887, -0.39096587896347],
[-0.27581450343132, 0.273920655250549, -0.390966206789017],
[-0.893852710723877, 1.1245698928833, -0.390961974859238],
[-0.27581450343132, 1.97521913051605, -0.390966206789017],
[0.724180817604065, 1.65029525756836, -0.39096587896347],
[0.276961535215378, 0.273920655250549, 0.503473460674286],
[-0.723033785820007, 0.598844587802887, 0.503473162651062],
[-0.723033785820007, 1.65029525756836, 0.503473162651062],
[0.276961535215378, 1.97521913051605, 0.503473460674286],
[0.894999742507935, 1.1245698928833, 0.503469228744507],
[0.000573527067899704, 1.1245698928833, 1.05625367164612],
[-0.232247993350029, 0.40800678730011, -0.601265609264374],
[-0.161882027983665, 0.624574661254883, -0.794400811195374],
[-0.0770331174135208, 0.885717153549194, -0.911696076393127],
[0.203754439949989, 0.976952075958252, -0.911695957183838],
[0.425896197557449, 0.815558552742004, -0.794400572776794],
[0.610120177268982, 0.681713461875916, -0.601265251636505],
[0.53251439332962, 0.442857503890991, -0.446048051118851],
[0.26344233751297, 0.315558254718781, -0.469484001398087],
[-0.0290657468140125, 0.260385692119598, -0.44604828953743],
[0.813302755355835, 1.41980767250061, -0.446046978235245],
[0.85122138261795, 1.1245698928833, -0.469482272863388],
[0.813302755355835, 0.829332172870636, -0.446046978235245],
[0.203754439949989, 1.27218770980835, -0.911695957183838],
[0.425896197557449, 1.4335812330246, -0.794400572776794],
[0.610120177268982, 1.56742632389069, -0.601265251636505],
[-0.752868175506592, 1.1245698928833, -0.601261138916016],
[-0.525156259536743, 1.1245698928833, -0.794398069381714],
[-0.250573456287384, 1.1245698928833, -0.911695241928101],
[-0.483397901058197, 0.408005356788635, -0.446048051118851],
[-0.687615871429443, 0.624572992324829, -0.469482570886612],
[-0.830477118492126, 0.885716497898102, -0.446045011281967],
[-0.232247993350029, 1.84113299846649, -0.601265609264374],
[-0.161882027983665, 1.62456512451172, -0.794400811195374],
[-0.0770331174135208, 1.36342263221741, -0.911696076393127],
[-0.830477118492126, 1.36342334747314, -0.446045011281967],
[-0.687615871429443, 1.62456679344177, -0.469482570886612],
[-0.483397901058197, 1.84113442897797, -0.446048051118851],
[-0.0290657468140125, 1.98875403404236, -0.44604828953743],
[0.26344233751297, 1.93358159065247, -0.469484001398087],
[0.53251439332962, 1.80628228187561, -0.446048051118851],
[0.957199275493622, 0.976951539516449, 0.307403087615967],
[0.951631367206573, 0.815557241439819, 0.056253619492054],
[0.861271142959595, 0.68171238899231, -0.194897204637527],
[0.861271142959595, 1.56742739677429, -0.194897204637527],
[0.951631367206573, 1.43358254432678, 0.056253656744957],
[0.957199275493622, 1.2721883058548, 0.307403087615967],
[0.155788630247116, 0.169147849082947, 0.307405173778534],
[0.000573527067899704, 0.124569952487946, 0.056253619492054],
[-0.154641568660736, 0.169147849082947, -0.194897890090942],
[0.687732100486755, 0.442854583263397, -0.194898277521133],
[0.588359117507935, 0.315553188323975, 0.056253656744957],
[0.436580330133438, 0.260381996631622, 0.307405680418015],
[-0.860124111175537, 0.68171238899231, 0.307404488325119],
[-0.950484335422516, 0.815557241439819, 0.056253619492054],
[-0.956052243709564, 0.976951539516449, -0.194895803928375],
[-0.43543329834938, 0.260381996631622, -0.194898396730423],
[-0.587212085723877, 0.31555312871933, 0.056253656744957],
[-0.686585009098053, 0.442854583263397, 0.307405531406403],
[-0.686585068702698, 1.80628514289856, 0.307405561208725],
[-0.587212085723877, 1.93358659744263, 0.056253619492054],
[-0.43543329834938, 1.98875784873962, -0.194898396730423],
[-0.956052243709564, 1.2721883058548, -0.194895803928375],
[-0.950484335422516, 1.43358254432678, 0.056253656744957],
[-0.860124111175537, 1.56742739677429, 0.307404488325119],
[0.436580330133438, 1.98875784873962, 0.307405680418015],
[0.588359117507935, 1.93358659744263, 0.056253619492054],
[0.687732040882111, 1.80628514289856, -0.194898247718811],
[-0.154641568660736, 2.07999181747437, -0.194897890090942],
[0.000573527067899704, 2.1245698928833, 0.056253656744957],
[0.155788630247116, 2.07999181747437, 0.307405173778534],
[0.831624150276184, 0.885716497898102, 0.558552265167236],
[0.688762903213501, 0.624572992324829, 0.581989824771881],
[0.484544932842255, 0.408005356788635, 0.55855530500412],
[0.0302128009498119, 0.260385692119598, 0.558555543422699],
[-0.262295305728912, 0.315558195114136, 0.581991255283356],
[-0.531367361545563, 0.442857444286346, 0.55855530500412],
[-0.812155723571777, 0.829332172870636, 0.558554232120514],
[-0.850074350833893, 1.1245698928833, 0.581989526748657],
[-0.812155723571777, 1.41980767250061, 0.558554232120514],
[-0.531367361545563, 1.80628228187561, 0.55855530500412],
[-0.262295305728912, 1.93358159065247, 0.581991255283356],
[0.0302128009498119, 1.98875403404236, 0.558555543422699],
[0.484544932842255, 1.84113442897797, 0.55855530500412],
[0.688762903213501, 1.62456679344177, 0.581989824771881],
[0.831624150276184, 1.36342334747314, 0.558552265167236],
[0.0781801640987396, 0.885717153549194, 1.02420330047607],
[0.163029089570045, 0.624574661254883, 0.90690803527832],
[0.233395054936409, 0.40800678730011, 0.713772833347321],
[0.754015207290649, 1.1245698928833, 0.713768362998962],
[0.526303291320801, 1.1245698928833, 0.906905293464661],
[0.251720488071442, 1.1245698928833, 1.02420246601105],
[-0.202607378363609, 0.976952075958252, 1.02420318126678],
[-0.424749165773392, 0.815558552742004, 0.906907796859741],
[-0.608973145484924, 0.681713461875916, 0.713772475719452],
[-0.202607378363609, 1.27218770980835, 1.02420318126678],
[-0.424749165773392, 1.4335812330246, 0.906907796859741],
[-0.608973145484924, 1.56742632389069, 0.713772475719452],
[0.0781801640987396, 1.36342263221741, 1.02420330047607],
[0.163029089570045, 1.62456512451172, 0.90690803527832],
[0.233395054936409, 1.84113299846649, 0.713772833347321],
[0.362373828887939, 1.38743281364441, 0.950682818889618],
[0.638767063617706, 1.3874340057373, 0.779863655567169],
[0.447782665491104, 1.65029811859131, 0.779865205287933],
[-0.137623727321625, 1.54988932609558, 0.950683534145355],
[-0.0522160455584526, 1.81275486946106, 0.779865980148315],
[-0.361230164766312, 1.71234834194183, 0.779865860939026],
[-0.446636378765106, 1.1245698928833, 0.950682699680328],
[-0.670243084430695, 1.28702688217163, 0.779864490032196],
[-0.670243084430695, 0.962112843990326, 0.779864490032196],
[-0.137623757123947, 0.699250400066376, 0.950683534145355],
[-0.361230164766312, 0.536791443824768, 0.779865860939026],
[-0.0522160455584526, 0.436384975910187, 0.779865980148315],
[0.362373828887939, 0.861706972122192, 0.950682818889618],
[0.447782725095749, 0.598841726779938, 0.779865264892578],
[0.638767063617706, 0.861705780029297, 0.779863655567169],
[0.86237770318985, 1.54989182949066, 0.332649856805801],
[0.80959278345108, 1.71235203742981, 0.0562536381185055],
[0.671394050121307, 1.81275904178619, 0.332651048898697],
[-0.137624993920326, 2.07562494277954, 0.332650721073151],
[-0.308442950248718, 2.07562661170959, 0.0562531463801861],
[-0.446641534566879, 1.97521865367889, 0.332650899887085],
[-0.946639716625214, 1.28702747821808, 0.332649528980255],
[-0.999426424503326, 1.12456953525543, 0.0562542602419853],
[-0.94663941860199, 0.962112009525299, 0.33265021443367],
[-0.446642249822617, 0.273921489715576, 0.332650482654572],
[-0.308443665504456, 0.173513472080231, 0.0562528520822525],
[-0.137625724077225, 0.173514783382416, 0.332650423049927],
[0.671393871307373, 0.43638014793396, 0.332649797201157],
[0.809592187404633, 0.536786913871765, 0.056251734495163],
[0.862377643585205, 0.6992467045784, 0.332648068666458],
[0.309590727090836, 2.07562637329102, 0.0562536381185055],
[0.44778910279274, 1.97521829605103, -0.220144063234329],
[0.13877260684967, 2.07562494277954, -0.220143884420395],
[-0.808444917201996, 1.71235322952271, 0.0562536381185055],
[-0.670245468616486, 1.81276059150696, -0.220143646001816],
[-0.861229598522186, 1.54989385604858, -0.220142692327499],
[-0.808444917201996, 0.536786615848541, 0.0562536381185055],
[-0.861229658126831, 0.699245929718018, -0.220142692327499],
[-0.67024552822113, 0.436379194259644, -0.220143646001816],
[0.309590727090836, 0.173513472080231, 0.0562536381185055],
[0.138772621750832, 0.17351496219635, -0.220143884420395],
[0.447789132595062, 0.273921608924866, -0.220144063234329],
[1.00057351589203, 1.1245698928833, 0.0562536381185055],
[0.947786748409271, 0.962112367153168, -0.220142394304276],
[0.947786748409271, 1.28702747821808, -0.220142394304276],
[0.362376987934113, 1.71234917640686, -0.667357981204987],
[0.138770326972008, 1.54989123344421, -0.838175475597382],
[0.0533626228570938, 1.81275618076324, -0.667357325553894],
[-0.446637064218521, 1.65029692649841, -0.667357921600342],
[-0.361227691173553, 1.38743245601654, -0.838175296783447],
[-0.637621223926544, 1.38743269443512, -0.667355835437775],
[-0.63762104511261, 0.86170619726181, -0.66735565662384],
[-0.361227810382843, 0.861705541610718, -0.838174760341644],
[-0.446637004613876, 0.59884124994278, -0.667356848716736],
[0.671390533447266, 1.28702664375305, -0.66735702753067],
[0.671391069889069, 0.962112128734589, -0.667356431484222],
[0.44778448343277, 1.12456917762756, -0.838174879550934],
[0.0533638782799244, 0.436384558677673, -0.667358160018921],
[0.138772219419479, 0.699249267578125, -0.838175535202026],
[0.362378478050232, 0.536790728569031, -0.667357325553894]
] # Sphere2_vertex_list
Sphere2_wall_list = [
[0, 15, 14],
[1, 17, 23],
[0, 14, 29],
[0, 29, 35],
[0, 35, 24],
[1, 23, 44],
[2, 20, 50],
[3, 32, 56],
[4, 38, 62],
[5, 41, 68],
[1, 44, 51],
[2, 50, 57],
[3, 56, 63],
[4, 62, 69],
[5, 68, 45],
[6, 74, 89],
[7, 77, 95],
[8, 80, 98],
[9, 83, 101],
[10, 86, 90],
[92, 99, 11],
[91, 102, 92],
[90, 103, 91],
[92, 102, 99],
[102, 100, 99],
[91, 103, 102],
[103, 104, 102],
[102, 104, 100],
[104, 101, 100],
[90, 86, 103],
[86, 85, 103],
[103, 85, 104],
[85, 84, 104],
[104, 84, 101],
[84, 9, 101],
[99, 96, 11],
[100, 105, 99],
[101, 106, 100],
[99, 105, 96],
[105, 97, 96],
[100, 106, 105],
[106, 107, 105],
[105, 107, 97],
[107, 98, 97],
[101, 83, 106],
[83, 82, 106],
[106, 82, 107],
[82, 81, 107],
[107, 81, 98],
[81, 8, 98],
[96, 93, 11],
[97, 108, 96],
[98, 109, 97],
[96, 108, 93],
[108, 94, 93],
[97, 109, 108],
[109, 110, 108],
[108, 110, 94],
[110, 95, 94],
[98, 80, 109],
[80, 79, 109],
[109, 79, 110],
[79, 78, 110],
[110, 78, 95],
[78, 7, 95],
[93, 87, 11],
[94, 111, 93],
[95, 112, 94],
[93, 111, 87],
[111, 88, 87],
[94, 112, 111],
[112, 113, 111],
[111, 113, 88],
[113, 89, 88],
[95, 77, 112],
[77, 76, 112],
[112, 76, 113],
[76, 75, 113],
[113, 75, 89],
[75, 6, 89],
[87, 92, 11],
[88, 114, 87],
[89, 115, 88],
[87, 114, 92],
[114, 91, 92],
[88, 115, 114],
[115, 116, 114],
[114, 116, 91],
[116, 90, 91],
[89, 74, 115],
[74, 73, 115],
[115, 73, 116],
[73, 72, 116],
[116, 72, 90],
[72, 10, 90],
[47, 86, 10],
[46, 117, 47],
[45, 118, 46],
[47, 117, 86],
[117, 85, 86],
[46, 118, 117],
[118, 119, 117],
[117, 119, 85],
[119, 84, 85],
[45, 68, 118],
[68, 67, 118],
[118, 67, 119],
[67, 66, 119],
[119, 66, 84],
[66, 9, 84],
[71, 83, 9],
[70, 120, 71],
[69, 121, 70],
[71, 120, 83],
[120, 82, 83],
[70, 121, 120],
[121, 122, 120],
[120, 122, 82],
[122, 81, 82],
[69, 62, 121],
[62, 61, 121],
[121, 61, 122],
[61, 60, 122],
[122, 60, 81],
[60, 8, 81],
[65, 80, 8],
[64, 123, 65],
[63, 124, 64],
[65, 123, 80],
[123, 79, 80],
[64, 124, 123],
[124, 125, 123],
[123, 125, 79],
[125, 78, 79],
[63, 56, 124],
[56, 55, 124],
[124, 55, 125],
[55, 54, 125],
[125, 54, 78],
[54, 7, 78],
[59, 77, 7],
[58, 126, 59],
[57, 127, 58],
[59, 126, 77],
[126, 76, 77],
[58, 127, 126],
[127, 128, 126],
[126, 128, 76],
[128, 75, 76],
[57, 50, 127],
[50, 49, | |
self.params.retry_delay,
self._on_connect_timer)
else:
# TODO connect must not call failure callback from constructor. The
# current behavior is error-prone, because the user code may get a
# callback upon socket connection failure before user's other state
# may be sufficiently initialized. Constructors must either succeed
# or raise an exception. To be forward-compatible with failure
# reporting from fully non-blocking connection establishment,
# connect() should set INIT state and schedule a 0-second timer to
# continue the rest of the logic in a private method. The private
# method should use itself instead of connect() as the callback for
# scheduling retries.
# TODO This should use _on_terminate for consistent behavior/cleanup
self.callbacks.process(0, self.ON_CONNECTION_ERROR, self, self,
error)
self.remaining_connection_attempts = self.params.connection_attempts
self._set_connection_state(self.CONNECTION_CLOSED)
@staticmethod
def _negotiate_integer_value(client_value, server_value):
"""Negotiates two values. If either of them is 0 or None,
returns the other one. If both are positive integers, returns the
smallest one.
:param int client_value: The client value
:param int server_value: The server value
:rtype: int
"""
if client_value is None:
client_value = 0
if server_value is None:
server_value = 0
# this is consistent with how Java client and Bunny
# perform negotiation, see pika/pika#874
if client_value == 0 or server_value == 0:
val = max(client_value, server_value)
else:
val = min(client_value, server_value)
return val
@staticmethod
def _tune_heartbeat_timeout(client_value, server_value):
""" Determine heartbeat timeout per AMQP 0-9-1 rules
Per https://www.rabbitmq.com/resources/specs/amqp0-9-1.pdf,
> Both peers negotiate the limits to the lowest agreed value as follows:
> - The server MUST tell the client what limits it proposes.
> - The client responds and **MAY reduce those limits** for its
connection
If the client specifies a value, it always takes precedence.
:param client_value: None to accept server_value; otherwise, an integral
number in seconds; 0 (zero) to disable heartbeat.
:param server_value: integral value of the heartbeat timeout proposed by
broker; 0 (zero) to disable heartbeat.
:returns: the value of the heartbeat timeout to use and return to broker
"""
if client_value is None:
# Accept server's limit
timeout = server_value
else:
timeout = client_value
return timeout
    def _on_connection_tune(self, method_frame):
        """Once the Broker sends back a Connection.Tune, we will set our tuning
        variables that have been returned to us and kick off the Heartbeat
        monitor if required, send our TuneOk and then the Connection.Open rpc
        call on channel 0.

        :param pika.frame.Method method_frame: The frame received

        """
        self._set_connection_state(self.CONNECTION_TUNE)

        # Get our max channels, frames and heartbeat interval
        self.params.channel_max = Connection._negotiate_integer_value(
            self.params.channel_max,
            method_frame.method.channel_max)
        self.params.frame_max = Connection._negotiate_integer_value(
            self.params.frame_max,
            method_frame.method.frame_max)

        if callable(self.params.heartbeat):
            # The heartbeat parameter may be a callable that computes the
            # desired timeout from the connection and the broker's proposal.
            ret_heartbeat = self.params.heartbeat(self, method_frame.method.heartbeat)
            if ret_heartbeat is None or callable(ret_heartbeat):
                # Enforce callback-specific restrictions on callback's return value
                raise TypeError('heartbeat callback must not return None '
                                'or callable, but got %r' % (ret_heartbeat,))

            # Leave it to the heartbeat setter to deal with the rest of the
            # validation
            self.params.heartbeat = ret_heartbeat

        # Negotiate heartbeat timeout (an explicit client value wins)
        self.params.heartbeat = self._tune_heartbeat_timeout(
            client_value=self.params.heartbeat,
            server_value=method_frame.method.heartbeat)

        # Calculate the maximum pieces for body frames
        self._body_max_length = self._get_body_frame_max_length()

        # Create a new heartbeat checker if needed
        self.heartbeat = self._create_heartbeat_checker()

        # Send the TuneOk response with what we've agreed upon
        self._send_connection_tune_ok()

        # Send the Connection.Open RPC call for the vhost
        self._send_connection_open()
def _on_data_available(self, data_in):
"""This is called by our Adapter, passing in the data from the socket.
As long as we have buffer try and map out frame data.
:param str data_in: The data that is available to read
"""
self._append_frame_buffer(data_in)
while self._frame_buffer:
consumed_count, frame_value = self._read_frame()
if not frame_value:
return
self._trim_frame_buffer(consumed_count)
self._process_frame(frame_value)
    def _on_terminate(self, reason_code, reason_text):
        """Terminate the connection and notify registered ON_CONNECTION_ERROR
        and/or ON_CONNECTION_CLOSED callbacks

        :param integer reason_code: either IETF RFC 821 reply code for
            AMQP-level closures or a value from `InternalCloseReasons` for
            internal causes, such as socket errors
        :param str reason_text: human-readable text message describing the
            error
        """
        LOGGER.info(
            'Disconnected from RabbitMQ at %s:%i (%s): %s',
            self.params.host, self.params.port, reason_code,
            reason_text)

        if not isinstance(reason_code, numbers.Integral):
            raise TypeError('reason_code must be an integer, but got %r'
                            % (reason_code,))

        # Stop the heartbeat checker if it exists
        self._remove_heartbeat()

        # Remove connection management callbacks
        # TODO This call was moved here verbatim from legacy code and the
        # following doesn't seem to be right: `Connection.Open` here is
        # unexpected, we don't appear to ever register it, and the broker
        # shouldn't be sending `Connection.Open` to us, anyway.
        self._remove_callbacks(0, [spec.Connection.Close, spec.Connection.Start,
                                   spec.Connection.Open])

        if self.params.blocked_connection_timeout is not None:
            self._remove_callbacks(0, [spec.Connection.Blocked,
                                       spec.Connection.Unblocked])

        # Close the socket
        self._adapter_disconnect()

        # Determine whether this was an error during connection setup and,
        # if so, which setup-specific exception describes it
        connection_error = None

        if self.connection_state == self.CONNECTION_PROTOCOL:
            LOGGER.error('Incompatible Protocol Versions')
            connection_error = exceptions.IncompatibleProtocolError(
                reason_code,
                reason_text)
        elif self.connection_state == self.CONNECTION_START:
            LOGGER.error('Connection closed while authenticating indicating a '
                         'probable authentication error')
            connection_error = exceptions.ProbableAuthenticationError(
                reason_code,
                reason_text)
        elif self.connection_state == self.CONNECTION_TUNE:
            LOGGER.error('Connection closed while tuning the connection '
                         'indicating a probable permission error when '
                         'accessing a virtual host')
            connection_error = exceptions.ProbableAccessDeniedError(
                reason_code,
                reason_text)
        elif self.connection_state not in [self.CONNECTION_OPEN,
                                           self.CONNECTION_CLOSED,
                                           self.CONNECTION_CLOSING]:
            LOGGER.warning('Unexpected connection state on disconnect: %i',
                           self.connection_state)

        # Transition to closed state
        self._set_connection_state(self.CONNECTION_CLOSED)

        # Inform our channel proxies. Iterate over a snapshot of the keys
        # and re-check membership, since _on_close_meta may mutate
        # self._channels while we iterate.
        for channel in dictkeys(self._channels):
            if channel not in self._channels:
                continue
            # pylint: disable=W0212
            self._channels[channel]._on_close_meta(reason_code, reason_text)

        # Inform interested parties
        if connection_error is not None:
            LOGGER.error('Connection setup failed due to %r', connection_error)
            self.callbacks.process(0,
                                   self.ON_CONNECTION_ERROR,
                                   self, self,
                                   connection_error)

        self.callbacks.process(0, self.ON_CONNECTION_CLOSED, self, self,
                               reason_code, reason_text)

        # Reset connection properties
        self._init_connection_state()
def _process_callbacks(self, frame_value):
"""Process the callbacks for the frame if the frame is a method frame
and if it has any callbacks pending.
:param pika.frame.Method frame_value: The frame to process
:rtype: bool
"""
if (self._is_method_frame(frame_value) and
self._has_pending_callbacks(frame_value)):
self.callbacks.process(frame_value.channel_number, # Prefix
frame_value.method, # Key
self, # Caller
frame_value) # Args
return True
return False
def _process_frame(self, frame_value):
"""Process an inbound frame from the socket.
:param frame_value: The frame to process
:type frame_value: pika.frame.Frame | pika.frame.Method
"""
# Will receive a frame type of -1 if protocol version mismatch
if frame_value.frame_type < 0:
return
# Keep track of how many frames have been read
self.frames_received += 1
# Process any callbacks, if True, exit method
if self._process_callbacks(frame_value):
return
# If a heartbeat is received, update the checker
if isinstance(frame_value, frame.Heartbeat):
if self.heartbeat:
self.heartbeat.received()
else:
LOGGER.warning('Received heartbeat frame without a heartbeat '
'checker')
# If the frame has a channel number beyond the base channel, deliver it
elif frame_value.channel_number > 0:
self._deliver_frame_to_channel(frame_value)
    def _read_frame(self):
        """Try and read from the frame buffer and decode a frame.

        Returns the number of bytes consumed along with the decoded frame;
        the frame is falsy when the buffer does not yet hold a complete one.

        :rtype tuple: (int, pika.frame.Frame)

        """
        return frame.decode_frame(self._frame_buffer)
def _remove_callbacks(self, channel_number, method_classes):
"""Remove the callbacks for the specified channel number and list of
method frames.
:param int channel_number: The channel number to remove the callback on
:param sequence method_classes: The method classes (derived from
`pika.amqp_object.Method`) for the callbacks
"""
for method_cls in method_classes:
self.callbacks.remove(str(channel_number), method_cls)
def _rpc(self, channel_number, method,
callback=None,
acceptable_replies=None):
"""Make an RPC call for the given callback, channel number and method.
acceptable_replies lists out what responses we'll process from the
server with the specified callback.
:param int channel_number: The channel number for the RPC call
:param pika.amqp_object.Method method: The method frame to call
:param method callback: The callback for the RPC response
:param list acceptable_replies: The replies this RPC call expects
"""
# Validate that acceptable_replies is a list or None
if acceptable_replies and not isinstance(acceptable_replies, list):
raise TypeError('acceptable_replies should be list or None')
# Validate the callback is callable
if callback is not None:
if not callable(callback):
raise TypeError('callback should be None, function or method.')
for reply in acceptable_replies:
self.callbacks.add(channel_number, reply, callback)
# Send the rpc call to RabbitMQ
self._send_method(channel_number, method)
    def _send_connection_close(self, reply_code, reply_text):
        """Send a Connection.Close method frame.

        Expects Connection.CloseOk in reply, handled by
        `_on_connection_close_ok`.

        :param int reply_code: The reason for the close
        :param str reply_text: The text reason for the close

        """
        # The trailing (0, 0) are the class-id/method-id fields of
        # Connection.Close; NOTE(review): presumably 0 means "not blamed on a
        # specific method" per the AMQP spec -- confirm.
        self._rpc(0, spec.Connection.Close(reply_code, reply_text, 0, 0),
                  self._on_connection_close_ok, [spec.Connection.CloseOk])
    def _send_connection_open(self):
        """Send a Connection.Open frame for the configured virtual host.

        Expects Connection.OpenOk in reply, handled by `_on_connection_open`.
        """
        self._rpc(0, spec.Connection.Open(self.params.virtual_host,
                                          insist=True),
                  self._on_connection_open, [spec.Connection.OpenOk])
def _send_connection_start_ok(self, authentication_type, response):
"""Send a Connection.StartOk frame
:param str authentication_type: The auth type value
:param str response: The encoded value to send
"""
self._send_method(0,
spec.Connection.StartOk(self._client_properties,
| |
< to.opidx
def points_above_at(self, at):
return self.at.opidx < at.opidx
def i_points_above_at(self, idx):
return self.at.opidx < idx
    def points_to(self, to):
        """True if this edge's target node is *to*."""
        return self.to == to
    def points_at(self, at):
        """True if this edge's source node is *at*."""
        return self.at == at
    def add_dependency(self, at, to, arg):
        # NOTE(review): the `to` parameter is unused; only (at, arg) is
        # recorded. Presumably `to` is implied by the edge itself -- confirm.
        self.args.append((at,arg))
def set_failarg(self, value):
self.failarg = value
if self.backward:
self.backward.failarg = value
    def is_failarg(self):
        # Returns the stored failarg value itself (truthy when set), not a
        # strict bool, despite the name.
        return self.failarg
    def reverse_direction(self, ref):
        """True if *ref* is this edge's target node, i.e. the edge is being
        walked against its direction.
        """
        return self.to == ref
    def __repr__(self):
        # e.g. "Dep(T[3] -> T[7], arg: [...])" using source/target op indices
        return 'Dep(T[%d] -> T[%d], arg: %s)' \
                % (self.at.opidx, self.to.opidx, self.args)
class DefTracker(object):
    """Tracks, per variable, the chain of nodes that (re)define it.

    Used while building the dependency graph to answer "which earlier
    operation defined this argument?", optionally disambiguated by the
    memory cell being accessed.
    """
    def __init__(self, graph):
        self.graph = graph
        # arg -> list of (node, argcell) definitions, oldest first
        self.defs = {}
        # side-effecting nodes recorded since the last guard
        self.non_pure = []
    def add_non_pure(self, node):
        self.non_pure.append(node)
    def define(self, arg, node, argcell=None):
        """Record that *node* defines *arg* (constants are never defined)."""
        if isinstance(arg, Const):
            return
        if arg in self.defs:
            self.defs[arg].append((node,argcell))
        else:
            self.defs[arg] = [(node,argcell)]
    def redefinitions(self, arg):
        """Yield every node that (re)defines *arg*, oldest first."""
        for _def in self.defs[arg]:
            yield _def[0]
    def is_defined(self, arg):
        return arg in self.defs
    def definition(self, arg, node=None, argcell=None):
        """Return the defining node of *arg*, or None if unknown.

        With *argcell*, walk the chain newest-to-oldest and return the first
        definition whose memory reference may alias *node*'s (or whose
        reference is unknown).
        """
        if arg.is_constant():
            return None
        def_chain = self.defs.get(arg,None)
        if not def_chain:
            return None
        if not argcell:
            # no cell information: the latest definition wins
            return def_chain[-1][0]
        else:
            assert node is not None
            i = len(def_chain)-1
            try:
                mref = node.memory_ref
                while i >= 0:
                    def_node = def_chain[i][0]
                    oref = def_node.memory_ref
                    if oref is not None and mref.alias(oref):
                        return def_node
                    elif oref is None:
                        return def_node
                    i -= 1
                return None
            except KeyError:
                # when a key error is raised, this means
                # no information is available, safe default
                pass
            return def_chain[-1][0]
    def depends_on_arg(self, arg, to, argcell=None):
        """Add a def-use edge from *arg*'s definition to node *to*."""
        try:
            at = self.definition(arg, to, argcell)
            if at is None:
                return
            at.edge_to(to, arg)
        except KeyError:
            if not we_are_translated():
                if not isinstance(arg, Const):
                    assert False, "arg %s must be defined" % arg
class DependencyGraph(object):
    """ A graph that represents one of the following dependencies:
          * True dependency
          * Anti dependency (not present in SSA traces)
          * Output dependency (not present in SSA traces)

        Traces in RPython are not in SSA form when it comes to complex
        object modification such as array or object side effects.
        Representation is an adjacency list. The number of edges between the
        vertices is expected to be small.

        Note that adjacency lists order their dependencies. They are ordered
        by the target instruction they point to if the instruction is
        a dependency.

        memory_refs: a dict that contains indices of memory references
        (load,store,getarrayitem,...). If none provided, the construction
        is conservative. It will never dismiss dependencies of two
        modifications of one array even if the indices can never point to
        the same element.
    """
    def __init__(self, loop):
        """Build the dependency graph for *loop* (label, operations, jump)."""
        self.loop = loop
        self.label = Node(loop.label, 0)
        # jit_debug operations carry no dependencies and are dropped
        self.nodes = [ Node(op,0) for op in loop.operations if not op.is_jit_debug() ]
        # re-index: 0 is the label, len(nodes)+1 is the jump
        for i,node in enumerate(self.nodes):
            node.opidx = i+1
        self.inodes = [] # imaginary nodes
        self.jump = Node(loop.jump, len(self.nodes)+1)
        self.invariant_vars = {}
        self.update_invariant_vars()
        self.memory_refs = {}
        self.schedulable_nodes = []
        self.index_vars = {}
        self.comparison_vars = {}
        self.guards = []
        self.build_dependencies()
    def getnode(self, i):
        """Return the i-th operation node (excludes the label/jump nodes)."""
        return self.nodes[i]
def imaginary_node(self, label):
node = ImaginaryNode(label)
self.inodes.append(node)
return node
def update_invariant_vars(self):
label_op = self.label.getoperation()
jump_op = self.jump.getoperation()
assert label_op.numargs() == jump_op.numargs()
for i in range(label_op.numargs()):
label_box = label_op.getarg(i)
jump_box = jump_op.getarg(i)
if label_box == jump_box:
self.invariant_vars[label_box] = None
    def box_is_invariant(self, box):
        # invariant boxes were collected by update_invariant_vars()
        return box in self.invariant_vars
    def build_dependencies(self):
        """ This is basically building the definition-use chain and saving this
            information in a graph structure. This is the same as calculating
            the reaching definitions and the 'looking back' whenever it is used.

            Write After Read, Write After Write dependencies are not possible,
            the operations are in SSA form
        """
        tracker = DefTracker(self)
        #
        # NOTE(review): label_pos/jump_pos below are unused locals -- confirm
        # whether they are leftovers.
        label_pos = 0
        jump_pos = len(self.nodes)-1
        intformod = IntegralForwardModification(self.memory_refs, self.index_vars,
                                                self.comparison_vars, self.invariant_vars)
        # pass 1
        for i,node in enumerate(self.nodes):
            op = node.op
            # priorities influence scheduling: pure ops 1, guards 2
            if op.is_always_pure():
                node.setpriority(1)
            if op.is_guard():
                node.setpriority(2)
            # the label operation defines all operations at the
            # beginning of the loop
            intformod.inspect_operation(op,node)
            # definition of a new variable
            if op.type != 'v':
                # In SSA form. Modifications get a new variable
                tracker.define(op, node)
            # usage of defined variables
            if op.is_always_pure() or op.is_final():
                # normal case every arguments definition is set
                for arg in op.getarglist():
                    tracker.depends_on_arg(arg, node)
            elif op.is_guard():
                if node.exits_early():
                    pass
                else:
                    # consider cross iterations?
                    # chain guards in trace order, and order each guard after
                    # every non-pure op seen since the previous guard
                    if len(self.guards) > 0:
                        last_guard = self.guards[-1]
                        last_guard.edge_to(node, failarg=True, label="guardorder")
                    for nonpure in tracker.non_pure:
                        nonpure.edge_to(node, failarg=True, label="nonpure")
                    tracker.non_pure = []
                self.guards.append(node)
                self.build_guard_dependencies(node, tracker)
            else:
                self.build_non_pure_dependencies(node, tracker)
    def guard_argument_protection(self, guard_node, tracker):
        """ the parameters the guard protects are an indicator for
            dependencies. Consider the example:
            i3 = ptr_eq(p1,p2)
            guard_true(i3) [...]
            guard_true|false are exceptions because they do not directly
            protect the arguments, but a comparison function does.
        """
        guard_op = guard_node.getoperation()
        guard_opnum = guard_op.getopnum()
        for arg in guard_op.getarglist():
            if not arg.is_constant() and arg.type not in ('i','f'):
                # redefine pointers, consider the following example
                # guard_nonnull(r1)
                # i1 = getfield(r1, ...)
                # guard must be emitted before the getfield, thus
                # redefine r1 at guard_nonnull
                tracker.define(arg, guard_node)
        if guard_opnum == rop.GUARD_NOT_FORCED_2:
            # must be emitted before finish, thus delayed the longest
            guard_node.setpriority(-10)
        elif guard_opnum in (rop.GUARD_OVERFLOW, rop.GUARD_NO_OVERFLOW):
            # previous operation must be an ovf_operation; search backwards
            # for it and attach an explicit ordering edge
            guard_node.setpriority(100)
            i = guard_node.getindex()-1
            while i >= 0:
                node = self.nodes[i]
                op = node.getoperation()
                if op.is_ovf():
                    break
                i -= 1
            else:
                raise AssertionError("(no)overflow: no overflowing op present")
            node.edge_to(guard_node, None, label='overflow')
        elif guard_opnum in (rop.GUARD_NO_EXCEPTION, rop.GUARD_EXCEPTION, rop.GUARD_NOT_FORCED):
            # previous op must be one that can raise or a not forced guard;
            # search backwards for the nearest such op
            guard_node.setpriority(100)
            i = guard_node.getindex() - 1
            while i >= 0:
                node = self.nodes[i]
                op = node.getoperation()
                if op.can_raise():
                    node.edge_to(guard_node, None, label='exception/notforced')
                    break
                if op.is_guard():
                    node.edge_to(guard_node, None, label='exception/notforced')
                    break
                i -= 1
            else:
                raise AssertionError("(no)exception/not_forced: not op raises for them")
        else:
            pass # not invalidated, future condition!
    def guard_exit_dependence(self, guard_node, var, tracker):
        """Order *guard_node* before every later use of *var*.

        Uses of a variable a guard protects must not be scheduled ahead of
        the guard itself.
        """
        def_node = tracker.definition(var)
        if def_node is None:
            return
        for dep in def_node.provides():
            if guard_node.is_before(dep.to) and dep.because_of(var):
                guard_node.edge_to(dep.to, var, label='guard_exit('+str(var)+')')
    def build_guard_dependencies(self, guard_node, tracker):
        """Attach the dependency edges required by a guard operation."""
        guard_op = guard_node.op
        if guard_op.getopnum() >= rop.GUARD_FUTURE_CONDITION:
            # ignore invalidated & future condition guard & early exit
            return
        # true dependencies
        for arg in guard_op.getarglist():
            tracker.depends_on_arg(arg, guard_node)
        # dependencies to uses of arguments it protects
        self.guard_argument_protection(guard_node, tracker)
        #
        descr = guard_op.getdescr()
        if descr.exits_early():
            return
        # handle fail args: the guard must stay after each earlier
        # (re)definition of every variable in its fail args
        if guard_op.getfailargs():
            for i,arg in enumerate(guard_op.getfailargs()):
                if arg is None:
                    continue
                if not tracker.is_defined(arg):
                    continue
                try:
                    for at in tracker.redefinitions(arg):
                        # later redefinitions are prohibited
                        if at.is_before(guard_node):
                            at.edge_to(guard_node, arg, failarg=True, label="fail")
                except KeyError:
                    assert False
    def build_non_pure_dependencies(self, node, tracker):
        """Add dependency edges for an operation with side effects.

        Complex-object loads depend on the last modification of the cell
        they read; destructive modifications get WAR/WAW edges against
        earlier users, and every side-effecting op is ordered after the
        last guard seen so far.
        """
        op = node.op
        if node.loads_from_complex_object():
            # If this complex object load operation loads an index that has been
            # modified, the last modification should be used to put a def-use edge.
            for opnum, i, j in unrolling_iterable(LOAD_COMPLEX_OBJ):
                if opnum == op.getopnum():
                    cobj = op.getarg(i)
                    if j != -1:
                        # indexed load: depends on both the object and the index
                        index_var = op.getarg(j)
                        tracker.depends_on_arg(cobj, node, index_var)
                        tracker.depends_on_arg(index_var, node)
                    else:
                        tracker.depends_on_arg(cobj, node)
                    break
        else:
            for arg, argcell, destroyed in node.side_effect_arguments():
                if argcell is not None:
                    # tracks the exact cell that is modified
                    tracker.depends_on_arg(arg, node, argcell)
                    tracker.depends_on_arg(argcell, node)
                else:
                    if destroyed:
                        # cannot be sure that only a one cell is modified
                        # assume all cells are (equivalent to a redefinition)
                        try:
                            # A trace is not entirely in SSA form. complex object
                            # modification introduces WAR/WAW dependencies
                            def_node = tracker.definition(arg)
                            if def_node:
                                for dep in def_node.provides():
                                    if dep.to != node:
                                        dep.to.edge_to(node, argcell, label='war')
                                def_node.edge_to(node, argcell)
                        except KeyError:
                            pass
                    else:
                        # not destroyed, just a normal use of arg
                        tracker.depends_on_arg(arg, node)
                if destroyed:
                    # the destroyed cell counts as redefined here
                    tracker.define(arg, node, argcell=argcell)
        # it must be assumed that a side effect operation must not be executed
        # before the last guard operation
        if len(self.guards) > 0:
            last_guard = self.guards[-1]
            last_guard.edge_to(node, label="sideeffect")
        # and the next guard instruction
        tracker.add_non_pure(node)
def cycles(self):
""" NOT_RPYTHON """
stack = []
for node in self.nodes:
node._stack = False
#
label = self.nodes[0]
if _first_cycle(stack, label):
return stack
return None
def __repr__(self):
graph = "graph([\n"
for node in self.nodes:
graph += " " + str(node.opidx) + ": "
for dep in node.provides():
graph += "=>" + str(dep.to.opidx) + ","
graph += " | "
for dep in node.depends():
graph += "<=" + str(dep.to.opidx) + ","
graph += | |
import sys
import os
############################################################################################################
# Class: FileSystem #
# Objectif : implémenter les fonctions pour les opérations nécessaires de systeme de gestion des fichiers. #
############################################################################################################
class FileSystem():
BLOCK_SIZE = 512 #la taille du block
BLOCK_NUM = 1000 #nombre de blocks
#inode map: une table qui indique l'emplacement de chaque inode dans le disque
    INODE_MAP = [x for x in range(0, 80 // 8)] #[0-9]
    INODE_BLOCK = [x for x in range(1, 81)] #[1-80] (range(1, 81) stops at 80)
    def __init__(self):
        """Open the backing image file "vsf", formatting a fresh one if it
        does not exist, then set the current directory to "/"."""
        # build the image-file path in the current working directory
        fspath = os.getcwd() + '/' + "vsf"
        print(fspath)
        # check whether the image already exists
        if False == os.path.exists(fspath):
            print("Info: système de fichier n'existe pas, reconstruction")
            # create the image file; all file-system state lives in it
            self.fs = open("vsf", "w+")
            # Bitmaps mark which inodes / data blocks are in use, so a free
            # slot can be found quickly when the file system is modified.
            # Fresh image layout: block 0 = bitmaps, block 1 = root inode,
            # blocks 2-80 = remaining inodes, blocks 81+ = data blocks.
            initData = (chr(0x80) + chr (0x00) * self.INODE_MAP[-1] +  # inode bitmap: inode 0 ("/") allocated
                chr(0x80) + chr(0x00) * (self.BLOCK_SIZE - len(self.INODE_MAP) - 1) +  # data bitmap: first data block allocated
                'D' + chr(0x00) + chr(len(self.INODE_BLOCK) + 1) + chr(0x00) * (self.BLOCK_SIZE - 3) +  # inode for "/": type 'D', data block 81
                chr(0x00) * self.BLOCK_SIZE * (len(self.INODE_BLOCK) - 1) +  # remaining inodes, zeroed
                chr(0x00) * self.BLOCK_SIZE * (self.BLOCK_NUM - len(self.INODE_BLOCK) - 1)  # data blocks, zeroed
                )
            # write the freshly formatted image and close it
            self.fs.write(initData)
            self.fs.close()
        # the file system already exists
        else:
            print("Info: système de fichier existe, importation")
        # load the whole image into memory as a list of characters
        self.data = []
        self.__load()
        # byte offset where the data-block area begins; len(INODE_BLOCK) is
        # 80, so this is 512 * 81 (the original comment said 512 * 82 —
        # NOTE(review): verify against the layout above)
        self.dataStart = self.BLOCK_SIZE * (len(self.INODE_BLOCK) + 1)
        # start at the root directory (inode 0)
        self.curDir = "/"
        self.curInode = self.__getInode(0)
    # flush the file system back to disk on destruction
    def __del__(self):
        """Persist the in-memory image when the object is destroyed."""
        self.__save()
        # NOTE(review): __save() already closed the file; this second
        # close() is redundant but harmless on a closed file object.
        self.fs.close()
    # import the file system image from disk
    def __load(self):
        """Read the whole "vsf" image into self.data (one char per byte)."""
        self.fs = open("vsf", "r+")
        self.data = list(self.fs.read())
        self.fs.close()
#sauvegarder le système de fichier
def __save(self):
#initilize data
self.data = []
data = "".join(self.data)
self.fs = open("vsf", "w+")
self.fs.write(data)
self.fs.close()
    def __lookup(self):
        """Placeholder for path-component lookup; not implemented yet."""
        pass
    def __find(self, path):
        """Placeholder for path resolution; not implemented yet."""
        pass
    # debug helper: dump the allocation bitmaps
    def checkMap(self):
        """Print the inode and data-block bitmaps as rows of bits.

        Five bytes (40 bits) are shown per line; a blank line marks the
        boundary after the inode bitmap (bytes 0-9).
        """
        line = ""
        for x in range(0, self.BLOCK_SIZE):
            byte = self.data[x]
            if x % 5 == 0:
                # flush the previous row of 5 bytes
                print(line)
                line = ""
            if x == len(self.INODE_MAP) or x == self.BLOCK_SIZE:
                # blank separator after the inode bitmap
                # (NOTE: x never reaches BLOCK_SIZE inside this range, so
                # the second condition is dead)
                print("")
            for y in range(0, 8):
                # MSB-first: bit 0 of the map is the high bit of the byte
                line += str((ord(byte) >> (7 - y)) & 0x01)
            line += " "
#créer un nouveau inode
def __newInode(self, itype):
for x in range(0, len(self.INODE_MAP)):
#recuperer un bit de bitmap
byte = self.data[x]
for y in range(0, 8): #range() = retourne une sequence de nombre, par defaut elle commence par un 0 increment par 1, et elle s'arrete avant un chiffre specifie
#verifier s'il est = 0 pour creer un nouv inode
if ((ord(byte) >> (7 - y)) & 0x01) == 0: #ord() retourne un entier qui represente unicode character
print("Info: nouveau inode " + str(x * 8 + y))
self.data[x] = chr(ord(byte) | (0x80 >> y))
self.__initInode(x * 8 + y, itype)
return x * 8 + y
return -1
    # free an inode
    def __delInode(self, no):
        """Free inode ``no``: clear its bitmap bit and zero its content."""
        x = no // 8
        # clear bit ``no`` (MSB-first); % 256 keeps the complemented mask
        # within a single byte
        self.data[x] = chr(ord(self.data[x]) & (~(0x80 >> no % 8)) % 256)
        self.__initInode(no, chr(0x00))
        print("Info: supprimer l'inode " + str(no))
    # reset an inode to an empty state
    def __initInode(self, no, itype):
        """Reset inode ``no`` to an empty inode of type ``itype``.

        ``itype`` is 'D' (directory), 'F' (file) or chr(0) (free).  A new
        directory immediately receives one data block for its entry list.
        """
        inode = []
        for x in range(0, self.BLOCK_SIZE):
            inode += [chr(0x00)]
        inode[0] = itype  # byte 0 holds the inode type
        if itype == 'D':
            # allocate the directory's entry-list block; store its number
            # big-endian over bytes 1-2
            blockNum = self.__newBlock()
            inode[1] = chr(blockNum // 256)
            inode[2] = chr(blockNum % 256)
        # write the rebuilt inode into the inode table
        self.__writeInode(no, inode)
#récupérer un inode
def __getInode(self, no):
inode = []
#récupérer une tranche 512-1024
inode = self.data[self.BLOCK_SIZE * (1 + no) : self.BLOCK_SIZE * (2 + no)]
return inode
    # rewrite an inode header in place
    def __setInode(self, no, itype, length, blocks):
        """Update the header of inode ``no``.

        Byte 0 holds the type; for files ('F'), bytes 1-2 hold ``length``
        big-endian.  Block numbers follow as 2-byte big-endian pairs
        (offset by 2 for files to skip the length field).
        """
        start = self.BLOCK_SIZE * (1 + no)
        self.data[start + 0] = itype
        if(itype == 'F'): # it is a file: record the length first
            self.data[start + 1] = chr(length // 256)
            self.data[start + 2] = chr(length % 256)
            offset = 2
        else:
            offset = 0
        for x in range(0, len(blocks)):
            # each block number is stored as (high, low) base-256 bytes
            block = blocks[x]
            self.data[start + offset + x * 2 + 1] = chr(block // 256)
            self.data[start + offset + x * 2 + 2] = chr(block % 256)
#insérer dans l'inode
def __writeInode(self, no, data):
self.data[self.BLOCK_SIZE * (1 + no):self.BLOCK_SIZE * (2 + no)] = list(data)
#créer un nouveau block
def __newBlock(self):
for x in range(len(self.INODE_MAP), len(self.INODE_MAP) + self.BLOCK_SIZE):
byte = self.data[x]
for y in range(0, 8):
if ((ord(byte) >> (7 - y)) & 0x01) == 0:
print("Info: nouveau block " + str(x * 8 + y + 1))
self.data[x] = chr(ord(byte) | (0x80 >> y))
self.__initBlock(x * 8 + y + 1)
return x * 8 + y + 1
return -1
#supprimer un block
def __delBlocks(self, nos):
for no in nos:
x = no // 8
self.data[x] = chr(ord(self.data[x]) & (~(0x80 >> no % 8) % 256))
self.__initBlock(no)
print("Info: supprimer le block " + str(no))
#initilaliser un block
def __initBlock(self, no):
for x in range(0, self.BLOCK_SIZE):
self.data[self.dataStart + no * self.BLOCK_SIZE + x] = chr(0x00)
#récupérer un block
def __getBlocks(self, inode):
blocks = inode[1:]
listbn = []
#print blocks
if inode[0] == 'F': #si la valeur de indoe[0] = 'F' donc il s'agit d'un fichier
offset = 2
else:
offset = 0
for x in range(0, (self.BLOCK_SIZE - 1) // 2):
blockNo = (ord(blocks[x * 2 + offset]) << 7) + ord(blocks[x * 2 + 1 + offset])
if blockNo == 0:
return listbn
else:
listbn += [blockNo]
return listbn
    # read a list of data blocks
    def __readBlocks(self, nos):
        """Concatenate and return the raw content of blocks ``nos``.

        Block numbers are absolute: block n occupies bytes
        [n*BLOCK_SIZE, (n+1)*BLOCK_SIZE) of the image.
        """
        return_data = []
        # NOTE(review): the message says "inodes" but these are block numbers
        print("Lire inodes: " + str(nos))
        for no in nos:
            return_data += (self.data[no * self.BLOCK_SIZE :
                no * self.BLOCK_SIZE + self.BLOCK_SIZE])
        return return_data
    # write data across a list of data blocks
    def __writeBlocks(self, nos, datas):
        """Spread ``datas`` over blocks ``nos``, zero-padding the tail.

        ``datas`` may be a list of chars or a string; ``+= chr(0)``
        appends one NUL character either way.
        """
        if len(datas) < self.BLOCK_SIZE * len(nos):
            # pad up to a whole number of blocks
            for x in range(len(datas), self.BLOCK_SIZE * len(nos)):
                datas += chr(0x00)
        for x in range(0, len(nos)):
            # block x of the payload goes to absolute block nos[x]
            no = nos[x]
            self.data[no * self.BLOCK_SIZE :
                no * self.BLOCK_SIZE + self.BLOCK_SIZE] = datas[x * self.BLOCK_SIZE : (x + 1) * self.BLOCK_SIZE]
#récupérer la liste
def __getList(self, inode):
lists = self.__readBlocks(self.__getBlocks(inode))
lists = "".join(lists).strip().split('\n')
listDir = {}
if inode[0] == 'D': #si la valeur de inode[0] =='D' donc il s'agit d'une dirèctorie
for l in lists:
if ord(l[0]) != 0x00:
name = l.split(':')[0]
inode = int(l.split(':')[1])
listDir.update({name:inode})
return listDir
#insérer la liste
def __setList(self, inode, newList):
lists = ""
for key in newList.keys():
lists += (str(key) + ":" + str(newList[key]) + "\n")
lists = list(lists)
self.__writeBlocks(self.__getBlocks(inode), lists)
    # open a file; if it does not exist it is created
    def open(self, path):
        """Resolve ``path`` to ``(inode_number, inode)``, creating the file
        if it does not exist.

        Relative paths are resolved against the current directory.  Each
        intermediate component is looked up in its parent's entry list;
        if the final component is missing, a new file inode ('F') is
        allocated and registered in the parent directory.
        """
        if path[0] != '/':
            # make the path absolute relative to the current directory
            path = self.curDir + '/' + path
        # start the walk at the root inode (number 0)
        inode = self.__getInode(0)
        inodeNum = 0
        if path == '/':
            return inodeNum, inode
        # split into components, dropping the leading empty element
        nods = path.split('/')[1:]
        # entries of the directory currently being walked
        listDir = self.__getList(inode)
        for nod in nods[:-1]:
            # descend one directory level per intermediate component
            # NOTE(review): a missing intermediate component is silently
            # ignored and the walk continues from the previous directory
            for key in listDir.keys():
                if str(nod) == str(key):
                    inode = self.__getInode(listDir[key])
                    inodeNum = listDir[key]
                    listDir = self.__getList(inode)
        found = False
        nod = nods[-1]
        # look for the final component in its parent directory
        for key in listDir.keys():
            if str(nod) == str(key):
                inode = self.__getInode(int(listDir[key]))
                inodeNum = int(listDir[key])
                found = True
        # the file does not exist
        if found == False: # create it
            print("Info: Le fichier " + nod + " n'existe pas, création")
            # allocate a fresh file inode
            inodeNum = self.__newInode('F')
            # register the new entry in the parent directory
            listDir.update({nod:str(inodeNum)})
            # persist the parent's updated entry list
            self.__setList(inode, listDir)
            # fetch the inode that will be returned
            inode = self.__getInode(inodeNum)
        # return the inode and its number
        return inodeNum, inode
    # close a file/directory
    def close(self):
        """No-op: no per-file open state is kept by this file system."""
        pass
#lire dans un répertoire
def read(self, inodeNum):
#récupérer l'inode
inode = self.__getInode(inodeNum)
if inode[0] != 'F': #s'il est différent de F donc il s'agit | |
try:
from django.db.models import Prefetch, F, Value, OuterRef, Subquery
except ImportError:
raise Exception('django must be installed')
import copy
from pyresource.executor import Executor
from pyresource.translator import ResourceTranslator
from pyresource.resolver import RequestResolver
from pyresource.exceptions import (
Forbidden,
FilterError,
ResourceMisconfigured,
SerializationError,
SchemaResolverError,
QueryValidationError,
QueryExecutionError,
NotFound,
MethodNotAllowed,
)
from pyresource.utils.types import get_link
from django.contrib.postgres.aggregates import ArrayAgg
from pyresource.utils import resource_to_django, make_literal
from .operators import make_expression, make_filter
# use a single resolver across all executors
from .resolver import resolver
from .prefetch import FastQuery, FastPrefetch
from .utils import maybe_atomic, maybe_capture_queries
from pyresource.conf import settings
class DjangoQueryLogic:
@classmethod
def _get_sorts(cls, sorts, translate=None):
if isinstance(sorts, str):
sorts = [sorts]
if not sorts:
return None
results = []
for sort in sorts:
desc = False
if sort.startswith("-"):
sort = sort[1:]
desc = True
if translate:
sort = ResourceTranslator.translate(sort, translate)
sort = resource_to_django(sort)
if desc:
# add descending sort marker
sort = f'-{sort}'
results.append(sort)
return results
    @classmethod
    def _get_filters(cls, resource, where, query=None, request=None, translate=False):
        """Build `django.db.models.Q` object for a queryset

        For example:

        request.user.id = 123
        where = {
            "or": [
                {'=': ['.request.user.id', 'id']},
                {'>=': ['created', {'now': {}}]}
            ]
        }
        return = Q(id=123) | Q(created__gte=Now())

        Returns None when ``where`` is empty.
        """
        if not where:
            return None
        # substitute ".request..."/".query..." references with live values
        where = RequestResolver.resolve(where, query=query, request=request)
        try:
            # translate resource field names into ORM paths only when asked
            return make_filter(where, translate=resource if translate else None)
        except FilterError as e:
            # a bad "where" expression is a server-side configuration error
            raise ResourceMisconfigured(
                f"{resource.id}: failed to build filters\n" f"Error: {e}"
            )
    @classmethod
    def _add_queryset_sorts(
        cls,
        resource,
        fields,
        queryset,
        query,
        request=None,
        level=None,
        related=None,
        **context,
    ):
        """Add .order_by

        The request-level "sort" takes precedence over the default sort
        configured on the resource's queryset source.
        """
        source = cls._get_queryset_source(resource, related=related)
        sorts = None
        if isinstance(source, dict):
            # default ordering from the resource's source configuration
            qs = source.get("queryset")
            sort = qs.get("sort", None)
            sorts = cls._get_sorts(sort)
        state = cls._get_query_state(query, level=level)
        if state is True:
            state = {}
        sort = state.get("sort", None)
        if sort:
            # request-provided sort overrides the default
            sorts = cls._get_sorts(sort, translate=resource)
        # order by request sorts, or by default sorts
        if sorts:
            queryset = queryset.order_by(*sorts)
        return queryset
    @classmethod
    def _add_queryset_filters(
        cls,
        resource,
        fields,
        queryset,
        query,
        request=None,
        level=None,
        related=None,
        **context,
    ):
        """Add .filter

        Three filter sources are combined (ANDed via chained .filter):
        authorization filters from ``context['can']``, default filters
        from the resource source, and request-level "where" filters.  A
        record id in the query state pins the queryset to a single row.
        """
        source = cls._get_queryset_source(resource, related=related)
        can_filters = request_filters = default_filters = None
        if isinstance(source, dict):
            # default filters configured on the resource's queryset source
            qs = source.get("queryset")
            where = qs.get("where", None)
            default_filters = cls._get_filters(
                resource, where, query=query, request=request
            )
        state = cls._get_query_state(query, level=level)
        if state is True:
            state = {}
        record_id = state.get("id", None)
        where = state.get("where", None)
        if where:
            # request-provided filters use resource field names: translate
            request_filters = cls._get_filters(
                resource, where, query=query, request=request, translate=True
            )
        can = context.get('can')
        if isinstance(can, dict):
            # authorization expressed as a filter expression
            can_filters = cls._get_filters(
                resource, can, query=query, request=request, translate=True
            )
        for filters in (can_filters, default_filters, request_filters):
            if filters:
                queryset = queryset.filter(filters)
        if record_id:
            queryset = queryset.filter(pk=record_id)
        return queryset
    @classmethod
    def _add_queryset_prefetches(
        cls,
        resource,
        fields,
        queryset,
        query,
        level=None,
        related=None,
        request=None,
        **context,
    ):
        """Add .prefetch_related to optimize deep query performance

        This indirectly supported nested filtering/ordering/pagination by recursively
        calling get_queryset to build the querysets at each query node.

        Prefetches are added for relation fields for which "take" is an object.
        This indicates that fields, not just values, should be included
        """
        state = cls._get_query_state(query, level=level)
        if state is True:
            # id-only selection: nothing to prefetch
            return queryset
        prefetches = []
        take = state.get("take", {})
        # a root "field" query prefetches that single relation
        root_field = query.state.get("field", None) if level is None else None
        take_root = root_field is not None and query.state.get("take") is not None
        if take or take_root:
            for field in fields:
                take_field = take.get(field.name)
                # prefetch when the field is taken as an object, or is a
                # to-many link (which always needs its own queryset)
                if take_root or (
                    take_field and (
                        isinstance(take_field, dict) or
                        (field.is_link and field.is_list)
                    )
                ):
                    # recursively build nested querysets
                    source = resolver.get_field_source(field.source)
                    source = resource_to_django(source)
                    related_resource = field.related
                    related_level = f"{level}.{field.name}" if level else field.name
                    # selection: which fields should be selected
                    related_fields = cls._take_fields(
                        related_resource,
                        action="get",
                        query=query,
                        request=request,
                        level=related_level,
                    )
                    # authorization: is the request allowed to prefetch this relation
                    related_can = cls._can(
                        related_resource,
                        'get.prefetch',
                        query=query,
                        request=request,
                        field=field
                    )
                    next_queryset = cls._get_queryset(
                        related_resource,
                        related_fields,
                        query,
                        request=request,
                        level=related_level,
                        can=related_can,
                        related=field
                    )
                    # prefetch into ".<name>" to avoid clashing with model attrs
                    prefetches.append(
                        FastPrefetch(
                            source, queryset=next_queryset, to_attr=f".{field.name}"
                        )
                    )
        if prefetches:
            queryset = queryset.prefetch_related(*prefetches)
        return queryset
@classmethod
def _add_queryset_pagination(
cls, resource, fields, queryset, query, count=None, level=None, **context,
):
"""Add pagination"""
if isinstance(queryset, dict):
# .aggregate was called, producing a dictionary result
return queryset
state = cls._get_query_state(query, level=level)
if state is True:
state = {}
page = state.get("page", {})
size = int(page.get("size", settings.PAGE_SIZE))
after = page.get("after", None)
offset = 0
if level is not None:
pass # return queryset # TODO
if after:
try:
after = cls._decode_cursor(after)
except Exception as e:
raise QueryValidationError(f"page:after is invalid: {after} ({str(e)})")
if "offset" in after:
# offset-pagination
# after = {'offset': 100}
offset = after["offset"]
queryset = queryset[offset : offset + size + 1]
elif "after" in after:
# keyset-pagination
# after = {'after': {'id': 1, 'name': 'test', ...}}
# only ordered fields are included
filters = {f"{key}__gt": value for key, value in after["after"].items()}
queryset = queryset.filter(**filters)
else:
raise QueryValidationError("page:after is invalid: {after}")
if count is not None:
count["total"] = queryset.count()
queryset = queryset[: size + 1]
return queryset
    @classmethod
    def _make_aggregation(cls, aggregation):
        """Turn an aggregation expression (e.g. {"count": "id"}) into a
        Django aggregate via the operator expression builder."""
        return make_expression(aggregation)
    @classmethod
    def _make_annotation(cls, field, **context):
        """Build the Django annotation expression for a resource field.

        String sources become F() references (or ArrayAgg for to-many
        links); dict sources are functional expressions handled by
        make_expression.
        """
        is_list = field.is_list
        source = resolver.get_field_source(field.source)
        if isinstance(source, str):
            # string annotation e.g. "user.name"
            source = resource_to_django(source)
            if is_list:
                kwargs = {}
                # TODO: refactor this to use the normal get_queryset logic
                # ArrayAgg does not work properly in prefetch querysets

                # optional ordering
                if isinstance(field.source, dict):
                    qs = field.source.get("queryset")
                    sort = qs.get("sort", None) if qs else None
                    if sort:
                        # sort is relative to the related object: prefix it
                        sort = f"{source}.{sort}"
                        kwargs["ordering"] = resource_to_django(sort)
                return ArrayAgg(source, **kwargs)
            else:
                return F(source)
        else:
            # functional annotation e.g. {"count": "location.users"}
            return make_expression(field.source)
    @classmethod
    def _add_queryset_fields(
        cls, resource, fields, queryset, query, level=None, **context,
    ):
        """Add fields

        All of a Resource's fields represented in a queryset ("resourced fields")
        are annotated with a prefix of "." in order to prevent
        naming conflicts between source and resourced fields

        Fields that are being prefetched (taken as objects or to-many
        links) are skipped here; .only() restricts the SELECT list to the
        annotated fields plus the primary key.
        """
        if isinstance(queryset, dict):
            # .aggregate was called, producing a dictionary result
            return queryset
        annotations = {}
        state = cls._get_query_state(query, level=level)
        if state is True:
            # id only
            return queryset.only('pk')
        take = state.get("take", None)
        root_field = query.state.get("field", None) if level is None else None
        root_take = query.state.get("take", None)
        for field in fields:
            if root_field:
                if root_take:
                    # ignore field being prefetched
                    break
            else:
                if take and (
                    isinstance(take.get(field.name), dict) or
                    (field.is_link and field.is_list)
                ):
                    # ignore fields being prefetched
                    continue
            annotations[f".{field.name}"] = cls._make_annotation(field, **context)
        if annotations:
            queryset = queryset.annotate(**annotations)
            only = list(annotations.keys())
            only.append('pk')
        return queryset.only(*only)
@classmethod
def _add_queryset_distinct(
cls, resource, fields, queryset, query, **context,
):
"""Add .distinct if the query has left/outer joins"""
if isinstance(queryset, dict):
# .aggregate was called, producing a dictionary result
return queryset
if context.get('related'):
# handled separately
return queryset
has_joins = False
for join in queryset.query.alias_map.values():
if join.join_type: # and join.join_type != "INNER JOIN":
has_joins = True
break
if has_joins:
queryset = queryset.distinct()
return queryset
@classmethod
def _add_queryset_aggregations(
cls, resource, fields, queryset, query, **context,
):
level = context.get('level', None)
state = cls._get_query_state(query, level=level)
if state is True:
return queryset
group = state.get('group')
if not group:
return queryset
aggregations = {}
for name, aggregation in group.items():
# use .{name} for consistency with annotations/fields
aggregations[f'.{name}'] = cls._make_aggregation(aggregation)
result = queryset.aggregate(**aggregations)
return result
    @classmethod
    def _get_queryset(
        cls, resource, fields, query, **context,
    ):
        """Build the final queryset by piping the base queryset through
        each _add_queryset_* stage in order.

        Order matters: filters/sorts must precede aggregation,
        aggregation may turn the queryset into a plain dict, and
        pagination/field selection run last.
        """
        queryset = cls._get_queryset_base(resource, **context)
        for add in (
            "prefetches",
            "filters",
            "sorts",
            "aggregations",
            "distinct",
            "pagination",
            "fields",
        ):
            queryset = getattr(cls, f"_add_queryset_{add}")(
                resource, fields, queryset, query, **context,
            )
        return queryset
@classmethod
def _get_queryset_source(self, resource, related=None):
if related:
# add context from related field
related_source = related.source
if isinstance(related_source, dict) and "queryset" in related_source:
source = copy.deepcopy(resource.source) if isinstance(resource.source, dict) else {
'queryset': {
'model': resource.source
}
}
queryset = source['queryset']
related_queryset = related_source['queryset']
# add "where" from related_source
if 'where' in related_queryset:
if 'where' not in queryset:
# use related queryset filter only
queryset['where'] = related_queryset['where']
elif queryset['where'] != related_queryset['where']:
# use related queryset filter and resource filter
queryset['where'] = {'and': [queryset['where'], related_queryset['where']]}
# otherwise, keep the same filter (it is the same)
# add "sort" from related_source
if 'sort' in related_queryset:
if 'sort' not in queryset:
queryset['sort'] = related_queryset['sort']
elif queryset['sort'] != related_queryset['sort']:
# overwrite the default sort order instead of concatenating sorts
# this is because a concatenated sort is rarely the intent
queryset['sort'] = related_queryset['sort']
else:
source = resource.source
else:
source = resource.source
return source
@classmethod
def _get_queryset_base(cls, resource, related=None, **context):
source = cls._get_queryset_source(resource, related=related)
try:
model = | |
URITemplate(org["public_members_url"])
self.repos_url = org["repos_url"]
self.url = self._api = org["url"]
self.type = "Organization"
def _repr(self):
display_name = ""
name = getattr(self, "name", None)
if name is not None:
display_name = ":{}".format(name)
return "<{s.class_name} [{s.login}{display}]>".format(
s=self, display=display_name
)
    @requires_auth
    def add_member(self, username, team_id):
        """Add ``username`` to ``team`` and thereby to this organization.

        .. warning::

            This method is no longer valid. To add a member to a team, you
            must now retrieve the team directly, and use the ``invite``
            method.

        Any user that is to be added to an organization, must be added
        to a team as per the GitHub api.

        .. versionchanged:: 1.0

            The second parameter used to be ``team`` but has been changed to
            ``team_id``. This parameter is now required to be an integer to
            improve performance of this method.

        :param str username:
            (required), login name of the user to be added
        :param int team_id:
            (required), team id
        :returns:
            True if successful, False otherwise
        :rtype:
            bool
        """
        # emit the deprecation up front; the call is still attempted below
        warnings.warn(
            "This is no longer supported by the GitHub API, see "
            "https://developer.github.com/changes/2014-09-23-one-more-week"
            "-before-the-add-team-member-api-breaking-change/",
            DeprecationWarning,
        )
        if int(team_id) < 0:
            # invalid team id: fail fast without a network round-trip
            return False
        url = self._build_url("teams", str(team_id), "members", str(username))
        return self._boolean(self._put(url), 204, 404)
    @requires_auth
    def add_or_update_membership(self, username, role="member"):
        """Add a member or update their role.

        :param str username:
            (required), user to add or update.
        :param str role:
            (optional), role to give to the user. Options are ``member``,
            ``admin``. Defaults to ``member``.
        :returns:
            the created or updated membership
        :rtype:
            :class:`~github3.orgs.Membership`
        :raises:
            ValueError if role is not a valid choice
        """
        # validate locally before issuing the request
        if role not in self.member_roles:
            raise ValueError(
                "'role' must be one of {}".format(
                    ", ".join(sorted(self.member_roles))
                )
            )
        data = {"role": role}
        url = self._build_url(
            "memberships", str(username), base_url=self._api
        )
        json = self._json(self._put(url, json=data), 200)
        return self._instance_or_null(Membership, json)
    @requires_auth
    def add_repository(self, repository, team_id):  # FIXME(jlk): add perms
        """Add ``repository`` to ``team``.

        .. versionchanged:: 1.0

            The second parameter used to be ``team`` but has been changed to
            ``team_id``. This parameter is now required to be an integer to
            improve performance of this method.

        :param str repository:
            (required), form: 'user/repo'
        :param int team_id:
            (required), team id
        :returns:
            True if successful, False otherwise
        :rtype:
            bool
        """
        if int(team_id) < 0:
            # invalid team id: fail fast without a network round-trip
            return False
        url = self._build_url("teams", str(team_id), "repos", str(repository))
        return self._boolean(self._put(url), 204, 404)
@requires_auth
def create_hook(self, name, config, events=["push"], active=True):
"""Create a hook on this organization.
:param str name:
(required), name of the hook
:param dict config:
(required), key-value pairs which act as settings for this hook
:param list events:
(optional), events the hook is triggered for
:param bool active:
(optional), whether the hook is actually triggered
:returns:
the created hook
:rtype:
:class:`~github3.orgs.OrganizationHook`
"""
json = None
if name and config and isinstance(config, dict):
url = self._build_url("hooks", base_url=self._api)
data = {
"name": name,
"config": config,
"events": events,
"active": active,
}
json = self._json(self._post(url, data=data), 201)
return OrganizationHook(json, self) if json else None
    @requires_auth
    def create_project(self, name, body=""):
        """Create a project for this organization.

        If the client is authenticated and a member of the organization, this
        will create a new project in the organization.

        :param str name:
            (required), name of the project
        :param str body:
            (optional), the body of the project
        :returns:
            the new project
        :rtype:
            :class:`~github3.projects.Project`
        """
        url = self._build_url("projects", base_url=self._api)
        data = {"name": name, "body": body}
        # projects require the preview media-type headers
        json = self._json(
            self._post(url, data, headers=Project.CUSTOM_HEADERS), 201
        )
        return self._instance_or_null(Project, json)
    @requires_auth
    def create_repository(
        self,
        name,
        description="",
        homepage="",
        private=False,
        has_issues=True,
        has_wiki=True,
        team_id=0,
        auto_init=False,
        gitignore_template="",
        license_template="",
    ):
        """Create a repository for this organization.

        If the client is authenticated and a member of the organization, this
        will create a new repository in the organization.

        ``name`` should be no longer than 100 characters

        :param str name:
            (required), name of the repository

            .. warning:: this should be no longer than 100 characters
        :param str description:
            (optional)
        :param str homepage:
            (optional)
        :param bool private:
            (optional), If ``True``, create a private repository. API default:
            ``False``
        :param bool has_issues:
            (optional), If ``True``, enable issues for this repository. API
            default: ``True``
        :param bool has_wiki:
            (optional), If ``True``, enable the wiki for this repository. API
            default: ``True``
        :param int team_id:
            (optional), id of the team that will be granted access to this
            repository
        :param bool auto_init:
            (optional), auto initialize the repository.
        :param str gitignore_template:
            (optional), name of the template; this is ignored if auto_init is
            False.
        :param str license_template:
            (optional), name of the license; this is ignored if auto_init is
            False.
        :returns:
            the created repository
        :rtype:
            :class:`~github3.repos.Repository`
        """
        url = self._build_url("repos", base_url=self._api)
        data = {
            "name": name,
            "description": description,
            "homepage": homepage,
            "private": private,
            "has_issues": has_issues,
            "has_wiki": has_wiki,
            "license_template": license_template,
            "auto_init": auto_init,
            "gitignore_template": gitignore_template,
        }
        # only send team_id when a real team was specified
        if int(team_id) > 0:
            data.update({"team_id": team_id})
        json = self._json(self._post(url, data), 201)
        return self._instance_or_null(Repository, json)
@requires_auth
def conceal_member(self, username):
"""Conceal ``username``'s membership in this organization.
:param str username:
username of the organization member to conceal
:returns:
True if successful, False otherwise
:rtype:
bool
"""
url = self._build_url("public_members", username, base_url=self._api)
return self._boolean(self._delete(url), 204, 404)
@requires_auth
def create_team(self, name, repo_names=[], permission="pull"):
"""Create a new team and return it.
This only works if the authenticated user owns this organization.
:param str name:
(required), name to be given to the team
:param list repo_names:
(optional) repositories, e.g. ['github/dotfiles']
:param str permission:
(optional), options:
- ``pull`` -- (default) members can not push or administer
repositories accessible by this team
- ``push`` -- members can push and pull but not administer
repositories accessible by this team
- ``admin`` -- members can push, pull and administer
repositories accessible by this team
:returns:
the created team
:rtype:
:class:`~github3.orgs.Team`
"""
data = {
"name": name,
"repo_names": repo_names,
"permission": permission,
}
url = self._build_url("teams", base_url=self._api)
json = self._json(self._post(url, data), 201)
return self._instance_or_null(Team, json)
    @requires_auth
    def edit(
        self,
        billing_email=None,
        company=None,
        email=None,
        location=None,
        name=None,
        description=None,
        has_organization_projects=None,
        has_repository_projects=None,
        default_repository_permission=None,
        members_can_create_repositories=None,
    ):
        """Edit this organization.

        :param str billing_email:
            (optional) Billing email address (private)
        :param str company:
            (optional)
        :param str email:
            (optional) Public email address
        :param str location:
            (optional)
        :param str name:
            (optional)
        :param str description:
            (optional) The description of the company.
        :param bool has_organization_projects:
            (optional) Toggles whether organization projects are enabled for
            the organization.
        :param bool has_repository_projects:
            (optional) Toggles whether repository projects are enabled for
            repositories that belong to the organization.
        :param string default_repository_permission:
            (optional) Default permission level members have for organization
            repositories:

            - ``read`` -- (default) can pull, but not push to or administer
                this repository.
            - ``write`` -- can pull and push, but not administer this
                repository.
            - ``admin`` -- can pull, push, and administer this repository.
            - ``none`` -- no permissions granted by default.
        :param bool members_can_create_repositories:
            (optional) Toggles ability of non-admin organization members to
            create repositories:

            - ``True`` -- (default) all organization members can create
                repositories.
            - ``False`` -- only admin members can create repositories.
        :returns:
            True if successful, False otherwise
        :rtype:
            bool
        """
        json = None
        data = {
            "billing_email": billing_email,
            "company": company,
            "email": email,
            "location": location,
            "name": name,
            "description": description,
            "has_organization_projects": has_organization_projects,
            "has_repository_projects": has_repository_projects,
            "default_repository_permission": default_repository_permission,
            "members_can_create_repositories": members_can_create_repositories,
        }
        # drop the parameters the caller did not provide
        self._remove_none(data)
        if data:
            json = self._json(self._patch(self._api, data=dumps(data)), 200)
        if json:
            # refresh this object with the server's view of the org
            self._update_attributes(json)
            return True
        return False
@requires_auth
def hook(self, hook_id):
"""Get a single hook.
:param int hook_id:
(required), id of the hook
:returns:
the hook
:rtype:
:class:`~github3.orgs.OrganizationHook`
"""
json = None
if int(hook_id) > 0:
url = self._build_url("hooks", str(hook_id), base_url=self._api)
json = self._json(self._get(url), 200)
return self._instance_or_null(OrganizationHook, json)
    @requires_auth
    def hooks(self, number=-1, etag=None):
        """Iterate over hooks registered on this organization.

        :param int number:
            (optional), number of hooks to return. Default: -1
            returns all hooks
        :param str etag:
            (optional), ETag from a previous request to the same endpoint
        :returns:
            generator of hooks
        :rtype:
            :class:`~github3.orgs.OrganizationHook`
        """
        url = self._build_url("hooks", base_url=self._api)
        return self._iter(int(number), url, OrganizationHook, etag=etag)
@requires_auth
def invite(
self, team_ids, invitee_id=None, email=None, role="direct_member"
):
"""Invite the user to join this organization.
:param list[int] team_ids:
(required), list of | |
<filename>tests/test_response_parser.py
# Copyright (c) 2014, <NAME>
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
'''
Unit tests for the FetchTokeniser and FetchParser classes
'''
from __future__ import unicode_literals
from datetime import datetime
from imapclient.datetime_util import datetime_to_native
from imapclient.fixed_offset import FixedOffset
from imapclient.response_parser import (
parse_response,
parse_message_list,
parse_fetch_response,
)
from imapclient.response_types import Envelope, Address
from imapclient.exceptions import ProtocolError
from tests.util import unittest
from .util import patch
# TODO: test invalid dates and times

CRLF = b'\r\n'  # IMAP line terminator (RFC 3501 responses end with CR LF)
class TestParseResponse(unittest.TestCase):
    """Tests for parse_response(): atoms, quoted strings, ints, NIL,
    (nested) tuples, literals, quote escaping and parse-error reporting.
    """

    def test_unquoted(self):
        # Unquoted atoms are returned unchanged, as bytes.
        self._test(b'FOO', b'FOO')
        self._test(b'F.O:-O_0;', b'F.O:-O_0;')
        self._test(br'\Seen', br'\Seen')

    def test_string(self):
        # Quoted strings lose their quotes.
        self._test(b'"TEST"', b'TEST')

    def test_int(self):
        self._test(b'45', 45)

    def test_int_zero(self):
        self._test(b'0', 0)

    def test_not_an_int(self):
        # A leading zero means the token stays an atom, not an int.
        self._test(b'0123', b'0123')

    def test_nil(self):
        self._test(b'NIL', None)

    def test_empty_tuple(self):
        self._test(b'()', ())

    def test_tuple(self):
        self._test(b'(123 "foo" GeE)', (123, b'foo', b'GeE'))

    def test_int_and_tuple(self):
        self._test(b'1 (123 "foo")', (1, (123, b'foo')), wrap=False)

    def test_nested_tuple(self):
        self._test(b'(123 "foo" ("more" NIL) 66)',
                   (123, b"foo", (b"more", None), 66))

    def test_deeper_nest_tuple(self):
        self._test(b'(123 "foo" ((0 1 2) "more" NIL) 66)',
                   (123, b"foo", ((0, 1, 2), b"more", None), 66))

    def test_complex_mixed(self):
        # A realistic BODYSTRUCTURE-style response mixing all token kinds.
        self._test(b'((FOO "PLAIN" ("CHARSET" "US-ASCII") NIL NIL "7BIT" 1152 23) '
                   b'("TEXT" "PLAIN" ("CHARSET" "US-ASCII" "NAME" "cc.diff") '
                   b'"<hi.there>" "foo" "BASE64" 4554 73) "MIXED")',
                   ((b'FOO', b'PLAIN', (b'CHARSET', b'US-ASCII'), None, None, b'7BIT', 1152, 23),
                    (b'TEXT', b'PLAIN', (b'CHARSET', b'US-ASCII', b'NAME', b'cc.diff'),
                     b'<hi.there>', b'foo', b'BASE64', 4554, 73), b'MIXED'))

    def test_envelopey(self):
        self._test(b'(UID 5 ENVELOPE ("internal_date" "subject" '
                   b'(("name" NIL "address1" "domain1.com")) '
                   b'((NIL NIL "address2" "domain2.com")) '
                   b'(("name" NIL "address3" "domain3.com")) '
                   b'((NIL NIL "address4" "domain4.com")) '
                   b'NIL NIL "<reply-to-id>" "<msg_id>"))',
                   (b'UID',
                    5,
                    b'ENVELOPE',
                    (b'internal_date',
                     b'subject',
                     ((b'name', None, b'address1', b'domain1.com'),),
                     ((None, None, b'address2', b'domain2.com'),),
                     ((b'name', None, b'address3', b'domain3.com'),),
                     ((None, None, b'address4', b'domain4.com'),),
                     None,
                     None,
                     b'<reply-to-id>',
                     b'<msg_id>')))

    def test_envelopey_quoted(self):
        # Same as above but with escaped quotes inside the subject.
        self._test(b'(UID 5 ENVELOPE ("internal_date" "subject with \\"quotes\\"" '
                   b'(("name" NIL "address1" "domain1.com")) '
                   b'((NIL NIL "address2" "domain2.com")) '
                   b'(("name" NIL "address3" "domain3.com")) '
                   b'((NIL NIL "address4" "domain4.com")) '
                   b'NIL NIL "<reply-to-id>" "<msg_id>"))',
                   (b'UID',
                    5,
                    b'ENVELOPE',
                    (b'internal_date',
                     b'subject with "quotes"',
                     ((b'name', None, b'address1', b'domain1.com'),),
                     ((None, None, b'address2', b'domain2.com'),),
                     ((b'name', None, b'address3', b'domain3.com'),),
                     ((None, None, b'address4', b'domain4.com'),),
                     None,
                     None,
                     b'<reply-to-id>',
                     b'<msg_id>')))

    def test_literal(self):
        literal_text = add_crlf(
            b"012\n"
            b"abc def XYZ\n"
        )
        self._test([(b'{18}', literal_text)], literal_text)

    def test_literal_with_more(self):
        # A literal embedded in the middle of a tuple response.
        literal_text = add_crlf(
            b"012\n"
            b"abc def XYZ\n"
        )
        response = [(b'(12 "foo" {18}', literal_text), b")"]
        self._test(response, (12, b'foo', literal_text))

    def test_quoted_specials(self):
        self._test(br'"\"foo bar\""', b'"foo bar"')
        self._test(br'"foo \"bar\""', b'foo "bar"')
        self._test(br'"foo\\bar"', br'foo\bar')

    def test_square_brackets(self):
        # Square brackets glue onto the surrounding atom rather than nesting.
        self._test(b'foo[bar rrr]', b'foo[bar rrr]')
        self._test(b'"foo[bar rrr]"', b'foo[bar rrr]')
        self._test(b'[foo bar]def', b'[foo bar]def')
        self._test(b'(foo [bar rrr])', (b'foo', b'[bar rrr]'))
        self._test(b'(foo foo[bar rrr])', (b'foo', b'foo[bar rrr]'))

    def test_incomplete_tuple(self):
        self._test_parse_error(b'abc (1 2', r'Tuple incomplete before "\(1 2"')

    def test_bad_literal(self):
        self._test_parse_error([(b'{99}', b'abc')],
                               'Expecting literal of size 99, got 3')

    def test_bad_quoting(self):
        self._test_parse_error(b'"abc next', """No closing '"'""")

    def _test(self, to_parse, expected, wrap=True):
        # Helper: parse to_parse and compare the full parsed sequence
        # against expected.
        if wrap:
            # convenience - expected value should be wrapped in another tuple
            expected = (expected,)
        if not isinstance(to_parse, list):
            to_parse = [to_parse]
        output = parse_response(to_parse)
        self.assertSequenceEqual(output, expected)

    def _test_parse_error(self, to_parse, expected_msg):
        # Helper: assert that parsing fails with a ProtocolError whose
        # message matches expected_msg (a regex).
        if not isinstance(to_parse, list):
            to_parse = [to_parse]
        self.assertRaisesRegex(ProtocolError, expected_msg,
                               parse_response, to_parse)
class TestParseMessageList(unittest.TestCase):
    """Tests for parse_message_list(): message-id sequences with and
    without a trailing MODSEQ annotation."""

    def test_basic(self):
        result = parse_message_list([b'1 2 3'])
        self.assertSequenceEqual(result, [1, 2, 3])
        self.assertEqual(result.modseq, None)

    def test_one_id(self):
        self.assertSequenceEqual(parse_message_list([b'4']), [4])

    def test_modseq(self):
        result = parse_message_list([b'1 2 3 (modseq 999)'])
        self.assertSequenceEqual(result, [1, 2, 3])
        self.assertEqual(result.modseq, 999)

    def test_modseq_no_space(self):
        result = parse_message_list([b'1 2 3(modseq 999)'])
        self.assertSequenceEqual(result, [1, 2, 3])
        self.assertEqual(result.modseq, 999)

    def test_modseq_interleaved(self):
        # Unlikely but test it anyway.
        result = parse_message_list([b'1 2 (modseq 9) 3 4'])
        self.assertSequenceEqual(result, [1, 2, 3, 4])
        self.assertEqual(result.modseq, 9)
class TestParseFetchResponse(unittest.TestCase):
    """Tests for parse_fetch_response(): keying by SEQ/UID, FLAGS,
    literals, BODY/BODYSTRUCTURE parsing and ENVELOPE conversion.
    """

    def test_basic(self):
        self.assertEqual(parse_fetch_response([b'4 ()']), {4: {b'SEQ': 4}})

    def test_none_special_case(self):
        # A None response element yields an empty result dict.
        self.assertEqual(parse_fetch_response([None]), {})

    def test_bad_msgid(self):
        self.assertRaises(ProtocolError, parse_fetch_response, [b'abc ()'])

    def test_bad_data(self):
        self.assertRaises(ProtocolError, parse_fetch_response, [b'2 WHAT'])

    def test_missing_data(self):
        self.assertRaises(ProtocolError, parse_fetch_response, [b'2'])

    def test_simple_pairs(self):
        # Field names are upper-cased; values keep their parsed types.
        self.assertEqual(parse_fetch_response([b'23 (ABC 123 StUfF "hello")']),
                         {23: {b'ABC': 123,
                               b'STUFF': b'hello',
                               b'SEQ': 23}})

    def test_odd_pairs(self):
        # An odd number of tokens can't form key/value pairs.
        self.assertRaises(ProtocolError, parse_fetch_response, [b'(ONE)'])
        self.assertRaises(ProtocolError, parse_fetch_response, [b'(ONE TWO THREE)'])

    def test_UID(self):
        # When present (any case), the UID becomes the dict key.
        self.assertEqual(parse_fetch_response([b'23 (UID 76)']),
                         {76: {b'SEQ': 23}})
        self.assertEqual(parse_fetch_response([b'23 (uiD 76)']),
                         {76: {b'SEQ': 23}})

    def test_not_uid_is_key(self):
        # With uid_is_key=False the sequence number stays the key.
        self.assertEqual(parse_fetch_response([b'23 (UID 76)'], uid_is_key=False),
                         {23: {b'UID': 76,
                               b'SEQ': 23}})

    def test_bad_UID(self):
        self.assertRaises(ProtocolError, parse_fetch_response, [b'(UID X)'])

    def test_FLAGS(self):
        self.assertEqual(parse_fetch_response([br'23 (FLAGS (\Seen Stuff))']),
                         {23: {b'SEQ': 23, b'FLAGS': (br'\Seen', b'Stuff')}})

    def test_multiple_messages(self):
        self.assertEqual(parse_fetch_response(
            [b"2 (FLAGS (Foo Bar)) ",
             b"7 (FLAGS (Baz Sneeve))"]),
            {
                2: {b'FLAGS': (b'Foo', b'Bar'), b'SEQ': 2},
                7: {b'FLAGS': (b'Baz', b'Sneeve'), b'SEQ': 7},
            })

    def test_same_message_appearing_multiple_times(self):
        # This can occur when server sends unsolicited FETCH responses
        # (e.g. RFC 4551)
        self.assertEqual(parse_fetch_response(
            [b"2 (FLAGS (Foo Bar)) ",
             b"2 (MODSEQ 4)"]),
            {2: {b'FLAGS': (b'Foo', b'Bar'), b'SEQ': 2, b'MODSEQ': 4}})

    def test_literals(self):
        self.assertEqual(parse_fetch_response([(b'1 (RFC822.TEXT {4}', b'body'),
                                               (b' RFC822 {21}', b'Subject: test\r\n\r\nbody'),
                                               b')']),
                         {1: {b'RFC822.TEXT': b'body',
                              b'RFC822': b'Subject: test\r\n\r\nbody',
                              b'SEQ': 1}})

    def test_literals_and_keys_with_square_brackets(self):
        self.assertEqual(parse_fetch_response([(b'1 (BODY[TEXT] {11}', b'Hi there.\r\n'), b')']),
                         {1: {b'BODY[TEXT]': b'Hi there.\r\n',
                              b'SEQ': 1}})

    def test_BODY_HEADER_FIELDS(self):
        # The requested field list inside the key is upper-cased too.
        header_text = b'Subject: A subject\r\nFrom: Some one <<EMAIL>>\r\n\r\n'
        self.assertEqual(parse_fetch_response(
            [(b'123 (UID 31710 BODY[HEADER.FIELDS (from subject)] {57}', header_text), b')']),
            {31710: {b'BODY[HEADER.FIELDS (FROM SUBJECT)]': header_text,
                     b'SEQ': 123}})

    def test_BODY(self):
        self.check_BODYish_single_part(b'BODY')
        self.check_BODYish_multipart(b'BODY')
        self.check_BODYish_nested_multipart(b'BODY')

    def test_BODYSTRUCTURE(self):
        self.check_BODYish_single_part(b'BODYSTRUCTURE')
        self.check_BODYish_nested_multipart(b'BODYSTRUCTURE')

    def check_BODYish_single_part(self, respType):
        # Shared checker: a single-part body parses to a flat tuple and
        # reports is_multipart == False.
        text = b'123 (UID 317 ' + respType + \
            b'("TEXT" "PLAIN" ("CHARSET" "us-ascii") NIL NIL "7BIT" 16 1))'
        parsed = parse_fetch_response([text])
        self.assertEqual(parsed, {
            317: {
                respType: (b'TEXT', b'PLAIN', (b'CHARSET', b'us-ascii'), None, None, b'7BIT', 16, 1),
                b'SEQ': 123
            }
        })
        self.assertFalse(parsed[317][respType].is_multipart)

    def check_BODYish_multipart(self, respType):
        # Shared checker: a multipart body parses to ([parts...], subtype).
        text = b'123 (UID 269 ' + respType + b' ' \
            b'(("TEXT" "HTML" ("CHARSET" "us-ascii") NIL NIL "QUOTED-PRINTABLE" 55 3)' \
            b'("TEXT" "PLAIN" ("CHARSET" "us-ascii") NIL NIL "7BIT" 26 1) "MIXED"))'
        parsed = parse_fetch_response([text])
        self.assertEqual(parsed, {
            269: {
                respType: ([(b'TEXT', b'HTML', (b'CHARSET', b'us-ascii'), None, None, b'QUOTED-PRINTABLE', 55, 3),
                            (b'TEXT', b'PLAIN', (b'CHARSET', b'us-ascii'), None, None, b'7BIT', 26, 1)],
                           b'MIXED'),
                b'SEQ': 123}
        })
        self.assertTrue(parsed[269][respType].is_multipart)

    def check_BODYish_nested_multipart(self, respType):
        # Shared checker: multipart/mixed containing a multipart/alternative,
        # verifying is_multipart at each nesting level.
        text = b'1 (' + respType + b'(' \
            b'(' \
            b'("text" "html" ("charset" "utf-8") NIL NIL "7bit" 97 3 NIL NIL NIL NIL)' \
            b'("text" "plain" ("charset" "utf-8") NIL NIL "7bit" 62 3 NIL NIL NIL NIL)' \
            b'"alternative" ("boundary" "===============8211050864078048428==") NIL NIL NIL' \
            b')' \
            b'("text" "plain" ("charset" "utf-8") NIL NIL "7bit" 16 1 NIL ("attachment" ("filename" "attachment.txt")) NIL NIL) ' \
            b'"mixed" ("boundary" "===============0373402508605428821==") NIL NIL NIL))'
        parsed = parse_fetch_response([text])
        self.assertEqual(parsed, {1: {
            respType: (
                [
                    (
                        [
                            (b'text', b'html', (b'charset', b'utf-8'), None,
                             None, b'7bit', 97, 3, None, None, None, None),
                            (b'text', b'plain', (b'charset', b'utf-8'), None,
                             None, b'7bit', 62, 3, None, None, None, None)
                        ], b'alternative', (b'boundary', b'===============8211050864078048428=='), None, None, None
                    ),
                    (b'text', b'plain', (b'charset', b'utf-8'), None, None, b'7bit', 16, 1,
                     None, (b'attachment', (b'filename', b'attachment.txt')), None, None)
                ], b'mixed', (b'boundary', b'===============0373402508605428821=='), None, None, None,
            ),
            b'SEQ': 1,
        }})
        self.assertTrue(parsed[1][respType].is_multipart)
        self.assertTrue(parsed[1][respType][0][0].is_multipart)
        self.assertFalse(parsed[1][respType][0][0][0][0].is_multipart)

    def test_partial_fetch(self):
        body = b'01234567890123456789'
        self.assertEqual(parse_fetch_response(
            [(b'123 (UID 367 BODY[]<0> {20}', body), b')']),
            {367: {b'BODY[]<0>': body,
                   b'SEQ': 123}})

    def test_ENVELOPE(self):
        envelope_str = (b'1 (ENVELOPE ( '
                        b'"Sun, 24 Mar 2013 22:06:10 +0200" '
                        b'"subject" '
                        b'(("name" NIL "address1" "domain1.com")) '  # from (name and address)
                        b'((NIL NIL "address2" "domain2.com")) '  # sender (just address)
                        b'(("name" NIL "address3" "domain3.com") NIL) '  # reply to
                        b'NIL'  # to (no address)
                        b'((NIL NIL "address4" "domain4.com") '  # cc
                        b'("person" NIL "address4b" "domain4b.com")) '
                        b'NIL '  # bcc
                        b'"<reply-to-id>" '
                        b'"<msg_id>"))')
        output = parse_fetch_response([envelope_str], normalise_times=False)
        self.assertSequenceEqual(output[1][b'ENVELOPE'],
                                 Envelope(
                                     datetime(2013, 3, 24, 22, 6, 10, tzinfo=FixedOffset(120)),
                                     b"subject",
                                     (Address(b"name", None, b"address1", b"domain1.com"),),
                                     (Address(None, None, b"address2", b"domain2.com"),),
                                     (Address(b"name", None, b"address3", b"domain3.com"),),
                                     None,
                                     (Address(None, None, b"address4", b"domain4.com"),
                                      Address(b"person", None, b"address4b", b"domain4b.com")),
                                     None, b"<reply-to-id>", b"<msg_id>"
                                 )
                                 )

    def test_ENVELOPE_with_no_date(self):
        envelope_str = (
            b'1 (ENVELOPE ( '
            b'NIL '
            b'"subject" '
            b'NIL '
            b'NIL '
            b'NIL '
            b'NIL '
            b'NIL '
            b'NIL '
            b'"<reply-to-id>" '
            b'"<msg_id>"))'
        )
        output = parse_fetch_response([envelope_str], normalise_times=False)
        self.assertSequenceEqual(output[1][b'ENVELOPE'],
                                 Envelope(
                                     None,
                                     b"subject",
                                     None,
                                     None,
                                     None,
                                     None,
                                     None,
                                     None,
                                     b"<reply-to-id>", b"<msg_id>"
                                 )
                                 )

    def test_ENVELOPE_with_invalid_date(self):
        # An unparseable date degrades to None rather than raising.
        envelope_str = (b'1 (ENVELOPE ( '
                        b'"wtf" '  # bad date
                        b'"subject" '
                        b'NIL NIL NIL NIL NIL NIL '
                        b'"<reply-to-id>" "<msg_id>"))')
        output = parse_fetch_response([envelope_str], normalise_times=False)
        self.assertSequenceEqual(output[1][b'ENVELOPE'],
                                 Envelope(
                                     None,
                                     b"subject",
                                     None, None, None, None, None, None,
                                     b"<reply-to-id>", b"<msg_id>",
                                 )
                                 )
def test_ENVELOPE_with_empty_addresses(self):
envelope_str = (b'1 (ENVELOPE ( '
b'NIL '
b'"subject" '
b'(("name" NIL "address1" "domain1.com") NIL) '
b'(NIL (NIL NIL "address2" "domain2.com")) '
b'(("name" NIL "address3" "domain3.com") NIL ("name" NIL "address3b" "domain3b.com")) '
b'NIL'
b'((NIL NIL "address4" "domain4.com") '
b'("person" NIL "address4b" "domain4b.com")) '
b'NIL "<reply-to-id>" "<msg_id>"))')
output = parse_fetch_response([envelope_str], normalise_times=False)
self.assertSequenceEqual(output[1][b'ENVELOPE'],
Envelope(
None,
b"subject",
(Address(b"name", None, | |
fclusterdata(pairs, max_dist, criterion="distance")
# Finds how many clusters there are, and how many points belong to each cluster
uniq_vals, uniq_cnts = np.unique(cluster_inds, return_counts=True)
# Choose the cluster with the most points associated with it
chosen_clust = uniq_vals[np.argmax(uniq_cnts)]
# Retrieves the inds for the main merged_data in the chosen cluster
chosen_inds = np.where(cluster_inds == chosen_clust)[0]
# X column, then Y column - these are pixel coordinates
chosen_coord_pairs = np.stack([inds[1][chosen_inds], inds[0][chosen_inds]]).T
# Grabbing the none chosen point cluster indexes
other_clusts = [np.where(cluster_inds == cl)[0] for cl in uniq_vals if cl != chosen_clust]
# And more importantly the coordinates of the none chosen point clusters
other_coord_pairs = [np.stack([inds[1][cl_ind], inds[0][cl_ind]]).T for cl_ind in other_clusts]
cutout = np.zeros(masked_data.shape)
# Make a masking array to select only the points in the cluster
cutout[inds[0][chosen_inds], inds[1][chosen_inds]] = 1
# Mask the data
masked_data = masked_data * cutout
# Uses argmax to find the flattened coordinate of the max value, then unravel_index to convert
# it back to a 2D coordinate
max_coords = np.unravel_index(np.argmax(masked_data == masked_data.max()), masked_data.shape)
# Defines an astropy pix quantity of the peak coordinates
peak_pix = Quantity([max_coords[1], max_coords[0]], pix)
# Don't bother converting if the desired output coordinates are already pix, but otherwise use this
# objects coord_conv function to move to desired coordinate units.
if out_unit != pix:
peak_conv = self.coord_conv(peak_pix, out_unit)
else:
peak_conv = peak_pix
# Find if the peak coordinates sit near an edge/chip gap
edge_flag = self.near_edge(peak_pix)
if clean_point_clusters:
cleaned_clusters = []
for pcl in other_coord_pairs:
if len(pcl) > 4:
cleaned_clusters.append(pcl)
other_coord_pairs = cleaned_clusters
return peak_conv, edge_flag, chosen_coord_pairs, other_coord_pairs
def convolved_peak(self, mask: np.ndarray, redshift: float, cosmology, out_unit: UnitBase = deg) \
        -> Tuple[Quantity, bool]:
    """
    A very experimental peak finding algorithm, credit for the idea and a lot of the code in this function
    go to <NAME>. A radial profile (for instance a project king profile for clusters) is convolved
    with the ratemap, using a suitable radius for the object type (so for a cluster r might be ~1000kpc). As
    such objects that are similar to this profile will be boosted preferentially over objects that aren't,
    making it less likely that we accidentally select the peak brightness pixel from a point source remnant or
    something similar. The convolved image is then masked to only look at the area of interest, and the peak
    brightness pixel is found.

    :param np.ndarray mask: A numpy array used to weight the data. It should be 0 for pixels that
        aren't to be searched, and 1 for those that are.
    :param float redshift: The redshift of the source that we wish to find the X-ray centroid of.
    :param cosmology: An astropy cosmology object.
    :param UnitBase out_unit: The desired output unit of the peak coordinates, the default is degrees.
    :return: An astropy quantity containing the coordinate of the X-ray peak of this ratemap (given
        the user's mask), in units of out_unit, as specified by the user.
    :rtype: Tuple[Quantity, bool]
    """
    def cartesian(arrays):
        # Cartesian product of the given 1D arrays, returned as rows of
        # value combinations (used to build a square pixel offset grid).
        arrays = [np.asarray(a) for a in arrays]
        shape = (len(x) for x in arrays)
        ix = np.indices(shape, dtype=int)
        ix = ix.reshape(len(arrays), -1).T
        for n, arr in enumerate(arrays):
            ix[:, n] = arrays[n][ix[:, n]]
        return ix

    def projected_king(r, pix_size, beta):
        # Evaluates a projected King profile on a (2*n_pix+1)^2 pixel grid,
        # zeroed beyond radius r and normalised by the number of in-radius
        # pixels. r and pix_size share the same physical units.
        n_pix = int(r / pix_size)
        _ = np.arange(-n_pix, n_pix + 1)
        ds = cartesian([_, _])
        r_grid = np.hypot(ds[:, 0], ds[:, 1]).reshape((len(_), len(_))) * pix_size
        func = (1 + (r_grid / r)**2)**((-3*beta) + 0.5)
        res = (r_grid < r) * func
        return res / np.sum(r_grid < r)

    # Deliberate guard: everything below this raise is currently unreachable
    # until the method has been generalised for real use.
    raise NotImplementedError("The convolved peak method sort of works, but needs to be much more general"
                              " before its available for proper use.")

    if mask.shape != self.data.shape:
        raise ValueError("The shape of the mask array ({0}) must be the same as that of the data array "
                         "({1}).".format(mask.shape, self.data.shape))

    # Measure the angular span of ten pixels at the image centre, then convert
    # it to a physical distance at the source redshift to get the physical
    # size of a single pixel (the filter's resolution).
    start_pos = self.coord_conv(Quantity([int(self.shape[1]/2), int(self.shape[0]/2)], pix), deg)
    end_pos = self.coord_conv(Quantity([int(self.shape[1]/2) + 10, int(self.shape[0]/2)], pix), deg)

    separation = Quantity(np.sqrt(abs(start_pos[0].value - end_pos[0].value) ** 2 +
                                  abs(start_pos[1].value - end_pos[1].value) ** 2), deg)

    resolution = ang_to_rad(separation, redshift, cosmology) / 10

    # TODO Need to make this more general, with different profiles, also need to figure out what
    #  Lucas's code does and comment it
    # TODO Should probably go through different projected king profile parameters
    # TODO Could totally make this into a basic cluster finder combined with clustering algorithm
    # NOTE(review): the 1000 (kpc, presumably) radius and beta=3 are hard-coded
    # cluster-ish defaults — confirm intended units before generalising.
    filt = projected_king(1000, resolution.value, 3)
    n_cut = int(filt.shape[0] / 2)
    # Convolve the filter with the edge-masked data, trimming the convolution
    # padding so the result matches the original image shape.
    conv_data = fftconvolve(self.data*self.edge_mask, filt)[n_cut:-n_cut, n_cut:-n_cut]
    mask_conv_data = conv_data * mask

    # argmax over the boolean equality map returns the first maximal pixel,
    # unravel_index converts that flat index back to a 2D coordinate.
    max_coords = np.unravel_index(np.argmax(mask_conv_data == mask_conv_data.max()), mask_conv_data.shape)
    # Defines an astropy pix quantity of the peak coordinates
    peak_pix = Quantity([max_coords[1], max_coords[0]], pix)

    if out_unit != pix:
        peak_conv = self.coord_conv(peak_pix, out_unit)
    else:
        peak_conv = peak_pix

    # Find if the peak coordinates sit near an edge/chip gap
    edge_flag = self.near_edge(peak_pix)

    return peak_conv, edge_flag
def near_edge(self, coord: Quantity) -> bool:
    """
    Check whether a coordinate lies within +- 2 pixels of an edge or chip
    gap, using the edge mask generated for RateMap objects.

    :param Quantity coord: The coordinates to check.
    :return: A boolean flag as to whether the coordinates are near an edge.
    :rtype: bool
    """
    # A failing pixel conversion means the position is off the image
    # entirely, which we also classify as being near an edge.
    try:
        pix_pos = self.coord_conv(coord, pix).value
        # Sum the edge mask over a 5x5 window centred on the position; a
        # clean interior region is all ones and therefore totals 25.
        window_total = self.edge_mask[pix_pos[1] - 2:pix_pos[1] + 3,
                                      pix_pos[0] - 2:pix_pos[0] + 3].sum()
        return window_total != 25
    except ValueError:
        return True
def signal_to_noise(self, source_mask: np.ndarray, back_mask: np.ndarray, exp_corr: bool = True,
allow_negative: bool = False):
"""
A signal to noise calculation method which takes information on source and background regions, then uses
that to calculate a signal to noise for the source. This was primarily motivated by the desire to produce
valid SNR values for combined data, where uneven exposure times across the combined field of view could
cause issues with the usual approach of just summing the counts in the region images and scaling by area.
This method can also measure signal to noises without exposure time correction.
:param np.ndarray source_mask: The mask which defines the source region, ideally with interlopers removed.
:param np.ndarray back_mask: The mask which defines the background region, ideally with interlopers removed.
:param bool exp_corr: Should signal to noises be measured with exposure time correction, default is True. I
recommend that this be true for combined observations, as exposure time could change quite dramatically
across the combined product.
:param bool allow_negative: Should pixels in the background subtracted count map be allowed to go below
zero, which results in a lower signal to noise (and can result in a negative signal to noise).
:return: A signal to noise value for the source region.
:rtype: float
"""
# Perform some quick checks on the masks to check they are broadly compatible with this ratemap
if source_mask.shape != self.shape:
raise ValueError("The source mask shape {sm} is not the same as the ratemap shape "
"{rt}!".format(sm=source_mask.shape, rt=self.shape))
elif not (source_mask >= 0).all() or not (source_mask <= 1).all():
raise ValueError("The source mask has illegal values in it, there should only be ones and zeros.")
elif back_mask.shape != self.shape:
raise ValueError("The background mask shape {bm} is not the same as the ratemap shape "
"{rt}!".format(bm=back_mask.shape, rt=self.shape))
elif not (back_mask >= 0).all() or not (back_mask <= 1).all():
raise ValueError("The background mask has illegal values in it, there should only be ones and zeros.")
# Find the total | |
<gh_stars>1-10
import os
import glob
from ctypes import CDLL, c_int, c_void_p, c_double
from typing import Optional
import numpy as np
from scipy.linalg import blas
from scipy.sparse import csr_matrix
# Directory containing this module; the compiled kernels are expected to live
# alongside it as a platform-tagged shared object.
libdir = os.path.dirname(os.path.realpath(__file__))
try:
    # Take the first matching build artefact (the name embeds build tags).
    libfile = glob.glob('{}/liblinalg_kernels*.so'.format(libdir))[0]
    ext_lib = CDLL(os.path.join(libdir, libfile))
except Exception as e:
    # Fall back to the dynamic loader's search path (LD_LIBRARY_PATH).
    print('Warning: could not find {}/liblinalg_kernels*.so'.format(libdir))
    print('Caught exception: {}. Trying to load from LD_LIBRARY_PATH...'.format(e))
    ext_lib = CDLL('liblinalg_kernels.so')

# arg types
# Declaring argtypes makes ctypes type-check each call and convert plain
# Python ints/floats to the matching C types automatically.
ext_lib.csrcgs.argtypes = [
    c_int, c_int, c_int, c_int, c_int, c_void_p, c_void_p, c_void_p, c_void_p
]
ext_lib.csrjlt.argtypes = [c_int, c_int, c_int, c_int, c_void_p, c_void_p, c_void_p, c_void_p]
ext_lib.csrrk.argtypes = [
    c_int, c_int, c_int, c_double, c_void_p, c_void_p, c_void_p, c_double, c_void_p
]
ext_lib.csrsqn.argtypes = [
    c_int, c_int, c_int, c_double, c_void_p, c_void_p, c_void_p, c_double, c_void_p, c_void_p
]
ext_lib.gemm.argtypes = [c_int, c_int, c_int, c_double, c_void_p, c_void_p, c_double, c_void_p]
ext_lib.rmcgs.argtypes = [c_int, c_int, c_int, c_int, c_void_p, c_void_p]
ext_lib.rmdsc.argtypes = [c_int, c_int, c_void_p, c_void_p]
ext_lib.rmsqn.argtypes = [c_int, c_int, c_int, c_double, c_void_p, c_double, c_void_p, c_void_p]
ext_lib.scale.argtypes = [c_int, c_int, c_void_p, c_double]
ext_lib.set_randn.argtypes = [c_int, c_int, c_void_p]
ext_lib.set_value.argtypes = [c_int, c_int, c_void_p, c_double]

# return types
# Every kernel writes its result through pointer arguments and returns void.
for kernel in [
        ext_lib.csrcgs, ext_lib.csrjlt, ext_lib.csrrk, ext_lib.csrsqn, ext_lib.gemm, ext_lib.rmcgs,
        ext_lib.rmdsc, ext_lib.rmsqn, ext_lib.scale, ext_lib.set_randn, ext_lib.set_value
]:
    kernel.restype = None
def assert_shape(a: int, b: int) -> None:
    """Raise ValueError unless the two dimensions a and b agree."""
    if a == b:
        return
    raise ValueError('dimension mismatch: {} is not equal to {}.'.format(a, b))
def assert_dtype(A: np.ndarray, dtype: str) -> None:
    """Raise TypeError unless A has exactly the requested dtype."""
    if A.dtype == dtype:
        return
    raise TypeError('unsupported dtype: {}.'.format(A.dtype))
def assert_contiguous_type(A: np.ndarray, contiguous_type: str) -> None:
    """Raise TypeError unless the given memory-layout flag is set on A.

    Args:
        A (np.ndarray): array whose memory layout is checked.
        contiguous_type (str): numpy flags key, e.g. 'C_CONTIGUOUS' or
            'F_CONTIGUOUS'.

    Raises:
        TypeError: if the requested layout flag is not set on A.
    """
    # Truthiness test instead of the previous `is False` identity check:
    # comparing identity against False silently skips the check whenever
    # the flags lookup yields any non-bool falsy value (e.g. np.bool_).
    if not A.flags[contiguous_type]:
        raise TypeError('array is not {} as expected.'.format(contiguous_type))
def csrrk(alpha: float, A: csr_matrix, beta: float, C: np.ndarray) -> None:
    """
    Update C in-place with the sparse Gram product: C <- alpha * A' * A + beta * C,
    where A' is the transpose of A.

    Args:
        alpha (float): multiplier for the product A' * A.
        A (csr_matrix): sparse input matrix in CSR format.
        beta (float): multiplier applied to C before accumulation.
        C (np.ndarray): dense output matrix in row-major ordering (C_CONTIGUOUS).
    """
    assert_dtype(A.data, 'float64')
    assert_dtype(C, 'float64')
    assert_shape(A.shape[1], C.shape[0])
    assert_contiguous_type(C, 'C_CONTIGUOUS')
    n_rows = int(A.shape[0])
    n_cols = int(A.shape[1])
    ext_lib.csrrk(
        n_rows, n_cols, int(A.nnz), c_double(alpha),
        A.indptr.ctypes.data_as(c_void_p), A.indices.ctypes.data_as(c_void_p),
        A.data.ctypes.data_as(c_void_p), c_double(beta), C.ctypes.data_as(c_void_p)
    )
def rmsqn(alpha: float, A: np.ndarray, B: np.ndarray, beta: float, x: np.ndarray) -> None:
    """
    Update x in-place: x <- alpha * squared_row_norms(A * B) + beta * x.

    Args:
        alpha (float): multiplier for the squared row norms of A*B.
        A (np.ndarray): dense matrix in row-major ordering (C_CONTIGUOUS).
        B (np.ndarray): dense matrix in row-major ordering (C_CONTIGUOUS).
        beta (float): multiplier applied to x before accumulation.
        x (np.ndarray): output vector, modified in place.
    """
    assert_dtype(A, 'float64')
    assert_dtype(B, 'float64')
    assert_shape(A.shape[1], B.shape[0])
    assert_contiguous_type(A, 'C_CONTIGUOUS')
    assert_contiguous_type(B, 'C_CONTIGUOUS')
    n_rows = int(A.shape[0])
    inner_dim = int(A.shape[1])
    n_cols = int(B.shape[1])
    ext_lib.rmsqn(
        n_rows, n_cols, inner_dim, c_double(alpha),
        A.ctypes.data_as(c_void_p), c_double(beta), B.ctypes.data_as(c_void_p),
        x.ctypes.data_as(c_void_p)
    )
def csrsqn(alpha: float, A: csr_matrix, B: np.ndarray, beta: float, x: np.ndarray) -> None:
    """
    Compute the product: x <- alpha * squared_row_norms(A * B) + beta * x.

    Args:
        alpha (float): scalar to multiply the squared row norms of the product A*B.
        A (csr_matrix): matrix A in csr format.
        B (np.ndarray): matrix B in row-major ordering (C_CONTIGUOUS).
        beta (float): scalar to multiply x before adding to the squared row norms of A*B.
        x (np.ndarray): vector x
    """
    assert_dtype(A.data, 'float64')
    assert_dtype(B, 'float64')
    assert_shape(A.shape[1], B.shape[0])
    assert_contiguous_type(B, 'C_CONTIGUOUS')
    # NOTE(review): the local B is rebound to the Gram matrix B @ B.T here
    # (dgemm with trans_a=True computes (B.T)' * B.T = B @ B.T), and that
    # Gram matrix — not B itself — is what the C kernel receives below.
    # Presumably csrsqn in the shared library expects this precompute;
    # confirm against the kernel's C implementation.
    B = blas.dgemm(1.0, B.T, B.T, 0.0, trans_a=True, trans_b=False)
    ext_lib.csrsqn(
        int(A.shape[0]), int(B.shape[0]), int(A.nnz), c_double(alpha),
        A.indptr.ctypes.data_as(c_void_p), A.indices.ctypes.data_as(c_void_p),
        A.data.ctypes.data_as(c_void_p), c_double(beta), B.ctypes.data_as(c_void_p),
        x.ctypes.data_as(c_void_p)
    )
def rmdsc(B: np.ndarray, D: np.ndarray) -> None:
    """
    Scale the columns of B in-place by a diagonal matrix: B <- B * D.

    Args:
        B (np.ndarray): matrix in row-major ordering (C_CONTIGUOUS), updated in place.
        D (np.ndarray): diagonal entries represented as a 1-dimensional vector.
    """
    assert_dtype(D, 'float64')
    assert_dtype(B, 'float64')
    assert_shape(B.shape[1], D.shape[0])
    assert_contiguous_type(B, 'C_CONTIGUOUS')
    rows = int(B.shape[0])
    cols = int(B.shape[1])
    ext_lib.rmdsc(rows, cols, D.ctypes.data_as(c_void_p), B.ctypes.data_as(c_void_p))
def csrjlt(A: csr_matrix, m: int) -> np.ndarray:
    """
    Compute the product: B <- A' * G' where G has size m * n and elements from
    the standard normal distribution, rescaled by 1/sqrt(m). G is never formed
    explicitly; its entries are generated on-the-fly by the C kernel.

    Args:
        A (csr_matrix): matrix A in csr format.
        m (int): number of rows for the implicit matrix G.

    Returns:
        np.ndarray: sketched matrix B in row-major ordering (C_CONTIGUOUS).
    """
    assert_dtype(A.data, 'float64')
    sketch = np.zeros((A.shape[1], m))
    ext_lib.csrjlt(
        int(A.shape[1]), m, int(A.shape[0]), int(A.nnz),
        A.indptr.ctypes.data_as(c_void_p), A.indices.ctypes.data_as(c_void_p),
        A.data.ctypes.data_as(c_void_p), sketch.ctypes.data_as(c_void_p)
    )
    return sketch
def csrcgs(A: csr_matrix, m: Optional[int] = None, r: Optional[int] = None) -> np.ndarray:
    """
    Compute the product: B <- G * S * A, where S is a CountSketch of size r * n
    and G has size m * r with standard-normal entries rescaled by 1/sqrt(m).
    G is never formed explicitly; its entries are generated on-the-fly.

    Args:
        A (csr_matrix): matrix A in csr format.
        m (int): rows of the Gaussian sketch. Zero disables it; None defaults
            to 2*d.
        r (int): rows of the CountSketch transform. Zero disables it; None
            defaults to 5 * (d**2 + d).

    Returns:
        np.ndarray: matrix B in row-major ordering (C_CONTIGUOUS).
    """
    assert_dtype(A.data, 'float64')
    n = int(A.shape[0])
    d = int(A.shape[1])
    if r is None:
        r = 5 * (d**2 + d)
    if m is None:
        m = 2 * d
    if m > n or r > n:
        raise ValueError(f'Either m={m} or r={r} is larger than n={n}, the number of rows of A.')
    sketch_rows = m or r  # if m is zero, fallback to r
    B = np.zeros((sketch_rows, d))
    ext_lib.csrcgs(
        d, m, n, r, int(A.nnz), A.indptr.ctypes.data_as(c_void_p),
        A.indices.ctypes.data_as(c_void_p), A.data.ctypes.data_as(c_void_p),
        B.ctypes.data_as(c_void_p)
    )
    return B
def rmcgs(A: np.ndarray, m: Optional[int] = None, r: Optional[int] = None) -> np.ndarray:
    """
    Compute the product: B <- G * S * A, where S is a CountSketch of size r * n
    and G has size m * r with standard-normal entries rescaled by 1/sqrt(m).
    G is never formed explicitly; its entries are generated on-the-fly.

    Args:
        A (np.ndarray): matrix A in row-major ordering (C_CONTIGUOUS).
        m (int): rows of the Gaussian sketch. Zero disables it; None defaults
            to 2*d.
        r (int): rows of the CountSketch transform. Zero disables it; None
            defaults to 5 * (d**2 + d).

    Returns:
        np.ndarray: matrix B in row-major ordering (C_CONTIGUOUS).
    """
    assert_dtype(A, 'float64')
    assert_contiguous_type(A, 'C_CONTIGUOUS')
    n = int(A.shape[0])
    d = int(A.shape[1])
    if r is None:
        r = 5 * (d**2 + d)
    if m is None:
        m = 2 * d
    if m > n or r > n:
        raise ValueError(f'Either m={m} or r={r} is larger than n={n}, the number of rows of A.')
    sketch_rows = m or r  # if m is zero, fallback to r
    B = np.zeros((sketch_rows, d))
    ext_lib.rmcgs(d, m, n, r, A.ctypes.data_as(c_void_p), B.ctypes.data_as(c_void_p))
    return B
def set_value(B: np.ndarray, value: float) -> None:
    """
    Set all the elements of B equal to the given value.

    Args:
        B (np.ndarray): matrix B, updated in place.
        value (float): the value assigned to every element of B.
    """
    assert_dtype(B, 'float64')
    # Wrap the scalar in c_double explicitly, matching scale() and the other
    # wrappers in this module; relying on ctypes' implicit conversion only
    # works while the argtypes declaration above stays in sync with this call.
    ext_lib.set_value(int(B.shape[0]), int(B.shape[1]), B.ctypes.data_as(c_void_p),
                      c_double(value))
def set_randn(B: np.ndarray) -> None:
    """
    Fill B in-place with draws from the standard normal distribution.

    Args:
        B (np.ndarray): matrix B, overwritten in place.
    """
    assert_dtype(B, 'float64')
    rows = int(B.shape[0])
    cols = int(B.shape[1])
    ext_lib.set_randn(rows, cols, B.ctypes.data_as(c_void_p))
def scale(B: np.ndarray, alpha: float) -> None:
    """
    Rescale B in-place by the scalar alpha: B <- alpha * B.

    Args:
        B (np.ndarray): matrix B, updated in place.
        alpha (float): scaling factor.
    """
    assert_dtype(B, 'float64')
    rows = int(B.shape[0])
    cols = int(B.shape[1])
    ext_lib.scale(rows, cols, B.ctypes.data_as(c_void_p), c_double(alpha))
def gemm(alpha: float, A: np.ndarray, B: np.ndarray, beta: float, C: np.ndarray) -> None:
"""
Compute the product: C <- alpha * A * B + beta * C.
Args:
alpha (float): scalar to multiply the product A*B.
A (np.ndarray): matrix A in row-major ordering (C_CONTIGUOUS).
B (np.ndarray): matrix B in row-major ordering (C_CONTIGUOUS).
beta (float): scalar to multiply C before adding to alpha * A * B.
C (np.ndarray): matrix C in row-major ordering (C_CONTIGUOUS).
"""
assert_shape(A.shape[0], | |
list()
for n in range(len(token)):
l.append(''.join(char_templates(token[n])))
if n < MPL[0] or 'D' not in l: continue
for p in itertools.product(l):
yield p
if n == len(token) - 1: yield '{}$'.format(p)
# Ngram extraction and matching
def ngram_iter(v, n, bounds=False):
    """Iterate over (ngram, count) pairs of the character n-grams of ``v``.

    When ``bounds`` is True the string is padded with one space on each side
    so that word-boundary n-grams are produced as well. Strings shorter than
    ``n`` (before padding) yield nothing.
    """
    if len(v) < n:
        return iter(())
    padded = ' {} '.format(v) if bounds else v
    return chngrams(padded, n).items()
# Special datatype categories
C_OTHERS = 'Autres types'  # catch-all category for unrecognized datatypes
# Special fields and field prefixes
# F_COMPOSITE_REMAINDER = u'+++'
F_ORIGINAL_PATTERN = u'Original %s'
F_ACRONYMS = u'Acronyme' # For on-the-fly acronym detection (i.e. when acronym and expanded form occur in the same context)
F_VARIANTS = u'Variante' # For other forms of synonyms, including acronyms from pre-collected acro/expansion pair files
# Generic, MESR-domain and other field names
# (French display labels for every field/datatype the matchers can emit)
F_PERSON = u'Nom de personne'
F_FIRST = u'Prénom'
F_LAST = u'Nom'
F_TITLE = u'Titre'
F_JOURNAL = u'Titre de revue'
F_EMAIL = u'Email'
F_URL = u'URL'
F_INSEE = u'Code INSEE'
F_YEAR = u'Année'
F_MONTH = u'Mois'
F_DATE = u'Date'
F_PHONE = u'Téléphone'
F_GEO = u'Entité Géo'
F_ADDRESS = u'Adresse'
F_ZIP = u'Code Postal'
F_COUNTRY = u'Pays'
F_CITY = u'Commune'
F_STREET = u'Voie'
F_HOUSENUMBER = u'Numéro de voie'
F_DPT = u'Département'
F_REGION = u'Région'
F_STRUCTURED_TYPE = u'Type structuré'
F_TEXT = u'Texte'
F_ENGLISH = u'Anglais'
F_FRENCH = u'Français'
F_ID = u'ID'
F_ORG_ID = u'ID organisation'
F_PERSON_ID = u'ID personne'
F_ENTREPRISE = u'Entreprise' # Company name or legal (corporate) name
F_SIREN = u'SIREN'
F_SIRET = u'SIRET'
F_NIF = u'NIF' # French tax identification number (Numéro d'Immatriculation Fiscale)
F_NIR = u'NIR' # French personal identification number
F_TVA = u'TVA'
F_GRID_LABEL = u'Intitulé GRID'
F_PUBLI = u'Publication'
F_ARTICLE = u'Article'
F_ABSTRACT = u'Résumé'
F_ISSN = u'ISSN'
F_ARTICLE_CONTENT = u'Contenu d\'article'
F_PUBLI_ID = u'ID publication'
F_DOI = u'DOI'
F_CORPS_GRADE = u'Corps et Grades'
F_MESR = u'Entité MESR'
F_NNS = u'Numéro National de Structure'
F_UAI = u'UAI'
F_UMR = u'Numéro UMR'
F_RD_STRUCT = u'Structure de recherche'
F_RD_PARTNER = u'Partenaire de recherche'
F_CLINICALTRIAL_COLLAB = u'Collaborateur d\'essai clinique'
F_RD = u'Institution de recherche'
F_ETAB = u'Etablissement'
F_EDUC_NAT = u'Education Nationale'
F_ACADEMIE = u'Académie'
F_ETAB_NOTSUP = u'Etablissement des premier et second degrés'
F_ETAB_ENSSUP = u'Etablissement d\'Enseignement Supérieur'
F_APB_MENTION = u'Mention APB'
F_RD_DOMAIN = u'Domaine de Recherche'
# A very high-level institution, comprising
# 1. (higher) education entities
# 2. R&D organizations
# 3. entreprises/corporations
F_INSTITUTION = u'Institution'
F_CLINICALTRIAL_NAME = u'Nom d\'essai clinique'
F_MEDICAL_SPEC = u'Spécialité médicale'
F_BIOMEDICAL = u'Entité biomédicale'
F_PHYTO = u'Phyto'
F_AGRO = u'Entité agro'
F_RAISON_SOCIALE = u'Raison sociale'
# This mapping only covers supertype-subtype relationships
SUBTYPING_RELS = defaultdict(set)
# This mapping only covers composition relationships
COMPOSITION_RELS = defaultdict(set)
# This mapping covers both subtyping and composition relationships
PARENT_CHILD_RELS = defaultdict(set)
# Maps each field type to the extra tags (broader or related type labels)
# attached whenever a value of that type is detected.
TYPE_TAGS = {
    F_PERSON: [F_LAST, F_PERSON],
    F_FIRST: [F_PERSON],
    F_LAST: [u'Patronyme', F_PERSON],
    F_EMAIL: [F_ADDRESS, u'Courriel'],
    F_URL: [F_ADDRESS],
    F_INSEE: [F_ID, u'Code', u'Numéro'],
    F_NIF: [F_ID],
    F_NIR: [F_ID],
    # NOTE(review): F_NNS tags itself, unlike every other entry — confirm intended.
    F_NNS : [F_NNS, u'NumNatStruct', 'RNSR'],
    F_TVA: [F_ID],
    F_GRID_LABEL: [F_ID, 'GRID', 'Recherche', 'Index'],
    F_YEAR: [F_DATE],
    F_MONTH: [F_DATE],
    F_PHONE: [u'Numéro'],
    F_ADDRESS: [F_GEO],
    F_ZIP: [u'CP', u'Code', F_ADDRESS, F_GEO],
    F_COUNTRY: [F_ADDRESS, F_GEO],
    F_CITY: [u'Ville', F_ADDRESS, F_GEO],
    F_STREET: [u'Rue', F_ADDRESS, F_GEO],
    F_DPT: [F_ADDRESS, F_GEO],
    F_REGION: [F_ADDRESS, F_GEO],
    F_ID: [u'Identifiant', u'Code', u'Numéro'],
    F_ORG_ID: [u'Organisation', u'Structure', u'Identifiant', u'Code', u'Numéro'],
    F_PERSON_ID: [u'Personne', u'Individu', u'Identifiant', u'Code', u'Numéro'],
    F_ENTREPRISE: [u'Société', u'Organisation'],
    F_SIREN: [u'Identifiant', u'Numéro'],
    F_SIRET: [u'Identifiant', u'Numéro'],
    F_PUBLI_ID: [F_PUBLI, u'Identifiant', u'Numéro'],
    F_DOI: [u'Identifiant', u'Numéro'],
    F_UAI: [u'Identifiant', u'Numéro'],
    F_UMR: [u'Identifiant', u'Numéro', u'Recherche'],
    F_RD_STRUCT: [u'Organisation', u'Structure', u'Recherche'],
    F_RD_PARTNER: [u'Organisation', u'Structure', u'Recherche'],
    F_RD: [u'Recherche'],
    F_EDUC_NAT: [u'Education', u'Enseignement'],
    F_ACADEMIE: [u'Enseignement'],
    F_ETAB: [u'Enseignement'],
    F_ETAB_NOTSUP: [u'Primaire', u'Secondaire', u'Lycée', u'Collège'],
    F_ETAB_ENSSUP: [u'Enseignement supérieur'],
    F_CORPS_GRADE: [u'Corps', u'Grade', u'Fonction publique'],
    F_PHYTO: [u'Agro'],
    F_AGRO: [u'Agro'],
    F_MEDICAL_SPEC: [u'Médecine'],
    F_BIOMEDICAL: [u'Médecine']
}
# Stop words specific to certain data types
STOP_WORDS_CITY = ['commune', 'cedex', 'cdx']
# Base class for all type matchers
class TypeMatcher(object):
    """Abstract base for matchers that detect a datatype ``t`` in cell values.

    Subclasses implement ``match`` (per-cell) and may override
    ``match_all_field_values`` (per-column) and ``diversity``.
    """

    def __init__(self, t):
        # t: the field type label this matcher produces
        self.t = t
        # distinct matched reference values seen so far (consumed by check_diversity)
        self.diversion = set()

    def diversity(self):
        ''' Specifies the min number of distinct reference values to qualify a column-wide match
        (it is essential to carefully override this constraint when the labels in question represent
        singleton instances, i.e. specific entities, as opposed to a qualified and/or controlled
        vocabulary). '''
        return 1

    def __str__(self):
        return '{}<{}>'.format(self.__class__.__name__, self.t)

    def register_full_match(self, c, t, s, hit = None):
        # When no explicit score is given, fall back to the coverage of the hit
        # within the cell value.
        ms = cover_score(c.value, hit) if s is None else s
        # Only qualify the output field when reporting a type other than our own.
        outputFieldPrefix = None if self.t == t else self.t
        c.register_full_match(t, outputFieldPrefix, ms, hit)
        self.update_diversity(hit)

    def register_partial_match(self, c, t, ms, hit, span):
        # span: (start, end) character offsets of the hit within the cell value
        outputFieldPrefix = None if self.t == t else self.t
        c.register_partial_match(t, outputFieldPrefix, ms, hit, span)
        self.update_diversity(hit)

    @timed
    def match_all_field_values(self, f):
        """Run ``match`` over every cell of field ``f``, with error budgets.

        Bails out when, after at least 100 values, the overall error rate
        exceeds MAX_ERROR_RATE percent, or when more than MAX_FATAL_COUNT
        fatal errors occurred. Values that already failed once are counted
        again but never retried.
        """
        error_values = Counter()  # value -> occurrences of recoverable errors
        fatal_values = Counter()  # value -> occurrences of fatal errors
        values_seen = 0
        for vc in f.cells:
            error_count = sum(error_values.values())
            fatal_count = sum(fatal_values.values())
            if values_seen >= 100 and (error_count + fatal_count) * 100 > values_seen * MAX_ERROR_RATE:
                logging.warning('{}: bailing out after {} total matching errors'.format(self, error_count + fatal_count))
                break
            elif fatal_count > MAX_FATAL_COUNT:
                logging.warning('{}: bailing out after {} fatal matching errors'.format(self, fatal_count))
                break
            values_seen += 1
            v = vc.value
            # Skip (but keep counting) values that have already failed.
            if v in fatal_values:
                fatal_values[v] += 1
                continue
            elif v in error_values:
                error_values[v] += 1
                continue
            try:
                self.match(vc)
            # Handling non-fatal errors
            except ValueError as ve:
                logging.warning('{}: value error for "{}": {}'.format(self, vc.value, ve))
                if FAIL_FAST_MODE:
                    fatal_values[v] += 1
                else:
                    error_values[v] += 1
            except OverflowError as oe:
                logging.error('{} : overflow error (e.g. while parsing date) for "{}": {}'.format(self, vc.value, oe))
                if FAIL_FAST_MODE:
                    fatal_values[v] += 1
                else:
                    error_values[v] += 1
            # Handling fatal errors
            except RuntimeError as rte:
                logging.warning('{}: runtime error for "{}": {}'.format(self, vc.value, rte))
                fatal_values[v] += 1
            except TypeError as te:
                logging.warning('{}: type or parsing error for "{}": {}'.format(self, vc.value, te))
                fatal_values[v] += 1
            except UnicodeDecodeError as ude:
                logging.error('{} : unicode error while parsing input value "{}": {}'.format(self, vc.value, ude))
                fatal_values[v] += 1
            except urllib.error.URLError as ue:
                logging.warning('{}: request rejected for "{}": {}'.format(self, vc.value, ue))
                fatal_values[v] += 1
            except ConnectionError as cne:
                logging.warning('{}: connection error: {}'.format(self, cne))
                fatal_values[v] += 1
            except Exception as une:
                # last-resort catch: counted as fatal so a systematic failure
                # still triggers the bail-out above
                logging.warning('{}: unknown exception: {}'.format(self, une))
                fatal_values[v] += 1

    def update_diversity(self, hit):
        # Accept either a single hit or a list of hits.
        self.diversion |= set(hit if isinstance(hit, list) else [hit])

    def check_diversity(self, cells):
        """Posit type ``self.t`` on all ``cells`` iff enough distinct hits were seen.

        Resets the collected hit set either way.
        """
        div = len(self.diversion)
        if div <= 0: return
        self.diversion.clear()
        if div < self.diversity():
            logging.info('Not enough diversity matches of type {} produced by {} ({})'.format(self.t, self, div))
        else:
            logging.info('Positing value type {} by {}'.format(self.t, self))
            for c in cells: c.posit_type(self.t)
class GridMatcher(TypeMatcher):
    """Matches cell values against the GRID research-organization registry."""

    @timed
    def __init__(self):
        super(GridMatcher, self).__init__(F_GRID_LABEL)
        gridding.init_gridding()

    def match_all_field_values(self, f):
        """Bulk-resolve every cell value and register full matches for those
        that carry a GRID identifier."""
        labels = [cell.value for cell in f.cells]
        resolved = gridding.grid_label_set(labels)
        for cell in f.cells:
            entry = resolved[cell.value]
            if 'grid' in entry:
                self.register_full_match(cell, self.t, 100, entry['label'])
# Matching modes: exact equality vs. approximate ("close") matching.
MATCH_MODE_EXACT = 0
MATCH_MODE_CLOSE = 1
# stdnum-based matcher-normalizer class
class StdnumMatcher(TypeMatcher):
    """Matcher backed by a stdnum-style validate/normalize function pair.

    A value that passes ``validator`` is registered as a full match with the
    normalized form returned by ``normalizer``; otherwise a TypeError is
    raised (counted as fatal by TypeMatcher.match_all_field_values).
    """

    @timed
    def __init__(self, t, validator, normalizer):
        """
        Args:
            t: field type label produced on a match.
            validator: callable(value) -> bool.
            normalizer: callable(value) -> canonical form.
        """
        super(StdnumMatcher, self).__init__(t)
        # FIX: removed redundant `self.t = t` — the base class constructor
        # already stores it.
        self.validator = validator
        self.normalizer = normalizer

    @timed
    def match(self, c):
        """Validate cell ``c``; register the normalized value or raise TypeError."""
        nv = c.value
        if self.validator(nv):
            nv0 = self.normalizer(nv)
            self.register_full_match(c, self.t, 100, nv0)
        else:
            raise TypeError('{} stdnum matcher did not validate "{}"'.format(self, c))
# Regex-based matcher-normalizer class
class RegexMatcher(TypeMatcher):
    """Matches cell values against a regular expression.

    Args:
        t: field type label produced on a match.
        p: the regex pattern.
        g: index of the capture group holding the normalized hit.
        ignoreCase: compile/search with re.I.
        partial: scan with re.findall instead of anchored re.match.
        validator: optional predicate applied to the captured hit.
        neg: if True, a match *negates* type t on the cell instead of asserting it.
        wordBoundary: if True, wrap the pattern with word-boundary markers.
    """

    @timed
    def __init__(self, t, p, g = 0, ignoreCase = False, partial = False, validator = None, neg = False, wordBoundary = True):
        super(RegexMatcher, self).__init__(t)
        # FIX: `wordBoundary` was accepted but never honored — the pattern was
        # unconditionally wrapped with word boundaries. Only wrap when asked
        # (default True preserves the previous behavior for all existing callers).
        self.p = pattern_with_word_boundary(p) if wordBoundary else p
        self.g = g
        self.flags = re.I if ignoreCase else 0
        self.partial = partial
        self.validator = validator
        self.neg = neg
        self.wordBoundary = wordBoundary
        logging.info('SET UP regex matcher for <%s> (length %d)', self.t, len(self.p))

    @timed
    def match(self, c):
        """Match cell ``c``: register a full/partial match, negate the type in
        ``neg`` mode, or raise ValueError when nothing matched."""
        if self.partial:
            ms = re.findall(self.p, c.value, self.flags)
            if ms:
                if self.neg:
                    c.negate_type(self.t)
                    return
                else:
                    for m in ms:
                        # findall yields tuples when the pattern has several
                        # groups: select the configured group.
                        if not isinstance(m, str):
                            if len(m) > self.g:
                                m = m[self.g]
                            else:
                                continue
                        i1 = c.value.find(m)
                        if i1 >= 0:
                            if self.validator is None or self.validator(m):
                                self.register_partial_match(c, self.t, 100, m, (i1, i1 + len(m)))
                                return
                        else:
                            raise RuntimeError('{} could not find regex multi-match "{}" in original "{}"'.format(self, m, c.value))
        else:
            m = re.match(self.p, c.value, self.flags)
            if m:
                if self.neg:
                    c.negate_type(self.t)
                    return
                else:
                    try:
                        grp = m.group(self.g)
                        if self.validator is None or self.validator(grp):
                            # A group spanning the whole value is a full match;
                            # anything shorter is a (prefix) partial match.
                            if len(grp) == len(c.value):
                                self.register_full_match(c, self.t, 100, grp)
                            else:
                                self.register_partial_match(c, self.t, 100, grp, (0, len(grp)))
                            return
                    except IndexError:
                        raise RuntimeError('No group {} matched in regex {} for input "{}"'.format(self.g, self.p, c))
        raise ValueError('{} unmatched "{}"'.format(self, c.value))
def build_vocab_regex(vocab, partial):
    """Build the alternation regex ``(term1|term2|...).*$`` over ``vocab``.

    FIX: the original returned ``'({}).*$'.format(j if partial else j)`` —
    both branches of the conditional were identical, so ``partial`` had no
    effect. The dead conditional is removed; behavior is unchanged (partial
    vs. anchored matching is controlled by RegexMatcher's own ``partial`` flag).

    Args:
        vocab: iterable of regex-safe vocabulary terms.
        partial: accepted for interface compatibility; currently unused here.
    """
    alternation = '|'.join(vocab)
    return '({}).*$'.format(alternation)
class VocabMatcher(RegexMatcher):
    ''' When matcher is not None, the matching will be dispatched to it, which is useful when normalization is far costlier
    than type detection. '''

    @timed
    def __init__(self, t, vocab, ignoreCase = False, partial = False, validator = None, neg = False, matcher = None):
        super(VocabMatcher, self).__init__(t, build_vocab_regex(vocab, partial),
            g = 0, ignoreCase = ignoreCase, partial = partial, validator = validator, neg = neg)
        # Optional delegate matcher used instead of the vocabulary regex.
        self.matcher = matcher

    @timed
    def match(self, c):
        # Delegate when a costlier normalizing matcher was supplied.
        if self.matcher is not None:
            logging.debug('%s normalizing "%s" from %s', self, c, self.matcher)
            self.matcher.match(c)
        # NOTE(review): when self.matcher is None the left operand of this
        # `or` is always True, so this branch is always taken and the final
        # `else` (the TypeError below) is unreachable. Possibly the intended
        # condition was only `self.t not in c.non_excluded_types()` — confirm.
        elif self.matcher is None or self.t not in c.non_excluded_types():
            logging.debug('%s normalizing "%s" from vocab only', self, c)
            super(VocabMatcher, self).match(c)
        else:
            raise TypeError('{} matcher only found excluded type in "{}"'.format(self, c))
# Tokenization-based matcher-normalizer class
def tokenization_based_score(matchedSrcTokens, srcTokens, matchedRefPhrase, refPhrase,
                             minSrcTokenRatio = 80, minSrcCharRatio = 70, minRefCharRatio = 60):
    """Score a token-level match between a source value and a reference phrase.

    Returns the reference character-coverage ratio (a float in 0..100) when
    all three thresholds are met, otherwise 0.

    Note: the ``matchedRefPhrase`` argument is effectively ignored — the
    original code immediately overwrote it with ' '.join(matchedSrcTokens).
    That behavior is kept (made explicit with a local variable) so the
    signature and results stay backward compatible.

    Args:
        matchedSrcTokens: source tokens that matched the reference.
        srcTokens: all tokens of the source value.
        matchedRefPhrase: unused (see note above).
        refPhrase: the full reference phrase being matched against.
        minSrcTokenRatio: min % of source tokens that must have matched.
        minSrcCharRatio: min % of source characters that must have matched.
        minRefCharRatio: min % of reference characters covered by the match.
    """
    srcTokenRatio = 100 * len(matchedSrcTokens) / len(srcTokens)
    if srcTokenRatio < minSrcTokenRatio:
        return 0
    srcCharRatio = 100 * sum(len(t) for t in matchedSrcTokens) / sum(len(t) for t in srcTokens)
    if srcCharRatio < minSrcCharRatio:
        return 0
    joinedMatch = ' '.join(matchedSrcTokens)
    refCharRatio = 100 * len(joinedMatch) / len(refPhrase)
    return 0 if refCharRatio < minRefCharRatio else refCharRatio
def stop_words_as_normalized_list(stopWords):
    """Return the case-normalized stop words as a list ([] when None).

    FIX: removed the redundant ``list(...)`` wrapper around the list
    comprehension — a comprehension already builds a list.
    """
    if stopWords is None:
        return []
    return [case_phrase(s, False) for s in stopWords]
# Above this phrase length, tokenizing many source strings becomes too costly.
DTC = 6 # Dangerous Token Count (becomes prohibitive to tokenize many source strings above this!)
class TokenizedMatcher(TypeMatcher):
@timed
def __init__(self, t, lexicon, maxTokens = 0, scorer = tokenization_based_score, distinctCount = 0, stopWords = None):
super(TokenizedMatcher, self).__init__(t)
currentMax = maxTokens
self.scorer = scorer
self.phrasesMap = validated_lexical_map(lexicon)
self.tokenIdx = dict()
self.distinctCount = distinctCount
self.stopWords = stop_words_as_normalized_list(stopWords)
for np in self.phrasesMap.keys():
tokens = list([t for t in np.split(' ') if t not in self.stopWords])
if len(tokens) < 1: continue
if maxTokens < 1 and len(tokens) > currentMax:
currentMax = len(tokens)
if currentMax > DTC:
logging.warning('Full tokenization of lexicon: encountered token of length {}, above DTC!'.format(currentMax))
matchedRefPhrase = ' '.join(tokens[:currentMax])
if matchedRefPhrase not in self.tokenIdx or len(self.tokenIdx[matchedRefPhrase]) < len(np):
self.tokenIdx[matchedRefPhrase] = np
self.maxTokens = currentMax
logging.info('SET UP %d-token matcher (%s-defined length) for <%s> with lexicon of size %d, total variants %d',
self.maxTokens, 'user' if maxTokens > | |
<gh_stars>0
"""
Copyright 2010 <NAME> <<EMAIL>>
This file is part of PyCAM.
PyCAM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyCAM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCAM. If not, see <http://www.gnu.org/licenses/>.
"""
# multiprocessing is imported later
# import multiprocessing
import os
import platform
import queue
import random
import signal
import socket
import sys
import time
import uuid
from pycam.errors import CommunicationError
import pycam.Utils
import pycam.Utils.log
log = pycam.Utils.log.get_logger()
try:
from multiprocessing.managers import SyncManager as _SyncManager
except ImportError as msg:
log.debug("Failed to import multiprocessing.managers.SyncMananger: %s", msg)
else:
# this class definition needs to be at the top level - for pyinstaller
class TaskManager(_SyncManager):
    """SyncManager whose server process ignores SIGINT and swallows socket
    errors, so Ctrl-C on the client side does not tear the server down."""

    @classmethod
    def _run_server(cls, *args):
        # make sure that the server ignores SIGINT (KeyboardInterrupt)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # prevent connection errors to trigger exceptions
        try:
            _SyncManager._run_server(*args)
        except socket.error:
            pass
# Default TCP port for the task server.
DEFAULT_PORT = 1250
# TODO: create one or two classes for these functions (to get rid of the globals)
# possible values:
# None: not initialized
# False: no threading
# multiprocessing: the multiprocessing module is imported and enabled later
__multiprocessing = None
# needs to be initialized, if multiprocessing is enabled
__num_of_processes = None
# the TaskManager instance, or None while uninitialized / purely local
__manager = None
# shared boolean ("b") Value used to signal workers to shut down
__closing = None
# unique id identifying this process as a task source
__task_source_uuid = None
# bookkeeping of completed jobs (used outside this excerpt — presumably job ids)
__finished_jobs = []
# warning keys already emitted, to avoid duplicate log messages
__issued_warnings = []
def run_in_parallel(*args, **kwargs):
    """Dispatch a job to the remote worker pool when one is active, otherwise
    run it locally (serially when in debug mode)."""
    global __manager
    if __manager is not None:
        return run_in_parallel_remote(*args, **kwargs)
    if pycam.Utils.log.is_debug():
        # force serial processing in debug mode
        kwargs = dict(kwargs)
        kwargs["disable_multiprocessing"] = True
    return run_in_parallel_local(*args, **kwargs)
def is_pool_available():
    """Return True when a worker-pool manager has been initialized."""
    return __manager is not None
def is_multiprocessing_available():
    """Return True if the multiprocessing module can actually be used here."""
    # Frozen (pyinstaller) Windows builds cannot use multiprocessing.
    if (pycam.Utils.get_platform() == pycam.Utils.OSPlatform.WINDOWS) and \
            hasattr(sys, "frozen") and sys.frozen:
        return False
    try:
        import multiprocessing
        # try to initialize a semaphore - this can trigger shm access failures
        # (e.g. on Debian Lenny with Python 2.6.6)
        multiprocessing.Semaphore()
        return True
    except ImportError:
        # warn only once per failure kind
        if "missing_module" not in __issued_warnings:
            log.info("Python's multiprocessing module is missing: disabling parallel processing")
            __issued_warnings.append("missing_module")
    except OSError:
        if "shm_access_failed" not in __issued_warnings:
            log.info("Python's multiprocessing module failed to acquire read/write access to "
                     "shared memory (shm) - disabling parallel processing")
            __issued_warnings.append("shm_access_failed")
    return False
def is_multiprocessing_enabled():
    """Return True if parallel processing was enabled via init_threading()."""
    return bool(__multiprocessing)
def is_server_mode_available():
    """Return True if the task-server mode can be offered on this platform."""
    # the following definition should be kept in sync with the documentation in
    # docs/parallel-processing.md
    return is_multiprocessing_available()
def get_number_of_processes():
    """Return the configured worker count (1 when parallelism is disabled)."""
    return 1 if __num_of_processes is None else __num_of_processes
def get_number_of_cores():
    """Return the CPU core count, or None when multiprocessing is unavailable."""
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except ImportError:
        return None
def get_pool_statistics():
    """Return per-worker statistics from the pool manager ([] when local-only)."""
    global __manager
    if __manager is None:
        return []
    return __manager.statistics().get_worker_statistics()
def get_task_statistics():
    """Return queue/cache sizes of the task manager (empty dict when local-only)."""
    global __manager
    stats = {}
    if __manager is None:
        return stats
    try:
        stats["tasks"] = __manager.tasks().qsize()
        stats["results"] = __manager.results().qsize()
    except NotImplementedError:
        # this can happen on MacOS (see multiprocessing doc)
        pass
    stats["pending"] = __manager.pending_tasks().length()
    stats["cache"] = __manager.cache().length()
    return stats
class ManagerInfo:
    """Pickle-friendly holder for the shared manager objects.

    Kept as a separate top-level class (with plain getter methods used as
    manager-registration callables) so "multiprocessing" can pickle it.
    """

    def __init__(self, tasks, results, stats, cache, pending):
        self.tasks_queue = tasks
        self.results_queue = results
        self.statistics = stats
        self.cache = cache
        self.pending_tasks = pending

    def get_tasks_queue(self):
        """Queue of tasks waiting to be processed."""
        return self.tasks_queue

    def get_results_queue(self):
        """Queue of finished task results."""
        return self.results_queue

    def get_statistics(self):
        """Shared statistics object."""
        return self.statistics

    def get_cache(self):
        """Shared process data cache."""
        return self.cache

    def get_pending_tasks(self):
        """Registry of tasks currently being worked on."""
        return self.pending_tasks
def init_threading(number_of_processes=None, enable_server=False, remote=None, run_server=False,
                   server_credentials="", local_port=DEFAULT_PORT):
    """Configure the module-wide parallel-processing state.

    Args:
        number_of_processes: local worker count; None selects cpu_count()
            (serial on a single-core machine), values below 1 disable local
            workers (only meaningful together with a server/remote setup).
        enable_server: additionally accept remote worker connections.
        remote: "host[:port]" of a remote task server to connect to.
        run_server: run as a dedicated server and wait for connections.
        server_credentials: authkey shared between server and clients.
        local_port: TCP port used when binding locally.

    Returns:
        An error message object on connection/authentication failure,
        otherwise None.
    """
    global __multiprocessing, __num_of_processes, __manager, __closing, __task_source_uuid
    if __multiprocessing:
        # kill the manager and clean everything up for a re-initialization
        cleanup()
    if (not is_server_mode_available()) and (enable_server or run_server):
        # server mode is disabled for the Windows pyinstaller standalone
        # due to "pickle errors". How to reproduce: run the standalone binary
        # with "--enable-server --server-auth-key foo".
        feature_matrix_text = ("Take a look at the wiki for a matrix of platforms and available "
                               "features: http://pycam.sourceforge.net/parallel-processing")
        if enable_server:
            log.warn("Unable to enable server mode with your current setup.\n%s",
                     feature_matrix_text)
        elif run_server:
            log.warn("Unable to run in server-only mode with the Windows standalone "
                     "executable.\n%s", feature_matrix_text)
        else:
            # no further warnings required
            pass
        enable_server = False
        run_server = False
    # only local -> no server settings allowed
    if (not enable_server) and (not run_server):
        remote = None
        run_server = None
        server_credentials = ""
    if not is_multiprocessing_available():
        __multiprocessing = False
        # Maybe a multiprocessing feature was explicitly requested?
        # Issue some warnings if necessary.
        multiprocessing_missing_text = (
            "Failed to enable server mode due to a lack of 'multiprocessing' capabilities. Please "
            "use Python2.6 or install the 'python-multiprocessing' package.")
        if enable_server:
            log.warn("Failed to enable server mode due to a lack of 'multiprocessing' "
                     "capabilities. %s", multiprocessing_missing_text)
        elif run_server:
            log.warn("Failed to run in server-only mode due to a lack of 'multiprocessing' "
                     "capabilities. %s", multiprocessing_missing_text)
        else:
            # no further warnings required
            pass
    else:
        import multiprocessing
        if number_of_processes is None:
            # use defaults
            # don't enable threading for a single cpu
            if (multiprocessing.cpu_count() > 1) or remote or run_server or enable_server:
                __multiprocessing = multiprocessing
                __num_of_processes = multiprocessing.cpu_count()
            else:
                __multiprocessing = False
        elif (number_of_processes < 1) and (remote is None) and (enable_server is None):
            # Zero processes are allowed if we use a remote server or offer a
            # server.
            # NOTE(review): enable_server defaults to False, never None, so
            # `enable_server is None` can never hold here — this looks like it
            # was meant to be `not enable_server`; confirm before changing.
            __multiprocessing = False
        else:
            __multiprocessing = multiprocessing
            __num_of_processes = number_of_processes
    # initialize the manager
    if not __multiprocessing:
        __manager = None
        log.info("Disabled parallel processing")
    elif not enable_server and not run_server:
        # local pool only: workers are spawned per job, no manager required
        __manager = None
        log.info("Enabled %d parallel local processes", __num_of_processes)
    else:
        # with multiprocessing
        log.info("Enabled %d parallel local processes", __num_of_processes)
        log.info("Allow remote processing")
        # initialize the uuid list for all workers
        worker_uuid_list = [str(uuid.uuid1()) for index in range(__num_of_processes)]
        __task_source_uuid = str(uuid.uuid1())
        if remote is None:
            # try to guess an appropriate interface for binding
            if pycam.Utils.get_platform() == pycam.Utils.OSPlatform.WINDOWS:
                # Windows does not support a wildcard interface listener
                all_ips = pycam.Utils.get_all_ips()
                if all_ips:
                    address = (all_ips[0], local_port)
                    log.info("Binding to local interface with IP %s", str(all_ips[0]))
                else:
                    raise CommunicationError("Failed to find any local IP")
            else:
                # empty hostname -> wildcard interface
                # (this does not work with Windows - see above)
                address = ('', local_port)
        else:
            # parse an optional ":port" suffix from the remote spec
            if ":" in remote:
                host, port = remote.split(":", 1)
                try:
                    port = int(port)
                except ValueError:
                    log.warning("Invalid port specified: '%s' - using default port (%d) instead",
                                port, DEFAULT_PORT)
                    port = DEFAULT_PORT
            else:
                host = remote
                port = DEFAULT_PORT
            address = (host, port)
        if remote is None:
            # acting as the server: create the shared objects and register
            # callables that hand them out to connecting clients
            tasks_queue = multiprocessing.Queue()
            results_queue = multiprocessing.Queue()
            statistics = ProcessStatistics()
            cache = ProcessDataCache()
            pending_tasks = PendingTasks()
            info = ManagerInfo(tasks_queue, results_queue, statistics, cache, pending_tasks)
            TaskManager.register("tasks", callable=info.get_tasks_queue)
            TaskManager.register("results", callable=info.get_results_queue)
            TaskManager.register("statistics", callable=info.get_statistics)
            TaskManager.register("cache", callable=info.get_cache)
            TaskManager.register("pending_tasks", callable=info.get_pending_tasks)
        else:
            # acting as a client: register the proxies without callables
            TaskManager.register("tasks")
            TaskManager.register("results")
            TaskManager.register("statistics")
            TaskManager.register("cache")
            TaskManager.register("pending_tasks")
        __manager = TaskManager(address=address, authkey=server_credentials)
        # run the local server, connect to a remote one or begin serving
        try:
            if remote is None:
                __manager.start()
                log.info("Started a local server.")
            else:
                __manager.connect()
                log.info("Connected to a remote task server.")
        except (multiprocessing.AuthenticationError, socket.error) as err_msg:
            __manager = None
            return err_msg
        except EOFError:
            __manager = None
            raise CommunicationError("Failed to bind to socket for unknown reasons")
        # create the spawning process
        __closing = __manager.Value("b", False)
        if __num_of_processes > 0:
            # only start the spawner, if we want to use local workers
            spawner = __multiprocessing.Process(name="spawn", target=_spawn_daemon,
                                               args=(__manager, __num_of_processes,
                                                     worker_uuid_list))
            spawner.start()
        else:
            spawner = None
        # wait forever - in case of a server
        if run_server:
            log.info("Running a local server and waiting for remote connections.")
            # the server can be stopped via CTRL-C - it is caught later
            if spawner is not None:
                spawner.join()
def cleanup():
    """Shut down the manager/worker infrastructure and reset module state."""
    global __multiprocessing, __manager, __closing
    if __multiprocessing and __closing:
        log.debug("Shutting down process handler")
        try:
            # signal all workers to stop
            __closing.set(True)
        except (IOError, EOFError):
            log.debug("Connection to manager lost during cleanup")
        # Only managers that were started via ".start()" implement a "shutdown".
        # Managers started via ".connect" may skip this.
        if hasattr(__manager, "shutdown"):
            # wait for the spawner and the worker threads to go down
            time.sleep(2.5)
            # __manager.shutdown()
            time.sleep(0.1)
            # check if it is still alive and kill it if necessary
            if __manager._process.is_alive():
                __manager._process.terminate()
    # reset the module state so a later init_threading() starts from scratch
    __manager = None
    __closing = None
    __multiprocessing = None
def _spawn_daemon(manager, number_of_processes, worker_uuid_list):
""" wait for items in the 'tasks' queue to appear and then spawn workers
"""
global __multiprocessing, __closing
tasks = manager.tasks()
results = manager.results()
stats = manager.statistics()
cache = manager.cache()
pending_tasks = manager.pending_tasks()
log.debug("Spawner daemon started with %d processes", number_of_processes)
log.debug("Registering %d worker threads: | |
<filename>recent2.py
#!/usr/bin/env python
#https://github.com/kislyuk/argcomplete#synopsis
import argcomplete
import argparse
import hashlib
import json
import os
import re
import socket
import sqlite3
import sys
import time
from pathlib import Path
from tabulate import tabulate
from datetime import datetime
# pip install python-dateutil
from dateutil import tz
# Path of the sqlite database; RECENT_DB overrides the default ~/.recent.db.
recent_db = os.getenv('RECENT_DB', os.environ['HOME'] + '/.recent.db')
# The PROMPT_COMMAND hook users must install for command logging to work.
EXPECTED_PROMPT = 'log-recent -r $__bp_last_ret_value -c "$(HISTTIMEFORMAT= history 1)" -p $$'
class Term:
    """ANSI escape sequences used to colorize terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    # ENDC resets all attributes back to the terminal default.
    ENDC = '\033[0m'
    LIGHTCYAN = '\033[1;36m'
    LIGHTGRAY = '\033[0;37m'
    YELLOW = '\033[0;33m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class DB:
    """Namespace for the SQL statements and schema metadata used by recent."""
    # Bump this whenever the schema changes; see migrate() for upgrades.
    SCHEMA_VERSION = 2
    CASE_ON = "PRAGMA case_sensitive_like = true"
    # Introspect the CREATE statement of the commands table.
    GET_COMMANDS_TABLE_SCHEMA = """
select sql
from sqlite_master
where type = 'table' and name = 'commands'"""
    # NOTE(dotslash): I haven't found a way to send json using ?s. So doing with string formats.
    INSERT_ROW = """
insert into commands
(command_dt,command,pid,return_val,pwd,session,json_data)
values (
datetime(?, 'unixepoch'), -- command_dt
?, -- command
?, -- pid
?, -- return_val
?, -- pwd
?, -- session
{} -- json_data
)"""
    INSERT_ROW_NO_JSON = """
insert into commands
(command_dt,command,pid,return_val,pwd,session,json_data)
values (
datetime(?, 'unixepoch'), -- command_dt
?, -- command
?, -- pid
?, -- return_val
?, -- pwd
?, -- session
null -- json_data
)"""
    INSERT_SESSION = """
insert into sessions
(created_dt, updated_dt, term, hostname, user, sequence, session)
values (
datetime('now','localtime'), datetime('now','localtime'), -- created_dt, updated_dt
?, -- term
?, -- hostname
?, -- user
?, -- sequence
? -- session
)"""
    UPDATE_SESSION = """
update sessions
set updated_dt = datetime('now','localtime'), sequence = ?
where session = ?"""
    # TAIL_N_ROWS's columns (column order is the same as in TAIL_N_ROWS).
    TAIL_N_ROWS_COLUMNS = 'command_dt,command,pid,return_val,pwd,session,json_data'.split(',')
    TAIL_N_ROWS_DEDUP_COLUMNS = 'command_dt,command'.split(',')
    # NOTE(review): the bare `where` below is presumably completed with filter
    # clauses by the query builder before execution — confirm with the caller.
    TAIL_N_ROWS_TEMPLATE = """
select command_dt,command,pid,return_val,pwd,session,json_data
from (
select *
from commands
where
order by command_dt desc limit ?
)
order by command_dt"""
    TAIL_N_ROWS_TEMPLATE_DEDUP = """
select *
from (
select max(command_dt) as command_dt, command
from commands
where
group by command
order by command_dt desc limit ?
)
order by command_dt"""
    GET_SESSION_SEQUENCE = """select sequence from sessions where session = ?"""
    # Setup: Create tables.
    CREATE_COMMANDS_TABLE = """
create table if not exists commands (
command_dt timestamp,
command text,
pid int,
return_val int,
pwd text,
session text,
json_data json
)"""
    CREATE_SESSIONS_TABLE = """
create table if not exists sessions (
session text primary key not null,
created_dt timestamp,
updated_dt timestamp,
term text,
hostname text,
user text,
sequence int
)"""
    CREATE_DATE_INDEX = """
create index if not exists command_dt_ind
on commands (command_dt)"""
    # Schema version is stored in sqlite's user_version pragma.
    GET_SCHEMA_VERSION = """pragma user_version"""
    UPDATE_SCHEMA_VERSION = """pragma user_version = """
    # Migrate from v1 to v2.
    MIGRATE_1_2 = "alter table commands add column json_data json"
class Session:
    """Identifies the current shell session and tracks its history sequence
    number, so duplicate prompt firings (e.g. plain Enter presses) can be
    recognized and skipped by the logger."""

    @classmethod
    def session_id_string(cls, pid=None):
        # TODO(sai): Should this always be ppid?
        pid = pid or os.getppid()
        # This combination of ENV vars *should* provide a unique session
        # TERM_SESSION_ID for OS X Terminal
        # XTERM for xterm
        # TMUX, TMUX_PANE for tmux
        # STY for GNU screen
        # SHLVL handles nested shells
        seed = "{}-{}-{}-{}-{}-{}-{}".format(
            os.getenv('TERM_SESSION_ID', ''),
            os.getenv('WINDOWID', ''),
            os.getenv('SHLVL', ''),
            os.getenv('TMUX', ''),
            os.getenv('TMUX_PANE', ''),
            os.getenv('STY', ''),
            pid,
        ) # yapf: disable
        return hashlib.md5(seed.encode('utf-8')).hexdigest()

    def __init__(self, pid, sequence):
        # sequence: history sequence number reported by the shell hook
        self.sequence = sequence
        # empty: True when this prompt firing carries no new command to log;
        # set by update() below and consumed by log_command().
        self.empty = False
        self.id = Session.session_id_string(pid)

    def update(self, conn):
        """Insert this session's row, or refresh its sequence if it exists.

        A brand-new session is marked empty (presumably because the command
        returned by `history 1` predates the session — confirm), as is an
        existing session whose sequence did not advance (e.g. a bare Enter).
        """
        c = conn.cursor()
        try:
            term = os.getenv('TERM', '')
            hostname = socket.gethostname()
            user = os.getenv('USER', '')
            c.execute(DB.INSERT_SESSION, [term, hostname, user, self.sequence, self.id])
            self.empty = True
        except sqlite3.IntegrityError:
            # Carriage returns need to be ignored
            expected_sequence = c.execute(DB.GET_SESSION_SEQUENCE, [self.id]).fetchone()[0]
            if expected_sequence == int(self.sequence):
                self.empty = True
            c.execute(DB.UPDATE_SESSION, [self.sequence, self.id])
        c.close()
def migrate(cur_version, conn):
    """Bring the database schema up to DB.SCHEMA_VERSION.

    Only version 0 (fresh/unknown) and version 1 databases can be handled;
    anything else aborts with an error message.
    """
    if cur_version not in (0, 1):
        exit(Term.FAIL + ('recent: your command history database does not '
                          'match recent, please update') + Term.ENDC)
    cursor = conn.cursor()
    if cur_version == 0:
        # Fresh or unknown database: build everything from scratch.
        print(Term.WARNING + 'recent: building schema' + Term.ENDC)
        for statement in (DB.CREATE_COMMANDS_TABLE,
                          DB.CREATE_SESSIONS_TABLE,
                          DB.CREATE_DATE_INDEX):
            cursor.execute(statement)
    else:
        # Schema version is v1. Migrate to v2.
        print(Term.WARNING + 'recent: migrating schema to version {}'.format(DB.SCHEMA_VERSION) +
              Term.ENDC)
        cursor.execute(DB.MIGRATE_1_2)
    cursor.execute(DB.UPDATE_SCHEMA_VERSION + str(DB.SCHEMA_VERSION))
    conn.commit()
# Parses history command.
# This parse the output of `HISTTIMEFORMAT= history 1`
# Format: optional_whitespace + required_sequence_number + required_whitespace + command
def parse_history(history):
    """Parse `history 1` output into (sequence, command).

    Returns (None, None) when the input does not look like a history entry.

    FIX: the original passed ``re.MULTILINE and re.DOTALL`` as the flags
    argument — boolean ``and`` on two truthy flags evaluates to just
    ``re.DOTALL``, silently dropping MULTILINE. Flags must be combined with
    bitwise OR.
    """
    match = re.search(r'^\s*(\d+)\s+(.*)$', history, re.MULTILINE | re.DOTALL)
    if not match:
        return None, None
    sequence, cmd = int(match.group(1)), match.group(2)
    # log command discards if the command being logged has a suffix like "my_cmd <ts>"
    # If a user copy-pastes recent output, having this timestamp will look weird.
    copied_from_recent = \
        re.search(r'^(.*)\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$', cmd)
    if copied_from_recent:
        cmd = copied_from_recent.group(1)
    return sequence, cmd
def parse_date(date_format):
    """Translate a -d date argument into a SQL predicate on command_dt.

    Accepts YYYY, YYYY-MM or YYYY-MM-DD; any other shape prints an error and
    exits with status 1.
    """
    predicates = (
        (r'^\d{4}$', "strftime('%Y', command_dt) = ?"),
        (r'^\d{4}-\d{2}$', "strftime('%Y-%m', command_dt) = ?"),
        (r'^\d{4}-\d{2}-\d{2}$', 'date(command_dt) = ?'),
    )
    for pattern, predicate in predicates:
        if re.match(pattern, date_format):
            return predicate
    print("Invalid date passed to -d")
    sys.exit(1)
def create_connection():
    """Open the recent database, creating/upgrading its schema if needed."""
    # A "file:" prefix means the path is a sqlite URI (e.g. for mode options).
    use_uri = recent_db.startswith("file:")
    conn = sqlite3.connect(recent_db, uri=use_uri)
    build_schema(conn)
    return conn
def build_schema(conn):
    """Ensure the database schema exists and matches DB.SCHEMA_VERSION."""
    try:
        cursor = conn.cursor()
        version = cursor.execute(DB.GET_SCHEMA_VERSION).fetchone()[0]
        if version != DB.SCHEMA_VERSION:
            migrate(version, conn)
    except (sqlite3.OperationalError, TypeError):
        # missing/unreadable pragma: treat the database as brand new
        migrate(0, conn)
def envvars_to_log():
    """Collect the environment variables worth recording with a command.

    Variables starting with RECENT_ are always included; additionally the
    comma-separated globs in $RECENT_ENV_VARS select extra variables
    (e.g. "CONDA_*" records everything starting with CONDA_).
    """
    raw = os.getenv('RECENT_ENV_VARS', '')
    globs = {pattern.strip() for pattern in raw.split(',') if pattern.strip()}

    def _interesting(name: str):
        # Anything starting with RECENT_ is welcome.
        if name.startswith("RECENT_"):
            return True
        # Otherwise the name must match one of the user-supplied globs.
        return any(Path(name).match(g) for g in globs)

    return {name: value for name, value in os.environ.items() if _interesting(name)}
# Entry point to recent-log command.
def log(args_for_test=None):
    """Record the most recent shell command (invoked from PROMPT_COMMAND)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-r',
                        '--return_value',
                        type=int,
                        default=0,
                        help='Command return value. Set to $?')
    parser.add_argument('-c', '--command', default='',
                        help='Set to $(HISTTIMEFORMAT= history 1)')
    parser.add_argument('-p', '--pid', type=int, default=0,
                        help='Shell pid. Set to $$')
    args = parser.parse_args(args_for_test)

    sequence, command = parse_history(args.command)
    if not sequence or not command:
        # The history output did not look as expected — the shell trigger is
        # probably mis-configured; show the expected PROMPT_COMMAND and bail.
        print(Term.WARNING + ('recent: cannot parse command output, please check your bash '
                              'trigger looks like this:') + Term.ENDC)
        exit("""export PROMPT_COMMAND='{}'""".format(EXPECTED_PROMPT))
    log_command(command=command, pid=args.pid, sequence=sequence,
                return_value=args.return_value, pwd=os.getenv('PWD', ''))
def log_command(command, pid, sequence, return_value, pwd):
    """Insert one command row into the recent DB, tied to this shell's session."""
    conn = create_connection()
    # Session identity is (shell pid, history sequence); update() decides
    # whether this pair represents a fresh command worth storing.
    session = Session(pid, sequence)
    session.update(conn)
    if not session.empty:
        c = conn.cursor()
        # NOTE(review): the JSON payload is spliced into the SQL text via
        # str.format rather than bound as a parameter — assumes DB.INSERT_ROW
        # is written for that; verify quoting of embedded single quotes.
        json_data = "json('{}')".format(json.dumps({'env': envvars_to_log()}))
        # We pass current time instead of using 'now' in sql to mock this value.
        c.execute(DB.INSERT_ROW.format(json_data),
                  [int(time.time()), command, pid, return_value, pwd, session.id])
        conn.commit()
    conn.close()
# Imports bash_history into RECENT_DB
# Entry point to recent-import-bash-history command.
def import_bash_history_entry_point(args_for_test=None):
    """CLI entry point: import ~/.bash_history once, guarded by a marker file.

    A marker file records that an import already happened; -f overrides it.
    """
    description = ('recent-import-bash-history imports bash_history into ~/.recent.db. '
                   'Run `recent -h` for info about recent command.')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-f',
                        help='Force import bash history ignoring previous imports',
                        action='store_true')
    args = parser.parse_args(args_for_test)
    # The marker path is overridable for tests via RECENT_TEST_IMPORT_FILE.
    import_marker = Path(
        os.environ.get("RECENT_TEST_IMPORT_FILE", "~/.recent_imported_bash_history"))
    import_marker = import_marker.expanduser().absolute()
    # Bug fix: removed a stray debugging `print(import_marker)` that leaked the
    # marker path on every invocation.
    if not args.f and import_marker.exists():
        print(Term.FAIL +
              'recent-import-bash-history failed: Bash history already imported into ~/.recent.db')
        print('Run the command with -f option if you are absolutely sure.' + Term.ENDC)
        parser.print_help()
        sys.exit(1)
    import_bash_history()
    import_marker.touch()
def import_bash_history():
# Construct history from bash_history.
# Example bash_history. The history has 3 entries. First entry has no timestamp attached to it.
# The next 2 entries have timestamp attached to them. The last entry has some unknown comment
# which we will ignore.
"""
ls /
#1571012545
echo foo
#1571012560
#useless comment that should be ignored.
cat bar
"""
history = []
# Phase 1 starts: After this phase history will be like this
# [(-1, "ls /"), # This entry has no timestamp.
# (1571012545, "echo foo"),
# (1571012560, "cat bar")]
last_ts = -1
histfile = Path(os.environ.get("HISTFILE", "~/.bash_history")).expanduser()
if not histfile.exists():
return
for line in histfile.read_text().splitlines():
if not line:
continue
if line[0] == '#':
try:
last_ts = int(line[1:].strip())
except Exception:
# Ignore the exception.
pass
continue
history.append([last_ts, line.strip()])
# Phase 2 starts: After this phase history will be like this
# [(1571012545, "ls /"), # Timestamp for this comes from its next entry
# (1571012545, "echo foo"),
# (1571012560, "cat bar")]
last_ts = -1
for i in range(len(history) - 1, -1, -1):
if history[i][0] == -1 and last_ts != -1:
history[i][0] = last_ts
elif history[i][0] != -1 and last_ts == -1:
last_ts = history[i][0]
# Add the history entries into recent's DB.
conn = create_connection()
import random
# Create a session with a random -ve pid and random -ve sequence id.
pid = -random.randint(1, 10000000)
session = Session(pid=pid, sequence=-random.randint(1, 10000000))
session.update(conn)
for cmd_ts, cmd in history:
c = conn.cursor()
c.execute(DB.INSERT_ROW_NO_JSON, [
cmd_ts, cmd, pid,
# exit | |
are NOT set in the regular parameters
if isinstance(a_table, dict):
if a_format is not None or a_header is not None:
raise ValueError("If you specify the a_table to be a dictionary of values, then you should NOT specify "
"a_format, a_header or a_partition_by separately because they should be specified in the table.")
a_format = a_table.get("format", None)
a_header = a_table.get("header", None)
a_partition_by = a_table.get("partition_by", None)
a_table = a_table.get("name", None)
elif not isinstance(a_table, str):
raise ValueError("a_table must be either dict(name, format, header) or string")
if a_table is None:
raise ValueError("The a_table must be set (or a_table.name must be set, if a_table is a dictionary)")
# parameters compatibility checks
if a_columns_lowercase and a_columns_rename_map:
raise ValueError("You cannot set a_columns_lowercase=True and a_columns_rename simultaneously")
df = a_df # further we will modify it - avoid using a_df in the code
dest_type, dest_url, dest_options = parse_conn_info(a_dest_conn_info)
# rename columns: this may be useful NOT only for Postgres, but also for file systems
if a_columns_lowercase:
df = lowercase_columns(df, a_verbose_level=a_verbose_level)
if a_columns_rename_map:
df = rename_columns(df, a_columns_rename_map, a_verbose_level=a_verbose_level)
dtn = datetime.now()
if dest_type == C_DFS or dest_type == C_LOCAL:
path = join_path_generic(dest_url, a_table) if dest_type == C_DFS else join_path_os(dest_url, a_table)
print_verbose(1, a_verbose_level, f"saving to {dest_type} path {path}, started at: {date_to_ymd_hms(dtn)}")
get_fs_writer(df, a_format, a_header, a_overwrite, a_partition_by, a_verbose_level=a_verbose_level, **dest_options).save(path)
elif dest_type == C_JDBC:
# for PostgreSQL we will use its fast copy function
if a_fast_write and dest_url.lower().startswith("jdbc:postgresql"):
print_verbose(1, a_verbose_level, f"saving dataframe to table {a_table} of {dest_url} (using PostgreSQL copy routine), started at: {date_to_ymd_hms(dtn)}")
# forcibly set a_columns_lowercase=False, a_columns_rename=None because we already did this renaming before
pg_copy(df, a_table, a_overwrite=a_overwrite, a_verbose_level=a_verbose_level,
# passing url separately: it is cut off from the options. But it will go to the **kwargs of the pg_copy
url=dest_url, **dest_options)
else:
print_verbose(1, a_verbose_level, f"saving to table {a_table} of {dest_url} (using regular JDBC), started at: {date_to_ymd_hms(dtn)}")
get_jdbc_writer(df, dest_url, a_table, a_overwrite, a_verbose_level=a_verbose_level, **dest_options).save()
else:
raise NotImplemented(f"Type {dest_type} is not implemented yet")
sp, dtn = seconds_passed(dtn, True)
print_verbose(1, a_verbose_level, f"dataframe saved, seconds passed: {sp}, finished at: {date_to_ymd_hms(dtn)}")
# endregion
# region Other Dataframe routines
# Module-level registry of every DataFrame passed to cache(); uncache_all()
# walks this list to unpersist them.
_CACHED_DF_LIST = []  # keep the list of cached dataframes
def ps(a_df: DataFrame):
    """Shorthand for DataFrame.printSchema()."""
    a_df.printSchema()
def cache(a_df: DataFrame, a_row_count=False, a_storage_level=StorageLevel.MEMORY_ONLY, a_temp_table=None, a_verbose_level=3):
    """Persists a dataframe.

    :param a_df: the dataframe
    :param a_row_count: perform counting of rows and force the dataframe to go into cache
    :param a_storage_level: the storage level. By default it will use memory only serialization.
        We strongly recommend using Kryo serialization enabled in Spark.
    :param a_temp_table: optional name of a temp view to register for the dataframe
    :param a_verbose_level: print verbosity level
    :return: the row count when a_row_count is True, otherwise -1
    """
    a_df.persist(storageLevel=a_storage_level)
    # Register so uncache_all() can unpersist it later.
    _CACHED_DF_LIST.append(a_df)
    cnt = -1
    if a_row_count:
        print_verbose(1, a_verbose_level, "caching dataframe and counting rows")
        # count() forces full materialization, pushing the dataframe into cache.
        cnt = a_df.count()
        print_verbose(1, a_verbose_level, f"done caching, row count: {cnt:,}.")
    else:
        print_verbose(1, a_verbose_level, "done caching.")
    if a_temp_table:
        a_df.createOrReplaceTempView(a_temp_table)
    return cnt
def _version_tuple(a_version: str):
    """Parse the leading numeric components of a version string like '2.4.3'."""
    parts = []
    for piece in a_version.split('.'):
        digits = ''.join(ch for ch in piece if ch.isdigit())
        if not digits:
            break
        parts.append(int(digits))
    return tuple(parts)


def unpersist(a_df: DataFrame, a_force=False, a_verbose_level=3):
    """Unpersist a dataframe and drop it from the cached-dataframe registry.

    On Spark <= 2.4.0 unpersisting is skipped (unless forced) because it
    triggers cascaded cache invalidation there.

    :param a_df: the dataframe to unpersist
    :param a_force: unpersist regardless of the Spark version
    :param a_verbose_level: print verbosity level
    """
    # Bug fix: compare versions numerically. The old string comparison
    # `_spark.version > '2.4.0'` wrongly claimed '2.10.0' < '2.4.0'.
    version_greater_240 = _version_tuple(_spark.version) > (2, 4, 0)
    if a_force or version_greater_240:
        a_df.unpersist()
        if a_df in _CACHED_DF_LIST:
            _CACHED_DF_LIST.remove(a_df)
    if a_force:
        print_verbose(1, a_verbose_level, "dataframe is unpersisted forcibly")
    elif version_greater_240:
        print_verbose(1, a_verbose_level, "dataframe is unpersisted")
    else:
        print_verbose(1, a_verbose_level, "dataframe is NOT unpersisted because Spark version is less than 2.4.0, and it will lead to cascaded cache invalidation")
def uncache_all():
    """Unpersist every dataframe previously registered by cache()."""
    for cached_df in _CACHED_DF_LIST:
        cached_df.unpersist()
    _CACHED_DF_LIST.clear()
# Monotonic counter used by temp_table() to generate unique auto view names
# ("t_0", "t_1", ...).
TEMP_TABLE_COUNTER = 0
# Names of every auto-generated temp view registered by temp_table().
TEMP_TABLES = []
def temp_table(a_df: DataFrame, a_name=None, a_prefix="t"):
    """Register *a_df* as a temp view and return the view name.

    With an explicit a_name the view is registered under it and the
    counter/registry are left untouched; otherwise a unique name is generated.
    """
    global TEMP_TABLE_COUNTER
    if a_name:
        a_df.createOrReplaceTempView(a_name)
        return a_name
    generated = f"{a_prefix}_{TEMP_TABLE_COUNTER}"
    a_df.createOrReplaceTempView(generated)
    TEMP_TABLES.append(generated)
    TEMP_TABLE_COUNTER += 1
    return generated
def sql(a_query, a_dfs=None, a_cache=False, a_row_count=False, a_temp_table=None, a_verbose_level=3) -> DataFrame:
    """Run a Spark SQL query, substituting {i} / [i] placeholders with temp
    views created for the dataframes in *a_dfs* (a single dataframe or a list).
    Optionally caches and/or counts the result and registers it as a temp view.
    """
    query = a_query
    if a_dfs:
        frames = a_dfs if isinstance(a_dfs, list) else [a_dfs]
        for idx, frame in enumerate(frames):
            view = temp_table(frame)
            query = query.replace("{" + str(idx) + "}", view)
            query = query.replace("[" + str(idx) + "]", view)
    print_verbose(3, a_verbose_level, query)
    result: DataFrame = _spark.sql(query)
    if a_cache and a_row_count:
        cache(result, a_row_count=True)
    elif a_row_count:
        count(result)
    elif a_cache:
        cache(result)
    if a_temp_table:
        result.createOrReplaceTempView(a_temp_table)
    return result
def distinct(a_df: DataFrame, a_columns, a_order=False, a_cache=False, a_row_count=False,
             a_return_count=False, a_verbose_level=3) -> Union[DataFrame, Tuple[DataFrame, int]]:
    """Select the distinct values of the given column(s).

    Optionally orders by the same columns, caches the result, and/or counts
    rows; with a_return_count the result is (dataframe, count).
    """
    columns = [a_columns] if isinstance(a_columns, str) else a_columns
    view = temp_table(a_df)
    col_list = comma_columns(columns)
    query = f"select distinct {col_list} from {view}"
    if a_order:
        query += f" order by {col_list}"
    df_result = sql(query, a_verbose_level=a_verbose_level)
    cnt = -1
    if a_cache:
        cnt = cache(df_result, a_row_count=a_row_count or a_return_count, a_verbose_level=a_verbose_level)
    if (a_row_count or a_return_count) and cnt < 0:
        cnt = df_result.count()
        print_verbose(1, a_verbose_level, f"distinct row count (no caching): {cnt:,}")
    if a_return_count:
        return df_result, cnt
    return df_result
def distinct_values(a_df: DataFrame, a_column, a_order=False, a_verbose_level=3):
    """Return the distinct values of *a_column* as a Python list.

    Bug fix: a_verbose_level was accepted but never forwarded to distinct().
    """
    rows = distinct(a_df, a_column, a_order, a_verbose_level=a_verbose_level).collect()
    return [r[a_column] for r in rows]
def min_value(a_df: DataFrame, a_column: str):
    """Return the minimum value of *a_column* (collected via pandas)."""
    pandas_df = sql(f"select min({a_column}) as {a_column} from [0]", a_df, a_verbose_level=5).toPandas()
    return pandas_df[a_column].iloc[0]
def max_value(a_df: DataFrame, a_column: str):
    """Return the maximum value of *a_column* (collected via pandas)."""
    pandas_df = sql(f"select max({a_column}) as {a_column} from [0]", a_df, a_verbose_level=5).toPandas()
    return pandas_df[a_column].iloc[0]
def collect_values(a_df: DataFrame, a_column, a_order=False, a_verbose_level=3):
    """Collect all values (duplicates included) of *a_column* into a list.

    Bug fix: a_order and a_verbose_level were accepted but silently ignored.
    """
    # NOTE(review): assumes change() accepts a_order_by as a list of columns,
    # as it appears to when called from show(); verify against its definition.
    df = change(a_df, a_select=[a_column],
                a_order_by=[a_column] if a_order else None,
                a_verbose_level=a_verbose_level)
    return [r[a_column] for r in df.collect()]
def count(a_df: DataFrame, a_cache=False, a_verbose_level=3):
    """Count the rows of a dataframe, optionally caching it first; prints and
    returns the count."""
    if not isinstance(a_cache, bool):
        raise ValueError("a_cache parameter must be bool")
    if a_cache:
        cache(a_df)
    row_count = a_df.count()
    print_verbose(1, a_verbose_level, f"row count: {row_count:,}")
    return row_count
def groupby_count(a_df: DataFrame, a_columns: ListOrStr, a_order_by=None, a_rc_desc=False, a_verbose_level=3) -> DataFrame:
    """Group by the given column(s) and count rows into an RC column.

    :param a_columns: grouping column(s), string or list
    :param a_order_by: ordering spec — str, list, or {column: "asc"/"desc"} dict
    :param a_rc_desc: additionally order by RC descending
    :param a_verbose_level: print verbosity level
    """
    if isinstance(a_columns, str):
        a_columns = [a_columns]
    table_name = temp_table(a_df)
    cols = comma_columns(a_columns)
    # Normalize a_order_by to a {column: direction} dict.
    order_by = a_order_by
    if order_by:
        if isinstance(order_by, str):
            order_by = [order_by]
        if isinstance(order_by, list):
            order_by = {c: "asc" for c in order_by}
    if a_rc_desc:
        # Bug fix: use a falsy check. The old `if order_by is None` broke on an
        # empty-string/empty-list a_order_by, which skipped the normalization
        # above and then tried to item-assign a non-dict.
        if not order_by:
            order_by = {"RC": "desc"}
        else:
            order_by["RC"] = "desc"
    order_query = " order by " + ", ".join(f"{c} {v}" for c, v in order_by.items()) if order_by else ""
    query = f"select {cols}, count(*) as RC from {table_name} group by {cols}" + order_query
    return sql(query, a_verbose_level=a_verbose_level)
def groupby_sum(a_df: DataFrame, a_group_columns: ListOrStr, a_sum_columns: ListOrStr, a_order=False, a_verbose_level=3) -> DataFrame:
    """Group by a_group_columns and SUM each of a_sum_columns (aliased SUM_<col>).

    :param a_group_columns: grouping column(s); may be empty for a global sum
    :param a_sum_columns: column(s) to sum
    :param a_order: order the result by the grouping columns
    :param a_verbose_level: print verbosity level
    """
    if isinstance(a_group_columns, str):
        a_group_columns = [a_group_columns]
    if isinstance(a_sum_columns, str):
        a_sum_columns = [a_sum_columns]
    table_name = temp_table(a_df)
    group_cols = comma_columns(a_group_columns) if a_group_columns else None
    sum_cols = ", ".join(f"SUM({x}) as SUM_{x}" for x in a_sum_columns)
    order_query = f" order by {group_cols}" if a_order and group_cols else ""
    query = "select "
    if group_cols:
        # Bug fix: this literal was missing its f-prefix, so the generated SQL
        # contained the raw text "{group_cols}" instead of the column list.
        query += f"{group_cols}, "
    query += f"{sum_cols} from {table_name} "
    if group_cols:
        query += f" group by {group_cols}"
    query += order_query
    return sql(query, a_verbose_level=a_verbose_level)
def groupby(a_df: DataFrame, a_group_columns: ListOrStr, a_columns: dict, a_order_by_group=False, a_order_by=None, a_verbose_level=3) -> DataFrame:
    """Group by a_group_columns, aggregating according to a_columns.

    :param a_columns: dict mapping column -> aggregate op, or a comma-separated
        list of ops (e.g. 'min,max'). The pseudo-ops RC (count(*)) and
        DC (count distinct) are supported.
    :param a_order_by_group: order the result by the grouping columns
    :param a_order_by: {column: "asc"/"desc"} dict, or a list (ascending)
    :param a_verbose_level: print verbosity level
    """
    if isinstance(a_group_columns, str):
        a_group_columns = [a_group_columns]
    if not isinstance(a_columns, dict):
        # Typo fix: the message previously read "a_coumns".
        raise ValueError("a_columns must be a dict of 'column': 'sum', 'column2': 'max', etc.")
    if a_order_by is not None and not isinstance(a_order_by, (dict, list)):
        # Message fix: lists are accepted too (see below), so say so.
        raise ValueError("a_order_by must be a dictionary or a list")
    table_name = temp_table(a_df)
    group_cols = comma_columns(a_group_columns)
    agg_cols = []
    for c, oplist in a_columns.items():
        oplist = oplist.split(",") if "," in oplist else [oplist]
        for op in oplist:
            expr, colname = f"{op}({c})", f"{op}_{c}"
            if op == "RC":
                expr, colname = "count(*)", "RC"
            elif op == "DC":
                expr, colname = f"count(distinct {c})", f"DC_{c.replace(',', '_')}"
            agg_cols.append(f"{expr} as {colname}")
    agg_cols = ", ".join(agg_cols)
    order_query = ""
    if a_order_by_group:
        order_query = f" order by {group_cols}"
    elif a_order_by:
        if isinstance(a_order_by, dict):
            order_query = " order by " + ", ".join(f"{c} {o}" for c, o in a_order_by.items())
        else:
            order_query = " order by " + ", ".join(f"{c} asc" for c in a_order_by)
    query = f"select {group_cols}, {agg_cols} from {table_name} group by {group_cols}" + order_query
    return sql(query, a_verbose_level=a_verbose_level)
def show(a_df: DataFrame, a_limit=100, a_t=False, a_row_count=False, a_verbose_level=3,
         a_where=None, a_order_by=None):
    """Display a dataframe through pandas: optional filter/sort, row limit, and
    transposition (a_t)."""
    df = a_df
    if a_row_count:
        print_verbose(1, a_verbose_level, f"total row count: {df.count():,}")
    if a_where or a_order_by:
        df = change(df, a_where=a_where, a_order_by=a_order_by, a_verbose_level=0)
    if a_limit > 0:
        print_verbose(1, a_verbose_level, f"showing top {a_limit} rows")
        df = df.limit(a_limit)
    pdf = df.toPandas()
    display_pdf(pdf.T if a_t else pdf)
BAD_SYMBOLS = '% ,()&-$#.'
HIVE_KEYWORDS_SET = {"ALL", "ALTER", "AND", "ARRAY", "AS", "AUTHORIZATION", "BETWEEN", "BIGINT", "BINARY", "BOOLEAN", "BOTH",
"BY", "CASE", "CAST", "CHAR", "COLUMN", "CONF", "CREATE", "CROSS", "CUBE", "CURRENT", "CURRENT_DATE", "CURRENT_TIMESTAMP",
"CURSOR", "DATABASE", "DATE", "DECIMAL", "DELETE", "DESCRIBE", "DISTINCT", "DOUBLE", "DROP", "ELSE", "END", "EXCHANGE",
"EXISTS", "EXTENDED", "EXTERNAL", "FALSE", "FETCH", "FLOAT", "FOLLOWING", "FOR", "FROM", "FULL", "FUNCTION", "GRANT",
"GROUP", "GROUPING", "HAVING", "IF", "IMPORT", "IN", "INNER", "INSERT", "INT", "INTERSECT", "INTERVAL", "INTO", "IS",
"JOIN", "LATERAL", "LEFT", "LESS", | |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn import metrics
from matplotlib import animation
from PIL import Image
import matplotlib.patches as mpatches
from math import pi
from numpy import cos, sin
import copy
def cal_center(sites, initial_centers, nu_matrix, m):
    """Recompute each cluster center as the membership-weighted mean of sites.

    The input centers are deep-copied and left untouched; the updated copies
    are returned.
    """
    centers = copy.deepcopy(initial_centers)
    for j in range(nu_matrix.shape[1]):
        # Weight of site i for center j is its membership raised to m.
        weights = [nu_matrix[i][j] ** m for i in range(len(sites))]
        total_weight = sum(weights)
        centers[j].x_location = sum(w * s.x_location for w, s in zip(weights, sites)) / total_weight
        centers[j].y_location = sum(w * s.y_location for w, s in zip(weights, sites)) / total_weight
    return centers
def cal_numatrix(distance_matrix, centers, m, n, K):
    """Compute the (n, K) fuzzy membership matrix from site-center distances.

    Standard FCM update: nu[i][j] = 1 / sum_k (d_ij / d_ik)^(2/(m-1)).
    (*centers* is unused but kept for interface compatibility.)
    """
    nu_matrix = np.zeros((n, K))
    exponent = 2 / (m - 1)
    for i in range(n):
        for j in range(K):
            denom = 0
            for k in range(K):
                denom = denom + (distance_matrix[i][j] / distance_matrix[i][k]) ** exponent
            nu_matrix[i][j] = denom ** (-1)
    return nu_matrix
def cal_distance(sites, centers):
    """Return an (n_sites, n_centers) matrix of Euclidean distances."""
    distance_matrix = np.zeros((len(sites), len(centers)))
    for i, site in enumerate(sites):
        for j, center in enumerate(centers):
            dx = site.x_location - center.x_location
            dy = site.y_location - center.y_location
            distance_matrix[i][j] = (dx ** 2 + dy ** 2) ** 0.5
    return distance_matrix
def assign_center(site, centers, distance_matrix):
    """Return the center closest to *site*; the first one wins on ties."""
    row = distance_matrix[site.id]
    best = min(range(distance_matrix.shape[1]), key=lambda j: row[j])
    return centers[best]
def c_means(sites, init_centers,algorithm_kind,m):
    """Run fuzzy c-means until every center moves <= 0.01 on both axes.

    Saves one PNG per iteration (named from algorithm_kind and m) and returns
    (final_centers, number_of_saved_figures).

    :param sites: list of Site objects to cluster
    :param init_centers: initial Center objects (deep-copied, not mutated)
    :param algorithm_kind: label used in plot titles and output file names
    :param m: fuzziness exponent used in the membership update
    """
    centers = copy.deepcopy(init_centers)
    n_fig = 0  # number of iteration figures saved so far
    while True:
        distance_matrix = cal_distance(sites,centers)
        changed_centers = []
        # assign the center to the site
        for site in sites:
            center = assign_center(site, centers, distance_matrix)
            site.center = center.id
            # NOTE(review): Center.sites is a class-level list, so this append
            # is shared by every Center instance — confirm this is intended.
            center.sites.append(site)
        # recalculate center
        nu_matrix = cal_numatrix(distance_matrix,centers,m,len(sites),len(centers))
        new_centers = copy.deepcopy(cal_center(sites,centers,nu_matrix,m))
        # Convergence check: a center "changed" if it moved more than 0.01
        # along either axis.
        for i in range(len(centers)):
            if ((abs(centers[i].x_location-new_centers[i].x_location)>0.01) or (abs(centers[i].y_location-new_centers[i].y_location)>0.01)):
                changed_centers.append(centers[i])
        if len(changed_centers) == 0:
            return centers,n_fig
        centers = copy.deepcopy(new_centers)
        plt.clf()
        color_squence = ['darkorchid','limegreen','sandybrown','lightslategrey','rosybrown','sienna','seagreen']
        # Draw every site once per center with alpha = membership degree, so the
        # overlapping translucent rings visualize the fuzzy assignment.
        for j in range(len(centers)):
            x_sample_location = []
            y_sample_location = []
            for i in range(len(sites)):
                x_sample_location.append(sites[i].x_location)
                y_sample_location.append(sites[i].y_location)
                plt.scatter(sites[i].x_location,sites[i].y_location,marker='o',c = 'white',alpha = nu_matrix[i][j],edgecolors=color_squence[j%7])
                # plot sites using face color but edgecolor
                # plt.scatter(sites[i].x_location,sites[i].y_location,marker='o',c = color_squence[j%7],alpha = nu_matrix[i][j])
        # #draw sites,each site belongs to a center and has only one color
        # for j in range(len(centers)):
        #     x_sample_location = []
        #     y_sample_location = []
        #     for i in range(len(sites)):
        #         if sites[i].center == j:
        #             x_sample_location.append(sites[i].x_location)
        #             y_sample_location.append(sites[i].y_location)
        #             plt.scatter(sites[i].x_location,sites[i].y_location,marker='o',c = color_squence[j%7],alpha = max(nu_matrix[i][:]))
        x_center_location = []
        y_center_location = []
        for i in range(len(centers)):
            x_center_location.append(centers[i].x_location)
            y_center_location.append(centers[i].y_location)
        plt.scatter(x_center_location,y_center_location,s=400,marker='*',c='red')
        plt.title(algorithm_kind + ' M=' + str(m))
        plt.xlabel('Number of iterations:' + str(n_fig+1))
        plt.savefig(str(algorithm_kind)+ '_' +'M=' + str(m) + '_' + str(n_fig+1)+'.png')
        n_fig = n_fig+1
        plt.pause(0.001)
class Site:
    """A sample point to be clustered."""
    # Class-level default cluster assignment; c_means() rebinds site.center
    # (as an instance attribute) to the id of the nearest Center each pass.
    center = 0
    def __init__(self,id,x_location,y_location):
        self.id = id  # row index of this site in the distance/membership matrices
        self.x_location = x_location
        self.y_location = y_location
class Center:
    """A cluster center; ``sites`` collects the Site objects assigned to it."""

    def __init__(self, id, x_location, y_location):
        self.id = id
        self.x_location = x_location
        self.y_location = y_location
        # Bug fix: ``sites`` was a class-level list shared by every Center
        # instance (and by their deepcopies, since attribute lookup falls back
        # to the class), mixing all centers' assignments into one list. Make it
        # a per-instance attribute instead.
        self.sites = []
if __name__ == "__main__":
'''
Define the clusters using super param
'''
algorithm_kind = 'CMeans'
CENTERS=[[-1.15,-1.15], [-1.15,1.15], [0,0], [1.15,-1.15],[1.15,1.15]]
K = len(CENTERS)
N_SAMPLES = 1000 # numbel of samples, K samples is used for initial centers
CLUSTER_STD = [0.4, 0.4, 0.4, 0.4, 0.4] # std of each cluster
# CENTERS=[[-1,1], [-1,-2], [1,1], [1,-1],[2,2]]
# K = len(CENTERS)
# N_SAMPLES = 600 # numbel of samples, K samples is used for initial centers
# CLUSTER_STD = [0.4, 0.5, 0.3, 0.4, 0.3] # std of each cluster
###########################################################################################################################
M = 1.1
'''
Initial sample sites
'''
Object_sites = []
sample_sites_locations = []
temp_sample_sites_locations, cluster_id = make_blobs(n_samples=N_SAMPLES, n_features=2, centers = CENTERS, cluster_std=CLUSTER_STD, random_state =9)
x_sample_location = temp_sample_sites_locations[:,0]
x_sample_location = x_sample_location.tolist()
y_sample_location = temp_sample_sites_locations[:,1]
y_sample_location = y_sample_location.tolist()
for n_sample in range(N_SAMPLES):
# get sites locations [[x,y],[]...],then initial sites objects
sample_sites_locations.append([x_sample_location[n_sample],y_sample_location[n_sample]])
Object_sites.append( Site(n_sample,sample_sites_locations[n_sample][0],sample_sites_locations[n_sample][1]) )
'''
Init nu matrix and centers
'''
Object_centers = []
initial_centers_locations= []
x_center_location = []
y_center_location = []
# init nu matrix
nu_matrix = np.zeros((N_SAMPLES,K))
for i in range(N_SAMPLES):
for j in range(K):
nu_matrix[i][j] = np.random.uniform(0,1)
row_total = []
for i in range(N_SAMPLES):
row_total.append(sum(nu_matrix[i][:]))
for i in range(N_SAMPLES):
for j in range(K):
nu_matrix[i][j] = nu_matrix[i][j] / row_total[i]
# init centers
for k in range(K):
Object_centers.append( Center(k,0,0) )
Object_centers = cal_center(Object_sites,Object_centers,nu_matrix,M)
for k in range(K):
x_center_location.append(Object_centers[k].x_location)
y_center_location.append(Object_centers[k].y_location)
initial_centers_locations.append([x_center_location[k],y_center_location[k]])
# show the initial situation
fig = plt.figure(figsize=(5,5))
plt.scatter(x_sample_location,y_sample_location, marker='o',edgecolor = 'green',c = 'white') # the sites before k-means
plt.scatter(x_center_location,y_center_location,s = 300,marker='*',c = 'red')
plt.title('Init by '+ algorithm_kind)
plt.savefig('CMeans_inital_centers_.png')
'''
C-Means and plt.show
'''
plt.ion()
[Object_centers,n_fig] = c_means(Object_sites, Object_centers,algorithm_kind,M)
plt.ioff()
plt.show()
'''
Save figs as gif
'''
im = Image.open(str(algorithm_kind)+ '_' +'M=' + str(M) + "_1.png")
images=[]
for i in range(n_fig+1):
if i>1:
fpath = str(algorithm_kind)+ '_' +'M=' + str(M) + '_' + str(i)+ ".png"
images.append(Image.open(fpath))
im.save(str(algorithm_kind)+ '_' +'M=' + str(M) + '.gif', save_all=True, append_images=images,loop=1000,duration=500)
###########################################################################################################################
M = 1.5
'''
Initial sample sites
'''
Object_sites = []
sample_sites_locations = []
temp_sample_sites_locations, cluster_id = make_blobs(n_samples=N_SAMPLES, n_features=2, centers = CENTERS, cluster_std=CLUSTER_STD, random_state =9)
x_sample_location = temp_sample_sites_locations[:,0]
x_sample_location = x_sample_location.tolist()
y_sample_location = temp_sample_sites_locations[:,1]
y_sample_location = y_sample_location.tolist()
for n_sample in range(N_SAMPLES):
# get sites locations [[x,y],[]...],then initial sites objects
sample_sites_locations.append([x_sample_location[n_sample],y_sample_location[n_sample]])
Object_sites.append( Site(n_sample,sample_sites_locations[n_sample][0],sample_sites_locations[n_sample][1]) )
'''
Init nu matrix and centers
'''
Object_centers = []
initial_centers_locations= []
x_center_location = []
y_center_location = []
# init nu matrix
nu_matrix = np.zeros((N_SAMPLES,K))
for i in range(N_SAMPLES):
for j in range(K):
nu_matrix[i][j] = np.random.uniform(0,1)
row_total = []
for i in range(N_SAMPLES):
row_total.append(sum(nu_matrix[i][:]))
for i in range(N_SAMPLES):
for j in range(K):
nu_matrix[i][j] = nu_matrix[i][j] / row_total[i]
# init centers
for k in range(K):
Object_centers.append( Center(k,0,0) )
Object_centers = cal_center(Object_sites,Object_centers,nu_matrix,M)
for k in range(K):
x_center_location.append(Object_centers[k].x_location)
y_center_location.append(Object_centers[k].y_location)
initial_centers_locations.append([x_center_location[k],y_center_location[k]])
# show the initial situation
fig = plt.figure(figsize=(5,5))
plt.scatter(x_sample_location,y_sample_location, marker='o',edgecolor = 'green',c = 'white') # the sites before k-means
plt.scatter(x_center_location,y_center_location,s = 300,marker='*',c = 'red')
plt.title('Init by '+ algorithm_kind)
plt.savefig('CMeans_inital_centers_.png')
'''
C-Means and plt.show
'''
plt.ion()
[Object_centers,n_fig] = c_means(Object_sites, Object_centers,algorithm_kind,M)
plt.ioff()
plt.show()
'''
Save figs as gif
'''
im = Image.open(str(algorithm_kind)+ '_' +'M=' + str(M) + "_1.png")
images=[]
for i in range(n_fig+1):
if i>1:
fpath = str(algorithm_kind)+ '_' +'M=' + str(M) + '_' + str(i)+ ".png"
images.append(Image.open(fpath))
im.save(str(algorithm_kind)+ '_' +'M=' + str(M) + '.gif', save_all=True, append_images=images,loop=1000,duration=500)
###########################################################################################################################
M = 2
'''
Initial sample sites
'''
Object_sites = []
sample_sites_locations = []
temp_sample_sites_locations, cluster_id = make_blobs(n_samples=N_SAMPLES, n_features=2, centers = CENTERS, cluster_std=CLUSTER_STD, random_state =9)
x_sample_location = temp_sample_sites_locations[:,0]
x_sample_location = x_sample_location.tolist()
y_sample_location = temp_sample_sites_locations[:,1]
y_sample_location = y_sample_location.tolist()
for n_sample in range(N_SAMPLES):
# get sites locations [[x,y],[]...],then initial sites objects
sample_sites_locations.append([x_sample_location[n_sample],y_sample_location[n_sample]])
Object_sites.append( Site(n_sample,sample_sites_locations[n_sample][0],sample_sites_locations[n_sample][1]) )
'''
Init nu matrix and centers
'''
Object_centers = []
initial_centers_locations= []
x_center_location = []
y_center_location = []
# init nu matrix
nu_matrix = np.zeros((N_SAMPLES,K))
for i in range(N_SAMPLES):
for j in range(K):
nu_matrix[i][j] = np.random.uniform(0,1)
row_total = []
for i in range(N_SAMPLES):
row_total.append(sum(nu_matrix[i][:]))
for i in range(N_SAMPLES):
for j in range(K):
nu_matrix[i][j] = nu_matrix[i][j] / row_total[i]
# init centers
for k in range(K):
Object_centers.append( Center(k,0,0) )
Object_centers = cal_center(Object_sites,Object_centers,nu_matrix,M)
for k in range(K):
x_center_location.append(Object_centers[k].x_location)
y_center_location.append(Object_centers[k].y_location)
initial_centers_locations.append([x_center_location[k],y_center_location[k]])
# show the initial situation
fig = plt.figure(figsize=(5,5))
plt.scatter(x_sample_location,y_sample_location, marker='o',edgecolor = 'green',c = 'white') # the sites before k-means
plt.scatter(x_center_location,y_center_location,s = 300,marker='*',c = 'red')
plt.title('Init by '+ algorithm_kind)
plt.savefig('CMeans_inital_centers_.png')
'''
C-Means and plt.show
'''
plt.ion()
[Object_centers,n_fig] = c_means(Object_sites, Object_centers,algorithm_kind,M)
plt.ioff()
plt.show()
'''
Save figs as gif
'''
im = Image.open(str(algorithm_kind)+ '_' +'M=' + str(M) + "_1.png")
images=[]
for i in range(n_fig+1):
if i>1:
fpath = str(algorithm_kind)+ '_' +'M=' + str(M) + '_' + str(i)+ ".png"
images.append(Image.open(fpath))
im.save(str(algorithm_kind)+ '_' +'M=' + str(M) + '.gif', save_all=True, append_images=images,loop=1000,duration=500)
###########################################################################################################################
M = 3
'''
Initial sample sites
'''
Object_sites = []
sample_sites_locations = []
temp_sample_sites_locations, cluster_id = make_blobs(n_samples=N_SAMPLES, n_features=2, centers = CENTERS, cluster_std=CLUSTER_STD, random_state =9)
x_sample_location = temp_sample_sites_locations[:,0]
x_sample_location = x_sample_location.tolist()
y_sample_location = temp_sample_sites_locations[:,1]
y_sample_location = y_sample_location.tolist()
for n_sample in range(N_SAMPLES):
# get sites locations [[x,y],[]...],then initial sites objects
sample_sites_locations.append([x_sample_location[n_sample],y_sample_location[n_sample]])
Object_sites.append( Site(n_sample,sample_sites_locations[n_sample][0],sample_sites_locations[n_sample][1]) )
'''
Init nu matrix and centers
'''
Object_centers = []
initial_centers_locations= []
x_center_location = []
y_center_location = []
# init nu matrix
nu_matrix = np.zeros((N_SAMPLES,K))
for i in range(N_SAMPLES):
for j in range(K):
nu_matrix[i][j] = np.random.uniform(0,1)
row_total = []
for | |
"RENAME TO" clause.
""",
fields=[
Field(
'new_name',
'ASTPathExpression',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
],
extra_public_defs="""
std::string GetSQLForAlterAction() const override;
""")
gen.AddNode(
name='ASTSetCollateClause',
tag_id=251,
parent='ASTAlterAction',
comment="""
ALTER action for "SET COLLATE ()" clause
""",
fields=[
Field(
'collate',
'ASTCollate',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
],
extra_public_defs="""
std::string GetSQLForAlterAction() const override;
""")
gen.AddNode(
name='ASTAlterActionList',
tag_id=252,
parent='ASTNode',
fields=[
Field(
'actions',
'ASTAlterAction',
tag_id=2,
field_loader=FieldLoaderMethod.REST_AS_REPEATED),
])
gen.AddNode(
name='ASTAlterAllRowAccessPoliciesStatement',
tag_id=253,
parent='ASTStatement',
fields=[
Field(
'table_name_path',
'ASTPathExpression',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'alter_action',
'ASTAlterAction',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED),
])
gen.AddNode(
name='ASTForeignKeyActions',
tag_id=254,
parent='ASTNode',
use_custom_debug_string=True,
fields=[
Field(
'update_action',
SCALAR_ACTION,
tag_id=2),
Field(
'delete_action',
SCALAR_ACTION,
tag_id=3),
],
extra_public_defs="""
static std::string GetSQLForAction(Action action);
""")
gen.AddNode(
name='ASTForeignKeyReference',
tag_id=255,
parent='ASTNode',
use_custom_debug_string=True,
fields=[
Field(
'table_name',
'ASTPathExpression',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'column_list',
'ASTColumnList',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'actions',
'ASTForeignKeyActions',
tag_id=4,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'match',
SCALAR_MATCH,
tag_id=5),
Field(
'enforced',
SCALAR_BOOL_DEFAULT_TRUE,
tag_id=6),
],
extra_public_defs="""
std::string GetSQLForMatch() const;
""")
gen.AddNode(
name='ASTScript',
tag_id=256,
parent='ASTNode',
comment="""
A top-level script.
""",
fields=[
Field(
'statement_list_node',
'ASTStatementList',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
],
extra_public_defs="""
absl::Span<const ASTStatement* const> statement_list() const {
return statement_list_node_->statement_list();
}
""")
gen.AddNode(
name='ASTElseifClause',
tag_id=257,
parent='ASTNode',
comment="""
Represents an ELSEIF clause in an IF statement.
""",
fields=[
Field(
'condition',
'ASTExpression',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED,
comment="""
condition and body are both required.
"""),
Field(
'body',
'ASTStatementList',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED),
],
extra_public_defs="""
// Returns the ASTIfStatement that this ASTElseifClause belongs to.
const ASTIfStatement* if_stmt() const {
return parent()->parent()->GetAsOrDie<ASTIfStatement>();
}
""")
gen.AddNode(
name='ASTElseifClauseList',
tag_id=258,
parent='ASTNode',
comment="""
Represents a list of ELSEIF clauses. Note that this list is never empty,
as the grammar will not create an ASTElseifClauseList object unless there
exists at least one ELSEIF clause.
""",
fields=[
Field(
'elseif_clauses',
'ASTElseifClause',
tag_id=2,
field_loader=FieldLoaderMethod.REST_AS_REPEATED),
])
gen.AddNode(
name='ASTIfStatement',
tag_id=259,
parent='ASTScriptStatement',
fields=[
Field(
'condition',
'ASTExpression',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED,
comment="""
condition and then_list are both required.
"""),
Field(
'then_list',
'ASTStatementList',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'elseif_clauses',
'ASTElseifClauseList',
tag_id=4,
comment="""
Optional; nullptr if no ELSEIF clauses are specified. If present, the
list will never be empty.
"""),
Field(
'else_list',
'ASTStatementList',
tag_id=5,
comment="""
Optional; nullptr if no ELSE clause is specified
"""),
])
gen.AddNode(
name='ASTWhenThenClause',
tag_id=260,
parent='ASTNode',
comment="""
Represents a WHEN...THEN clause in a CASE statement.
""",
fields=[
Field(
'condition',
'ASTExpression',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED,
comment="""
condition and body are both required.
"""),
Field(
'body',
'ASTStatementList',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED),
],
extra_public_defs="""
// Returns the ASTCaseStatement that this ASTWhenThenClause belongs to.
// Immediate parent is an ASTWhenThenClauseList, contained in an
// ASTCaseStatement.
const ASTCaseStatement* case_stmt() const {
return parent()->parent()->GetAsOrDie<ASTCaseStatement>();
}
""")
gen.AddNode(
name='ASTWhenThenClauseList',
tag_id=261,
parent='ASTNode',
comment="""
Represents a list of WHEN...THEN clauses. Note that this list is never empty,
as the grammar mandates that there is at least one WHEN...THEN clause in
a CASE statement.
""",
fields=[
Field(
'when_then_clauses',
'ASTWhenThenClause',
tag_id=2,
field_loader=FieldLoaderMethod.REST_AS_REPEATED),
])
gen.AddNode(
name='ASTCaseStatement',
tag_id=262,
parent='ASTScriptStatement',
fields=[
Field(
'expression',
'ASTExpression',
tag_id=2,
field_loader=FieldLoaderMethod.OPTIONAL_EXPRESSION,
comment="""
Optional; nullptr if not specified
"""),
Field(
'when_then_clauses',
'ASTWhenThenClauseList',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED,
comment="""
Required field.
"""),
Field(
'else_list',
'ASTStatementList',
tag_id=4),
])
gen.AddNode(
name='ASTHint',
tag_id=263,
parent='ASTNode',
fields=[
Field(
'num_shards_hint',
'ASTIntLiteral',
tag_id=2,
comment="""
This is the @num_shards hint shorthand that can occur anywhere that a
hint can occur, prior to @{...} hints.
At least one of num_shards_hints is non-NULL or hint_entries is non-empty.
"""),
Field(
'hint_entries',
'ASTHintEntry',
tag_id=3,
field_loader=FieldLoaderMethod.REST_AS_REPEATED),
])
gen.AddNode(
name='ASTHintEntry',
tag_id=264,
parent='ASTNode',
fields=[
Field(
'qualifier',
'ASTIdentifier',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'name',
'ASTIdentifier',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'value',
'ASTExpression',
tag_id=4,
field_loader=FieldLoaderMethod.REQUIRED,
comment="""
Value is always an identifier, literal, or parameter.
"""),
],
gen_init_fields=False,
extra_private_defs="""
void InitFields() final {
// We need a special case here because we have two children that both have
// type ASTIdentifier and the first one is optional.
if (num_children() == 2) {
FieldLoader fl(this);
fl.AddRequired(&name_);
fl.AddRequired(&value_);
} else {
FieldLoader fl(this);
fl.AddRequired(&qualifier_);
fl.AddRequired(&name_);
fl.AddRequired(&value_);
}
}
"""
)
gen.AddNode(
name='ASTUnpivotInItemLabel',
tag_id=265,
parent='ASTNode',
fields=[
Field(
'string_label',
'ASTStringLiteral',
tag_id=2,
gen_setters_and_getters=False),
Field(
'int_label',
'ASTIntLiteral',
tag_id=3,
gen_setters_and_getters=False),
],
extra_public_defs="""
const ASTLeaf* label() const {
if (string_label_ != nullptr) {
return string_label_;
}
return int_label_;
}
""")
gen.AddNode(
name='ASTDescriptor',
tag_id=266,
parent='ASTNode',
fields=[
Field(
'columns',
'ASTDescriptorColumnList',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
])
gen.AddNode(
name='ASTColumnSchema',
tag_id=267,
parent='ASTNode',
is_abstract=True,
comment="""
A column schema identifies the column type and the column annotations.
The annotations consist of the column attributes and the column options.
This class is used only in column definitions of CREATE TABLE statements,
and is unrelated to CREATE SCHEMA despite the usage of the overloaded term
"schema".
The hierarchy of column schema is similar to the type hierarchy.
The annotations can be applied on struct fields or array elements, for
example, as in STRUCT<x INT64 NOT NULL, y STRING OPTIONS(foo="bar")>.
In this case, some column attributes, such as PRIMARY KEY and HIDDEN, are
disallowed as field attributes.
""",
fields=[
Field(
'type_parameters',
'ASTTypeParameterList',
tag_id=2,
visibility=Visibility.PROTECTED),
Field(
'generated_column_info',
'ASTGeneratedColumnInfo',
tag_id=3,
visibility=Visibility.PROTECTED),
Field(
'default_expression',
'ASTExpression',
tag_id=4,
field_loader=FieldLoaderMethod.OPTIONAL_EXPRESSION,
visibility=Visibility.PROTECTED),
Field(
'collate',
'ASTCollate',
tag_id=5,
visibility=Visibility.PROTECTED),
Field(
'attributes',
'ASTColumnAttributeList',
tag_id=6,
visibility=Visibility.PROTECTED),
Field(
'options_list',
'ASTOptionsList',
tag_id=7,
visibility=Visibility.PROTECTED),
],
extra_public_defs="""
// Helper method that returns true if the attributes()->values() contains an
// ASTColumnAttribute with the node->kind() equal to 'node_kind'.
bool ContainsAttribute(ASTNodeKind node_kind) const;
template <typename T>
std::vector<const T*> FindAttributes(ASTNodeKind node_kind) const {
std::vector<const T*> found;
if (attributes() == nullptr) {
return found;
}
for (const ASTColumnAttribute* attribute : attributes()->values()) {
if (attribute->node_kind() == node_kind) {
found.push_back(static_cast<const T*>(attribute));
}
}
return found;
}
""")
gen.AddNode(
name='ASTSimpleColumnSchema',
tag_id=268,
parent='ASTColumnSchema',
fields=[
Field(
'type_name',
'ASTPathExpression',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
])
gen.AddNode(
name='ASTArrayColumnSchema',
tag_id=269,
parent='ASTColumnSchema',
fields=[
Field(
'element_schema',
'ASTColumnSchema',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
])
gen.AddNode(
name='ASTTableConstraint',
tag_id=270,
parent='ASTTableElement',
is_abstract=True,
comment="""
Base class for constraints, including primary key, foreign key and check
constraints.
""",
extra_public_defs="""
virtual const ASTIdentifier* constraint_name() const = 0;
""")
gen.AddNode(
name='ASTPrimaryKey',
tag_id=271,
parent='ASTTableConstraint',
use_custom_debug_string=True,
fields=[
Field(
'column_list',
'ASTColumnList',
tag_id=2),
Field(
'options_list',
'ASTOptionsList',
tag_id=3),
Field(
'constraint_name',
'ASTIdentifier',
tag_id=4,
getter_is_override=True),
Field(
'enforced',
SCALAR_BOOL_DEFAULT_TRUE,
tag_id=5),
])
gen.AddNode(
name='ASTForeignKey',
tag_id=272,
parent='ASTTableConstraint',
fields=[
Field(
'column_list',
'ASTColumnList',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'reference',
'ASTForeignKeyReference',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'options_list',
'ASTOptionsList',
tag_id=4),
Field(
'constraint_name',
'ASTIdentifier',
tag_id=5,
getter_is_override=True),
])
# ASTCheckConstraint: a CHECK (...) table constraint with optional options
# list and constraint name.
gen.AddNode(
    name='ASTCheckConstraint',
    tag_id=273,
    parent='ASTTableConstraint',
    use_custom_debug_string=True,
    fields=[
        Field(
            'expression',
            'ASTExpression',
            tag_id=2,
            field_loader=FieldLoaderMethod.REQUIRED),
        Field(
            'options_list',
            'ASTOptionsList',
            tag_id=3),
        # Overrides the pure-virtual constraint_name() declared on
        # ASTTableConstraint.
        Field(
            'constraint_name',
            'ASTIdentifier',
            tag_id=4,
            getter_is_override=True),
        # NOTE(review): this field is named 'is_enforced' while ASTPrimaryKey
        # and ASTForeignKeyReference call the equivalent field 'enforced'.
        # Renaming would change the generated API, so it is left as-is;
        # confirm the inconsistency is intentional.
        Field(
            'is_enforced',
            SCALAR_BOOL_DEFAULT_TRUE,
            tag_id=5),
    ])
gen.AddNode(
name='ASTDescriptorColumn',
tag_id=274,
parent='ASTNode',
fields=[
Field(
'name',
'ASTIdentifier',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED,
comment="""
Required field
"""),
])
gen.AddNode(
name='ASTDescriptorColumnList',
tag_id=275,
parent='ASTNode',
fields=[
Field(
'descriptor_column_list',
'ASTDescriptorColumn',
tag_id=2,
field_loader=FieldLoaderMethod.REST_AS_REPEATED,
comment="""
Guaranteed by the parser to never be empty.
"""),
])
gen.AddNode(
name='ASTCreateEntityStatement',
tag_id=276,
parent='ASTCreateStatement',
fields=[
Field(
'type',
'ASTIdentifier',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'name',
'ASTPathExpression',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'options_list',
'ASTOptionsList',
tag_id=4),
Field(
'json_body',
'ASTJSONLiteral',
tag_id=5),
Field(
'text_body',
'ASTStringLiteral',
tag_id=6),
],
extra_public_defs="""
const ASTPathExpression* GetDdlTarget() const override { return name_; }
""")
gen.AddNode(
name='ASTRaiseStatement',
tag_id=277,
parent='ASTScriptStatement',
fields=[
Field(
'message',
'ASTExpression',
tag_id=2,
field_loader=FieldLoaderMethod.OPTIONAL_EXPRESSION),
],
extra_public_defs="""
// A RAISE statement rethrows an existing exception, as opposed to creating
// a new exception, when none of the properties are set. Currently, the only
// property is the message. However, for future proofing, as more properties
// get added to RAISE later, code should call this function to check for a
// rethrow, rather than checking for the presence of a message, directly.
bool is_rethrow() const { return message_ == nullptr; }
""")
gen.AddNode(
name='ASTExceptionHandler',
tag_id=278,
parent='ASTNode',
fields=[
Field(
'statement_list',
'ASTStatementList',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED,
comment="""
Required field; even an empty block still contains an empty statement list.
"""),
])
gen.AddNode(
name='ASTExceptionHandlerList',
tag_id=279,
parent='ASTNode',
comment="""
Represents a list of exception handlers in a block. Currently restricted
to one element, but may contain multiple elements in the future, once there
are multiple error codes for a block to catch.
""",
fields=[
Field(
'exception_handler_list',
'ASTExceptionHandler',
tag_id=2,
field_loader=FieldLoaderMethod.REST_AS_REPEATED),
])
gen.AddNode(
name='ASTBeginEndBlock',
tag_id=280,
parent='ASTScriptStatement',
fields=[
Field('label', 'ASTLabel', tag_id=2),
Field(
'statement_list_node',
'ASTStatementList',
tag_id=3,
field_loader=FieldLoaderMethod.REQUIRED),
Field(
'handler_list',
'ASTExceptionHandlerList',
tag_id=4,
comment="""
Optional; nullptr indicates a BEGIN block without an EXCEPTION clause.
"""),
],
extra_public_defs="""
absl::Span<const ASTStatement* const> statement_list() const {
return statement_list_node_->statement_list();
}
bool has_exception_handler() const {
return handler_list_ != nullptr &&
!handler_list_->exception_handler_list().empty();
}
""")
gen.AddNode(
name='ASTIdentifierList',
tag_id=281,
parent='ASTNode',
fields=[
Field(
'identifier_list',
'ASTIdentifier',
tag_id=2,
field_loader=FieldLoaderMethod.REST_AS_REPEATED,
comment="""
Guaranteed by the parser to never be empty.
"""),
])
gen.AddNode(
name='ASTVariableDeclaration',
tag_id=282,
parent='ASTScriptStatement',
fields=[
Field(
'variable_list',
'ASTIdentifierList',
tag_id=2,
field_loader=FieldLoaderMethod.REQUIRED,
comment="""
Required fields
"""),
Field(
'type',
'ASTType',
tag_id=3,
field_loader=FieldLoaderMethod.OPTIONAL_TYPE,
comment="""
Optional fields; at least one of <type> and | |
# Code Taken from https://github.com/LYH-YF/MWPToolkit
# -*- encoding: utf-8 -*-
# @Author: <NAME>
# @Time: 2021/08/29 11:10:20
# @File: multi_head_attention.py
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from utils.enum_type import EPT
class MultiHeadAttention(nn.Module):
    r"""Multi-head attention, as proposed in "Attention Is All You Need".

    Splits the ``embedding_size``-dimensional hidden state into ``num_heads``
    heads, computes scaled dot-product attention per head, and projects the
    concatenated head outputs back to ``embedding_size``.

    Args:
        embedding_size (int): hidden size; must be divisible by ``num_heads``.
        num_heads (int): number of attention heads.
        dropout_ratio (float): dropout probability on the attention weights.
    """

    def __init__(self, embedding_size, num_heads, dropout_ratio=0.0):
        super(MultiHeadAttention, self).__init__()
        self.embedding_size = embedding_size
        self.num_heads = num_heads
        self.head_size = embedding_size // num_heads
        assert self.head_size * num_heads == self.embedding_size, \
            "embedding size must be divisible by num_heads"
        # 1/sqrt(d_k); queries are pre-scaled instead of the dot products.
        self.scaling = self.head_size ** -0.5
        self.linear_query = nn.Linear(embedding_size, embedding_size)
        self.linear_key = nn.Linear(embedding_size, embedding_size)
        self.linear_value = nn.Linear(embedding_size, embedding_size)
        nn.init.normal_(self.linear_query.weight, mean=0, std=0.02)
        nn.init.normal_(self.linear_key.weight, mean=0, std=0.02)
        nn.init.normal_(self.linear_value.weight, mean=0, std=0.02)
        self.linear_out = nn.Linear(embedding_size, embedding_size)
        nn.init.normal_(self.linear_out.weight, mean=0, std=0.02)
        self.weight_dropout = nn.Dropout(dropout_ratio)

    def forward(self, query, key, value, key_padding_mask=None, attn_mask=None):
        r"""Compute multi-head attention.

        Args:
            query (torch.Tensor): shape [batch_size, tgt_len, embedding_size].
            key (torch.Tensor): shape [batch_size, src_len, embedding_size].
            value (torch.Tensor): shape [batch_size, src_len, embedding_size].
            key_padding_mask (torch.BoolTensor): shape [batch_size, src_len];
                True marks key positions to be ignored.
            attn_mask (torch.BoolTensor): shape [tgt_len, src_len] (shared by
                the whole batch) or [batch_size, tgt_len, src_len]; True marks
                disallowed attention links.

        Returns:
            tuple(torch.Tensor, torch.Tensor):
                attn_repre, shape [batch_size, tgt_len, embedding_size].
                attn_weights, shape [batch_size, tgt_len, src_len]
                (maximum over heads).
        """
        device = query.device
        batch_size, tgt_len, embedding_size = query.size()
        src_len = key.size(1)
        assert key.size() == value.size()

        q = self.linear_query(query) * self.scaling
        k = self.linear_key(key)
        v = self.linear_value(value)

        # Split into heads: [batch, heads, len, head_size] (k is transposed).
        q = q.view(batch_size, tgt_len, self.num_heads, self.head_size).permute(0, 2, 1, 3)
        k = k.view(batch_size, src_len, self.num_heads, self.head_size).permute(0, 2, 3, 1)
        v = v.view(batch_size, src_len, self.num_heads, self.head_size).permute(0, 2, 1, 3)

        attn_weights = torch.matmul(q, k)
        assert list(attn_weights.size()) == [batch_size, self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            # BUG FIX: the original unconditionally used
            # attn_mask.unsqueeze(0).unsqueeze(1), which only broadcasts for a
            # 2-D [tgt_len, src_len] mask and fails for the documented
            # per-batch [batch_size, tgt_len, src_len] mask. Support both;
            # the 2-D path is unchanged.
            if attn_mask.dim() == 2:
                mask = attn_mask.unsqueeze(0).unsqueeze(1)   # [1, 1, tgt, src]
            else:
                mask = attn_mask.unsqueeze(1)                # [batch, 1, tgt, src]
            attn_weights.masked_fill_(mask.to(device), float("-inf"))
        if key_padding_mask is not None:
            # [batch, src] -> [batch, 1, 1, src]
            attn_weights.masked_fill_(
                key_padding_mask.unsqueeze(1).unsqueeze(2).to(device),
                float("-inf")
            )

        attn_weights = self.weight_dropout(F.softmax(attn_weights, dim=-1))
        attn_repre = torch.matmul(attn_weights, v)
        assert list(attn_repre.size()) == [batch_size, self.num_heads, tgt_len, self.head_size]
        # Merge heads back and project.
        attn_repre = attn_repre.transpose(1, 2).contiguous().view(batch_size, tgt_len, embedding_size)
        attn_repre = self.linear_out(attn_repre)

        # Report the maximum attention weight over heads.
        attn_weights, _ = attn_weights.max(dim=1)
        return attn_repre, attn_weights
class EPTMultiHeadAttentionWeights(nn.Module):
    """
    Multi-head attention weight computation (follows 'Attention is all you need').

    Produces the (masked) scaled dot products between query Q and key K for
    every head; no softmax or value aggregation happens here.
    """

    def __init__(self, **config):
        """
        Initialize the multi-head attention weight module.

        :keyword int hidden_dim: Vector dimension of hidden states (H). 768 by default.
        :keyword int num_heads: Number of attention heads (N). 12 by default.
        """
        super().__init__()
        self.config = config
        # The hidden size must split evenly across the attention heads.
        assert self.hidden_dim % self.num_heads == 0, \
            "Hidden dimension %s is not divisible by the number of heads %s." % (self.hidden_dim, self.num_heads)
        # Linear transforms for query Q and key K.
        self.linear_q = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.linear_k = nn.Linear(self.hidden_dim, self.hidden_dim)
        # Per-head vector dimension D and its square root (scaling factor).
        self.dim_head = self.hidden_dim // self.num_heads
        self.sqrt_dim = self.dim_head ** 0.5

    def forward(self, query: torch.Tensor, key: torch.Tensor = None, key_ignorance_mask: torch.Tensor = None,
                attention_mask: torch.Tensor = None, head_at_last: bool = True) -> torch.Tensor:
        """
        Compute multi-head attention weights.

        Args:
            query: FloatTensor [B, S, H].
            key: FloatTensor [B, T, H] or [1, T, H]; defaults to `query`
                (self-attention) when None.
            key_ignorance_mask: BoolTensor [B, T]; True entries are set to
                -Infinity for the whole batch item / key position.
            attention_mask: BoolTensor [S, T]; True entries are set to
                -Infinity for the corresponding query/key pair.
            head_at_last: if True return [B, S, T, N], else [B, N, S, T].

        Returns:
            torch.FloatTensor of pre-softmax attention weight logits.
        """
        # Self-attention: reuse the query matrix as the key matrix.
        if key is None:
            key = query

        # Sanity checks on batch sizes, mask shapes and mask dtypes.
        assert query.shape[0] == key.shape[0] or key.shape[0] == 1 or query.shape[0] == 1
        assert key_ignorance_mask is None or (key.shape[:2] == key_ignorance_mask.shape and
                                              key_ignorance_mask.dtype == torch.bool)
        assert attention_mask is None or (query.shape[1] == attention_mask.shape[0] and
                                          key.shape[1] == attention_mask.shape[1] and
                                          attention_mask.dtype == torch.bool)

        num_queries = query.shape[1]
        num_keys = key.shape[1]
        batch = max(key.shape[0], query.shape[0])

        # Project both sides, then scale the query by sqrt(D).
        q = self.linear_q(query)
        k = self.linear_k(key)
        q = q / self.sqrt_dim

        # Broadcast a shared (batch-size-1) side across the whole batch.
        if q.shape[0] == 1:
            q = q.expand(batch, -1, -1)
        if k.shape[0] == 1:
            k = k.expand(batch, -1, -1)

        # [B, S, H] -> [B, N, S, D] -> [B*N, S, D]
        q = q.view(batch, num_queries, self.num_heads, self.dim_head) \
            .transpose(1, 2).flatten(0, 1).contiguous()
        # [B, T, H] -> [B, N, D, T] -> [B*N, D, T]
        k = k.view(batch, num_keys, self.num_heads, self.dim_head) \
            .permute(0, 2, 3, 1).flatten(0, 1).contiguous()

        # Batched dot products: [B*N, S, T] -> [B, N, S, T]
        weights = torch.bmm(q, k).view(batch, self.num_heads, num_queries, num_keys).contiguous()

        # Write -Infinity into forbidden positions.
        if attention_mask is not None:
            # [S, T] broadcasts as [1, 1, S, T].
            weights.masked_fill_(attention_mask, EPT.NEG_INF)
        if key_ignorance_mask is not None:
            # [B, T] -> [B, 1, 1, T].
            weights.masked_fill_(key_ignorance_mask.unsqueeze(1).unsqueeze(1), EPT.NEG_INF)

        if not head_at_last:
            return weights
        # [B, N, S, T] -> [B, S, T, N]
        return weights.permute(0, 2, 3, 1).contiguous()

    @property
    def hidden_dim(self) -> int:
        """Vector dimension of hidden states (H); 768 unless configured."""
        return self.config.get('hidden_dim', 768)

    @property
    def num_heads(self) -> int:
        """Number of attention heads (N); 12 unless configured."""
        return self.config.get('num_heads', 12)
class EPTMultiHeadAttention(nn.Module):
"""
Class for computing multi-head attention (follows the paper, 'Attention is all you need')
This class computes attention over K-V pairs with query Q, i.e.
"""
def __init__(self, **config):
    """
    Initialize MultiHeadAttention class

    :keyword int hidden_dim: Vector dimension of hidden states (H). 768 by default
    :keyword int num_heads: Number of attention heads (N). 12 by default
    :keyword float dropout_p: Probability of dropout. 0 by default
    """
    super().__init__()
    # Multi-head Attention Weight layer (consumes hidden_dim/num_heads config).
    self.attn = EPTMultiHeadAttentionWeights(**config)
    # BUG FIX: the original hard-coded self.dropout_p = 0.0, silently ignoring
    # the documented 'dropout_p' keyword. Honor it here; the default remains
    # 0.0, so existing callers see identical behavior.
    self.dropout_p = config.get('dropout_p', 0.0)
    # Dropout over attention weights (as in 'Attention is all you need').
    self.dropout_attn = nn.Dropout(self.dropout_p)
    # Linear transformations for value and output matrix.
    self.linear_v = nn.Linear(self.attn.hidden_dim, self.attn.hidden_dim)
    self.linear_out = nn.Linear(self.attn.hidden_dim, self.attn.hidden_dim)
def forward(self, query: torch.Tensor, key_value: torch.Tensor = None, key_ignorance_mask: torch.Tensor = None,
attention_mask: torch.Tensor = None, return_weights: bool = False, **kwargs):
"""
Compute multi-head attention
Args:
query (torch.Tensor): FloatTensor representing the query matrix with shape [batch_size, query_sequence_length, hidden_size].
key_value (torch.Tensor): FloatTensor representing the key matrix or value matrix with shape [batch_size, key_sequence_length, hidden_size] or [1, key_sequence_length, hidden_size].
By default, this is `None` (Use query matrix as a key matrix).
key_ignorance_mask (torch.Tensor): BoolTensor representing the mask for ignoring column vector in key matrix, with shape [batch_size, key_sequence_length].
If an element at (b, t) is `True,` then all return elements at batch_size=b, key_sequence_length=t will set to be -Infinity. By default, this is `None` (There's no mask to apply).
attention_mask (torch.Tensor): BoolTensor representing Attention mask for ignoring a key for each query item, with shape [query_sequence_length, key_sequence_length].
If an element at (s, t) is `True,` then all return elements at query_sequence_length=s, key_sequence_length=t will set to be -Infinity. By default, this is `None` (There's no mask to apply).
return_weights (bool): Use `True` to return attention weights. By default, this is `True.`
Returns:
Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
If head_at_last is True, return (Attention Output, Attention Weights). Otherwise, return only the Attention Output.
Attention Output: Shape [batch_size, query_sequence_length, hidden_size].
Attention Weights: Shape [batch_size, query_sequence_length, key_sequence_length, head_nums].
"""
# If key_value is | |
e=VLCException()
return libvlc_vlm_del_media(self, psz_name, e)
if hasattr(dll, 'libvlc_vlm_set_enabled'):
def vlm_set_enabled(self, psz_name, b_enabled):
"""Enable or disable a media (VOD or broadcast).
@param psz_name: the media to work on
@param b_enabled: the new status
"""
e=VLCException()
return libvlc_vlm_set_enabled(self, psz_name, b_enabled, e)
if hasattr(dll, 'libvlc_vlm_set_output'):
def vlm_set_output(self, psz_name, psz_output):
"""Set the output for a media.
@param psz_name: the media to work on
@param psz_output: the output MRL (the parameter to the "sout" variable)
"""
e=VLCException()
return libvlc_vlm_set_output(self, psz_name, psz_output, e)
if hasattr(dll, 'libvlc_vlm_set_input'):
def vlm_set_input(self, psz_name, psz_input):
"""Set a media's input MRL. This will delete all existing inputs and
add the specified one.
@param psz_name: the media to work on
@param psz_input: the input MRL
"""
e=VLCException()
return libvlc_vlm_set_input(self, psz_name, psz_input, e)
if hasattr(dll, 'libvlc_vlm_add_input'):
def vlm_add_input(self, psz_name, psz_input):
"""Add a media's input MRL. This will add the specified one.
@param psz_name: the media to work on
@param psz_input: the input MRL
"""
e=VLCException()
return libvlc_vlm_add_input(self, psz_name, psz_input, e)
if hasattr(dll, 'libvlc_vlm_set_loop'):
def vlm_set_loop(self, psz_name, b_loop):
"""Set a media's loop status.
@param psz_name: the media to work on
@param b_loop: the new status
"""
e=VLCException()
return libvlc_vlm_set_loop(self, psz_name, b_loop, e)
if hasattr(dll, 'libvlc_vlm_set_mux'):
def vlm_set_mux(self, psz_name, psz_mux):
"""Set a media's vod muxer.
@param psz_name: the media to work on
@param psz_mux: the new muxer
"""
e=VLCException()
return libvlc_vlm_set_mux(self, psz_name, psz_mux, e)
if hasattr(dll, 'libvlc_vlm_change_media'):
def vlm_change_media(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
"""Edit the parameters of a media. This will delete all existing inputs and
add the specified one.
@param psz_name: the name of the new broadcast
@param psz_input: the input MRL
@param psz_output: the output MRL (the parameter to the "sout" variable)
@param i_options: number of additional options
@param ppsz_options: additional options
@param b_enabled: boolean for enabling the new broadcast
@param b_loop: Should this broadcast be played in loop ?
"""
e=VLCException()
return libvlc_vlm_change_media(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop, e)
if hasattr(dll, 'libvlc_vlm_play_media'):
def vlm_play_media(self, psz_name):
"""Play the named broadcast.
@param psz_name: the name of the broadcast
"""
e=VLCException()
return libvlc_vlm_play_media(self, psz_name, e)
if hasattr(dll, 'libvlc_vlm_stop_media'):
def vlm_stop_media(self, psz_name):
"""Stop the named broadcast.
@param psz_name: the name of the broadcast
"""
e=VLCException()
return libvlc_vlm_stop_media(self, psz_name, e)
if hasattr(dll, 'libvlc_vlm_pause_media'):
def vlm_pause_media(self, psz_name):
"""Pause the named broadcast.
@param psz_name: the name of the broadcast
"""
e=VLCException()
return libvlc_vlm_pause_media(self, psz_name, e)
if hasattr(dll, 'libvlc_vlm_seek_media'):
def vlm_seek_media(self, psz_name, f_percentage):
"""Seek in the named broadcast.
@param psz_name: the name of the broadcast
@param f_percentage: the percentage to seek to
"""
e=VLCException()
return libvlc_vlm_seek_media(self, psz_name, f_percentage, e)
if hasattr(dll, 'libvlc_vlm_show_media'):
def vlm_show_media(self, psz_name):
"""Return information about the named broadcast.
\bug will always return NULL
@param psz_name: the name of the broadcast
@return: string with information about named media
"""
e=VLCException()
return libvlc_vlm_show_media(self, psz_name, e)
if hasattr(dll, 'libvlc_vlm_get_media_instance_position'):
def vlm_get_media_instance_position(self, psz_name, i_instance):
"""Get vlm_media instance position by name or instance id
@param psz_name: name of vlm media instance
@param i_instance: instance id
@return: position as float
"""
e=VLCException()
return libvlc_vlm_get_media_instance_position(self, psz_name, i_instance, e)
if hasattr(dll, 'libvlc_vlm_get_media_instance_time'):
def vlm_get_media_instance_time(self, psz_name, i_instance):
"""Get vlm_media instance time by name or instance id
@param psz_name: name of vlm media instance
@param i_instance: instance id
@return: time as integer
"""
e=VLCException()
return libvlc_vlm_get_media_instance_time(self, psz_name, i_instance, e)
if hasattr(dll, 'libvlc_vlm_get_media_instance_length'):
def vlm_get_media_instance_length(self, psz_name, i_instance):
"""Get vlm_media instance length by name or instance id
@param psz_name: name of vlm media instance
@param i_instance: instance id
@return: length of media item
"""
e=VLCException()
return libvlc_vlm_get_media_instance_length(self, psz_name, i_instance, e)
if hasattr(dll, 'libvlc_vlm_get_media_instance_rate'):
def vlm_get_media_instance_rate(self, psz_name, i_instance):
"""Get vlm_media instance playback rate by name or instance id
@param psz_name: name of vlm media instance
@param i_instance: instance id
@return: playback rate
"""
e=VLCException()
return libvlc_vlm_get_media_instance_rate(self, psz_name, i_instance, e)
if hasattr(dll, 'libvlc_vlm_get_media_instance_title'):
def vlm_get_media_instance_title(self, psz_name, i_instance):
"""Get vlm_media instance title number by name or instance id
\bug will always return 0
@param psz_name: name of vlm media instance
@param i_instance: instance id
@return: title as number
"""
e=VLCException()
return libvlc_vlm_get_media_instance_title(self, psz_name, i_instance, e)
if hasattr(dll, 'libvlc_vlm_get_media_instance_chapter'):
def vlm_get_media_instance_chapter(self, psz_name, i_instance):
"""Get vlm_media instance chapter number by name or instance id
\bug will always return 0
@param psz_name: name of vlm media instance
@param i_instance: instance id
@return: chapter as number
"""
e=VLCException()
return libvlc_vlm_get_media_instance_chapter(self, psz_name, i_instance, e)
if hasattr(dll, 'libvlc_vlm_get_media_instance_seekable'):
def vlm_get_media_instance_seekable(self, psz_name, i_instance):
"""Is libvlc instance seekable ?
\bug will always return 0
@param psz_name: name of vlm media instance
@param i_instance: instance id
@return: 1 if seekable, 0 if not
"""
e=VLCException()
return libvlc_vlm_get_media_instance_seekable(self, psz_name, i_instance, e)
if hasattr(dll, 'mediacontrol_new_from_instance'):
def mediacontrol_new_from_instance(self):
"""Create a MediaControl instance from an existing libvlc instance
@return: a mediacontrol_Instance
"""
e=MediaControlException()
return mediacontrol_new_from_instance(self, e)
class Log(object):
    """Wrapper for a VLC message log instance.

    Iterating over a Log yields its messages via get_iterator(); len() is
    available only when the loaded libvlc exports libvlc_log_count.
    """
    def __new__(cls, pointer=None):
        '''Internal method used for instantiating wrappers from ctypes.

        Returns None for a NULL pointer; otherwise an object whose
        _as_parameter_ lets ctypes hand it straight back to libvlc.
        '''
        if pointer is None:
            raise Exception("Internal method. Surely this class cannot be instanciated by itself.")
        if pointer == 0:
            # NULL from the C API maps to None, not a wrapper object.
            return None
        else:
            o=object.__new__(cls)
            o._as_parameter_=ctypes.c_void_p(pointer)
            return o

    @staticmethod
    def from_param(arg):
        '''(INTERNAL) ctypes parameter conversion method.
        '''
        return arg._as_parameter_

    def __iter__(self):
        # Iterate over log messages through a freshly allocated iterator.
        return self.get_iterator()

    def dump(self):
        # Snapshot of all current messages, stringified.
        return [ str(m) for m in self ]

    # Each wrapper below is bound only when the loaded libvlc exports the
    # corresponding symbol.
    if hasattr(dll, 'libvlc_log_close'):
        def close(self):
            """Close a VLC message log instance.
            """
            e=VLCException()
            return libvlc_log_close(self, e)

    if hasattr(dll, 'libvlc_log_count'):
        def count(self):
            """Returns the number of messages in a log instance.
            @return: number of log messages
            """
            e=VLCException()
            return libvlc_log_count(self, e)

        def __len__(self):
            # len(log) mirrors count(); only defined under the same guard.
            e=VLCException()
            return libvlc_log_count(self, e)

    if hasattr(dll, 'libvlc_log_clear'):
        def clear(self):
            """Clear a log instance.
            All messages in the log are removed. The log should be cleared on a
            regular basis to avoid clogging.
            """
            e=VLCException()
            return libvlc_log_clear(self, e)

    if hasattr(dll, 'libvlc_log_get_iterator'):
        def get_iterator(self):
            """Allocate and returns a new iterator to messages in log.
            @return: log iterator object
            """
            e=VLCException()
            return libvlc_log_get_iterator(self, e)
class LogIterator(object):
    """Iterator over the messages of a VLC Log instance."""
    def __new__(cls, pointer=None):
        '''Internal method used for instantiating wrappers from ctypes.

        Returns None for a NULL pointer; otherwise an object whose
        _as_parameter_ lets ctypes hand it straight back to libvlc.
        '''
        if pointer is None:
            raise Exception("Internal method. Surely this class cannot be instanciated by itself.")
        if pointer == 0:
            return None
        else:
            o=object.__new__(cls)
            o._as_parameter_=ctypes.c_void_p(pointer)
            return o

    @staticmethod
    def from_param(arg):
        '''(INTERNAL) ctypes parameter conversion method.
        '''
        return arg._as_parameter_

    def __iter__(self):
        return self

    def next(self):
        """Return the next log message, raising StopIteration when exhausted."""
        if not self.has_next():
            raise StopIteration
        buf=LogMessage()
        e=VLCException()
        ret=libvlc_log_iterator_next(self, buf, e)
        return ret.contents

    # BUG FIX: the class only implemented the Python 2 iterator protocol.
    # Alias next() as __next__ so iteration also works on Python 3;
    # Python 2 callers are unaffected.
    __next__ = next

    if hasattr(dll, 'libvlc_log_iterator_free'):
        def free(self):
            """Release a previously allocated iterator.
            """
            e=VLCException()
            return libvlc_log_iterator_free(self, e)

    if hasattr(dll, 'libvlc_log_iterator_has_next'):
        def has_next(self):
            """Return whether log iterator has more messages.
            @return: true if iterator has more message objects, else false
            """
            e=VLCException()
            return libvlc_log_iterator_has_next(self, e)
class Media(object):
    def __new__(cls, pointer=None):
        '''(INTERNAL) Wrap a ctypes pointer returned by libvlc.

        A NULL pointer yields None; otherwise the pointer is stored as
        ``_as_parameter_`` so the instance can be passed straight back
        into ctypes calls.
        '''
        if pointer is None:
            raise Exception("Internal method. Surely this class cannot be instanciated by itself.")
        if pointer == 0:
            return None
        else:
            o=object.__new__(cls)
            o._as_parameter_=ctypes.c_void_p(pointer)
            return o

    @staticmethod
    def from_param(arg):
        '''(INTERNAL) ctypes parameter conversion method.
        '''
        return arg._as_parameter_
    # Conditionally defined wrappers: each exists only when the loaded
    # libvlc exports the corresponding symbol.  Calls that take a
    # VLCException container report errors through the ctypes layer.
    if hasattr(dll, 'libvlc_media_add_option'):
        def add_option(self, ppsz_options):
            """Add an option to the media.

            This option will be used to determine how the media_player will
            read the media. This allows to use VLC's advanced
            reading/streaming options on a per-media basis.
            The options are detailed in vlc --long-help, for instance "--sout-all"
            @param ppsz_options: the options (as a string)
            """
            e=VLCException()
            return libvlc_media_add_option(self, ppsz_options, e)

    if hasattr(dll, 'libvlc_media_add_option_untrusted'):
        def add_option_untrusted(self, ppsz_options):
            """Add an option to the media from an untrusted source.

            This option will be used to determine how the media_player will
            read the media. This allows to use VLC's advanced
            reading/streaming options on a per-media basis.
            The options are detailed in vlc --long-help, for instance "--sout-all"
            @param ppsz_options: the options (as a string)
            """
            e=VLCException()
            return libvlc_media_add_option_untrusted(self, ppsz_options, e)

    if hasattr(dll, 'libvlc_media_retain'):
        def retain(self):
            """Retain a reference to a media descriptor object (libvlc_media_t). Use
            libvlc_media_release() to decrement the reference count of a
            media descriptor object.
            """
            return libvlc_media_retain(self)

    if hasattr(dll, 'libvlc_media_release'):
        def release(self):
            """Decrement the reference count of a media descriptor object. If the
            reference count is 0, then libvlc_media_release() will release the
            media descriptor object. It will send out an libvlc_MediaFreed event
            to all listeners. If the media descriptor object has been released it
            should not be used again.
            """
            return libvlc_media_release(self)

    if hasattr(dll, 'libvlc_media_get_mrl'):
        def get_mrl(self):
            """Get the media resource locator (mrl) from a media descriptor object.

            @return: string with mrl of media descriptor object
            """
            e=VLCException()
            return libvlc_media_get_mrl(self, e)

    if hasattr(dll, 'libvlc_media_duplicate'):
        def duplicate(self):
            """Duplicate a media descriptor object.
            """
            return libvlc_media_duplicate(self)
if hasattr(dll, 'libvlc_media_get_meta'):
def get_meta(self, e_meta):
"""Read the meta of the media.
@param e_meta: the meta to read
@return: the media's meta
"""
e=VLCException()
| |
text_loc_x : str, default None
The key in the 'text' to locate the text_key for the
``pandas.DataFrame.index`` labels
text_loc_y : str, default None
The key in the 'text' to locate the text_key for the
``pandas.DataFrame.columns`` labels
display : {'x', 'y', ['x', 'y']}, default None
Text
axes : {'x', 'y', ['x', 'y']}, default None
Text
view_level : bool, default False
Text
transform_tests : {False, 'full', 'cells'}, default cells
Text
totalize : bool, default False
Text
Returns
-------
None
The ``.dataframe`` is modified inplace.
"""
for chain in self:
if isinstance(chain, dict):
for c in list(chain.values())[0]:
c.paint(*args, **kwargs)
else:
chain.paint(*args, **kwargs)
return None
# Fixed vocabulary of annotation keys: '<category>-<position>' combinations
# for headers/footers plus the position-less 'notes' key.
HEADERS = ['header-title',
           'header-left',
           'header-center',
           'header-right']

FOOTERS = ['footer-title',
           'footer-left',
           'footer-center',
           'footer-right']

VALID_ANNOT_TYPES = HEADERS + FOOTERS + ['notes']

VALID_ANNOT_CATS = ['header', 'footer', 'notes']

VALID_ANNOT_POS = ['title',
                   'left',
                   'center',
                   'right']


class ChainAnnotations(dict):
    """Dict-like container for chain annotation texts.

    Valid keys are the fixed '<category>-<position>' combinations listed in
    ``VALID_ANNOT_TYPES`` plus 'notes'. Every key maps to a list of text
    entries and is mirrored by an attribute of the same name with '-'
    replaced by '_' (e.g. ``self['header-title']`` / ``self.header_title``);
    ``set()`` keeps both in sync. Invalid keys raise ``KeyError``.
    """

    def __init__(self):
        super(ChainAnnotations, self).__init__()
        # Attribute mirrors of the dict keys (appended to by ``set()``).
        self.header_title = []
        self.header_left = []
        self.header_center = []
        self.header_right = []
        self.footer_title = []
        self.footer_left = []
        self.footer_center = []
        self.footer_right = []
        self.notes = []
        for v in VALID_ANNOT_TYPES:
            self[v] = []

    def __setitem__(self, key, value):
        self._test_valid_key(key)
        return super(ChainAnnotations, self).__setitem__(key, value)

    def __getitem__(self, key):
        self._test_valid_key(key)
        return super(ChainAnnotations, self).__getitem__(key)

    def __repr__(self):
        headers = [(h.split('-')[1], self[h]) for h in self.populated if
                   h.split('-')[0] == 'header']
        footers = [(f.split('-')[1], self[f]) for f in self.populated if
                   f.split('-')[0] == 'footer']
        notes = self['notes'] if self['notes'] else []
        if notes:
            ar = 'Notes\n'
            ar += '-{:>16}\n'.format(str(notes))
        else:
            ar = 'Notes: None\n'
        if headers:
            ar += 'Headers\n'
            for pos, text in list(dict(headers).items()):
                ar += ' {:>5}: {:>5}\n'.format(str(pos), str(text))
        else:
            ar += 'Headers: None\n'
        if footers:
            ar += 'Footers\n'
            for pos, text in list(dict(footers).items()):
                ar += ' {:>5}: {:>5}\n'.format(str(pos), str(text))
        else:
            ar += 'Footers: None'
        return ar

    def _test_valid_key(self, key):
        """Raise ``KeyError`` with a descriptive message if ``key`` is not a
        member of ``VALID_ANNOT_TYPES``."""
        if key not in VALID_ANNOT_TYPES:
            splitted = key.split('-')
            if len(splitted) > 1:
                acat, apos = splitted[0], splitted[1]
            else:
                acat, apos = key, None
            if apos:
                if acat == 'notes':
                    msg = "'{}' annotation type does not support positions!"
                    msg = msg.format(acat)
                elif not acat in VALID_ANNOT_CATS and not apos in VALID_ANNOT_POS:
                    msg = "'{}' is not a valid annotation type!".format(key)
                elif acat not in VALID_ANNOT_CATS:
                    msg = "'{}' is not a valid annotation category!".format(acat)
                elif apos not in VALID_ANNOT_POS:
                    msg = "'{}' is not a valid annotation position!".format(apos)
                else:
                    # e.g. 'header-title-extra': category and position words
                    # are individually valid but the key itself is not a
                    # supported type. (This path previously crashed with
                    # UnboundLocalError because ``msg`` was never assigned.)
                    msg = "'{}' is not a valid annotation type!".format(key)
            else:
                msg = "'{}' is not a valid annotation type!".format(key)
            raise KeyError(msg)

    @property
    def header(self):
        """Populated header texts as {position: texts}."""
        h_dict = {}
        for h in HEADERS:
            if self[h]: h_dict[h.split('-')[1]] = self[h]
        return h_dict

    @property
    def footer(self):
        """Populated footer texts as {position: texts}."""
        f_dict = {}
        for f in FOOTERS:
            if self[f]: f_dict[f.split('-')[1]] = self[f]
        return f_dict

    @property
    def populated(self):
        """
        The annotation fields that are defined.
        """
        return sorted([k for k, v in list(self.items()) if v])

    @staticmethod
    def _annot_key(a_type, a_pos):
        # Join category and position into the canonical dict key.
        if a_pos:
            return '{}-{}'.format(a_type, a_pos)
        else:
            return a_type

    def set(self, text, category='header', position='title'):
        """
        Add annotation texts defined by their category and position.

        Parameters
        ----------
        text : str
            The annotation text to append.
        category : {'header', 'footer', 'notes'}, default 'header'
            Defines if the annotation is treated as a *header*, *footer* or
            *note*.
        position : {'title', 'left', 'center', 'right'}, default 'title'
            Sets the placement of the annotation within its category
            (ignored for 'notes').

        Returns
        -------
        None
        """
        if not category: category = 'header'
        if not position and category != 'notes': position = 'title'
        if category == 'notes': position = None
        akey = self._annot_key(category, position)
        self[akey].append(text)
        self.__dict__[akey.replace('-', '_')].append(text)
        return None
# Per-locale labels used when rendering cell-contents legends and the
# significance-test annotations of painted chains.
CELL_DETAILS = {
    'en-GB': {
        'cc': 'Cell Contents',
        'N': 'Counts',
        'c%': 'Column Percentages',
        'r%': 'Row Percentages',
        'str': 'Statistical Test Results',
        'cp': 'Column Proportions',
        'cm': 'Means',
        'stats': 'Statistics',
        'mb': 'Minimum Base',
        'sb': 'Small Base',
        'up': ' indicates result is significantly higher than the result in the Total column',
        'down': ' indicates result is significantly lower than the result in the Total column',
    },
    'fr-FR': {
        'cc': 'Contenu cellule',
        'N': 'Total',
        'c%': 'Pourcentage de colonne',
        'r%': 'Pourcentage de ligne',
        'str': 'Résultats test statistique',
        'cp': 'Proportions de colonne',
        'cm': 'Moyennes de colonne',
        'stats': 'Statistiques',
        'mb': 'Base minimum',
        'sb': 'Petite base',
        'up': ' indique que le résultat est significativement supérieur au résultat de la colonne Total',
        'down': ' indique que le résultat est significativement inférieur au résultat de la colonne Total',
    },
}
class Chain(object):
    def __init__(self, stack, name, structure=None):
        """Hold one named table extracted from ``stack``.

        :param stack: the source stack this chain was built from
        :param name: identifier of the chain
        :param structure: optional dataframe for 'structure'-type chains
        """
        self.stack = stack
        self.name = name
        self.structure = structure
        self.source = 'native'
        self.edited = False
        self._custom_views = None
        self.double_base = False
        self.grouping = None
        self.sig_test_letters = None
        self.totalize = False
        self.base_descriptions = None
        self.painted = False
        self.hidden = False
        self.annotations = ChainAnnotations()
        # Internal caches populated while the chain is assembled/painted.
        self._array_style = None
        self._group_style = None
        self._meta = None
        self._x_keys = None
        self._y_keys = None
        self._given_views = None
        self._grp_text_map = []
        self._text_map = None
        self._custom_texts = {}
        # Metric-name translation table from the quantipy view layer.
        self._transl = qp.core.view.View._metric_name_map()
        self._pad_id = None
        self._frame = None
        self._has_rules = None
        self._flag_bases = None
        self._is_mask_item = False
        self._shapes = None
class _TransformedChainDF(object):
"""
"""
def __init__(self, chain):
c = chain.clone()
self.org_views = c.views
self.df = c._frame
self._org_idx = self.df.index
self._edit_idx = list(range(0, len(self._org_idx)))
self._idx_valmap = {n: o for n, o in
zip(self._edit_idx,
self._org_idx.get_level_values(1))}
self.df.index = self._edit_idx
self._org_col = self.df.columns
self._edit_col = list(range(0, len(self._org_col)))
self._col_valmap = {n: o for n, o in
zip(self._edit_col,
self._org_col.get_level_values(1))}
self.df.columns = self._edit_col
self.array_mi = c._array_style == 0
self.nested_y = c._nested_y
self._nest_mul = self._nesting_multiplier()
return None
def _nesting_multiplier(self):
"""
"""
levels = self._org_col.nlevels
if levels == 2:
return 1
else:
return (levels / 2) + 1
def _insert_viewlikes(self, new_index_flat, org_index_mapped):
inserts = [new_index_flat.index(val) for val in new_index_flat
if not val in list(org_index_mapped.values())]
flatviews = []
for name, no in list(self.org_views.items()):
e = [name] * no
flatviews.extend(e)
for vno, i in enumerate(inserts):
flatviews.insert(i, '__viewlike__{}'.format(vno))
new_views = OrderedDict()
no_of_views = Counter(flatviews)
for fv in flatviews:
if not fv in new_views: new_views[fv] = no_of_views[fv]
return new_views
def _updated_index_tuples(self, axis):
"""
"""
if axis == 1:
current = self.df.columns.values.tolist()
mapped = self._col_valmap
org_tuples = self._org_col.tolist()
else:
current = self.df.index.values.tolist()
mapped = self._idx_valmap
org_tuples = self._org_idx.tolist()
merged = [mapped[val] if val in mapped else val for val in current]
# ================================================================
if (self.array_mi and axis == 1) or axis == 0:
self._transf_views = self._insert_viewlikes(merged, mapped)
else:
self._transf_views = self.org_views
# ================================================================
i = d = 0
new_tuples = []
for merged_val in merged:
idx = i-d if i-d != len(org_tuples) else i-d-1
if org_tuples[idx][1] == merged_val:
new_tuples.append(org_tuples[idx])
else:
empties = ['*'] * self._nest_mul
new_tuple = tuple(empties + [merged_val])
new_tuples.append(new_tuple)
d += 1
i += 1
return new_tuples
def _reindex(self):
"""
"""
y_names = ['Question', 'Values']
if not self.array_mi:
x_names = y_names
else:
x_names = ['Array', 'Questions']
if self.nested_y: y_names = y_names * (self._nest_mul - 1)
tuples = self._updated_index_tuples(axis=1)
self.df.columns = pd.MultiIndex.from_tuples(tuples, names=y_names)
tuples = self._updated_index_tuples(axis=0)
self.df.index = pd.MultiIndex.from_tuples(tuples, names=x_names)
return None
    def export(self):
        """Return a ``_TransformedChainDF`` built from a clone of this chain,
        suitable for editing and later re-attachment via ``assign()``."""
        return self._TransformedChainDF(self)
def assign(self, transformed_chain_df):
"""
"""
if not isinstance(transformed_chain_df, self._TransformedChainDF):
raise ValueError("Must pass an exported ``Chain`` instance!")
transformed_chain_df._reindex()
self._frame = transformed_chain_df.df
self.views = transformed_chain_df._transf_views
return None
def __str__(self):
if self.structure is not None:
return '%s...\n%s' % (self.__class__.__name__, str(self.structure.head()))
str_format = ('%s...'
'\nSource: %s'
'\nName: %s'
'\nOrientation: %s'
'\nX: %s'
'\nY: %s'
'\nNumber of views: %s')
return str_format % (self.__class__.__name__,
getattr(self, 'source', 'native'),
getattr(self, 'name', 'None'),
getattr(self, 'orientation', 'None'),
getattr(self, '_x_keys', 'None'),
getattr(self, '_y_keys', 'None'),
getattr(self, 'views', 'None'))
def __repr__(self):
return self.__str__()
def __len__(self):
"""Returns the total number of cells in the Chain.dataframe"""
return (len(getattr(self, 'index', [])) * len(getattr(self, 'columns', [])))
    def clone(self):
        """Return a deep copy of this chain (frame, views and all state)."""
        return copy.deepcopy(self)
    @lazy_property
    def _default_text(self):
        # Default text key from the dataset metadata; keys without their own
        # entry in the translation table fall back to the 'en-GB' texts.
        tk = self._meta['lib']['default text']
        if tk not in self._transl:
            self._transl[tk] = self._transl['en-GB']
        return tk
@lazy_property
def orientation(self):
""" TODO: doc string
"""
if len(self._x_keys) == 1 and len(self._y_keys) == 1:
return 'x'
elif len(self._x_keys) == 1:
return 'x'
elif len(self._y_keys) == 1:
return 'y'
if len(self._x_keys) > 1 and len(self._y_keys) > 1:
return None
@lazy_property
def axis(self):
# TODO: name appropriate?
return int(self.orientation=='x')
@lazy_property
def axes(self):
# TODO: name appropriate?
if self.axis == 1:
return self._x_keys, self._y_keys
return self._y_keys, self._x_keys
    @property
    def dataframe(self):
        # Read-only access to the chain's result frame.
        return self._frame

    @property
    def index(self):
        # Row index of the (painted) frame; settable by the painting layer.
        return self._index

    @index.setter
    def index(self, index):
        self._index = index

    @property
    def columns(self):
        # Column index of the (painted) frame; settable by the painting layer.
        return self._columns

    @columns.setter
    def columns(self, columns):
        self._columns = columns

    @property
    def frame_values(self):
        # Cached raw cell values of the frame.
        return self._frame_values
@frame_values.setter
def | |
use (browse_record), either the original one if it
only belongs to this group, or the copy."""
if len(rule.groups) == 1:
return rule
# duplicate it first:
rule_obj = self.pool.get('ir.rule')
new_id = rule_obj.copy(cr, UID_ROOT, rule.id,
default={
'name': '%s %s' %(rule.name, _('(Duplicated for modified sharing permissions)')),
'groups': [(6,0,[group_id])],
'domain_force': rule.domain_force, # non evaluated!
})
_logger.debug("Duplicating rule %s (%s) (domain: %s) for modified access ", rule.name, rule.id, rule.domain_force)
# then disconnect from group_id:
rule.write({'groups':[(3,group_id)]}) # disconnects, does not delete!
return rule_obj.browse(cr, UID_ROOT, new_id, context=context)
    def _create_or_combine_sharing_rule(self, cr, current_user, wizard_data, group_id, model_id, domain, restrict=False, rule_name=None, context=None):
        """Add a new ir.rule entry for model_id and domain on the target group_id.
        If ``restrict`` is True, instead of adding a rule, the domain is
        combined with AND operator with all existing rules in the group, to implement
        an additional restriction (as of 6.1, multiple rules in the same group are
        OR'ed by default, so a restriction must alter all existing rules)
        This is necessary because the personal rules of the user that is sharing
        are first copied to the new share group. Afterwards the filters used for
        sharing are applied as an additional layer of rules, which are likely to
        apply to the same model. The default rule algorithm would OR them (as of 6.1),
        which would result in a combined set of permission that could be larger
        than those of the user that is sharing! Hence we must forcefully AND the
        rules at this stage.
        One possibly undesirable effect can appear when sharing with a
        pre-existing group, in which case altering pre-existing rules would not
        be desired. This is addressed in the portal module.

        :param restrict: when True, AND the domain into the group's existing
                         rules instead of appending a new (OR'ed) rule
        :param rule_name: optional label for the created rule; a default is
                          generated from the sharing user when omitted
        """
        if rule_name is None:
            rule_name = _('Sharing filter created by user %s (%s) for group %s') % \
                            (current_user.name, current_user.login, group_id)
        rule_obj = self.pool.get('ir.rule')
        rule_ids = rule_obj.search(cr, UID_ROOT, [('groups', 'in', group_id), ('model_id', '=', model_id)])
        if rule_ids:
            for rule in rule_obj.browse(cr, UID_ROOT, rule_ids, context=context):
                if rule.domain_force == domain:
                    # don't create it twice!
                    if restrict:
                        continue
                    else:
                        _logger.debug("Ignoring sharing rule on model %s with domain: %s the same rule exists already", model_id, domain)
                        return
                if restrict:
                    # restricting existing rules is done by adding the clause
                    # with an AND, but we can't alter the rule if it belongs to
                    # other groups, so we duplicate if needed
                    rule = self._check_personal_rule_or_duplicate(cr, group_id, rule, context=context)
                    eval_ctx = rule_obj._eval_context_for_combinations()
                    org_domain = expression.normalize_domain(safe_eval(rule.domain_force, eval_ctx))
                    new_clause = expression.normalize_domain(safe_eval(domain, eval_ctx))
                    combined_domain = expression.AND([new_clause, org_domain])
                    rule.write({'domain_force': combined_domain, 'name': rule.name + _('(Modified)')})
                    _logger.debug("Combining sharing rule %s on model %s with domain: %s", rule.id, model_id, domain)
        if not rule_ids or not restrict:
            # Adding the new rule in the group is ok for normal cases, because rules
            # in the same group and for the same model will be combined with OR
            # (as of v6.1), so the desired effect is achieved.
            rule_obj.create(cr, UID_ROOT, {
                'name': rule_name,
                'model_id': model_id,
                'domain_force': domain,
                'groups': [(4,group_id)]
                })
            _logger.debug("Created sharing rule on model %s with domain: %s", model_id, domain)
    def _create_indirect_sharing_rules(self, cr, current_user, wizard_data, group_id, fields_relations, context=None):
        """Create restricting rules on the models reachable through the given
        relation fields, by prefixing each leaf of the shared domain with the
        relation field (e.g. ``('id', ...)`` becomes ``('project_id.id', ...)``).
        """
        rule_name = _('Indirect sharing filter created by user %s (%s) for group %s') % \
                        (current_user.name, current_user.login, group_id)
        try:
            domain = safe_eval(wizard_data.domain)
            if domain:
                for rel_field, model in fields_relations:
                    # mail.message is transversal: it should not received directly the access rights
                    if model.model in ['mail.message', 'mail.notification', 'res.company']: continue
                    related_domain = []
                    if not rel_field: continue
                    for element in domain:
                        if expression.is_leaf(element):
                            left, operator, right = element
                            # prefix the leaf's field with the relation path
                            left = '%s.%s'%(rel_field, left)
                            element = left, operator, right
                        related_domain.append(element)
                    self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
                         group_id, model_id=model.id, domain=str(related_domain),
                         rule_name=rule_name, restrict=True, context=context)
        except Exception:
            _logger.exception('Failed to create share access')
            raise osv.except_osv(_('Sharing access cannot be created.'),
                                 _('Sorry, the current screen and filter you are trying to share are not supported at the moment.\nYou may want to try a simpler filter.'))
    def _check_preconditions(self, cr, uid, wizard_data, context=None):
        """Validate the wizard input before sharing: an action and an access
        mode must be selected, the user must belong to the share group, and
        email-based sharing needs at least one recipient."""
        self._assert(wizard_data.action_id and wizard_data.access_mode,
                     _('Action and Access Mode are required to create a shared access.'),
                     context=context)
        self._assert(self.has_share(cr, uid, wizard_data, context=context),
                     _('You must be a member of the Share/User group to use the share wizard.'),
                     context=context)
        if wizard_data.user_type == 'emails':
            self._assert((wizard_data.new_users or wizard_data.email_1 or wizard_data.email_2 or wizard_data.email_3),
                     _('Please indicate the emails of the persons to share with, one per line.'),
                     context=context)
    def _create_share_users_group(self, cr, uid, wizard_data, context=None):
        """Creates the appropriate share group and share users, and populates
        result_line_ids of wizard_data with one line for each user.

        :return: a tuple composed of the new group id (to which the shared access should be granted),
                 the ids of the new share users that have been created and the ids of the existing share users
        """
        group_id = self._create_share_group(cr, uid, wizard_data, context=context)
        # First create any missing user, based on the email addresses provided
        new_ids, existing_ids = self._create_new_share_users(cr, uid, wizard_data, group_id, context=context)
        # Finally, setup the new action and shortcut for the users.
        if existing_ids:
            # existing users still need to join the new group
            self.pool.get('res.users').write(cr, UID_ROOT, existing_ids, {
                                                'groups_id': [(4,group_id)],
                                             })
            # existing user don't need their home action replaced, only a new shortcut
            self._setup_action_and_shortcut(cr, uid, wizard_data, existing_ids, make_home=False, context=context)
        if new_ids:
            # new users need a new shortcut AND a home action
            self._setup_action_and_shortcut(cr, uid, wizard_data, new_ids, make_home=True, context=context)
        return group_id, new_ids, existing_ids
def go_step_2(self, cr, uid, ids, context=None):
wizard_data = self.browse(cr, uid, ids[0], context=context)
self._check_preconditions(cr, uid, wizard_data, context=context)
# Create shared group and users
group_id, new_ids, existing_ids = self._create_share_users_group(cr, uid, wizard_data, context=context)
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
model_obj = self.pool.get('ir.model')
model_id = model_obj.search(cr, uid, [('model','=', wizard_data.action_id.res_model)])[0]
model = model_obj.browse(cr, uid, model_id, context=context)
# ACCESS RIGHTS
# We have several classes of objects that should receive different access rights:
# Let:
# - [obj0] be the target model itself (and its parents via _inherits, if any)
# - [obj1] be the target model and all other models recursively accessible from
# obj0 via one2many relationships
# - [obj2] be the target model and all other models recursively accessible from
# obj0 via one2many and many2many relationships
# - [obj3] be all models recursively accessible from obj1 via many2one relationships
# (currently not used)
obj0, obj1, obj2, obj3 = self._get_relationship_classes(cr, uid, model, context=context)
mode = wizard_data.access_mode
# Add access to [obj0] and [obj1] according to chosen mode
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj0, context=context)
self._add_access_rights_for_share_group(cr, uid, group_id, mode, obj1, context=context)
# Add read-only access (always) to [obj2]
self._add_access_rights_for_share_group(cr, uid, group_id, 'readonly', obj2, context=context)
# IR.RULES
# A. On [obj0], [obj1], [obj2]: add all rules from all groups of
# the user that is sharing
# Warning: rules must be copied instead of linked if they contain a reference
# to uid or if the rule is shared with other groups (and it must be replaced correctly)
# B. On [obj0]: 1 rule with domain of shared action
# C. For each model in [obj1]: 1 rule in the form:
# many2one_rel.domain_of_obj0
# where many2one_rel is the many2one used in the definition of the
# one2many, and domain_of_obj0 is the sharing domain
# For example if [obj0] is project.project with a domain of
# ['id', 'in', [1,2]]
# then we will have project.task in [obj1] and we need to create this
# ir.rule on project.task:
# ['project_id.id', 'in', [1,2]]
# A.
all_relations = obj0 + obj1 + obj2
self._link_or_copy_current_user_rules(cr, current_user, group_id, all_relations, context=context)
# B.
main_domain = wizard_data.domain if wizard_data.domain != '[]' else str(DOMAIN_ALL)
self._create_or_combine_sharing_rule(cr, current_user, wizard_data,
group_id, model_id=model.id, domain=main_domain,
restrict=True, context=context)
# C.
self._create_indirect_sharing_rules(cr, current_user, wizard_data, group_id, obj1, context=context)
# refresh wizard_data
wizard_data = self.browse(cr, uid, ids[0], context=context)
# EMAILS AND NOTIFICATIONS
# A. Not invite: as before
# -> send emails to destination users
# B. Invite (OpenSocial)
# -> subscribe all users (existing and new) to the record
# -> send a notification with a summary to the current record
# -> send a notification to all users; users allowing to receive
# emails in preferences will receive it
# | |
import networkx as nx
from pymnet import *
import random
import matplotlib
import cascade as cas
import statistics
import math
import time
import csv
matplotlib.use('TkAgg')
# --- Simulation parameters -------------------------------------------------
nodes = 500
layers = 3
intra_thres = 0.2
inter_thres = 0.2
attack_size = 10
attack_point = (0.5, 0.5)
attack_type = "spatial_number"  # one of: "normal", "spatial_number", "spatial_range"
support_type = "random_layers"  # one of: "random_nodes", "random_layers"
edge_type = "undirected"  # one of: "undirected", "directed"

# --- Shared module state, (re)populated while networks are built -----------
coords = {}
dist_array = []
visited = [False] * nodes
in_thres = [False] * nodes
rgg_supp_nodes = {}
rand_supp_nodes = {}
intra_rgg_edges = []
intra_rand_edges = []
inter_rgg_edges = []
inter_rand_edges = []
intra_edges_num = []
inter_edges_num = []  # per layer: [for_edge, back_edge, for_supp_edge, back_supp_edge]


def cal_dist(cur_node, target_node):
    """Euclidean distance between two nodes' coordinates.

    ``target_node == -1`` denotes the attack centre (``attack_point``).
    """
    x1, y1 = coords[cur_node]
    x2, y2 = attack_point if target_node == -1 else coords[target_node]
    return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
def find_nearest_node(cur_node, supporting_node, neighbours, target_layers):
    """Return the id of the nearest neighbour of ``cur_node`` that lies in
    one of ``target_layers``.

    Parameters
    ----------
    cur_node : int
        Node whose nearest supporter is looked up.
    supporting_node :
        Fallback value, returned unchanged when no neighbour qualifies.
    neighbours : iterable of (node_id, layer) pairs
        Candidate neighbours.
    target_layers : container
        Layers a candidate must belong to.

    Returns
    -------
    The id of the closest qualifying neighbour, or ``supporting_node`` when
    there is none.
    """
    candidates = [(nb[0], cal_dist(cur_node, nb[0]))
                  for nb in neighbours if nb[1] in target_layers]
    if candidates:
        # min() is O(n) instead of the previous full sort and, like the
        # stable sort it replaces, picks the first of equally-near nodes.
        supporting_node = min(candidates, key=lambda cand: cand[1])[0]
    return supporting_node
def make_interlayer_edges(net, cur_layer, layer_names, intra_type, inter_type):
    """Add edges between ``cur_layer`` and the next layer of ``net``.

    RGG/RGG builds connect every node pair within ``inter_thres``, record the
    edges in ``inter_rgg_edges`` and tally per-layer counts in
    ``inter_edges_num``. RGG/Random builds re-create the same NUMBER of
    support and plain edges at random. Random/* builds replay the edge lists
    recorded by the matching earlier build of the same repetition.
    """
    if (intra_type == 'RGG') and (inter_type == 'RGG'):
        if cur_layer != (len(layer_names) - 1):
            for_edges = 0
            back_edges = 0
            for_supp_edges = 0
            back_supp_edges = 0
            for cur_node in range((cur_layer * nodes), (cur_layer + 1) * nodes):
                for target_node in range((cur_layer + 1) * nodes, (cur_layer + 2) * nodes):
                    d = cal_dist(cur_node, target_node)
                    if d <= inter_thres:
                        # add both directions and remember them for replay
                        net[cur_node, target_node, layer_names[cur_layer], layer_names[cur_layer + 1]] = 1
                        net[target_node, cur_node, layer_names[cur_layer + 1], layer_names[cur_layer]] = 1
                        inter_rgg_edges.append((cur_node, target_node, layer_names[cur_layer], layer_names[cur_layer + 1]))
                        inter_rgg_edges.append((target_node, cur_node, layer_names[cur_layer + 1], layer_names[cur_layer]))
                        # count support edges separately from plain edges
                        if cur_node == rgg_supp_nodes[target_node]:
                            for_supp_edges += 1
                        else:
                            for_edges += 1
                        if target_node == rgg_supp_nodes[cur_node]:
                            back_supp_edges += 1
                        else:
                            back_edges += 1
            inter_edges_num.append([for_edges, back_edges, for_supp_edges, back_supp_edges])
    elif (intra_type == 'RGG') and (inter_type == 'Random'):
        if cur_layer != (len(layer_names) - 1):
            for_edges = 0
            back_edges = 0
            for_supp_edges = 0
            back_supp_edges = 0
            cur_nodes = list(range((cur_layer * nodes), (cur_layer + 1) * nodes))
            target_nodes = list(range(((cur_layer + 1) * nodes), ((cur_layer + 2) * nodes)))
            # first wire up support edges until the recorded quota is met
            random.shuffle(target_nodes)
            for target_node in target_nodes:
                if rand_supp_nodes[target_node] in cur_nodes:
                    net[rand_supp_nodes[target_node], target_node, layer_names[cur_layer], layer_names[cur_layer + 1]] = 1
                    inter_rand_edges.append((rand_supp_nodes[target_node], target_node, layer_names[cur_layer], layer_names[cur_layer + 1]))
                    for_supp_edges += 1
                    if for_supp_edges >= inter_edges_num[cur_layer][2]:
                        break
            random.shuffle(cur_nodes)
            for cur_node in cur_nodes:
                if rand_supp_nodes[cur_node] in target_nodes:
                    net[rand_supp_nodes[cur_node], cur_node, layer_names[cur_layer + 1], layer_names[cur_layer]] = 1
                    inter_rand_edges.append((rand_supp_nodes[cur_node], cur_node, layer_names[cur_layer + 1], layer_names[cur_layer]))
                    back_supp_edges += 1
                    if back_supp_edges >= inter_edges_num[cur_layer][3]:
                        break
            # NOTE(review): sorted() returns a new list; these two calls
            # discard their result -- presumably sort() was intended.
            sorted(cur_nodes)
            sorted(target_nodes)
            # then add random plain edges until the recorded counts match
            while for_edges < inter_edges_num[cur_layer][0]:
                cur_node = random.choice(cur_nodes)
                target_node = random.choice(target_nodes)
                if net[cur_node, target_node, layer_names[cur_layer], layer_names[cur_layer + 1]] == 0:
                    net[cur_node, target_node, layer_names[cur_layer], layer_names[cur_layer + 1]] = 1
                    inter_rand_edges.append((cur_node, target_node, layer_names[cur_layer], layer_names[cur_layer + 1]))
                    for_edges += 1
            while back_edges < inter_edges_num[cur_layer][1]:
                cur_node = random.choice(cur_nodes)
                target_node = random.choice(target_nodes)
                if net[target_node, cur_node, layer_names[cur_layer + 1], layer_names[cur_layer]] == 0:
                    net[target_node, cur_node, layer_names[cur_layer + 1], layer_names[cur_layer]] = 1
                    inter_rand_edges.append((target_node, cur_node, layer_names[cur_layer + 1], layer_names[cur_layer]))
                    back_edges += 1
    elif (intra_type == 'Random') and (inter_type == 'RGG'):
        # replay the edges recorded by the RGG/RGG build
        for node_from, node_to, layer_from, layer_to in inter_rgg_edges:
            net[node_from, node_to, layer_from, layer_to] = 1
    elif (intra_type == 'Random') and (inter_type == 'Random'):
        # replay the edges recorded by the RGG/Random build
        for node_from, node_to, layer_from, layer_to in inter_rand_edges:
            net[node_from, node_to, layer_from, layer_to] = 1
    return net
def make_intralayer_edges(net, cur_layer, cur_layer_name, intra_type, inter_type):
    """Add edges within ``cur_layer``.

    RGG/RGG builds connect all distinct node pairs within ``intra_thres`` and
    record them; Random/RGG builds create the same NUMBER of random edges;
    the remaining combinations replay the recorded edge lists.
    """
    if (intra_type == 'RGG') and (inter_type == 'RGG'):
        edges = 0
        for cur_node in range(cur_layer * nodes, (cur_layer + 1) * nodes):
            for target_node in range(cur_layer * nodes, (cur_layer + 1) * nodes):
                if cur_node != target_node:
                    d = cal_dist(cur_node, target_node)
                    if d <= intra_thres:
                        net[cur_node, target_node, cur_layer_name, cur_layer_name] = 1
                        intra_rgg_edges.append((cur_node, target_node, cur_layer_name))
                        # NOTE: each undirected pair is visited twice, so
                        # ``edges`` counts ordered pairs.
                        edges += 1
        intra_edges_num.append(edges)
    elif (intra_type == 'RGG') and (inter_type == 'Random'):
        for cur_node, target_node, cur_layer_name in intra_rgg_edges:
            net[cur_node, target_node, cur_layer_name, cur_layer_name] = 1
    elif (intra_type == 'Random') and (inter_type == 'RGG'):
        cur_nodes = list(range((cur_layer * nodes), ((cur_layer + 1) * nodes)))
        target_nodes = list(range((cur_layer * nodes), ((cur_layer + 1) * nodes)))
        edges = 0
        # NOTE(review): unlike the RGG branch there is no
        # ``cur_node != target_node`` guard here, so self-loops can be
        # drawn -- confirm whether that is intended.
        while edges < intra_edges_num[cur_layer]:
            cur_node = random.choice(cur_nodes)
            target_node = random.choice(target_nodes)
            if net[cur_node, target_node, cur_layer_name, cur_layer_name] == 0:
                net[cur_node, target_node, cur_layer_name, cur_layer_name] = 1
                intra_rand_edges.append((cur_node, target_node, cur_layer_name))
                edges += 1
    elif (intra_type == 'Random') and (inter_type == 'Random'):
        for cur_node, target_node, cur_layer_name in intra_rand_edges:
            net[cur_node, target_node, cur_layer_name, cur_layer_name] = 1
    return net
def make_edges(net, layer_names, intra_type, inter_type):
    """Populate ``net`` with intra-layer then inter-layer edges, layer by layer."""
    for layer_idx in range(layers):
        net = make_intralayer_edges(net, layer_idx, layer_names[layer_idx],
                                    intra_type, inter_type)
        net = make_interlayer_edges(net, layer_idx, layer_names,
                                    intra_type, inter_type)
    return net
def find_mean(index, min_val, sum_val, temp_supp_nodes):
    """Recursive backtracking search over assignments of support nodes,
    pruning branches whose accumulated distance exceeds ``min_val``.

    NOTE(review): this looks unfinished -- the recursive call's return value
    is discarded, so ``min_val`` is never propagated back across branches,
    and when the ``for`` loop completes the function falls through and
    returns None (while the pruning branches return a
    ``(min_val, temp_supp_nodes)`` tuple). Callers doing
    ``min_val = find_mean(...)`` therefore receive inconsistent types.
    The search is also exponential in ``nodes``. Confirm the intended
    algorithm before relying on it.
    """
    if sum_val > min_val:
        return min_val, temp_supp_nodes
    if index == nodes:
        if sum_val < min_val:
            min_val = sum_val
        return min_val, temp_supp_nodes
    for col in range(nodes):
        if not visited[col]:
            visited[col] = True
            temp_supp_nodes.append(col)
            sum_val += dist_array[index][col]
            # return value discarded -- see NOTE(review) above
            find_mean(index+1, min_val, sum_val, temp_supp_nodes)
            sum_val -= dist_array[index][col]
            temp_supp_nodes.remove(col)
            visited[col] = False
def find_supporting_pair(cur_layer, target_nodes):
    """Build the ``dist_array`` rows for ``cur_layer`` against
    ``target_nodes`` and run the ``find_mean`` search.

    NOTE(review): ``supp_nodes`` is never used, ``min_val`` is overwritten
    with ``find_mean``'s return value (which may be None, see find_mean),
    and the function returns ``target_nodes`` unchanged -- the optimisation
    result is effectively ignored by callers.
    """
    min_val = nodes * math.sqrt(2)
    supp_nodes = []
    for cur_node in range(cur_layer * nodes, (cur_layer + 1) * nodes):
        dist_this_layer = []
        for target_node in target_nodes:
            cur_dist = cal_dist(cur_node, target_node)
            dist_this_layer.append(cur_dist)
        dist_array.append(dist_this_layer)
    sum_val = 0
    temp_supp_nodes = []
    min_val = find_mean(0, min_val, sum_val, temp_supp_nodes)
    return target_nodes
def find_supporting_nodes(layer_names, intra_type, inter_type):
    """Assign one supporting node (in an adjacent layer) to every node.

    RGG/RGG builds pick the nearest unused node within ``inter_thres`` and
    store the result in ``rgg_supp_nodes`` (-1 when none qualifies);
    RGG/Random builds pair nodes with a random permutation of the target
    layer in ``rand_supp_nodes``. Edge layers always use their single
    neighbouring layer; middle layers follow ``support_type``.
    """
    if (intra_type == 'RGG') and (inter_type == 'RGG'):
        for cur_layer in range(len(layer_names)):
            target_nodes = []
            if cur_layer == 0:
                target_nodes = list(range(((cur_layer + 1) * nodes), ((cur_layer + 2) * nodes)))
            elif cur_layer == len(layer_names) - 1:
                target_nodes = list(range(((cur_layer - 1) * nodes), cur_layer * nodes))
            else:
                if support_type == "random_nodes":
                    target_nodes = list(range(((cur_layer + 1) * nodes), ((cur_layer + 2) * nodes)))
                elif support_type == "random_layers":
                    choice = random.choice([(cur_layer - 1), (cur_layer + 1)])
                    target_nodes = list(range((choice * nodes), ((choice + 1) * nodes)))
            # NOTE(review): the returned value is target_nodes itself and
            # supp_nodes is unused -- see find_supporting_pair.
            supp_nodes = find_supporting_pair(cur_layer, target_nodes)
            for cur_node in range(cur_layer * nodes, (cur_layer + 1) * nodes):
                short_dist = 1
                short_node = -1
                for target_node in target_nodes:
                    cur_dist = cal_dist(cur_node, target_node)
                    if (cur_dist <= inter_thres) and (cur_dist <= short_dist):
                        short_dist = cur_dist
                        short_node = target_node
                rgg_supp_nodes[cur_node] = short_node
                if short_node != -1:
                    # each supporter can back at most one node
                    target_nodes.remove(short_node)
    elif (intra_type == 'RGG') and (inter_type == 'Random'):
        for cur_layer in range(len(layer_names)):
            target_nodes = []
            if cur_layer == 0:
                target_nodes = list(range(((cur_layer + 1) * nodes), ((cur_layer + 2) * nodes)))
            elif cur_layer == len(layer_names) - 1:
                target_nodes = list(range(((cur_layer - 1) * nodes), cur_layer * nodes))
            else:
                if support_type == "random_nodes":
                    target_nodes = list(range(((cur_layer + 1) * nodes), ((cur_layer + 2) * nodes)))
                elif support_type == "random_layers":
                    # mirror the layer choice made by the recorded RGG build
                    if inter_edges_num[cur_layer][3] == 0:
                        choice = cur_layer - 1
                    else:
                        choice = cur_layer + 1
                    target_nodes = list(range((choice * nodes), ((choice + 1) * nodes)))
            random.shuffle(target_nodes)
            cur_layer_nodes = list(range((cur_layer * nodes), ((cur_layer + 1) * nodes)))
            index = 0
            for cur_node in cur_layer_nodes:
                rand_supp_nodes[cur_node] = target_nodes[index]
                index += 1
def make_nodes(net, layer_names, intra_type, inter_type):
    """Add the module-global ``nodes`` count of vertices to every layer.

    Node ids are ``layer_index * nodes + offset``.  In the RGG/RGG case a
    random (x, y) position in the unit square is stored in the module-level
    ``coords`` mapping for each node.  Returns the network for chaining.
    """
    needs_coords = intra_type == 'RGG' and inter_type == 'RGG'
    for layer_idx in range(layers):
        base = layer_idx * nodes
        for offset in range(nodes):
            node_id = base + offset
            if needs_coords:
                coords[node_id] = (random.random(), random.random())
            net.add_node(node_id, layer_names[layer_idx])
    return net
def make_network_layer(net, layer_names):
    """Create one aspect-0 layer per module-global ``layers`` count.

    Layers are named 'a', 'b', 'c', ... in order; each name is appended to
    *layer_names*.  Returns the (net, layer_names) pair for chaining.
    """
    for offset in range(layers):
        name = chr(ord('a') + offset)
        net.add_layer(name, aspect=0)
        layer_names.append(name)
    return net, layer_names
def build_network(rep, intra_type, inter_type):
    """Assemble a fresh multilayer network: layers, nodes, support, edges.

    ``rep`` is not used in this function body — presumably a repetition
    index kept for the caller's interface; TODO confirm.
    """
    names = []
    network = MultilayerNetwork(aspects=1, fullyInterconnected=False, directed=False)
    network, names = make_network_layer(network, names)
    network = make_nodes(network, names, intra_type, inter_type)
    # Support assignment records into module-level dicts; no return value.
    find_supporting_nodes(names, intra_type, inter_type)
    return make_edges(network, names, intra_type, inter_type)
def analyse_initial_network(net, init_data):
layer_names = net.get_layers() # return dictionary
layer_names = sorted(list(layer_names))
stats = { "clustering":[], # Average clustering coefficient
"mean degree":[], # Mean degree
"the most far node":[], # The most far node from the attack centre
"components":[], # Components of the graph in each layers
"largest component":[], # The largest component of the graphs
"size of largest component":[], # The size of the largest component
}
# init_intra_edge, init_inter_edge, init_supp_edge, init_far_node, init_clust, init_mean_deg, init_large_comp
cur_layer = 0
for layer in layer_names:
edges = []
for edge in net.edges:
if edge[2] == edge[3] == layer:
edges.append(edge[:2])
G = nx.Graph()
G.add_edges_from(edges)
components = list(nx.connected_components(G))
far_dist = 0
for cur_node in range(cur_layer * nodes, (cur_layer + 1) * nodes):
d = cal_dist(cur_node, -1)
if d > far_dist:
far_dist = d
stats["clustering"].append(nx.average_clustering(G))
stats["mean degree"].append(len(edges) * 2 / nodes)
stats["the most far node"].append(far_dist)
stats["components"].append(components)
stats["largest component"].append(max(components, key=len))
stats["size of largest component"].append(len(max(components, key=len)))
cur_layer | |
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Nipype : Neuroimaging in Python pipelines and interfaces package.
Nipype intends to create python interfaces to other neuroimaging
packages and create an API for specifying a full analysis pipeline in
python.
Much of the machinery at the beginning of this file has been copied over from
nibabel denoted by ## START - COPIED FROM NIBABEL and a corresponding ## END
"""
# Build helper
from __future__ import print_function
import os
from os.path import join as pjoin
import sys
from glob import glob
from functools import partial
from io import open
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
# Stale MANIFEST files make distutils ship the wrong file list.
if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')
# For some commands, use setuptools.
# Only these egg/wheel-style commands need setuptools; everything else
# sticks to plain distutils so packages installed without setuptools
# are still recognised.
if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb',
            'install_egg_info', 'egg_info', 'easy_install', 'bdist_wheel',
            'bdist_mpkg')).intersection(sys.argv)) > 0:
    # import setuptools setup, thus monkeypatching distutils.
    import setup_egg
    from setuptools import setup
else:
    from distutils.core import setup
# Commit hash writing, and dependency checking
''' Distutils / setuptools helpers from nibabel.nisext'''
from distutils.version import LooseVersion
from distutils.command.build_py import build_py
from distutils import log
# Python 2/3 compatibility shims used by the helpers below.
PY3 = sys.version_info[0] >= 3
if PY3:
    string_types = (str, bytes)
else:
    # basestring/unicode only exist on Python 2; this branch never runs on 3.
    string_types = (basestring, str, unicode)
def get_comrec_build(pkg_dir, build_cmd=build_py):
    """ Return extended build command class for recording commit

    The extended command tries to run git to find the current commit, getting
    the empty string if it fails.  It then writes the commit hash into a file
    in the `pkg_dir` path, named ``COMMIT_INFO.txt``.

    In due course this information can be used by the package after it is
    installed, to tell you what commit it was installed from if known.

    To make use of this system, you need a package with a COMMIT_INFO.txt file
    - e.g. ``myproject/COMMIT_INFO.txt`` - that might well look like this::

        # This is an ini file that may contain information about the code state
        [commit hash]
        # The line below may contain a valid hash if it has been substituted
        # during 'git archive'
        archive_subst_hash=$Format:%h$
        # This line may be modified by the install process
        install_hash=

    The COMMIT_INFO file above is also designed to be used with git
    substitution - so you probably also want a ``.gitattributes`` file in the
    root directory of your working tree that contains something like this::

       myproject/COMMIT_INFO.txt export-subst

    That will cause the ``COMMIT_INFO.txt`` file to get filled in by ``git
    archive`` - useful in case someone makes such an archive - for example
    with via the github 'download source' button.

    Although all the above will work as is, you might consider having
    something like a ``get_info()`` function in your package to display the
    commit information at the terminal.  See the ``pkg_info.py`` module in
    the nipy package for an example.
    """
    class MyBuildPy(build_cmd):
        ''' Subclass to write commit data into installation tree '''
        def run(self):
            import subprocess
            try:
                from configparser import ConfigParser
            except ImportError:  # Python 2 fallback
                from ConfigParser import ConfigParser
            build_cmd.run(self)
            proc = subprocess.Popen('git rev-parse --short HEAD',
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    shell=True)
            repo_commit, _ = proc.communicate()
            # BUG FIX: ``communicate`` returns bytes on Python 3; the old
            # ``'{}'.format(repo_commit)`` stored the literal "b'abc\n'"
            # repr.  Decode and strip the trailing newline so the ini value
            # is the bare hash (or empty string on git failure).
            if isinstance(repo_commit, bytes):
                repo_commit = repo_commit.decode('ascii', 'replace')
            repo_commit = repo_commit.strip()
            # We write the installation commit even if it's empty
            cfg_parser = ConfigParser()
            cfg_parser.read(pjoin(pkg_dir, 'COMMIT_INFO.txt'))
            cfg_parser.set('commit hash', 'install_hash', repo_commit)
            out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt')
            # Close the output handle deterministically (the original left
            # it to the garbage collector).
            mode = 'wt' if PY3 else 'wb'
            with open(out_pth, mode) as out_file:
                cfg_parser.write(out_file)
    return MyBuildPy
def _add_append_key(in_dict, key, value):
    """Append *value* to ``in_dict[key]``, normalising the slot to a list.

    A missing key becomes an empty list first; a bare string value already
    stored there is promoted to a one-element list so the append never
    clobbers it.  Used for building setuptools dependency arguments.
    """
    slot = in_dict.setdefault(key, [])
    if isinstance(slot, string_types):
        slot = [slot]
        in_dict[key] = slot
    slot.append(value)
# Dependency checks
def package_check(pkg_name, version=None,
                  optional=False,
                  checker=LooseVersion,
                  version_getter=None,
                  messages=None,
                  setuptools_args=None
                  ):
    ''' Check if package `pkg_name` is present and has good enough version

    Has two modes of operation. If `setuptools_args` is None (the default),
    raise an error for missing non-optional dependencies and log warnings for
    missing optional dependencies. If `setuptools_args` is a dict, then fill
    ``install_requires`` key value with any missing non-optional dependencies,
    and the ``extras_requires`` key value with optional dependencies.

    This allows us to work with and without setuptools. It also means we can
    check for packages that have not been installed with setuptools to avoid
    installing them again.

    Parameters
    ----------
    pkg_name : str
       name of package as imported into python
    version : {None, str}, optional
       minimum version of the package that we require. If None, we don't
       check the version. Default is None
    optional : bool or str, optional
       If ``bool(optional)`` is False, raise error for absent package or wrong
       version; otherwise warn.  If ``setuptools_args`` is not None, and
       ``bool(optional)`` is not False, then `optional` should be a string
       giving the feature name for the ``extras_require`` argument to setup.
    checker : callable, optional
       callable with which to return comparable thing from version
       string.  Default is ``distutils.version.LooseVersion``
    version_getter : {None, callable}:
       Callable that takes `pkg_name` as argument, and returns the
       package version string - as in::

          ``version = version_getter(pkg_name)``

       If None, equivalent to::

          mod = __import__(pkg_name); version = mod.__version__``
    messages : None or dict, optional
       dictionary giving output messages
    setuptools_args : None or dict
       If None, raise errors / warnings for missing non-optional / optional
       dependencies.  If dict fill key values ``install_requires`` and
       ``extras_require`` for non-optional and optional dependencies.
    '''
    setuptools_mode = setuptools_args is not None
    optional_tf = bool(optional)
    if version_getter is None:
        # Default: import the package and read its __version__ attribute.
        def version_getter(pkg_name):
            mod = __import__(pkg_name)
            return mod.__version__
    if messages is None:
        messages = {}
    msgs = {
        'missing': 'Cannot import package "%s" - is it installed?',
        'missing opt': 'Missing optional package "%s"',
        'opt suffix': '; you may get run-time errors',
        'version too old': 'You have version %s of package "%s"'
                           ' but we need version >= %s', }
    msgs.update(messages)
    status, have_version = _package_status(pkg_name,
                                           version,
                                           version_getter,
                                           checker)
    if status == 'satisfied':
        return
    if not setuptools_mode:
        # Plain mode: raise for required packages, warn for optional ones.
        if status == 'missing':
            if not optional_tf:
                raise RuntimeError(msgs['missing'] % pkg_name)
            log.warn(msgs['missing opt'] % pkg_name +
                     msgs['opt suffix'])
            return
        elif status == 'no-version':
            raise RuntimeError('Cannot find version for %s' % pkg_name)
        assert status == 'low-version'
        if not optional_tf:
            raise RuntimeError(msgs['version too old'] % (have_version,
                                                          pkg_name,
                                                          version))
        log.warn(msgs['version too old'] % (have_version,
                                            pkg_name,
                                            version) +
                 msgs['opt suffix'])
        return
    # setuptools mode
    if optional_tf and not isinstance(optional, string_types):
        raise RuntimeError('Not-False optional arg should be string')
    dependency = pkg_name
    if version:
        dependency += '>=' + version
    if optional_tf:
        if 'extras_require' not in setuptools_args:
            setuptools_args['extras_require'] = {}
        _add_append_key(setuptools_args['extras_require'],
                        optional,
                        dependency)
        return
    # BUG FIX: this call was commented out, so in setuptools mode
    # non-optional dependencies were silently dropped instead of being
    # added to ``install_requires`` as the docstring promises.
    _add_append_key(setuptools_args, 'install_requires', dependency)
    return
def _package_status(pkg_name, version, version_getter, checker):
try:
__import__(pkg_name)
except ImportError:
return 'missing', None
if not version:
return 'satisfied', None
try:
have_version = version_getter(pkg_name)
except AttributeError:
return 'no-version', None
if checker(have_version) < checker(version):
return 'low-version', have_version
return 'satisfied', have_version
# build_py subclass that records the git commit hash at build time.
cmdclass = {'build_py': get_comrec_build('nipype')}
# Get version and release info, which is all stored in nipype/info.py
ver_file = os.path.join('nipype', 'info.py')
# Executing info.py injects NAME, VERSION, the *_MIN_VERSION constants,
# etc. into this namespace.  The open() handle is left to the garbage
# collector here.
exec(open(ver_file).read(), locals())
# Prepare setuptools args
if 'setuptools' in sys.modules:
    extra_setuptools_args = dict(
        tests_require=['nose'],
        test_suite='nose.collector',
        zip_safe=False,
        extras_require=dict(
            doc='Sphinx>=0.3',
            test='nose>=0.10.1'),
    )
    # In setuptools mode the checks fill extra_setuptools_args instead of
    # raising / warning (see package_check).
    pkg_chk = partial(package_check, setuptools_args=extra_setuptools_args)
else:
    extra_setuptools_args = {}
    pkg_chk = package_check
# Do dependency checking
pkg_chk('networkx', NETWORKX_MIN_VERSION)
pkg_chk('nibabel', NIBABEL_MIN_VERSION)
pkg_chk('numpy', NUMPY_MIN_VERSION)
pkg_chk('scipy', SCIPY_MIN_VERSION)
pkg_chk('traits', TRAITS_MIN_VERSION)
pkg_chk('nose', NOSE_MIN_VERSION)
pkg_chk('future', FUTURE_MIN_VERSION)
pkg_chk('simplejson', SIMPLEJSON_MIN_VERSION)
pkg_chk('prov', PROV_MIN_VERSION)
custom_dateutil_messages = {'missing opt': ('Missing optional package "%s"'
                                            ' provided by package '
                                            '"python-dateutil"')}
pkg_chk('dateutil', DATEUTIL_MIN_VERSION,
        messages=custom_dateutil_messages)
def main(**extra_args):
thispath, _ = os.path.split(__file__)
testdatafiles = [pjoin('testing', 'data', val)
for val in os.listdir(pjoin(thispath, 'nipype', 'testing', 'data'))
if not os.path.isdir(pjoin(thispath, 'nipype', 'testing', 'data', val))]
testdatafiles+=[
pjoin('testing', 'data', 'dicomdir', '*'),
pjoin('testing', 'data', 'bedpostxout', '*'),
pjoin('testing', 'data', 'tbss_dir', '*'),
pjoin('workflows', 'data', '*'),
pjoin('pipeline', 'engine', 'report_template.html'),
pjoin('external', 'd3.js'),
pjoin('interfaces', 'script_templates', '*'),
pjoin('interfaces', 'tests', 'realign_json.json'),
pjoin('interfaces', 'tests', 'use_resources'),
]
setup(name=NAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
version=VERSION,
install_requires=REQUIRES,
provides=PROVIDES,
packages=['nipype',
'nipype.algorithms',
'nipype.algorithms.tests',
'nipype.caching',
'nipype.caching.tests',
'nipype.external',
'nipype.fixes',
'nipype.fixes.numpy',
'nipype.fixes.numpy.testing',
'nipype.interfaces',
'nipype.interfaces.afni',
'nipype.interfaces.afni.tests',
'nipype.interfaces.ants',
'nipype.interfaces.ants.tests',
'nipype.interfaces.camino',
'nipype.interfaces.camino.tests',
'nipype.interfaces.camino2trackvis',
'nipype.interfaces.camino2trackvis.tests',
'nipype.interfaces.cmtk',
'nipype.interfaces.cmtk.tests',
'nipype.interfaces.diffusion_toolkit',
'nipype.interfaces.diffusion_toolkit.tests',
'nipype.interfaces.dipy',
'nipype.interfaces.dipy.tests',
'nipype.interfaces.elastix',
'nipype.interfaces.elastix.tests',
'nipype.interfaces.freesurfer',
'nipype.interfaces.freesurfer.tests',
'nipype.interfaces.fsl',
'nipype.interfaces.fsl.tests',
'nipype.interfaces.minc',
'nipype.interfaces.minc.tests',
'nipype.interfaces.mipav',
'nipype.interfaces.mipav.tests',
'nipype.interfaces.mne',
'nipype.interfaces.mne.tests',
'nipype.interfaces.mrtrix',
'nipype.interfaces.mrtrix3',
'nipype.interfaces.mrtrix.tests',
'nipype.interfaces.mrtrix3.tests',
'nipype.interfaces.nipy',
'nipype.interfaces.nipy.tests',
'nipype.interfaces.nitime',
'nipype.interfaces.nitime.tests',
'nipype.interfaces.script_templates',
'nipype.interfaces.semtools',
'nipype.interfaces.semtools.brains',
'nipype.interfaces.semtools.brains.tests',
'nipype.interfaces.semtools.diffusion',
'nipype.interfaces.semtools.diffusion.tests',
'nipype.interfaces.semtools.diffusion.tractography',
'nipype.interfaces.semtools.diffusion.tractography.tests',
'nipype.interfaces.semtools.filtering',
'nipype.interfaces.semtools.filtering.tests',
'nipype.interfaces.semtools.legacy',
'nipype.interfaces.semtools.legacy.tests',
'nipype.interfaces.semtools.registration',
'nipype.interfaces.semtools.registration.tests',
'nipype.interfaces.semtools.segmentation',
'nipype.interfaces.semtools.segmentation.tests',
'nipype.interfaces.semtools.testing',
'nipype.interfaces.semtools.tests',
'nipype.interfaces.semtools.utilities',
'nipype.interfaces.semtools.utilities.tests',
'nipype.interfaces.slicer',
'nipype.interfaces.slicer.diffusion',
'nipype.interfaces.slicer.diffusion.tests',
'nipype.interfaces.slicer.filtering',
'nipype.interfaces.slicer.filtering.tests',
'nipype.interfaces.slicer.legacy',
'nipype.interfaces.slicer.legacy.diffusion',
'nipype.interfaces.slicer.legacy.diffusion.tests',
'nipype.interfaces.slicer.legacy.tests',
'nipype.interfaces.slicer.quantification',
'nipype.interfaces.slicer.quantification.tests',
'nipype.interfaces.slicer.registration',
'nipype.interfaces.slicer.registration.tests',
'nipype.interfaces.slicer.segmentation',
'nipype.interfaces.slicer.segmentation.tests',
| |
test_to_dask_dataframe_dim_order(self):
values = np.array([[1, 2], [3, 4]], dtype=np.int64)
ds = Dataset({"w": (("x", "y"), values)}).chunk(1)
expected = ds["w"].to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["x", "y"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
expected = ds["w"].T.to_series().reset_index()
actual = ds.to_dask_dataframe(dim_order=["y", "x"])
assert isinstance(actual, dd.DataFrame)
assert_frame_equal(expected, actual.compute())
with pytest.raises(ValueError, match=r"does not match the set of dimensions"):
ds.to_dask_dataframe(dim_order=["x"])
@pytest.mark.parametrize("method", ["load", "compute"])
def test_dask_kwargs_variable(method):
x = Variable("y", da.from_array(np.arange(3), chunks=(2,)))
# args should be passed on to da.Array.compute()
with mock.patch.object(
da.Array, "compute", return_value=np.arange(3)
) as mock_compute:
getattr(x, method)(foo="bar")
mock_compute.assert_called_with(foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataarray(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = DataArray(data)
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
@pytest.mark.parametrize("method", ["load", "compute", "persist"])
def test_dask_kwargs_dataset(method):
data = da.from_array(np.arange(3), chunks=(2,))
x = Dataset({"x": (("y"), data)})
if method in ["load", "compute"]:
dask_func = "dask.array.compute"
else:
dask_func = "dask.persist"
# args should be passed on to "dask_func"
with mock.patch(dask_func) as mock_func:
getattr(x, method)(foo="bar")
mock_func.assert_called_with(data, foo="bar")
kernel_call_count = 0  # incremented by ``kernel`` so tests can count evaluations
def kernel(name):
    """Dask kernel to test pickling/unpickling and __repr__.
    Must be global to make it pickleable.
    """
    global kernel_call_count
    kernel_call_count += 1
    return np.ones(1, dtype=np.int64)
def build_dask_array(name):
    """Return a one-chunk dask array whose only task is ``kernel``.

    Resets ``kernel_call_count`` so callers can assert how many times the
    graph was evaluated.
    """
    global kernel_call_count
    kernel_call_count = 0
    return dask.array.Array(
        dask={(name, 0): (kernel, name)}, name=name, chunks=((1,),), dtype=np.int64
    )
@pytest.mark.parametrize(
    "persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_Dataset(persist):
    """Persisting a Dataset collapses its graph without mutating the source."""
    ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
    ds = ds + 1
    n = len(ds.foo.data.dask)
    ds2 = persist(ds)
    assert len(ds2.foo.data.dask) == 1
    assert len(ds.foo.data.dask) == n  # doesn't mutate in place
@pytest.mark.parametrize(
    "persist", [lambda x: x.persist(), lambda x: dask.persist(x)[0]]
)
def test_persist_DataArray(persist):
    """Persisting a DataArray leaves one task per partition; source untouched."""
    x = da.arange(10, chunks=(5,))
    y = DataArray(x)
    z = y + 1
    n = len(z.data.dask)
    zz = persist(z)
    assert len(z.data.dask) == n
    assert len(zz.data.dask) == zz.data.npartitions
def test_dataarray_with_dask_coords():
    """The DataArray graph must merge the graphs of its dask coordinates."""
    import toolz
    x = xr.Variable("x", da.arange(8, chunks=(4,)))
    y = xr.Variable("y", da.arange(8, chunks=(4,)) * 2)
    data = da.random.random((8, 8), chunks=(4, 4)) + 1
    array = xr.DataArray(data, dims=["x", "y"])
    array.coords["xx"] = x
    array.coords["yy"] = y
    assert dict(array.__dask_graph__()) == toolz.merge(
        data.__dask_graph__(), x.__dask_graph__(), y.__dask_graph__()
    )
    (array2,) = dask.compute(array)
    assert not dask.is_dask_collection(array2)
    # After compute, every coordinate should hold a plain numpy array.
    assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values())
def test_basic_compute():
    """Dataset/DataArray/Variable compute under every scheduler."""
    ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk({"x": 2})
    for get in [dask.threaded.get, dask.multiprocessing.get, dask.local.get_sync, None]:
        with dask.config.set(scheduler=get):
            ds.compute()
            ds.foo.compute()
            ds.foo.variable.compute()
def test_dask_layers_and_dependencies():
    """Delayed-wrapped xarray objects keep the underlying graph dependencies."""
    ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
    x = dask.delayed(ds)
    assert set(x.__dask_graph__().dependencies).issuperset(
        ds.__dask_graph__().dependencies
    )
    assert set(x.foo.__dask_graph__().dependencies).issuperset(
        ds.__dask_graph__().dependencies
    )
def make_da():
    """Build a chunked 10x20 test DataArray with assorted coords and attrs."""
    da = xr.DataArray(
        np.ones((10, 20)),
        dims=["x", "y"],
        coords={"x": np.arange(10), "y": np.arange(100, 120)},
        name="a",
    ).chunk({"x": 4, "y": 5})
    da.x.attrs["long_name"] = "x"
    da.attrs["test"] = "test"
    da.coords["c2"] = 0.5
    da.coords["ndcoord"] = da.x * 2
    da.coords["cxy"] = (da.x * da.y).chunk({"x": 4, "y": 5})
    return da
def make_ds():
    """Build a chunked test Dataset mixing dask/numpy variables and coords."""
    map_ds = xr.Dataset()
    map_ds["a"] = make_da()
    map_ds["b"] = map_ds.a + 50
    map_ds["c"] = map_ds.x + 20
    map_ds = map_ds.chunk({"x": 4, "y": 5})
    map_ds["d"] = ("z", [1, 1, 1, 1])
    map_ds["z"] = [0, 1, 2, 3]
    map_ds["e"] = map_ds.x + map_ds.y
    map_ds.coords["c1"] = 0.5
    map_ds.coords["cx"] = ("x", np.arange(len(map_ds.x)))
    map_ds.coords["cx"].attrs["test2"] = "test2"
    map_ds.attrs["test"] = "test"
    map_ds.coords["xx"] = map_ds["a"] * map_ds.y
    map_ds.x.attrs["long_name"] = "x"
    map_ds.y.attrs["long_name"] = "y"
    return map_ds
# fixtures cannot be used in parametrize statements
# instead use this workaround
# https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly
@pytest.fixture
def map_da():
    """Fresh test DataArray for each test."""
    return make_da()
@pytest.fixture
def map_ds():
    """Fresh test Dataset for each test."""
    return make_ds()
def test_unify_chunks(map_ds):
    """unify_chunks reconciles inconsistent chunking across variables."""
    ds_copy = map_ds.copy()
    ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
    with pytest.raises(ValueError, match=r"inconsistent chunks"):
        ds_copy.chunks
    expected_chunks = {"x": (4, 4, 2), "y": (5, 5, 5, 5)}
    with raise_if_dask_computes():
        actual_chunks = ds_copy.unify_chunks().chunks
    assert actual_chunks == expected_chunks
    assert_identical(map_ds, ds_copy.unify_chunks())
    out_a, out_b = xr.unify_chunks(ds_copy.cxy, ds_copy.drop_vars("cxy"))
    assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
    assert out_b.chunks == expected_chunks
    # Test unordered dims
    da = ds_copy["cxy"]
    out_a, out_b = xr.unify_chunks(da.chunk({"x": -1}), da.T.chunk({"y": -1}))
    assert out_a.chunks == ((4, 4, 2), (5, 5, 5, 5))
    assert out_b.chunks == ((5, 5, 5, 5), (4, 4, 2))
    # Test mismatch
    with pytest.raises(ValueError, match=r"Dimension 'x' size mismatch: 10 != 2"):
        xr.unify_chunks(da, da.isel(x=slice(2)))
@pytest.mark.parametrize("obj", [make_ds(), make_da()])
@pytest.mark.parametrize(
"transform", [lambda x: x.compute(), lambda x: x.unify_chunks()]
)
def test_unify_chunks_shallow_copy(obj, transform):
obj = transform(obj)
unified = obj.unify_chunks()
assert_identical(obj, unified) and obj is not obj.unify_chunks()
@pytest.mark.parametrize("obj", [make_da()])
def test_auto_chunk_da(obj):
actual = obj.chunk("auto").data
expected = obj.data.rechunk("auto")
np.testing.assert_array_equal(actual, expected)
assert actual.chunks == expected.chunks
def test_map_blocks_error(map_da, map_ds):
    """map_blocks raises informative errors for bad functions and arguments."""
    def bad_func(darray):
        # Shrinks the block, which map_blocks must reject at compute time.
        return (darray * darray.x + 5 * darray.y)[:1, :1]
    with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
        xr.map_blocks(bad_func, map_da).compute()
    def returns_numpy(darray):
        return (darray * darray.x + 5 * darray.y).values
    with pytest.raises(TypeError, match=r"Function must return an xarray DataArray"):
        xr.map_blocks(returns_numpy, map_da)
    with pytest.raises(TypeError, match=r"args must be"):
        xr.map_blocks(operator.add, map_da, args=10)
    with pytest.raises(TypeError, match=r"kwargs must be"):
        xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20])
    def really_bad_func(darray):
        raise ValueError("couldn't do anything.")
    with pytest.raises(Exception, match=r"Cannot infer"):
        xr.map_blocks(really_bad_func, map_da)
    ds_copy = map_ds.copy()
    ds_copy["cxy"] = ds_copy.cxy.chunk({"y": 10})
    with pytest.raises(ValueError, match=r"inconsistent chunks"):
        xr.map_blocks(bad_func, ds_copy)
    with pytest.raises(TypeError, match=r"Cannot pass dask collections"):
        xr.map_blocks(bad_func, map_da, kwargs=dict(a=map_da.chunk()))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks(obj):
def func(obj):
result = obj + obj.x + 5 * obj.y
return result
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj)
expected = func(obj)
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_convert_args_to_list(obj):
expected = obj + 10
with raise_if_dask_computes():
actual = xr.map_blocks(operator.add, obj, [10])
assert_chunks_equal(expected.chunk(), actual)
assert_identical(actual, expected)
def test_map_blocks_dask_args():
    """map_blocks aligns and chunks dask-backed ``args`` with the primary arg."""
    da1 = xr.DataArray(
        np.ones((10, 20)),
        dims=["x", "y"],
        coords={"x": np.arange(10), "y": np.arange(20)},
    ).chunk({"x": 5, "y": 4})
    # check that block shapes are the same
    def sumda(da1, da2):
        assert da1.shape == da2.shape
        return da1 + da2
    da2 = da1 + 1
    with raise_if_dask_computes():
        mapped = xr.map_blocks(sumda, da1, args=[da2])
    xr.testing.assert_equal(da1 + da2, mapped)
    # one dimension in common
    da2 = (da1 + 1).isel(x=1, drop=True)
    with raise_if_dask_computes():
        mapped = xr.map_blocks(operator.add, da1, args=[da2])
    xr.testing.assert_equal(da1 + da2, mapped)
    # test that everything works when dimension names are different
    da2 = (da1 + 1).isel(x=1, drop=True).rename({"y": "k"})
    with raise_if_dask_computes():
        mapped = xr.map_blocks(operator.add, da1, args=[da2])
    xr.testing.assert_equal(da1 + da2, mapped)
    with pytest.raises(ValueError, match=r"Chunk sizes along dimension 'x'"):
        xr.map_blocks(operator.add, da1, args=[da1.chunk({"x": 1})])
    with pytest.raises(ValueError, match=r"indexes along dimension 'x' are not equal"):
        xr.map_blocks(operator.add, da1, args=[da1.reindex(x=np.arange(20))])
    # reduction
    da1 = da1.chunk({"x": -1})
    da2 = da1 + 1
    with raise_if_dask_computes():
        mapped = xr.map_blocks(lambda a, b: (a + b).sum("x"), da1, args=[da2])
    xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
    # reduction with template
    da1 = da1.chunk({"x": -1})
    da2 = da1 + 1
    with raise_if_dask_computes():
        mapped = xr.map_blocks(
            lambda a, b: (a + b).sum("x"), da1, args=[da2], template=da1.sum("x")
        )
    xr.testing.assert_equal((da1 + da2).sum("x"), mapped)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_add_attrs(obj):
def add_attrs(obj):
obj = obj.copy(deep=True)
obj.attrs["new"] = "new"
obj.cxy.attrs["new2"] = "new2"
return obj
expected = add_attrs(obj)
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj)
assert_identical(actual, expected)
# when template is specified, attrs are copied from template, not set by function
with raise_if_dask_computes():
actual = xr.map_blocks(add_attrs, obj, template=obj)
assert_identical(actual, obj)
def test_map_blocks_change_name(map_da):
    """A name change inside the mapped function propagates to the result."""
    def change_name(obj):
        obj = obj.copy(deep=True)
        obj.name = "new"
        return obj
    expected = change_name(map_da)
    with raise_if_dask_computes():
        actual = xr.map_blocks(change_name, map_da)
    assert_identical(actual, expected)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_kwargs(obj):
    """``kwargs`` are forwarded to the mapped function."""
    expected = xr.full_like(obj, fill_value=np.nan)
    with raise_if_dask_computes():
        actual = xr.map_blocks(xr.full_like, obj, kwargs=dict(fill_value=np.nan))
    assert_chunks_equal(expected.chunk(), actual)
    assert_identical(actual, expected)
def test_map_blocks_to_array(map_ds):
    """The mapped function may convert a Dataset into a DataArray."""
    with raise_if_dask_computes():
        actual = xr.map_blocks(lambda x: x.to_array(), map_ds)
    # to_array does not preserve name, so cannot use assert_identical
    assert_equal(actual, map_ds.to_array())
@pytest.mark.parametrize(
    "func",
    [
        lambda x: x,
        lambda x: x.to_dataset(),
        lambda x: x.drop_vars("x"),
        lambda x: x.expand_dims(k=[1, 2, 3]),
        lambda x: x.expand_dims(k=3),
        lambda x: x.assign_coords(new_coord=("y", x.y.data * 2)),
        lambda x: x.astype(np.int32),
        lambda x: x.x,
    ],
)
def test_map_blocks_da_transformations(func, map_da):
    """Structural DataArray transformations behave the same under map_blocks."""
    with raise_if_dask_computes():
        actual = xr.map_blocks(func, map_da)
    assert_identical(actual, func(map_da))
@pytest.mark.parametrize(
    "func",
    [
        lambda x: x,
        lambda x: x.drop_vars("cxy"),
        lambda x: x.drop_vars("a"),
        lambda x: x.drop_vars("x"),
        lambda x: x.expand_dims(k=[1, 2, 3]),
        lambda x: x.expand_dims(k=3),
        lambda x: x.rename({"a": "new1", "b": "new2"}),
        lambda x: x.x,
    ],
)
def test_map_blocks_ds_transformations(func, map_ds):
    """Structural Dataset transformations behave the same under map_blocks."""
    with raise_if_dask_computes():
        actual = xr.map_blocks(func, map_ds)
    assert_identical(actual, func(map_ds))
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_da_ds_with_template(obj):
func = lambda x: x.isel(x=[1])
template = obj.isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, obj, template=template)
assert_identical(actual, template)
with raise_if_dask_computes():
actual = obj.map_blocks(func, template=template)
assert_identical(actual, template)
def test_map_blocks_template_convert_object():
da = make_da()
func = lambda x: x.to_dataset().isel(x=[1])
template = da.to_dataset().isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, da, template=template)
assert_identical(actual, template)
ds = da.to_dataset()
func = lambda x: x.to_array().isel(x=[1])
template = ds.to_array().isel(x=[1, 5, 9])
with raise_if_dask_computes():
actual = xr.map_blocks(func, ds, template=template)
assert_identical(actual, template)
@pytest.mark.parametrize("obj", [make_da(), make_ds()])
def test_map_blocks_errors_bad_template(obj):
with pytest.raises(ValueError, match=r"unexpected coordinate variables"):
xr.map_blocks(lambda x: x.assign_coords(a=10), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"does not contain coordinate variables"):
xr.map_blocks(lambda x: x.drop_vars("cxy"), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Dimensions {'x'} missing"):
xr.map_blocks(lambda x: x.isel(x=1), obj, template=obj).compute()
with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=obj).compute()
with pytest.raises(TypeError, | |
<reponame>miguelangel-nubla/teslajsonpy
# SPDX-License-Identifier: Apache-2.0
"""
Python Package for controlling Tesla API.
For more details about this api, please refer to the documentation at
https://github.com/zabuldon/teslajsonpy
"""
import asyncio
import logging
import time
from typing import Callable, Optional, Text, Tuple
import backoff
import wrapt
from aiohttp import ClientConnectorError
from teslajsonpy.battery_sensor import Battery, Range
from teslajsonpy.binary_sensor import (
ChargerConnectionSensor,
OnlineSensor,
ParkingSensor,
)
from teslajsonpy.charger import ChargerSwitch, ChargingSensor, RangeSwitch
from teslajsonpy.climate import Climate, TempSensor
from teslajsonpy.connection import Connection
from teslajsonpy.const import (
DRIVING_INTERVAL,
IDLE_INTERVAL,
ONLINE_INTERVAL,
SLEEP_INTERVAL,
)
from teslajsonpy.exceptions import RetryLimitError, TeslaException
from teslajsonpy.gps import GPS, Odometer
from teslajsonpy.lock import Lock, ChargerLock
from teslajsonpy.sentry_mode import SentryModeSwitch
from teslajsonpy.trunk import TrunkLock, FrunkLock
from teslajsonpy.window_cover import WindowCover
_LOGGER = logging.getLogger(__name__)
def min_expo(base=2, factor=1, max_value=None, min_value=0):
    """Yield an exponential backoff sequence.

    Produces ``min_value + factor * base ** n`` for n = 0, 1, 2, ...
    Once a term would reach ``max_value`` the generator yields
    ``max_value`` forever instead of growing further.

    Args:
        base: The mathematical base of the exponentiation operation.
        factor: Factor to multiply the exponentiation by.
        max_value: Ceiling for yielded values; ``None`` means unbounded.
        min_value: Constant offset added to every yielded value.
    """
    exponent = 0
    while True:
        candidate = min_value + factor * base ** exponent
        if max_value is not None and candidate >= max_value:
            # Saturated: keep yielding the cap without advancing.
            yield max_value
        else:
            yield candidate
            exponent += 1
@wrapt.decorator
async def wake_up(wrapped, instance, args, kwargs) -> Callable:
    # pylint: disable=protected-access
    """Wrap an API function so it will attempt to wake the vehicle if asleep.

    The wrapped command is run once if the car_id was last reported
    online. If the wrapped call does not yield a valid result, up to five
    wake attempts are made before retrying the command a final time.

    Raises
        RetryLimitError: The wake_up has exceeded the 5 attempts.
        TeslaException: Tesla connection errors.

    Returns
        Callable: Wrapped function that will wake_up.
    """
    def valid_result(result):
        """Check if TeslaAPI result successful.

        Parameters
        ----------
        result : tesla API result
            This is the result of a Tesla Rest API call.

        Returns
        -------
        bool
            Tesla API failure can be checked in a dict with a bool in
            ['response']['result'], a bool, or None or
            ['response']['reason'] == 'could_not_wake_buses'
            Returns true when a failure state not detected.
        """
        try:
            return (
                result is not None
                and result is not False
                and (
                    result is True
                    or (
                        isinstance(result, dict)
                        and isinstance(result["response"], dict)
                        and (
                            result["response"].get("result") is True
                            or result["response"].get("reason")
                            != "could_not_wake_buses"
                        )
                    )
                )
            )
        except TypeError as exception:
            _LOGGER.error("Result: %s, %s", result, exception)
            # NOTE: falls through returning None (falsy), which every caller
            # treats the same as an invalid result.
    retries = 0
    sleep_delay = 2
    # args[0] is the car_id for every wrapped API method (see post/get).
    car_id = args[0]
    is_wake_command = len(args) >= 2 and args[1] == "wake_up"
    result = None
    # First attempt: only issue the call if the car was last seen online,
    # or if this is itself a wake_up command (always allowed through).
    if instance.car_online.get(instance._id_to_vin(car_id)) or is_wake_command:
        try:
            result = await wrapped(*args, **kwargs)
        except TeslaException as ex:
            _LOGGER.debug(
                "Exception: %s\n%s(%s %s)", ex.message, wrapped.__name__, args, kwargs
            )
            raise
    if valid_result(result) or is_wake_command:
        return result
    _LOGGER.debug(
        "wake_up needed for %s -> %s \n"
        "Info: args:%s, kwargs:%s, "
        "VIN:%s, car_online:%s",
        wrapped.__name__,
        result,
        args,
        kwargs,
        instance._id_to_vin(car_id)[-5:] if car_id else None,
        instance.car_online,
    )
    # Mark the car offline before attempting wake-ups.
    instance.car_online[instance._id_to_vin(car_id)] = False
    # NOTE(review): wake_if_asleep is read from kwargs only; a default value
    # declared on the wrapped function does not appear here, so callers must
    # pass wake_if_asleep explicitly for waking to occur — confirm intended.
    while (
        kwargs.get("wake_if_asleep")
        and
        # Check online state
        (
            car_id is None
            or (
                not instance._id_to_vin(car_id)
                or not instance.car_online.get(instance._id_to_vin(car_id))
            )
        )
    ):
        _LOGGER.debug("Attempting to wake up")
        result = await instance._wake_up(car_id)
        _LOGGER.debug(
            "%s(%s): Wake Attempt(%s): %s",
            wrapped.__name__,
            instance._id_to_vin(car_id)[-5:],
            retries,
            result,
        )
        if not result:
            if retries < 5:
                # Backoff between wake attempts: 15s plus 2**(retries+2).
                await asyncio.sleep(15 + sleep_delay ** (retries + 2))
                retries += 1
                continue
            instance.car_online[instance._id_to_vin(car_id)] = False
            raise RetryLimitError("Reached retry limit; aborting wake up")
        break
    instance.car_online[instance._id_to_vin(car_id)] = True
    # retry function
    _LOGGER.debug("Retrying %s(%s %s)", wrapped.__name__, args, kwargs)
    try:
        result = await wrapped(*args, **kwargs)
        _LOGGER.debug(
            "Retry after wake up succeeded: %s",
            "True" if valid_result(result) else result,
        )
    except TeslaException as ex:
        _LOGGER.debug(
            "Exception: %s\n%s(%s %s)", ex.message, wrapped.__name__, args, kwargs
        )
        raise
    if valid_result(result):
        return result
    raise TeslaException("could_not_wake_buses")
class Controller:
# pylint: disable=too-many-public-methods
"""Controller for connections to Tesla Motors API."""
    def __init__(
        self,
        websession,
        email: Text = None,
        password: Text = None,
        access_token: Text = None,
        refresh_token: Text = None,
        expiration: int = 0,
        update_interval: int = 300,
        enable_websocket: bool = False,
    ) -> None:
        """Initialize controller.

        Args:
            websession (aiohttp.ClientSession): Websession for aiohttp.
            email (Text, optional): Email account. Defaults to None.
            password (Text, optional): Password. Defaults to None.
            access_token (Text, optional): Access token. Defaults to None.
            refresh_token (Text, optional): Refresh token. Defaults to None.
            expiration (int, optional): Timestamp when access_token expires.
                Defaults to 0.
            update_interval (int, optional): Seconds between allowed updates
                to the API. This is to prevent being blocked by Tesla.
                Defaults to 300.
            enable_websocket (bool, optional): Whether to connect with
                websockets. Defaults to False.

        """
        self.__connection = Connection(
            websession, email, password, access_token, refresh_token, expiration
        )
        self.__components = []
        self._update_interval: int = update_interval
        # Per-VIN caches of data returned by the API endpoints.
        self.__update = {}
        self.__climate = {}
        self.__charging = {}
        self.__state = {}
        self.__config = {}
        self.__driving = {}
        self.__gui = {}
        self._last_update_time = {}  # successful update attempts by car
        self._last_wake_up_time = {}  # successful wake_ups by car
        self._last_attempted_update_time = 0  # all attempts by controller
        # Per-VIN and controller-wide asyncio locks (created in connect()).
        self.__lock = {}
        self.__controller_lock = None
        self.__wakeup_conds = {}
        self.car_online = {}
        self.car_state = {}
        # Bidirectional maps between the identifiers used by the API:
        # id <-> vin and vin <-> vehicle_id.
        self.__id_vin_map = {}
        self.__vin_id_map = {}
        self.__vin_vehicle_id_map = {}
        self.__vehicle_id_vin_map = {}
        self.__websocket_listeners = []
        self.__last_parked_timestamp = {}
        self.__update_state = {}
        self.enable_websocket = enable_websocket
    async def connect(
        self, test_login=False, wake_if_asleep=False
    ) -> Tuple[Text, Text]:
        """Connect controller to Tesla.

        Fetches the account's vehicles, initializes the per-car bookkeeping
        structures, and (unless ``test_login``) kicks off an initial state
        update for every car.

        Args
            test_login (bool, optional): Whether to test credentials only.
                Defaults to False.
            wake_if_asleep (bool, optional): Whether to wake up any sleeping
                cars to update state. Defaults to False.

        Returns
            Tuple[Text, Text]: Returns the refresh_token and access_token.

        """
        cars = await self.get_vehicles()
        self._last_attempted_update_time = time.time()
        self.__controller_lock = asyncio.Lock()
        for car in cars:
            vin = car["vin"]
            # Bidirectional maps between the identifiers used by different
            # API endpoints (id, vin, vehicle_id).
            self.__id_vin_map[car["id"]] = vin
            self.__vin_id_map[vin] = car["id"]
            self.__vin_vehicle_id_map[vin] = car["vehicle_id"]
            self.__vehicle_id_vin_map[car["vehicle_id"]] = vin
            # Per-car synchronization primitives and timing bookkeeping.
            self.__lock[vin] = asyncio.Lock()
            self.__wakeup_conds[vin] = asyncio.Lock()
            self._last_update_time[vin] = 0
            self._last_wake_up_time[vin] = 0
            self.__update[vin] = True
            self.__update_state[vin] = "normal"
            self.car_state[vin] = car
            self.car_online[vin] = car["state"] == "online"
            self.__last_parked_timestamp[vin] = self._last_attempted_update_time
            # Empty per-endpoint caches, filled by update().
            self.__climate[vin] = {}
            self.__charging[vin] = {}
            self.__state[vin] = {}
            self.__config[vin] = {}
            self.__driving[vin] = {}
            self.__gui[vin] = {}
            self._add_components(car)
        if not test_login:
            # Initial state refresh for every car; exceptions are swallowed so
            # a sleeping or unreachable car does not fail connect().
            tasks = [
                self.update(car["id"], wake_if_asleep=wake_if_asleep) for car in cars
            ]
            _LOGGER.debug("tasks %s %s", tasks, wake_if_asleep)
            try:
                await asyncio.gather(*tasks)
            except (TeslaException, RetryLimitError):
                pass
        return (self.__connection.refresh_token, self.__connection.access_token)
def is_token_refreshed(self) -> bool:
"""Return whether token has been changed and not retrieved.
Returns
bool: Whether token has been changed since the last return
"""
return self.__connection.token_refreshed
def get_tokens(self) -> Tuple[Text, Text]:
"""Return refresh and access tokens.
This will set the the self.__connection token_refreshed to False.
Returns
Tuple[Text, Text]: Returns a tuple of refresh and access tokens
"""
self.__connection.token_refreshed = False
return (self.__connection.refresh_token, self.__connection.access_token)
def get_expiration(self) -> int:
"""Return expiration for oauth.
Returns
int: Returns timestamp when oauth expires
"""
return self.__connection.expiration
def register_websocket_callback(self, callback) -> int:
"""Register callback for websocket messages.
Args
callback (function): function to call with json data
Returns
int: Return index of entry
"""
self.__websocket_listeners.append(callback)
return len(self.__websocket_listeners) - 1
    # Retry transient connection failures with the capped exponential backoff
    # produced by min_expo, for at most 10 seconds overall.
    @backoff.on_exception(min_expo, ClientConnectorError, max_time=10, logger=__name__)
    async def get_vehicles(self):
        """Get vehicles json from TeslaAPI.

        Returns the ``response`` payload of the ``vehicles`` endpoint,
        which connect() iterates as one dict per vehicle on the account.
        """
        return (await self.__connection.get("vehicles"))["response"]
    @wake_up
    async def post(self, car_id, command, data=None, wake_if_asleep=True):
        # pylint: disable=unused-argument
        """Send post command to the car_id.

        This is a wrapped function by wake_up.

        Parameters
        ----------
        car_id : string
            Identifier for the car on the owner-api endpoint. It is the id
            field for identifying the car across the owner-api endpoint.
            https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
        command : string
            Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
        data : dict
            Optional parameters.
        wake_if_asleep : bool
            Consumed by the wake_up decorator, not this body; indicates
            whether a failed response should wake up the vehicle and retry.

        Returns
        -------
        dict
            Tesla json object.

        """
        # _update_id presumably remaps a possibly-stale id to the current API
        # id; it is defined elsewhere in this module — confirm its contract.
        car_id = self._update_id(car_id)
        # A missing payload is sent as an empty POST body.
        data = data or {}
        return await self.__connection.post(f"vehicles/{car_id}/{command}", data=data)
    @wake_up
    async def get(self, car_id, command, wake_if_asleep=False):
        # pylint: disable=unused-argument
        """Send get command to the car_id.

        This is a wrapped function by wake_up.

        Parameters
        ----------
        car_id : string
            Identifier for the car on the owner-api endpoint. It is the id
            field for identifying the car across the owner-api endpoint.
            https://tesla-api.timdorr.com/api-basics/vehicles#vehicle_id-vs-id
        command : string
            Tesla API command. https://tesla-api.timdorr.com/vehicle/commands
        wake_if_asleep : bool
            Consumed by the wake_up decorator, not this body; indicates
            whether a failed response should wake up the vehicle and retry.

        Returns
        -------
        dict
            Tesla json object.

        """
        # _update_id presumably remaps a possibly-stale id to the current API
        # id; it is defined elsewhere in this module — confirm its contract.
        car_id = self._update_id(car_id)
        return await self.__connection.get(f"vehicles/{car_id}/{command}")
async def data_request(self, car_id, name, wake_if_asleep=False):
"""Get requested data from car_id.
Parameters
----------
| |
# gh_stars: 1-10  (repository star-count metadata left by the dataset extraction; not code)
"""Dataset module for functions related to an xarray.Dataset."""
from typing import Any, Dict, Hashable, List, Optional, Union
import pandas as pd
import xarray as xr
from xcdat import bounds # noqa: F401
from xcdat.logger import setup_custom_logger
logger = setup_custom_logger(__name__)
def open_dataset(
    path: str, data_var: Optional[str] = None, **kwargs: Dict[str, Any]
) -> xr.Dataset:
    """Wrapper for ``xarray.open_dataset()`` that applies common operations.

    Operations include:

    - If the dataset has a time dimension, decode both CF and non-CF time
      units.
    - Generate bounds for supported coordinates if they don't exist.
    - Option to limit the Dataset to a single regular (non-bounds) data
      variable while retaining any bounds data variables.

    Parameters
    ----------
    path : str
        Path to Dataset.
    data_var: Optional[str], optional
        The key of the data variable to keep in the Dataset, by default None.
    kwargs : Dict[str, Any]
        Additional arguments passed on to ``xarray.open_dataset``.

        - Visit the xarray docs for accepted arguments [1]_.
        - ``decode_times`` defaults to ``False`` to allow for the manual
          decoding of non-CF time units.

    Returns
    -------
    xr.Dataset
        Dataset after applying operations.

    Notes
    -----
    ``xarray.open_dataset`` opens the file with read-only access. When you
    modify values of a Dataset, even one linked to files on disk, only the
    in-memory copy you are manipulating in xarray is modified: the original
    file on disk is never touched.

    References
    ----------
    .. [1] https://xarray.pydata.org/en/stable/generated/xarray.open_dataset.html

    Examples
    --------
    Import and call module:

    >>> from xcdat.dataset import open_dataset
    >>> ds = open_dataset("file_path")

    Keep a single regular data variable in the Dataset:

    >>> ds = open_dataset("file_path", data_var="tas")
    """
    # NOTE: Using decode_times=False may add incorrect units for existing time
    # bounds (becomes "days since 1970-01-01 00:00:00").
    ds = xr.open_dataset(path, decode_times=False, **kwargs)
    # Subset to ``data_var`` (plus bounds vars) and/or record the inferred var.
    ds = infer_or_keep_var(ds, data_var)
    # ``.cf`` is the cf accessor — presumably registered via xcdat's imports;
    # only decode time when a "T" dimension is present.
    if ds.cf.dims.get("T") is not None:
        ds = decode_time_units(ds)
    # Generate bounds for supported coordinates that lack them.
    ds = ds.bounds.fill_missing()
    return ds
def open_mfdataset(
    paths: Union[str, List[str]],
    data_var: Optional[str] = None,
    **kwargs: Dict[str, Any],
) -> xr.Dataset:
    """Wrapper for ``xarray.open_mfdataset()`` that applies common operations.

    Operations include:

    - If the dataset has a time dimension, decode both CF and non-CF time
      units.
    - Generate bounds for supported coordinates if they don't exist.
    - Option to limit the Dataset to a single regular (non-bounds) data
      variable while retaining any bounds data variables.

    Parameters
    ----------
    paths : Union[str, List[str]]
        Either a string glob in the form ``"path/to/my/files/*.nc"`` or an
        explicit list of files to open. Paths can be given as strings or as
        pathlib Paths. If concatenation along more than one dimension is
        desired, then ``paths`` must be a nested list-of-lists (see
        ``combine_nested`` for details). (A string glob will be expanded to
        a 1-dimensional list.)
    data_var: Optional[str], optional
        The key of the data variable to keep in the Dataset, by default None.
    kwargs : Dict[str, Any]
        Additional arguments passed on to ``xarray.open_mfdataset`` and/or
        ``xarray.open_dataset``.

        - Visit the xarray docs for accepted arguments, [2]_ and [3]_.
        - ``decode_times`` defaults to ``False`` to allow for the manual
          decoding of non-CF time units.

    Returns
    -------
    xr.Dataset
        Dataset after applying operations.

    Notes
    -----
    ``xarray.open_mfdataset`` opens the file with read-only access. When you
    modify values of a Dataset, even one linked to files on disk, only the
    in-memory copy you are manipulating in xarray is modified: the original
    file on disk is never touched.

    References
    ----------
    .. [2] https://xarray.pydata.org/en/stable/generated/xarray.open_mfdataset.html
    .. [3] https://xarray.pydata.org/en/stable/generated/xarray.open_dataset.html

    Examples
    --------
    Import and call module:

    >>> from xcdat.dataset import open_mfdataset
    >>> ds = open_mfdataset(["file_path1", "file_path2"])

    Keep a single regular data variable in the Dataset:

    >>> ds = open_mfdataset(["file_path1", "file_path2"], data_var="tas")
    """
    # NOTE: Using decode_times=False may add incorrect units for existing time
    # bounds (becomes "days since 1970-01-01 00:00:00").
    ds = xr.open_mfdataset(paths, decode_times=False, **kwargs)
    # Subset to ``data_var`` (plus bounds vars) and/or record the inferred var.
    ds = infer_or_keep_var(ds, data_var)
    # ``.cf`` is the cf accessor — presumably registered via xcdat's imports;
    # only decode time when a "T" dimension is present.
    if ds.cf.dims.get("T") is not None:
        ds = decode_time_units(ds)
    # Generate bounds for supported coordinates that lack them.
    ds = ds.bounds.fill_missing()
    return ds
def infer_or_keep_var(dataset: xr.Dataset, data_var: Optional[str]) -> xr.Dataset:
    """Infer the data variable(s) or keep a specific one in the Dataset.

    When ``data_var`` is None, count the regular (non-bounds) data variables:
    a single one is recorded in the Dataset's 'xcdat_infer' attr so other
    xcdat APIs can find it via ``get_inferred_var()``; with several, the attr
    is left unset and an informational message is logged.

    When ``data_var`` is given, validate that it exists and is a regular
    (non-bounds) variable, subset the Dataset to it plus all bounds
    variables, and record it in 'xcdat_infer'.

    Parameters
    ----------
    dataset : xr.Dataset
        The Dataset.
    data_var: Optional[str], optional
        The key of the data variable to keep in the Dataset.

    Returns
    -------
    xr.Dataset
        The Dataset.

    Raises
    ------
    KeyError
        If the specified data variable is not found in the Dataset.
    KeyError
        If the user specifies a bounds variable to keep.
    """
    ds = dataset.copy()
    # Reset any previously stored inference; a Dataset written to disk may
    # carry a stale value of this attr.
    ds.attrs["xcdat_infer"] = None
    var_keys = ds.data_vars.keys()
    bounds_keys = ds.bounds.names
    non_bounds: List[Hashable] = list(set(var_keys) ^ set(bounds_keys))
    if not non_bounds:
        logger.warning("This dataset only contains bounds data variables.")
    if data_var is None:
        num_regular = len(non_bounds)
        if num_regular == 1:
            ds.attrs["xcdat_infer"] = non_bounds[0]
        elif num_regular > 1:
            listing = ", ".join(
                f"'{var}'" for var in sorted(non_bounds)  # type:ignore
            )
            logger.info(
                "This dataset contains more than one regular data variable "
                f"({listing}). If desired, pass the `data_var` kwarg to "
                "reduce down to one regular data var."
            )
    else:
        if data_var not in var_keys:
            raise KeyError(
                f"The data variable '{data_var}' does not exist in the dataset."
            )
        if data_var in bounds_keys:
            raise KeyError("Please specify a regular (non-bounds) data variable.")
        # Subset from the original dataset, then record the kept variable.
        ds = dataset[[data_var] + bounds_keys]
        ds.attrs["xcdat_infer"] = data_var
    return ds
def decode_time_units(dataset: xr.Dataset):
"""Decodes both CF and non-CF compliant time units.
``xarray`` uses the ``cftime`` module, which only supports CF compliant
time units [4]_. As a result, opening datasets with non-CF compliant
time units (months and years) will throw an error if ``decode_times=True``.
This function works around this issue by first checking if the time units
are CF or non-CF compliant. Datasets with CF compliant time units are passed
to ``xarray.decode_cf``. Datasets with non-CF compliant time units are
manually decoded by extracting the units and reference date, which are used
to generate an array of datetime values.
Parameters
----------
dataset : xr.Dataset
Dataset with non-decoded CF/non-CF compliant time units.
Returns
-------
xr.Dataset
Dataset with decoded time units.
Notes
-----
.. [4] https://unidata.github.io/cftime/api.html#cftime.num2date
Examples
--------
Decode non-CF compliant time units in a Dataset:
>>> from xcdat.dataset import decode_time_units
>>> ds = xr.open_dataset("file_path", decode_times=False)
>>> ds.time
<xarray.DataArray 'time' (time: 3)>
array([0, 1, 2])
Coordinates:
* time (time) int64 0 1 2
Attributes:
units: years since 2000-01-01
bounds: time_bnds
axis: T
long_name: time
standard_name: time
>>> ds = decode_time_units(ds)
>>> ds.time
<xarray.DataArray 'time' (time: 3)>
array(['2000-01-01T00:00:00.000000000', '2001-01-01T00:00:00.000000000',
'2002-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2001-01-01 2002-01-01
Attributes:
units: years since 2000-01-01
bounds: time_bnds
axis: T
long_name: time
standard_name: time
View time coordinate encoding information:
>>> ds.time.encoding
{'source': None, 'dtype': dtype('int64'), 'original_shape': (3,), 'units':
'years since 2000-01-01', 'calendar': 'proleptic_gregorian'}
"""
time = dataset["time"]
units_attr = time.attrs.get("units")
if units_attr is None:
raise KeyError(
"No 'units' attribute found for time coordinate. Make sure to open "
"the dataset with `decode_times=False`."
)
units, reference_date | |
#!/usr/bin/env python
import sys
import os
import math
import pysam
import argparse
import multiprocessing as mp
from statistics import mean
def printBinInfo(info):
    """Print a tab-separated summary line for one bin.

    ``info`` is the 5-tuple produced by segmentBAM and friends:
    (bin name, SR filename, LR filename, SR base count, LR base count).
    Bins whose two base counts sum to zero are silently skipped.
    """
    nb_sr, nb_lr = info[3], info[4]
    if nb_sr + nb_lr != 0:
        fields = (info[0], info[1], info[2], str(nb_sr), str(nb_lr))
        print('\t'.join(fields))
def outputUnmappedLowQualLR(out_prefix_filename, mapq, ref_unbin, multithreaded):
    """Write long reads that cannot be assigned to a bin to a single file.

    From every long read BAM (module-global ``lr_bams``), keeps reads that
    are unmapped, or whose primary alignment has MAPQ below ``mapq``, or
    whose reference contig appears in ``ref_unbin``.

    Parameters: output filename prefix, minimum MAPQ, container of unbinned
    reference contigs, and whether to fetch with ``multiple_iterators``
    (for multithreaded use).

    Returns a 5-tuple shaped like segmentBAM's result.
    NOTE(review): the LR path and LR base count occupy the SR slots of the
    tuple (cf. segmentBAM's ordering) — presumably fine for printBinInfo,
    but verify any other consumer.
    """
    global lr_bams
    nb_base_lr = 0
    out_f = open(out_prefix_filename + "_lr_unknown.fq", "w") # Output file
    for bamf in lr_bams:
        it_bamf = bamf.fetch(multiple_iterators=multithreaded, until_eof=True) # fetch() also gets unmapped reads here
        for record in it_bamf:
            # Keep: non-empty reads that are unmapped, or primary alignments
            # that are low-MAPQ or mapped to an unbinned contig. Secondary and
            # supplementary alignments are always skipped.
            # NOTE(review): `record.reference_name in ref_unbin` only matches
            # if ref_unbin contains contig *names* — verify against the value
            # returned by checkReferenceCompatibility.
            if (record.query_length != 0) and ((record.is_unmapped == True) or ((record.is_secondary == False) and (record.is_supplementary == False) and ((record.mapping_quality < mapq) or (record.reference_name in ref_unbin)))):
                # Output LR
                # NOTE(review): records use a '>' header (FASTA style) in a
                # .fq file, with a '+' quality block appended when qualities
                # exist — neither valid FASTA nor FASTQ; confirm the consumer
                # expects this hybrid format.
                out_f.write('>' + record.query_name + '\n' + record.query_sequence + '\n')
                nb_base_lr += record.query_length
                # If has qualities, just output those
                if (record.query_qualities != None):
                    qual = [chr(x+33) for x in record.query_qualities]
                    out_f.write('+' + '\n' + "".join(qual) + '\n')
    out_f.close()
    return ("unknown_lr", os.path.abspath(out_prefix_filename + "_lr_unknown.fq"), "NA", nb_base_lr, 0)
def outputUnmappedLowQualSR(out_prefix_filename, mapq, qs_read, qs_base, multithreaded):
    """Write unmapped/low-MAPQ short reads to FASTA, diverting junk reads.

    From every short read BAM (module-global ``sr_bams``), reads that are
    unmapped or whose primary alignment has MAPQ below ``mapq`` are handled
    in three ways:

    - reads without base qualities are kept as-is;
    - reads whose mean base quality is below ``qs_read`` go to the
      "_sr_discarded.fa" junk file and are excluded from the base count;
    - otherwise, if ``qs_base`` is non-zero, bases below ``qs_base`` are
      masked with 'N' before output.

    Returns ("unmapped_sr", kept-reads file path, "NA", kept base count, 0).
    """
    global sr_bams
    nb_base_sr = 0
    out_f = open(out_prefix_filename + "_sr_unmapped.fa", "w") # Output file for unmapped or low qual SR
    out_junk_f = open(out_prefix_filename + "_sr_discarded.fa", "w") # Output file for discarded unmapped or low qual SR (junk)
    for bamf in sr_bams:
        it_bamf = bamf.fetch(multiple_iterators=multithreaded, until_eof=True) # fetch() also gets unmapped reads here
        for record in it_bamf:
            # Keep: non-empty reads that are unmapped, or primary alignments
            # below the MAPQ threshold. Secondary/supplementary are skipped.
            if (record.query_length != 0) and ((record.is_unmapped == True) or ((record.is_secondary == False) and (record.is_supplementary == False) and (record.mapping_quality < mapq))):
                nb_base_sr += record.query_length
                # No qualities available: keep the read unmodified.
                if (record.query_qualities == None): out_f.write('>' + record.query_name + '\n' + record.query_sequence + '\n')
                elif (mean(record.query_qualities) < qs_read):
                    # Mean quality too low: divert to junk and subtract its
                    # bases back out of the kept-base count.
                    out_junk_f.write('>' + record.query_name + '\n' + record.query_sequence + '\n')
                    nb_base_sr -= record.query_length
                elif (qs_base != 0):
                    # Mask individual low-quality bases with 'N'.
                    pos_low_qual = [ i for i in range(len(record.query_qualities)) if (record.query_qualities[i] < qs_base) ]
                    query_sequence = list(record.query_sequence)
                    for pos in pos_low_qual: query_sequence[pos] = 'N'
                    out_f.write('>' + record.query_name + '\n' + ''.join(query_sequence) + '\n')
                else: out_f.write('>' + record.query_name + '\n' + record.query_sequence + '\n')
    out_f.close()
    out_junk_f.close()
    return ("unmapped_sr", os.path.abspath(out_prefix_filename + "_sr_unmapped.fa"), "NA", nb_base_sr, 0)
def segmentBAM(out_prefix_filename, chr_name, lr_start_pos_ref, lr_end_pos_ref, chr_len, mapq_sr, mapq_lr, qs_sr, multithreaded):
    """Extract the long and short reads of one reference bin into files.

    Long reads (from module-global ``lr_bams``) whose primary alignment
    starts inside [lr_start_pos_ref, lr_end_pos_ref] with MAPQ >= mapq_lr
    are written to a per-bin LR file. The span those long reads cover —
    extended by a 1 Mbp buffer on each side — then defines the window from
    which short reads (``sr_bams``, MAPQ >= mapq_sr) are written to a
    per-bin SR file, with bases below ``qs_sr`` masked as 'N'.

    Returns (bin name, SR file path, LR file path, SR bases, LR bases);
    empty bins have their files removed and report ("NA", "NA", 0, 0) in
    the trailing slots.
    """
    global sr_bams
    global lr_bams
    buffer_sz = 1000000 # Size buffer to add to SR reads boundaries
    sr_start_pos_ref = chr_len # SR start position
    sr_end_pos_ref = 0 # SR end position
    bin_name = chr_name + "_" + str(lr_start_pos_ref) # Bin name
    out_sr_filename = out_prefix_filename + "_sr_" + bin_name + ".fa" # Output name SR file
    out_lr_filename = out_prefix_filename + "_lr_" + bin_name + ".fq" # Output name LR file
    out_sr_f = open(out_sr_filename, "w") # Output file for SR
    out_lr_f = open(out_lr_filename, "w") # Output file for LR
    nb_base_sr = 0 # Number of SR bases extracted for that region
    nb_base_lr = 0 # Number of LR bases extracted for that region
    for lr_bamf in lr_bams:
        it_lr_bamf = lr_bamf.fetch(chr_name, lr_start_pos_ref, lr_end_pos_ref, multiple_iterators=multithreaded)
        for record in it_lr_bamf:
            # If long read starts in the region and its mapq is good enough
            if (record.reference_start >= lr_start_pos_ref) and (record.reference_start <= lr_end_pos_ref) and (record.mapping_quality >= mapq_lr):
                # Discard 0-length reads, unmapped reads, secondary and supplementary alignments.
                if (record.query_length != 0) and (record.is_unmapped == False) and (record.is_secondary == False) and (record.is_supplementary == False):
                    # Grow the window in which SR will later be fetched so it
                    # covers every extracted long read's alignment span.
                    sr_start_pos_ref = min(sr_start_pos_ref, record.reference_start)
                    sr_end_pos_ref = max(sr_end_pos_ref, record.reference_start + record.reference_length - 1)
                    # Output LR
                    out_lr_f.write('>' + record.query_name + '\n' + record.query_sequence + '\n')
                    # If has qualities, just output those
                    if (record.query_qualities != None):
                        qual = [chr(x+33) for x in record.query_qualities]
                        out_lr_f.write('+' + '\n' + "".join(qual) + '\n')
                    nb_base_lr += record.query_length # Increase count of LR extracted
    if (nb_base_lr != 0): # If there are long reads to correct for that region
        # Increase SR boundaries by buffer_sz, clamped to the contig.
        sr_start_pos_ref = max(sr_start_pos_ref - buffer_sz, 0)
        sr_end_pos_ref = min(sr_end_pos_ref + buffer_sz, chr_len)
        for sr_bamf in sr_bams:
            it_sr_bamf = sr_bamf.fetch(chr_name, sr_start_pos_ref, sr_end_pos_ref, multiple_iterators=multithreaded)
            for record in it_sr_bamf:
                # If short read starts in the region and its mapq is good enough
                if (record.reference_start >= sr_start_pos_ref) and (record.reference_start <= sr_end_pos_ref) and (record.mapping_quality >= mapq_sr):
                    # Discard 0-length reads, unmapped reads, secondary and supplementary alignments.
                    if (record.query_length != 0) and (record.is_unmapped == False) and (record.is_secondary == False) and (record.is_supplementary == False):
                        if (qs_sr != 0) and (record.query_qualities != None):
                            # Mask individual low-quality bases with 'N'.
                            pos_low_qual = [ i for i in range(len(record.query_qualities)) if (record.query_qualities[i] < qs_sr) ]
                            query_sequence = list(record.query_sequence)
                            for pos in pos_low_qual: query_sequence[pos] = 'N'
                            out_sr_f.write('>' + record.query_name + '\n' + ''.join(query_sequence) + '\n')
                        # Output SR
                        else: out_sr_f.write('>' + record.query_name + '\n' + record.query_sequence + '\n')
                        nb_base_sr += record.query_length # Increase count of SR extracted
    out_sr_f.close()
    out_lr_f.close()
    if (nb_base_lr == 0) and (nb_base_sr == 0):
        # Nothing extracted: remove the empty files and report an empty bin.
        os.remove(os.path.abspath(out_sr_filename))
        os.remove(os.path.abspath(out_lr_filename))
        return (bin_name, "NA", "NA", 0, 0)
    else: return (bin_name, os.path.abspath(out_sr_filename), os.path.abspath(out_lr_filename), nb_base_sr, nb_base_lr)
def checkReferenceCompatibility(sr_filenames, lr_filenames, force_inter_ref):
    """Verify that all input BAM files share compatible reference contigs.

    Reads the header of every short and long read BAM and collects the
    (contig name, contig length) pairs. Contigs present in every file are
    "binnable"; contigs missing from at least one file are only tolerated
    when ``force_inter_ref`` is True, otherwise the program exits.

    Parameters
    ----------
    sr_filenames : list of str
        Paths of the short read BAM files.
    lr_filenames : list of str
        Paths of the long read BAM files.
    force_inter_ref : bool
        When True, proceed with the intersection of the references even if
        the files disagree.

    Returns
    -------
    tuple of (dict, dict)
        ``(ref_d_inter, ref_d_diff)``: both map contig name -> length; the
        first holds contigs shared by every BAM, the second holds contigs
        absent from at least one BAM ("unbinned" contigs).
    """
    ref_s_inter = set()
    ref_s_union = set()
    ref_d_inter = {}
    ref_d_diff = {}
    first_file = True
    for filename in sr_filenames + lr_filenames:
        bamf = pysam.AlignmentFile(filename, "rb")
        ref_file = {
            (chr_name, bamf.get_reference_length(chr_name))
            for chr_name in bamf.references
        }
        # FIX: close each BAM after reading its header (was previously leaked).
        bamf.close()
        if first_file:
            ref_s_inter = ref_file
            ref_s_union = ref_file
            first_file = False
        else:
            ref_s_inter = ref_s_inter.intersection(ref_file)
            ref_s_union = ref_s_union.union(ref_file)
    ref_s_diff = ref_s_union.difference(ref_s_inter)
    if ref_s_diff and not force_inter_ref:
        sys.exit("Input BAM files have different reference chromsomes/contigs. Use option --intersection_ref to force using the intersection.")
    for contig_name, contig_len in ref_s_inter:
        ref_d_inter[contig_name] = contig_len
    for contig_name, contig_len in ref_s_diff:
        ref_d_diff[contig_name] = contig_len
    # FIX: return the name-keyed dict ref_d_diff instead of the raw set
    # ref_s_diff of (name, length) tuples. Callers test membership with
    # `record.reference_name in ref_unbin`, which only matches contig names;
    # ref_d_diff was previously built but never used.
    return (ref_d_inter, ref_d_diff)
if __name__ == '__main__':
# Default values
sr_filenames = []
lr_filename = []
out_prefix_filename = ""
nb_threads = 1
len_segment = 5000000
mapq_sr = 0
mapq_lr = 30
qs_sr = 10
qs_unmap_sr = 20
force_inter_ref = False
# Parse arguments
parser = argparse.ArgumentParser(prog='segmentBAM', description='Segment BAM files of short reads and long reads', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
required = parser.add_argument_group('Required arguments')
required.add_argument('-s', '--short_read_bam', action='append', help='Filename of a short read bam file', required=True)
required.add_argument('-l', '--long_read_bam', action='append', help='Filename of a long read bam file', required=True)
required.add_argument('-o', '--out_prefix_filename', action='store', help='Prefix of the output filenames', required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument('-t', '--threads', action='store', help='Number of threads to use', default=nb_threads, required=False)
optional.add_argument('-b', '--buffer_size', action='store', help='Length of segments in bp', default=len_segment, required=False)
optional.add_argument('-m', '--mapq_short', action='store', help='Minimum MAPQ of short reads', default=mapq_sr, required=False)
optional.add_argument('-n', '--mapq_long', action='store', help='Minimum MAPQ of long reads', default=mapq_lr, required=False)
optional.add_argument('-q', '--qs_sr', action='store', help='Minimum quality score of short read bases', default=qs_sr, required=False)
optional.add_argument('-u', '--qs_unmap_sr', action='store', help='Minimum mean quality score of unmapped short reads', default=qs_unmap_sr, required=False)
optional.add_argument('--intersection_ref', action='store_true', help='Force using the intersection of reference contigs if input BAMs have different references.', required=False)
args = parser.parse_args()
args_d = vars(args)
for k,v in args_d.items():
if ((k == "s") or (k == "short_read_bam")): sr_filenames = v
elif ((k == "l") or (k == "long_read_bam")): lr_filenames = v
elif ((k == "t") or (k == "threads")): nb_threads = int(v)
elif ((k == "o") or (k == "out_prefix_filename")): out_prefix_filename = v
elif ((k == "b") or (k == "buffer_size")): len_segment = int(v)
elif ((k == "m") or (k == "mapq_short")): mapq_sr = int(v)
elif ((k == "n") or (k == "mapq_long")): mapq_lr = int(v)
elif ((k == "q") or (k == "qs_sr")): qs_sr = int(v)
elif ((k == "u") or (k == "qs_unmap_sr")): qs_unmap_sr = int(v)
elif (k == "intersection_ref"): force_inter_ref = True
# Minor consistency check on arguments
if (len(sr_filenames) == 0): sys.exit("No input short read BAM provided as input")
if (len(lr_filenames) == 0): sys.exit("No input long read BAM provided as input")
if (nb_threads <= 0): sys.exit("Cannot use less than 1 thread")
if (nb_threads > mp.cpu_count()): sys.exit("Cannot use more than " + str(mp.cpu_count()) + "threads")
if (len_segment <= 0): sys.exit("Cannot use segments length that are less than 0")
if (mapq_sr < 0): sys.exit("Cannot use MAPQ less than 0 for short reads")
if (mapq_sr > 60): sys.exit("Cannot use MAPQ greater than 60 for short reads")
if (mapq_lr < 0): sys.exit("Cannot use MAPQ less than 0 for long reads")
if (mapq_lr > 60): sys.exit("Cannot use MAPQ greater than 60 for long reads")
if (qs_sr < 0): sys.exit("Cannot use quality score less than 0 for short reads")
if (qs_sr > 40): sys.exit("Cannot use quality score greater than 40 for short reads")
if (qs_unmap_sr < 0): sys.exit("Cannot use quality score less than 0 for short reads")
if (qs_unmap_sr > 40): sys.exit("Cannot use quality score greater than 40 for short reads")
# Check that all BAM files use the same reference file
ref_bin, ref_unbin = checkReferenceCompatibility(sr_filenames, lr_filenames, force_inter_ref)
if (len(ref_bin.keys()) == 0): sys.exit("Input BAM files do not share any reference contigs.")
# Open BAM files
lr_bams = []
sr_bams = []
for lr_filename in lr_filenames:
lr_bamf = pysam.AlignmentFile(lr_filename, "rb")
lr_bams.append(lr_bamf)
for sr_filename in sr_filenames:
sr_bamf = pysam.AlignmentFile(sr_filename, "rb")
sr_bams.append(sr_bamf)
# Segments into different files SR and LR from same regions with (MAPQ >= mapq)
if (nb_threads == 1):
for chr_name, chr_len in ref_bin.items():
# Segment SR and LR with MAPQ>mapq into bins
for i in range(0, chr_len, len_segment): printBinInfo(segmentBAM(out_prefix_filename, chr_name, i, min(i + len_segment - 1, chr_len), chr_len, mapq_sr, mapq_lr, qs_sr, False))
printBinInfo(outputUnmappedLowQualLR(out_prefix_filename, mapq_lr, ref_unbin, False)) # Output to file all LR which are either unmapped or with (MAPQ < mapq)
printBinInfo(outputUnmappedLowQualSR(out_prefix_filename, |
# (website boilerplate accidentally captured by the extraction; not code:)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.