input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
""" Image editing class for virtual event only
"""
import os
import datetime
import logging
import rospy
import cv2
from markov.log_handler.logger import Logger
from markov.utils import get_racecar_idx
from mp4_saving import utils
from mp4_saving.constants import (RaceCarColorToRGB,
IconographicImageSize,
SCALE_RATIO, FrameQueueData,
VirtualEventMP4Params,
VirtualEventIconographicPngs,
VirtualEventXYPixelLoc, VirtualEventFader,
VirtualEventData)
from mp4_saving.image_editing_interface import ImageEditingInterface
from mp4_saving.top_view_graphics import TopViewGraphics
from mp4_saving.fader import Fader
from markov.virtual_event.constants import DEFAULT_RACE_DURATION
from mp4_saving.states.virtual_event_wait_state import VirtualEventWaitState
from markov.state_machine.fsm import FSM
from markov.boto.s3.files.virtual_event_best_sector_time import VirtualEventBestSectorTime
from markov.boto.s3.constants import (SECTOR_TIME_LOCAL_PATH,
SECTOR_TIME_S3_POSTFIX,
TrackSectorTime,
SECTOR_X_FORMAT,
SECTOR_TIME_FORMAT_DICT)
from markov.boto.s3.utils import get_s3_key
LOG = Logger(__name__, logging.INFO).get_logger()
class VirtualEventMultiAgentImageEditing(ImageEditingInterface):
""" Image editing class for virtual event
"""
def __init__(self, racecar_name, racecar_info, race_type):
    """ Initializing the required data for the head to bot, time-trial. This is used for single agent
    Arguments:
        racecar_name (str): racecar name in string
        racecar_info (list): list of dicts having information of the agents
        race_type (str): Since this class is reused for all the different race_type
    """
    self._total_laps = int(rospy.get_param("NUMBER_OF_TRIALS", 0))
    self._world_name = rospy.get_param("WORLD_NAME")
    self.num_sectors = int(rospy.get_param("NUM_SECTORS", "3"))
    # race duration in milliseconds (RACE_DURATION param is in seconds)
    self.race_duration = int(rospy.get_param("RACE_DURATION", DEFAULT_RACE_DURATION)) * 1000
    self.racecar_info = racecar_info
    self.race_type = race_type
    # get_racecar_idx may return None (no index suffix in the name); default to 0
    racecar_index = get_racecar_idx(racecar_name)
    self.racecar_index = racecar_index if racecar_index else 0
    # Store the fonts which we will use to write the phase with
    self.amazon_ember_regular_28px = utils.get_font('AmazonEmber-Regular', 28)
    self.amazon_ember_regular_18px = utils.get_font('AmazonEmber-Regular', 18)
    self.amazon_ember_regular_14px = utils.get_font('AmazonEmber-Regular', 14)
    self.amazon_ember_regular_12px = utils.get_font('AmazonEmber-Regular', 12)
    # The track image as iconography
    self.track_icongraphy_img = utils.get_track_iconography_image()
    # Track image (x, y) pixel offset on the overlay
    self.track_loc_offset = VirtualEventXYPixelLoc.TRACK_IMG_VIRTUAL_EVENT_LOC.value
    # Track bounding box in the overlay; populated by _plot_track_on_gradient below
    self._track_x_min = None
    self._track_x_max = None
    self._track_y_min = None
    self._track_y_max = None
    # Gradient overlay image with track and virtual event mock
    gradient_img_path = VirtualEventIconographicPngs.H2H_OVERLAY_PNG.value
    self.gradient_img = self._plot_track_on_gradient(gradient_img_path)
    # Static labels are baked into the gradient once; per-frame digits are
    # written later in _edit_major_cv_image.
    # lap count label
    loc_x, loc_y = VirtualEventXYPixelLoc.LAP_COUNT_TEXT.value
    self.gradient_img = utils.write_text_on_image(image=self.gradient_img, text="LAP",
                                                  loc=(loc_x, loc_y), font=self.amazon_ember_regular_18px,
                                                  font_color=RaceCarColorToRGB.White.value,
                                                  font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer0 speed unit label
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER0_SPEED_TEXT.value
    self.gradient_img = utils.write_text_on_image(image=self.gradient_img, text="m/s",
                                                  loc=(loc_x, loc_y), font=self.amazon_ember_regular_12px,
                                                  font_color=RaceCarColorToRGB.White.value,
                                                  font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer0 best lap time label
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER0_BEST_LAP_TIME_TEXT.value
    self.gradient_img = utils.write_text_on_image(image=self.gradient_img, text="BEST LAP TIME",
                                                  loc=(loc_x, loc_y), font=self.amazon_ember_regular_12px,
                                                  font_color=RaceCarColorToRGB.White.value,
                                                  font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer1 speed unit label
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER1_SPEED_TEXT.value
    self.gradient_img = utils.write_text_on_image(image=self.gradient_img, text="m/s",
                                                  loc=(loc_x, loc_y), font=self.amazon_ember_regular_12px,
                                                  font_color=RaceCarColorToRGB.White.value,
                                                  font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer1 best lap time label
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER1_BEST_LAP_TIME_TEXT.value
    self.gradient_img = utils.write_text_on_image(image=self.gradient_img, text="BEST LAP TIME",
                                                  loc=(loc_x, loc_y), font=self.amazon_ember_regular_12px,
                                                  font_color=RaceCarColorToRGB.White.value,
                                                  font_shadow_color=RaceCarColorToRGB.Black.value)
    # precompute gradient alpha terms applied to every camera frame
    self.gradient_alpha_rgb_mul, self.one_minus_gradient_alpha = utils.get_gradient_values(self.gradient_img)
    # Top camera information
    top_camera_info = utils.get_top_camera_info()
    self.top_view_graphics = TopViewGraphics(top_camera_info.horizontal_fov, top_camera_info.padding_pct,
                                             top_camera_info.image_width, top_camera_info.image_height,
                                             racecar_info,
                                             is_virtual_event=True)
    # virtual event image editing state machine
    self._image_edit_fsm = FSM(initial_state=VirtualEventWaitState())
    # if best sector time download from s3 failed, best sector time stays None
    # and the sector color is not displayed
    self._sector_times = {}
    # declare sector images (one color dict per sector)
    self._sectors_img_dict = {}
    for idx in range(self.num_sectors):
        sector = SECTOR_X_FORMAT.format(idx + 1)
        sector_color_img_dict = utils.init_sector_img_dict(world_name=self._world_name,
                                                           sector=sector)
        self._sectors_img_dict[sector] = sector_color_img_dict
    # Use the s3 bucket and prefix environment variables meant for the yaml file
    # because this code runs in SimApp only. For virtual event no s3 bucket/prefix
    # is passed through the yaml file (all values arrive through sqs), so for
    # simplicity the yaml s3 bucket and prefix environment variables are reused.
    self._virtual_event_best_sector_time = VirtualEventBestSectorTime(
        bucket=os.environ.get("YAML_S3_BUCKET", ''),
        s3_key=get_s3_key(os.environ.get("YAML_S3_PREFIX", ''), SECTOR_TIME_S3_POSTFIX),
        region_name=os.environ.get("APP_REGION", "us-east-1"),
        local_path=SECTOR_TIME_LOCAL_PATH)
    self._sector_times.update(self._virtual_event_best_sector_time.get_sector_time(
        num_sectors=self.num_sectors))
    # initialize default best personal and current personal sector times to inf
    for idx in range(self.num_sectors):
        sector = SECTOR_X_FORMAT.format(idx + 1)
        self._sector_times[SECTOR_TIME_FORMAT_DICT[TrackSectorTime.BEST_PERSONAL].format(sector)] = float("inf")
        self._sector_times[SECTOR_TIME_FORMAT_DICT[TrackSectorTime.CURRENT_PERSONAL].format(sector)] = float("inf")
    # lap/progress bookkeeping carried between frames
    self._curr_lap_time = 0
    self._last_eval_time = 0
    self._curr_progress = 0
    self._last_progress = 0
    # Initializing the fader behaviour to pre-compute the gradient values
    final_fading_image = utils.get_image(VirtualEventIconographicPngs.FINAL_FADING_IMAGE_50ALPHA.value,
                                         IconographicImageSize.FULL_IMAGE_SIZE.value)
    final_fading_image = cv2.cvtColor(final_fading_image, cv2.COLOR_RGBA2BGRA)
    self._fader_obj = Fader(final_fading_image, fading_min_percent=VirtualEventFader.FADING_MIN_PERCENT.value,
                            fading_max_percent=VirtualEventFader.FADING_MAX_PERCENT.value,
                            num_frames=VirtualEventFader.NUM_FRAMES.value)
    # rank badge images (leader / runner-up squares) in BGRA
    self._racers_png_images = [cv2.cvtColor(utils.get_image(path), cv2.COLOR_RGBA2BGRA)
                               for path in VirtualEventIconographicPngs.RACERS_RANK_PNG.value]
@staticmethod
def _format_time_millis(time_in_millis):
    """ Format a millisecond duration for on-screen display.

    The metrics publisher initializes times to inf, and reports 0 until
    its ROS service has come up; both render as the "--:--.---" placeholder.

    Args:
        time_in_millis (float): duration in milliseconds
    Returns:
        str: formatted time text, or the "--:--.---" placeholder
    """
    if time_in_millis != float("inf") and time_in_millis != 0:
        return utils.milliseconds_to_timeformat(
            datetime.timedelta(milliseconds=time_in_millis))
    return "--:--.---"

def _edit_major_cv_image(self, major_cv_image, metric_info):
    """ Apply all the editing for the Major 45degree camera image
    Args:
        major_cv_image (Image): Image straight from the camera
        metric_info (dict): rest of the image editing info
    Returns:
        Image: Edited main camera image
    """
    major_cv_image = utils.apply_gradient(major_cv_image, self.gradient_alpha_rgb_mul,
                                          self.one_minus_gradient_alpha)
    #########################
    # update display params #
    #########################
    mp4_video_metrics_info = metric_info[FrameQueueData.AGENT_METRIC_INFO.value]
    virtual_event_info = metric_info[FrameQueueData.VIRTUAL_EVENT_INFO.value]
    episode_status = mp4_video_metrics_info[self.racecar_index].episode_status
    # total_evaluation_time (race time) in milliseconds
    total_eval_milli_seconds = mp4_video_metrics_info[self.racecar_index].total_evaluation_time
    # Reset counter
    reset_counter = mp4_video_metrics_info[self.racecar_index].reset_counter
    # Speed
    speed = mp4_video_metrics_info[self.racecar_index].throttle
    # Current progress
    current_progress = mp4_video_metrics_info[self.racecar_index].completion_percentage
    # Prepare a dict for the finite state machine on-event call
    info_dict = {VirtualEventMP4Params.COUNTDOWN_TIMER.value: mp4_video_metrics_info[self.racecar_index].pause_duration,
                 VirtualEventMP4Params.MAJOR_CV_IMAGE.value: major_cv_image,
                 VirtualEventMP4Params.CURRENT_LAP.value: virtual_event_info[VirtualEventData.LAP.value] + 1,
                 VirtualEventMP4Params.TOTAL_EVAL_SECONDS.value: total_eval_milli_seconds,
                 VirtualEventMP4Params.RESET_COUNTER.value: reset_counter,
                 VirtualEventMP4Params.SPEED.value: speed,
                 VirtualEventMP4Params.CURR_PROGRESS.value: current_progress,
                 VirtualEventMP4Params.LAST_EVAL_SECONDS.value: self._last_eval_time,
                 VirtualEventMP4Params.X_MIN.value: self._track_x_min,
                 VirtualEventMP4Params.X_MAX.value: self._track_x_max,
                 VirtualEventMP4Params.Y_MIN.value: self._track_y_min,
                 VirtualEventMP4Params.Y_MAX.value: self._track_y_max,
                 VirtualEventMP4Params.SECTOR_TIMES.value: self._sector_times,
                 VirtualEventMP4Params.CURR_LAP_TIME.value: self._curr_lap_time,
                 VirtualEventMP4Params.SECTOR_IMAGES.value: self._sectors_img_dict,
                 VirtualEventMP4Params.FADER_OBJ.value: self._fader_obj}
    #####################
    # run state machine #
    #####################
    # virtual event image edit finite state machine on event
    info_dict = self._image_edit_fsm.execute(input_val={'event': episode_status,
                                                        'info_dict': info_dict})
    # update display params from the finite state machine return value
    major_cv_image = info_dict[VirtualEventMP4Params.MAJOR_CV_IMAGE.value]
    total_eval_milli_seconds = info_dict[VirtualEventMP4Params.TOTAL_EVAL_SECONDS.value]
    reset_counter = info_dict[VirtualEventMP4Params.RESET_COUNTER.value]
    self._last_eval_time = info_dict[VirtualEventMP4Params.LAST_EVAL_SECONDS.value]
    self._sector_times = info_dict[VirtualEventMP4Params.SECTOR_TIMES.value]
    self._curr_lap_time = info_dict[VirtualEventMP4Params.CURR_LAP_TIME.value]
    # lap count digits (clamped so the display never exceeds total laps)
    loc_x, loc_y = VirtualEventXYPixelLoc.LAP_COUNT_DIGIT.value
    lap_counter_text = "{}/{}".format(int(min(mp4_video_metrics_info[self.racecar_index].lap_counter + 1, self._total_laps)), self._total_laps)
    major_cv_image = utils.write_text_on_image(image=major_cv_image, text=lap_counter_text,
                                               loc=(loc_x, loc_y), font=self.amazon_ember_regular_18px,
                                               font_color=RaceCarColorToRGB.White.value,
                                               font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer0 name
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER0_NAME.value
    major_cv_image = utils.write_text_on_image(image=major_cv_image, text=self.racecar_info[0]['display_name'],
                                               loc=(loc_x, loc_y), font=self.amazon_ember_regular_14px,
                                               font_color=RaceCarColorToRGB.White.value,
                                               font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer0 best lap time
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER0_BEST_LAP_TIME_DIGIT.value
    best_lap_time = self._format_time_millis(mp4_video_metrics_info[0].best_lap_time)
    major_cv_image = utils.write_text_on_image(image=major_cv_image, text=best_lap_time,
                                               loc=(loc_x, loc_y), font=self.amazon_ember_regular_28px,
                                               font_color=RaceCarColorToRGB.White.value,
                                               font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer0 speed digit
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER0_SPEED_DIGIT.value
    speed_text = utils.get_speed_formatted_str(mp4_video_metrics_info[0].throttle)
    major_cv_image = utils.write_text_on_image(image=major_cv_image, text=speed_text,
                                               loc=(loc_x, loc_y), font=self.amazon_ember_regular_28px,
                                               font_color=RaceCarColorToRGB.White.value,
                                               font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer1 name
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER1_NAME.value
    major_cv_image = utils.write_text_on_image(image=major_cv_image, text=self.racecar_info[1]['display_name'],
                                               loc=(loc_x, loc_y), font=self.amazon_ember_regular_14px,
                                               font_color=RaceCarColorToRGB.White.value,
                                               font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer1 best lap time
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER1_BEST_LAP_TIME_DIGIT.value
    best_lap_time = self._format_time_millis(mp4_video_metrics_info[1].best_lap_time)
    major_cv_image = utils.write_text_on_image(image=major_cv_image, text=best_lap_time,
                                               loc=(loc_x, loc_y), font=self.amazon_ember_regular_28px,
                                               font_color=RaceCarColorToRGB.White.value,
                                               font_shadow_color=RaceCarColorToRGB.Black.value)
    # racer1 speed digit
    loc_x, loc_y = VirtualEventXYPixelLoc.RACER1_SPEED_DIGIT.value
    speed_text = utils.get_speed_formatted_str(mp4_video_metrics_info[1].throttle)
    major_cv_image = utils.write_text_on_image(image=major_cv_image, text=speed_text,
                                               loc=(loc_x, loc_y), font=self.amazon_ember_regular_28px,
                                               font_color=RaceCarColorToRGB.White.value,
                                               font_shadow_color=RaceCarColorToRGB.Black.value)
    # rank board (only drawn for a head-to-head pair)
    time_to_leader = virtual_event_info[VirtualEventData.TIME_TO_LEADER.value]
    if time_to_leader and len(time_to_leader) == 2:
        sim_time = virtual_event_info[VirtualEventData.SIM_TIME.value]
        # sort display names by their gap to the leader (leader first)
        racer_rank = sorted(time_to_leader, key=time_to_leader.get)
        # leader name and race clock
        loc_x, loc_y = VirtualEventXYPixelLoc.LEADER_NAME_TEXT.value
        sim_time = self._format_time_millis(sim_time)
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text="{} {}".format(
            racer_rank[0][:5].upper(), sim_time),
            loc=(loc_x, loc_y), font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # runner-up name and gap to leader in seconds
        loc_x, loc_y = VirtualEventXYPixelLoc.RUNNER_UP_NAME_TEXT.value
        gap_time = time_to_leader[racer_rank[1]]
        # BUGFIX: the fallback used to be the float 0.00, which rendered as
        # "+0.0"; use the string "0.00" so the format matches the normal path.
        gap_time = format(gap_time / 1000, "0.2f") \
            if gap_time != float("inf") and gap_time != 0 else "0.00"
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text="{} +{}".format(
            racer_rank[1][:5].upper(), gap_time),
            loc=(loc_x, loc_y), font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # leader / runner-up square marks: order the badge locations so the
        # local racer's badge matches its rank
        race_square_loc = []
        if self.racecar_info[0]['display_name'] == racer_rank[0]:
            race_square_loc = [VirtualEventXYPixelLoc.LEADER_RECTANGLE.value,
                               VirtualEventXYPixelLoc.RUNNER_UP_RECTANGLE.value]
        else:
            race_square_loc = [VirtualEventXYPixelLoc.RUNNER_UP_RECTANGLE.value,
                               VirtualEventXYPixelLoc.LEADER_RECTANGLE.value]
        for minor_cv_image, loc in zip(self._racers_png_images, race_square_loc):
            utils.plot_rectangular_image_on_main_image(major_cv_image, minor_cv_image, loc)
    major_cv_image = cv2.cvtColor(major_cv_image, cv2.COLOR_RGB2BGRA)
    return major_cv_image
def _plot_track_on_gradient(self, gradient_img_path):
    """ Bake the static track iconography into the gradient overlay image.

    The blend is computed once here and the result is applied to every
    camera frame, which is cheaper than re-plotting the track per frame
    (previously this was done on the top camera image, which changes
    every frame).

    Arguments:
        gradient_img_path (str): Gradient image path
    Returns:
        (Image): Edited gradient image with track image
    """
    overlay = utils.get_image(gradient_img_path, IconographicImageSize.FULL_IMAGE_SIZE.value)
    overlay = cv2.cvtColor(overlay, cv2.COLOR_RGBA2BGRA)
    scaled_track = utils.resize_image(self.track_icongraphy_img, SCALE_RATIO)
    # per-pixel alpha of the track image, normalized to [0, 1]
    track_alpha = scaled_track[:, :, 3] / 255.0
    # Track image is placed at the configured pixel offset.
    # NOTE(review): offset index 1 feeds the row ("x") bounds and index 0
    # the column ("y") bounds — this mirrors the row/column convention
    # used elsewhere in this class; confirm before changing.
    self._track_x_min = self.track_loc_offset[1]
    self._track_x_max = self.track_loc_offset[1] + scaled_track.shape[0]
    self._track_y_min = self.track_loc_offset[0]
    self._track_y_max = self.track_loc_offset[0] + scaled_track.shape[1]
    # This is used as the offset for plotting the agent dots
    self.track_start_loc = (self._track_y_min, self._track_x_min)
    row_span = slice(self._track_x_min, self._track_x_max)
    col_span = slice(self._track_y_min, self._track_y_max)
    # Standard alpha blend, one BGRA channel at a time
    for channel in range(4):
        blended = (track_alpha * scaled_track[:, :, channel]) + \
                  (1 - track_alpha) * overlay[row_span, col_span, channel]
        overlay[row_span, col_span, channel] = blended
    return overlay
def _plot_agents_on_major_cv_image(self, major_cv_image, mp4_video_metrics_info):
""" Add the agents, obstacles on the track.
Arguments:
major_cv_image (Image): Edited image having gradient, text, track
mp4_video_metrics_info (List): List of ROS metric values of each agent
Returns:
Image: Edited image with gradient, text, track and agents with dots
"""
agents_loc = [(metric.x, metric.y) for metric in mp4_video_metrics_info]
objects_loc = []
if mp4_video_metrics_info[0].object_locations:
objects_loc = [(object_loc.x, object_loc.y) for object_loc | |
itx['recipient'] and opentx.amount == itx['amount'] and opentx.signature == itx['signature']:
try:
self.__open_transactions.remove(opentx)
except ValueError:
print('Item was already removed')
self.save_data()
return True
def resolve(self):
    """Query every peer node's blockchain and adopt the longest valid one.

    Returns:
        bool: True if the local chain was replaced, False otherwise.
    """
    # Start with the local chain as the best candidate
    best_chain = self.chain
    chain_was_replaced = False
    for peer in self.__peer_nodes:
        try:
            # Fetch the peer's chain as JSON
            response = requests.get('http://{}/chain'.format(peer))
            chain_payload = response.json()
            # Rebuild Block and Transaction objects from the dictionaries
            candidate = [
                Block(block['index'],
                      block['previous_hash'],
                      [Transaction(tx['sender'], tx['recipient'], tx['signature'], tx['amount'])
                       for tx in block['transactions']],
                      block['proof'],
                      block['timestamp'])
                for block in chain_payload
            ]
            # Adopt the candidate only when it is strictly longer AND valid
            if len(candidate) > len(best_chain) and Verification.verify_chain(candidate):
                best_chain = candidate
                chain_was_replaced = True
        except requests.exceptions.ConnectionError:
            # Unreachable peers are simply skipped
            continue
    self.resolve_conflicts = False
    # Replace the local chain with the winning chain
    self.chain = best_chain
    if chain_was_replaced:
        # Local open transactions may conflict with the adopted chain
        self.__open_transactions = []
    self.save_data()
    return chain_was_replaced
def add_peer_node(self, node):
    """Register a peer node and persist the updated node set.

    Arguments:
        :node: The node URL which should be added.
    """
    # Sets ignore duplicates, so re-adding a known node is harmless
    self.__peer_nodes.add(node)
    self.save_data()
def remove_peer_node(self, node):
    """Drop a peer node and persist the updated node set.

    Arguments:
        :node: The node URL which should be removed.
    """
    # discard() is a no-op for unknown nodes, unlike remove()
    self.__peer_nodes.discard(node)
    self.save_data()
def get_peer_nodes(self):
    """Return a list copy of all connected peer nodes.

    A set is not sliceable, so the copy is produced with list(); the
    caller may mutate the returned list freely without affecting the
    internal peer set.
    """
    return list(self.__peer_nodes)
# #in all of the blockchain there is problem of initial stage because when we call last block the case occurs when
# #we dont have any block in the chain then it is one of the major problems hence we are using a dummy genesis_block
# #as the first block
# # #our starting block for the blockchain
# # genesis_block = {
# # 'previous_hash': '',
# # 'index' : 0,
# # 'transactions' : [],
# # 'proof' : 100
# # }
# #initializing our empty blockchian
# #blockchain = [genesis_block]
# blockchain = []
# #unhandled transactions
# open_transactions = []
# #well we are going to pass owner from the node to have a specific identification id's
# owner = 'Dhruv'
# # participant = {'Dhruv'}
# def load_data():
# global blockchain
# global open_transactions
# try:
# with open('blockchain.txt', mode = 'r') as f:
# file_content = f.readlines()
# blockchain = json.loads(file_content[0][:-1])
# updated_blockchain = []
# for block in blockchain:
# convertex_tx = [Transaction(tx['sender'], tx['recipient'], tx['amount']) for tx in block['transactions']]
# updated_block = Block(block['index'], block['previous_hash'], convertex_tx, block['proof'], block['timestamp'])#calling here
# # updated_block = {
# # 'previous_hash': block['previous_hash'],
# # 'index': block['index'],
# # 'proof': block['proof'],
# # 'transactions': [OrderedDict(
# # [('sender', tx['sender']), ('recipient', tx['recipient']), ('amount' ,tx['amount'])]) for tx in block['transactions']]
# # }
# updated_blockchain.append(updated_block)
# blockchain = updated_blockchain
# open_transactions = json.loads(file_content[1])
# updated_transactions = []
# for tx in open_transactions:
# updated_transaction = Transaction(tx['sender'], tx['recipient'], tx['amount'])
# # updated_transaction = OrderedDict(
# # [('sender', tx['sender']), ('recipient', tx['recipient']), ('amount' ,tx['amount'])])
# updated_transactions.append(updated_transaction)
# open_transactions = updated_transactions
# #this is only going to trigger when we don't had made blockchain.txt file
# #due to try except block it is sure that the code is wrong still this will not crash
# except IOError:
# genesis_block = Block(0, '', [], 100, 0)
# # genesis_block = {
# # 'previous_hash': '',
# # 'index' : 0,
# # 'transactions' : [],
# # 'proof' : 100
# # }
# blockchain = [genesis_block]
# open_transactions = []
# finally:
# print('cleanup!')
# def load_data():
# with open('blockchain.p', mode = 'rb') as f:
# file_content = pickle.loads(f.read())
# global blockchain
# global open_transactions
# blockchain = file_content['chain']
# open_transactions = file_content['ot']
# print(file_content)
# load_data()
# def save_data():
# try:
# with open('blockchain.txt', mode = 'w') as f:
# saveable_chain = [block.__dict__ for block in [Block(block_el.index, block_el.previous_hash, [tx.__dict__ for tx in block_el.transactions], block_el.proof, block_el.timestamp) for block_el in blockchain]]
# f.write(json.dumps(saveable_chain))
# f.write('\n')
# saveable_tx = [tx.__dict__ for tx in open_transactions]
# f.write(json.dumps(saveable_tx))
# except (IOError, IndexError):
# print('saving failed!!')
#old dumping code
# def save_data():
# with open('blockchain.p', mode = 'wb') as f:
# save_data = {
# 'chain': blockchain,
# 'ot': open_transactions
# }
# f.write(pickle.dumps(save_data))
#moved to the class here for backup code if needed
# def get_balance(participant):
# #using nested list comprehension with two for loops like work
# tx_sender = [[tx.amount for tx in block.transactions
# if tx.sender == participant] for block in blockchain]
# open_tx_sender = [tx.amount for tx in open_transactions
# if tx.sender == participant]
# tx_sender.append(open_tx_sender)
# amount_sent = reduce(lambda tx_sum ,tx_amt : tx_sum + sum(tx_amt) if len(tx_amt)>0 else tx_sum + 0, tx_sender, 0)
# #we implemented that funtion with the help of the lambad and inline arguments
# #we can't use ternary operations with lambda functions
# '''
# amount_sent = 0
# for tx in tx_sender:
# #to remove the error generated due to first empty block
# if len(tx) > 0:
# amount_sent += sum(tx)
# '''
# tx_recipient = [[tx.amount for tx in block.transactions
# if tx.recipient == participant] for block in blockchain]
# amount_received = reduce(lambda tx_sum ,tx_amt : tx_sum + sum(tx_amt) if len(tx_amt)>0 else tx_sum + 0, tx_recipient, 0)
# '''
# amount_received = 0
# for tx in tx_recipient:
# #to remove the error generated due to first empty block
# if len(tx) > 0:
# amount_received += sum(tx[])
# '''
# return amount_received - amount_sent
#this is also moved to blockchain class to have better utility of the code
# #this function is made to send the last element of the blockchain hence in the case when whe have
# # an empty blockchain we will pass the null value else what we had done continues
# def get_last_blockchain_value():
# if len(blockchain) < 1:
# return None
# #none is usefull to show tha there is nothing
# return blockchain[-1]
# # here indexing value -1 will refer to the last element of the list
# def verify_transaction(transaction):
# sender_balance = get_balance(transaction.sender)
# return sender_balance >= transaction.amount
# #this will ruturn the value in true or false(boolean )
'''
def add_transaction(transaction_amount, last_transaction=[1]):
#this is making our our blocks to be in chain
if last_transaction == None:
last_transaction = [1]
#with this code we are giving all of the blockchain first block as one !!
#anycase first element of the block will be 1
blockchain.append([last_transaction, transaction_amount])
'''
#this is also been moved to the blockchain class
# def add_transaction(recipient, sender = owner,amount = 1.0):
# #sender : the sender of the coins
# #recipient : the recipient of the coin
# #amount : the amount of soin send from sender to recipient and the default is set to be 1.0
# #this is unodered dictonary
# #making dictionary
# # transaction = {
# # 'sender' : sender,
# # 'recipient' : recipient,
# # 'amount' : amount
# # }
# transaction = Transaction(sender, recipient, amount)
# # transaction = OrderedDict(
# # [('sender', sender), ('recipient', recipient), ('amount' ,amount)])
# #if the verify chain value will be true then and only then the transaction will be validated
# verifier = Verification()
# if verifier.verify_transaction(transaction, get_balance):
# open_transactions.append(transaction)
# #participant is set hence it will only allow us to enter the unique value
# #if any kind of duplicate value comes then it will simply ignore it
# # participant.add(sender)
# # participant.add(recipient)
# save_data()
# return True
# return False
# def valid_proof(transactions, last_hash, proof):
# guess = (str([tx.to_ordered_dict() for tx in transactions]) + str(last_hash) + str(proof)).encode()
# # print(guess)
# guess_hash = hash_string_256(guess)
# # print(guess_hash)
# #returning only first and the second element
# return guess_hash[0:2] == '00'
#added to the blockchain class
# def proof_of_work():
# last_block = blockchain[-1]
# last_hash = hash_block(last_block)
# proof = 0
# verifier = Verification()
# while not verifier.valid_proof(open_transactions ,last_hash, proof):
# proof += 1
# return proof
'''
#this is also true but we can do it very easily
add_value(2,[1])
add_value(0.9,get_last_blockchain_value())
add_value(10.80,get_last_blockchain_value())
add_value(5,get_last_blockchain_value())
'''
# def mine_block():
# last_block = blockchain[-1]
# #can use it like this instead of for loop
# #this will work but let do it which looks better known as "list comprehensions"
# #hashed_block = str([ last_block[key] for key in last_block ])
# #hashed_block = '-'.join([str(last_block[key]) for key in last_block])
# hashed_block = hash_block(last_block)
# # #gives us last element of blockchain
# # for key in last_block:
# # values = last_block[key]
# # hashed_block = hashed_block + str(values)
# proof = proof_of_work()
# #rewarding the miner who is responsible | |
###############################################################################
#
# file: filereader.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver application, and should not be used
# or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
This module contains a screen base class that handles screens that require
recursive directory scanning for files to be printed out.
See additional information in the class itself.
The helper class available here is:
* `FileReaderBase`
"""
#
# Python built-in modules
#
import os
import Queue as queue
from threading import Thread
#
# Internal modules
#
from termsaverlib.screen.base import ScreenBase
from termsaverlib import exception, constants
from termsaverlib.screen.helper.typing import TypingHelperBase
from termsaverlib.i18n import _
class FileReaderBase(ScreenBase, TypingHelperBase):
"""
A base class used to handle file reading, more specifically, multiple files
retrieved from a `path` recursively. This also uses the `TypingHelperBase`
helper class to add functionality of typing writer display.
The instantiation of this class takes two additional arguments, compared
with its base class:
* `delay`: Defines the speed of the typing writer. See more details
of this in `TypingHelperBase` class documentation. Its value is
defined by `TypingHelperBase`'s default, if none is informed.
* `path`: Defines the path to be recursively checked for text
files to be displayed on terminal screen.
When inheriting from this screen, you can also take advantage of the
following properties and functionalities:
* `cleanup_per_file`: forces to clean up the screen for each file in
the looping. The alternative, available in
`ScreenBase.cleanup_per_cycle`, only handles a cycle action.
"""
path = ''
cleanup_per_file = False
def __init__(self, name, description, path=None, delay=None, cli_opts=None):
    """
    Creates a new instance of this class.

    Beyond the base class arguments this constructor takes:

        * delay: the typing-writer speed (see `TypingHelperBase` for
          details and the default value)
        * path: the directory scanned recursively for files to display
    """
    ScreenBase.__init__(self, name, description, cli_opts)
    if not cli_opts:
        # No options informed by the subclass: fall back to the default
        # getopt configuration understood by `_parse_args`.
        self.cli_opts = {'opts': 'hd:p:',
                         'long_opts': ['help', 'delay=', 'path=']}
    self.delay = delay
    self.path = path
    self.cleanup_per_cycle = False
def _run_cycle(self):
    """
    Executes a "cycle" of this screen. Despite the name, this method
    never returns: it loops over the discovered files forever.

    How it works:

        * Validates `self.path`, raising
          `exception.PathNotFoundException` if it does not exist.
        * Starts a daemon `FileScannerThread` that recursively scans
          `self.path` and puts valid file paths into a shared queue.
        * Repeatedly: takes a path from the queue, types the file's
          contents on screen, optionally clears the screen
          (`self.cleanup_per_file`), and puts the path back at the end
          of the queue so the rotation never exhausts itself.

    Note: `queue.Queue.get()` blocks until the scanner produces at
    least one path, so startup may pause on slow I/O.
    """
    # validate path before spawning the scanner
    if not os.path.exists(self.path):
        raise exception.PathNotFoundException(self.path)
    queue_of_valid_files = queue.Queue()
    # daemon thread: dies with the main thread, no join needed
    threads = [FileReaderBase.FileScannerThread(self, queue_of_valid_files, self.path)]
    threads[-1].daemon = True
    threads[-1].start()
    # NOTE: clear_screen() hides any error message produced before it!
    self.clear_screen()
    # blocks until the scanner has found the first file
    nextFile = queue_of_valid_files.get()
    while nextFile:
        with open(nextFile, 'r') as f:
            file_data = f.read()
            self.typing_print(file_data)
        if self.cleanup_per_file:
            self.clear_screen()
        # re-enqueue the path so the rotation cycles indefinitely
        queue_of_valid_files.put(nextFile)
        nextFile = queue_of_valid_files.get()
    def _usage_options_example(self):
        """
        Print the options and usage examples of this screen.

        The method `_parse_args` will be handling the parsing of the options
        documented here.

        Additionally, this is dependent on the values exposed in `cli_opts`,
        passed to this class during its instantiation. Only values properly
        configured there will be accepted here.
        """
        # The text is run through the `_` gettext hook, then interpolated with
        # app-level constants (note the %(...)s placeholders inside the string).
        print _("""
Options:
 -p, --path Sets the location to search for text-based source files.
            this option is mandatory.
 -d, --delay Sets the speed of the displaying characters
             default is%(default_delay)s of a second
 -h, --help Displays this help message
Examples:
    $ %(app_name)s %(screen)s -p /path/to/my/code
    This will trigger the screensaver to read all files in the path selected
    $ %(app_name)s %(screen)s -p /path/to/my/code -d 0
    This will trigger the screensaver to read all files in the path selected
    with no delay (too fast for a screensaver, but it's your choice that
    matters!)
""") % {
        'screen': self.name,
        'app_name': constants.App.NAME,
        'default_delay': constants.Settings.CHAR_DELAY_SECONDS,
    }
def _parse_args(self, prepared_args):
"""
Handles the special command-line arguments available for this screen.
Although this is a base screen, having these options prepared here
can save coding for screens that will not change the default options.
See `_usage_options_example` method for documentation on each of the
options being parsed here.
Additionally, this is dependent on the values exposed in `cli_opts`,
passed to this class during its instantiation. Only values properly
configured there will be accepted here.
"""
for o, a in prepared_args[0]: # optlist, args
if o in ("-h", "--help"):
self.usage()
self.screen_exit()
elif o in ("-d", "--delay"):
try:
# make sure argument is a valid value (float)
self.delay = float(a)
except:
raise exception.InvalidOptionException("delay")
elif o in ("-p", "--path"):
# make sure argument is a valid value (existing path)
self.path = a
if not os.path.exists(self.path):
raise exception.PathNotFoundException(self.path,
_("Make sure the file or directory exists."))
else:
# this should never happen!
raise Exception(_("Unhandled option. See --help for details."))
# last validations
if self.path in (None, ''):
raise exception.InvalidOptionException("path",
_("It is mandatory option"), help=self._message_no_path())
def _recurse_to_exec(self, path, func, filetype=''):
"""
Executes a function for each file found recursively within the
specified path.
Arguments:
* path: the path to be recursively checked (directory)
* func: the function to be executed with the file(s)
* filetype: to filter for a specific filetype
"""
try:
if os.path.isdir(path):
for item in os.listdir(path):
f = os.path.join(path, item)
if os.path.isdir(f):
if not item.startswith('.'):
self._recurse_to_exec(f, func, filetype)
elif f.endswith(filetype) and not self._is_path_binary(f):
func(f)
elif path.endswith(filetype) and not self._is_path_binary(path):
func(path)
except:
# If IOError, don't put on queue, as the path might throw
# another IOError during screen saver operations.
return
    @staticmethod
    def recursively_populate_queue(self, queue_of_valid_files, path, filetype=''):
        """
        Populate a (typically empty) queue with the paths of all matching
        files found recursively under "path".

        MUST be a staticmethod for threaded implementation to function.

        Arguments:
            * self: the screen instance (passed explicitly -- see note below)
            * queue_of_valid_files: queue that receives each valid file path
            * path: the path to be recursively checked (directory)
            * filetype: to filter for a specific filetype
        """
        # NOTE(review): although decorated @staticmethod, the first parameter
        # is named `self` and is dereferenced below, so callers (presumably
        # FileScannerThread) must pass the screen instance explicitly.
        # Confirm against the thread implementation before changing this
        # signature.
        self._recurse_to_exec(path, queue_of_valid_files.put, filetype)
def _is_path_binary(self, path):
"""
Returns True if the given path corresponds to a binary, or, if for any
reason, the file can not be accessed or opened.
For the merit of being a binary file (i.e., termsaver will not be able
to handle it), it is safe enough to consider the above True, as any
files in this situation will be simply skipped, avoiding weird errors
being thrown to the end-user.
Arguments:
* path: the file location
"""
CHUNKSIZE = 1024
f = None
try:
f = open(path, 'rb')
except:
# If IOError, don't even bother, as the path might throw
# another IOError during screen saver operations.
return True
try:
while True:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
| |
<reponame>vfreex/doozer<gh_stars>0
import asyncio
import copy
import errno
import glob
import hashlib
import io
import logging
import os
import pathlib
import re
import shutil
import time
import sys
import traceback
from datetime import date
from multiprocessing import Event, Lock
from typing import Tuple, Union, Optional, Dict
import aiofiles
import bashlex
import requests
import yaml
from dockerfile_parse import DockerfileParser
from kobo.rpmlib import parse_nvr
from tenacity import (before_sleep_log, retry, retry_if_not_result,
stop_after_attempt, wait_fixed)
from doozerlib import assertion, constants, exectools, logutil, state, util
from doozerlib.brew import get_build_objects, watch_task
from doozerlib.dblib import Record
from doozerlib.exceptions import DoozerFatalError
from doozerlib.model import ListModel, Missing, Model
from doozerlib.pushd import Dir
from doozerlib.source_modifications import SourceModifierFactory
from doozerlib.util import convert_remote_git_to_https, yellow_print
from doozerlib.assembly import AssemblyTypes
# doozer used to be part of OIT
OIT_COMMENT_PREFIX = '#oit##'
OIT_BEGIN = '##OIT_BEGIN'
OIT_END = '##OIT_END'
CONTAINER_YAML_HEADER = """
# This file is managed by doozer: https://github.com/openshift/doozer
# operated by the OpenShift Automated Release Tooling team (#aos-art on CoreOS Slack).
# Any manual changes will be overwritten by doozer on the next build.
#
# See https://source.redhat.com/groups/public/container-build-system/container_build_system_wiki/odcs_integration_with_osbs
# for more information on maintaining this file and the format and examples
---
"""
# Always ignore these files/folders when rebasing into distgit
# May be added to based on group/image config
BASE_IGNORE = [".git", ".oit", "additional-tags"]
logger = logutil.getLogger(__name__)
def recursive_overwrite(src, dest, ignore=None):
    """
    Use rsync to copy one file tree to a new location.

    :param src: source directory
    :param dest: destination directory
    :param ignore: optional iterable of path patterns to exclude
        (``.git`` is always excluded)
    """
    # FIX: default was the mutable `ignore=set()`, shared across calls.
    ignore = ignore or set()
    exclude = ' --exclude .git '
    for i in ignore:
        exclude += ' --exclude="{}" '.format(i)
    # NOTE(review): src/dest/patterns are interpolated into a shell command
    # line; callers must not pass untrusted values here.
    cmd = 'rsync -av {} {}/ {}/'.format(exclude, src, dest)
    exectools.cmd_assert(cmd, retries=3)
def pull_image(url):
    """Pull a container image with podman, retrying up to 3 times with a 60s pause."""
    logger.info("Pulling image: %s" % url)

    def _on_retry(_):
        # Log and back off before the next attempt.
        logger.info("Error pulling image %s -- retrying in 60 seconds" % url)
        time.sleep(60)

    exectools.retry(
        3, wait_f=_on_retry,
        task_f=lambda: exectools.cmd_gather(["podman", "pull", url])[0] == 0)
def build_image_ref_name(name):
return 'openshift/ose-' + re.sub(pattern='^ose-', repl='', string=name)
def map_image_name(name, image_map):
    """
    Apply the first matching substring replacement from image_map to name.

    :param name: image name to translate
    :param image_map: mapping of {substring: replacement}; iteration order decides
        which rule wins when several match
    :return: the translated name, or `name` unchanged when nothing matches
    """
    for match, replacement in image_map.items():
        # Idiomatic membership test (was `name.find(match) != -1`).
        if match in name:
            return name.replace(match, replacement)
    return name
class DistGitRepo(object):
    def __init__(self, metadata, autoclone=True):
        """
        Wrap a single dist-git repository backing an image/rpm metadata entry.

        :param metadata: the metadata object this repo backs (supplies config,
            runtime, name, logger)
        :param autoclone: when True, clone the dist-git repository immediately
            (network/filesystem side effect at construction time)
        """
        self.metadata = metadata
        self.config: Model = metadata.config
        self.runtime = metadata.runtime
        self.name: str = self.metadata.name
        # Filled in by clone().
        self.distgit_dir: str = None
        self.dg_path: pathlib.Path = None
        # Outcome flags updated by the build/push code paths.
        self.build_status = False
        self.push_status = False
        self.branch: str = self.runtime.branch
        # git shas and source details, populated during clone/rebase.
        self.sha: str = None
        self.source_sha: str = None
        self.source_full_sha: str = None
        self.source_latest_tag: str = None
        self.source_date_epoch = None
        self.actual_source_url: str = None
        self.public_facing_source_url: str = None
        # If this is a standard release, private_fix will be set to True if the source contains
        # embargoed (private) CVE fixes. Defaulting to None which means the value should be determined while rebasing.
        self.private_fix = None
        if self.runtime.assembly_type != AssemblyTypes.STANDARD:
            # Only standard releases can have embargoed workflows.
            self.private_fix = False
        # If we are rebasing, this map can be populated with
        # variables acquired from the source path.
        self.env_vars_from_source = None
        # Allow the config yaml to override branch
        # This is primarily useful for a sync only group.
        if self.config.distgit.branch is not Missing:
            self.branch = self.config.distgit.branch
        self.logger = self.metadata.logger
        # Initialize our distgit directory, if necessary
        if autoclone:
            self.clone(self.runtime.distgits_dir, self.branch)
def pull_sources(self):
"""
Pull any distgit sources (use only after after clone)
"""
with Dir(self.distgit_dir):
sources_file: pathlib.Path = self.dg_path.joinpath('sources')
if not sources_file.exists():
self.logger.debug('No sources file exists; skipping rhpkg sources')
return
exectools.cmd_assert('rhpkg sources')
    def clone(self, distgits_root_dir, distgit_branch):
        """
        Clone (or reuse) this metadata's dist-git repository under
        `distgits_root_dir` on `distgit_branch`, then record HEAD in
        `self.sha`.

        :param distgits_root_dir: root directory under which per-namespace
            clone directories are created
        :param distgit_branch: branch to clone/check out
        :raises IOError: if cloning was disabled for this metadata
            (`prevent_cloning`); reaching this point then indicates a
            regression in the caller.
        """
        if self.metadata.prevent_cloning:
            raise IOError(f'Attempt to clone downstream {self.metadata.distgit_key} after cloning disabled; a regression has been introduced.')
        with Dir(distgits_root_dir):
            namespace_dir = os.path.join(distgits_root_dir, self.metadata.namespace)
            # It is possible we have metadata for the same distgit twice in a group.
            # There are valid scenarios (when they represent different branches) and
            # scenarios where this is a user error. In either case, make sure we
            # don't conflict by stomping on the same git directory.
            self.distgit_dir = os.path.join(namespace_dir, self.metadata.distgit_key)
            self.dg_path = pathlib.Path(self.distgit_dir)
            # In --local mode with inline content, a bare directory stands in
            # for a real dist-git clone (see the mkdir branch below).
            fake_distgit = (self.runtime.local and 'content' in self.metadata.config)
            if os.path.isdir(self.distgit_dir):
                # Reuse the existing clone; optionally re-sync with upstream.
                self.logger.info("Distgit directory already exists; skipping clone: %s" % self.distgit_dir)
                if self.runtime.upcycle:
                    self.logger.info("Refreshing source for '{}' due to --upcycle".format(self.distgit_dir))
                    with Dir(self.distgit_dir):
                        # Discard any local drift and hard-reset to upstream.
                        exectools.cmd_assert('git fetch --all', retries=3)
                        exectools.cmd_assert('git reset --hard @{upstream}', retries=3)
            else:
                # Make a directory for the distgit namespace if it does not already exist
                try:
                    os.mkdir(namespace_dir)
                except OSError as e:
                    # EEXIST is fine (another worker may have created it).
                    if e.errno != errno.EEXIST:
                        raise
                if fake_distgit and self.runtime.command in ['images:rebase', 'images:update-dockerfile']:
                    cmd_list = ['mkdir', '-p', self.distgit_dir]
                    self.logger.info("Creating local build dir: {}".format(self.distgit_dir))
                    exectools.cmd_assert(cmd_list)
                else:
                    if self.runtime.command == 'images:build':
                        yellow_print('Warning: images:rebase was skipped and therefore your '
                                     'local build will be sourced from the current dist-git '
                                     'contents and not the typical GitHub source. '
                                     )
                    self.logger.info("Cloning distgit repository [branch:%s] into: %s" % (distgit_branch, self.distgit_dir))
                    # Has the user specified a specific commit to checkout from distgit on the command line?
                    distgit_commitish = self.runtime.downstream_commitish_overrides.get(self.metadata.distgit_key, None)
                    timeout = str(self.runtime.global_opts['rhpkg_clone_timeout'])
                    rhpkg_clone_depth = int(self.runtime.global_opts.get('rhpkg_clone_depth', '0'))
                    if self.metadata.namespace == 'containers' and self.runtime.cache_dir:
                        # Containers don't generally require distgit lookaside. We can rely on normal
                        # git clone & leverage git caches to greatly accelerate things if the user supplied it.
                        gitargs = ['--branch', distgit_branch]
                        if not distgit_commitish:
                            # A shallow/single-branch clone is only safe when we
                            # don't need to check out an arbitrary commit later.
                            gitargs.append('--single-branch')
                        if not distgit_commitish and rhpkg_clone_depth > 0:
                            gitargs.extend(["--depth", str(rhpkg_clone_depth)])
                        self.runtime.git_clone(self.metadata.distgit_remote_url(), self.distgit_dir, gitargs=gitargs,
                                               set_env=constants.GIT_NO_PROMPTS, timeout=timeout)
                    else:
                        # Use rhpkg -- presently no idea how to cache.
                        cmd_list = ["timeout", timeout]
                        cmd_list.append("rhpkg")
                        if self.runtime.rhpkg_config_lst:
                            cmd_list.extend(self.runtime.rhpkg_config_lst)
                        if self.runtime.user is not None:
                            cmd_list.append("--user=%s" % self.runtime.user)
                        cmd_list.extend(["clone", self.metadata.qualified_name, self.distgit_dir])
                        cmd_list.extend(["--branch", distgit_branch])
                        if not distgit_commitish and rhpkg_clone_depth > 0:
                            cmd_list.extend(["--depth", str(rhpkg_clone_depth)])
                        # Clone the distgit repository. Occasional flakes in clone, so use retry.
                        exectools.cmd_assert(cmd_list, retries=3, set_env=constants.GIT_NO_PROMPTS)
                    if distgit_commitish:
                        with Dir(self.distgit_dir):
                            exectools.cmd_assert(f'git checkout {distgit_commitish}')
        # Record the checked-out HEAD sha for later reference.
        self.sha, _ = exectools.cmd_assert(["git", "-C", self.distgit_dir, "rev-parse", "HEAD"], strip=True)
    def merge_branch(self, target, allow_overwrite=False):
        """
        Switch this dist-git repo to `target` and merge the current branch's
        history into it.

        :param target: branch to switch to and merge into
        :param allow_overwrite: when False, refuse to merge if the target
            branch already contains a Dockerfile or .oit directory
        :raises IOError: when the overwrite guard trips
        """
        self.logger.info('Switching to branch: {}'.format(target))
        cmd = ["rhpkg"]
        if self.runtime.rhpkg_config_lst:
            cmd.extend(self.runtime.rhpkg_config_lst)
        cmd.extend(["switch-branch", target])
        exectools.cmd_assert(cmd, retries=3)
        if not allow_overwrite:
            # NOTE(review): these are relative paths, so this checks the current
            # working directory; unlike sibling methods there is no
            # `Dir(self.distgit_dir)` context here -- confirm callers run this
            # with CWD inside the dist-git checkout.
            if os.path.isfile('Dockerfile') or os.path.isdir('.oit'):
                raise IOError('Unable to continue merge. Dockerfile found in target branch. Use --allow-overwrite to force.')
        self.logger.info('Merging source branch history over current branch')
        msg = 'Merge branch {} into {}'.format(self.branch, target)
        exectools.cmd_assert(
            cmd=['git', 'merge', '--allow-unrelated-histories', '-m', msg, self.branch],
            retries=3,
            on_retry=['git', 'reset', '--hard', target],  # in case merge failed due to storage
        )
def has_source(self):
"""
Check whether this dist-git repo has source content
"""
return "git" in self.config.content.source or \
"alias" in self.config.content.source
def source_path(self):
"""
:return: Returns the directory containing the source which should be used to populate distgit. This includes
the source.path subdirectory if the metadata includes one.
"""
source_root = self.runtime.resolve_source(self.metadata)
sub_path = self.config.content.source.path
path = source_root
if sub_path is not Missing:
path = os.path.join(source_root, sub_path)
assertion.isdir(path, "Unable to find path for source [%s] for config: %s" % (path, self.metadata.config_filename))
return path
def source_repo_path(self):
"""
:return: Returns the directory containing the root of the cloned source repo.
"""
path = self.runtime.resolve_source(self.metadata)
assertion.isdir(path, "Unable to find path for source [%s] for config: %s" % (path, self.metadata.config_filename))
return path
def _get_diff(self):
return None # to actually record a diff, child classes must override this function
def commit(self, cmdline_commit_msg: str, commit_attributes: Optional[Dict[str, Union[int, str, bool]]] = None, log_diff=False):
if self.runtime.local:
return '' # no commits if local
with Dir(self.distgit_dir):
commit_payload: Dict[str, Union[int, str, bool]] = {
'MaxFileSize': 100 * 1024 * 1024, # 100MB push limit; see https://source.redhat.com/groups/public/release-engineering/release_engineering_rcm_wiki/dist_git_update_hooks
'jenkins.url': None if 'unittest' in sys.modules.keys() else os.getenv('BUILD_URL'), # Get the Jenkins build URL if available, but ignore if this is a unit test run
}
if self.dg_path: # Might not be set if this is a unittest
df_path = self.dg_path.joinpath('Dockerfile')
if df_path.exists():
# This is an image distgit commit, we can help the callers by reading in env variables for the commit message.
# RPM commits are expected to pass these values in directly in commit_attributes.
dfp = DockerfileParser(str(df_path))
for var_name in ['version', 'release', 'io.openshift.build.source-location', 'io.openshift.build.commit.id']:
commit_payload[var_name] = dfp.labels.get(var_name, None)
if commit_attributes:
commit_payload.update(commit_attributes)
# The commit should be a valid yaml document so we can retrieve details
# programmatically later. The human specified portion of the commit is
# included in comments above the yaml payload.
cmdline_commit_msg = cmdline_commit_msg.strip().replace('\n', '\n# ') # If multiple lines are specified, split across commented lines.
commit_msg = f'# {cmdline_commit_msg}\n' # Any message specified in '-m' during rebase
commit_msg += yaml.safe_dump(commit_payload, default_flow_style=False, sort_keys=True)
self.logger.info("Adding commit to local repo:\n{}".format(commit_msg))
if log_diff:
diff = self._get_diff()
if diff and diff.strip():
self.runtime.add_distgits_diff(self.metadata.distgit_key, diff)
# commit changes; if these flake there is probably not much we can do about it
exectools.cmd_assert(["git", "add", "-A", "."])
exectools.cmd_assert(["git", "commit", "--allow-empty", "-m", commit_msg])
rc, sha, | |
current_timestamp)
order by JobId desc,
DbUpdatedTimestamp desc,
LastChgTimestamp desc
loop
if v_job.JobId <> v_prev_job_id then
v_prev_job_id := v_job.JobId;
if v_job.State <> 'S' then
v_rec.JobId := v_job.JobId;
v_rec.JobName := v_job.JobName;
v_rec.State := v_job.State;
v_rec.Bsn := v_job.Bsn;
v_rec.UserName := v_job.UserName;
v_rec.StartTimestamp := v_job.StartTimestamp;
v_rec.EndTimestamp := v_job.EndTimestamp;
v_rec.ExitStatus := v_job.ExitStatus;
v_rec.NumNodes := v_job.NumNodes;
v_rec.Nodes := GetListNodeLctns(v_job.Nodes);
v_rec.JobAcctInfo := v_job.JobAcctInfo;
v_rec.Wlmjobstate := v_job.Wlmjobstate;
return next v_rec;
end if;
end if;
end loop;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION raseventlistattime(p_start_time timestamp without time zone, p_end_time timestamp without time zone) RETURNS SETOF raseventtype
LANGUAGE plpgsql
AS $$
BEGIN
if p_start_time is not null then
return query
select RE.EventType,
RE.LastChgTimestamp,
RE.DbUpdatedTimestamp,
MD.Severity,
RE.Lctn,
RE.JobId,
RE.ControlOperation,
MD.Msg,
RE.InstanceData
from Tier2_RasEvent RE
inner join Tier2_RasMetaData MD on
RE.EventType = MD.EventType and
MD.DbUpdatedTimestamp =
(select max(T.DbUpdatedTimestamp) from Tier2_RasMetaData T
where T.EventType = MD.EventType)
where RE.DbUpdatedTimestamp <=
coalesce(p_end_time, current_timestamp) and
RE.DbUpdatedTimestamp >= p_start_time and
MD.Severity = 'ERROR' or MD.Severity = 'FATAL'
order by RE.DbUpdatedTimestamp desc, RE.EventType, RE.Id LIMIT 200;
else
return query
select RE.EventType,
RE.LastChgTimestamp,
RE.DbUpdatedTimestamp,
MD.Severity,
RE.Lctn,
RE.JobId,
RE.ControlOperation,
MD.Msg,
RE.InstanceData
from Tier2_RasEvent RE
inner join Tier2_RasMetaData MD on
RE.EventType = MD.EventType and
MD.DbUpdatedTimestamp =
(select max(T.DbUpdatedTimestamp) from Tier2_RasMetaData T
where T.EventType = MD.EventType)
where RE.DbUpdatedTimestamp <=
coalesce(p_end_time, current_timestamp) and
MD.Severity = 'ERROR' or MD.Severity = 'FATAL'
order by RE.DbUpdatedTimestamp desc, RE.EventType, RE.Id LIMIT 200;
end if;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION reservationlistattime(p_start_time timestamp without time zone, p_end_time timestamp without time zone) RETURNS SETOF reservationtype
LANGUAGE plpgsql
AS $$
BEGIN
if p_start_time is null then
return query
select RE.ReservationName,
RE.Users,
RE.Nodes,
RE.StartTimestamp,
RE.EndTimestamp,
RE.DeletedTimestamp,
RE.LastChgTimestamp
from Tier2_WlmReservation_History RE
where RE.DbUpdatedTimestamp <= coalesce(p_end_time, current_timestamp)
order by RE.DbUpdatedTimestamp desc, RE.ReservationName, RE.Users LIMIT 200;
else
return query
select RE.ReservationName,
RE.Users,
RE.Nodes,
RE.StartTimestamp,
RE.EndTimestamp,
RE.DeletedTimestamp,
RE.LastChgTimestamp
from Tier2_WlmReservation_History RE
where RE.DbUpdatedTimestamp <= coalesce(p_end_time, current_timestamp) and
RE.DbUpdatedTimestamp >= p_start_time
order by RE.DbUpdatedTimestamp desc, RE.ReservationName, RE.Users LIMIT 200;
end if;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION serviceinventorylist() RETURNS SETOF tier2_servicenode_history
LANGUAGE sql
AS $$
select DISTINCT ON (lctn) lctn, hostname, state, sernum, bootimageid, ipaddr, macaddr, type, bmcipaddr, bmcmacaddr,
bmchostname, dbupdatedtimestamp, lastchgtimestamp, lastchgadaptertype, lastchgworkitemid, owner, inventoryinfo
from tier2_servicenode_history order by lctn, dbupdatedtimestamp desc;
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION inventorysnapshotlist(
p_start_time timestamp without time zone,
p_end_time timestamp without time zone)
RETURNS SETOF inventorytype
LANGUAGE plpgsql
AS $$
BEGIN
if (p_start_time is not null) then
return query
select * from tier2_inventorysnapshot
where snapshottimestamp <= coalesce(p_end_time, current_timestamp) and
snapshottimestamp >= p_start_time
order by snapshottimestamp desc;
else
return query
select * from tier2_inventorysnapshot
where snapshottimestamp <= coalesce(p_end_time, current_timestamp)
order by snapshottimestamp desc;
end if;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION inventoryinfolist(
p_start_time timestamp without time zone,
p_end_time timestamp without time zone)
RETURNS SETOF inventorytype
LANGUAGE plpgsql
AS $$
BEGIN
if (p_start_time is not null) then
return query
select distinct on (lctn) lctn, lastchgtimestamp, inventoryinfo from tier2_computenode_history
where dbupdatedtimestamp <= coalesce(p_end_time, current_timestamp) and
dbupdatedtimestamp >= p_start_time and lastchgadaptertype != 'POPULATE'
order by lctn, lastchgtimestamp desc;
else
return query
select distinct on (lctn) lctn, lastchgtimestamp, inventoryinfo from tier2_computenode_history
where dbupdatedtimestamp <= coalesce(p_end_time, current_timestamp) and lastchgadaptertype != 'POPULATE'
order by lctn, lastchgtimestamp desc;
end if;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION replacementhistorylist(
p_start_time timestamp without time zone,
p_end_time timestamp without time zone)
RETURNS SETOF tier2_replacement_history
LANGUAGE plpgsql
AS $$
BEGIN
if (p_start_time is not null) then
return query
select * from tier2_replacement_history
where dbupdatedtimestamp <= coalesce(p_end_time, current_timestamp) and
dbupdatedtimestamp >= p_start_time
order by lastchgtimestamp desc;
else
return query
select * from tier2_replacement_history
where dbupdatedtimestamp <= coalesce(p_end_time, current_timestamp)
order by lastchgtimestamp desc;
end if;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION getaggregatedevndatawithfilters(
p_start_time timestamp without time zone,
p_end_time timestamp without time zone,
p_lctn character varying,
p_limit integer)
RETURNS SETOF tier2_aggregatedenvdata
LANGUAGE sql
AS $$
select * from tier2_aggregatedenvdata
where lctn similar to (p_lctn || '%') and
timestamp <= coalesce(p_end_time, current_timestamp) and
timestamp >= coalesce(p_start_time, current_timestamp - INTERVAL '3 MONTHS')
order by timestamp LIMIT p_limit;
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION getinventorychange(
p_start_time timestamp without time zone,
p_end_time timestamp without time zone,
p_lctn character varying,
p_sernum character varying,
p_limit integer)
RETURNS SETOF tier2_replacement_history
LANGUAGE plpgsql
AS $$
BEGIN
if p_sernum != '%' then
return query
select * from tier2_replacement_history
where dbupdatedtimestamp <= coalesce(p_end_time, current_timestamp) and
dbupdatedtimestamp >= coalesce(p_start_time, current_timestamp - INTERVAL '3 MONTHS') and
newsernum like (p_sernum || '%')
order by lctn, dbupdatedtimestamp desc limit p_limit;
else
return query
select * from tier2_replacement_history
where dbupdatedtimestamp <= coalesce(p_end_time, current_timestamp) and
dbupdatedtimestamp >= coalesce(p_start_time, current_timestamp - INTERVAL '3 MONTHS') and
lctn like (p_lctn || '%')
order by lctn, dbupdatedtimestamp desc limit p_limit;
end if;
return;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION getinventorydataforlctn(
p_start_time timestamp without time zone,
p_end_time timestamp without time zone,
p_lctn character varying,
p_limit integer)
RETURNS SETOF tier2_computenode_history
LANGUAGE plpgsql
AS $$
BEGIN
if p_start_time is null and p_end_time is null then
return query
select distinct on (lctn) lctn, sequencenumber, state, hostname, sernum, bootimageid, ipaddr, macaddr, type, bmcipaddr, bmcmacaddr, bmchostname, dbupdatedtimestamp, lastchgtimestamp, lastchgadaptertype, lastchgworkitemid, owner, inventoryinfo from tier2_computenode_history
where lctn like (p_lctn || '%')
order by dbupdatedtimestamp desc limit p_limit;
else
return query
select lctn, sequencenumber, state, hostname, sernum, bootimageid, environment, ipaddr, macaddr, type, bmcipaddr, bmcmacaddr, bmchostname, dbupdatedtimestamp, lastchgtimestamp, lastchgadaptertype, lastchgworkitemid, owner, inventoryinfo from tier2_computenode_history
where dbupdatedtimestamp <= coalesce(p_end_time, current_timestamp) and
dbupdatedtimestamp >= coalesce(p_start_time, current_timestamp - INTERVAL '3 MONTHS') and
lctn = p_lctn
order by dbupdatedtimestamp desc limit p_limit;
END IF;
return;
END;
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION getlistnodelctnsastable(
p_job_nodes bytea)
RETURNS character varying[]
LANGUAGE plpgsql
AS $$
DECLARE
v_lctn varchar;
v_list varchar[];
v_first boolean := true;
v_num_bits integer;
v_bit_index integer;
BEGIN
CREATE temporary TABLE nodelisttable (lctn character varying(10) not null) on commit drop;
v_num_bits := length(p_job_nodes) * 8;
for i in 0 .. v_num_bits - 1 loop
v_bit_index := v_num_bits - 1 - i;
if get_bit(p_job_nodes, v_bit_index) = 1 then
select Lctn into v_lctn from Tier2_ComputeNode_History
where SequenceNumber = i
order by DbUpdatedTimestamp limit 1;
if v_lctn is null then
raise exception 'GetListNodeLctns - can''t find corresponding Lctn string for node sequence number = %!', i;
end if;
v_list := array_append(v_list, v_lctn);
end if;
end loop;
return v_list;
END
$$;
"""))
op.execute(textwrap.dedent("""
CREATE OR REPLACE FUNCTION getraseventswithfilters(
p_start_time timestamp without time zone,
p_end_time timestamp without time zone,
p_lctn character varying,
p_event_type character varying,
p_severity character varying,
p_limit integer)
RETURNS SETOF raseventtype
LANGUAGE plpgsql
AS $$
DECLARE
v_start_time timestamp without time zone;
v_end_time timestamp without time zone;
v_lctn character varying;
v_event_type character varying;
v_severity character varying;
v_limit integer;
BEGIN
v_start_time := p_start_time;
v_end_time := p_end_time;
v_lctn := p_lctn;
v_event_type := p_event_type;
v_severity := p_severity;
v_limit := p_limit;
if v_severity = '%' then
return query
select RE.EventType,
RE.LastChgTimestamp,
RE.DbUpdatedTimestamp,
MD.Severity,
RE.Lctn,
RE.JobId,
RE.ControlOperation,
MD.Msg,
RE.InstanceData
from Tier2_RasEvent RE
inner join Tier2_RasMetaData MD on
RE.EventType = MD.EventType
where RE.DbUpdatedTimestamp <=
coalesce(v_end_time, current_timestamp) and
RE.DbUpdatedTimestamp >= coalesce(v_start_time, current_timestamp - INTERVAL '6 MONTHS') and
MD.descriptivename like (v_event_type || '%') and
RE.lctn like (v_lctn || '%')
order by RE.DbUpdatedTimestamp desc, RE.EventType, RE.Id LIMIT v_limit;
else
return query
select RE.EventType,
RE.LastChgTimestamp,
RE.DbUpdatedTimestamp,
MD.Severity,
RE.Lctn,
RE.JobId,
RE.ControlOperation,
MD.Msg,
RE.InstanceData
from Tier2_RasEvent RE
inner join Tier2_RasMetaData MD on
RE.EventType = MD.EventType
where RE.DbUpdatedTimestamp <=
coalesce(v_end_time, current_timestamp) and
RE.DbUpdatedTimestamp >= coalesce(v_start_time, current_timestamp - INTERVAL '6 MONTHS') and
MD.Severity = upper(v_severity) and MD.descriptivename like (v_event_type || '%') and
RE.lctn like (v_lctn || '%')
order by RE.DbUpdatedTimestamp desc, RE.EventType, RE.Id LIMIT v_limit;
end if;
return;
END
$$;
"""))
def drop_tables():
op.execute(textwrap.dedent("""
DROP TABLE tier2_aggregatedenvdata;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_computenode_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_diag;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_servicenode_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_adapter_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_alert;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_bootimage_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_chassis_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_fabrictopology_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_inventorysnapshot;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_job_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_jobstep_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_lustre_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_machine_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_rack_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_rasevent;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_rasmetadata;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_replacement_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_serviceoperation_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_switch_history;
"""))
op.execute(textwrap.dedent("""
DROP TABLE tier2_wlmreservation_history;
| |
# example for accessing smeared hits and fitted tracks
import ROOT,os,sys,getopt
import rootUtils as ut
import shipunit as u
from ShipGeoConfig import ConfigRegistry
from rootpyPickler import Unpickler
import shipRoot_conf
shipRoot_conf.configure()
debug = False
chi2CutOff = 4.
PDG = ROOT.TDatabasePDG.Instance()
inputFile = None
geoFile = None
dy = None
nEvents = 99999
fiducialCut = True
measCutFK = 25
measCutPR = 22
docaCut = 2.
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:g:A:Y:i", ["nEvents=","geoFile="])
except getopt.GetoptError:
# print help information and exit:
print ' enter file name'
sys.exit()
for o, a in opts:
if o in ("-f"):
inputFile = a
if o in ("-g", "--geoFile"):
geoFile = a
if o in ("-Y"):
dy = float(a)
if o in ("-n", "--nEvents="):
nEvents = int(a)
if not dy:
# try to extract from input file name
tmp = inputFile.split('.')
try:
dy = float( tmp[1]+'.'+tmp[2] )
except:
dy = None
else:
inputFile = 'ship.'+str(dy)+'.Pythia8-TGeant4_rec.root'
# Open the reconstructed event tree. Three input styles are supported:
#   * an EOS path (read remotely via xrootd)
#   * a comma-separated list of files (chained into a TChain)
#   * a plain local ROOT file
if inputFile[0:4] == "/eos":
    eospath = "root://eoslhcb/"+inputFile
    f = ROOT.TFile.Open(eospath)
    sTree = f.cbmsim
elif not inputFile.find(',')<0 :
    # comma-separated list: chain every file onto one tree
    sTree = ROOT.TChain("cbmsim")
    for x in inputFile.split(','):
        sTree.AddFile(x)
else:
    f = ROOT.TFile(inputFile)
    sTree = f.cbmsim
# try to figure out which ecal geo to load
if not geoFile:
    # derive the geometry filename from the input filename convention
    geoFile = inputFile.replace('ship.','geofile_full.').replace('_rec.','.')
if geoFile[0:4] == "/eos":
    eospath = "root://eoslhcb/"+geoFile
    fgeo = ROOT.TFile.Open(eospath)
else:
    fgeo = ROOT.TFile(geoFile)
sGeo = fgeo.FAIRGeom
if not fgeo.FindKey('ShipGeo'):
    # old geofile, missing Shipgeo dictionary
    # infer the ecal geometry from which module volumes exist in the file
    if sGeo.GetVolume('EcalModule3') : ecalGeoFile = "ecal_ellipse6x12m2.geo"
    else: ecalGeoFile = "ecal_ellipse5x10m2.geo"
    print 'found ecal geo for ',ecalGeoFile
    # re-create geometry and mag. field
    ShipGeo = ConfigRegistry.loadpy("$FAIRSHIP/geometry/geometry_config.py", Yheight = dy, EcalGeoFile = ecalGeoFile )
else:
    # new geofile, load Shipgeo dictionary written by run_simScript.py
    upkl = Unpickler(fgeo)
    ShipGeo = upkl.load('ShipGeo')
    ecalGeoFile = ShipGeo.ecal.File
# -----Create geometry----------------------------------------------
import shipDet_conf
run = ROOT.FairRunSim()
modules = shipDet_conf.configure(run,ShipGeo)
gMan = ROOT.gGeoManager
geoMat = ROOT.genfit.TGeoMaterialInterface()
ROOT.genfit.MaterialEffects.getInstance().init(geoMat)
# map volume index -> volume name for later lookups
volDict = {}
i=0
for x in ROOT.gGeoManager.GetListOfVolumes():
  volDict[i]=x.GetName()
  i+=1
# magnetic field handed to genfit for track extrapolation
bfield = ROOT.genfit.BellField(ShipGeo.Bfield.max ,ShipGeo.Bfield.z,2, ShipGeo.Yheight/2.)
fM = ROOT.genfit.FieldManager.getInstance()
fM.init(bfield)
# prepare veto decisions
import shipVeto
veto = shipVeto.Task()
vetoDets={}
# fiducial cuts
# decay-volume boundaries: downstream edge of the veto station and
# upstream edge of the first tracking station (z translations +/- half-depth)
vetoStation = ROOT.gGeoManager.GetTopVolume().GetNode('Veto_5')
vetoStation_zDown = vetoStation.GetMatrix().GetTranslation()[2]+vetoStation.GetVolume().GetShape().GetDZ()
T1Station = ROOT.gGeoManager.GetTopVolume().GetNode('Tr1_1')
T1Station_zUp = T1Station.GetMatrix().GetTranslation()[2]-T1Station.GetVolume().GetShape().GetDZ()
# ---- book monitoring histograms into the dict h, keyed by name ----
h = {}
# momentum resolution and pulls vs true momentum
ut.bookHist(h,'delPOverP','delP / P',400,0.,200.,100,-0.5,0.5)
ut.bookHist(h,'pullPOverPx','delPx / sigma',400,0.,200.,100,-3.,3.)
ut.bookHist(h,'pullPOverPy','delPy / sigma',400,0.,200.,100,-3.,3.)
ut.bookHist(h,'pullPOverPz','delPz / sigma',400,0.,200.,100,-3.,3.)
ut.bookHist(h,'delPOverP2','delP / P chi2/nmeas<'+str(chi2CutOff),400,0.,200.,100,-0.5,0.5)
ut.bookHist(h,'delPOverPz','delPz / Pz',400,0.,200.,100,-0.5,0.5)
ut.bookHist(h,'delPOverP2z','delPz / Pz chi2/nmeas<'+str(chi2CutOff),400,0.,200.,100,-0.5,0.5)
# track-fit quality
ut.bookHist(h,'chi2','chi2/nmeas after trackfit',100,0.,10.)
ut.bookHist(h,'prob','prob(chi2)',100,0.,1.)
ut.bookHist(h,'IP','Impact Parameter',100,0.,10.)
# vertex residuals and pulls
ut.bookHist(h,'Vzresol','Vz reco - true [cm]',100,-50.,50.)
ut.bookHist(h,'Vxresol','Vx reco - true [cm]',100,-10.,10.)
ut.bookHist(h,'Vyresol','Vy reco - true [cm]',100,-10.,10.)
ut.bookHist(h,'Vzpull','Vz pull',100,-3.,3.)
ut.bookHist(h,'Vxpull','Vx pull',100,-3.,3.)
ut.bookHist(h,'Vypull','Vy pull',100,-3.,3.)
ut.bookHist(h,'Doca','Doca between two tracks',100,0.,10.)
ut.bookHist(h,'IP0','Impact Parameter to target',100,0.,100.)
ut.bookHist(h,'IP0/mass','Impact Parameter to target vs mass',100,0.,2.,100,0.,100.)
# reconstructed candidate mass
ut.bookHist(h,'HNL','reconstructed Mass',500,0.,2.)
ut.bookHist(h,'HNLw','reconstructed Mass with weights',500,0.,2.)
ut.bookHist(h,'meas','number of measurements',40,-0.5,39.5)
ut.bookHist(h,'meas2','number of measurements, fitted track',40,-0.5,39.5)
ut.bookHist(h,'measVSchi2','number of measurements vs chi2/meas',40,-0.5,39.5,100,0.,10.)
ut.bookHist(h,'distu','distance to wire',100,0.,1.)
ut.bookHist(h,'distv','distance to wire',100,0.,1.)
ut.bookHist(h,'disty','distance to wire',100,0.,1.)
ut.bookHist(h,'meanhits','mean number of hits / track',50,-0.5,49.5)
ut.bookHist(h,'ecalClusters','x/y and energy',50,-3.,3.,50,-6.,6.)
ut.bookHist(h,'oa','cos opening angle',100,0.999,1.)
# potential Veto detectors
ut.bookHist(h,'nrtracks','nr of tracks in signal selected',10,-0.5,9.5)
ut.bookHist(h,'nrSVT','nr of hits in SVT',10,-0.5,9.5)
ut.bookHist(h,'nrUVT','nr of hits in UVT',100,-0.5,99.5)
ut.bookHist(h,'nrSBT','nr of hits in SBT',100,-0.5,99.5)
ut.bookHist(h,'nrRPC','nr of hits in RPC',100,-0.5,99.5)
import TrackExtrapolateTool
def VertexError(t1,t2,PosDir,CovMat,scalFac):
  """Vertex of two straight tracks with full error propagation.

  t1, t2  -- keys into the dictionaries below
  PosDir  -- {key: {'position': TVector3 a, 'direction': TVector3 u}}
  CovMat  -- {key: 6x6 covariance of the track state}
  scalFac -- {key: scale factor applied to the direction block of the covariance}
  Returns (X, covX, dist): the midpoint of closest approach (TVector3),
  its 3x3 covariance matrix, and the distance between the two tracks there.
  """
  # with improved Vx x,y resolution
  a,u = PosDir[t1]['position'],PosDir[t1]['direction']
  c,v = PosDir[t2]['position'],PosDir[t2]['direction']
  Vsq = v.Dot(v)
  Usq = u.Dot(u)
  UV = u.Dot(v)
  ca = c-a
  denom = Usq*Vsq-UV**2
  tmp2 = Vsq*u-UV*v
  Va = ca.Dot(tmp2)/denom  # line parameter of closest approach on track 1
  tmp2 = UV*u-Usq*v
  Vb = ca.Dot(tmp2)/denom  # line parameter of closest approach on track 2
  X = (a+c+Va*u+Vb*v) * 0.5  # midpoint between the two points of closest approach
  l1 = a - X + u*Va # l2 = c - X + v*Vb
  dist = 2. * ROOT.TMath.Sqrt( l1.Dot(l1) )
  # T = dX/d(a,u,c,v): 3x12 Jacobian of the vertex w.r.t. both track states
  T = ROOT.TMatrixD(3,12)
  for i in range(3):
   for k in range(4):
     for j in range(3):
      KD = 0
      if i==j: KD = 1
      if k==0 or k==2:
       # cova and covc
       temp = ( u[j]*Vsq - v[j]*UV )*u[i] + (u[j]*UV-v[j]*Usq)*v[i]
       sign = -1
       if k==2 : sign = +1
       T[i][3*k+j] = 0.5*( KD + sign*temp/denom )
      elif k==1:
       # covu
       aNAZ = denom*( ca[j]*Vsq-v.Dot(ca)*v[j] )
       aZAN = ( ca.Dot(u)*Vsq-ca.Dot(v)*UV )*2*( u[j]*Vsq-v[j]*UV )
       bNAZ = denom*( ca[j]*UV+(u.Dot(ca)*v[j]) - 2*ca.Dot(v)*u[j] )
       bZAN = ( ca.Dot(u)*UV-ca.Dot(v)*Usq )*2*( u[j]*Vsq-v[j]*UV )
       T[i][3*k+j] = 0.5*( Va*KD + u[i]/denom**2*(aNAZ-aZAN) + v[i]/denom**2*(bNAZ-bZAN) )
      elif k==3:
       # covv
       aNAZ = denom*( 2*ca.Dot(u)*v[j] - ca.Dot(v)*u[j] - ca[j]*UV )
       aZAN = ( ca.Dot(u)*Vsq-ca.Dot(v)*UV )*2*( v[j]*Usq-u[j]*UV )
       bNAZ = denom*( ca.Dot(u)*u[j]-ca[j]*Usq )
       bZAN = ( ca.Dot(u)*UV-ca.Dot(v)*Usq )*2*( v[j]*Usq-u[j]*UV )
       T[i][3*k+j] = 0.5*(Vb*KD + u[i]/denom**2*(aNAZ-aZAN) + v[i]/denom**2*(bNAZ-bZAN) )
  transT = ROOT.TMatrixD(12,3)
  transT.Transpose(T)
  # assemble the 12x12 covariance of (a,u,c,v); the direction entries
  # (indices > 2 within each 6-block) are rescaled per track by scalFac
  CovTracks = ROOT.TMatrixD(12,12)
  tlist = [t1,t2]
  for k in range(2):
   for i in range(6):
    for j in range(6):
     xfac = 1.
     if i>2: xfac = scalFac[tlist[k]]
     if j>2: xfac = xfac * scalFac[tlist[k]]
     CovTracks[i+k*6][j+k*6] = CovMat[tlist[k]][i][j] * xfac
     # if i==5 or j==5 : CovMat[tlist[k]][i][j] = 0 # ignore error on z-direction
  # propagate: covX = T * CovTracks * T^t
  tmp = ROOT.TMatrixD(3,12)
  tmp.Mult(T,CovTracks)
  covX = ROOT.TMatrixD(3,3)
  covX.Mult(tmp,transT)
  return X,covX,dist
def Rsq(X, Y, dy):
  """Squared normalized elliptic radius of (X, Y); > 1 means outside the
  fiducial ellipse whose vertical半 axis depends on dy (uses global units u)."""
  semi_x = 2.45 * u.m
  semi_y = (dy / 2. - 0.05) * u.m
  return (X / semi_x) ** 2 + (Y / semi_y) ** 2
#
def ImpactParameter(point, tPos, tMom):
  """Distance of closest approach of the straight line through tPos with
  direction tMom to the given point (all ROOT 3-vectors)."""
  if hasattr(tMom, 'P'):
    P = tMom.P()
  else:
    P = tMom.Mag()
  # projection of (point - tPos) onto the unit track direction
  t = sum(tMom(i) / P * (point(i) - tPos(i)) for i in range(3))
  # squared residual perpendicular to the track
  dist2 = sum((point(i) - tPos(i) - t * tMom(i) / P) ** 2 for i in range(3))
  return ROOT.TMath.Sqrt(dist2)
#
def checkHNLorigin(sTree):
  """Return True if the MC-truth HNL decay vertex lies inside the fiducial
  decay volume (between veto station + 1 m and tracking station 1, and inside
  the elliptic contour tested by Rsq). Uses globals fiducialCut, ShipGeo, dy, u."""
  flag = True
  if not fiducialCut: return flag
  # only makes sense for signal == HNL
  # BUG FIX: was a bare `return` (None); return an explicit bool
  if sTree.MCTrack.GetEntries()<3: return False
  # hnlkey = 2 # pythia8 cascade events
  # hnlkey = 1 # pythia8 primary events
  for hnlkey in [1,2]:
    if abs(sTree.MCTrack[hnlkey].GetPdgCode()) == 9900015:
      # first daughter of the HNL carries the decay-vertex start position
      theHNLVx = sTree.MCTrack[hnlkey+1]
      if theHNLVx.GetStartZ() < ShipGeo.vetoStation.z+100.*u.cm : flag = False
      if theHNLVx.GetStartZ() > ShipGeo.TrackStation1.z : flag = False
      X,Y = theHNLVx.GetStartX(),theHNLVx.GetStartY()
      if Rsq(X,Y,dy)>1: flag = False
  return flag
def checkFiducialVolume(sTree, tkey, dy):
  """True if every straw-tube hit of the MC particle matched to track tkey
  lies inside the fiducial ellipse (MC truth stand-in for an extrapolator)."""
  if not fiducialCut:
    return True
  mcPartKey = sTree.fitTrack2MC[tkey]
  for ahit in sTree.strawtubesPoint:
    if ahit.GetTrackID() != mcPartKey:
      continue
    if Rsq(ahit.GetX(), ahit.GetY(), dy) > 1:
      return False  # first hit outside the contour decides
  return True
def getPtruthFirst(sTree, mcPartKey):
  """Return (P, Px, Py, Pz) of the first straw-tube hit belonging to MC
  particle mcPartKey, or (-1., -1., -1., -1.) if no hit is found."""
  for ahit in sTree.strawtubesPoint:
    if ahit.GetTrackID() == mcPartKey:
      px, py, pz = ahit.GetPx(), ahit.GetPy(), ahit.GetPz()
      return ROOT.TMath.Sqrt(px * px + py * py + pz * pz), px, py, pz
  return -1., -1., -1., -1.
def access2SmearedHits():
  """Debug helper: dump the smeared hits of the global event `ev` and, via the
  parallel global containers TrackingHits / MCTracks, the matching MC truth."""
  key = 0
  for ahit in ev.SmearedHits.GetObject():
    print ahit[0],ahit[1],ahit[2],ahit[3],ahit[4],ahit[5],ahit[6]
    # follow link to true MCHit
    mchit = TrackingHits[key]
    mctrack = MCTracks[mchit.GetTrackID()]
    print mchit.GetZ(),mctrack.GetP(),mctrack.GetPdgCode()
    key+=1
def myVertex(t1, t2, PosDir):
  """Closest approach of two straight tracks.

  PosDir maps track key -> [position, direction] (callable ROOT vectors).
  Returns (X, Y, Z, |doca|): the point on track t2 closest to track t1 and
  the absolute distance between the two lines.
  """
  pos1 = ROOT.TVector3(PosDir[t1][0](0), PosDir[t1][0](1), PosDir[t1][0](2))
  dir1 = ROOT.TVector3(PosDir[t1][1](0), PosDir[t1][1](1), PosDir[t1][1](2))
  pos2 = ROOT.TVector3(PosDir[t2][0](0), PosDir[t2][0](1), PosDir[t2][0](2))
  dir2 = ROOT.TVector3(PosDir[t2][1](0), PosDir[t2][1](1), PosDir[t2][1](2))
  # distance between the two lines: d = |pq . (u x v)| / |u x v|
  pq = pos1 - pos2
  uCrossv = dir1.Cross(dir2)
  dist = pq.Dot(uCrossv) / (uCrossv.Mag() + 1E-8)
  # solve the 2x2 linear system for the closest-approach parameters:
  #   u.a - u.c + s*|u|**2 - u.v*t = 0
  #   v.a - v.c + s*v.u - t*|v|**2 = 0
  E = dir1.Dot(pos1) - dir1.Dot(pos2)
  F = dir2.Dot(pos1) - dir2.Dot(pos2)
  A = dir1.Mag2()
  B = -dir1.Dot(dir2)
  C = dir1.Dot(dir2)
  D = -dir2.Mag2()
  t = -(C * E - A * F) / (B * C - A * D)
  X = pos2.x() + dir2.x() * t
  Y = pos2.y() + dir2.y() * t
  Z = pos2.z() + dir2.z() * t
  return X, Y, Z, abs(dist)
def RedoVertexing(t1,t2):
PosDir = {}
for tr in [t1,t2]:
xx = sTree.FitTracks[tr].getFittedState()
PosDir[tr] = [xx.getPos(),xx.getDir()]
xv,yv,zv,doca = myVertex(t1,t2,PosDir)
# as we have learned, need iterative procedure
dz = 99999.
reps,states,newPosDir = {},{},{}
parallelToZ = ROOT.TVector3(0., 0., 1.)
rc = True
step = 0
while dz > 0.1:
zBefore = zv
newPos = ROOT.TVector3(xv,yv,zv)
# make a new rep for track 1,2
for tr in [t1,t2]:
xx = sTree.FitTracks[tr].getFittedState()
reps[tr] = ROOT.genfit.RKTrackRep(xx.getPDG())
states[tr] = ROOT.genfit.StateOnPlane(reps[tr])
reps[tr].setPosMom(states[tr],xx.getPos(),xx.getMom())
try:
reps[tr].extrapolateToPoint(states[tr], newPos, False)
except:
print 'SHiPAna: extrapolation did not worked'
rc = False
break
newPosDir[tr] = [reps[tr].getPos(states[tr]),reps[tr].getDir(states[tr])]
if not rc: break
xv,yv,zv,doca = myVertex(t1,t2,newPosDir)
dz = abs(zBefore-zv)
step+=1
if step > 10:
print 'abort iteration, too many steps, pos=',xv,yv,zv,' doca=',doca,'z before and dz',zBefore,dz
rc = False
break
if not rc: return xv,yv,zv,doca,-1 # extrapolation failed, makes no sense to continue
LV={}
for tr in [t1,t2]:
mom = reps[tr].getMom(states[tr])
pid = abs(states[tr].getPDG())
if pid == 2212: pid = 211
mass = PDG.GetParticle(pid).Mass()
E = ROOT.TMath.Sqrt( mass*mass + mom.Mag2() )
LV[tr].SetPxPyPzE(mom.x(),mom.y(),mom.z(),E)
HNLMom = LV[t1]+LV[t2]
return xv,yv,zv,doca,HNLMom
def fitSingleGauss(x, ba=None, be=None):
  """Fit histogram h[x] (global dict) with a Gaussian plus constant background
  between ba and be (defaulting to the full axis range)."""
  name = 'myGauss_' + x
  hist = h[x]
  myGauss = hist.GetListOfFunctions().FindObject(name)
  if not myGauss:
    # first call for this histogram: create and seed the fit function
    if not ba: ba = hist.GetBinCenter(1)
    if not be: be = hist.GetBinCenter(hist.GetNbinsX())
    bw = hist.GetBinWidth(1)
    formula = '[0]*' + str(bw) + '/([2]*sqrt(2*pi))*exp(-0.5*((x-[1])/[2])**2)+[3]'
    myGauss = ROOT.TF1(name, formula, 4)
    seeds = [('Signal', hist.GetEntries() * 0.3),
             ('Mean', hist.GetMean()),
             ('Sigma', hist.GetRMS()),
             ('bckgr', 1.)]
    for ipar, (pname, pval) in enumerate(seeds):
      myGauss.SetParameter(ipar, pval)
      myGauss.SetParName(ipar, pname)
  hist.Fit(myGauss, '', '', ba, be)
def match2HNL(p):
  """True if both daughters of candidate p trace back (via MC mother links in
  the global sTree) to one and the same HNL (PDG +/-9900015)."""
  hnlKey = []
  for t in [p.GetDaughter(0), p.GetDaughter(1)]:
    mcp = sTree.fitTrack2MC[t]
    # walk up the ancestry until an HNL or the primary (-1) is reached
    while mcp > -0.5:
      mother = sTree.MCTrack[mcp]
      if abs(mother.GetPdgCode()) == 9900015:
        hnlKey.append(mcp)
        break
      mcp = mother.GetMotherId()
  return len(hnlKey) == 2 and hnlKey[0] == hnlKey[1]
def ecalCluster2MC(aClus):
# return MC track most contributing, and its fraction of energy
trackid = ROOT.Long()
energy_dep = ROOT.Double()
mcLink = {}
for i in range( aClus.Size() ):
mccell = ecalStructure.GetHitCell(aClus.CellNum(i)) # Get i'th cell of the cluster.
for n in range( mccell.TrackEnergySize()):
mccell.GetTrackEnergySlow(n, | |
import numpy as np
import ase2 as ase
import ase2.io as aio
from concurrent.futures import ProcessPoolExecutor
import time
import ase2.calculators.dftb as adftb
import qml as qml
import qml.representations as qmlrep
import scipy.spatial as sps
# Python library used for the simulation
class Trajectory:
    """Container for a simulation trajectory: sampled positions, their
    energies, and the settings that generated them."""

    def __init__(self, position_traj=None, energy_traj=None,
                 generation_details=None):
        # BUG FIX: the original used mutable default arguments ([]), so every
        # Trajectory() created without arguments shared the same two lists.
        self.position_traj = [] if position_traj is None else position_traj
        self.energy_traj = [] if energy_traj is None else energy_traj
        self.generation_details = generation_details

    def extend(self, traj):
        """Append another Trajectory produced by the same simulation settings.

        Raises ValueError if traj is not a Trajectory or its
        generation_details differ from ours.
        """
        if type(traj) is not type(self):
            raise ValueError('The input is not a trajectory')
        if traj.generation_details != self.generation_details:
            raise ValueError(
                'The trajectories to merge come from different simulations.')
        self.position_traj.extend(traj.position_traj)
        self.energy_traj.extend(traj.energy_traj)
class MCTrajectory:
    """Monte-Carlo trajectory: sampled positions/energies plus per-step
    bookkeeping of which move type was attempted and whether it was accepted."""

    def __init__(self, position_traj=None, energy_traj=None, moves_used=None,
                 moves_accepted=None, generation_details=None,
                 flush_prefix=None):
        # None sentinels avoid the shared-mutable-default pitfall.
        self.position_traj = [] if position_traj is None else position_traj
        self.energy_traj = [] if energy_traj is None else energy_traj
        self.generation_details = {} if generation_details is None else generation_details
        self.moves_used = [] if moves_used is None else moves_used
        self.moves_accepted = [] if moves_accepted is None else moves_accepted

    def extend(self, traj):
        """Append another MCTrajectory's samples and move bookkeeping.

        Raises ValueError if traj is not an MCTrajectory. (A check that both
        trajectories share generation_details was present but disabled.)
        """
        if type(traj) is not type(self):
            raise ValueError('The input is not a trajectory')
        self.position_traj.extend(traj.position_traj)
        self.energy_traj.extend(traj.energy_traj)
        self.moves_used.extend(traj.moves_used)
        self.moves_accepted.extend(traj.moves_accepted)

    def mc_probabilities(self):
        """Acceptance probability per move type in generation_details['move_list'].

        Returns 0.0 for a move type that was never attempted.
        (BUG FIX: the original divided by zero in that case.)
        """
        probabilities = []
        for i in range(len(self.generation_details['move_list'])):
            accepted = [acc for move, acc in
                        zip(self.moves_used, self.moves_accepted) if move == i]
            if accepted:
                probabilities.append(sum(accepted) / len(accepted))
            else:
                probabilities.append(0.0)
        return probabilities

    def flush(self, flush_prefix):
        """Append the buffered samples to '<prefix>_*.dat/.xyz' files on disk
        and reset this trajectory (keeping generation_details)."""
        if len(self.moves_used) > 0:
            f = open('{}_mc_moves.dat'.format(flush_prefix), 'ab')
            np.savetxt(f, np.array(
                list(zip(self.moves_used, self.moves_accepted))), fmt='%i')
            f.close()
            f = open('{}_energies.dat'.format(flush_prefix), 'ab')
            np.savetxt(f, np.array(self.energy_traj), fmt='%.6f')
            f.close()
            for struct in self.position_traj:
                aio.write('{}_structures.xyz'.format(flush_prefix),
                          ase.Atoms(self.generation_details['atoms'],
                                    positions=struct), append=True)
            self.__init__(generation_details=self.generation_details,
                          flush_prefix=flush_prefix)
class DftbEnergy:
    """Energy evaluator backed by an ASE DFTB+ calculator running in a
    dedicated directory."""

    def __init__(self, atoms, directory, **kwargs):
        self.dftb_kwargs = kwargs
        self.atoms = ase.Atoms(atoms)
        self.directory = directory
        self.calc = adftb.Dftb(**kwargs)
        self.calc.directory = directory

    def energy(self, structure):
        """Single-point energy of `structure` (cartesian positions), in kcal/mol."""
        self.atoms.positions = structure
        self.calc.calculate(self.atoms)
        # NOTE(review): 23 is a coarse eV->kcal/mol factor (exact: 23.0609) -- confirm intended
        ev_to_kcalmol = 23
        return self.calc.results['energy'] * ev_to_kcalmol

    def force(self, structure):
        """Not implemented."""
        pass
class MixedPotential:
    """Linear mixture of two potentials: (1 - alpha) * E1 + alpha * E2."""

    def __init__(self, energy_func1, energy_func2, alpha):
        self.energy_func1 = energy_func1
        self.energy_func2 = energy_func2
        # BUG FIX: alpha was accepted but never stored, so energy() raised
        # AttributeError on self.alpha.
        self.alpha = alpha

    def energy(self, structure):
        """Mixed energy of `structure`."""
        return self.energy_func1(structure) * (1 - self.alpha) + \
            self.energy_func2(structure) * self.alpha
class KRR_potential:
    """Kernel-ridge-regression potential, optionally delta-learned on top of
    a baseline energy function."""

    def __init__(self, representation_generator,
                 training_representations, alpha_values,
                 kernel, baseline=None, delta_scale=1):
        self.baseline = baseline
        self.representation_generator = representation_generator
        self.alpha_values = alpha_values
        self.kernel = kernel
        self.training_representations = training_representations
        self.delta_scale = delta_scale

    def energy(self, structure):
        """Predicted energy: baseline(structure) + delta_scale * k(x, X_train) . alpha."""
        base = self.baseline(structure) if self.baseline is not None else 0
        rep = self.representation_generator.generate(structure)
        kernel_row = self.kernel(np.expand_dims(rep, axis=0),
                                 self.training_representations)
        correction = self.delta_scale * np.dot(kernel_row, self.alpha_values)
        return base + correction[0]
class SLATMGenerator:
    """Generates SLATM representations (via qml) for a fixed set of atoms."""

    def __init__(self, atoms):
        self.atoms = atoms
        numbers = ase.Atoms(symbols=atoms).get_atomic_numbers()
        self.atomic_numbers = numbers
        # many-body types are fixed by the atomic composition
        self.mbtypes = qml.representations.get_slatm_mbtypes([numbers])

    def generate(self, structure):
        """SLATM vector for the given cartesian coordinates."""
        return qmlrep.generate_slatm(coordinates=structure,
                                     nuclear_charges=self.atomic_numbers,
                                     mbtypes=self.mbtypes)
class CMGenerator:
    """Generates Coulomb-matrix representations (via qml) for a fixed set of atoms."""

    def __init__(self, atoms):
        self.atoms = atoms
        self.nuclear_charges = ase.Atoms(symbols=atoms).get_atomic_numbers()

    def generate(self, structure):
        """Coulomb-matrix vector for the given cartesian coordinates."""
        size = len(self.atoms)
        return qmlrep.generate_coulomb_matrix(nuclear_charges=self.nuclear_charges,
                                              coordinates=structure,
                                              size=size)
class GaussianKernel:
    """Exponential kernel exp(-||data - x|| / sigma) with a pluggable norm."""

    def __init__(self, sigma, norm=np.linalg.norm):
        self.norm = norm
        # BUG FIX: was the bare expression `self.sigma`, which raised
        # AttributeError in the constructor and never stored sigma.
        self.sigma = sigma

    def build(self, x, data):
        """Kernel value between x and data (arrays compatible with `norm`)."""
        return np.exp(- (1 / self.sigma) * self.norm(data - x))
class GaussianVar:
    """Normal random-number source with fixed mean (loc) and spread (var)."""

    def __init__(self, loc, var):
        self.loc = loc
        self.var = var

    def generate(self, size):
        """Draw `size` samples from N(loc, var) using numpy's global RNG."""
        return np.random.normal(loc=self.loc, scale=self.var, size=size)
class Reservoir:
    """Fixed pool of pre-computed structures sampled uniformly at random;
    drop-in replacement for a simulation replica."""

    def __init__(self, structures, energies, temperature, energy_func,
                 kb=0.0019872041):
        self.structures = structures
        self.energies = energies
        self.size = len(energies)
        self.temperature = temperature
        self.beta = (kb * self.temperature) ** - 1  # inverse temperature
        self.energy_func = energy_func

    def simulation_type(self):
        """An empty trajectory tagged with this reservoir's settings."""
        return MCTrajectory(generation_details=self.simulation_details())

    def simulation_details(self):
        """Settings dict identifying this reservoir."""
        return {'temperature': self.temperature,
                'energy_func': self.energy_func}

    def flush(self):
        """Nothing to flush for a reservoir."""
        pass

    def run(self, *args):
        """Return [empty trajectory, random stored structure, its energy]."""
        np.random.seed()  # re-seed per worker process
        empty_traj = MCTrajectory(generation_details=self.simulation_details())
        pick = np.random.choice(np.arange(self.size))
        return [empty_traj, self.structures[pick], self.energies[pick]]
class MCSimulation:
    """Metropolis Monte-Carlo driver over a list of trial moves."""

    def __init__(self, energy_func, temperature, atoms,
                 move_list, move_weight_list=None, kb=0.0019872041):
        self.temperature = temperature
        self.beta = (kb * self.temperature) ** - 1  # inverse temperature
        self.atoms = atoms
        self.energy_func = energy_func
        self.move_list = move_list
        self.move_weight_list = move_weight_list

    def simulation_details(self):
        """All simulation settings (the instance attribute dict)."""
        return vars(self)

    def simulation_type(self):
        """An empty trajectory tagged with this simulation's settings."""
        return MCTrajectory(generation_details=self.simulation_details())

    def _advance(self, old_pos, old_ener):
        """One Metropolis step; returns (pos, ener, bias, move_idx, accepted)."""
        move_idx = np.random.choice(
            list(range(len(self.move_list))), p=self.move_weight_list)
        chosen = self.move_list[move_idx]
        new_pos, new_ener, bias = chosen.move(
            old_position=old_pos, old_energy=old_ener, beta=self.beta)
        if new_ener is None:
            new_ener = self.energy_func(new_pos)
        # Metropolis criterion, including the proposal bias of the move
        new_weight = np.exp(- self.beta * new_ener)
        old_weight = np.exp(- self.beta * old_ener)
        prob = min([1, bias * new_weight / old_weight])
        accepted = np.random.rand() < prob
        if accepted:
            return new_pos, new_ener, bias, move_idx, accepted
        return old_pos, old_ener, bias, move_idx, accepted

    def run(self, init_struct, steps, stride=10, init_ener=None,
            return_last=False):
        """Run `steps` MC steps from init_struct, recording every `stride`-th
        frame. Returns an MCTrajectory, or [traj, last_pos, last_ener] when
        return_last is True."""
        np.random.seed()  # re-seed per worker process
        pos = init_struct
        ener = self.energy_func(pos) if init_ener is None else init_ener
        position_traj = [pos]  # the initial frame is always recorded
        energy_traj = [ener]
        moves_used = []
        moves_accepted = []
        bias_traj = []
        for step_no in range(1, steps):
            pos, ener, bias, move_idx, accepted = self._advance(pos, ener)
            bias_traj.append(bias)
            moves_used.append(move_idx)
            moves_accepted.append(accepted)
            if step_no % stride == 0:
                position_traj.append(pos)
                energy_traj.append(ener)
        traj = MCTrajectory(position_traj, energy_traj, moves_used,
                            moves_accepted, self.simulation_details())
        if return_last is True:
            return [traj, pos, ener]
        return traj
class ReplicaExchangeSimulation:
    """Replica-exchange driver: runs `num_reps` simulations in parallel (one
    process each) and periodically attempts configuration swaps between
    neighbouring replicas.

    Relies on the module-level helpers `smap` and `run_simulation` (picklable
    wrappers for the process pool) and on `mc_prob` for the exchange
    acceptance probability.
    """

    def __init__(self, num_reps, simulations, init_structs, stride, rep_steps,
                 reservoir=False, init_eners=None, directory='.'):
        self.num_reps = num_reps
        # BUG FIX: both checks used `raise('...')`, which raises
        # "TypeError: exceptions must derive from BaseException" in Python 3.
        if num_reps % 2 != 0:
            raise ValueError('Number of replicas must be even')
        if len(simulations) != self.num_reps:
            raise ValueError('Wrong number of temperatures')
        self.temperatures = [sim.temperature for sim in simulations]
        self.energy_funcs = [sim.energy_func for sim in simulations]
        self.simulations = simulations
        self.init_rep_structs = init_structs
        self.par_exec = ProcessPoolExecutor(max_workers=num_reps)
        if init_eners is None:
            # evaluate the starting energies in parallel
            self.init_rep_eners = list(self.par_exec.map(
                smap, self.energy_funcs, self.init_rep_structs))
        else:
            self.init_rep_eners = init_eners
        self.rep_index = np.arange(self.num_reps)
        self.even_sims = self.rep_index[::2]
        # BUG FIX: odd_sims was rep_index[::2], i.e. a duplicate of even_sims
        self.odd_sims = self.rep_index[1::2]
        # per neighbouring pair (i, i+1 mod N): list of accept/reject outcomes
        self.accepted_exchanges = {(i, (i + 1) % self.num_reps):
                                   [] for i in range(self.num_reps)}
        self.strides = [stride for i in range(num_reps)]
        self.rep_steps = rep_steps
        for stride in self.strides:
            if self.rep_steps % stride != 0:
                raise ValueError('Rep_steps must be multiple of stride')
        self.rep_stepss = [rep_steps for i in range(self.num_reps)]
        self.directory = directory

    def run(self, num_exchanges):
        """Alternate `num_exchanges` rounds of per-replica MC dynamics and
        replica-exchange attempts, flushing trajectories every other round."""
        trajectories = [sim.simulation_type() for sim in self.simulations]
        for i in range(num_exchanges):
            t0 = time.time()
            # generate dynamics: run the individual simulations in parallel
            return_last = [True for l in range(self.num_reps)]
            simulation_results = list(
                self.par_exec.map(run_simulation, self.simulations,
                                  self.init_rep_structs, self.rep_stepss,
                                  self.strides, self.init_rep_eners,
                                  return_last))
            rep_trajs = [res[0] for res in simulation_results]
            exchange_structs = [res[1] for res in simulation_results]
            exchange_eners = [res[2] for res in simulation_results]
            for k in range(self.num_reps):
                trajectories[k].extend(rep_trajs[k])
            # attempt exchanges and carry the (possibly swapped) states over
            new_structs, new_eners = self._replica_exchange(
                exchange_structs, exchange_eners)
            self.init_rep_structs = new_structs
            self.init_rep_eners = new_eners
            # additive smoothing keeps the ratio defined for empty lists
            self.exchange_probabilities = {key: (0.001 + sum(val)) / (len(
                val) + 0.001) for key, val in self.accepted_exchanges.items()}
            if i % 2 == 1:
                # flush buffered trajectory data to disk every other round
                for rep, traj in enumerate(trajectories):
                    traj.flush(flush_prefix=(
                        self.directory + '/hrem.rep{}_'.format(rep)))
            t1 = time.time()
            with open("exchange.txt", "a") as myfile:
                myfile.write(
                    'Exchange {0}, step {1}, time interval {2:.3} \n'.format(
                        i + 1, (i + 1) * self.rep_steps, t1 - t0))
                [myfile.write('{0}: {1:.3}\n'.format(
                    x, y)) for x, y in self.exchange_probabilities.items()]

    def _replica_exchange(self, exchange_structs, exchange_eners):
        """Attempt pairwise swaps between neighbouring replicas (random
        pairing direction) and return the resulting (structs, energies)."""
        shift = np.random.choice([1, -1])
        rep_index = np.arange(self.num_reps)
        group1 = rep_index[::2]
        group2 = rep_index[1::2]
        # build the proposed permutation of replica states
        if shift == 1:
            ex_index = np.vstack((group2, group1)).flatten(order='F')
        else:
            ex_index = np.roll(
                np.vstack((group1, np.roll(group2, 1))).flatten(
                    order='F'), -1)
        pairs = list(zip(group1, ex_index[::2]))
        old_structs = exchange_structs
        old_energies = exchange_eners
        new_structs = [old_structs[i] for i in ex_index]
        # re-evaluate the swapped structures under each replica's potential
        new_energies = list(self.par_exec.map(
            smap, self.energy_funcs, new_structs))
        with open("log.txt", "a") as myfile:
            myfile.write('================================')
            myfile.write('Exchange')
            myfile.write('================================')
        for pair in pairs:
            rep0 = self.simulations[pair[0]]
            rep1 = self.simulations[pair[1]]
            old_e0 = old_energies[pair[0]]
            old_e1 = old_energies[pair[1]]
            new_e0 = new_energies[pair[0]]
            new_e1 = new_energies[pair[1]]
            # dimensionless weights beta_i * E_i before and after the swap
            old_weight = rep0.beta * old_e0 + rep1.beta * old_e1
            new_weight = rep0.beta * new_e0 + rep1.beta * new_e1
            prob = mc_prob(weight_new=new_weight, weight_old=old_weight)
            accepted = np.random.rand() < prob
            with open("log.txt", "a") as myfile:
                myfile.write('\n')
                myfile.write('Rep A: ')
                myfile.write('{}'.format(pair[0]))
                myfile.write('\n')
                myfile.write('Old Energy: ')
                myfile.write('{0:.5f} '.format(old_e0))
                myfile.write('\n')
                myfile.write('New Energy: ')
                myfile.write('{0:.5f} '.format(new_e0))
                myfile.write('\n')
                myfile.write('beta rep A: ')
                myfile.write('{0:.5f} '.format(rep0.beta))
                myfile.write('\n')
                myfile.write('Rep B: ')
                myfile.write('{}'.format(pair[1]))
                myfile.write('\n')
                myfile.write('Old Energy: ')
                myfile.write('{0:.5f} '.format(old_e1))
                myfile.write('\n')
                myfile.write('New Energy: ')
                myfile.write('{0:.5f} '.format(new_e1))
                myfile.write('\n')
                myfile.write('beta rep B: ')
                myfile.write('{0:.5f} '.format(rep1.beta))
                myfile.write('\n')
                myfile.write('Old weight: ')
                myfile.write('{0:.5f} '.format(old_weight))
                myfile.write('\n')
                myfile.write('New weight: ')
                myfile.write('{0:.5f} '.format(new_weight))
                myfile.write('\n')
                myfile.write('Exchange Prob: ')
                myfile.write('{0:.5f} '.format(prob))
                myfile.write('Accepted: ')
                myfile.write('{} '.format(bool(accepted)))
                myfile.write('\n')
                myfile.write('---------------------------------------------')
                myfile.write('\n')
            if shift == 1:
                self.accepted_exchanges[(pair[0], pair[1])].append(accepted)
            else:
                self.accepted_exchanges[(pair[1], pair[0])].append(accepted)
            if not accepted:
                # roll back the swap for this pair
                new_structs[pair[0]] = old_structs[pair[0]]
                new_structs[pair[1]] = old_structs[pair[1]]
                new_energies[pair[0]] = old_energies[pair[0]]
                new_energies[pair[1]] = old_energies[pair[1]]
        return new_structs, new_energies
def mc_accept(weight_new, weight_old):
    """Metropolis accept/reject for dimensionless weights (beta * E)."""
    ratio = np.exp(weight_old - weight_new)
    return bool(ratio > np.random.rand())
def mc_prob(weight_new, weight_old):
prob | |
not None:
self.prefix.prefixlen = value
prefix_length = property(fset=_set_prefix_length)
def get_status_class(self):
return PrefixStatusChoices.CSS_CLASSES.get(self.status)
def get_duplicates(self):
return Prefix.objects.filter(vrf=self.vrf, prefix=str(self.prefix)).exclude(pk=self.pk)
def get_child_prefixes(self):
"""
Return all Prefixes within this Prefix and VRF. If this Prefix is a container in the global table, return child
Prefixes belonging to any VRF.
"""
if self.vrf is None and self.status == PrefixStatusChoices.STATUS_CONTAINER:
return Prefix.objects.filter(prefix__net_contained=str(self.prefix))
else:
return Prefix.objects.filter(prefix__net_contained=str(self.prefix), vrf=self.vrf)
def get_child_ips(self):
"""
Return all IPAddresses within this Prefix and VRF. If this Prefix is a container in the global table, return
child IPAddresses belonging to any VRF.
"""
if self.vrf is None and self.status == PrefixStatusChoices.STATUS_CONTAINER:
return IPAddress.objects.filter(address__net_host_contained=str(self.prefix))
else:
return IPAddress.objects.filter(address__net_host_contained=str(self.prefix), vrf=self.vrf)
def get_available_prefixes(self):
"""
Return all available Prefixes within this prefix as an IPSet.
"""
prefix = netaddr.IPSet(self.prefix)
child_prefixes = netaddr.IPSet([child.prefix for child in self.get_child_prefixes()])
available_prefixes = prefix - child_prefixes
return available_prefixes
def get_available_ips(self):
"""
Return all available IPs within this prefix as an IPSet.
"""
prefix = netaddr.IPSet(self.prefix)
child_ips = netaddr.IPSet([ip.address.ip for ip in self.get_child_ips()])
available_ips = prefix - child_ips
# All IP addresses within a pool are considered usable
if self.is_pool:
return available_ips
# All IP addresses within a point-to-point prefix (IPv4 /31 or IPv6 /127) are considered usable
if (
self.prefix.version == 4 and self.prefix.prefixlen == 31 # RFC 3021
) or (
self.prefix.version == 6 and self.prefix.prefixlen == 127 # RFC 6164
):
return available_ips
# Omit first and last IP address from the available set
available_ips -= netaddr.IPSet([
netaddr.IPAddress(self.prefix.first),
netaddr.IPAddress(self.prefix.last),
])
return available_ips
def get_first_available_prefix(self):
"""
Return the first available child prefix within the prefix (or None).
"""
available_prefixes = self.get_available_prefixes()
if not available_prefixes:
return None
return available_prefixes.iter_cidrs()[0]
def get_first_available_ip(self):
"""
Return the first available IP within the prefix (or None).
"""
available_ips = self.get_available_ips()
if not available_ips:
return None
return '{}/{}'.format(next(available_ips.__iter__()), self.prefix.prefixlen)
def get_utilization(self):
"""
Determine the utilization of the prefix and return it as a percentage. For Prefixes with a status of
"container", calculate utilization based on child prefixes. For all others, count child IP addresses.
"""
if self.status == PrefixStatusChoices.STATUS_CONTAINER:
queryset = Prefix.objects.filter(
prefix__net_contained=str(self.prefix),
vrf=self.vrf
)
child_prefixes = netaddr.IPSet([p.prefix for p in queryset])
return int(float(child_prefixes.size) / self.prefix.size * 100)
else:
# Compile an IPSet to avoid counting duplicate IPs
child_count = netaddr.IPSet([ip.address.ip for ip in self.get_child_ips()]).size
prefix_size = self.prefix.size
if self.prefix.version == 4 and self.prefix.prefixlen < 31 and not self.is_pool:
prefix_size -= 2
return int(float(child_count) / prefix_size * 100)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class IPAddress(ChangeLoggedModel, CustomFieldModel):
"""
An IPAddress represents an individual IPv4 or IPv6 address and its mask. The mask length should match what is
configured in the real world. (Typically, only loopback interfaces are configured with /32 or /128 masks.) Like
Prefixes, IPAddresses can optionally be assigned to a VRF. An IPAddress can optionally be assigned to an Interface.
Interfaces can have zero or more IPAddresses assigned to them.
An IPAddress can also optionally point to a NAT inside IP, designating itself as a NAT outside IP. This is useful,
for example, when mapping public addresses to private addresses. When an Interface has been assigned an IPAddress
which has a NAT outside IP, that Interface's Device can use either the inside or outside IP as its primary IP.
"""
address = IPAddressField(
help_text='IPv4 or IPv6 address (with mask)'
)
vrf = models.ForeignKey(
to='ipam.VRF',
on_delete=models.PROTECT,
related_name='ip_addresses',
blank=True,
null=True,
verbose_name='VRF'
)
tenant = models.ForeignKey(
to='tenancy.Tenant',
on_delete=models.PROTECT,
related_name='ip_addresses',
blank=True,
null=True
)
status = models.CharField(
max_length=50,
choices=IPAddressStatusChoices,
default=IPAddressStatusChoices.STATUS_ACTIVE,
help_text='The operational status of this IP'
)
role = models.CharField(
max_length=50,
choices=IPAddressRoleChoices,
blank=True,
help_text='The functional role of this IP'
)
assigned_object_type = models.ForeignKey(
to=ContentType,
limit_choices_to=IPADDRESS_ASSIGNMENT_MODELS,
on_delete=models.PROTECT,
related_name='+',
blank=True,
null=True
)
assigned_object_id = models.PositiveIntegerField(
blank=True,
null=True
)
assigned_object = GenericForeignKey(
ct_field='assigned_object_type',
fk_field='assigned_object_id'
)
nat_inside = models.OneToOneField(
to='self',
on_delete=models.SET_NULL,
related_name='nat_outside',
blank=True,
null=True,
verbose_name='NAT (Inside)',
help_text='The IP for which this address is the "outside" IP'
)
dns_name = models.CharField(
max_length=255,
blank=True,
validators=[DNSValidator],
verbose_name='DNS Name',
help_text='Hostname or FQDN (not case-sensitive)'
)
description = models.CharField(
max_length=200,
blank=True
)
tags = TaggableManager(through=TaggedItem)
objects = IPAddressManager()
csv_headers = [
'address', 'vrf', 'tenant', 'status', 'role', 'assigned_object_type', 'assigned_object_id', 'is_primary',
'dns_name', 'description',
]
clone_fields = [
'vrf', 'tenant', 'status', 'role', 'description',
]
class Meta:
ordering = ('address', 'pk') # address may be non-unique
verbose_name = 'IP address'
verbose_name_plural = 'IP addresses'
def __str__(self):
return str(self.address)
def get_absolute_url(self):
return reverse('ipam:ipaddress', args=[self.pk])
def get_duplicates(self):
return IPAddress.objects.filter(
vrf=self.vrf,
address__net_host=str(self.address.ip)
).exclude(pk=self.pk)
def clean(self):
super().clean()
if self.address:
# /0 masks are not acceptable
if self.address.prefixlen == 0:
raise ValidationError({
'address': "Cannot create IP address with /0 mask."
})
# Enforce unique IP space (if applicable)
if self.role not in IPADDRESS_ROLES_NONUNIQUE and ((
self.vrf is None and settings.ENFORCE_GLOBAL_UNIQUE
) or (
self.vrf and self.vrf.enforce_unique
)):
duplicate_ips = self.get_duplicates()
if duplicate_ips:
raise ValidationError({
'address': "Duplicate IP address found in {}: {}".format(
"VRF {}".format(self.vrf) if self.vrf else "global table",
duplicate_ips.first(),
)
})
# Check for primary IP assignment that doesn't match the assigned device/VM
if self.pk:
device = Device.objects.filter(Q(primary_ip4=self) | Q(primary_ip6=self)).first()
if device:
if getattr(self.assigned_object, 'device', None) != device:
raise ValidationError({
'interface': f"IP address is primary for device {device} but not assigned to it!"
})
vm = VirtualMachine.objects.filter(Q(primary_ip4=self) | Q(primary_ip6=self)).first()
if vm:
if getattr(self.assigned_object, 'virtual_machine', None) != vm:
raise ValidationError({
'vminterface': f"IP address is primary for virtual machine {vm} but not assigned to it!"
})
# Validate IP status selection
if self.status == IPAddressStatusChoices.STATUS_SLAAC and self.family != 6:
raise ValidationError({
'status': "Only IPv6 addresses can be assigned SLAAC status"
})
def save(self, *args, **kwargs):
# Force dns_name to lowercase
self.dns_name = self.dns_name.lower()
super().save(*args, **kwargs)
def to_objectchange(self, action):
# Annotate the assigned object, if any
return ObjectChange(
changed_object=self,
object_repr=str(self),
action=action,
related_object=self.assigned_object,
object_data=serialize_object(self)
)
def to_csv(self):
# Determine if this IP is primary for a Device
is_primary = False
if self.address.version == 4 and getattr(self, 'primary_ip4_for', False):
is_primary = True
elif self.address.version == 6 and getattr(self, 'primary_ip6_for', False):
is_primary = True
obj_type = None
if self.assigned_object_type:
obj_type = f'{self.assigned_object_type.app_label}.{self.assigned_object_type.model}'
return (
self.address,
self.vrf.name if self.vrf else None,
self.tenant.name if self.tenant else None,
self.get_status_display(),
self.get_role_display(),
obj_type,
self.assigned_object_id,
is_primary,
self.dns_name,
self.description,
)
@property
def family(self):
if self.address:
return self.address.version
return None
    def _set_mask_length(self, value):
        """
        Expose the IPNetwork object's prefixlen attribute on the parent model so that it can be manipulated directly,
        e.g. for bulk editing.
        """
        # Guard against an unset address; prefixlen only exists on an IPNetwork.
        if self.address is not None:
            self.address.prefixlen = value
    # Write-only property: no getter, since the value lives on self.address.
    mask_length = property(fset=_set_mask_length)
def get_status_class(self):
return IPAddressStatusChoices.CSS_CLASSES.get(self.status)
def get_role_class(self):
return IPAddressRoleChoices.CSS_CLASSES.get(self.role)
class VLANGroup(ChangeLoggedModel):
    """
    A VLAN group is an arbitrary collection of VLANs within which VLAN IDs and names must be unique.
    """
    name = models.CharField(
        max_length=100
    )
    slug = models.SlugField(
        max_length=100
    )
    site = models.ForeignKey(
        to='dcim.Site',
        on_delete=models.PROTECT,
        related_name='vlan_groups',
        blank=True,
        null=True
    )
    description = models.CharField(
        max_length=200,
        blank=True
    )

    objects = RestrictedQuerySet.as_manager()

    csv_headers = ['name', 'slug', 'site', 'description']

    class Meta:
        ordering = ('site', 'name', 'pk')  # (site, name) may be non-unique
        unique_together = [
            ['site', 'name'],
            ['site', 'slug'],
        ]
        verbose_name = 'VLAN group'
        verbose_name_plural = 'VLAN groups'

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        """Return the URL of the group's VLAN list view."""
        return reverse('ipam:vlangroup_vlans', args=[self.pk])

    def to_csv(self):
        """Return the CSV-export row for this VLAN group."""
        return (
            self.name,
            self.slug,
            self.site.name if self.site else None,
            self.description,
        )

    def get_next_available_vid(self):
        """
        Return the first available VLAN ID (1-4094) in the group, or None if all are taken.
        """
        # Materialize the used IDs into a set once: a single query and O(1)
        # membership tests, instead of probing a lazy queryset per candidate.
        vlan_ids = set(VLAN.objects.filter(group=self).values_list('vid', flat=True))
        return next((vid for vid in range(1, 4095) if vid not in vlan_ids), None)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class VLAN(ChangeLoggedModel, CustomFieldModel):
"""
A VLAN is a distinct layer two forwarding domain identified by a 12-bit integer (1-4094). Each VLAN must be assigned
to a Site, however VLAN IDs need not be unique within a Site. A VLAN may optionally be assigned to a VLANGroup,
    within which all VLAN IDs and names must be unique.
Like Prefixes, each VLAN is assigned an operational status and optionally a user-defined Role. A VLAN can have zero
or more Prefixes assigned to it.
"""
site = models.ForeignKey(
to='dcim.Site',
on_delete=models.PROTECT,
related_name='vlans',
blank=True,
null=True
)
group = models.ForeignKey(
to='ipam.VLANGroup',
on_delete=models.PROTECT,
related_name='vlans',
blank=True,
null=True
)
vid = models.PositiveSmallIntegerField(
verbose_name='ID',
validators=[MinValueValidator(1), MaxValueValidator(4094)]
)
name = models.CharField(
max_length=64
)
| |
assert ref_sop_item.ReferencedSOPClassUID == sop_class_uid
assert ref_sop_item.ReferencedSOPInstanceUID == sop_instance_uid
assert ref_sop_item.ReferencedSegmentNumber == segment_number
with pytest.raises(AttributeError):
ref_sop_item.ReferencedFrameNumber
def test_scoord_item_construction_point(self):
name = codes.DCM.ImageRegion
graphic_type = GraphicTypeValues.POINT
graphic_data = np.array([[1.0, 1.0]])
pixel_origin_interpretation = 'FRAME'
i = ScoordContentItem(
name=name,
graphic_type=graphic_type,
graphic_data=graphic_data,
pixel_origin_interpretation=pixel_origin_interpretation
)
assert i.ValueType == 'SCOORD'
assert i.ConceptNameCodeSequence[0] == name
assert i.GraphicType == graphic_type.value
assert i.GraphicData == graphic_data.flatten().tolist()
assert i.PixelOriginInterpretation == pixel_origin_interpretation
with pytest.raises(AttributeError):
i.FiducialUID
def test_scoord_item_construction_circle(self):
name = codes.DCM.ImageRegion
graphic_type = GraphicTypeValues.CIRCLE
graphic_data = np.array([[1.0, 1.0], [2.0, 2.0]])
pixel_origin_interpretation = 'VOLUME'
i = ScoordContentItem(
name=name,
graphic_type=graphic_type,
graphic_data=graphic_data,
pixel_origin_interpretation=pixel_origin_interpretation
)
assert i.ValueType == 'SCOORD'
assert i.ConceptNameCodeSequence[0] == name
assert i.GraphicType == graphic_type.value
assert np.all(i.GraphicData[:2] == graphic_data[0, :])
assert np.all(i.GraphicData[2:4] == graphic_data[1, :])
assert i.PixelOriginInterpretation == pixel_origin_interpretation
with pytest.raises(AttributeError):
i.FiducialUID
def test_scoord3d_item_construction_point(self):
name = codes.DCM.ImageRegion
graphic_type = GraphicTypeValues3D.POINT
graphic_data = np.array([[1.0, 1.0, 1.0]])
frame_of_reference_uid = '1.2.3'
i = Scoord3DContentItem(
name=name,
graphic_type=graphic_type,
graphic_data=graphic_data,
frame_of_reference_uid=frame_of_reference_uid
)
assert i.ValueType == 'SCOORD3D'
assert i.ConceptNameCodeSequence[0] == name
assert i.GraphicType == graphic_type.value
assert np.all(i.GraphicData == graphic_data[0, :])
assert i.ReferencedFrameOfReferenceUID == frame_of_reference_uid
with pytest.raises(AttributeError):
i.FiducialUID
def test_scoord3d_item_construction_polygon(self):
name = codes.DCM.ImageRegion
graphic_type = GraphicTypeValues3D.POLYGON
graphic_data = np.array([
[1.0, 1.0, 1.0], [2.0, 2.0, 1.0], [1.0, 1.0, 1.0]
])
frame_of_reference_uid = '1.2.3'
i = Scoord3DContentItem(
name=name,
graphic_type=graphic_type,
graphic_data=graphic_data,
frame_of_reference_uid=frame_of_reference_uid
)
assert i.ValueType == 'SCOORD3D'
assert i.ConceptNameCodeSequence[0] == name
assert i.GraphicType == graphic_type.value
assert np.all(i.GraphicData[:3] == graphic_data[0, :])
assert np.all(i.GraphicData[3:6] == graphic_data[1, :])
assert np.all(i.GraphicData[6:9] == graphic_data[2, :])
assert i.ReferencedFrameOfReferenceUID == frame_of_reference_uid
with pytest.raises(AttributeError):
i.FiducialUID
class TestContentSequence(unittest.TestCase):
    # Placeholder suite: no tests defined yet; setUp only delegates to the base class.
    def setUp(self):
        super().setUp()
class TestSubjectContextDevice(unittest.TestCase):

    """Tests for SubjectContextDevice construction."""

    def setUp(self):
        super().setUp()
        self._name = '<NAME>'
        self._uid = generate_uid()
        self._manufacturer = 'Foomakers Inc.'
        self._model_name = '<NAME> II'
        self._serial_number = '987654321'
        self._physical_location = 'Planet Foo'

    def test_construction_basic(self):
        # Only the mandatory device name yields a single content item.
        context = SubjectContextDevice(name=self._name)
        assert len(context) == 1
        assert context[0].ConceptNameCodeSequence[0].CodeValue == \
            codes.DCM.DeviceSubjectName.value
        assert context[0].TextValue == self._name

    def test_construction_all(self):
        context = SubjectContextDevice(
            name=self._name,
            uid=self._uid,
            manufacturer_name=self._manufacturer,
            model_name=self._model_name,
            serial_number=self._serial_number,
            physical_location=self._physical_location
        )
        # One item per supplied attribute, in a fixed order.
        expected = [
            (codes.DCM.DeviceSubjectName, 'TextValue', self._name),
            (codes.DCM.DeviceSubjectUID, 'UID', self._uid),
            (codes.DCM.DeviceSubjectManufacturer, 'TextValue', self._manufacturer),
            (codes.DCM.DeviceSubjectModelName, 'TextValue', self._model_name),
            (codes.DCM.DeviceSubjectSerialNumber, 'TextValue', self._serial_number),
            (
                codes.DCM.DeviceSubjectPhysicalLocationDuringObservation,
                'TextValue',
                self._physical_location,
            ),
        ]
        assert len(context) == len(expected)
        for item, (concept, attr, value) in zip(context, expected):
            assert item.ConceptNameCodeSequence[0].CodeValue == concept.value
            assert getattr(item, attr) == value
class TestObservationContext(unittest.TestCase):

    """Tests for assembling a complete ObservationContext."""

    def setUp(self):
        super().setUp()
        self._person_name = '<NAME>'
        self._device_uid = generate_uid()
        self._specimen_uid = generate_uid()
        self._observer_person_context = ObserverContext(
            observer_type=codes.cid270.Person,
            observer_identifying_attributes=PersonObserverIdentifyingAttributes(
                name=self._person_name
            )
        )
        self._observer_device_context = ObserverContext(
            observer_type=codes.cid270.Device,
            observer_identifying_attributes=DeviceObserverIdentifyingAttributes(
                uid=self._device_uid
            )
        )
        self._subject_context = SubjectContext(
            subject_class=codes.cid271.Specimen,
            subject_class_specific_context=SubjectContextSpecimen(
                uid=self._specimen_uid
            )
        )
        self._observation_context = ObservationContext(
            observer_person_context=self._observer_person_context,
            observer_device_context=self._observer_device_context,
            subject_context=self._subject_context
        )

    def test_observer_context(self):
        # Person observer: observer-type item followed by the identifying name.
        person_items = self._observer_person_context
        assert len(person_items) == 2
        assert person_items[0].ConceptNameCodeSequence[0].CodeValue == '121005'
        assert person_items[0].ConceptCodeSequence[0] == codes.cid270.Person
        assert person_items[1].ConceptNameCodeSequence[0].CodeValue == '121008'
        assert person_items[1].TextValue == self._person_name
        # Device observer: observer-type item followed by the identifying UID.
        device_items = self._observer_device_context
        assert len(device_items) == 2
        assert device_items[0].ConceptNameCodeSequence[0].CodeValue == '121005'
        assert device_items[0].ConceptCodeSequence[0] == codes.cid270.Device
        assert device_items[1].ConceptNameCodeSequence[0].CodeValue == '121012'
        assert device_items[1].UID == self._device_uid

    def test_subject_context(self):
        subject_items = self._subject_context
        assert len(subject_items) == 2
        assert subject_items[0].ConceptNameCodeSequence[0].CodeValue == '121024'
        assert subject_items[0].ConceptCodeSequence[0] == codes.cid271.Specimen
        assert subject_items[1].ConceptNameCodeSequence[0].CodeValue == '121039'
        assert subject_items[1].UID == self._specimen_uid

    def test_content_length(self):
        # 2 person + 2 device + 2 subject items in total.
        assert len(self._observation_context) == 6
class TestFindingSiteOptional(unittest.TestCase):

    """Tests for FindingSite with optional laterality and topographical modifier."""

    def setUp(self):
        super().setUp()
        self._location = codes.cid7151.LobeOfLung
        self._laterality = codes.cid244.Right
        self._modifier = codes.cid2.Apical
        self._finding_site = FindingSite(
            anatomic_location=self._location,
            laterality=self._laterality,
            topographical_modifier=self._modifier
        )

    def test_finding_site(self):
        site = self._finding_site
        assert site.ConceptNameCodeSequence[0].CodeValue == '363698007'
        assert site.ConceptCodeSequence[0] == self._location
        # Laterality and the modifier are attached as nested content items.
        assert len(site.ContentSequence) == 2

    def test_laterality(self):
        laterality_item = self._finding_site.ContentSequence[0]
        assert laterality_item.ConceptNameCodeSequence[0].CodeValue == '272741003'
        assert laterality_item.ConceptCodeSequence[0] == self._laterality

    def test_topographical_modifier(self):
        modifier_item = self._finding_site.ContentSequence[1]
        assert modifier_item.ConceptNameCodeSequence[0].CodeValue == '106233006'
        assert modifier_item.ConceptCodeSequence[0] == self._modifier
class TestFindingSite(unittest.TestCase):

    """Tests for FindingSite with only an anatomic location."""

    def setUp(self):
        super().setUp()
        self._location = \
            codes.cid6300.RightAnteriorMiddlePeripheralZoneOfProstate
        self._finding_site = FindingSite(
            anatomic_location=self._location
        )

    def test_finding_site(self):
        site = self._finding_site
        assert site.ConceptNameCodeSequence[0].CodeValue == '363698007'
        assert site.ConceptCodeSequence[0] == self._location
        # Without optional qualifiers, no nested content is attached.
        assert not hasattr(site, 'ContentSequence')
class TestSourceImageForSegmentation(unittest.TestCase):

    """Tests for SourceImageForSegmentation construction and its factory."""

    def setUp(self):
        super().setUp()
        data_dir = Path(__file__).parent.parent.joinpath('data')
        self._src_dataset = dcmread(
            str(data_dir.joinpath('test_files', 'ct_image.dcm'))
        )
        self._src_dataset_multiframe = dcmread(
            get_testdata_file('eCT_Supplemental.dcm')
        )
        self._invalid_src_dataset_sr = dcmread(
            get_testdata_file('reportsi.dcm')
        )
        self._invalid_src_dataset_seg = dcmread(
            str(data_dir.joinpath('test_files', 'seg_image_sm_dots.dcm'))
        )
        self._ref_frames = [1, 2]
        # One past the final frame: guaranteed out of range.
        self._ref_frames_invalid = [
            self._src_dataset_multiframe.NumberOfFrames + 1
        ]

    def _check_reference(self, src_image, dataset, frames=None):
        # Every variant yields a single-item ReferencedSOPSequence mirroring
        # the source dataset (and, optionally, the referenced frames).
        sequence = src_image.ReferencedSOPSequence
        assert len(sequence) == 1
        assert sequence[0].ReferencedSOPClassUID == dataset.SOPClassUID
        assert sequence[0].ReferencedSOPInstanceUID == dataset.SOPInstanceUID
        if frames is not None:
            assert sequence[0].ReferencedFrameNumber == frames

    def test_construction(self):
        src_image = SourceImageForSegmentation(
            self._src_dataset.SOPClassUID,
            self._src_dataset.SOPInstanceUID
        )
        self._check_reference(src_image, self._src_dataset)

    def test_construction_with_frame_reference(self):
        src_image = SourceImageForSegmentation(
            self._src_dataset_multiframe.SOPClassUID,
            self._src_dataset_multiframe.SOPInstanceUID,
            self._ref_frames
        )
        self._check_reference(
            src_image, self._src_dataset_multiframe, self._ref_frames
        )

    def test_from_source_image(self):
        src_image = SourceImageForSegmentation.from_source_image(
            self._src_dataset
        )
        self._check_reference(src_image, self._src_dataset)

    def test_from_source_image_with_referenced_frames(self):
        src_image = SourceImageForSegmentation.from_source_image(
            self._src_dataset_multiframe,
            self._ref_frames
        )
        self._check_reference(
            src_image, self._src_dataset_multiframe, self._ref_frames
        )

    def test_from_source_image_with_invalid_referenced_frames(self):
        with pytest.raises(ValueError):
            SourceImageForSegmentation.from_source_image(
                self._src_dataset_multiframe,
                self._ref_frames_invalid
            )

    def test_from_invalid_source_image_sr(self):
        with pytest.raises(ValueError):
            SourceImageForSegmentation.from_source_image(
                self._invalid_src_dataset_sr
            )

    def test_from_invalid_source_image_seg(self):
        with pytest.raises(ValueError):
            SourceImageForSegmentation.from_source_image(
                self._invalid_src_dataset_seg
            )
class TestSourceSeriesForSegmentation(unittest.TestCase):

    """Tests for SourceSeriesForSegmentation construction and its factory."""

    def setUp(self):
        super().setUp()
        data_dir = Path(__file__).parent.parent.joinpath('data')
        self._src_dataset = dcmread(
            str(data_dir.joinpath('test_files', 'ct_image.dcm'))
        )
        self._invalid_src_dataset_sr = dcmread(
            get_testdata_file('reportsi.dcm')
        )
        self._invalid_src_dataset_seg = dcmread(
            str(data_dir.joinpath('test_files', 'seg_image_sm_dots.dcm'))
        )

    def test_construction(self):
        src_series = SourceSeriesForSegmentation(
            self._src_dataset.SeriesInstanceUID,
        )
        assert src_series.UID == self._src_dataset.SeriesInstanceUID

    def test_from_source_image(self):
        src_series = SourceSeriesForSegmentation.from_source_image(
            self._src_dataset
        )
        assert src_series.UID == self._src_dataset.SeriesInstanceUID

    def test_from_invalid_source_image_sr(self):
        # SR documents are not acceptable source images.
        with pytest.raises(ValueError):
            SourceSeriesForSegmentation.from_source_image(
                self._invalid_src_dataset_sr
            )

    def test_from_invalid_source_image_seg(self):
        # Segmentations are not acceptable source images.
        with pytest.raises(ValueError):
            SourceSeriesForSegmentation.from_source_image(
                self._invalid_src_dataset_seg
            )
class TestSourceImageForRegion(unittest.TestCase):

    """Tests for SourceImageForRegion construction and its factory."""

    def setUp(self):
        super().setUp()
        data_dir = Path(__file__).parent.parent.joinpath('data')
        self._src_dataset = dcmread(
            str(data_dir.joinpath('test_files', 'ct_image.dcm'))
        )
        self._src_dataset_multiframe = dcmread(
            get_testdata_file('eCT_Supplemental.dcm')
        )
        self._invalid_src_dataset_sr = dcmread(
            get_testdata_file('reportsi.dcm')
        )
        self._invalid_src_dataset_seg = dcmread(
            str(data_dir.joinpath('test_files', 'seg_image_sm_dots.dcm'))
        )
        self._ref_frames = [1, 2]
        # One past the final frame: guaranteed out of range.
        self._ref_frames_invalid = [
            self._src_dataset_multiframe.NumberOfFrames + 1
        ]

    def _check_reference(self, src_image, dataset, frames=None):
        # Every variant yields a single-item ReferencedSOPSequence mirroring
        # the source dataset (and, optionally, the referenced frames).
        sequence = src_image.ReferencedSOPSequence
        assert len(sequence) == 1
        assert sequence[0].ReferencedSOPClassUID == dataset.SOPClassUID
        assert sequence[0].ReferencedSOPInstanceUID == dataset.SOPInstanceUID
        if frames is not None:
            assert sequence[0].ReferencedFrameNumber == frames

    def test_construction(self):
        src_image = SourceImageForRegion(
            self._src_dataset.SOPClassUID,
            self._src_dataset.SOPInstanceUID
        )
        self._check_reference(src_image, self._src_dataset)

    def test_construction_with_frame_reference_frames(self):
        src_image = SourceImageForRegion(
            self._src_dataset_multiframe.SOPClassUID,
            self._src_dataset_multiframe.SOPInstanceUID,
            self._ref_frames
        )
        self._check_reference(
            src_image, self._src_dataset_multiframe, self._ref_frames
        )

    def test_from_source_image(self):
        src_image = SourceImageForRegion.from_source_image(
            self._src_dataset
        )
        self._check_reference(src_image, self._src_dataset)

    def test_from_source_image_with_referenced_frames(self):
        src_image = SourceImageForRegion.from_source_image(
            self._src_dataset_multiframe,
            self._ref_frames
        )
        self._check_reference(
            src_image, self._src_dataset_multiframe, self._ref_frames
        )

    def test_from_source_image_with_invalid_referenced_frames(self):
        with pytest.raises(ValueError):
            SourceImageForRegion.from_source_image(
                self._src_dataset_multiframe,
                self._ref_frames_invalid
            )

    def test_from_invalid_source_image_sr(self):
        with pytest.raises(ValueError):
            SourceImageForRegion.from_source_image(
                self._invalid_src_dataset_sr
            )

    def test_from_invalid_source_image_seg(self):
        with pytest.raises(ValueError):
            SourceImageForRegion.from_source_image(
                self._invalid_src_dataset_seg
            )
class TestSourceImageForMeasurement(unittest.TestCase):

    """Tests for SourceImageForMeasurement construction and its factory."""

    def setUp(self):
        super().setUp()
        data_dir = Path(__file__).parent.parent.joinpath('data')
        self._src_dataset = dcmread(
            str(data_dir.joinpath('test_files', 'ct_image.dcm'))
        )
        self._src_dataset_multiframe = dcmread(
            get_testdata_file('eCT_Supplemental.dcm')
        )
        self._invalid_src_dataset_sr = dcmread(
            get_testdata_file('reportsi.dcm')
        )
        self._invalid_src_dataset_seg = dcmread(
            str(data_dir.joinpath('test_files', 'seg_image_sm_dots.dcm'))
        )
        self._ref_frames = [1, 2]
        # One past the final frame: guaranteed out of range.
        self._ref_frames_invalid = [
            self._src_dataset_multiframe.NumberOfFrames + 1
        ]

    def _check_reference(self, src_image, dataset, frames=None):
        # Every variant yields a single-item ReferencedSOPSequence mirroring
        # the source dataset (and, optionally, the referenced frames).
        sequence = src_image.ReferencedSOPSequence
        assert len(sequence) == 1
        assert sequence[0].ReferencedSOPClassUID == dataset.SOPClassUID
        assert sequence[0].ReferencedSOPInstanceUID == dataset.SOPInstanceUID
        if frames is not None:
            assert sequence[0].ReferencedFrameNumber == frames

    def test_construction(self):
        src_image = SourceImageForMeasurement(
            self._src_dataset.SOPClassUID,
            self._src_dataset.SOPInstanceUID
        )
        self._check_reference(src_image, self._src_dataset)

    def test_construction_with_frame_reference(self):
        src_image = SourceImageForMeasurement(
            self._src_dataset_multiframe.SOPClassUID,
            self._src_dataset_multiframe.SOPInstanceUID,
            self._ref_frames
        )
        self._check_reference(
            src_image, self._src_dataset_multiframe, self._ref_frames
        )

    def test_from_source_image(self):
        src_image = SourceImageForMeasurement.from_source_image(
            self._src_dataset
        )
        self._check_reference(src_image, self._src_dataset)

    def test_from_source_image_with_referenced_frames(self):
        src_image = SourceImageForMeasurement.from_source_image(
            self._src_dataset_multiframe,
            self._ref_frames
        )
        self._check_reference(
            src_image, self._src_dataset_multiframe, self._ref_frames
        )

    def test_from_source_image_with_invalid_referenced_frames(self):
        with pytest.raises(ValueError):
            SourceImageForMeasurement.from_source_image(
                self._src_dataset_multiframe,
                self._ref_frames_invalid
            )

    def test_from_invalid_source_image_sr(self):
        with pytest.raises(ValueError):
            SourceImageForMeasurement.from_source_image(
                self._invalid_src_dataset_sr
            )

    def test_from_invalid_source_image_seg(self):
        with pytest.raises(ValueError):
            SourceImageForMeasurement.from_source_image(
                self._invalid_src_dataset_seg
            )
class TestReferencedSegment(unittest.TestCase):
def setUp(self):
file_path = Path(__file__)
data_dir = file_path.parent.parent.joinpath('data')
self._filepath = str(
data_dir.joinpath('test_files', 'seg_image_sm_dots.dcm')
)
self._seg_dataset = dcmread(self._filepath)
self._src_sop_class_uid = self._seg_dataset.ReferencedSeriesSequence[0]\
.ReferencedInstanceSequence[0].ReferencedSOPClassUID
self._src_sop_ins_uid = self._seg_dataset.ReferencedSeriesSequence[0]\
.ReferencedInstanceSequence[0].ReferencedSOPInstanceUID
self._src_series_ins_uid = self._seg_dataset.\
ReferencedSeriesSequence[0].SeriesInstanceUID
self._ref_frame_number = 38
self._wrong_ref_frame_number = 13 # does not match the segment
self._invalid_ref_frame_number = 0
self._ref_segment_number = 35
self._invalid_ref_segment_number = 8 # does not exist in this dataset
self._src_images = [
SourceImageForSegmentation(
self._src_sop_class_uid,
self._src_sop_ins_uid
)
]
self._src_series = SourceSeriesForSegmentation(
self._src_series_ins_uid
)
def test_construction(self):
ref_seg = ReferencedSegment(
sop_class_uid=self._seg_dataset.SOPClassUID,
sop_instance_uid=self._seg_dataset.SOPInstanceUID,
segment_number=self._ref_segment_number,
source_images=self._src_images
)
assert len(ref_seg) == 2
assert (
ref_seg[0].ReferencedSOPSequence[0].ReferencedSOPClassUID ==
self._seg_dataset.SOPClassUID
)
assert (
ref_seg[0].ReferencedSOPSequence[0].ReferencedSOPInstanceUID ==
self._seg_dataset.SOPInstanceUID
)
assert (
ref_seg[0].ReferencedSOPSequence[0].ReferencedSegmentNumber ==
self._ref_segment_number
)
assert (
ref_seg[1].ReferencedSOPSequence[0].ReferencedSOPClassUID ==
self._src_sop_class_uid
)
assert (
ref_seg[1].ReferencedSOPSequence[0].ReferencedSOPInstanceUID ==
self._src_sop_ins_uid
)
def test_construction_with_frame_reference(self):
ref_seg = ReferencedSegment(
sop_class_uid=self._seg_dataset.SOPClassUID,
sop_instance_uid=self._seg_dataset.SOPInstanceUID,
segment_number=self._ref_segment_number,
| |
return bool(self._entity_data.get('OnlyRunForward'))
return bool(0)
@property
def OnlyRunBackward(self):
if "OnlyRunBackward" in self._entity_data:
return bool(self._entity_data.get('OnlyRunBackward'))
return bool(0)
@property
def LimitForward(self):
if "LimitForward" in self._entity_data:
return float(self._entity_data.get('LimitForward'))
return float(1)
@property
def LimitBackward(self):
if "LimitBackward" in self._entity_data:
return float(self._entity_data.get('LimitBackward'))
return float(0)
@property
def LimitStop(self):
if "LimitStop" in self._entity_data:
return float(self._entity_data.get('LimitStop'))
return float(-1)
@property
def StartLocked(self):
if "StartLocked" in self._entity_data:
return bool(self._entity_data.get('StartLocked'))
return bool(0)
@property
def LimitLocked(self):
if "LimitLocked" in self._entity_data:
return float(self._entity_data.get('LimitLocked'))
return float(0)
@property
def ReturnToCompletion(self):
if "ReturnToCompletion" in self._entity_data:
return bool(self._entity_data.get('ReturnToCompletion'))
return bool(0)
@property
def ReturnToCompletionAmount(self):
if "ReturnToCompletionAmount" in self._entity_data:
return float(self._entity_data.get('ReturnToCompletionAmount'))
return float(0)
@property
def ReturnToCompletionThreshold(self):
if "ReturnToCompletionThreshold" in self._entity_data:
return float(self._entity_data.get('ReturnToCompletionThreshold'))
return float(-1)
@property
def ReturnToCompletionDelay(self):
if "ReturnToCompletionDelay" in self._entity_data:
return float(self._entity_data.get('ReturnToCompletionDelay'))
return float(0)
@property
def AnimationDuration(self):
if "AnimationDuration" in self._entity_data:
return float(self._entity_data.get('AnimationDuration'))
return float(5)
@property
def StartSound(self):
if "StartSound" in self._entity_data:
return self._entity_data.get('StartSound')
return None
@property
def MoveSound(self):
if "MoveSound" in self._entity_data:
return self._entity_data.get('MoveSound')
return None
@property
def StopSound(self):
if "StopSound" in self._entity_data:
return self._entity_data.get('StopSound')
return None
@property
def OpenCompleteSound(self):
if "OpenCompleteSound" in self._entity_data:
return self._entity_data.get('OpenCompleteSound')
return None
@property
def CloseCompleteSound(self):
if "CloseCompleteSound" in self._entity_data:
return self._entity_data.get('CloseCompleteSound')
return None
@property
def BounceSound(self):
if "BounceSound" in self._entity_data:
return self._entity_data.get('BounceSound')
return None
@property
def LockedSound(self):
if "LockedSound" in self._entity_data:
return self._entity_data.get('LockedSound')
return None
@property
def ReturnForwardMoveSound(self):
if "ReturnForwardMoveSound" in self._entity_data:
return self._entity_data.get('ReturnForwardMoveSound')
return None
@property
def ReturnBackwardMoveSound(self):
if "ReturnBackwardMoveSound" in self._entity_data:
return self._entity_data.get('ReturnBackwardMoveSound')
return None
@property
def InteractionBoneName(self):
if "InteractionBoneName" in self._entity_data:
return self._entity_data.get('InteractionBoneName')
return "interact"
@property
def ReturnToCompletionStyle(self):
if "ReturnToCompletionStyle" in self._entity_data:
return self._entity_data.get('ReturnToCompletionStyle')
return "0"
@property
def AllowGravityGunPull(self):
if "AllowGravityGunPull" in self._entity_data:
return bool(self._entity_data.get('AllowGravityGunPull'))
return bool(0)
@property
def RetainVelocity(self):
if "RetainVelocity" in self._entity_data:
return bool(self._entity_data.get('RetainVelocity'))
return bool(0)
@property
def ReactToDynamicPhysics(self):
if "ReactToDynamicPhysics" in self._entity_data:
return bool(self._entity_data.get('ReactToDynamicPhysics'))
return bool(0)
@property
def IgnoreHandRotation(self):
if "IgnoreHandRotation" in self._entity_data:
return bool(self._entity_data.get('IgnoreHandRotation'))
return bool(1)
@property
def IgnoreHandPosition(self):
if "IgnoreHandPosition" in self._entity_data:
return bool(self._entity_data.get('IgnoreHandPosition'))
return bool(0)
@property
def DoHapticsOnBothHands(self):
if "DoHapticsOnBothHands" in self._entity_data:
return bool(self._entity_data.get('DoHapticsOnBothHands'))
return bool(0)
@property
def PositiveResistance(self):
if "PositiveResistance" in self._entity_data:
return float(self._entity_data.get('PositiveResistance'))
return float(1)
@property
def UpdateChildModels(self):
if "UpdateChildModels" in self._entity_data:
return bool(self._entity_data.get('UpdateChildModels'))
return bool(0)
@property
def NormalizeChildModelUpdates(self):
if "NormalizeChildModelUpdates" in self._entity_data:
return bool(self._entity_data.get('NormalizeChildModelUpdates'))
return bool(0)
@property
def ChildModelAnimgraphParameter(self):
if "ChildModelAnimgraphParameter" in self._entity_data:
return self._entity_data.get('ChildModelAnimgraphParameter')
return ""
@property
def SetNavIgnore(self):
if "SetNavIgnore" in self._entity_data:
return bool(self._entity_data.get('SetNavIgnore'))
return bool(0)
@property
def CreateNavObstacle(self):
if "CreateNavObstacle" in self._entity_data:
return bool(self._entity_data.get('CreateNavObstacle'))
return bool(0)
@property
def ReleaseOnPlayerDamage(self):
if "ReleaseOnPlayerDamage" in self._entity_data:
return bool(self._entity_data.get('ReleaseOnPlayerDamage'))
return bool(0)
@property
def BehaveAsPropPhysics(self):
if "BehaveAsPropPhysics" in self._entity_data:
return bool(self._entity_data.get('BehaveAsPropPhysics'))
return bool(0)
@property
def AddToSpatialPartition(self):
if "AddToSpatialPartition" in self._entity_data:
return bool(self._entity_data.get('AddToSpatialPartition'))
return bool(1)
@property
def interactAs(self):
if "interactAs" in self._entity_data:
return self._entity_data.get('interactAs')
return ""
class info_hlvr_equip_player(Targetname):
pass
icon_sprite = "editor/info_hlvr_equip_player.vmat"
@property
def equip_on_mapstart(self):
if "equip_on_mapstart" in self._entity_data:
return bool(self._entity_data.get('equip_on_mapstart'))
return bool(1)
@property
def energygun(self):
if "energygun" in self._entity_data:
return bool(self._entity_data.get('energygun'))
return bool(0)
@property
def shotgun(self):
if "shotgun" in self._entity_data:
return bool(self._entity_data.get('shotgun'))
return bool(0)
@property
def rapidfire(self):
if "rapidfire" in self._entity_data:
return bool(self._entity_data.get('rapidfire'))
return bool(0)
@property
def multitool(self):
if "multitool" in self._entity_data:
return bool(self._entity_data.get('multitool'))
return bool(0)
@property
def flashlight(self):
if "flashlight" in self._entity_data:
return bool(self._entity_data.get('flashlight'))
return bool(0)
@property
def flashlight_enabled(self):
if "flashlight_enabled" in self._entity_data:
return bool(self._entity_data.get('flashlight_enabled'))
return bool(0)
@property
def grabbitygloves(self):
if "grabbitygloves" in self._entity_data:
return bool(self._entity_data.get('grabbitygloves'))
return bool(0)
@property
def itemholder(self):
if "itemholder" in self._entity_data:
return bool(self._entity_data.get('itemholder'))
return bool(0)
@property
def set_ammo(self):
if "set_ammo" in self._entity_data:
return int(self._entity_data.get('set_ammo'))
return int(-1)
@property
def set_ammo_rapidfire(self):
if "set_ammo_rapidfire" in self._entity_data:
return int(self._entity_data.get('set_ammo_rapidfire'))
return int(-1)
@property
def set_ammo_shotgun(self):
if "set_ammo_shotgun" in self._entity_data:
return int(self._entity_data.get('set_ammo_shotgun'))
return int(-1)
@property
def set_resin(self):
if "set_resin" in self._entity_data:
return int(self._entity_data.get('set_resin'))
return int(-1)
@property
def start_weapons_empty(self):
    """Bool keyvalue 'start_weapons_empty' (defaults to False when unset)."""
    if "start_weapons_empty" not in self._entity_data:
        return False
    return bool(self._entity_data.get("start_weapons_empty"))

@property
def inventory_enabled(self):
    """Bool keyvalue 'inventory_enabled' (defaults to True when unset)."""
    if "inventory_enabled" not in self._entity_data:
        return True
    return bool(self._entity_data.get("inventory_enabled"))

@property
def backpack_enabled(self):
    """Bool keyvalue 'backpack_enabled' (defaults to True when unset)."""
    if "backpack_enabled" not in self._entity_data:
        return True
    return bool(self._entity_data.get("backpack_enabled"))

@property
def allow_removal(self):
    """Bool keyvalue 'allow_removal' (defaults to False when unset)."""
    if "allow_removal" not in self._entity_data:
        return False
    return bool(self._entity_data.get("allow_removal"))
@property
def pistol_upgrade_lasersight(self):
    """Bool keyvalue 'pistol_upgrade_lasersight' (default False)."""
    if "pistol_upgrade_lasersight" not in self._entity_data:
        return False
    return bool(self._entity_data.get("pistol_upgrade_lasersight"))

@property
def pistol_upgrade_reflexsight(self):
    """Bool keyvalue 'pistol_upgrade_reflexsight' (default False)."""
    if "pistol_upgrade_reflexsight" not in self._entity_data:
        return False
    return bool(self._entity_data.get("pistol_upgrade_reflexsight"))

@property
def pistol_upgrade_bullethopper(self):
    """Bool keyvalue 'pistol_upgrade_bullethopper' (default False)."""
    if "pistol_upgrade_bullethopper" not in self._entity_data:
        return False
    return bool(self._entity_data.get("pistol_upgrade_bullethopper"))

@property
def pistol_upgrade_burstfire(self):
    """Bool keyvalue 'pistol_upgrade_burstfire' (default False)."""
    if "pistol_upgrade_burstfire" not in self._entity_data:
        return False
    return bool(self._entity_data.get("pistol_upgrade_burstfire"))

@property
def rapidfire_upgrade_reflexsight(self):
    """Bool keyvalue 'rapidfire_upgrade_reflexsight' (default False)."""
    if "rapidfire_upgrade_reflexsight" not in self._entity_data:
        return False
    return bool(self._entity_data.get("rapidfire_upgrade_reflexsight"))

@property
def rapidfire_upgrade_lasersight(self):
    """Bool keyvalue 'rapidfire_upgrade_lasersight' (default False)."""
    if "rapidfire_upgrade_lasersight" not in self._entity_data:
        return False
    return bool(self._entity_data.get("rapidfire_upgrade_lasersight"))

@property
def rapidfire_upgrade_extended_magazine(self):
    """Bool keyvalue 'rapidfire_upgrade_extended_magazine' (default False)."""
    if "rapidfire_upgrade_extended_magazine" not in self._entity_data:
        return False
    return bool(self._entity_data.get("rapidfire_upgrade_extended_magazine"))

@property
def shotgun_upgrade_autoloader(self):
    """Bool keyvalue 'shotgun_upgrade_autoloader' (default False)."""
    if "shotgun_upgrade_autoloader" not in self._entity_data:
        return False
    return bool(self._entity_data.get("shotgun_upgrade_autoloader"))

@property
def shotgun_upgrade_grenade(self):
    """Bool keyvalue 'shotgun_upgrade_grenade' (default False)."""
    if "shotgun_upgrade_grenade" not in self._entity_data:
        return False
    return bool(self._entity_data.get("shotgun_upgrade_grenade"))

@property
def shotgun_upgrade_lasersight(self):
    """Bool keyvalue 'shotgun_upgrade_lasersight' (default False)."""
    if "shotgun_upgrade_lasersight" not in self._entity_data:
        return False
    return bool(self._entity_data.get("shotgun_upgrade_lasersight"))

@property
def shotgun_upgrade_quickfire(self):
    """Bool keyvalue 'shotgun_upgrade_quickfire' (default False)."""
    if "shotgun_upgrade_quickfire" not in self._entity_data:
        return False
    return bool(self._entity_data.get("shotgun_upgrade_quickfire"))
class point_hlvr_strip_player(Targetname):
    """Generated keyvalue accessors for the point_hlvr_strip_player entity."""

    icon_sprite = "editor/point_hlvr_strip_player.vmat"

    @property
    def EnablePhysicsDelay(self):
        """Float keyvalue 'EnablePhysicsDelay' (default 1.0)."""
        if "EnablePhysicsDelay" not in self._entity_data:
            return 1.0
        return float(self._entity_data.get("EnablePhysicsDelay"))

    @property
    def DissolveItemsDelay(self):
        """Float keyvalue 'DissolveItemsDelay' (default 3.0)."""
        if "DissolveItemsDelay" not in self._entity_data:
            return 3.0
        return float(self._entity_data.get("DissolveItemsDelay"))

    @property
    def ItemVelocity(self):
        """Float keyvalue 'ItemVelocity' (default 20.0)."""
        if "ItemVelocity" not in self._entity_data:
            return 20.0
        return float(self._entity_data.get("ItemVelocity"))
class item_item_crate(BasePropPhysics):
    """Generated keyvalue accessors for the item_item_crate entity."""

    @property
    def ItemClass(self):
        """String keyvalue 'ItemClass' (default "item_hlvr_clip_energygun")."""
        return self._entity_data.get("ItemClass", "item_hlvr_clip_energygun")

    @property
    def CrateAppearance(self):
        """String keyvalue 'CrateAppearance' (default "2")."""
        return self._entity_data.get("CrateAppearance", "2")

    @property
    def ItemCount(self):
        """Int keyvalue 'ItemCount' (default 1)."""
        if "ItemCount" not in self._entity_data:
            return 1
        return int(self._entity_data.get("ItemCount"))

    @property
    def SpecificResupply(self):
        """String keyvalue 'SpecificResupply' (default "")."""
        return self._entity_data.get("SpecificResupply", "")

    @property
    def ammobalancing_removable(self):
        """String keyvalue 'ammobalancing_removable' (default "0")."""
        return self._entity_data.get("ammobalancing_removable", "0")
class item_hlvr_crafting_currency_large(BasePropPhysics):
    """Generated keyvalue accessors for item_hlvr_crafting_currency_large."""

    @property
    def remove_over_amount(self):
        """Int keyvalue 'remove_over_amount' (default 0)."""
        if "remove_over_amount" not in self._entity_data:
            return 0
        return int(self._entity_data.get("remove_over_amount"))
class item_hlvr_crafting_currency_small(BasePropPhysics):
    """Generated keyvalue accessors for item_hlvr_crafting_currency_small."""

    @property
    def remove_over_amount(self):
        """Int keyvalue 'remove_over_amount' (default 0)."""
        if "remove_over_amount" not in self._entity_data:
            return 0
        return int(self._entity_data.get("remove_over_amount"))
class item_hlvr_prop_discovery(BasePropPhysics):
    """Generated entity class; adds no keyvalues beyond BasePropPhysics."""
class item_hlvr_prop_ammobag(Item):
    """Generated entity class; adds no keyvalues beyond Item."""
class prop_hlvr_crafting_station(Targetname):
    """Generated keyvalue accessors for the prop_hlvr_crafting_station entity."""

    @property
    def hacking_plug(self):
        """String keyvalue 'hacking_plug' (default "")."""
        return self._entity_data.get("hacking_plug", "")

    @property
    def is_powered(self):
        """Bool keyvalue 'is_powered'; False when unset (original: bool(None))."""
        if "is_powered" not in self._entity_data:
            return False
        return bool(self._entity_data.get("is_powered"))

    @property
    def lightmapstatic(self):
        """String keyvalue 'lightmapstatic' (default "0")."""
        return self._entity_data.get("lightmapstatic", "0")
class trigger_crafting_station_object_placement(Trigger):
    """Generated entity class; adds no keyvalues beyond Trigger."""
class item_healthcharger(Targetname):
    """Generated keyvalue accessors for the item_healthcharger entity."""

    @property
    def start_with_vial(self):
        """Bool keyvalue 'start_with_vial' (defaults to True when unset)."""
        if "start_with_vial" not in self._entity_data:
            return True
        return bool(self._entity_data.get("start_with_vial"))

    @property
    def vial_level(self):
        """Float keyvalue 'vial_level' (default 1.0)."""
        if "vial_level" not in self._entity_data:
            return 1.0
        return float(self._entity_data.get("vial_level"))

    @property
    def lightmapstatic(self):
        """String keyvalue 'lightmapstatic' (default "0")."""
        return self._entity_data.get("lightmapstatic", "0")
class item_combine_console(Targetname):
    """Generated keyvalue accessors for the item_combine_console entity."""

    def _bool_keyvalue(self, key):
        # Shared reader for the rack/tank flags below; a missing key reads
        # as False (the original returned bool(None)).
        if key not in self._entity_data:
            return False
        return bool(self._entity_data.get(key))

    @property
    def hacking_plug(self):
        """String keyvalue 'hacking_plug' (default "")."""
        return self._entity_data.get("hacking_plug", "")

    @property
    def rack0_active(self):
        """Bool keyvalue 'rack0_active' (default False)."""
        return self._bool_keyvalue("rack0_active")

    @property
    def rack1_active(self):
        """Bool keyvalue 'rack1_active' (default False)."""
        return self._bool_keyvalue("rack1_active")

    @property
    def rack2_active(self):
        """Bool keyvalue 'rack2_active' (default False)."""
        return self._bool_keyvalue("rack2_active")

    @property
    def rack3_active(self):
        """Bool keyvalue 'rack3_active' (default False)."""
        return self._bool_keyvalue("rack3_active")

    @property
    def tank0_start_missing(self):
        """Bool keyvalue 'tank0_start_missing' (default False)."""
        return self._bool_keyvalue("tank0_start_missing")

    @property
    def tank1_start_missing(self):
        """Bool keyvalue 'tank1_start_missing' (default False)."""
        return self._bool_keyvalue("tank1_start_missing")

    @property
    def tank2_start_missing(self):
        """Bool keyvalue 'tank2_start_missing' (default False)."""
        return self._bool_keyvalue("tank2_start_missing")

    @property
    def tank3_start_missing(self):
        """Bool keyvalue 'tank3_start_missing' (default False)."""
        return self._bool_keyvalue("tank3_start_missing")

    @property
    def objective_model(self):
        """String keyvalue 'objective_model' (default "")."""
        return self._entity_data.get("objective_model", "")

    @property
    def lightmapstatic(self):
        """String keyvalue 'lightmapstatic' (default "0")."""
        return self._entity_data.get("lightmapstatic", "0")
class item_combine_tank_locker(prop_animinteractable):
    """Generated keyvalue accessors for the item_combine_tank_locker entity."""

    @property
    def model(self):
        """String keyvalue 'model' (default: standing combine locker model)."""
        return self._entity_data.get(
            "model",
            "models/props_combine/combine_lockers/combine_locker_standing.vmdl")

    @property
    def starting_tanks(self):
        """String keyvalue 'starting_tanks' (default "0")."""
        return self._entity_data.get("starting_tanks", "0")
class hlvr_vault_tractor_beam_console(Targetname):
    """Generated entity class; adds no keyvalues beyond Targetname."""
class info_hlvr_toner_port(Targetname, EnableDisable):
    """Generated keyvalue accessors for the info_hlvr_toner_port entity."""

    @property
    def StartPortVisible(self):
        """Bool keyvalue 'StartPortVisible' (default False)."""
        if "StartPortVisible" not in self._entity_data:
            return False
        return bool(self._entity_data.get("StartPortVisible"))

    @property
    def StartVisible(self):
        """Bool keyvalue 'StartVisible' (default False)."""
        if "StartVisible" not in self._entity_data:
            return False
        return bool(self._entity_data.get("StartVisible"))

    @property
    def initial_orientation(self):
        """String keyvalue 'initial_orientation' (default "0")."""
        return self._entity_data.get("initial_orientation", "0")

    @property
    def desired_orientation(self):
        """String keyvalue 'desired_orientation' (default "2")."""
        return self._entity_data.get("desired_orientation", "2")
class info_hlvr_toner_path(Targetname):
    """Generated keyvalue accessors for the info_hlvr_toner_path entity."""

    icon_sprite = "editor/info_hlvr_toner_path.vmat"

    @property
    def first_path_node(self):
        """String keyvalue 'first_path_node' (default "")."""
        return self._entity_data.get("first_path_node", "")

    @property
    def start_entity(self):
        """String keyvalue 'start_entity' (default "")."""
        return self._entity_data.get("start_entity", "")

    @property
    def end_entity(self):
        """String keyvalue 'end_entity' (default "")."""
        return self._entity_data.get("end_entity", "")
class info_hlvr_toner_path_node(Targetname):
    """Generated keyvalue accessors for the info_hlvr_toner_path_node entity."""

    @property
    def target(self):
        """String keyvalue 'target'; None when unset."""
        return self._entity_data.get("target")

    @property
    def is_spline_node(self):
        """Bool keyvalue 'is_spline_node' (defaults to True when unset)."""
        if "is_spline_node" not in self._entity_data:
            return True
        return bool(self._entity_data.get("is_spline_node"))

    @property
    def inset_distance(self):
        """Float keyvalue 'inset_distance' (default -1.0)."""
        if "inset_distance" not in self._entity_data:
            return -1.0
        return float(self._entity_data.get("inset_distance"))
class info_hlvr_toner_junction(Targetname):
    """Generated keyvalue accessors for the info_hlvr_toner_junction entity."""

    @property
    def junction_toplogy(self):
        # Key name 'junction_toplogy' (sic) is preserved: it must match the
        # map data exactly.
        """String keyvalue 'junction_toplogy' (default "0")."""
        return self._entity_data.get("junction_toplogy", "0")

    @property
    def junction_orientation(self):
        """String keyvalue 'junction_orientation' (default "0")."""
        return self._entity_data.get("junction_orientation", "0")

    @property
    def inset_distance(self):
        """Float keyvalue 'inset_distance' (default -1.0)."""
        if "inset_distance" not in self._entity_data:
            return -1.0
        return float(self._entity_data.get("inset_distance"))

    @property
    def connection_0(self):
        """String keyvalue 'connection_0' (default "")."""
        return self._entity_data.get("connection_0", "")

    @property
    def connection_1(self):
        """String keyvalue 'connection_1' (default "")."""
        return self._entity_data.get("connection_1", "")

    @property
    def connection_2(self):
        """String keyvalue 'connection_2' (default "")."""
        return self._entity_data.get("connection_2", "")

    @property
    def connection_3(self):
        """String keyvalue 'connection_3' (default "")."""
        return self._entity_data.get("connection_3", "")
class | |
the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
:param Sequence['DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsArgs'] node_selector_terms: Required. A list of node selector terms. The terms are ORed.
"""
pulumi.set(__self__, "node_selector_terms", node_selector_terms)
@property
@pulumi.getter(name="nodeSelectorTerms")
def node_selector_terms(self) -> Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']:
"""
Required. A list of node selector terms. The terms are ORed.
"""
return pulumi.get(self, "node_selector_terms")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms(dict):
    """A single node selector term.

    A null or empty term matches no objects; the requirements inside one
    term are ANDed together.
    """

    def __init__(self, *,
                 match_expressions: Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']] = None):
        """
        :param match_expressions: node selector requirements by node labels.
        :param match_fields: node selector requirements by node fields.
        """
        # Only set the keys that were actually provided.
        if match_expressions is not None:
            pulumi.set(self, "match_expressions", match_expressions)
        if match_fields is not None:
            pulumi.set(self, "match_fields", match_fields)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions']]:
        """A list of node selector requirements by node's labels."""
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields']]:
        """A list of node selector requirements by node's fields."""
        return pulumi.get(self, "match_fields")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions(dict):
    """A node selector requirement: a key, an operator, and optional values."""

    def __init__(self, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param key: the label key that the selector applies to.
        :param operator: one of In, NotIn, Exists, DoesNotExist, Gt, Lt.
        :param values: value list; required for In/NotIn, empty for
               Exists/DoesNotExist, single integer-like element for Gt/Lt.
        """
        pulumi.set(self, "key", key)
        pulumi.set(self, "operator", operator)
        # `values` is optional and only stored when given.
        if values is not None:
            pulumi.set(self, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key that the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """Represents a key's relationship to a set of values (In, NotIn,
        Exists, DoesNotExist, Gt, Lt)."""
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """An array of string values; interpretation depends on `operator`.
        Replaced during a strategic merge patch."""
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields(dict):
    """A node selector requirement (on node fields): key, operator, values."""

    def __init__(self, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        :param key: the label key that the selector applies to.
        :param operator: one of In, NotIn, Exists, DoesNotExist, Gt, Lt.
        :param values: value list; required for In/NotIn, empty for
               Exists/DoesNotExist, single integer-like element for Gt/Lt.
        """
        pulumi.set(self, "key", key)
        pulumi.set(self, "operator", operator)
        # `values` is optional and only stored when given.
        if values is not None:
            pulumi.set(self, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """The label key that the selector applies to."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """Represents a key's relationship to a set of values (In, NotIn,
        Exists, DoesNotExist, Gt, Lt)."""
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """An array of string values; interpretation depends on `operator`.
        Replaced during a strategic merge patch."""
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityPodAffinity(dict):
"""
Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
"""
def __init__(__self__, *,
preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
required_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']] = None):
"""
Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
:param Sequence['DatadogAgentSpecClusterChecksRunnerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
:param Sequence['DatadogAgentSpecClusterChecksRunnerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs'] required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
"""
if preferred_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
if required_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
@property
@pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
"""
The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
"""
return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
@property
@pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
def required_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution']]:
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there | |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 <NAME> <<EMAIL>>
# Copyright (C) 2011,2012 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: <NAME> <<EMAIL>>
from pkg_resources import resource_filename
from trac.config import BoolOption, Configuration, ExtensionOption, \
Option, OrderedExtensionsOption
from trac.core import Component, ExtensionPoint, Interface, TracError, \
implements
from trac.perm import IPermissionRequestor
from trac.web.chrome import ITemplateProvider
# Import i18n methods. Fallback modules maintain compatibility to Trac 0.11
# by keeping Babel optional here.
try:
    from trac.util.translation import domain_functions
    add_domain, _, N_, gettext, ngettext, tag_ = \
        domain_functions('acct_mgr', ('add_domain', '_', 'N_', 'gettext',
                                      'ngettext', 'tag_'))
    dgettext = None
except ImportError:
    # Trac < 0.12 (or no Babel): install no-op shims so the plugin keeps
    # working, just without translation.
    from genshi.builder import tag as tag_
    from trac.util.translation import gettext
    _ = gettext
    N_ = lambda text: text

    def add_domain(a,b,c=None):
        # No catalog support without Babel; binding a domain is a no-op.
        pass

    def dgettext(domain, string, **kwargs):
        # Untranslated best-effort interpolation.
        return safefmt(string, kwargs)

    def ngettext(singular, plural, num, **kwargs):
        # Naive plural selection; `num` is made available for interpolation.
        string = num == 1 and singular or plural
        kwargs.setdefault('num', num)
        return safefmt(string, kwargs)

    def safefmt(string, kwargs):
        # %-interpolate, but fall back to the raw string on missing keys
        # rather than raising.
        if kwargs:
            try:
                return string % kwargs
            except KeyError:
                pass
        return string
from acct_mgr.model import delete_user, get_user_attribute, \
prime_auth_session, set_user_attribute
class IPasswordStore(Interface):
    """An interface for Components that provide a storage method for users and
    passwords.
    """

    def config_key():
        """'''Deprecated''': New implementations should not use this method.

        The preferred way to configure an `IPasswordStore` implementation is by
        using its class name in the `password_store` option.

        Returns a string used to identify this implementation in the config.
        This password storage implementation will be used, if the value of
        config property "account-manager.password_format" matches.
        """

    def get_users():
        """Returns an iterable of the known usernames."""

    def has_user(user):
        """Returns whether the user account exists."""

    def set_password(user, password, old_password = None):
        """Sets the password for the user.

        This should create the user account, if it doesn't already exist.
        Returns True, if a new account was created, False if an existing
        account was updated.
        """

    def check_password(user, password):
        """Checks if the password is valid for the user.

        Returns True, if the correct user and password are specified.
        Returns False, if the incorrect password was specified.
        Returns None, if the user doesn't exist in this password store.

        Note: Returning `False` is an active rejection of the login attempt.
        Return None to let the authentication eventually fall through to
        next store in a chain.
        """

    def delete_user(user):
        """Deletes the user account.

        Returns True, if the account existed and was deleted, False otherwise.
        """
class IAccountChangeListener(Interface):
    """An interface for receiving account change events."""

    def user_created(user, password):
        """New user (account) created."""

    def user_password_changed(user, password):
        """Password changed."""

    def user_deleted(user):
        """User and related account information have been deleted."""

    def user_password_reset(user, email, password):
        """User password has been reset.

        Note, that this is no longer final, and the old password could still
        be recovered before first successful login with the new password.
        """

    def user_email_verification_requested(user, token):
        """User verification has been requested."""
class IAccountRegistrationInspector(Interface):
    """An interface for Components, that wish to participate in examining
    requests for account creation.

    The check method is called not only by RegistrationModule but when adding
    new users from the user editor in AccountManagerAdminPanel too.
    """

    def render_registration_fields(req, data):
        """Emit one or multiple additional fields for the registration form.

        Returns a dict containing a 'required' and/or 'optional' tuple of
         * Genshi Fragment or valid XHTML markup for registration form
         * modified or unchanged data object (used to render `register.html`)

        If the return value is just a single tuple, its fragment or markup
        will be inserted into the 'required' section.
        """

    def validate_registration(req):
        """Check registration form input.

        Returns a RegistrationError with error message, or None on success.
        """
class AccountManager(Component):
    """The AccountManager component handles all user account management methods
    provided by the IPasswordStore interface.

    The methods will be handled by underlying password storage implementations
    set in trac.ini with the "account-manager.password_store" option.

    The "account-manager.password_store" may be an ordered list of password
    stores, and if so, then each password store is queried in turn.
    """

    implements(IAccountChangeListener, IPermissionRequestor)

    # Ordered chain of IPasswordStore backends; lookups query them in turn.
    _password_store = OrderedExtensionsOption(
        'account-manager', 'password_store', IPasswordStore,
        include_missing=False,
        doc = N_("Ordered list of password stores, queried in turn."))
    # Legacy store selection; superseded by `password_store` above.
    _password_format = Option('account-manager', 'password_format',
        doc="Legacy option, deprecated since acct_mgr-0.1.2")
    # Inspectors consulted before a new account may be registered.
    _register_check = OrderedExtensionsOption(
        'account-manager', 'register_check', IAccountRegistrationInspector,
        default="""BasicCheck, EmailCheck, BotTrapCheck, RegExpCheck,
                   UsernamePermCheck""",
        include_missing=False,
        doc="""Ordered list of IAccountRegistrationInspector's to use for
            registration checks.""")

    # All available store and listener plugins (whether configured or not).
    stores = ExtensionPoint(IPasswordStore)
    change_listeners = ExtensionPoint(IAccountChangeListener)

    allow_delete_account = BoolOption(
        'account-manager', 'allow_delete_account', True,
        doc="Allow users to delete their own account.")
    force_passwd_change = BoolOption(
        'account-manager', 'force_passwd_change', True,
        doc="Force the user to change password when it's reset.")
    persistent_sessions = BoolOption(
        'account-manager', 'persistent_sessions', False,
        doc="""Allow the user to be remembered across sessions without
            needing to re-authenticate. This is, user checks a
            \"Remember Me\" checkbox and, next time he visits the site,
            he'll be remembered.""")
    refresh_passwd = BoolOption(
        'account-manager', 'refresh_passwd', False,
        doc="""Re-set passwords on successful authentication.
            This is most useful to move users to a new password store or
            enforce new store configuration (i.e. changed hash type),
            but should be disabled/unset otherwise.""")
    verify_email = BoolOption(
        'account-manager', 'verify_email', True,
        doc="Verify the email address of Trac users.")
    username_char_blacklist = Option(
        'account-manager', 'username_char_blacklist', ':[]',
        doc="""Always exclude some special characters from usernames.
            This is enforced upon new user registration.""")
def __init__(self):
    """Bind the plugin's translation catalog when the component is created."""
    # Bind the 'acct_mgr' catalog to the specified locale directory.
    locale_dir = resource_filename(__name__, 'locale')
    add_domain(self.env.path, locale_dir)
# Public API

def get_users(self):
    """Return usernames from all active stores.

    Stores may overlap, and some do not even guarantee uniqueness within
    themselves, so the result can contain duplicate usernames.
    """
    return [name
            for backend in self._password_store
            for name in backend.get_users()]
def has_user(self, user):
    """Return True when any configured password store knows *user*.

    The username is case-normalized first (see handle_username_casing).
    """
    user = self.handle_username_casing(user)
    # any() short-circuits at the first matching store, exactly like the
    # original flag-and-break loop; the original's unreachable trailing
    # `continue` is gone.
    return any(store.has_user(user) for store in self._password_store)
def set_password(self, user, password, old_password = None):
    """Set the password for *user*, creating the account when necessary.

    An existing user whose store cannot write passwords raises TracError;
    an unknown user is created in the first store supporting
    'set_password', firing the 'created' or 'password_changed'
    notification accordingly.
    """
    user = self.handle_username_casing(user)
    store = self.find_user_store(user)
    # Known user, but their backend is read-only (e.g. pure-auth store).
    if store and not hasattr(store, 'set_password'):
        raise TracError(_(
            """The authentication backend for user %s does not support
            setting the password.
            """ % user))
    elif not store:
        # Unknown user: pick any store capable of creating accounts.
        store = self.get_supporting_store('set_password')
    if store:
        # set_password() returns True when a new account was created.
        if store.set_password(user, password, old_password):
            self._notify('created', user, password)
        else:
            self._notify('password_changed', user, password)
    else:
        raise TracError(_(
            """None of the IPasswordStore components listed in the
            trac.ini supports setting the password or creating users.
            """))
def check_password(self, user, password):
    """Check the credentials against each store in turn.

    Returns the first truthy store verdict; otherwise the last store's
    verdict (False for an active rejection, None for unknown user).
    """
    user = self.handle_username_casing(user)
    valid = False
    for store in self._password_store:
        valid = store.check_password(user, password)
        if not valid:
            continue
        # `== True` (not just truthiness) keeps the original distinction
        # between a definitive True and any other truthy verdict before
        # re-hashing/migrating the password.
        if valid == True and self.refresh_passwd == True and \
                self.get_supporting_store('set_password'):
            self._maybe_update_hash(user, password)
        break
    return valid
def delete_user(self, user):
    """Delete *user* from their password store and from Trac's own tables."""
    user = self.handle_username_casing(user)
    # Remove from the password store first, when the store supports it.
    store = self.find_user_store(user)
    if store is not None and callable(getattr(store, 'delete_user', None)):
        store.delete_user(user)
    # Then delete session attributes, the session and any custom
    # permissions set for the user (module-level helper from acct_mgr.model).
    delete_user(self.env, user)
    self._notify('deleted', user)
def supports(self, operation):
    """Return True when some configured store implements *operation*."""
    try:
        stores = self.password_store  # probe: raises AttributeError when unset
    except AttributeError:
        return False
    # get_supporting_store() yields the store or None.
    return self.get_supporting_store(operation) is not None
@property
def password_store(self):
    """The ordered list of active password stores.

    Falls back to resolving the legacy 'password_format' option against
    each store's deprecated config_key(); re-raises the AttributeError
    when neither mechanism yields a store.
    """
    try:
        return self._password_store
    except AttributeError:
        # Old-style configuration: match "password_format" against each
        # store's deprecated config_key().
        fmt = self._password_format
        for candidate in self.stores:
            legacy_key = getattr(candidate, 'config_key', None)
            if legacy_key is not None and legacy_key() == fmt:
                return [candidate]
        # "password_format" did not resolve either: re-raise.
        raise
def get_supporting_store(self, operation):
    """Return the first IPasswordStore implementing *operation*, else None.

    Rewritten from a flag/break pattern with an unreachable trailing
    `continue` that leaked the loop variable and relied on `and/or`
    short-circuiting to avoid a NameError when no store was configured.
    """
    for store in self.password_store:
        if hasattr(store, operation):
            return store
    return None
def get_all_supporting_stores(self, operation):
    """Return a list of all configured stores implementing *operation*."""
    # Comprehension replaces the append loop with its redundant trailing
    # `continue`.
    return [store for store in self.password_store
            if hasattr(store, operation)]
def find_user_store(self, user):
    """Locate which store contains the user specified.

    Returns None if the user isn't found in any IPasswordStore in the
    chain.

    The original eagerly called get_users() on every configured store
    before searching; this version stops at the first store listing the
    user, returning the same result while avoiding needless backend
    queries (relevant for remote stores).
    """
    user = self.handle_username_casing(user)
    for store in self._password_store:
        if user in store.get_users():
            return store
    return None
def handle_username_casing(self, user):
"""Enforce lowercase usernames if required.
Comply with Trac's own behavior, when case-insensitive
user authentication is set to True.
"""
ignore_auth_case = self.config.getbool('trac', 'ignore_auth_case')
return ignore_auth_case and user.lower() or | |
voxels])
return averaged_voxels
def get_b(latlon_dict,file_path_slope,idx_slope,file_path_drainage,idx_drainage,shapefile):
    '''Create a permanent lookup file for b for study area for future processing to be used in overwinter DC procedure
    Parameters
    ----------
    latlon_dict : dictionary
        dictionary of latitude and longitudes for the hourly stations
    file_path_slope : string
        path to the slope file, includes file name
    idx_slope : list
        index of the slope variable in the header of the slope lookup file
    file_path_drainage : string
        path to the drainage file, includes file name
    idx_drainage : list
        index of the drainage variable in the header of the drainage lookup file
    shapefile : string
        path to the shapefile of the study area (.shp format)
    '''
    # Side effect: writes the computed b values to 'b_list.json' in the
    # current working directory; nothing is returned.
    lat = [] #Initialize empty lists to store data
    lon = []
    for station_name in latlon_dict.keys(): #Loop through the list of stations
        loc = latlon_dict[station_name]
        latitude = loc[0]
        longitude = loc[1]
        lat.append(float(latitude))
        lon.append(float(longitude))
    y = np.array(lat) #Convert to a numpy array for faster processing speed
    x = np.array(lon)
    na_map = gpd.read_file(shapefile)
    bounds = na_map.bounds #Get the bounding box of the shapefile
    # NOTE(review): bounds columns are pandas Series (one row per feature);
    # the int() conversions below only behave when the shapefile has exactly
    # one feature -- TODO confirm.
    xmax = bounds['maxx']
    xmin= bounds['minx']
    ymax = bounds['maxy']
    ymin = bounds['miny']
    pixelHeight = 10000 #We want a 10 by 10 pixel, or as close as we can get
    pixelWidth = 10000
    # NOTE(review): the x-extent is divided by pixelHeight and the y-extent
    # by pixelWidth (names apparently swapped); harmless only while both
    # are 10000.
    num_col = int((xmax - xmin) / pixelHeight) #Calculate the number of rows cols to fill the bounding box at that resolution
    num_row = int((ymax - ymin) / pixelWidth)
    #We need to project to a projected system before making distance matrix
    source_proj = pyproj.Proj(proj='latlong', datum = 'NAD83') #We dont know but assume NAD83
    # NOTE(review): source_proj is never used; the call below converts the
    # raw lat/lon arrays directly.
    xProj, yProj = pyproj.Proj('esri:102001')(x,y) #Convert to Canada Albers Equal Area
    yProj_extent=np.append(yProj,[bounds['maxy'],bounds['miny']]) #Add the bounding box coords to the dataset so we can extrapolate the interpolation to cover whole area
    xProj_extent=np.append(xProj,[bounds['maxx'],bounds['minx']])
    # NOTE(review): the appended extent values look like unprojected
    # shapefile bounds mixed into projected coordinates -- verify intent.
    Yi = np.linspace(np.min(yProj_extent),np.max(yProj_extent),num_row) #Get the value for lat lon in each cell we just made
    Xi = np.linspace(np.min(xProj_extent),np.max(xProj_extent),num_col)
    Xi,Yi = np.meshgrid(Xi,Yi) #Make a rectangular grid (because eventually we will map the values)
    Xi,Yi = Xi.flatten(), Yi.flatten() #Then we flatten the arrays for easier processing
    #X and then Y for a reason
    concat = np.array((Xi.flatten(), Yi.flatten())).T #Preparing the coordinates to send to the function that will get the elevation grid
    send_to_list = concat.tolist()
    send_to_tuple = [tuple(x) for x in send_to_list] #The elevation function takes a tuple
    #in cython dictionaries maintain insertion order
    Xi1_grd=[]
    Yi1_grd=[]
    slope_grd = []
    drainage_grd = []
    slope_grd_dict = finding_data_frm_lookup(send_to_tuple,file_path_slope,idx_slope) #Get the elevations from the lookup file
    drainage_grd_dict = finding_data_frm_lookup(send_to_tuple,file_path_drainage,idx_drainage)
    for keys in slope_grd_dict.keys(): #The keys are each lat lon pair
        x= keys[0]
        y = keys[1]
        Xi1_grd.append(x)
        Yi1_grd.append(y)
        slope_grd.append(slope_grd_dict[keys])
        # assumes drainage_grd_dict has exactly the same keys as
        # slope_grd_dict -- TODO confirm
        drainage_grd.append(drainage_grd_dict[keys])
    #combine the arrays
    slope_array = np.array(slope_grd)
    drainage_array = np.array(drainage_grd)
    #return the b array to be passed to the other function
    # NOTE(review): np.empty leaves cells uninitialized; any cell whose
    # drainage code is outside {0,1,2,3} and whose slope is <= 0.5 keeps
    # arbitrary memory contents -- confirm the codes are exhaustive.
    b_array = np.empty(slope_array.shape)
    b_array[drainage_array == 3] = 0.5
    b_array[drainage_array == 2] = 0.75
    b_array[drainage_array == 0] = 0.75
    b_array[drainage_array == 1] = 0.9
    # Steep slopes override the drainage-based value.
    b_array[slope_array > 0.5] = 0.5
    b_list = list(b_array)
    # NOTE(review): elements are numpy scalars; json may refuse to
    # serialize them depending on the numpy version -- confirm, or cast
    # each via float().
    with open('b_list.json', 'w') as fp: #write to hard drive for faster processing later
        json.dump(b_list, fp)
def get_wind_speed(input_date,file_path):
    '''Create a dictionary for wind speed data on the input date
    Parameters
    ----------
    input_date : string
        input date for the date of interest, in the format: YYYY-MM-DD HH:MM
    file_path : string
        path to the feather files containing the hourly data from Environment & Climate Change Canada
    Returns
    ----------
    dictionary
        - a dictionary of wind speed values for all the active & non-null stations on the input date
    '''
    # BUG FIX: the function was defined twice back to back -- the first
    # definition contained only this docstring (returning None) and was
    # immediately shadowed by the real one.  Collapsed into a single def.
    ws_dictionary = {}
    # Validates the input format up front; raises ValueError when malformed.
    search_date = datetime.strptime(input_date, '%Y-%m-%d %H:%M')
    for station_name in os.listdir(file_path):
        for file_name in os.listdir(file_path+station_name+'/'):
            # Filename positions 29:31 / 32:36 encode month / year; checking
            # them first avoids reading files for other periods.
            if input_date[5:7] == file_name[29:31] and input_date[0:4] == file_name[32:36]:
                file = file_path+station_name+'/'+file_name
                df = feather.read_dataframe(file)
                try:
                    # .item() raises ValueError when there is not exactly
                    # one matching row; hoisted so it runs once per file.
                    value = df.loc[df['Date/Time'] == input_date, 'Wind Spd (km/h)'].item()
                    if pd.notnull(value):
                        # Sanity-check the reading before keeping it.
                        if float(value) >= 315:
                            print('The wind speed for %s corresponds to the most severe class of Tornado for the Enhanced Fujita Scale - Canada'%(station_name))
                        elif float(value) < 0:
                            print('The wind speed for %s is less than 0'%(station_name))
                        else:
                            ws_dictionary[station_name] = value
                except ValueError:
                    pass
    return ws_dictionary
def get_noon_temp(input_date,file_path):
    '''Create a dictionary for noon temp data on the input date
    Parameters
    ----------
    input_date : string
        input date for the date of interest, in the format: YYYY-MM-DD HH:MM
    file_path : string
        path to the feather files containing the hourly data from Environment & Climate Change Canada
    Returns
    ----------
    dictionary
        - a dictionary of temperature values for all the active & non-null stations on the input date
    '''
    temp_dictionary = {}
    # Parsing also validates the input format (raises ValueError if bad).
    search_date = datetime.strptime(input_date, '%Y-%m-%d %H:%M')
    for station_name in os.listdir(file_path):
        station_dir = file_path + station_name + '/'
        for file_name in os.listdir(station_dir):
            # Filename positions 29:31 / 32:36 encode month / year; skip
            # files for other periods without reading them.
            if input_date[5:7] == file_name[29:31] and input_date[0:4] == file_name[32:36]:
                df = feather.read_dataframe(station_dir + file_name)
                try:
                    reading = df.loc[df['Date/Time'] == input_date, 'Temp (°C)'].item()
                    if pd.notnull(reading):
                        # The value is stored first; the record-range check
                        # below only warns, it does not drop the reading.
                        temp_dictionary[station_name] = reading
                        if float(reading) > 42.2 or float(reading) < -58.3:
                            print('The temperature for %s is either greater than the record high temperature recorded in Ontario or Québec or lower than the record lowest temperature'%(station_name))
                except ValueError:
                    pass
    return temp_dictionary
def get_relative_humidity(input_date,file_path):
    '''Create a dictionary for rh% data on the input date
    Parameters
    ----------
    input_date : string
        input date for the date of interest, in the format: YYYY-MM-DD HH:MM
    file_path : string
        path to the feather files containing the hourly data from Environment & Climate Change Canada
    Returns
    ----------
    dictionary
        - a dictionary of relative humidity values for all the active & non-null stations on the input date
    '''
    RH_dictionary = {}
    # Parsing also validates the input format (raises ValueError if bad).
    search_date = datetime.strptime(input_date, '%Y-%m-%d %H:%M')
    for station_name in os.listdir(file_path):
        for file_name in os.listdir(file_path+station_name+'/'):
            # Filename positions 29:31 / 32:36 encode month / year.
            if input_date[5:7] == file_name[29:31] and input_date[0:4] == file_name[32:36]:
                file = file_path+station_name+'/'+file_name
                df = feather.read_dataframe(file)
                try:
                    value = df.loc[df['Date/Time'] == input_date, 'Rel Hum (%)'].item()
                    if pd.notnull(value):
                        if float(value) > 100:
                            # BUG FIX: the original format string ended with a
                            # bare '%' ("...greater than 100%" % name), which
                            # raises "ValueError: incomplete format" whenever
                            # this branch runs; '%' must be escaped as '%%'.
                            print('The relative humidity for %s is greater than 100%%'%(station_name))
                        else:
                            RH_dictionary[station_name] = value
                except ValueError:
                    pass
    return RH_dictionary
def get_pcp_dictionary_by_year(file_path):
'''Create a lookup file for the year_month that each daily station has data for faster
processing later --> this is an input to get_pcp
Parameters
----------
file_path : string
file path to the daily csv files provided by Environment & Climate Change Canada, including the name of the file
'''
date_dictionary = {}
for station_name in os.listdir(file_path):
yearList = []
count = 0
with open(file_path+station_name, encoding='latin1') as year_information:
for row in year_information:
information = row.rstrip('\n').split(',')
information_stripped = [i.replace('"','') for i in information]
if count==0:
header= information_stripped
keyword = 'month' #There is also the flag which is why we include the (
idx_list = [i for i, x in enumerate(header) if keyword in x.lower()]
if len(idx_list) >1:
print('The program is confused because there is more than one field name that could \
contain the month. Please check on this.') #there could be no index if the file is empty, which sometimes happens
sys.exit()
keyword2 = 'year'
idx_list2 = [i for i, x in enumerate(header) if keyword2 in | |
continue
if os.path.isdir(os.path.join(item_path, adm_name)):
xml_entries_relocate(item_path, from_url, to_url)
# Poor mans relocate to fix up an working copy to refer to a
# valid repository, so svn upgrade can do its work on it
def simple_entries_replace(path, from_url, to_url):
    """Rewrite FROM_URL to TO_URL in this working copy's old-format
    'entries' file and recurse into versioned subdirectories."""
    adm_name = svntest.main.get_admin_name()
    entries = os.path.join(path, adm_name, 'entries')
    # BUG FIX: the original opened the file twice without ever closing
    # either handle; use context managers so both are closed promptly.
    with open(entries) as entries_file:
        txt = entries_file.read().replace(from_url, to_url)
    # Entries files ship read-only in the test tarballs; make writable.
    # (0o777 replaces the Python-2-only literal 0777.)
    os.chmod(entries, 0o777)
    with open(entries, 'wb') as entries_file:
        entries_file.write(txt)
    for dirent in os.listdir(path):
        item_path = os.path.join(path, dirent)
        if dirent == adm_name:
            continue
        # Recurse only into directories that are themselves working copies.
        if os.path.isdir(os.path.join(item_path, adm_name)):
            simple_entries_replace(item_path, from_url, to_url)
def basic_upgrade_1_0(sbox):
    "test upgrading a working copy created with 1.0.0"
    # Install a pre-built 1.0.0-format working copy from a tarball and
    # relocate its recorded URL onto this test's repository.
    sbox.build(create_wc = False)
    replace_sbox_with_tarfile(sbox, 'upgrade_1_0.tar.bz2')
    url = sbox.repo_url
    xml_entries_relocate(sbox.wc_dir, 'file:///1.0.0/repos', url)
    # Attempt to use the working copy, this should give an error
    expected_stderr = wc_is_too_old_regex
    svntest.actions.run_and_verify_svn(None, None, expected_stderr,
                                       'info', sbox.wc_dir)
    # Now upgrade the working copy
    svntest.actions.run_and_verify_svn(None, None, [],
                                       'upgrade', sbox.wc_dir)
    # And the separate working copy below COPIED or check_format() fails
    svntest.actions.run_and_verify_svn(None, None, [],
                                       'upgrade',
                                       os.path.join(sbox.wc_dir, 'COPIED', 'G'))
    # Actually check the format number of the upgraded working copy
    check_format(sbox, get_current_format())
    # Now check the contents of the working copy
    # #### This working copy is not just a basic tree,
    #      fix with the right data once we get here
    expected_status = svntest.wc.State(sbox.wc_dir,
                                       {
        '' : Item(status='  ', wc_rev=7),
        'B' : Item(status='  ', wc_rev='7'),
        'B/mu' : Item(status='  ', wc_rev='7'),
        'B/D' : Item(status='  ', wc_rev='7'),
        'B/D/H' : Item(status='  ', wc_rev='7'),
        'B/D/H/psi' : Item(status='  ', wc_rev='7'),
        'B/D/H/omega' : Item(status='  ', wc_rev='7'),
        'B/D/H/zeta' : Item(status='MM', wc_rev='7'),
        'B/D/H/chi' : Item(status='  ', wc_rev='7'),
        'B/D/gamma' : Item(status='  ', wc_rev='9'),
        'B/D/G' : Item(status='  ', wc_rev='7'),
        'B/D/G/tau' : Item(status='  ', wc_rev='7'),
        'B/D/G/rho' : Item(status='  ', wc_rev='7'),
        'B/D/G/pi' : Item(status='  ', wc_rev='7'),
        'B/B' : Item(status='  ', wc_rev='7'),
        'B/B/lambda' : Item(status='  ', wc_rev='7'),
        'MKDIR' : Item(status='A ', wc_rev='0'),
        'MKDIR/MKDIR' : Item(status='A ', wc_rev='0'),
        'A' : Item(status='  ', wc_rev='7'),
        'A/B' : Item(status='  ', wc_rev='7'),
        'A/B/lambda' : Item(status='  ', wc_rev='7'),
        'A/D' : Item(status='  ', wc_rev='7'),
        'A/D/G' : Item(status='  ', wc_rev='7'),
        'A/D/G/rho' : Item(status='  ', wc_rev='7'),
        'A/D/G/pi' : Item(status='  ', wc_rev='7'),
        'A/D/G/tau' : Item(status='  ', wc_rev='7'),
        'A/D/H' : Item(status='  ', wc_rev='7'),
        'A/D/H/psi' : Item(status='  ', wc_rev='7'),
        'A/D/H/omega' : Item(status='  ', wc_rev='7'),
        'A/D/H/zeta' : Item(status='  ', wc_rev='7'),
        'A/D/H/chi' : Item(status='  ', wc_rev='7'),
        'A/D/gamma' : Item(status='  ', wc_rev='7'),
        'A/mu' : Item(status='  ', wc_rev='7'),
        'iota' : Item(status='  ', wc_rev='7'),
        'COPIED' : Item(status='  ', wc_rev='10'),
        'DELETED' : Item(status='D ', wc_rev='10'),
                                       })
    run_and_verify_status_no_server(sbox.wc_dir, expected_status)
    # Verify node metadata survived the upgrade: the root first...
    expected_infos = [ {
        'Node Kind': 'directory',
        'Schedule': 'normal',
        'Revision': '7',
        'Last Changed Author' : 'Bert',
        'Last Changed Rev' : '7'
      } ]
    svntest.actions.run_and_verify_info(expected_infos, sbox.wc_dir)
    # ...then the schedule-delete DELETED directory.
    expected_infos = [ {
        'Node Kind': 'directory',
        'Schedule': 'delete',
        'Revision': '10',
        'Last Changed Author' : 'Bert',
        'Last Changed Rev' : '10'
      } ]
    svntest.actions.run_and_verify_info(expected_infos,
                                        os.path.join(sbox.wc_dir, 'DELETED'))
    # Pristine text bases must have been migrated for these files.
    check_pristine(sbox, ['iota', 'A/mu', 'A/D/H/zeta'])
# Helper function for the x3 tests.
# Helper shared by the x3_* tests below: upgrades a pre-built working copy
# and, unless an upgrade error is expected, verifies status/properties both
# before and after a full revert.
# NOTE(review): expected_error uses a mutable default ([]); it is only read
# (compared with !=), never mutated, so this is safe as written.
def do_x3_upgrade(sbox, expected_error=[]):
    # Attempt to use the working copy, this should give an error
    expected_stderr = wc_is_too_old_regex
    svntest.actions.run_and_verify_svn(None, None, expected_stderr,
                                       'info', sbox.wc_dir)
    # Now upgrade the working copy
    svntest.actions.run_and_verify_svn(None, None, expected_error,
                                       'upgrade', sbox.wc_dir)
    if expected_error != []:
        # The caller expected the upgrade itself to fail; nothing further
        # to check.
        return
    # Actually check the format number of the upgraded working copy
    check_format(sbox, get_current_format())
    # Now check the contents of the working copy
    expected_status = svntest.wc.State(sbox.wc_dir,
                                       {
        ''                  : Item(status=' M', wc_rev='2'),
        'A'                 : Item(status='  ', wc_rev='2'),
        'A/D'               : Item(status='  ', wc_rev='2'),
        'A/D/H'             : Item(status='  ', wc_rev='2'),
        'A/D/H/omega'       : Item(status='  ', wc_rev='2'),
        'A/D/H/psi'         : Item(status='D ', wc_rev='2'),
        'A/D/H/new'         : Item(status='A ', copied='+', wc_rev='-'),
        'A/D/H/chi'         : Item(status='R ', copied='+', wc_rev='-'),
        'A/D/gamma'         : Item(status='D ', wc_rev='2'),
        'A/D/G'             : Item(status='  ', wc_rev='2'),
        'A/B_new'           : Item(status='A ', copied='+', wc_rev='-'),
        'A/B_new/B'         : Item(status='A ', copied='+', wc_rev='-'),
        'A/B_new/B/E'       : Item(status=' M', copied='+', wc_rev='-'),
        'A/B_new/B/E/alpha' : Item(status='  ', copied='+', wc_rev='-'),
        'A/B_new/B/E/beta'  : Item(status='R ', copied='+', wc_rev='-'),
        'A/B_new/B/new'     : Item(status='A ', copied='+', wc_rev='-'),
        'A/B_new/B/lambda'  : Item(status='R ', copied='+', wc_rev='-'),
        'A/B_new/B/F'       : Item(status='  ', copied='+', wc_rev='-'),
        'A/B_new/E'         : Item(status=' M', copied='+', wc_rev='-'),
        'A/B_new/E/alpha'   : Item(status=' M', copied='+', wc_rev='-'),
        'A/B_new/E/beta'    : Item(status='RM', copied='+', wc_rev='-'),
        'A/B_new/lambda'    : Item(status='R ', copied='+', wc_rev='-'),
        'A/B_new/new'       : Item(status='A ', copied='+', wc_rev='-'),
        'A/B_new/F'         : Item(status='  ', copied='+', wc_rev='-'),
        'A/B'               : Item(status=' M', wc_rev='2'),
        'A/B/E'             : Item(status='  ', wc_rev='2'),
        'A/B/E/beta'        : Item(status='RM', copied='+', wc_rev='-'),
        'A/B/E/alpha'       : Item(status=' M', wc_rev='2'),
        'A/B/F'             : Item(status='  ', wc_rev='2'),
        'A/B/lambda'        : Item(status='R ', copied='+', wc_rev='-'),
        'A/B/new'           : Item(status='A ', copied='+', wc_rev='-'),
        'A/G_new'           : Item(status='A ', copied='+', wc_rev='-'),
        'A/G_new/rho'       : Item(status='R ', copied='+', wc_rev='-'),
        'iota'              : Item(status='  ', wc_rev='2'),
        'A_new'             : Item(status='A ', wc_rev='0'),
        'A_new/alpha'       : Item(status='A ', copied='+', wc_rev='-'),
                                       })
    run_and_verify_status_no_server(sbox.wc_dir, expected_status)
    # Properties (including copied-node properties) must survive the upgrade.
    simple_property_verify(sbox.wc_dir, {
        'A/B_new/E/beta'    : {'x3'           : '3x',
                               'svn:eol-style': 'native'},
        'A/B/E/beta'        : {'s'            : 't',
                               'svn:eol-style': 'native'},
        'A/B_new/B/E/alpha' : {'svn:eol-style': 'native'},
        'A/B/E/alpha'       : {'q': 'r',
                               'svn:eol-style': 'native'},
        'A_new/alpha'       : {'svn:eol-style': 'native'},
        'A/B_new/B/new'     : {'svn:eol-style': 'native'},
        'A/B_new/E/alpha'   : {'svn:eol-style': 'native',
                               'u': 'v'},
        'A/B_new/B/E'       : {'q': 'r'},
        'A/B_new/lambda'    : {'svn:eol-style': 'native'},
        'A/B_new/E'         : {'x3': '3x'},
        'A/B_new/new'       : {'svn:eol-style': 'native'},
        'A/B/lambda'        : {'svn:eol-style': 'native'},
        'A/B_new/B/E/beta'  : {'svn:eol-style': 'native'},
        'A/B_new/B/lambda'  : {'svn:eol-style': 'native'},
        'A/B/new'           : {'svn:eol-style': 'native'},
        'A/G_new/rho'       : {'svn:eol-style': 'native'}
    })
    # A recursive revert must bring the upgraded working copy back to a
    # pristine checkout of r2.
    svntest.actions.run_and_verify_svn(None, 'Reverted.*', [],
                                       'revert', '-R', sbox.wc_dir)
    expected_status = svntest.wc.State(sbox.wc_dir,
                                       {
        ''                  : Item(status='  ', wc_rev='2'),
        'A'                 : Item(status='  ', wc_rev='2'),
        'A/D'               : Item(status='  ', wc_rev='2'),
        'A/D/H'             : Item(status='  ', wc_rev='2'),
        'A/D/H/omega'       : Item(status='  ', wc_rev='2'),
        'A/D/H/psi'         : Item(status='  ', wc_rev='2'),
        'A/D/H/chi'         : Item(status='  ', wc_rev='2'),
        'A/D/gamma'         : Item(status='  ', wc_rev='2'),
        'A/D/G'             : Item(status='  ', wc_rev='2'),
        'A/B'               : Item(status='  ', wc_rev='2'),
        'A/B/F'             : Item(status='  ', wc_rev='2'),
        'A/B/E'             : Item(status='  ', wc_rev='2'),
        'A/B/E/beta'        : Item(status='  ', wc_rev='2'),
        'A/B/E/alpha'       : Item(status='  ', wc_rev='2'),
        'A/B/lambda'        : Item(status='  ', wc_rev='2'),
        'iota'              : Item(status='  ', wc_rev='2'),
                                       })
    run_and_verify_status_no_server(sbox.wc_dir, expected_status)
    simple_property_verify(sbox.wc_dir, {
        'A/B/E/beta'        : {'svn:eol-style': 'native'},
#        'A/B/lambda'        : {'svn:eol-style': 'native'},
        'A/B/E/alpha'       : {'svn:eol-style': 'native'}
    })
@Issue(2530)
def x3_1_4_0(sbox):
    "3x same wc upgrade 1.4.0 test"
    # 1.4.0 working copies left properties in an indeterminate state
    # (issue #2530), so the upgrade itself is expected to fail.
    replace_sbox_with_tarfile(sbox, 'wc-3x-1.4.0.tar.bz2', dir='wc-1.4.0')
    do_x3_upgrade(sbox, expected_error='.*E155016: The properties of.*are in an '
                  'indeterminate state and cannot be upgraded. See issue #2530.')
@Issue(3811)
def x3_1_4_6(sbox):
    "3x same wc upgrade 1.4.6 test"
    # Same scenario as x3_1_4_0 but from a 1.4.6 working copy, which is
    # expected to upgrade cleanly.
    replace_sbox_with_tarfile(sbox, 'wc-3x-1.4.6.tar.bz2', dir='wc-1.4.6')
    do_x3_upgrade(sbox)
@Issue(3811)
def x3_1_6_12(sbox):
    "3x same wc upgrade 1.6.12 test"
    # Same scenario again from a 1.6.12 working copy; expected to upgrade
    # cleanly.
    replace_sbox_with_tarfile(sbox, 'wc-3x-1.6.12.tar.bz2', dir='wc-1.6.12')
    do_x3_upgrade(sbox)
def missing_dirs(sbox):
    "missing directories and obstructing files"
    # tarball wc looks like:
    #   svn co URL wc
    #   svn cp wc/A/B wc/A/B_new
    #   rm -rf wc/A/B/E wc/A/D wc/A/B_new/E wc/A/B_new/F
    #   touch wc/A/D wc/A/B_new/F
    # i.e. versioned directories have been removed from disk and some of
    # them replaced by plain files.  The upgrade must still succeed and
    # report the missing nodes as '!' (and the obstructions handled).
    replace_sbox_with_tarfile(sbox, 'missing-dirs.tar.bz2')
    svntest.actions.run_and_verify_svn(None, None, [],
                                       'upgrade', sbox.wc_dir)
    expected_status = svntest.wc.State(sbox.wc_dir,
                                       {
        ''                  : Item(status='  ', wc_rev='1'),
        'A'                 : Item(status='  ', wc_rev='1'),
        'A/mu'              : Item(status='  ', wc_rev='1'),
        'A/C'               : Item(status='  ', wc_rev='1'),
        'A/D'               : Item(status='! ', wc_rev='1'),
        'A/B'               : Item(status='  ', wc_rev='1'),
        'A/B/F'             : Item(status='  ', wc_rev='1'),
        'A/B/E'             : Item(status='! ', wc_rev='1'),
        'A/B/lambda'        : Item(status='  ', wc_rev='1'),
        'iota'              : Item(status='  ', wc_rev='1'),
        'A/B_new'           : Item(status='A ', wc_rev='-', copied='+'),
        'A/B_new/E'         : Item(status='! ', wc_rev='-'),
        'A/B_new/F'         : Item(status='! ', wc_rev='-'),
        'A/B_new/lambda'    : Item(status='  ', wc_rev='-', copied='+'),
                                       })
    run_and_verify_status_no_server(sbox.wc_dir, expected_status)
def missing_dirs2(sbox):
    "missing directories and obstructing dirs"
    # Same tarball as missing_dirs, but the obstructing plain files are
    # replaced with obstructing directories before the upgrade.
    replace_sbox_with_tarfile(sbox, 'missing-dirs.tar.bz2')
    os.remove(sbox.ospath('A/D'))
    os.remove(sbox.ospath('A/B_new/F'))
    os.mkdir(sbox.ospath('A/D'))
    os.mkdir(sbox.ospath('A/B_new/F'))
    svntest.actions.run_and_verify_svn(None, None, [],
                                       'upgrade', sbox.wc_dir)
    # Expected post-upgrade status is identical to the missing_dirs case.
    expected_status = svntest.wc.State(sbox.wc_dir,
                                       {
        ''                  : Item(status='  ', wc_rev='1'),
        'A'                 : Item(status='  ', wc_rev='1'),
        'A/mu'              : Item(status='  ', wc_rev='1'),
        'A/C'               : Item(status='  ', wc_rev='1'),
        'A/D'               : Item(status='! ', wc_rev='1'),
        'A/B'               : Item(status='  ', wc_rev='1'),
        'A/B/F'             : Item(status='  ', wc_rev='1'),
        'A/B/E'             : Item(status='! ', wc_rev='1'),
        'A/B/lambda'        : Item(status='  ', wc_rev='1'),
        'iota'              : Item(status='  ', wc_rev='1'),
        'A/B_new'           : Item(status='A ', wc_rev='-', copied='+'),
        'A/B_new/E'         : Item(status='! ', wc_rev='-'),
        'A/B_new/F'         : Item(status='! ', wc_rev='-'),
        'A/B_new/lambda'    : Item(status='  ', wc_rev='-', copied='+'),
                                       })
    run_and_verify_status_no_server(sbox.wc_dir, expected_status)
@Issue(3808)
def delete_and_keep_local(sbox):
    "check status delete and delete --keep-local"
    # The tarball contains one normal directory, one deleted with
    # --keep-local and one plainly deleted; the upgrade must preserve the
    # distinction (issue #3808).
    replace_sbox_with_tarfile(sbox, 'wc-delete.tar.bz2')
    svntest.actions.run_and_verify_svn(None, None, [],
                                       'upgrade', sbox.wc_dir)
    expected_status = svntest.wc.State(sbox.wc_dir,
                                       {
        ''                  : Item(status='  ', wc_rev='0'),
        'Normal'            : Item(status='  ', wc_rev='1'),
        'Deleted-Keep-Local': Item(status='D ', wc_rev='1'),
        'Deleted'           : Item(status='D ', wc_rev='1'),
                                       })
    run_and_verify_status_no_server(sbox.wc_dir, expected_status)
    # Deleted-Keep-Local should still exist after the upgrade
    if not os.path.exists(os.path.join(sbox.wc_dir, 'Deleted-Keep-Local')):
        raise svntest.Failure('wc/Deleted-Keep-Local should exist')
    # Deleted should be removed after the upgrade as it was
    # schedule delete and doesn't contain unversioned changes.
    if os.path.exists(os.path.join(sbox.wc_dir, 'Deleted')):
        raise svntest.Failure('wc/Deleted should not exist')
def dirs_only_upgrade(sbox):
    "upgrade a wc without files"
    # A working copy containing only directories must upgrade cleanly and
    # report each upgraded directory exactly once.
    replace_sbox_with_tarfile(sbox, 'dirs-only.tar.bz2')
    expected_output = ["Upgraded '%s'\n" % (sbox.ospath('').rstrip(os.path.sep)),
                       "Upgraded '%s'\n" % (sbox.ospath('A'))]
    svntest.actions.run_and_verify_svn(None, expected_output, [],
                                       'upgrade', sbox.wc_dir)
    expected_status = svntest.wc.State(sbox.wc_dir, {
        ''                  : Item(status='  ', wc_rev='1'),
        'A'                 : Item(status='  ', wc_rev='1'),
        })
    run_and_verify_status_no_server(sbox.wc_dir, expected_status)
def read_tree_conflict_data(sbox, path):
dot_svn = svntest.main.get_admin_name()
db = svntest.sqlite3.connect(os.path.join(sbox.wc_dir, dot_svn, 'wc.db'))
for row in db.execute("select | |
Models:
model, versions, parmlist = self.parseMS(modelString)
if model is None:
continue
if not self.acceptPL(WEname, parmlist):
continue
#
# Make labels for each of the model runs we want.
# Singleton databases (like FCST or Official) that have
# no date (actually a 1970 date) have no date/run label.
#
for run in range(0, -versions, -1):
db = self.findDatabase(model, run)
if db is None:
continue
modelId = db.modelIdentifier()
if modelId is None or "" == modelId or modelId in self.dbIds:
continue
if "ISC" == db.modelName():
if allOfficeTypes is None:
allOfficeTypes = self.knownOfficeTypes()
iscOfficeTypes = [self.myOfficeType()]
if plist is None:
plist = self.availableParms()
for pname, plevel, pdb in plist:
if modelId != pdb.modelIdentifier():
continue
for ot in allOfficeTypes:
if pname.endswith(ot) and \
ot not in iscOfficeTypes:
iscOfficeTypes.append(ot)
for otype in iscOfficeTypes:
ltext = "%s (%s):" % (model, otype)
self._addModel(ltext, modelId)
else:
modtime = db.modelTime()
year = modtime.year
if year == 1970:
lbltext = "%s:" % model
else:
month = modtime.month
day = modtime.day
hour = modtime.hour
lbltext = "%s %2.2d/%2.2d %2.2dZ:" % (model, month, day, hour)
self._addModel(lbltext, modelId)
#
# Now run the dialog box to get the weights
# resulting weights stored in Weights array
#
self.dlg = ToolDialog("Set Model Weights",
callbackMethod=self.execWeights,
labels=self.labels)
# self.dlg=TestDialog("Set Model Weights",
# callbackMethod=self.execWeights,
# labels=self.labels)
#
# Cancel the tool in the first pre-processGrid routine.
# No Execute routine is done - and grid is not marked as
# edited. Any editing will take place when they press a
# Button on the dialog and it calls execWeights
#
self.dlg.mainloop()
self.cancel()
def parseMS(self, modelstring):
    """Parse a model string into a model, versions, and parmlist.

    The string has the form "model[:versions[:parmlist]]".  `versions`
    defaults to 1 (non-numeric values keep the default), `parmlist`
    defaults to 'ALL'.  Strings with four or more ':'-separated fields
    are rejected by returning (None, None, None).
    """
    pieces = modelstring.split(":")
    if len(pieces) >= 4:
        # Malformed entry: caller skips model=None entries.
        return (None, None, None)
    model = pieces[0]
    versions = 1
    parmlist = 'ALL'
    if len(pieces) > 1:
        try:
            # Sign is irrelevant; callers negate the count themselves.
            versions = abs(int(pieces[1]))
        except ValueError:
            # BUG FIX: was a bare `except:`; only a non-numeric count
            # should be tolerated (keeping the default of 1).
            pass
    if len(pieces) > 2:
        parmlist = pieces[2]
    return (model, versions, parmlist)
def acceptPL(self, WEName, parmlist):
    """Check WEName against parmlist.

    `parmlist` is a comma-separated list of parm names, or 'ALL'.  A
    leading '^' on the first entry inverts the match.  Returns True when
    WEName passes the filter.
    """
    # BUG FIX: an empty parmlist used to raise IndexError on parms[0][0];
    # treat it as matching nothing.
    if not parmlist:
        return False
    parms = parmlist.split(",")
    invert = parms[0].startswith('^')
    if invert:
        parms[0] = parms[0][1:]
    matched = ('ALL' == parms[0]) or (WEName in parms)
    # XOR flips the result when the list was prefixed with '^'.
    return invert ^ matched
##
#
#
def _addModel(self, text, modelId):
"Add text and modelId to self.labels and self.dbIds, respectively."
self.labels.append(text)
self.dbIds.append(modelId)
#=================================================================
#
# Dummy execute routine. Tool is cancelled in preProcessGrid
# and all the real action is accomplished in execWeights which
# is called when the user presses a button on the dialog
#
def execute(self, variableElement):
    "Specified blend of any/all model/forecast fields"
    # Intentionally a pass-through: the tool is cancelled in the
    # pre-processing hook and the real editing happens in execWeights,
    # invoked from the dialog button callback.  The docstring above is
    # left unchanged as it appears to serve as the tool's description.
    return variableElement
#=================================================================
#
# execWeights - The main calculation routine called when a button
# is pressed in the dialog. Passes in the string
# name of the button pressed
#
def execWeights(self, button):
#
# If user presses cancel, do an immediate return and stop
#
if button == "Cancel":
return
#
# Get the results from the dialog
#
#for num in range(len(Labels)):
# Weights[num]=ScaleIDs[num].get()
EdgeType = self.dlg.edgestyle
EdgeWidth = self.dlg.edgeWidth
#
# If user presses run or run/dismiss, first add up the
# weights (in the ScaleIDs variables) and check for
# common issues like all weights zero, only weights on
# current grid, or grids add up to zero.
#
totweight = 0
fcstweight = 0
someweights = 0
otherweights = 0
dbIds = self.dbIds # alias
weights = self.dlg.weights
maxAbsWeight = max(max(weights), abs(min(weights)))
someweights = (maxAbsWeight > 0.5)
fcstweight = weights[0]
otherweights = sum(weights[1:])
totweight = fcstweight + otherweights
if not someweights:
self.statusBarMsg("ModelBlend has no weights", "R")
return
if abs(fcstweight) > 0.5 and otherweights == 0:
self.statusBarMsg("ModelBlend Weights add to no change", "R")
return
if totweight == 0:
self.statusBarMsg("Weights cannot add up to zero", "A")
return
#
# Get stuff usually provided by tool code:
# fcst=mutable model database name
# selectTR=the selected timerange
#
fcst = self.mutableID().modelIdentifier()
selectTR = self._dbss.getParmOp().getSelectionTimeRange()
#
# get list of parms that are selected and mutable
#
# Making a derivation from AWIPS1's version of this script.
# Instead of calling direct to Java's ParmManager to get the Parm
# objects, we'll use SmartScript's selectedParms() to retrieve native
# Python objects which should save us Java heap space which wouldn't
# be freed otherwise until the user terminates the SmartTool
#
# allParms = self._dbss.getParmManager().getSelectedParms()
allParms = self.selectedParms()
parms = []
for parm in allParms:
# model = parm.getParmID().getDbId().getModelId()
model = parm[2].modelIdentifier()
if model == fcst:
parms.append(parm)
#
# loop over the mutable parms.
# get: wxType - type of parm
# WEname - short parm name string
# parmlevel - parm level string
#
for WEname, parmlevel, dbId in parms:
# Another AWIPS1 derivation: Use of different selectedParms()
# call forces us to retrieve Parm to retrieve some of these
# pieces of information
#
parm = self.getParm(dbId, WEname, parmlevel)
rateParm = parm.getGridInfo().isRateParm()
wxType = str(parm.getGridInfo().getGridType())
del parm
#
# Get list of grids for this parm within the selcted time range
# and loop over each of those grids
#
gridinfos = self.getGridInfo(fcst, WEname, parmlevel, selectTR)
for gridinfo in gridinfos:
GridTimeRange = gridinfo.gridTime()
#
# Easier when just a scalar
#
if 'SCALAR' == wxType:
#
# read each 'model' grid with a non-zero weight
# add up the weights again, because we cannot count
# weights for grids that cannot be read.
#
gsum = self.empty()
totweight = 0
fcstweight = 0
oldgrid = self.getGrids(self.dbIds[0], WEname, "SFC", GridTimeRange, noDataError=0, cache=0)
if oldgrid == None:
self.statusBarMsg("ModelBlend tool could not get Fcst data for " + WEName, "A")
for num, label in enumerate(self.labels):
weight = weights[num]
if weight != 0:
modeType = "TimeWtAverage"
if rateParm == 1:
modeType = "Sum"
#determine source - special if from ISC
idx = label.find("(")
idx1 = label.find(")", idx)
if idx == -1 or idx1 == -1:
WEnameSource = WEname
else:
ot = label[idx + 1:idx1]
if ot == self.myOfficeType():
WEnameSource = WEname
else:
WEnameSource = WEname + ot
grid = self.getGrids(self.dbIds[num], WEnameSource, "SFC", GridTimeRange, mode=modeType, noDataError=0, cache=0)
if grid != None:
gsum += (grid * weight)
totweight += weight
if (num == 0):
fcstweight = weight
else:
errorstring = "ModelBlend tool could not get data for %s" % label
self.statusBarMsg(errorstring, "A")
#
# Check again for no weights, or only weights for the current
# grid - in which case we make no changes and write info message
# otherwise - save the grid
#
if (totweight != 0):
if fcstweight == totweight:
self.statusBarMsg("ModelBlend makes no change", "R")
else:
newgrid = gsum / totweight
finalgrid = self.inEditArea(newgrid, oldgrid, EdgeType, EdgeWidth)
self.createGrid(fcst, WEname, wxType, finalgrid, GridTimeRange)
else:
self.statusBarMsg("ModelBlend weights ended up Zero - so cancelled", "A")
#
# A little more complicated when a vector
#
if 'VECTOR' == wxType:
#
# read each 'model' grid with a non-zero weight
# add up the weights again, because we cannot count
# weights for grids that cannot be read.
#
oldgrid = self.getGrids(dbIds[0], WEname, "SFC", GridTimeRange, noDataError=0, cache=0)
if oldgrid == None:
self.statusBarMsg("ModelBlend tool could not get Fcst data for " + WEName, "A")
(mag, direc) = oldgrid
(uold, vold) = self.MagDirToUV(mag, direc)
usum = self.empty()
vsum = self.empty()
totweight = 0
fcstweight = 0
for num, weight in enumerate(weights):
if weight != 0:
grid = self.getGrids(self.dbIds[num], WEname, "SFC", GridTimeRange, noDataError=0, cache=0)
if grid != None:
(mag, direc) = grid
(u, v) = self.MagDirToUV(mag, direc)
usum += (u * weight)
vsum += (v * weight)
totweight += weight
if (num == 0):
fcstweight = weight
else:
errorstring = "ModelBlend tool could not get data for %s" % self.labels[num]
self.statusBarMsg(errorstring, "A")
#
# Check again for no weights, or only weights for the current
# grid - in which case we make no changes and write info message
# otherwise - save the grid.
#
if (totweight | |
rets, covmatrix, periods_per_year):
'''
Returns a set of n_points optimal weights corresponding to portfolios (of the efficient frontier)
with minimum volatility constructed by fixing n_points target returns.
The weights are obtained by solving the minimization problem for the volatility.
'''
target_rets = np.linspace(rets.min(), rets.max(), n_points)
weights = [minimize_volatility(rets, covmatrix, target) for target in target_rets]
return weights
def minimize_volatility(rets, covmatrix, target_return=None):
    '''
    Returns the optimal weights of the minimum volatility portfolio on the effient frontier.
    If target_return is not None, then the weights correspond to the minimum volatility portfolio
    having a fixed target return.
    The method uses the scipy minimize optimizer which solves the minimization problem
    for the volatility of the portfolio
    '''
    n_assets = rets.shape[0]
    # Start the optimizer from the equally-weighted portfolio.
    start_weights = np.repeat(1 / n_assets, n_assets)
    # Weights must always sum to one.
    budget_constraint = {
        "type": "eq",
        "fun": lambda w: 1.0 - np.sum(w)
    }
    if target_return is None:
        constr = budget_constraint
    else:
        # Additionally pin the portfolio return to the requested target.
        target_constraint = {
            "type": "eq",
            "args": (rets,),
            "fun": lambda w, r: target_return - portfolio_return(w, r)
        }
        constr = (target_constraint, budget_constraint)
    result = minimize(portfolio_volatility,
                      start_weights,
                      args = (covmatrix,),
                      method = "SLSQP",
                      options = {"disp": False},
                      constraints = constr,
                      # each individual weight is bounded between 0 and 1
                      bounds = ((0.0, 1.0),) * n_assets)
    return result.x
def minimize_volatility_2(rets, covmatrix, target_return=None, weights_norm_const=True, weights_bound_const=True):
    '''
    Return the weights of the minimum-volatility portfolio.

    When target_return is not None the optimization is also constrained to that
    portfolio return (the solution then lies on the efficient frontier).
    The flags weights_norm_const and weights_bound_const toggle, respectively,
    the budget constraint (weights sum to one) and the box bounds (each weight
    between 0 and 1). The problem is solved with scipy's SLSQP minimizer
    applied to the portfolio volatility.
    '''
    num_assets = rets.shape[0]
    # start the search from the equally weighted portfolio
    start_weights = np.repeat(1 / num_assets, num_assets)
    # optional box bounds on every weight
    box_bounds = ((0.0, 1.0),) * num_assets if weights_bound_const else None
    active_constraints = []
    if weights_norm_const:
        # budget constraint: weights must sum to one
        active_constraints.append({
            "type": "eq",
            "fun": lambda w: 1.0 - np.sum(w)
        })
    if target_return is not None:
        # pin the portfolio return to the requested target
        active_constraints.append({
            "type": "eq",
            "args": (rets,),
            "fun": lambda w, r: target_return - portfolio_return(w, r)
        })
    solution = minimize(portfolio_volatility,
                        start_weights,
                        args=(covmatrix,),
                        method="SLSQP",
                        options={"disp": False},
                        constraints=tuple(active_constraints),
                        bounds=box_bounds)
    return solution.x
def maximize_shape_ratio(rets, covmatrix, risk_free_rate, periods_per_year, target_volatility=None):
    '''
    Return the weights of the maximum Sharpe ratio portfolio on the efficient frontier.

    When target_volatility is not None, the weights are those of the highest
    Sharpe ratio portfolio with that fixed (annualized) volatility.
    The maximization is carried out by minimizing the negative Sharpe ratio
    with scipy's SLSQP optimizer.
    '''
    def neg_sharpe(weights, rets, covmatrix, risk_free_rate, periods_per_year):
        # Negative annualized Sharpe ratio: minimizing this maximizes the ratio.
        # periods_per_year is e.g. 12, 52 or 252 for monthly/weekly/daily data;
        # risk_free_rate is the annual rate.
        port_ret = portfolio_return(weights, rets)
        port_vol = annualize_vol(portfolio_volatility(weights, covmatrix), periods_per_year)
        return -sharpe_ratio(port_ret, risk_free_rate, periods_per_year, v=port_vol)
    num_assets = rets.shape[0]
    # start the search from the equally weighted portfolio
    start_weights = np.repeat(1 / num_assets, num_assets)
    # budget constraint: weights must sum to one
    budget_constraint = {
        "type": "eq",
        "fun": lambda w: 1.0 - np.sum(w)
    }
    if target_volatility is None:
        all_constraints = budget_constraint
    else:
        # pin the annualized portfolio volatility to the requested target
        volatility_constraint = {
            "type": "eq",
            "args": (covmatrix, periods_per_year),
            "fun": lambda w, cov, p: target_volatility - annualize_vol(portfolio_volatility(w, cov), p)
        }
        all_constraints = (volatility_constraint, budget_constraint)
    solution = minimize(neg_sharpe,
                        start_weights,
                        args=(rets, covmatrix, risk_free_rate, periods_per_year),
                        method="SLSQP",
                        options={"disp": False},
                        constraints=all_constraints,
                        bounds=((0.0, 1.0),) * num_assets)
    return solution.x
def weigths_max_sharpe_ratio(covmat, mu_exc, scale=True):
    '''
    Closed-form optimal (tangent / max Sharpe ratio) portfolio weights via the
    Markowitz optimization procedure:
    - mu_exc: vector of excess expected returns (a column vector as a pd.Series)
    - covmat: N x N covariance matrix as a pd.DataFrame
    See pag. 188 eq. (5.2.28) of "The econometrics of financial markets",
    by Campbell, Lo, Mackinlay.
    '''
    raw_weights = inverse_df(covmat).dot(mu_exc)
    if not scale:
        return raw_weights
    # normalize so the weights sum to one
    return raw_weights / sum(raw_weights)
# ---------------------------------------------------------------------------------
# CPPI backtest strategy
# ---------------------------------------------------------------------------------
def cppi(risky_rets, safe_rets=None, start_value=1000, floor=0.8, m=3, drawdown=None,
         risk_free_rate=0.03, periods_per_year=12):
    '''
    Run a backtest of the CPPI (Constant Proportion Portfolio Insurance)
    investment strategy given a set of returns for a risky asset.

    Parameters:
    - risky_rets: pd.Series or pd.DataFrame of periodic risky-asset returns
      (one column per scenario/asset)
    - safe_rets: optional DataFrame of safe-asset returns; if None, a constant
      risk_free_rate / periods_per_year per period is assumed
    - start_value: initial account value
    - floor: fraction of start_value that is protected (superseded when
      drawdown is given)
    - m: CPPI multiplier (overridden by 1/drawdown when drawdown is given)
    - drawdown: optional maximum tolerated drawdown; enables a ratcheting,
      peak-based floor
    Returns a dict with the account value history, cushion history, risky
    weight history, the 100%-risky wealth path and the safe returns used;
    when drawdown is given it also contains the floor/peak histories and
    the implied multiplier.
    '''
    # Normalize the input to a DataFrame up front so every history frame
    # below shares its shape and index.
    # FIX: the original passed columns="Risky return" (a bare string, not a
    # list) to the DataFrame constructor, which breaks for Series input.
    if isinstance(risky_rets, pd.Series):
        risky_rets = risky_rets.to_frame("Risky return")
    # wealth path of a 100% investment in the risky asset, for comparison
    risky_wealth = start_value * (1 + risky_rets).cumprod()
    # CPPI state
    account_value = start_value
    floor_value = floor * account_value
    # If returns of safe assets are not available just make artificial ones
    if safe_rets is None:
        safe_rets = pd.DataFrame().reindex_like(risky_rets)
        safe_rets[:] = risk_free_rate / periods_per_year
    # History dataframes
    account_history = pd.DataFrame().reindex_like(risky_rets)
    cushion_history = pd.DataFrame().reindex_like(risky_rets)
    risky_w_history = pd.DataFrame().reindex_like(risky_rets)
    # Extra history dataframes in presence of drawdown
    if drawdown is not None:
        peak_history = pd.DataFrame().reindex_like(risky_rets)
        floor_history = pd.DataFrame().reindex_like(risky_rets)
        peak = start_value
        # the multiplier implied by the maximum tolerated drawdown
        m = 1 / drawdown
    # For loop over dates
    for step in range(len(risky_rets.index)):
        if drawdown is not None:
            # ratchet the peak up and recompute the floor from it
            peak = np.maximum(peak, account_value)
            floor_value = peak * (1 - drawdown)
            # FIX: record the peak; the original returned an all-NaN
            # "Peaks" frame because this assignment was missing
            peak_history.iloc[step] = peak
            floor_history.iloc[step] = floor_value
        # cushion as a percentage of the current account value
        cushion = (account_value - floor_value) / account_value
        # risky-asset weight, clipped into [0, 1] (no leverage, no shorting)
        risky_w = m * cushion
        risky_w = np.minimum(risky_w, 1)
        risky_w = np.maximum(risky_w, 0)
        # weight of the allocation on the safe asset
        safe_w = 1 - risky_w
        # currency allocations
        risky_allocation = risky_w * account_value
        safe_allocation = safe_w * account_value
        # new account value: both legs earn one period of returns
        account_value = risky_allocation * (1 + risky_rets.iloc[step]) + safe_allocation * (1 + safe_rets.iloc[step])
        # save data: current account value, cushions, weights
        account_history.iloc[step] = account_value
        cushion_history.iloc[step] = cushion
        risky_w_history.iloc[step] = risky_w
    # Given the CPPI wealth saved in account_history, back out the CPPI returns
    cppi_rets = (account_history / account_history.shift(1) - 1).dropna()
    # Returning results
    backtest_result = {
        "Risky wealth"    : risky_wealth,
        "CPPI wealth"     : account_history,
        "CPPI returns"    : cppi_rets,
        "Cushions"        : cushion_history,
        "Risky allocation": risky_w_history,
        "Safe returns"    : safe_rets
    }
    if drawdown is not None:
        backtest_result.update({
            "Floor value": floor_history,
            "Peaks"      : peak_history,
            "m"          : m
        })
    return backtest_result
# ---------------------------------------------------------------------------------
# Random walks
# ---------------------------------------------------------------------------------
def simulate_gbm_from_returns(n_years=10, n_scenarios=20, mu=0.07, sigma=0.15, periods_per_year=12, start=100.0):
'''
Evolution of an initial stock price using Geometric Brownian Model:
(S_{t+dt} - S_t)/S_t = mu*dt + sigma*sqrt(dt)*xi,
where xi are normal random variable N(0,1).
The equation for percentage returns above is used to generate returns and they are compounded
in order to get the prices.
Note that default periods_per_year=12 means that the method generates monthly prices (and returns):
change to 52 or 252 for weekly or daily prices and returns, respectively.
The method returns a dataframe of prices and the dataframe of returns.
'''
dt = 1 / periods_per_year
n_steps = int(n_years * periods_per_year)
# from GBM equation for percentage returns, returns have mean = mu*dt and std = sigma*sqrt(dt)
rets = pd.DataFrame( np.random.normal(loc=mu*dt, scale=sigma*(dt)**(0.5), size=(n_steps, n_scenarios)) )
# compute prices by compound the generated returns
prices = compound_returns(rets, start=start)
prices = insert_first_row_df(prices, | |
# scripts/constituents.py
#!/usr/bin/env python
"""
Class and supporting code to hold all information on CCPP constituent
variables. A constituent variable is defined and maintained by the CCPP
Framework instead of the host model.
The ConstituentVarDict class contains methods to generate the necessary code
to implement this support.
"""
# Python library imports
from __future__ import print_function
import os
# CCPP framework imports
from file_utils import KINDS_MODULE
from fortran_tools import FortranWriter
from parse_tools import ParseInternalError
from metavar import Var, VarDictionary
########################################################################
CONST_DDT_NAME = "ccpp_model_constituents_t"
CONST_DDT_MOD = "ccpp_constituent_prop_mod"
CONST_PROP_TYPE = "ccpp_constituent_properties_t"
########################################################################
class ConstituentVarDict(VarDictionary):
"""A class to hold all the constituent variables for a CCPP Suite.
Also contains methods to generate the necessary code for runtime
allocation and support for these variables.
"""
__const_prop_array_name = "ccpp_constituent_array"
__const_prop_init_name = "ccpp_constituents_initialized"
__const_prop_init_consts = "ccpp_create_constituent_array"
__const_prop_type_name = "ccpp_constituent_properties_t"
__constituent_type = "suite"
    def __init__(self, name, parent_dict, variables=None, logger=None):
        """Create a specialized VarDictionary for constituents.
        The main difference is functionality to allocate and support
        these variables with special functions for the host model.
        The main reason for a separate dictionary is that these are not
        proper Suite variables but will belong to the host model at run time.
        The <parent_dict> feature of the VarDictionary class is required
        because this dictionary must be connected to a host model.

        name (str): dictionary name, passed through to VarDictionary
        parent_dict (VarDictionary): enclosing dictionary (host model link)
        variables: optional initial variables, passed through unchanged
        logger: optional logger, passed through unchanged
        """
        # All storage and lookup behavior is inherited from VarDictionary;
        # this subclass only specializes variable search/creation (see
        # find_variable) and code generation.
        super(ConstituentVarDict, self).__init__(name, variables=variables,
                                                 parent_dict=parent_dict,
                                                 logger=logger)
def find_variable(self, standard_name=None, source_var=None,
any_scope=True, clone=None,
search_call_list=False, loop_subst=False):
"""Attempt to return the variable matching <standard_name>.
if <standard_name> is None, the standard name from <source_var> is used.
It is an error to pass both <standard_name> and <source_var> if
the standard name of <source_var> is not the same as <standard_name>.
If <any_scope> is True, search parent scopes if not in current scope.
Note: Unlike the <VarDictionary> version of this method, the case for
CCPP_CONSTANT_VARS is not handled -- it should have been handled
by a lower level.
If the variable is not found but is a constituent variable type,
create the variable in this dictionary
Note that although the <clone> argument is accepted for consistency,
cloning is not handled at this level.
If the variable is not found and <source_var> is not a constituent
variable, return None.
"""
if standard_name is None:
if source_var is None:
emsg = "One of <standard_name> or <source_var> must be passed."
raise ParseInternalError(emsg)
# end if
standard_name = source_var.get_prop_value('standard_name')
elif source_var is not None:
stest = source_var.get_prop_value('standard_name')
if stest != standard_name:
emsg = ("Only one of <standard_name> or <source_var> may " +
"be passed.")
raise ParseInternalError(emsg)
# end if
# end if
if standard_name in self:
var = self[standard_name]
elif any_scope and (self._parent_dict is not None):
srch_clist = search_call_list
var = self._parent_dict.find_variable(standard_name=standard_name,
source_var=source_var,
any_scope=any_scope,
clone=None,
search_call_list=srch_clist,
loop_subst=loop_subst)
else:
var = None
# end if
if (var is None) and source_var and source_var.is_constituent():
# If we did not find the variable and it is a constituent type,
# add a clone of <source_var> to our dictionary.
# First, maybe do a loop substitution
dims = source_var.get_dimensions()
newdims = list()
for dim in dims:
dstdnames = dim.split(':')
new_dnames = list()
for dstdname in dstdnames:
if dstdname == 'horizontal_loop_extent':
new_dnames.append('horizontal_dimension')
elif dstdname == 'horizontal_loop_end':
new_dnames.append('horizontal_dimension')
elif dstdname == 'horizontal_loop_begin':
new_dnames.append('ccpp_constant_one')
else:
new_dnames.append(dstdname)
# end if
# end for
newdims.append(':'.join(new_dnames))
# end for
var = source_var.clone({'dimensions' : newdims}, remove_intent=True,
source_type=self.__constituent_type)
self.add_variable(var)
return var
def declare_public_interfaces(self, outfile, indent):
"""Declare the public constituent interfaces.
Declarations are written to <outfile> at indent, <indent>."""
outfile.write("! Public interfaces for handling constituents", indent)
outfile.write("! Return the number of constituents for this suite",
indent)
outfile.write("public :: {}".format(self.num_consts_funcname()), indent)
outfile.write("! Return the name of a constituent", indent)
outfile.write("public :: {}".format(self.const_name_subname()), indent)
outfile.write("! Copy the data for a constituent", indent)
outfile.write("public :: {}".format(self.copy_const_subname()), indent)
def declare_private_data(self, outfile, indent):
"""Declare private suite module variables and interfaces
to <outfile> with indent, <indent>."""
outfile.write("! Private constituent module data", indent)
if self:
stmt = "type({}), private, allocatable :: {}(:)"
outfile.write(stmt.format(self.constituent_prop_type_name(),
self.constituent_prop_array_name()),
indent)
# end if
stmt = "logical, private :: {} = .false."
outfile.write(stmt.format(self.constituent_prop_init_name()), indent)
outfile.write("! Private interface for constituents", indent)
stmt = "private :: {}"
outfile.write(stmt.format(self.constituent_prop_init_consts()), indent)
def _write_init_check(self, outfile, indent, suite_name,
errvar_names, use_errflg):
"""Write a check to <outfile> to make sure the constituent properties
are initialized. Write code to initialize the error variables and/or
set them to error values."""
outfile.write('', 0)
if use_errflg:
outfile.write("errflg = 0", indent+1)
outfile.write("errmsg = ''", indent+1)
else:
raise ParseInternalError("Alternative to errflg not implemented")
# end if
outfile.write("! Make sure that our constituent array is initialized",
indent+1)
stmt = "if (.not. {}) then"
outfile.write(stmt.format(self.constituent_prop_init_name()), indent+1)
if use_errflg:
outfile.write("errflg = 1", indent+2)
stmt = 'errmsg = "constituent properties not '
stmt += 'initialized for suite, {}"'
outfile.write(stmt.format(suite_name), indent+2)
outfile.write("end if", indent+1)
# end if (no else until an alternative error mechanism supported)
def _write_index_check(self, outfile, indent, suite_name,
errvar_names, use_errflg):
"""Write a check to <outfile> to make sure the "index" input
is in bounds. Write code to set error variables if index is
out of bounds."""
if use_errflg:
if self:
outfile.write("if (index < 1) then", indent+1)
outfile.write("errflg = 1", indent+2)
stmt = "write(errmsg, '(a,i0,a)') 'ERROR: index (',index,') "
stmt += "too small, must be >= 1'"
outfile.write(stmt, indent+2)
stmt = "else if (index > SIZE({})) then"
outfile.write(stmt.format(self.constituent_prop_array_name()),
indent+1)
outfile.write("errflg = 1", indent+2)
stmt = "write(errmsg, '(2(a,i0))') 'ERROR: index (',index,') "
stmt += "too large, must be <= ', SIZE({})"
outfile.write(stmt.format(self.constituent_prop_array_name()),
indent+2)
outfile.write("end if", indent+1)
else:
outfile.write("errflg = 1", indent+1)
stmt = "write(errmsg, '(a,i0,a)') 'ERROR: suite, {}, "
stmt += "has no constituents'"
outfile.write(stmt, indent+1)
# end if
else:
raise ParseInternalError("Alternative to errflg not implemented")
# end if
def write_constituent_routines(self, outfile, indent, suite_name, err_vars):
"""Write the subroutine that, when called allocates and defines the
suite-cap module variable describing the constituent species for
this suite.
Code is written to <outfile> starting at indent, <indent>."""
# Format our error variables
errvar_names = [x.get_prop_value('local_name') for x in err_vars]
use_errflg = ('errflg' in errvar_names) and ('errmsg' in errvar_names)
errvar_alist = ", ".join([x for x in errvar_names])
errvar_alist2 = ", {}".format(errvar_alist) if errvar_alist else ""
errvar_call = ", ".join(["{}={}".format(x,x) for x in errvar_names])
errvar_call2 = ", {}".format(errvar_call) if errvar_call else ""
# Allocate and define constituents
stmt = "subroutine {}({})".format(self.constituent_prop_init_consts(),
errvar_alist)
outfile.write(stmt, indent)
outfile.write("! Allocate and fill the constituent property array",
indent + 1)
outfile.write("! for this suite", indent+1)
outfile.write("! Dummy arguments", indent+1)
for evar in err_vars:
evar.write_def(outfile, indent+1, self, dummy=True)
# end for
if self:
outfile.write("! Local variables", indent+1)
outfile.write("integer :: index", indent+1)
stmt = "allocate({}({}))"
outfile.write(stmt.format(self.constituent_prop_array_name(),
len(self)), indent+1)
outfile.write("index = 0", indent+1)
# end if
for std_name, var in self.items():
outfile.write("index = index + 1", indent+1)
dims = var.get_dim_stdnames()
if 'vertical_layer_dimension' in dims:
vertical_dim = 'vertical_layer_dimension'
elif 'vertical_interface_dimension' in dims:
vertical_dim = 'vertical_interface_dimension'
else:
vertical_dim = ''
# end if
advect_str = self.TF_string(var.get_prop_value('advected'))
stmt = 'call {}(index)%initialize("{}", "{}", {}{})'
outfile.write(stmt.format(self.constituent_prop_array_name(),
std_name, vertical_dim, advect_str,
errvar_call2), indent+1)
# end for
outfile.write("{} = .true.".format(self.constituent_prop_init_name()),
indent+1)
stmt = "end subroutine {}".format(self.constituent_prop_init_consts())
outfile.write(stmt, indent)
outfile.write("", 0)
outfile.write("\n! {}\n".format("="*72), 1)
# Return number of constituents
fname = self.num_consts_funcname()
outfile.write("integer function {}({})".format(fname, errvar_alist),
indent)
outfile.write("! Return the number of constituents for this suite",
indent+1)
outfile.write("! Dummy arguments", indent+1)
for evar in err_vars:
evar.write_def(outfile, indent+1, self, dummy=True)
# end for
outfile.write("! Make sure that our constituent array is initialized",
indent+1)
stmt = "if (.not. {}) then"
outfile.write(stmt.format(self.constituent_prop_init_name()), indent+1)
outfile.write("call {}({})".format(self.constituent_prop_init_consts(),
errvar_call), indent+2)
outfile.write("end if", indent+1)
outfile.write("{} = {}".format(fname, len(self)), indent+1)
outfile.write("end function {}".format(fname), indent)
outfile.write("\n! {}\n".format("="*72), 1)
# Return the name of a constituent given an index
stmt = "subroutine {}(index, name_out{})"
outfile.write(stmt.format(self.const_name_subname(), errvar_alist2),
indent)
outfile.write("! Return the name of constituent, <index>", indent+1)
outfile.write("! Dummy arguments", indent+1)
outfile.write("integer, intent(in) :: index", indent+1)
outfile.write("character(len=*), intent(out) :: name_out", indent+1)
for evar in err_vars:
evar.write_def(outfile, indent+1, self, dummy=True)
# end for
self._write_init_check(outfile, indent, suite_name,
errvar_names, use_errflg)
self._write_index_check(outfile, indent, suite_name,
errvar_names, use_errflg)
if self:
stmt = "call {}(index)%standard_name(name_out{})"
outfile.write(stmt.format(self.constituent_prop_array_name(),
errvar_call2), indent+1)
# end if
outfile.write("end subroutine {}".format(self.const_name_subname()),
indent)
outfile.write("\n! {}\n".format("="*72), 1)
# Copy a consitituent's properties
stmt = "subroutine {}(index, cnst_out{})"
fname = self.copy_const_subname()
outfile.write(stmt.format(fname, errvar_alist2), indent)
outfile.write("! Copy the data for a | |
import sys
import traceback
import logging
import pickle
sys.path.append("..") # Adds higher directory to python modules path.
from utils import config_reference as cfg
from data import load_dataset
from models import model_loader
from models import knowledge_distillation_models
from utils import teacher_utils, helper_util
import datetime
import json
import ast
import os
from tensorflow.python.keras import backend as K
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.losses import categorical_crossentropy as logloss
from tensorflow.python.keras.utils import multi_gpu_model
from tensorflow.keras.optimizers import Adadelta, SGD, Adam
from tensorflow.python.keras.backend import clear_session
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
def import_config(config_file_path):
    """Load and return the JSON experiment configuration at the given path."""
    with open(config_file_path, 'r') as config_handle:
        return json.load(config_handle)
# TODO include best train and validation accuracies, may be more telling
def create_result(netSize, temp, alpha, train_score, val_score):
    """Build the result record for one trained model.

    All values are stringified for JSON serialization; scores are
    (loss, accuracy) pairs as returned by Keras model.evaluate.
    """
    return {
        "date_time": str(datetime.datetime.now()),
        "net_size": str(netSize),
        "temp": str(temp),
        "alpha": str(alpha),
        "val_acc": str(val_score[1]),
        "acc": str(train_score[1]),
        "val_loss": str(val_score[0]),
        "loss": str(train_score[0]),
    }
def create_meta(dataset, teacher_name, epochs, temp, alpha, order):
    """Build the experiment metadata record (all values stringified)."""
    return {
        "date_time": str(datetime.datetime.now()),
        "dataset": str(dataset),
        "teacher_name": str(teacher_name),
        "epochs": str(epochs),
        "temp": str(temp),
        "alpha": str(alpha),
        "order": str(order),
    }
def find_largest_value(output_distribution):
    """Return the largest value in a model's output distribution.

    Replaces a hand-rolled scan (which also tracked an unused position
    variable) with the built-in max(). Note: an empty distribution now
    raises ValueError instead of the original IndexError; both were errors.
    """
    return max(output_distribution)
# method to check for already saved copy of teacher knowledge
def get_pretrained_teacher_logits(logits_dir, netSize, alpha, dataset, trainOrder):
    """Load previously cached teacher logits for a network size, if present.

    The cache filename encodes dataset and net size; for non-maximal sizes it
    also encodes alpha and the training order. Returns a
    (train_logits, test_logits) pair, or (None, None) when no cache exists.
    """
    if netSize == cfg.max_net_size:
        target_file = str(dataset) + "_" + str(netSize) + ".pkl"
    else:
        target_file = str(dataset) + "_" + str(netSize) + "_" + str(alpha) + "_" + str(trainOrder) + ".pkl"
    target_file = target_file.replace(" ", "")
    logitFileName = os.path.join(logits_dir, target_file)
    if not os.path.isfile(logitFileName):  # check for logit file existence
        print("logits do not exist for netSize: %s" % str(netSize))
        return None, None
    # FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    # NOTE: pickle is only safe here because these cache files are produced
    # by this same script (save_pretrained_teacher_logits).
    with open(logitFileName, 'rb') as filehandler:
        teacher_train_logits = pickle.load(filehandler)
        teacher_test_logits = pickle.load(filehandler)
    return teacher_train_logits, teacher_test_logits
def save_pretrained_teacher_logits(logits_dir, netSize, alpha, teacher_train_logits, teacher_test_logits, dataset, trainOrder):
    """Cache teacher train/test logits to disk for later reuse.

    The cache filename encodes dataset and net size; for non-maximal sizes it
    also encodes alpha and the training order (mirrors
    get_pretrained_teacher_logits).
    """
    if netSize == cfg.max_net_size:
        target_file = str(dataset) + "_" + str(netSize) + ".pkl"
    else:
        target_file = str(dataset) + "_" + str(netSize) + "_" + str(alpha) + "_" + str(trainOrder) + ".pkl"
    target_file = target_file.replace(" ", "")
    logitFileName = os.path.join(logits_dir, target_file)
    # FIX: use a context manager so the handle is closed even if a dump fails
    with open(logitFileName, 'wb') as filehandler:
        pickle.dump(teacher_train_logits, filehandler)
        pickle.dump(teacher_test_logits, filehandler)
    print("saving pretrained teacher logits - size: %s, dataset: %s" % (netSize, dataset))
    print(logitFileName)
    print(os.path.isfile(logitFileName))
def get_optimizer(type):
    """Build a Keras optimizer by name: 'adam', 'adadelta' or 'sgd'.

    Raises ValueError for an unknown name (the original silently fell
    through and crashed with UnboundLocalError).
    """
    # FIX: compare strings with ==, not `is` (identity comparison of string
    # literals is an implementation detail and breaks for non-interned input)
    if type == "adam":
        return Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    if type == "adadelta":
        # FIX: the class is imported as Adadelta; lowercase `adadelta`
        # was an unconditional NameError on this branch
        return Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    if type == "sgd":
        return SGD(lr=0.01, momentum=0.9, nesterov=True)
    raise ValueError("unknown optimizer type: %s" % type)
def saved_model(logger, dataset, net_size, alpha, val_score, order, model, model_dir):
    """Persist the trained model's weights under <model_dir>.

    The filename encodes net size and validation accuracy; non-maximal nets
    also encode alpha and the training order (spaces are stripped).
    """
    if net_size == cfg.max_net_size:
        name_parts = (dataset, net_size, val_score[1])
    else:
        name_parts = (net_size, alpha, order, val_score[1])
    target_file_suffix = "_".join(str(part) for part in name_parts)
    target_file_suffix = target_file_suffix.replace(" ", "")
    modelWeightFile = os.path.join(model_dir, target_file_suffix + ".h5")
    model.save_weights(modelWeightFile)
    logger.info("Saved trained model to: " + modelWeightFile)
def run(logger, options, session_log_file, logits_dir, models_dir):
logger.info(cfg.student_train_spacer + "GENERIC MULTISTAGE" + cfg.student_train_spacer)
with open(session_log_file, "w") as f:
f.write("begin test: " + datetime.datetime.now().isoformat() + "\n")
f.close()
# load configuration file
configuration = import_config(options.config_file_path)
teacher_name = configuration['teacher_name']
epochs = configuration['epochs']
temperatures = configuration['temp_config']
alphas = configuration['alpha_config']
order_combinations = configuration['size_combinations']
# loading training data
X_train, Y_train, X_test, Y_test = load_dataset.load_dataset_by_name(logger, cfg.dataset)
# mean subtraction regularization
if cfg.subtract_pixel_mean is True:
x_train_mean = np.mean(X_train, axis=0)
X_train -= x_train_mean
X_test -= x_train_mean
if cfg.use_fit_generator_student is True or cfg.use_fit_generator_teacher is True:
# data generator for on the fly training data manipulation
datagen = ImageDataGenerator(
# set input mean to 0 over the dataset
featurewise_center=False,
# set each sample mean to 0
samplewise_center=False,
# divide inputs by std of dataset
featurewise_std_normalization=False,
# divide each input by its std
samplewise_std_normalization=False,
# apply ZCA whitening
zca_whitening=False,
# epsilon for ZCA whitening
zca_epsilon=1e-06,
# randomly rotate images in the range (deg 0 to 180)
rotation_range=0,
# randomly shift images horizontally
width_shift_range=0.1,
# randomly shift images vertically
height_shift_range=0.1,
# set range for random shear
shear_range=0.,
# set range for random zoom
zoom_range=0.,
# set range for random channel shifts
channel_shift_range=0.,
# set mode for filling points outside the input boundaries
fill_mode='nearest',
# value used for fill_mode = "constant"
cval=0.,
# randomly flip images
horizontal_flip=True,
# randomly flip images
vertical_flip=False,
# set rescaling factor (applied before any other transformation)
rescale=None,
# set function that will be applied on each input
preprocessing_function=None,
# image data format, either "channels_first" or "channels_last"
data_format=None,
# fraction of images reserved for validation (strictly between 0 and 1)
validation_split=0.0)
datagen.fit(X_train)
try:
for order in order_combinations:
for temp in temperatures:
# clearing all saved teacher logits
for alpha in alphas:
tf.keras.backend.clear_session() # must clear the current session to free memory!
K.clear_session() # must clear the current session to free memory!
logger.info("Clearing tensorflow/keras backend session and de-allocating remaining models...")
model = None
previousModel = None
if teacher_name is not None:
ssm = model_loader(logger, options.teacherModel)
previousModel = ssm.get_loaded_model()
teacher_name = options.teacherModel
# creating experiment1 metadata
experiment_result = {"experiment_results": []} # empty space for our experiment1's data
experiment_metadata = create_meta(cfg.dataset, teacher_name, epochs, temp, alpha, order)
experiment_result['metadata'] = experiment_metadata
# performing experiment on given size, alpha, and temperature combination
for net_size in order:
model = None
# perform KD if there is a previously trained model to work with
if previousModel is not None:
model = knowledge_distillation_models.get_model(cfg.dataset, cfg.dataset_num_classes, X_train,
net_size, )
logger.info("loading soft targets for student training...")
print("previous model to load logits for: %s" % str(previousModel))
teacher_train_logits, teacher_test_logits = get_pretrained_teacher_logits(logits_dir, previousModel, alpha, cfg.dataset, order)
Y_train_new, Y_test_new = teacher_utils.convert_logits_to_soft_targets(temp, teacher_train_logits, teacher_test_logits, Y_train, Y_test)
# # TODO remove next three lines
# file_name = "/home/blakete/" + temp + "_" + previousModel + "_training_labels.npy"
# filehandler = open(file_name, 'wb')
# pickle.dump(Y_train_new, filehandler)
# pickle.dump(Y_test_new, filehandler)
if Y_train_new is None or Y_test_new is None:
logger.info("soft targets not loaded correctly!")
else:
logger.info("completed")
# filehandler = open("mnist_10_soft_targets.pkl", 'wb')
# pickle.dump(Y_train_new, filehandler)
# pickle.dump(Y_test_new, filehandler)
model = helper_util.apply_knowledge_distillation_modifications(logger, model, temp)
# model = multi_gpu_model(model, gpus=4)
optimizer = get_optimizer(cfg.student_optimizer)
model.compile(optimizer=optimizer,
loss=lambda y_true, y_pred: helper_util.knowledge_distillation_loss(logger, y_true, y_pred, alpha),
metrics=[helper_util.acc])
logger.info("training model...\norder:%s\nsize:%d\ntemp:%d\nalpha:%f" % (order, net_size, temp, alpha))
callbacks = [
EarlyStopping(monitor='val_acc', patience=50, min_delta=0.00007),
# ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=4, min_lr=0.0001),
ModelCheckpoint(cfg.checkpoint_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
]
if cfg.use_fit_generator_student is True:
model.fit(datagen.flow(X_train, Y_train_new, batch_size=cfg.student_batch_size),
validation_data=(X_test, Y_test_new),
epochs=epochs,
verbose=1,
callbacks=callbacks)
else:
model.fit(X_train, Y_train_new,
batch_size=cfg.student_batch_size,
epochs=epochs,
verbose=1,
callbacks=callbacks,
validation_data=(X_test, Y_test_new))
# model = helper_util.revert_knowledge_distillation_modifications(logger, model)
del model
# train_score, val_score = helper_util.calculate_unweighted_score(logger, model, X_train, Y_train,
# X_test, Y_test)
model = knowledge_distillation_models.get_model(cfg.dataset, cfg.dataset_num_classes,
X_train, net_size, )
# model.summary()
# load best model from checkpoint for evaluation
model.load_weights(cfg.checkpoint_path)
optimizer = get_optimizer(cfg.student_optimizer)
model.compile(optimizer=optimizer,
loss=logloss, # the same as the custom loss function
metrics=['accuracy'])
train_score = model.evaluate(X_train, Y_train, verbose=0)
val_score = model.evaluate(X_test, Y_test, verbose=0)
result = create_result(net_size, temp, alpha, train_score, val_score)
logger.info(result)
experiment_result["experiment_results"].append(result)
# # remove checkpoint of best model for new checkpoint
# os.remove(cfg.checkpoint_path)
# save the trained model the saved model directory
saved_model(logger, cfg.dataset, net_size, alpha, val_score, order, model, models_dir)
if order.index(net_size) < len(order)-1:
# save soft targets
logger.info("creating student training data...")
Y_train_new, Y_test_new = teacher_utils.createStudentTrainingData(model, temp, X_train, Y_train, X_test, Y_test)
save_pretrained_teacher_logits(logits_dir, net_size, alpha, Y_train_new, Y_test_new, cfg.dataset, order)
logger.info("done.")
else:
logger.info("skipping creation of student training data, we are @ target model...")
# clear soft targets
Y_train_new = None
Y_test_new = None
# set model to current net size to preserve in previousModel
model = net_size
# if no previously trained model, train the network
else:
# load the already created soft targets
Y_train_new = None
Y_test_new = None
val_score = None
teacher_train_logits, teacher_test_logits = get_pretrained_teacher_logits(logits_dir, net_size, alpha, cfg.dataset, order)
# train network if not previously created logits
if teacher_train_logits is None or teacher_test_logits is None:
if os.path.isfile(cfg.checkpoint_path):
logger.info("removing previous checkpoint")
os.remove(cfg.checkpoint_path) # remove previous checkpoint
logger.info("training teacher model...\norder:%s\nsize:%d\ntemp:%d\nalpha:%f" % (
order, net_size, temp, alpha))
model = knowledge_distillation_models.get_model(cfg.dataset, cfg.dataset_num_classes,
X_train, net_size, )
# model.summary()
optimizer = get_optimizer(cfg.start_teacher_optimizer)
model.compile(optimizer=optimizer,
loss=logloss, # the same as the custom loss function
metrics=['accuracy'])
# train network and save model with bet validation accuracy to cfg.checkpoint_path
callbacks = [
EarlyStopping(monitor='val_acc', patience=50, min_delta=0.00007),
# ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=4, min_lr=0.0001),
ModelCheckpoint(cfg.checkpoint_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
| |
    def GetMultiDimensionCommand(self, getter, commandType):
        """
        Check multiple rows/columns for forced solution. If there are any return a command

        For every run of consecutive rows/columns [d1..d2], collect the open
        (unknown/star) positions. If exactly as many areas fit entirely inside
        that run as the run has rows/columns, the stars of the run are bound
        to those areas, so any leftover position can be marked as "no star".

        :param getter: Function to get a specific row/column/area
        :type getter: function
        :param commandType: Set the command type (e.g. R, C, A)
        :type commandType: str
        :return: command
        :rtype: StarBattleCommand or None
        """
        for d1 in range(self._dimension):
            dimSum = []    # open positions accumulated over rows/cols d1..d2
            dimCount = 0   # number of rows/cols accumulated so far
            for d2 in range(d1, self._dimension):
                dimSum.extend(getter(d2, [StarBattleTileStates.UNKNOWN,
                                          StarBattleTileStates.STAR]))
                dimCount += 1
                areaCount = 0            # areas fully contained in the run
                dimSumTemp = dimSum.copy()
                for key in self._boardData:
                    area = self.GetArea(key, [StarBattleTileStates.UNKNOWN,
                                              StarBattleTileStates.STAR])
                    # for/else: the else runs only when NO position of the
                    # area lies outside the run (no break occurred)
                    for pos in area:
                        if pos not in dimSum:
                            break
                    else:
                        areaCount += 1
                        # remove the contained area's positions; whatever
                        # remains in dimSumTemp is a candidate for "no star"
                        for pos in area:
                            dimSumTemp.remove(pos)
                if dimCount == areaCount and len(dimSumTemp) > 0:
                    command = StarBattleCommand(noStars=dimSumTemp)
                    command.SetType("{0}M{1}-{2}".format(commandType, d1, d2))
                    return command
        return None
    def TryPuzzleBreak(self, depth):
        """
        This algorithm tries to break the puzzle via brute force:
        1) Get all unknown positions
        2) Save the state of the current board
        3) Pick a position and put a "Star" in it
        4) Call NextSolution up to [depth] times. There can be 3 things that happen:
        4a) The grid is broken --> Now we know for sure that this position can't be a star --> 5)
        4b) The grid is solved --> We wanted to break the puzzle --> reset board from temp and 3)
        4c) The grid is neither solved nor broken --> reset board from temp and 3)
        5) reset board, return new command(pos = NoStar) + commands that prove the statement
        :param depth: number of NextSolution steps the algorithm tries
        :type depth: int
        :return: command
        :rtype: StarBattleCommand or None
        """
        # 1) collect every position that is still undecided
        unknownPositions = []
        for r in range(self._dimension):
            for c in range(self._dimension):
                if self._board[r][c].Status() == StarBattleTileStates.UNKNOWN:
                    unknownPositions.append((r, c))
        # 2) snapshot board/doubles/triples so each trial starts from the
        # same state and the board can be restored afterwards
        boardTemp, doublesTemp, triplesTemp = self.CopyBoard(self._board, self._doubles,
                                                             self._triples, self._dimension)
        breakCommand = None
        for pos in unknownPositions:
            # reset board
            self._board, self._doubles, self._triples = self.CopyBoard(boardTemp, doublesTemp,
                                                                       triplesTemp, self._dimension)
            isBroken = False
            # 3) hypothetically place a star at pos; its neighbours that are
            # not already NO_STAR become no-stars (stars can't touch)
            newStar = [pos]
            newNoStars = []
            for noStar in self.GetAdjacentPositions(pos, self._dimension):
                if self._board[noStar[0]][noStar[1]].Status() != StarBattleTileStates.NO_STAR:
                    newNoStars.append(noStar)
            command = StarBattleCommand(stars=newStar, noStars=newNoStars)
            self.Commit(command, silent=True)
            # 4) let the solver run up to `depth` steps and watch the outcome
            subCommands = []
            for i in range(depth):
                command = self.NextSolution(tryPuzzleBreak=False)
                subCommands.append(command)
                if command is None:
                    break
                self.Commit(command, silent=True)
                solved, broken = self.GetStatus()
                if solved:
                    break
                if broken:
                    isBroken = True
                    break
            if isBroken:
                # 4a) the hypothesis broke the grid: pos can't be a star;
                # subCommands are the proof trail
                breakCommand = StarBattleCommandPuzzleBreak([pos], subCommands)
                break
        # reset board
        self._board, self._doubles, self._triples = self.CopyBoard(boardTemp, doublesTemp,
                                                                   triplesTemp, self._dimension)
        return breakCommand
def InterpretSolutions(self, solutions):
"""
Interpret all possible solutions and return a command
:param solutions: all possible solutions
:type solutions: list(dict)
:return: command
:rtype: StarBattleCommand or None
"""
if len(solutions) == 0:
return None
if len(solutions) == 1:
return StarBattleCommand(stars=solutions[0]["stars"], noStars=solutions[0]["noStars"])
forcedStars = self.GetForcedSolutions(solutions, "stars", StarBattleTileStates.STAR)
forcedNoStars = self.GetForcedSolutions(solutions, "noStars", StarBattleTileStates.NO_STAR)
if len(forcedStars) > 0 or len(forcedNoStars) > 0:
return StarBattleCommand(stars=forcedStars, noStars=forcedNoStars)
if self.GetDoubles(solutions, "stars", StarBattleTileStates.STAR):
return StarBattleCommand(doubles=self._doubles)
if self.GetTriples(solutions, "stars", StarBattleTileStates.STAR):
return StarBattleCommand(triples=self._triples)
return None
def GetForcedSolutions(self, solutions, key, flt):
"""
Find forced solutions in the possible solutions e.g. stars/noStars that are in all possible
solutions.
:param solutions: all possible solutions
:type solutions: dict
:param key: star/noStar
:type key: str
:param flt: tile filter
:type flt: list(StarBattleTileStates)
:return: positions of forced solutions
:rtype: list(tuple(int, int))
"""
foundForced = []
filteredSolutions = [s[key] for s in solutions]
for pos in filteredSolutions[0]:
if self._board[pos[0]][pos[1]].Status() != flt:
for otherSolution in filteredSolutions[1:]:
if pos not in otherSolution:
break
else:
foundForced.append(pos)
return foundForced
def GetDoubles(self, solutions, key, flt):
"""
Find doubles - two connected positions where one of them are in every possible solution.
:param solutions: all possible solutions
:type solutions: list(dict)
:param key: star/noStar
:type key: str
:param flt: tile filter
:type flt: list(StarBattleTileStates)
:return: positions of forced solutions
:rtype: list(tuple(int, int))
"""
countPositions = {}
filteredSolutions = [s[key] for s in solutions]
for solution in filteredSolutions:
for pos in solution:
if self._board[pos[0]][pos[1]].Status() != flt:
countPositions.setdefault(pos, 0)
countPositions[pos] += 1
doublesChanged = False
for firstPos, firstValue in countPositions.items():
for secondPos, secondValue in countPositions.items():
if firstPos == secondPos:
continue
if firstValue + secondValue == len(solutions) and \
self.IsAdjacent(firstPos, secondPos, False):
newDouble = [firstPos, secondPos]
newDouble.sort()
if newDouble not in self._doubles:
self._doubles.append(newDouble)
doublesChanged = True
return doublesChanged
    def GetTriples(self, solutions, key, flt):
        """
        Find triples - three connected positions where one of them are in every
        possible solution. New triples are appended to ``self._triples``.
        :param solutions: all possible solutions
        :type solutions: list(dict)
        :param key: star/noStar
        :type key: str
        :param flt: tile filter
        :type flt: list(StarBattleTileStates)
        :return: True if at least one new triple was stored in self._triples
        :rtype: bool
        """
        # Count occurrences of each undecided position across all solutions.
        countPositions = {}
        filteredSolutions = [s[key] for s in solutions]
        for solution in filteredSolutions:
            for pos in solution:
                if self._board[pos[0]][pos[1]].Status() != flt:
                    countPositions.setdefault(pos, 0)
                    countPositions[pos] += 1
        triplesChanged = False
        # Brute-force every ordered combination of three distinct positions.
        for firstPos, firstValue in countPositions.items():
            for secondPos, secondValue in countPositions.items():
                for thirdPos, thirdValue in countPositions.items():
                    if firstPos == secondPos or firstPos == thirdPos or secondPos == thirdPos:
                        continue
                    # The three positions form a triple candidate only if at
                    # least one of them appears in every solution.
                    validTriple = True
                    for solution in filteredSolutions:
                        if firstPos not in solution and secondPos not in solution and \
                                thirdPos not in solution:
                            validTriple = False
                    if validTriple:
                        # Each position must touch at least one of the other two
                        # (the triple has to be connected).
                        if not self.IsAdjacent(firstPos, secondPos, False) and \
                                not self.IsAdjacent(firstPos, thirdPos, False):
                            continue
                        if not self.IsAdjacent(secondPos, firstPos, False) and \
                                not self.IsAdjacent(secondPos, thirdPos, False):
                            continue
                        if not self.IsAdjacent(thirdPos, firstPos, False) and \
                                not self.IsAdjacent(thirdPos, secondPos, False):
                            continue
                        # All three must share a row or share a column.
                        if not firstPos[0] == secondPos[0] == thirdPos[0] and \
                                not firstPos[1] == secondPos[1] == thirdPos[1]:
                            continue
                        newTriple = [firstPos, secondPos, thirdPos]
                        newTriple.sort()
                        if newTriple not in self._triples:
                            self._triples.append(newTriple)
                            triplesChanged = True
        return triplesChanged
def GetPossibleSolutions(self, positions):
"""
Compute every possible solution (stars, noStars) while considering the star battle rules.
Apply special logic by considering doubles and triples.
:param positions: list of positions that should be investigated
:type positions: list(tuple(int,int))
:return: list of possible solutions
:rtype: list(dict)
"""
stars = []
filteredPositions = []
for pos in positions:
if self._board[pos[0]][pos[1]].Status() == StarBattleTileStates.STAR:
stars.append(pos)
else:
filteredPositions.append(pos)
# Check if there are already N stars
if len(stars) == self._stars:
# if there are Unknown positions then all these positions must be noStars
if filteredPositions:
stars.sort()
filteredPositions.sort()
return [{"stars": [], "noStars": filteredPositions}]
return []
# Check doubles
newFilteredPositions = self.GetNoStarsFromForcedSolutions(self._doubles,
filteredPositions, len(stars))
if newFilteredPositions:
return [{"stars": [], "noStars": newFilteredPositions}]
# # Check triples
newFilteredPositions = self.GetNoStarsFromForcedSolutions(self._triples,
filteredPositions, len(stars))
if newFilteredPositions:
return [{"stars": [], "noStars": newFilteredPositions}]
return self._GetPossibleSolutionsRecursive(filteredPositions, stars)
def GetNoStarsFromForcedSolutions(self, forcedSolutions, unknownPositions, starCount):
"""
Special logic by considering doubles and triples. These may force special constraints.
:param forcedSolutions: doubles/triples
:type forcedSolutions: list(list(int, int))
:param unknownPositions: positons without a star
:type unknownPositions: list(tuple(int, int))
:param starCount: number of stars in this area
:type starCount: int
:return: list of noStar postions
:rtype: list(tuple(int, int))
"""
filteredForcedSolutions = []
for forcedSolution in forcedSolutions:
for pos in forcedSolution:
if pos not in unknownPositions:
break
else:
filteredForcedSolutions.append(forcedSolution.copy())
newFilteredPositions = []
if len(filteredForcedSolutions) + starCount == self._stars:
for pos in unknownPositions:
for forcedSolution in filteredForcedSolutions:
if pos in forcedSolution:
break
else:
newFilteredPositions.append(pos)
return newFilteredPositions
def _GetPossibleSolutionsRecursive(self, filteredPositions, stars, noStars=None,
solutions=None):
"""
Recursive function. Compute every possible solution (stars, noStars) while considering the
star battle rules.
:param filteredPositions: list of positions that should be investigated
:type filteredPositions: list(tuple(int,int))
:param stars: list of star positions
:type stars: list(tuple(int,int))
:param noStars: list of noStar positions
:type noStars: list(tuple(int,int))
:param solutions: list of possible solutions (give to sub call)
:type solutions: list(dict)
:return: list of possible solutions
:rtype: list(dict)
"""
if noStars is None:
noStars = []
if solutions is None:
solutions = []
# Set next star for all filtered solutions
for pos in filteredPositions:
newStars = stars.copy()
newStars.append(pos)
newStars.sort()
# if this combination is already in the results list skip it
if newStars in [r["stars"] for r in solutions]:
continue
# get all the known noStars and add the new ones from the new star
newNoStars = noStars.copy()
for noStar in self.GetAdjacentPositions(pos, self._dimension):
if noStar not in newNoStars and \
self._board[noStar[0]][noStar[1]].Status() != StarBattleTileStates.NO_STAR:
newNoStars.append(noStar)
newFilteredPositions = [p for p in filteredPositions
if p not in newStars and p not in newNoStars]
# termination new stars equal to the max | |
= _get_arr_and_force_fv(xin)
pin2,fill_value_pin = _get_arr_and_force_fv(pin)
pout2,fill_value_pin = _get_arr_and_fv(pout)
# Figure out what the output fill value should be
if fill_value_xin is None:
if fill_value_pin is None:
fill_value = 1.e20
else:
fill_value = fill_value_pin
else:
fill_value = fill_value_xin
aret = fplib.int2p(pin2, xin2, pout2, linlog, fill_value)
return ma.masked_array(aret, fill_value=fill_value)
################################################################
def labelbar_ndc(wks,nbox,labels,x,y,rlistc=None):
  """
  Creates and draws a labelbar anywhere in the viewport, and returns
  a PlotId representing the labelbar created.

  pid = Ngl.labelbar_ndc(wks, nboxes, labels, x, y, res=None)

  wks -- The identifier returned from calling Ngl.open_wks.

  nboxes -- The number of labelbar boxes.

  labels -- An array of label strings for the labelbar boxes.

  x, y -- The NDC values (values from 0 to 1) defining the
          coordinates of the upper left corner of the labelbar.

  res -- An optional instance of the Resources class having Labelbar
         resources as attributes.
  """
  _set_spc_defaults(0)
  all_res = _crt_dict(rlistc)
  lb_res = {}
  # Route "ngl"-prefixed special resources to the special-resource setter;
  # everything else is handed to the labelbar wrapper.
  for name in list(all_res.keys()):
    value = _convert_from_ma(all_res[name])
    all_res[name] = value
    if name.startswith("ngl"):
      _set_spc_res(name[3:], value)
    else:
      lb_res[name] = value
  _set_labelbar_res(all_res, lb_res, False)  # some additional labelbar resources
  lb_id = labelbar_ndc_wrap(wks, nbox, labels, len(labels), x, y,
                            "double", "double", lb_res, pvoid())
  del all_res
  del lb_res
  return _lst2pobj(lb_id)
################################################################
def legend_ndc(wks,nitems,labels,x,y,rlistc=None):
  """
  Draws a legend anywhere in the viewport, and returns a PlotId
  representing the labelbar created.

  pid = Ngl.legend_ndc(wks, nitems, labels, x, y, res=None)

  wks -- The identifier returned from calling Ngl.open_wks.

  nitems -- The number of legend items.

  labels -- An array of label strings for the legend.

  x, y -- The NDC values (values from 0 to 1) defining the coordinates
          of the upper left corner of the legend.

  res -- An optional instance of the Resources class having Labelbar
         resources as attributes.
  """
  _set_spc_defaults(0)
  all_res = _crt_dict(rlistc)
  legend_res = {}
  # Route "ngl"-prefixed special resources to the special-resource setter;
  # everything else is handed to the legend wrapper.
  for name in list(all_res.keys()):
    value = _convert_from_ma(all_res[name])
    all_res[name] = value
    if name.startswith("ngl"):
      _set_spc_res(name[3:], value)
    else:
      legend_res[name] = value
  _set_legend_res(all_res, legend_res)  # some additional legend resources
  lg_id = legend_ndc_wrap(wks, nitems, labels, len(labels), x, y,
                          "double", "double", legend_res, pvoid())
  del all_res
  del legend_res
  return _lst2pobj(lg_id)
################################################################
def linmsg(x, end_pts_msg=None, max_msg=None, fill_value=1.e20):
  """
  Linearly interpolates to fill in missing values.

  x = Ngl.linmsg(x,end_pts_msg=None,max_msg=None,fill_value=1.e20)

  x -- A numpy or masked array of any dimensionality that contains missing values.

  end_pts_msg -- how missing beginning and end points will be
                 returned. If this value is greater than or equal to 0,
                 then the beginning and end points will be returned as
                 missing (default option). If this value is less
                 than 0, then they will be set to the nearest
                 non-missing value.

  max_msg -- the maximum number of consecutive missing values to be
             interpolated. If not set, then this function will try
             to interpolate as many values as it can.

  fill_value -- The missing value for x. Defaults to 1.e20 if not set.
  """
  #
  # Set defaults for input parameters not specified by user.
  #
  if end_pts_msg is None:
    end_pts_msg = 0
  #
  # Setting max_msg to 0 will cause the C wrapper to set this to
  # npts before going into the Fortran routine.
  #
  if max_msg is None:
    max_msg = 0
  #
  # If input array is a numpy masked array, return a numpy masked array.
  # Otherwise missing values are dealt with using the fill_value.
  #
  fv = _get_fill_value(x)
  # BUG FIX: fv is a scalar fill value or None, so the previous test
  # "any(fv is None)" raised TypeError ('bool' object is not iterable)
  # for every non-masked input. Test the scalar directly.
  if fv is None:
    return fplib.linmsg(_promote_scalar(x), end_pts_msg, max_msg, fill_value)
  else:
    aret = fplib.linmsg(x.filled(fv), end_pts_msg, max_msg, fv)
    return ma.masked_array(aret, fill_value=fv)
################################################################
def map(wks,rlistc=None,res=None):
  """
  Creates and draws a map, and returns a PlotId of the map plot created.

  pid = Ngl.map(wks, res=None)

  wks -- The identifier returned from calling Ngl.open_wks.

  res -- An optional instance of the Resources class having Map
         resources as attributes.
  """
  if res is not None:
    rlistc = res  # "res" keyword takes precedence over legacy "rlistc"
  _set_spc_defaults(1)
  all_res = _crt_dict(rlistc)
  map_res = {}
  # Route "ngl"-prefixed special resources to the special-resource setter;
  # everything else is handed to the map wrapper.
  for name in list(all_res.keys()):
    value = _convert_from_ma(all_res[name])
    all_res[name] = value
    if name.startswith("ngl"):
      _set_spc_res(name[3:], value)
    else:
      map_res[name] = value
  _set_map_res(all_res, map_res)  # some additional map resources
  #
  # Test for masking a lambert conformal plot.
  #
  mask_list = _test_for_mask_lc(all_res, map_res)
  map_obj = _lst2pobj(map_wrap(wks, map_res, pvoid()))
  if mask_list["MaskLC"]:
    map_obj = _mask_lambert_conformal(wks, map_obj, mask_list, map_res)
  del all_res
  del map_res
  return map_obj
################################################################
def maximize_plot(wks,plot,rlistc=None):
  """
  Maximizes the size of the given plot on the workstation.

  Ngl.maximize_plot(wks, plotid, res=None)

  wks -- The identifier returned from calling Ngl.open_wks.

  plotid -- The identifier returned from calling any graphics routine
            like Ngl.xy or Ngl.contour_map.

  res -- An optional instance of the Resources class having
         PyNGL resources as attributes.
  """
  _set_spc_defaults(0)
  all_res = _crt_dict(rlistc)
  other_res = {}
  # The loop is kept for its _set_spc_res side effects on "ngl" resources.
  # NOTE(review): other_res is collected but never forwarded to
  # maximize_plots -- confirm that is intended.
  for name in list(all_res.keys()):
    value = _convert_from_ma(all_res[name])
    all_res[name] = value
    if name.startswith("ngl"):
      _set_spc_res(name[3:], value)
    else:
      other_res[name] = value
  maximize_plots(wks, _pobj2lst(plot), 1, 0, pvoid())
################################################################
def merge_colormaps(wks,cmap1,cmap2):
  """
  Merges two color maps into one for the given workstation.

  Ngl.merge_colormaps(wks,cmap1,cmap2)

  wks -- The identifier returned from calling Ngl.open_wks.

  cmap1 -- An n x 3 array of RGB triplets, or a predefined colormap name.

  cmap2 -- A second n x 3 array of RGB triplets, or a predefined colormap name.
  """
  # Remember the active color map so it can be restored on failure.
  orig_cmap = retrieve_colormap(wks)
  # Install each color map in turn so its RGB triplets can be read back.
  # The first two colors of the second map (background/foreground) are
  # dropped.
  define_colormap(wks, cmap1)
  rgb1 = retrieve_colormap(wks)
  define_colormap(wks, cmap2)
  rgb2 = retrieve_colormap(wks)[2:, :]
  n1 = rgb1.shape[0]
  n2 = rgb2.shape[0]
  if (n1 + n2) > 256:
    print("merge_colormaps - Warning, the two color maps combined must have 256 or fewer colors.")
    print("Keeping original color map.")
    define_colormap(wks, orig_cmap)
    return None
  # Concatenate the two maps into a single colormap array.
  combined = numpy.zeros((n1 + n2, 3), rgb1.dtype.char)
  combined[:n1, :] = rgb1
  combined[n1:, :] = rgb2
  define_colormap(wks, combined)
  return None
################################################################
def _natgrid_seq_length(arg, argnum):
  """Length of a list/tuple/1-d NumPy array, or None (after printing an
  error naming argument *argnum*) for unsupported types."""
  if _is_list_or_tuple(arg):
    return len(arg)
  if _is_numpy_array(arg):
    return arg.shape[0]
  print("natgrid: type of argument %d must be one of: list, tuple, or NumPy array" % argnum)
  return None

def natgrid(x,y,z,xo,yo):
  """
  Uses a natural neighbor algorithm to interpolate 2-dimensional
  randomly spaced data to a defined output grid.

  xarray = Ngl.natgrid(x, y, z, xo, yo)

  x, y -- One-dimensional arrays of the X and Y coordinate points of the
          input data.

  z -- The one-dimensional input data to be interpolated, of the same
       length as x and y. Can be a NumPy float array or a Python list
       or tuple.

  xo, yo -- One-dimensional NumPy float arrays or Python lists (of
            length numxout and numyout) containing the coordinate points
            of the output data grid.
  """
  # The length/type checks were triplicated inline; they are now a shared
  # helper. Behavior (including the error messages) is unchanged.
  dsizes_x = _natgrid_seq_length(x, 1)
  if dsizes_x is None:
    return None
  dsizes_xo = _natgrid_seq_length(xo, 4)
  if dsizes_xo is None:
    return None
  dsizes_yo = _natgrid_seq_length(yo, 5)
  if dsizes_yo is None:
    return None
  ier, zo = natgridc(dsizes_x, x, y, z, dsizes_xo, dsizes_yo, xo, yo,
                     dsizes_xo, dsizes_yo)
  if ier != 0:
    # Non-zero return code: report it and give up.
    print("natgrid: error number %d returned, see error table." % (ier))
    del ier
    return None
  return zo
def _ncargpath(type):
  """Return the NCARG directory path for *type*; thin alias for pynglpath."""
  return pynglpath(type)
################################################################
def new_color(wks_id,r,g,b):
  """
  Adds the given color to the end of the color map of the given
  workstation and returns the integer index of the new color.

  index = Ngl.new_color(wks, red, green, blue)

  wks -- The identifier returned from calling Ngl.open_wks.

  red, green, blue -- Floating point values between 0.0 and 1.0
                      inclusive.
  """
  workstation = _int_id(wks_id)
  return NhlNewColor(workstation, r, g, b)
################################################################
def ndctodata(obj,x,y):
  """
  Converts coordinates in NDC space to coordinates in data space.
  Missing values are ignored.

  xdata,ydata = Ngl.ndctodata(plot,xndc,yndc)

  plot -- The identifier returned from calling any plot object creation
          function, like Ngl.xy, Ngl.contour, Ngl.vector_map, etc.

  xndc,yndc -- One dimensional (masked) arrays containing values to be
               converted.
  """
  x_arr, x_fill = _get_arr_and_fv(x)
  y_arr, y_fill = _get_arr_and_fv(y)
  # Substitute default missing values, remembering whether any were present.
  x_fill, has_x_msg = _set_default_msg(x_fill)
  y_fill, has_y_msg = _set_default_msg(y_fill)
  error, x_out, y_out, status, out_of_range = \
      NhlPNDCToData(_int_id(obj), x_arr, y_arr, len(_promote_scalar(x_arr)),
                    x_fill, y_fill, has_x_msg, has_y_msg)
  # Return masked arrays when the input was masked, or when some values
  # fell outside the plot's data space (status == 1).
  if has_x_msg or status == 1:
    x_out = _convert_to_ma(x_out, out_of_range)
  if has_y_msg or status == 1:
    y_out = _convert_to_ma(y_out, out_of_range)
  del error, status, out_of_range
  return x_out, y_out
################################################################
def new_dash_pattern(wks_id,patterns):
  """
  Adds a new dash pattern to the existing table of dash patterns, and
  returns the integer index of the new pattern.

  index = Ngl.new_dash_pattern(wks, dash_pattern)

  wks -- The identifier returned from calling Ngl.open_wks.

  dash_pattern -- A string indicating the dash pattern to create. The
                  dash pattern string can be any length, and should
                  be generated using a combination of "$" and "_"
                  characters. The "$" represents a pen-down, and the
                  "_" represents a pen-up.
  """
  workstation = _int_id(wks_id)
  return NhlNewDashPattern(workstation, patterns)
################################################################
def new_marker(wks_id,marker_strings,font_nums,xoffset=0.,yoffset=0., \
aspect_ratio=1., size=1., angle=0.):
"""
Adds a new marker to the existing table of markers and returns the
integer index of the new | |
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from utils import *
class Mlp(nn.Module):
    """Two-layer perceptron: Linear -> activation -> dropout -> Linear -> dropout.

    Hidden and output widths default to the input width when not given.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class PyramidSqueeze(nn.Module):
    """Partition each feature level into windows, layer-normalize them, and
    project every level to a shared embedding dimension ``dim``."""

    def __init__(self, dims_of_layers, dim=16, base_window_size=1, dropout=0.0, norm_layer=nn.LayerNorm):
        super(PyramidSqueeze, self).__init__()
        self.dim = dim
        self.feature_layers = dims_of_layers
        self.base_window_size = base_window_size
        num_levels = len(self.feature_layers)
        # Window size halves at each deeper (later) level.
        self.size = [self.base_window_size * 2 ** (num_levels - 1 - i)
                     for i in range(num_levels)]
        # One norm/projection/dropout per feature level.
        self.linears = nn.ModuleList(nn.Linear(width, self.dim) for width in self.feature_layers)
        self.dropouts = nn.ModuleList(nn.Dropout(dropout) for _ in self.feature_layers)
        self.norm = nn.ModuleList(norm_layer(width) for width in self.feature_layers)

    def forward(self, multi_layer_features):
        assert len(multi_layer_features) == len(self.feature_layers)
        squeezed = []
        for feat, win, norm, lin, drop in zip(multi_layer_features, self.size,
                                              self.norm, self.linears, self.dropouts):
            windows = window_partition(feat, win)
            squeezed.append(drop(lin(norm(windows))))
        return squeezed
class PyramidReverse(nn.Module):
    """Project window tokens back to each level's original width, merge the
    windows back into per-level feature maps, and apply a residual MLP; the
    result of each level is returned channels-first (B, C, H, W)."""

    def __init__(self, dims_of_layers, base_pyramid_size, dim=16, base_window_size=1, dropout=0.0, drop_path=0.0, mlp_ratio=4, act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super(PyramidReverse, self).__init__()
        self.dim = dim
        self.feature_layers = dims_of_layers
        self.base_pyramid_size = base_pyramid_size
        self.base_window_size = base_window_size
        num_levels = len(self.feature_layers)
        # Window size halves at each deeper (later) level.
        self.size = [self.base_window_size * 2 ** (num_levels - 1 - i)
                     for i in range(num_levels)]
        # One norm/projection/dropout/MLP per feature level.
        self.linears = nn.ModuleList(nn.Linear(self.dim, width) for width in self.feature_layers)
        self.dropouts = nn.ModuleList(nn.Dropout(dropout) for _ in self.feature_layers)
        self.norm = nn.ModuleList(norm_layer(self.dim) for _ in self.feature_layers)
        self.mlps = nn.ModuleList(Mlp(in_features=width, hidden_features=width * mlp_ratio,
                                      act_layer=act_layer, drop=dropout)
                                  for width in self.feature_layers)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, multi_pyramids):
        assert len(multi_pyramids) == len(self.feature_layers)
        outputs = []
        for pyramid, win, norm, lin, drop, mlp in zip(multi_pyramids, self.size, self.norm,
                                                      self.linears, self.dropouts, self.mlps):
            feat = window_reverse(pyramid, win, self.base_pyramid_size)
            proj = drop(lin(norm(feat)))
            residual = proj + self.drop_path(mlp(proj))
            outputs.append(residual.permute(0, 3, 1, 2).contiguous())
        return outputs
class Scale_Unified_Attention(nn.Module):
def __init__(self, dims_of_layers, num_pyramid, num_heads=8, base_window_size=1, window_squeeze_drop=0.0,
scale_attn_drop=0.0, spatial_attn_drop=0.0, window_reverse_drop=0.0, drop_path=0.0, mlp_ratio=4,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_rpb=True, mask=None):
super(Scale_Unified_Attention, self).__init__()
# related params
self.dim = dims_of_layers[0]
self.num_heads = num_heads # num of attention head
self.num_pyramid = num_pyramid # num of scale pyramids (equals to the num of pixels of the last feature map)
self.base_pyramid_size = int(torch.sqrt(torch.tensor(self.num_pyramid))) # num of scale pyramids alongside )
self.size = [base_window_size * 2 ** (len(dims_of_layers) - 1 - i) for i in range(len(dims_of_layers))] # window size
self.layers = [self.dim // num_heads for i in range(len(dims_of_layers))] # num of feature layers of every attention head
self.num_features = [self.size[i] ** 2 * self.layers[i] for i in range(len(dims_of_layers))]
self.layers_per_head = sum(self.num_features)
self.square_size = [self.size[i] ** 2 for i in range(len(self.size))]
self.use_rpb = use_rpb
self.mask = mask
# modules
self.pyramid_squeeze = PyramidSqueeze(dims_of_layers, dim=self.dim, base_window_size=base_window_size, norm_layer=norm_layer, dropout=window_squeeze_drop)
self.pyramid_reverse = PyramidReverse(dims_of_layers, dim=self.dim, base_pyramid_size=self.base_pyramid_size, base_window_size=base_window_size, norm_layer=norm_layer,
dropout=window_reverse_drop, mlp_ratio=mlp_ratio, act_layer=act_layer)
self.scale_qkv = nn.ModuleList([nn.Linear(self.dim, self.dim * 3) for i in range(len(dims_of_layers))])
self.spatial_qkv = nn.Linear(sum(self.square_size) * self.dim // self.num_heads, 3)
self.scale_softmax = nn.Softmax(dim=-1)
self.scale_attn_drop = nn.Dropout(scale_attn_drop)
self.spatial_softmax = nn.Softmax(dim=-1)
self.spatial_attn_drop = nn.Dropout(spatial_attn_drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# define a parameter table of relative scale/spatial bias
self.relative_scale_bias_table = nn.Parameter(torch.zeros(2 * (sum(self.square_size) + sum(self.square_size[1:])) - 1, num_heads))
# get pair-wise relative position index for each token inside the scale space
coords_h_scale = torch.arange(sum(self.square_size))
coords_w_scale = torch.arange(sum(self.square_size)).flip(0)
offset = [0]
for i in range(len(self.square_size) - 1):
offset.append(sum(self.square_size[-i - 1:]))
offset_h_scale = []
offset_w_scale = []
for i in range(len(self.square_size)):
offset_w_scale.extend([offset[- i - 1] for _ in range(self.square_size[i])])
offset_h_scale.extend([offset[i] for _ in range(self.square_size[i])])
offset_h_scale = torch.tensor(offset_h_scale).unsqueeze(-1) * torch.ones((1, sum(self.square_size))).long()
offset_w_scale = torch.tensor(offset_w_scale).unsqueeze(0) * torch.ones((sum(self.square_size), 1)).long()
coords_scale = torch.stack(torch.meshgrid([coords_h_scale, coords_w_scale])) # 2, len_code, len_code
relative_scale_index = coords_scale.sum(0) + offset_h_scale + offset_w_scale # len_code, len_code
self.register_buffer("relative_scale_index", relative_scale_index)
trunc_normal_(self.relative_scale_bias_table, std=.02)
self.relative_spatial_bias_table = nn.Parameter(
torch.zeros((2 * self.base_pyramid_size - 1) * (2 * self.base_pyramid_size - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the spatial dim
coords_h_spatial = torch.arange(self.base_pyramid_size)
coords_w_spatial = torch.arange(self.base_pyramid_size)
coords_spatial = torch.stack(torch.meshgrid([coords_h_spatial, coords_w_spatial])) # 2, Wh, Ww
coords_spatial_flatten = torch.flatten(coords_spatial, 1) # 2, Wh*Ww
relative_coords_spatial = coords_spatial_flatten[:, :, None] - coords_spatial_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords_spatial = relative_coords_spatial.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords_spatial[:, :, 0] += self.base_pyramid_size - 1 # shift to start from 0
relative_coords_spatial[:, :, 1] += self.base_pyramid_size - 1
relative_coords_spatial[:, :, 0] *= 2 * self.base_pyramid_size - 1
relative_spatial_index = relative_coords_spatial.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_spatial_index", relative_spatial_index)
trunc_normal_(self.relative_spatial_bias_table, std=.02)
def forward(self, multi_layer_features):
    """Fuse a multi-level feature pyramid with scale attention then spatial attention.

    :param multi_layer_features: list of feature maps, one per pyramid level;
        the last entry is assumed coarsest (see commented assert below).
    :return: list of per-level feature maps produced by ``pyramid_reverse``.
    """
    # assert (multi_layer_features[-1].size(-2) / self.size[-1]) == self.base_pyramid_size
    B = multi_layer_features[0].size(0)
    device = multi_layer_features[0].device
    # Squeeze each pyramid level into a token sequence of a common embedding dim.
    squeezed_features = self.pyramid_squeeze(multi_layer_features)
    '''
    scale attention
    '''
    scale_q, scale_k, scale_v = [], [], []
    scale_space_num = 0
    for i in range(len(squeezed_features)):
        B_, N, C = squeezed_features[i].shape
        if i == 0:
            # Effective batch size after squeezing; must agree across levels.
            scale_space_num = B_
        else:
            assert scale_space_num == B_
        # Per-level qkv projection -> (3, B_, num_heads, N, C // num_heads).
        qkv = self.scale_qkv[i](squeezed_features[i]).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(
            2, 0, 3, 1, 4)
        scale_q.append(qkv[0])
        scale_k.append(qkv[1])
        scale_v.append(qkv[2])
    # Concatenate the tokens of all pyramid levels along the sequence axis.
    scale_q = torch.cat(scale_q, dim=-2)
    scale_k = torch.cat(scale_k, dim=-2)
    scale_v = torch.cat(scale_v, dim=-2)
    # Standard 1/sqrt(sequence length) attention scaling.
    regularize_scale = sum(self.square_size) ** -0.5
    scale_q = scale_q * regularize_scale
    scale_attn = (scale_q @ scale_k.transpose(-2, -1))
    # build relative scale bias
    if self.use_rpb:
        relative_scale_bias = self.relative_scale_bias_table[self.relative_scale_index.view(-1)].view(
            sum(self.square_size), sum(self.square_size), -1)  # Wh*Ww,Wh*Ww,nH
        relative_scale_bias = relative_scale_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        scale_attn = scale_attn + relative_scale_bias.unsqueeze(0)
    # build scale mask (large negative additive mask, -100, before softmax)
    if self.mask is not None:
        scale_mask = create_scale_mask([self.square_size[i] for i in range(len(self.square_size))], [1 for _ in range(len(self.square_size))], -100, self.mask).to(device)
        scale_mask.requires_grad = False
        scale_attn = scale_attn + scale_mask
    scale_attn = self.scale_softmax(scale_attn)  # num_pyramid*B, num_heads, layers_per_head, layers_per_head
    scale_attn = self.scale_attn_drop(scale_attn)
    # b: tokens per side at the coarsest level for this input resolution; n = b*b.
    b = multi_layer_features[-1].size(-2) // self.size[-1]
    n = b ** 2
    scale_x = (scale_attn @ scale_v).view(B, n, self.num_heads, -1)
    '''
    spatial attention(aggregation)
    '''
    spatial_qkv = self.spatial_qkv(scale_x).permute(3, 0, 2, 1)
    spatial_q, spatial_k, spatial_v = spatial_qkv[0].unsqueeze(-1), spatial_qkv[1].unsqueeze(-1), spatial_qkv[2].unsqueeze(-1)
    regularize_spatial = n ** -0.5
    spatial_q = spatial_q * regularize_spatial
    spatial_attn = (spatial_q @ spatial_k.transpose(-2, -1))
    # build relative scale bias
    if self.use_rpb:
        # Recompute the relative-position index at the runtime spatial size,
        # which may differ from base_pyramid_size when input resolution changes.
        size = int(multi_layer_features[-1].size(-2) / self.size[-1])
        coords_h_spatial = torch.arange(size)
        coords_w_spatial = torch.arange(size)
        coords_spatial = torch.stack(torch.meshgrid([coords_h_spatial, coords_w_spatial]))  # 2, Wh, Ww
        coords_spatial_flatten = torch.flatten(coords_spatial, 1)  # 2, Wh*Ww
        relative_coords_spatial = coords_spatial_flatten[:, :, None] - coords_spatial_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords_spatial = relative_coords_spatial.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords_spatial[:, :, 0] += size - 1  # shift to start from 0
        relative_coords_spatial[:, :, 1] += size - 1
        relative_coords_spatial[:, :, 0] *= 2 * size - 1
        relative_spatial_index = relative_coords_spatial.sum(-1)  # Wh*Ww, Wh*Ww
        if size != self.base_pyramid_size:
            # Linearly resample the learned bias table to the runtime table length.
            relative_spatial_bias_table = F.interpolate(self.relative_spatial_bias_table.unsqueeze(0).transpose(1, 2),
                size=(2 * size - 1) * (2 * size - 1), mode='linear').squeeze().transpose(0, 1)
            relative_spatial_bias = relative_spatial_bias_table[relative_spatial_index.view(-1)].view(
                size ** 2, size ** 2, -1)  # Wh*Ww,Wh*Ww,nH
        else:
            relative_spatial_bias = self.relative_spatial_bias_table[relative_spatial_index.view(-1)].view(
                size ** 2, size ** 2, -1)  # Wh*Ww,Wh*Ww,nH
        relative_spatial_bias = relative_spatial_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        # if multi_layer_features[-1].size(-2) / self.size[-1] != self.base_pyramid_size:
        #     size = int(multi_layer_features[-1].size(-2) / self.size[-1]) ** 2
        #     relative_spatial_bias = F.interpolate(relative_spatial_bias.unsqueeze(0), size=(size, size), mode='bilinear').squeeze()
        spatial_attn = spatial_attn + relative_spatial_bias.unsqueeze(0)
    if self.mask is not None:
        spatial_mask = create_spatial_mask(n, -100).to(device)
        spatial_mask.requires_grad = False
        spatial_attn = spatial_attn + spatial_mask
    spatial_attn = self.spatial_softmax(spatial_attn)  # B, num_heads, num_pyramid, num_pyramid
    spatial_attn = self.spatial_attn_drop(spatial_attn)
    spatial_x = (spatial_attn @ spatial_v).permute(0, 2, 1, 3).contiguous()
    # Residual combination of scale- and spatial-attended features.
    ss_x = (self.drop_path(scale_x * spatial_x) + scale_x).view(-1, self.num_heads, sum(self.square_size), self.dim // self.num_heads)
    # Cumulative token offsets marking where each pyramid level starts.
    start_point = [0]
    for i in range(len(self.num_features) - 1):
        start_point.append(self.square_size[i])
    for i in range(len(start_point)):
        if i > 0:
            start_point[i] += start_point[i - 1]
    # Split the fused token sequence back into per-level blocks.
    attn_features = []
    for i in range(len(self.square_size)):
        if i < len(self.square_size) - 1:
            temp = ss_x[:, :, start_point[i] : start_point[i + 1]].view(scale_space_num,
                self.num_heads, self.square_size[i], -1).permute(0, 2, 1, 3).flatten(2).contiguous()
        else:
            temp = ss_x[:, :, start_point[-1]:].view(scale_space_num,
                self.num_heads, self.square_size[i], -1).permute(0, 2, 1, 3).flatten(2).contiguous()
        attn_features.append(temp)
    # Expand token blocks back to spatial feature maps, one per pyramid level.
    proj_features = self.pyramid_reverse(attn_features)
    return proj_features
if __name__ == '__main__':
device = torch.device('cpu')
# l1 = torch.autograd.Variable(torch.randn(2, 64, 128, 128)).to(device)
# l2 = torch.autograd.Variable(torch.randn(2, 128, 64, 64)).to(device)
# l3 = torch.autograd.Variable(torch.randn(2, 256, 32, 32)).to(device)
# l4 = torch.autograd.Variable(torch.randn(2, 512, 16, 16)).to(device)
l1 = torch.autograd.Variable(torch.randn(2, 64, 256, 256)).to(device)
l2 = | |
specification along with,
# strictly speaking, '16. e4 Qd8(16... dxe4)17. fxe4' since '(' and ')'
# are self-terminating and nothing is said about separation from
# adjacent tokens.
if not length:
movetext.append(token)
return len(token)
if len(token) + length >= PGN_MAXIMUM_LINE_LENGTH:
movetext.append(PGN_LINE_SEPARATOR)
movetext.append(token)
return len(token)
# if token == ')':
# movetext.append(token)
# return len(token) + length
# if movetext[-1] == '(':
# movetext.append(token)
# return len(token) + length
# else:
movetext.append(PGN_TOKEN_SEPARATOR)
movetext.append(token)
return len(token) + length + 1
def get_movetext(self):
    """Return the list of movetext tokens.

    Moves carry check and checkmate indicators, but neither the black
    move indicators of export format (after a comment or at the start of
    a RAV) nor move numbers.
    """
    offset = self._movetext_offset
    return [] if offset is None else self._text[offset:]
def get_all_movetext_in_pgn_export_format(self):
    """Return all movetext in pgn export format.

    Moves, comments, NAGs, RAVs and the termination marker are all
    emitted, with move numbers inserted per export-format rules.

    Where check or checkmate moves are present the text is not in export
    format unless generated by the GameIndicateCheck class, because these
    indicators are not included in the text otherwise.
    """
    fullmove_number, active_color = self._set_movetext_indicators()
    movetext = ["\n"]
    if self._movetext_offset is None:
        # NOTE(review): this branch returns the list ['\n'] while the
        # normal path returns a joined string — confirm callers cope.
        return movetext
    length = 0
    insert_fullmove_number = True
    # Stack of [fullmove number, active color] saved at each '(' so the
    # numbering can be restored when the matching ')' closes the RAV.
    fnas = [[fullmove_number, active_color]]
    _attm = self._add_token_to_movetext
    termination = self._tags.get(TAG_RESULT, DEFAULT_TAG_RESULT_VALUE)
    for mvt in self._text[self._movetext_offset :]:
        if mvt.startswith("{"):
            # Brace comment: emit word by word so line wrapping can
            # happen inside the comment.
            for word in mvt.split():
                length = _attm(word, movetext, length)
            insert_fullmove_number = True
        elif mvt.startswith("$"):
            # Numeric Annotation Glyph.
            length = _attm(mvt, movetext, length)
        elif mvt.startswith(";"):
            # Rest-of-line comment: terminates the line, so the running
            # line length is reset to zero.
            if len(mvt) + length >= PGN_MAXIMUM_LINE_LENGTH:
                movetext.append(PGN_LINE_SEPARATOR)
            else:
                movetext.append(PGN_TOKEN_SEPARATOR)
            movetext.append(mvt)
            length = 0
            insert_fullmove_number = True
        elif mvt == "(":
            # RAV start: remember current numbering and step back one
            # half-move, since the variation replays the previous move.
            length = _attm(mvt, movetext, length)
            fnas[-1] = [fullmove_number, active_color]
            active_color = OTHER_SIDE[active_color]
            if active_color == FEN_BLACK_ACTIVE:
                fullmove_number -= 1
            fnas.append([fullmove_number, active_color])
            insert_fullmove_number = True
        elif mvt == ")":
            # RAV end: restore the numbering saved at the matching '('.
            length = _attm(mvt, movetext, length)
            del fnas[-1]
            fullmove_number, active_color = fnas[-1]
            insert_fullmove_number = True
        elif mvt == termination:
            length = _attm(mvt, movetext, length)
        elif active_color == FEN_WHITE_ACTIVE:
            # White move: always preceded by 'N.'.
            length = _attm(
                str(fullmove_number) + PGN_DOT, movetext, length
            )
            # Convert a '!?'-style suffix annotation to its NAG form.
            srchm = suffix_annotations.search(mvt)
            if srchm:
                mvt = mvt[: srchm.start()]
            length = _attm(mvt, movetext, length)
            if srchm:
                length = _attm(
                    SUFFIX_ANNOTATION_TO_NAG[srchm.group()],
                    movetext,
                    length,
                )
            active_color = OTHER_SIDE[active_color]
            insert_fullmove_number = False
        else:
            # Black move: 'N...' is emitted only after a comment, NAG,
            # or RAV broke the move-number continuity.
            if insert_fullmove_number:
                length = _attm(
                    str(fullmove_number) + PGN_DOT * 3, movetext, length
                )
                insert_fullmove_number = False
            srchm = suffix_annotations.search(mvt)
            if srchm:
                mvt = mvt[: srchm.start()]
            length = _attm(mvt, movetext, length)
            if srchm:
                length = _attm(
                    SUFFIX_ANNOTATION_TO_NAG[srchm.group()],
                    movetext,
                    length,
                )
            active_color = OTHER_SIDE[active_color]
            fullmove_number += 1
    return "".join(movetext)
def get_archive_movetext(self):
    """Return Reduced Export format PGN movetext.

    Comments, NAGs and RAV contents are omitted; only main-line moves,
    move numbers and the termination marker are emitted.

    Where check or checkmate moves are present the text is not in export
    format unless generated by the GameIndicateCheck class, because these
    indicators are not included in the text otherwise.
    """
    fullmove_number, active_color = self._set_movetext_indicators()
    movetext = ["\n"]
    if self._movetext_offset is None:
        # NOTE(review): this branch returns the list ['\n'] while the
        # normal path returns a joined string — confirm callers cope.
        return movetext
    length = 0
    insert_fullmove_number = True
    # Depth of RAV nesting; any token seen while depth > 0 is skipped.
    rav_depth = 0
    _attm = self._add_token_to_movetext
    termination = self._tags.get(TAG_RESULT, DEFAULT_TAG_RESULT_VALUE)
    for mvt in self._text[self._movetext_offset :]:
        if (
            mvt.startswith("{")
            or mvt.startswith("$")
            or mvt.startswith(";")
        ):
            # Comments and NAGs are dropped in reduced export format.
            pass
        elif mvt == "(":
            rav_depth += 1
        elif mvt == ")":
            rav_depth -= 1
        elif rav_depth:
            # Inside a RAV: variation moves are dropped.
            pass
        elif mvt == termination:
            length = _attm(mvt, movetext, length)
        elif active_color == FEN_WHITE_ACTIVE:
            # White move: always preceded by 'N.'.
            length = _attm(
                str(fullmove_number) + PGN_DOT, movetext, length
            )
            # Convert a '!?'-style suffix annotation to its NAG form.
            srchm = suffix_annotations.search(mvt)
            if srchm:
                mvt = mvt[: srchm.start()]
            length = _attm(mvt, movetext, length)
            if srchm:
                length = _attm(
                    SUFFIX_ANNOTATION_TO_NAG[srchm.group()],
                    movetext,
                    length,
                )
            active_color = OTHER_SIDE[active_color]
            insert_fullmove_number = False
        else:
            # Black move: 'N...' emitted only when continuity was broken.
            if insert_fullmove_number:
                length = _attm(
                    str(fullmove_number) + PGN_DOT * 3, movetext, length
                )
                insert_fullmove_number = False
            srchm = suffix_annotations.search(mvt)
            if srchm:
                mvt = mvt[: srchm.start()]
            length = _attm(mvt, movetext, length)
            if srchm:
                length = _attm(
                    SUFFIX_ANNOTATION_TO_NAG[srchm.group()],
                    movetext,
                    length,
                )
            active_color = OTHER_SIDE[active_color]
            fullmove_number += 1
    return "".join(movetext)
def get_movetext_without_comments_in_pgn_export_format(self):
    """Return movetext without comments in pgn export format.

    RAVs and the termination marker are kept; brace comments, NAGs and
    rest-of-line comments are dropped.

    Where check or checkmate moves are present the text is not in export
    format unless generated by the GameIndicateCheck class, because these
    indicators are not included in the text otherwise.
    """
    fullmove_number, active_color = self._set_movetext_indicators()
    movetext = ["\n"]
    if self._movetext_offset is None:
        # NOTE(review): this branch returns the list ['\n'] while the
        # normal path returns a joined string — confirm callers cope.
        return movetext
    length = 0
    insert_fullmove_number = True
    # Stack of [fullmove number, active color] saved at each '(' so the
    # numbering can be restored when the matching ')' closes the RAV.
    fnas = [[fullmove_number, active_color]]
    _attm = self._add_token_to_movetext
    termination = self._tags.get(TAG_RESULT, DEFAULT_TAG_RESULT_VALUE)
    for mvt in self._text[self._movetext_offset :]:
        if (
            mvt.startswith("{")
            or mvt.startswith("$")
            or mvt.startswith(";")
        ):
            # Comments and NAGs are dropped in this format.
            pass
        elif mvt == "(":
            # RAV start: remember current numbering and step back one
            # half-move, since the variation replays the previous move.
            length = _attm(mvt, movetext, length)
            fnas[-1] = [fullmove_number, active_color]
            active_color = OTHER_SIDE[active_color]
            if active_color == FEN_BLACK_ACTIVE:
                fullmove_number -= 1
            fnas.append([fullmove_number, active_color])
            insert_fullmove_number = True
        elif mvt == ")":
            # RAV end: restore the numbering saved at the matching '('.
            length = _attm(mvt, movetext, length)
            del fnas[-1]
            fullmove_number, active_color = fnas[-1]
            insert_fullmove_number = True
        elif mvt == termination:
            length = _attm(mvt, movetext, length)
        elif active_color == FEN_WHITE_ACTIVE:
            # White move: always preceded by 'N.'.
            length = _attm(
                str(fullmove_number) + PGN_DOT, movetext, length
            )
            # Convert a '!?'-style suffix annotation to its NAG form.
            srchm = suffix_annotations.search(mvt)
            if srchm:
                mvt = mvt[: srchm.start()]
            length = _attm(mvt, movetext, length)
            if srchm:
                length = _attm(
                    SUFFIX_ANNOTATION_TO_NAG[srchm.group()],
                    movetext,
                    length,
                )
            active_color = OTHER_SIDE[active_color]
            insert_fullmove_number = False
        else:
            # Black move: 'N...' emitted only when continuity was broken.
            if insert_fullmove_number:
                length = _attm(
                    str(fullmove_number) + PGN_DOT * 3, movetext, length
                )
                insert_fullmove_number = False
            srchm = suffix_annotations.search(mvt)
            if srchm:
                mvt = mvt[: srchm.start()]
            length = _attm(mvt, movetext, length)
            if srchm:
                length = _attm(
                    SUFFIX_ANNOTATION_TO_NAG[srchm.group()],
                    movetext,
                    length,
                )
            active_color = OTHER_SIDE[active_color]
            fullmove_number += 1
    return "".join(movetext)
def get_export_pgn_elements(self):
    """Return Export format PGN version of game.

    This method will be removed without notice in future. It seems more
    convenient and clearer to use the called methods directly.

    Where check or checkmate moves are present the text is not in export
    format unless generated by the GameIndicateCheck class, because these
    indicators are not included in the text otherwise.
    """
    str_tags = self.get_seven_tag_roster_tags()
    movetext = self.get_all_movetext_in_pgn_export_format()
    other_tags = self.get_non_seven_tag_roster_tags()
    return str_tags, movetext, other_tags
def get_archive_pgn_elements(self):
    """Return Archive format PGN version of game. (Reduced Export Format).

    This method will be removed without notice in future. It seems more
    convenient and clearer to use the called methods directly.

    Where check or checkmate moves are present the text is not in export
    format unless generated by the GameIndicateCheck class, because these
    indicators are not included in the text otherwise.
    """
    str_tags = self.get_seven_tag_roster_tags()
    movetext = self.get_archive_movetext()
    return str_tags, movetext
def get_export_pgn_rav_elements(self):
    """Return Export format PGN version of game with RAVs but no comments.

    This method will be removed without notice in future. It seems more
    convenient and clearer to use the called methods directly.

    Where check or checkmate moves are present the text is not in export
    format unless generated by the GameIndicateCheck class, because these
    indicators are not included in the text otherwise.
    """
    str_tags = self.get_seven_tag_roster_tags()
    movetext = self.get_movetext_without_comments_in_pgn_export_format()
    other_tags = self.get_non_seven_tag_roster_tags()
    return str_tags, movetext, other_tags
def get_text_of_game(self):
    """Return the current text of the game as a single string."""
    return "".join(token for token in self._text)
def append_check_indicator(self):
    """Intentionally do nothing.

    Check and checkmate indicators are not appended to movetext here;
    use the GameIndicateCheck class to add them.

    Called after the move in self._text[-1] has been applied to the
    board, but before processing of the next token starts.
    """
    return None
class GameStrictPGN(Game):
    """Data structure of game positions derived from a PGN game score.

    Disambiguation is allowed only when necessary.

    Thus 'Nge2' is not accepted when an 'N' on 'c3' is pinned to the 'K' on
    'e1'.

    The definition of strictness may change in future if the Game class is
    modified to allow other transgressions of the PGN specification.

    The strictness is not the distinction between Import and Export Format
    described in the PGN specification.
    """

    # Presumably switches the Game base class into strict-PGN parsing
    # mode — confirm against the _strict_pgn handling in Game.
    _strict_pgn = True
class GameTextPGN(Game):
"""Data structure derived with adjustments to PGN specification.
Long algebraic notation is accepted.
Redundant precision such as 'Nge2' when an 'N' on 'c3' is pinned to the 'K'
on 'e1' is allowed.
The FIDE versions of castling using the digit zero, 0-0 and 0-0-0, and pawn
promotion without the equal symbol, e8Q, are allowed.
The left and right angle bracket characters, and any characters between a
('<', '>') pair, are ignored. The two characters are reserved for future
expansion in the PGN specification. The ignored sequence, '<.*>', happens
to match the markup sequences of HTML and XHTML.
"""
_strict_pgn = None
_bishop_or_bpawn = None
# bx[a-h][1-8] is ambiguous when case is ignored and always matches as a
# piece, | |
#!/usr/bin/env python
"""
ml_color_transfer.py
Functions for transferring colors of one image onto another.
"""
import os
import sys
if '--omp_num_threads' in sys.argv:
os.environ["OMP_NUM_THREADS"] = sys.argv[sys.argv.index('--omp_num_threads')+1]
import numpy as np
import matplotlib
import numpy.linalg
import matplotlib.pyplot as plt
from PIL import Image
import glob
import imageio
import cv2
import json
import re
import time
import argparse
from ml_parameters import *
import np_utils as npu
import ml_core as mlc
import ml_color_timelapse as mlct
import OT_Sinkhorn
# For MacOS
matplotlib.use("agg") # Non-interactive backend
# matplotlib.use("Qt4Agg") # Interactive backend
# plt.get_current_fig_manager().window.wm_geometry("+1600+400") # For TkAgg
# plt.get_current_fig_manager().window.setGeometry(1600, 400, 1000, 800) # For QtAgg
__author__ = "<NAME>"
def save_color_hist(A, filebase, scale, color=True, colorspace="RGB", save_plot_png=True, show_plot=False, save_npy=True):
    """Save a 3D color histogram as a .npy file and/or a 3D scatter plot.

    :param A: 3D histogram array; axes follow slow/medium/fast index order
        (R/G/B or L/A/B depending on colorspace).
    :param filebase: Output path without extension (.npy / .png appended).
    :param scale: Marker-size multiplier for the scatter plot.
    :param color: When True, color each RGB marker with its bin's color.
    :param colorspace: "RGB" or "LAB"; selects marker coloring and axis labels.
    :param save_plot_png: When True, save the figure to filebase + ".png".
    :param show_plot: When True, display the figure interactively.
    :param save_npy: When True, save A with numpy.
    """
    scale0 = 5000  # base marker size, divided by cube root of bin count below
    if save_npy:
        np.save(filebase,A)
    if save_plot_png or show_plot:
        from mpl_toolkits.mplot3d import Axes3D
        n0, n1, n2 = A.shape
        n = np.cbrt(n0*n1*n2)  # effective bins per axis
        N = n0*n1*n2  # total number of bins
        t0 = np.linspace(0,1,n0)
        t1 = np.linspace(0,1,n1)
        t2 = np.linspace(0,1,n2)
        x, y, z, = np.meshgrid(t0,t1,t2,indexing="ij")
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # Only display values higher than threshold (for speed)
        threshold = 1/N # mean for a uniform distribution
        T = A >= threshold
        if colorspace == "RGB":
            colors = np.transpose(np.vstack((x.flatten(), y.flatten(), z.flatten())))[T.flatten()] if color else None
        elif colorspace == "LAB":
            # Map normalized [0,1] coords to LAB ranges, then convert to RGB
            # for display. NOTE(review): the `color` flag is ignored here,
            # and any other colorspace leaves `colors` undefined (NameError
            # at the scatter call) — confirm intended inputs.
            C = (np.vstack((x.flatten(), y.flatten(), z.flatten())).T).astype(np.float32)[T.flatten()]
            C[:, 0] *= 100; C[:, 1:] = C[:, 1:] * 255 - 128 # Put in the good range
            colors = cv2.cvtColor(np.expand_dims(C, 0), cv2.COLOR_LAB2RGB)[0]
        # Marker size is proportional to the bin weight, normalized by the max.
        ax.scatter(x[T], y[T], z[T], s=(scale*scale0/n)*A[T]/np.max(A), c=colors)
        minmax = (0 - 1/20, 1 + 1/20) # Add extra border
        ax.set_xlim(minmax); ax.set_ylim(minmax); ax.set_zlim(minmax)
        if colorspace == "RGB": ax.set_xlabel('R (slow index)'); ax.set_ylabel('G (medium index)'); ax.set_zlabel('B (fast index)');
        elif colorspace == "LAB": ax.set_xlabel('L (slow index)'); ax.set_ylabel('A (medium index)'); ax.set_zlabel('B (fast index)');
        plt.title("scale=%g, threshold=%.2g, max=%e"%(scale,threshold,np.max(A)))
        if save_plot_png:
            fig.savefig(filebase+".png", bbox_inches="tight")
        if show_plot:
            plt.show()
        else:
            plt.close(fig)
def save_3d_metric(A, filebase, scale=1, colorspace="RGB", save_plot_png=True, show_plot=False, save_npy=True):
    """Save a 3D per-bin metric field as a .npy file and/or a scatter plot.

    Unlike save_color_hist, every bin is drawn and the marker color encodes
    the metric value (coolwarm colormap) rather than the bin's color.

    :param A: 3D array of metric values, one per histogram bin.
    :param filebase: Output path without extension (.npy / .png appended).
    :param scale: Marker-size multiplier for the scatter plot.
    :param colorspace: "RGB" or "LAB"; only affects the axis labels here.
    :param save_plot_png: When True, save the figure to filebase + ".png".
    :param show_plot: When True, display the figure interactively.
    :param save_npy: When True, save A with numpy.
    """
    scale0 = 5000  # base marker size, divided by cube root of bin count below
    if save_npy:
        np.save(filebase,A)
    if save_plot_png or show_plot:
        from mpl_toolkits.mplot3d import Axes3D
        n0, n1, n2 = A.shape
        n = np.cbrt(n0*n1*n2)  # effective bins per axis
        N = n0*n1*n2
        t0 = np.linspace(0,1,n0)
        t1 = np.linspace(0,1,n1)
        t2 = np.linspace(0,1,n2)
        x, y, z, = np.meshgrid(t0,t1,t2,indexing="ij")
        fig = plt.figure(figsize=(9,6))
        ax = fig.add_subplot(111, projection='3d')
        # Uniform marker size; value is encoded by the colormap.
        p = ax.scatter(x, y, z, s=(scale*scale0/n), c=A.flatten(), cmap='coolwarm')
        plt.colorbar(p)
        minmax = (0 - 1/20, 1 + 1/20) # Add extra border
        ax.set_xlim(minmax); ax.set_ylim(minmax); ax.set_zlim(minmax)
        if colorspace == "RGB": ax.set_xlabel('R (slow index)'); ax.set_ylabel('G (medium index)'); ax.set_zlabel('B (fast index)');
        elif colorspace == "LAB": ax.set_xlabel('L (slow index)'); ax.set_ylabel('A (medium index)'); ax.set_zlabel('B (fast index)');
        plt.title("scale=%g"%(scale))
        if save_plot_png:
            fig.savefig(filebase+".png", bbox_inches="tight")
        if show_plot:
            plt.show()
        else:
            plt.close(fig)
def color_transfer_ot_barycentric_proj(im,im_hist,target_hist,xi,L,color_interp="bin_value",sink_conv_filebase="",
                                       apply_bilateral_filter=False, sigmaRange=0,sigmaSpatial=0, colorspace="RGB"):
    """Transfer target colors onto an image via Sinkhorn OT and barycentric projection.

    Runs L Sinkhorn iterations to couple the image histogram p = im_hist with
    the target histogram q = target_hist under the kernel `xi`, maps each
    histogram bin color to its barycentric projection under the coupling, and
    re-colors the image pixels from the new bin colors. Optionally smooths
    the result with a cross bilateral filter guided by the input image.

    :param im: Input image, float values in [0, 1], shape (h, w, 3) — assumed,
        TODO confirm against callers.
    :param im_hist: Flattened 3D color histogram of `im`, of size n**3.
    :param target_hist: Flattened 3D histogram of the target, of size n**3.
    :param xi: Callable applying the OT Gibbs kernel to a vector (or to a
        matrix column-wise) — assumed; confirm against callers.
    :param L: Number of Sinkhorn iterations.
    :param color_interp: 'bin_value' (nearest bin); 'linear' is disabled.
    :param sink_conv_filebase: Save a plot of the Sinkhorn error, unless it's ""
    :param apply_bilateral_filter: When True, filter the color-transfer
        residual with a cross bilateral filter before returning.
    :param sigmaRange: Range sigma for the bilateral filter (0 = auto).
    :param sigmaSpatial: Spatial sigma for the bilateral filter (0 = auto).
    :param colorspace: "RGB" or "LAB"; affects intermediate image saving.
    :return: Color-transferred image, same shape as `im`, values in [0, 1].
    """
    p = im_hist
    q = target_hist
    N = im_hist.size
    n = int(round(np.cbrt(N)))
    if n**3 != N : print("Error: The histogram should be of size n^3")
    # Sinkhorn algorithm: alternately rescale a and b so the coupling
    # diag(a) K diag(b) has marginals p and q.
    a = np.zeros(N)
    b = np.ones(N)
    err_p = np.zeros(L)
    err_q = np.zeros(L)
    for i in range(0,L):
        if i%(L/5) == 0: print(i)
        xib = xi(b)
        # Relative violation of the p-marginal constraint.
        err_p[i] = np.linalg.norm(a*xib - p)/np.linalg.norm(p)
        if not np.isfinite(err_p[i]):
            # Numerical blow-up (division by tiny kernel values); keep the
            # last finite scalings rather than propagating NaNs.
            print("WARNING: Wild NaN appeared at iter %d. Stopping Sinkhorn here."%i)
            break
        a = p / xib
        xia = xi(a)
        # Relative violation of the q-marginal constraint.
        err_q[i] = np.linalg.norm(b*xia - q)/np.linalg.norm(q)
        if not np.isfinite(err_q[i]):
            print("WARNING: Wild NaN appeared at iter %d. Stopping Sinkhorn here."%i)
            break
        b = q / xia
        # print(xia)
        # print(xib)
    # plt.figure(); plt.imshow(np.log(xi(np.eye(N))), cmap='gray') # Metric
    # Pi = np.matmul(np.diag(a), np.matmul(xi(np.eye(N)), np.diag(b)))
    # plt.figure(); plt.imshow(Pi, cmap='gray')
    # plt.figure(); plt.imshow(np.log(Pi), cmap='gray')
    # Plot Sinkhorn error
    if sink_conv_filebase:
        fig = plt.figure()
        plt.semilogy(err_p)
        plt.semilogy(err_q)
        plt.legend(["err_p","err_q"])
        plt.title("Sinkhorn marginal constraint")
        fig.savefig(os.path.join(prm.outdir, sink_conv_filebase+".png"), bbox_inches="tight")
        plt.close(fig)
    # Get the vector of all colors (bin centers)
    t = (np.arange(0, n) + 1/2)/n # Get the bin centers
    cr,cg,cb = np.meshgrid(t,t,t, indexing="ij")
    bin_colors = np.vstack((cr.flatten(),cg.flatten(),cb.flatten())).T
    # Barycentric projection: each bin color is replaced by the average of
    # target bin colors weighted by the coupling row (two earlier variants
    # kept commented for reference).
    # new_colors = (np.expand_dims(a,1)*xi(np.expand_dims(b,1)*bin_colors))/np.expand_dims(im_hist,1)
    # new_colors = (np.expand_dims(a,1)*xi(np.expand_dims(b,1)*bin_colors))/np.tile((a*xi(b)),(3,1)).T
    new_colors = (xi(np.expand_dims(b,1)*bin_colors))/np.tile((xi(b)),(3,1)).T
    # print("max new_colors = %f" % np.max(new_colors))
    # print("min new_colors = %f" % np.min(new_colors))
    if np.max(new_colors) > 1:
        print("WARNING: The color values in the new colors exceed 1 : max = %f"%np.max(new_colors))
        new_colors = np.clip(new_colors,0,1)
    h, w = im.shape[0:2]
    imr = im.reshape(-1,3)
    im_ct = np.zeros([h*w,3])
    if color_interp == 'bin_value' :
        # Don't interpolate, just stairs (nearest neighbor)
        ind = (imr*(n-1e-4)).astype('int') # Remove an epsilon to avoid having the value n
        rav_ind = np.ravel_multi_index(ind.T, (n, n, n))
        im_ct = new_colors[rav_ind]
    elif color_interp == 'linear':
        # Disabled on purpose: see the message below.
        print("WARNING: This doesn't make sense, I should interpolate with values that are close spatially, in all dimensions,"
              "not just the index before and after (which amounts to interpolating only in the fast speed dimension.")
        exit(-1)
        # Get upper and lower sample index to interpolate between.
        i_low = np.floor(imr*n-1/2).astype('int')
        i_high = np.ceil(imr*n-1/2).astype('int')
        i_low[i_low < 0] = 0 # Border interpolation is flat
        i_high[i_high > n-1] = n-1 # Border interpolation is flat
        i_rav_low = np.ravel_multi_index(i_low.T, (n, n, n))
        i_rav_high = np.ravel_multi_index(i_high.T, (n, n, n))
        # Interpolation weights
        alpha = np.abs(imr - bin_colors[i_rav_low]) / (1 / n)
        im_ct = (1 - alpha) * new_colors[i_rav_low] + alpha * new_colors[i_rav_high]
    else:
        print("ERROR: Unrecognized value for parameter 'color_interp'")
        exit(-1)
    im_out = im_ct.reshape([h,w,3])
    if apply_bilateral_filter:
        # Get file index to save each file
        f_index = sink_conv_filebase.split("-",maxsplit=1)[1]
        im_rgb = im_out
        # Save image before bilteral filtering
        if colorspace == "LAB": # Convert to RGB before saving image
            # C = (np.vstack((x.flatten(), y.flatten(), z.flatten())).T).astype(np.float32)[T.flatten()]
            im_lab = im_out.copy().astype(np.float32)
            im_lab[:,:,0] *= 100; im_lab[:,:,1:] = im_lab[:,:,1:] * 255 - 128 # Put in the good range
            im_rgb = cv2.cvtColor(im_lab, cv2.COLOR_LAB2RGB)
        # Save the image before the bilateral filter (bbf)
        # imageio.imsave(os.path.join(prm.outdir,"interp-%s-bf.png"%f_index),im_out)
        Image.fromarray((im_rgb * 255).astype('uint8'), 'RGB').save(os.path.join(prm.outdir, "interp-bbf-%s.png"%f_index))
        # Apply the bilateral filter technique
        print("Applying bilateral filter")
        # Filter the residual (transferred minus input), not the image itself.
        in_filter = im_out - im
        import bilateral_approximation as bf
        edge_min = 0.0
        edge_max = 1.0
        if not sigmaSpatial:
            sigmaSpatial = np.min(in_filter[:,:,0].shape)/16.0
            print("Setting sigmaSpatial = %f"%sigmaSpatial)
        if not sigmaRange:
            sigmaRange = (edge_max - edge_min)/10.0
            print("Setting sigmaRange = %f"%sigmaRange)
        # Cross Bilateral filtering:
        # We smooth the values of the difference btw color-transferred and input image,
        # but respecting the edges of the input image, not of the difference one.
        im_bf = bf.bilateral_approximation_color(in_filter, im, sigmaSpatial, sigmaRange, edgeMin=edge_min, edgeMax=edge_max)
        # Clamp values because negative or > 1 values don't make sense for color.
        im_ct_bf = np.clip(im + im_bf, 0, 1)
        # # For Debug
        # fig = plt.figure(figsize=(16,9))
        # plt.subplot(231)
        # plt.imshow(np.mean(np.abs(in_filter),axis=2)); plt.title('ct - in')
        # plt.colorbar()
        # plt.subplot(232)
        # plt.imshow(im); plt.title('in')
        # plt.subplot(233)
        # plt.imshow(im_out); plt.title('ct')
        # plt.subplot(234)
        # plt.imshow(np.mean(np.abs(im_bf),axis=2)); plt.title('cross_bf(ct - in, in)')
        # plt.colorbar()
        # plt.subplot(236)
        # plt.imshow(im_ct_bf); plt.title('in + cross_bf(ct - in, in)')
        # # plt.show()
        # fig.savefig(os.path.join(prm.outdir,"pipeline-%s.png"%f_index), bbox_inches="tight")
        # plt.close(fig)
        im_out = im_ct_bf
    return im_out
def prolong_metric(w, n, num_prolong, metric_type, sp_order=2):
"""
Prolongs the metric of a square nd array (3d histogram)
Prolongation of the histogram is co-located (existing points stay where they are)
Prolongation of the metric is not, so it's a little more complicated
:param w: Input array of size [dim, n**(dim-1)*(n-1)]
:param n: Size of the arrays with which the metric is compatible (all dimensions must have same size: [n,...,n])
:param num_prolong: Number of times the array should be prolonged.
:param metric_type: Value of prm.metric_type (see ml_parameters.py)
:param sp_order: Order of the spline for interpolation.
A pair (x,y) with x the spline order for interpolation along the axis of the edge,
and y the spline order for interpolation along other axes.
Use 0 for constant (nearest) interpolation, 1 for linear, 2 for quadratic, etc...
:return: the prolonged array, and its new size
"""
import scipy.ndimage
dim = w.shape[0]
n_big = 2 ** num_prolong * (n - 1) + 1
if metric_type.startswith("grid_vertices"): # Metric is on points
# For metric on points, we don't need to differentiate between interpolation along axis of the edge or not,
# so it's a lot simpler than for metric on edges
w_big = scipy.ndimage.zoom(w.reshape([dim] + [n, ] * dim), zoom=(1,*((n_big/n,)*dim)), order=sp_order, mode='nearest').reshape([dim,n_big**dim])
else: # Metric is on edges
# For each vector of edge weight, use a 2x zoom for the axis that the edge is along,
# and a (2*n-1)/n zoom for the two other axes.
# This way, we get the correct number of interpolated points | |
from collections import defaultdict
from copy import deepcopy
from enum import Enum
from pybbn.graph.edge import JtEdge
from pybbn.graph.graph import Ug
from pybbn.graph.node import SepSet, Clique, BbnNode
from pybbn.graph.potential import Potential, PotentialEntry, PotentialUtil
from pybbn.graph.variable import Variable
class JoinTree(Ug):
"""
Join tree.
"""
def __init__(self):
    """Create an empty join tree with no potentials, evidences or listener."""
    Ug.__init__(self)
    self.potentials = {}
    self.evidences = {}
    self.listener = None
    self.parent_info = defaultdict(set)
def __deepcopy__(self, memodict=None):
    """Return a deep copy of this join tree.

    :param memodict: Memo dictionary used by copy.deepcopy to track already
        copied objects; a fresh dict is created when None.
    :return: New JoinTree with deep-copied nodes, edges, potentials,
        evidences and parent information.
    """
    # Bug fix: the previous mutable default argument ({}) was shared across
    # calls, leaking memo entries between unrelated deepcopy invocations.
    if memodict is None:
        memodict = {}
    jt = JoinTree()
    jt.nodes = deepcopy(self.nodes, memodict)
    jt.edges = deepcopy(self.edges, memodict)
    jt.edge_map = deepcopy(self.edge_map, memodict)
    jt.neighbors = deepcopy(self.neighbors, memodict)
    jt.potentials = deepcopy(self.potentials, memodict)
    jt.evidences = deepcopy(self.evidences, memodict)
    jt.parent_info = deepcopy(self.parent_info, memodict)
    return jt
def get_posteriors(self):
    """Get the posterior for all nodes.

    :return: Map. Keys are node names; values are maps of node values to
        posterior probabilities.
    """
    posteriors = {}
    for node in self.get_bbn_nodes():
        potential = self.get_bbn_potential(node)
        probs = {}
        for entry in potential.entries:
            # Key is the concatenation of the entry's values.
            key = ''.join(f'{v}' for _, v in entry.entries.items())
            probs[key] = entry.value
        posteriors[node.variable.name] = probs
    return posteriors
def get_bbn_potential(self, node):
    """Get the normalized potential associated with the specified BBN node.

    :param node: BBN node.
    :return: Potential.
    """
    parent_clique = node.metadata['parent.clique']
    marginalized = PotentialUtil.marginalize_for(self, parent_clique, [node])
    return PotentialUtil.normalize(marginalized)
def unmark_cliques(self):
    """Clear the marked flag on every clique of this join tree."""
    for current_clique in self.get_cliques():
        current_clique.unmark()
def update_bbn_cpts(self, cpts):
    """Update the CPTs of the BBN nodes.

    :param cpts: Dictionary of CPTs. Keys are ids of BBN nodes and values
        are the new CPTs.
    :return: None
    """
    node_index = {node.id: node for clique in self.get_cliques() for node in clique.nodes}
    for node_id, cpt in cpts.items():
        node = node_index.get(node_id)
        if node is not None:
            node.probs = cpt
            # Drop the cached potential so it is rebuilt from the new CPT.
            node.potential = None
def get_bbn_node_and_parents(self):
    """Get a map of each BBN node to the list of its parent nodes.

    :return: Dict. Keys are BBN nodes and values are lists of parent nodes.
    """
    lookup = {node.id: node for clique in self.get_cliques() for node in clique.nodes}
    result = {}
    for node_id, node in lookup.items():
        parent_ids = self.parent_info[node_id]
        result[node] = [lookup[pid] for pid in lookup if pid in parent_ids]
    return result
def __get_bbn_nodes__(self):
    """Build an id-to-node index of all BBN nodes in the cliques.

    The index is rebuilt on every call (an earlier cached version was
    removed — presumably because clique contents can change; verify
    before reintroducing caching).

    :return: Dictionary mapping BBN node id to BBN node.
    """
    # Fix: the docstring previously claimed the result was cached, but the
    # caching code was commented out; the dead code is removed and the
    # documentation now matches the behavior.
    return {node.id: node for clique in self.get_cliques() for node in clique.nodes}
def get_bbn_nodes(self):
    """Get all the BBN nodes in this junction tree.

    :return: List of BBN nodes.
    """
    return [node for node in self.__get_bbn_nodes__().values()]
def get_bbn_node(self, id):
    """Get the BBN node associated with the specified id.

    :param id: Node id.
    :return: BBN node, or None if no such node exists.
    """
    # dict.get returns None for a missing key, matching the contract.
    return self.__get_bbn_nodes__().get(id)
def get_bbn_node_by_name(self, name):
    """Get the BBN node associated with the specified name.

    :param name: Node name.
    :return: BBN node, or None if no such node exists.
    """
    by_name = {node.variable.name: node for clique in self.get_cliques() for node in clique.nodes}
    return by_name.get(name)
def find_cliques_with_node_and_parents(self, id):
    """Find all cliques containing the specified node and all of its parents.

    :param id: Node id.
    :return: List of cliques.
    """
    ids = self.__get_parent_ids__(id)
    ids.append(id)
    required = set(ids)
    return [c for c in self.get_cliques() if c.get_node_ids().issuperset(required)]
def add_potential(self, clique, potential):
    """Associate a potential with the specified clique.

    :param clique: Clique.
    :param potential: Potential.
    :return: This join tree (for chaining).
    """
    self.potentials[clique.id] = potential
    return self
def get_cliques(self):
    """Get all the cliques (non-separation-set nodes) in this junction tree.

    :return: List of cliques.
    """
    return [n for n in self.get_nodes() if not isinstance(n, SepSet)]
def get_sep_sets(self):
    """Get all the separation sets in this junction tree.

    :return: List of separation sets.
    """
    return [n for n in self.get_nodes() if isinstance(n, SepSet)]
def add_edge(self, edge):
    """Add a JtEdge, wiring clique <-> separation-set adjacency both ways.

    Arguments that are not JtEdge instances are ignored.

    :param edge: JtEdge.
    :return: This join tree.
    """
    if not isinstance(edge, JtEdge):
        return self
    if self.__shouldadd__(edge):
        sep_set = edge.sep_set
        lhs = edge.i
        rhs = edge.j
        for node in (sep_set, lhs, rhs):
            self.add_node(node)
        # Record bidirectional adjacency between each clique and the sep-set.
        for clique in (lhs, rhs):
            self.edge_map[clique.id].add(sep_set.id)
            self.neighbors[clique.id].add(sep_set.id)
            self.edge_map[sep_set.id].add(clique.id)
            self.neighbors[sep_set.id].add(clique.id)
        self.edges[edge.key] = edge
    return self
def get_flattened_edges(self):
"""
Gets all the edges "flattened" out. Since separation-sets are really hyper-edges, this method breaks
separation-sets into two edges.
:return: Array of edges.
"""
edges = []
for edge in self.edges.values():
edges.append(edge.get_lhs_edge())
edges.append(edge.get_rhs_edge())
return edges
def set_listener(self, listener):
"""
Sets the listener.
:param listener: JoinTreeListener.
"""
self.listener = listener
def get_evidence(self, node, value):
"""
Gets the evidence associated with the specified BBN node and value.
:param node: BBN node.
:param value: Value.
:return: Potential (the evidence).
"""
if node.id not in self.evidences:
self.evidences[node.id] = dict()
if value not in self.evidences[node.id]:
entry = PotentialEntry()
entry.add(node.id, value)
entry.value = 1.0
potential = Potential()
potential.add_entry(entry)
self.evidences[node.id][value] = potential
result = self.evidences[node.id][value]
return result
def get_change_type(self, evidences):
"""
Gets the change type associated with the specified list of evidences.
:param evidences: List of evidences.
:return: ChangeType.
"""
changes = []
for evidence in evidences:
node = evidence.node
potentials = self.evidences[node.id]
change = evidence.compare(potentials)
changes.append(change)
count = len([change_type for change_type in changes if ChangeType.RETRACTION == change_type])
if count > 0:
return ChangeType.RETRACTION
count = len([change_type for change_type in changes if ChangeType.UPDATE == change_type])
if count > 0:
return ChangeType.UPDATE
return ChangeType.NONE
def get_unobserved_evidence(self, node):
"""
Gets the unobserved evidences associated with the specified node.
:param node: BBN node.
:return: Evidence.
"""
evidence = Evidence(node, EvidenceType.UNOBSERVE)
for value in node.variable.values:
evidence.add_value(value, 1.0)
return evidence
def unobserve(self, nodes):
"""
Unobserves a list of nodes.
:param nodes: List of nodes.
:return: This join tree.
"""
evidences = [self.get_unobserved_evidence(node) for node in nodes]
self.update_evidences(evidences)
return self
def unobserve_all(self):
"""
Unobserves all BBN nodes.
:return: This join tree.
"""
self.unobserve(self.get_bbn_nodes())
return self
def update_evidences(self, evidences):
"""
Updates this join tree with the list of specified evidence.
:param evidences: List of evidences.
:return: This join tree.
"""
for evidence in evidences:
evidence.validate()
change = self.get_change_type(evidences)
for evidence in evidences:
node = evidence.node
potentials = self.evidences[node.id]
for k, v in evidence.values.items():
potential = potentials[k]
potential.entries[0].value = v
self.__notify_listener__(change)
return self
def set_observation(self, evidence):
"""
Sets a single observation.
:param evidence: Evidence.
:return: This join tree.
"""
potentials = self.evidences[evidence.node.id]
pvalues = []
for v, potential in potentials.items():
entry = potential.entries[0]
p = entry.value
if 1.0 == p:
pvalues.append(v)
cvalues = []
for v, likelihood in evidence.values.items():
if 1.0 == likelihood:
cvalues.append(v)
if 1 == len(pvalues):
last_value = pvalues[0]
curr_value = cvalues[0]
if last_value == curr_value:
self.unobserve([evidence.node])
else:
self.update_evidences([evidence])
else:
self.update_evidences([evidence])
return self
@staticmethod
def to_dict(jt):
"""
Converts a junction tree to a serializable dictionary.
:param jt: Junction tree.
:return: Dictionary.
"""
def nodes_to_dict(nodes):
d = {}
for n in nodes:
if isinstance(n, SepSet):
d[n.id] = {
'left': n.left.id,
'right': n.right.id,
'type': 'sepset'
}
elif isinstance(n, Clique):
d[n.id] = {
'node_ids': list(n.node_ids),
'type': 'clique'
}
return d
def edges_to_dict(edges):
return [e.sep_set.id for e in edges]
bbn_nodes = {n.id: n.to_dict() for n in jt.get_bbn_nodes()}
jt_nodes = nodes_to_dict(jt.get_nodes())
jt_edges = edges_to_dict(jt.get_edges())
return {
'bbn_nodes': bbn_nodes,
'jt': {
'nodes': jt_nodes,
'edges': jt_edges,
'parent_info': jt.parent_info
}
}
@staticmethod
def from_dict(d):
"""
Converts a dictionary to a junction tree.
:param d: Dictionary.
:return: Junction tree.
"""
def get_variable(d):
return Variable(d['id'], d['name'], d['values'])
def get_bbn_node(d):
return BbnNode(get_variable(d['variable']), d['probs'])
def get_clique(d, bbn_nodes):
return Clique([bbn_nodes[idx] if idx in bbn_nodes else bbn_nodes[str(idx)] for idx in d['node_ids']])
def get_sep_set(lhs_clique, rhs_clique):
_, lhs, rhs, intersection = lhs_clique.intersects(rhs_clique)
return SepSet(lhs_clique, rhs_clique, lhs, rhs, intersection)
bbn_nodes = {k: get_bbn_node(n) for k, n in d['bbn_nodes'].items()}
cliques = [get_clique(clique, bbn_nodes)
for k, clique in d['jt']['nodes'].items() if clique['type'] == 'clique']
cliques = {c.id: c for c in cliques}
sepsets = [get_sep_set(cliques[s['left']], cliques[s['right']])
for k, s in d['jt']['nodes'].items() if s['type'] == 'sepset']
sepsets = {s.id: s for s in sepsets}
edges = [JtEdge(sepsets[e]) for e in d['jt']['edges']]
jt = JoinTree()
if len(edges) > 0:
for e in edges:
jt.add_edge(e)
else:
jt.nodes = cliques
jt.parent_info = | |
4), dpi=300)
ax1.imshow(bone_img < thresh_vals[1], cmap="bone_r")
ax1.set_title("Threshold $<$ %d" % (thresh_vals[1]))
ax2.imshow(bone_img < thresh_vals[2], cmap="bone_r")
ax2.set_title("Threshold $<$ %d" % (thresh_vals[2]))
# # Baby Bear
# We can thus follow a process for ending up with a happy medium of the two
#
# ## Hysteresis Thresholding: Reducing Pixels
#
# Now we apply the following steps.
#
# 1. Take the first threshold image with the lowest (most strict) threshold
# 1. Remove the objects which are not cells (too small) using an opening operation.
# 1. Take a second threshold image with the higher value
# 1. Combine both threshold images
# 1. Keep the _between_ pixels which are connected (by looking again at a neighborhood $\mathcal{N}$) to the _air_ voxels and ignore the other ones. This goes back to our original supposition that the smaller structures are connected to the larger structures
#
# In[8]:
from skimage.morphology import dilation, opening, disk
from collections import OrderedDict
# Ordered so the panels below render in processing order.
step_list = OrderedDict()
# 1) strict threshold: only pixels below the lower cutoff survive
step_list["Strict Threshold"] = bone_img < thresh_vals[1]
# 2) morphological opening with a radius-1 disk removes tiny speckle objects
step_list["Remove Small Objects"] = opening(step_list["Strict Threshold"], disk(1))
# 3) looser threshold: higher cutoff admits more pixels
step_list["Looser Threshold"] = bone_img < thresh_vals[2]
# 4) sum of the masks: 0 = background, 1 = loose-only, 2 = in both
step_list["Both Thresholds"] = (
    1.0 * step_list["Looser Threshold"] + 1.0 * step_list["Remove Small Objects"]
)
# the tricky part: keep only the "between" pixels connected to the strict
# mask — repeated conditional dilation constrained to the looser mask
step_list["Connected Thresholds"] = step_list["Remove Small Objects"]
for i in range(10):
    step_list["Connected Thresholds"] = (
        dilation(step_list["Connected Thresholds"], disk(1.8))
        & step_list["Looser Threshold"]
    )
# One panel per step, numbered 1..N in processing order.
fig, ax_steps = plt.subplots(
    len(step_list), 1, figsize=(6, 4 * (len(step_list))), dpi=150
)
for i, (c_ax, (c_title, c_img)) in enumerate(
    zip(ax_steps.flatten(), step_list.items()), 1
):
    # Binary masks use reversed bone; the summed (multi-valued) image uses jet.
    c_ax.imshow(c_img, cmap="bone_r" if c_img.max() <= 1 else "jet")
    c_ax.set_title("%d) %s" % (i, c_title))
    c_ax.axis("off")
# More Complicated Images
# ===
# As we briefly covered last time, many measurement techniques produce quite rich data.
# - Digital cameras produce 3 channels of color for each pixel (rather than just one intensity)
# - MRI produces dozens of pieces of information for every voxel which are used when examining different _contrasts_ in the system.
# - Raman-shift imaging produces an entire spectrum for each pixel
# - Coherent diffraction techniques produce 2- (sometimes 3) diffraction patterns for each point.
# $$ I(x,y) = \hat{f}(x,y) $$
# # Feature Vectors
#
# __A pairing between spatial information (position) and some other kind of information (value).__
# $$ \vec{x} \rightarrow \vec{f} $$
#
# We are used to seeing images in a grid format where the position indicates the row and column in the grid and the intensity (absorption, reflection, tip deflection, etc) is shown as a different color. We take an example here of text on a page.
# In[9]:
from skimage.io import imread
import matplotlib.pyplot as plt
import numpy as np
from skimage.data import page
import pandas as pd
from skimage.filters import gaussian, median, threshold_triangle
# Sample "scanned page" image shipped with scikit-image.
page_image = page()
# Rough text map: small-window median (keeps local detail) minus a heavily
# blurred background estimate. gaussian returns floats scaled to [0, 1], so
# it is multiplied by 255 to match the uint8 median output.
just_text = median(page_image, np.ones((2, 2))) - 255 * gaussian(page_image, 20.0)
plt.imshow(page_image, cmap="bone")
# In[10]:
# Per-pixel coordinate grids (x = column index, y = row index).
xx, yy = np.meshgrid(np.arange(page_image.shape[1]), np.arange(page_image.shape[0]))
# Feature table: one row per pixel holding its position, intensity, and the
# text/background label derived from the background-subtracted image above.
page_table = pd.DataFrame(
    dict(
        x=xx.ravel(),
        y=yy.ravel(),
        intensity=page_image.ravel(),
        is_text=just_text.ravel() > 0,
    )
)
page_table.sample(5)
# In[11]:
fig, ax1 = plt.subplots(1, 1)
# Overlaid intensity histograms for text vs. background pixels.
# NOTE(review): grouping by a list yields tuple group keys on recent pandas,
# so the label may render as "Text: (True,)" — confirm the intended pandas version.
for c_cat, c_df in page_table.groupby(["is_text"]):
    ax1.hist(
        c_df["intensity"], np.arange(255), label="Text: {}".format(c_cat), alpha=0.5
    )
# NOTE(review): the "nonposy" keyword was removed in matplotlib >= 3.5
# (renamed "nonpositive") — this call assumes an older matplotlib.
ax1.set_yscale("log", nonposy="clip")
ax1.legend()
# In[12]:
from sklearn.metrics import roc_curve, roc_auc_score
# ROC analysis using raw pixel intensity as the classifier score for is_text.
fpr, tpr, _ = roc_curve(page_table["is_text"], page_table["intensity"])
roc_auc = roc_auc_score(page_table["is_text"], page_table["intensity"])
fig, ax = plt.subplots(1, 1)
ax.plot(fpr, tpr, label="ROC curve (area = %0.2f)" % roc_auc)
# Chance diagonal for reference.
ax.plot([0, 1], [0, 1], "k--")
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
ax.set_title("Receiver operating characteristic example")
ax.legend(loc="lower right")
# # Adding Information
# Here we can improve the results by adding information. As we discussed in the second lecture on enhancement, edge-enhancing filters can be very useful for classifying images.
# In[13]:
def dog_filter(in_img, sig_1, sig_2):
    """Difference-of-Gaussians band-pass filter.

    :param in_img: input image array.
    :param sig_1: sigma of the first Gaussian blur.
    :param sig_2: sigma of the second Gaussian blur.
    :return: gaussian(in_img, sig_1) - gaussian(in_img, sig_2).
    """
    # Bug fix: the original ignored ``in_img`` and closed over the global
    # ``page_image``, so the function only ever filtered that one image.
    return gaussian(in_img, sig_1) - gaussian(in_img, sig_2)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), dpi=150)
# Difference-of-Gaussians edge image (narrow sigma 0.5 minus wide sigma 10).
page_edges = dog_filter(page_image, 0.5, 10)
ax1.imshow(page_image, cmap="jet")
ax2.imshow(page_edges, cmap="jet")
# Append the edge response as an extra per-pixel feature column.
page_table["edges"] = page_edges.ravel()
page_table.sample(5)
# In[14]:
fig, ax1 = plt.subplots(1, 1)
# Histogram of the edge feature for text vs. background pixels.
for c_cat, c_df in page_table.groupby(["is_text"]):
    ax1.hist(c_df["edges"], label="Text: {}".format(c_cat), alpha=0.5)
# NOTE(review): "nonposy" was removed in matplotlib >= 3.5 (renamed
# "nonpositive") — this call assumes an older matplotlib.
ax1.set_yscale("log", nonposy="clip")
ax1.legend()
# In[15]:
from sklearn.metrics import roc_curve, roc_auc_score
# Combined score: scaled-down intensity plus the edge response — the 1/1000
# factor presumably keeps the uint8 intensity from swamping the small edge
# values (TODO confirm the intended weighting).
fpr2, tpr2, _ = roc_curve(
    page_table["is_text"], page_table["intensity"] / 1000.0 + page_table["edges"]
)
roc_auc2 = roc_auc_score(
    page_table["is_text"], page_table["intensity"] / 1000.0 + page_table["edges"]
)
fig, ax = plt.subplots(1, 1)
# Compare the intensity-only curve with the combined-feature curve.
ax.plot(fpr, tpr, label="Intensity curve (area = %0.2f)" % roc_auc)
ax.plot(fpr2, tpr2, label="Combined curve (area = %0.2f)" % roc_auc2)
ax.plot([0, 1], [0, 1], "k--")
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
ax.set_title("Receiver operating characteristic example")
ax.legend(loc="lower right")
# # K-Means Clustering / Classification (Unsupervised)
#
# - Automatic clustering of multidimensional data into groups based on a distance metric
# - Fast and scalable to petabytes of data (Google, Facebook, Twitter, etc. use it regularly to classify customers, advertisements, queries)
# - __Input__ = feature vectors, distance metric, number of groups
# - __Output__ = a classification for each feature vector to a group
# # Example
# - Distance metric
# $$ D_{ij}=||\vec{v}_i-\vec{v}_j|| $$
#
# - Group Count ($N=2$)
#
# In[16]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
# Synthetic 2-D point cloud; fixed random_state keeps the demo reproducible.
test_pts = pd.DataFrame(
    make_blobs(n_samples=200, random_state=2018)[0], columns=["x", "y"]
)
plt.plot(test_pts.x, test_pts.y, "r.")
test_pts.sample(5)
# In[18]:
from sklearn.cluster import KMeans
# Cluster the synthetic points into two groups and color by assignment.
km = KMeans(n_clusters=2, random_state=2018)
n_grp = km.fit_predict(test_pts)
plt.scatter(test_pts.x, test_pts.y, c=n_grp)
grp_pts = test_pts.copy()
grp_pts["group"] = n_grp
# Display a few sample rows from each cluster.
grp_pts.groupby(["group"]).apply(lambda x: x.sample(5))
# # K-Means Algorithm
#
# We give as an initial parameter the number of groups we want to find and possibly a criterion for removing groups that are too similar
#
# 1. Randomly create center points (groups) in vector space
# 1. Assigns group to data point by the “closest” center
# 1. Recalculate centers from mean point in each group
# 1. Go back to step 2 until the groups stop changing
#
# ***
#
# What vector space do we have?
# - Sometimes represent physical locations (classify swiss people into cities)
# - Can include intensity or color (K-means can be used as a thresholding technique when you give it image intensity as the vector and tell it to find two or more groups)
# - Can also include orientation, shape, or in extreme cases full spectra (chemically sensitive imaging)
#
# #### Note: If you look for N groups you will almost always find N groups with K-Means, whether or not they make any sense
#
# In[19]:
from sklearn.cluster import KMeans
# Same data, but asking for four clusters — K-means will happily report four
# groups whether or not the data truly contains them.
km = KMeans(n_clusters=4, random_state=2018)
n_grp = km.fit_predict(test_pts)
plt.scatter(test_pts.x, test_pts.y, c=n_grp)
grp_pts = test_pts.copy()
grp_pts["group"] = n_grp
# Display a few sample rows from each cluster.
grp_pts.groupby(["group"]).apply(lambda x: x.sample(3))
# # K-Means Applied to Cortex Image
#
#
# In[20]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
# Downsample (every 3rd pixel on each axis) and rescale the cortex image.
cortex_img = imread("ext-figures/cortex.png")[::3, ::3] / 1000.0
np.random.seed(2018)
fig, (ax1) = plt.subplots(1, 1, figsize=(8, 8), dpi=72)
ax1.imshow(cortex_img, cmap="bone")
# Feature table: one row per pixel with its position and intensity.
xx, yy = np.meshgrid(np.arange(cortex_img.shape[1]), np.arange(cortex_img.shape[0]))
cortex_df = pd.DataFrame(dict(x=xx.ravel(), y=yy.ravel(), intensity=cortex_img.ravel()))
cortex_df.sample(5)
# In[21]:
from sklearn.cluster import KMeans
# K-means on raw (x, y, intensity) features — position dominates the distance
# because its numeric range is much larger than the rescaled intensity's.
km = KMeans(n_clusters=4, random_state=2018)
cortex_df["group"] = km.fit_predict(cortex_df[["x", "y", "intensity"]].values)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8), dpi=72)
ax1.imshow(cortex_img, cmap="bone")
# Fold the per-pixel labels back into image shape for display.
ax2.imshow(cortex_df["group"].values.reshape(cortex_img.shape), cmap="gist_earth")
cortex_df.groupby(["group"]).apply(lambda x: x.sample(3))
# # Rescaling components
#
# Since the distance is currently calculated by $||\vec{v}_i-\vec{v}_j||$ and the values for the position are much larger than the values for the _Intensity_, _Sobel_ or _Gaussian_, they need to be rescaled so they all fit on the same axis
# $$\vec{v} = \left\{\frac{x}{10}, \frac{y}{10}, \textrm{Intensity}\right\}$$
# In[22]:
km = KMeans(n_clusters=4, random_state=2018)
# Rescale the spatial coordinates (divide by 10) so position and intensity
# contribute more comparably to the Euclidean distance.
scale_cortex_df = cortex_df.copy()
scale_cortex_df.x = scale_cortex_df.x / 10
scale_cortex_df.y = scale_cortex_df.y / 10
scale_cortex_df["group"] = km.fit_predict(
    scale_cortex_df[["x", "y", "intensity"]].values
)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8), dpi=72)
ax1.imshow(cortex_img, cmap="bone")
# Labels reshaped back into image form for side-by-side comparison.
ax2.imshow(scale_cortex_df["group"].values.reshape(cortex_img.shape), cmap="gist_earth")
scale_cortex_df.groupby(["group"]).apply(lambda x: x.sample(3))
# In[23]:
km = KMeans(n_clusters=5, random_state=2019)
# A stronger spatial weight (divide by 5) and five clusters, to show how the
# scaling choice changes the segmentation.
scale_cortex_df = cortex_df.copy()
scale_cortex_df.x = scale_cortex_df.x / 5
scale_cortex_df.y = scale_cortex_df.y / 5
scale_cortex_df["group"] = km.fit_predict(
    scale_cortex_df[["x", "y", "intensity"]].values
)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8), dpi=72)
ax1.imshow(cortex_img, cmap="bone")
ax2.imshow(
    scale_cortex_df["group"].values.reshape(cortex_img.shape), cmap="nipy_spectral"
)
scale_cortex_df.groupby(["group"]).apply(lambda x: x.sample(3))
# # Superpixels
#
# An approach for simplifying images by performing a clustering and forming super-pixels from groups of similar pixels.
# - https://ivrl.epfl.ch/research/superpixels
# 
# ## Why use superpixels
#
# Drastically reduced data size, serves as an initial segmentation showing spatially meaningful groups
#
# We start with an example of shale with multiple phases
# - rock
# - clay
# - pore
# In[24]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
shale_img = imread("../common/figures/shale-slice.tiff")
np.random.seed(2018)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4), dpi=100)
ax1.imshow(shale_img, cmap="bone")
# Evenly spaced cutoffs between min and max: linspace yields 7 points, the
# last is dropped, and zipping adjacent pairs gives 5 intensity bands.
thresh_vals = np.linspace(shale_img.min(), shale_img.max(), 5 + 2)[:-1]
out_img = np.zeros_like(shale_img)
for i, (t_start, t_end) in enumerate(zip(thresh_vals, thresh_vals[1:])):
    # Strict inequalities: pixels exactly on a cutoff land in no band.
    thresh_reg = (shale_img > t_start) & (shale_img < t_end)
    ax2.hist(shale_img.ravel()[thresh_reg.ravel()])
    out_img[thresh_reg] = i
ax3.imshow(out_img, cmap="gist_earth")
# In[26]:
from skimage.segmentation import slic, mark_boundaries
# SLIC superpixels on the shale slice: original, label map, and boundary
# overlay side by side.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4), dpi=100)
shale_segs = slic(shale_img, n_segments=100, compactness=5e-2, sigma=3.0)
ax1.imshow(shale_img, cmap="bone")
ax1.set_title("Original Image")
ax2.imshow(shale_segs, cmap="gist_earth")
ax2.set_title("Superpixels")
ax3.imshow(mark_boundaries(shale_img, shale_segs))
# Bug fix: this was ax1.set_title, which clobbered the "Original Image"
# title and left the overlay panel untitled.
ax3.set_title("Superpixel Overlay")
# In[27]:
# Replace every superpixel by its mean intensity ("flattening" the image).
# Each segment's mean is computed before that segment is overwritten, and
# segments are disjoint, so the progressive in-place update is safe.
flat_shale_img = shale_img.copy()
for s_idx in np.unique(shale_segs.ravel()):
    flat_shale_img[shale_segs == s_idx] = np.mean(flat_shale_img[shale_segs == s_idx])
fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))
ax1.imshow(flat_shale_img, cmap="bone")
# In[28]:
# Re-threshold the superpixel-flattened image and compare against the
# original per-pixel segmentation.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 4), dpi=100)
ax1.imshow(shale_img, cmap="bone")
# Same banding scheme as before, now on the flattened intensities.
thresh_vals = np.linspace(flat_shale_img.min(), flat_shale_img.max(), 5 + 2)[:-1]
sp_out_img = np.zeros_like(flat_shale_img)
for i, (t_start, t_end) in enumerate(zip(thresh_vals, thresh_vals[1:])):
    thresh_reg = (flat_shale_img > t_start) & (flat_shale_img < t_end)
    sp_out_img[thresh_reg] = i
ax2.imshow(out_img, cmap="gist_earth")
ax2.set_title("Pixel Segmentation")
ax3.imshow(sp_out_img, cmap="gist_earth")
# Bug fix: this was ax2.set_title, which overwrote the "Pixel Segmentation"
# title and left the superpixel panel untitled.
ax3.set_title("Superpixel Segmentation")
# # Probabilistic Models of Segmentation
#
# A more general approach is to use a probabilistic model to segmentation. We start with our image $I(\vec{x}) \forall | |
Shadow":0xd7e4ed,
"Snow Storm":0xeeedea,
"Snow Tiger":0xdadce0,
"Snow White":0xeeffee,
"Snow White Blush":0xf8afa9,
"Snowball Effect":0xd9e9e5,
"Snowbank":0xe8e9e9,
"Snowbelt":0xeef1ec,
"Snowberry":0xefeced,
"Snowboard":0x74a9b9,
"Snowbound":0xddebe3,
"Snowdrop":0xeeffcc,
"Snowdrop Explosion":0xe0efe1,
"Snowfall":0xe0deda,
"Snowfall White":0xeeede0,
"Snowflake":0xeff0f0,
"Snowglory":0xc8c8c4,
"Snowman":0xfefafb,
"Snowmelt":0xc9e6e9,
"Snowpink":0xf1c5c2,
"Snowshoe Hare":0xe7e3d6,
"Snowstorm Space Shuttle":0x001188,
"Snowy Evergreen":0xedf2e0,
"Snowy Mint":0xd6f0cd,
"Snowy Mount":0xf1eeeb,
"Snowy Pine":0xf0efe3,
"Snowy Shadow":0xd3dbec,
"Snowy Summit":0xc5d8e9,
"Snub":0xa5adbd,
"Snuff":0xe4d7e5,
"Snug Cottage":0xfff9e2,
"Snuggle Pie":0xa58f73,
"So Blue-Berry":0xd4d8e3,
"So Chic!":0xcecdc5,
"So Dainty":0xcdc0c9,
"So Merlot":0x84525a,
"So Much Fawn":0xf1e0cb,
"So Shy":0xdad5d6,
"So Sour":0x00ff11,
"So Sublime":0x8b847c,
"So-Sari":0x006f47,
"Soap":0xcec8ef,
"Soap Bubble":0xb2dcef,
"Soap Green":0xa0b28e,
"Soap Pink":0xe5bfca,
"Soapstone":0xece5da,
"Soar":0xddf0f0,
"Soaring Eagle":0x9badbe,
"Soaring Sky":0xb9e5e8,
"Soccer Turf":0x22bb00,
"Sociable":0xcf8c76,
"Social Butterfly":0xfaf2dc,
"Socialist":0x921a1c,
"Socialite":0x907676,
"Sockeye":0xe49780,
"Soda Pop":0xc3c67e,
"Sodalite Blue":0x253668,
"Sōdenkaracha Brown":0x9b533f,
"Sodium Silver":0xfffcee,
"Sofisticata":0x93806a,
"Soft Amber":0xcfbea5,
"Soft Amethyst":0xcfb7c9,
"Soft Apricot":0xe0b392,
"Soft Bark":0x897670,
"Soft Beige":0xb9b5af,
"Soft Blue":0x6488ea,
"Soft Blue Lavender":0x888cba,
"Soft Blue White":0xdae7e9,
"Soft Blush":0xe3bcbc,
"Soft Boiled":0xffb737,
"Soft Breeze":0xf6f0eb,
"Soft Bromeliad":0x99656c,
"Soft Bronze":0xa18666,
"Soft Buttercup":0xffedbe,
"Soft Candlelight":0xf7eacf,
"Soft Cashmere":0xefb6d8,
"Soft Celadon":0xbfcfc8,
"Soft Celery":0xc4cd87,
"Soft Chamois":0xdbb67a,
"Soft Charcoal":0x838298,
"Soft Cloud":0xd0e3ed,
"Soft Cocoa":0x987b71,
"Soft Coral":0xffeee0,
"Soft Cream":0xf5efd6,
"Soft Denim":0xb9c6ca,
"Soft Doeskin":0xe0cfb9,
"Soft Dove":0xc2bbb2,
"Soft Fawn":0xb59778,
"Soft Feather":0xefe4dc,
"Soft Fern":0xc7d368,
"Soft Fig":0x817714,
"Soft Focus":0xe2efdd,
"Soft Fresco":0xc0d5ca,
"Soft Froth":0xbdccb3,
"Soft Fur":0x7e7574,
"Soft Fuschia":0xd496bd,
"Soft Gossamer":0xfbeede,
"Soft Grass":0xc1dfc4,
"Soft Green":0x6fc276,
"Soft Greige":0xd7c3b5,
"Soft Heather":0xbea8b7,
"Soft Ice Rose":0xe7cfca,
"Soft Impact":0xb28ea8,
"Soft Impala":0xa28b7e,
"Soft Iris":0xe6e3eb,
"Soft Ivory":0xfbf1df,
"Soft Kind":0xd1d2be,
"Soft Lace":0xf5ede5,
"Soft Lavender":0xf6e5f6,
"Soft Leather":0xd9a077,
"Soft Lilac":0xe2d4df,
"Soft Lumen":0xbeddba,
"Soft Matte":0xdd99bb,
"Soft Metal":0xbab2b1,
"Soft Mint":0xe6f9f1,
"Soft Moonlight":0xefecd7,
"Soft Moss":0xcce1c7,
"Soft Muslin":0xf7eadf,
"Soft Olive":0x59604f,
"Soft Orange":0xeec0ab,
"Soft Orange Bloom":0xffdcd2,
"Soft Peach":0xeedfde,
"Soft Peach Mist":0xfff3f0,
"Soft Pearl":0xefe7db,
"Soft Petals":0xebf8ef,
"Soft Pink":0xfdb0c0,
"Soft Pumice":0x949ea2,
"Soft Purple":0xa66fb5,
"Soft Red":0x412533,
"Soft Sage":0xbcbcae,
"Soft Salmon":0xeaaaa2,
"Soft Satin":0xeec5ce,
"Soft Savvy":0x837e87,
"Soft Secret":0xd6d4ca,
"Soft Shoe":0xe8d5c6,
"Soft Sienna":0xd09f93,
"Soft Silver":0xf7f9e9,
"Soft Sky":0xb5b5cb,
"Soft Steel":0x404854,
"Soft Straw":0xf5d180,
"Soft Suede":0xd8cbad,
"Soft Summer Rain":0xa1d7ef,
"Soft Sunrise":0xf2e3d8,
"Soft Tone":0xc3b3b2,
"Soft Tone Ink":0x9d6016,
"Soft Touch":0x639b95,
"Soft Turquoise":0x74ced2,
"Soft Violet":0xe9e6e2,
"Soft Wheat":0xd9bd9c,
"Softened Green":0xbbbca7,
"Softer Tan":0xdacab2,
"Softly Softly":0xc9b7ce,
"Softsun":0xf3ca40,
"Software":0x7f8486,
"Sohi Orange":0xe0815e,
"Sohi Red":0xe35c38,
"Soho Red":0xab6953,
"Soil Of Avagddu":0x845c00,
"Sojourn Blue":0x416f8b,
"Solar":0xfbeab8,
"Solar Ash":0xcc6622,
"Solar Energy":0xf7da74,
"Solar Flare":0xe67c41,
"Solar Fusion":0xdc9f46,
"Solar Light":0xfaf0c9,
"Solar Power":0xf4bf3a,
"Solar Storm":0xffc16c,
"Solar Wind":0xfce9b9,
"Solaria":0xf5d68f,
"Solarium":0xe1ba36,
"Soldier Green":0x545a2c,
"Solé":0xf7dda1,
"Soleil":0xe9cb2e,
"Solemn Silence":0xd3d8d8,
"Solid Empire":0x635c59,
"Solid Gold":0xb7d24b,
"Solid Opal":0xeeeae2,
"Solid Pink":0xc78b95,
"Solid Snake":0xa1a58c,
"Solitaire":0xc6decf,
"Solitary Slate":0x80796d,
"Solitary State":0xc4c7c4,
"Solitary Tree":0x539b6a,
"Solitude":0xe9ecf1,
"Solo":0xcbd2d0,
"Solstice":0xbabdb8,
"Solution":0x77abab,
"Somali Brown":0x6c5751,
"Somber":0xcbb489,
"Somber Green":0x005c2b,
"Sombre Grey":0x555470,
"Sombrero":0xb39c8c,
"Sombrero Tan":0xcba391,
"Someday":0xefe4cc,
"Something Blue":0xb0d6e6,
"Sommelier":0x5d3736,
"Somnambulist":0x778899,
"Sonata":0xabc8d8,
"Sonata Blue":0x8a9eae,
"Song Bird":0x0078af,
"Song of Summer":0xfce7b5,
"Song Thrush":0xaf987f,
"Song Thrush Egg":0xf2e5e0,
"Songbird":0xa3d1eb,
"Sonia Rose":0xf3c8c2,
"Sonic Blue":0x17569b,
"Sonic Silver":0x757575,
"Sonoma Chardonnay":0xddcb91,
"Sonoma Sage":0x90a58a,
"Sonoma Sky":0xbfd1ca,
"Sonora Apricot":0xe0b493,
"Sonora Hills":0xbea77d,
"Sonora Rose":0xe8d2e3,
"Sonora Shade":0xc89672,
"Sonoran Desert":0xcfb8a1,
"Sonoran Sands":0xddd5c6,
"Sonorous Bells":0xfaf0cb,
"Soooo Bloody":0x550000,
"Soot":0x555e5f,
"Soothing Breeze":0xb3bec4,
"Soothing Pink":0xf2e7de,
"Soothing Sea":0xc3e9e4,
"Soothing Spring":0xbccbc4,
"Soothing White":0xe1e2e4,
"Soothsayer":0x8092bc,
"Sooty":0x141414,
"Sooty Willow Bamboo":0x4d4b3a,
"Sophisticated Lilac":0x956c87,
"Sophisticated Plum":0x5d5153,
"Sophisticated Teal":0x537175,
"Sophistication":0xbfb5a6,
"Sophomore":0x7d7170,
"Sora Blue":0xa0d8ef,
"Sora Sky":0x4d8fac,
"Sorbet Ice Mauve":0xa1a6d6,
"Sorbet Yellow":0xdac100,
"Sorbus":0xdd6b38,
"Sorcerer":0x3398ce,
"Sorrel Brown":0x9b6d51,
"Sorrel Felt":0xa49688,
"Sorrel Leaf":0x887e64,
"Sorrell Brown":0x9d7f61,
"Sorx Red":0xfc0156,
"Sotek Green":0x47788a,
"Soufflé":0xedd1a8,
"Soul Quenching":0x7e989d,
"Soul Search":0x377290,
"Soul Side":0xffaa55,
"Soul Train":0x58475e,
"Soulful":0x374357,
"Soulful Blue":0x757c91,
"Soulful Music":0x3b4457,
"Soulmate":0x85777b,
"Soulstone Blue":0x0047ab,
"Sounds of Nature":0xdfe5d7,
"Sour Apple":0xa0ac4f,
"Sour Apple Rings":0x33bb00,
"Sour Bubba":0x8b844e,
"Sour Candy":0x66b348,
"Sour Face":0xadc979,
"Sour Green":0xc1e613,
"Sour Green Cherry":0xc8ffb0,
"Sour Lemon":0xffeea5,
"Sour Patch Peach":0xf4d9c5,
"Sour Tarts":0xfee5c8,
"Sour Yellow":0xeeff04,
"Source Blue":0xcdeae5,
"Source Green":0x84b6a2,
"Sourdough":0xc9b59a,
"South Kingston":0x76614b,
"South Pacific":0x698694,
"South Peach":0xead2bb,
"South Peak":0xeadfd2,
"South Rim Trail":0xa6847b,
"South Shore Sun":0xffdc9e,
"Southern Barrens Mud":0xb98258,
"Southern Beauty":0xf7dddb,
"Southern Belle":0xa6d6c3,
"Southern Blue":0x365787,
"Southern Breeze":0xe4dfd1,
"Southern Evening":0x34657d,
"Southern Moss":0xbca66a,
"Southern Pine":0xacb4ab,
"Southern Platyfish":0xd0d34d,
"Southwest Stone":0xde9f85,
"Southwestern Clay":0xcc6758,
"Southwestern Sand":0xede0ce,
"Sovereign":0x4b4356,
"Sovereignty":0x304e63,
"Soy Milk":0xd5d2c7,
"Soya":0xfae3bc,
"Soya Bean":0x6f634b,
"Soybean":0xd2c29d,
"Soylent Green":0x578363,
"Spa":0xceece7,
"Spa Blue":0xd3dedf,
"Spa Dream":0x1993be,
"Spa Retreat":0xd4e4e6,
"Spa Sangria":0xd7c9a5,
"Space Angel":0x3b4271,
"Space Black":0x505150,
"Space Cadet":0x1d2951,
"Space Convoy":0x667788,
"Space Dust":0x002299,
"Space Exploration":0x001199,
"Space Explorer":0x114499,
"Space Grey":0x110022,
"Space Invader":0x139d08,
"Space Opera":0x5511dd,
"Space Shuttle":0x4b433b,
"Space Station":0x6c6d7a,
"Space Wolves Grey":0xdae6ef,
"Spacebox":0x5c6b6b,
"Spaceman":0x5f6882,
"Spacescape":0x222255,
"Spacious Grey":0x877d75,
"Spacious Plain":0x9a8557,
"Spacious Skies":0xd5eaf2,
"Spacious Sky":0xaeb5c7,
"Spade Black":0x424142,
"Spaghetti":0xfef69e,
"Spaghetti Carbonara":0xddddaa,
"Spaghetti Monster":0xeecc88,
"Spaghetti Strap Pink":0xfbaed2,
"Spalding Gray":0x8d7f75,
"Spandex Green":0x36b14e,
"Spangle":0xe5dbe5,
"Spanish Bistre":0x807532,
"Spanish Blue":0x0070b8,
"Spanish Carmine":0xd10047,
"Spanish Chestnut":0x7f5f52,
"Spanish Cream":0xfce5c0,
"Spanish Crimson":0xe51a4c,
"Spanish Galleon":0x817863,
"Spanish Gold":0xb09a4f,
"Spanish Green":0x7b8976,
"Spanish Grey":0x989898,
"Spanish Lace":0xfce8ca,
"Spanish Leather":0x8e6a3f,
"Spanish Mustang":0x684b40,
"Spanish Olive":0xa1a867,
"Spanish Orange":0xe86100,
"Spanish Peanut":0xc57556,
"Spanish Pink":0xf7bfbe,
"Spanish Plum":0x5c3357,
"Spanish Purple":0x66033c,
"Spanish Raisin":0x61504e,
"Spanish Red":0xe60026,
"Spanish Roast":0x111133,
"Spanish Sand":0xcab08e,
"Spanish Sky Blue":0x00fffe,
"Spanish Style":0x93765c,
"Spanish Villa":0xdfbaa9,
"Spanish Violet":0x4c2882,
"Spanish Viridian":0x007f5c,
"Spanish White":0xded1b7,
"Spanish Yellow":0xf6b511,
"Spare White":0xe4e4dd,
"Sparkle Glow":0xf5d2b5,
"Sparkler":0xffee99,
"Sparkling Apple":0x77b244,
"Sparkling Blueberry Lemonade":0xc15187,
"Sparkling Brook":0xdceee3,
"Sparkling Champagne":0xefcf98,
"Sparkling Cider":0xfffdeb,
"Sparkling Cove":0x2da4b6,
"Sparkling Emerald":0x1f6c53,
"Sparkling Frost":0xd2d5da,
"Sparkling Grape":0x773376,
"Sparkling Green":0x66ee00,
"Sparkling Lavender":0xeeccff,
"Sparkling Metal":0xc3c3c7,
"Sparkling Pink":0xf5cee6,
"Sparkling Purple":0xcc11ff,
"Sparkling Red":0xee3333,
"Sparkling River":0xd6edf1,
"Sparkling Silver":0xcbd0cd,
"Sparkling Spring":0xd9e3e0,
"Sparks In The Dark":0xff7711,
"Sparrow":0x69595c,
"Sparrow Grey Red":0x523e47,
"Sparrow’s Fire":0xff6622,
"Spartacus":0x76a4a7,
"Spartan Blue":0x7a8898,
"Spartan Crimson":0x9e1316,
"Spartan Stone":0xafa994,
"Spatial Spirit":0xc1edd3,
"Spatial White":0xdedddb,
"Spätzle Yellow":0xffee88,
"Speak To Me":0xffd9a6,
"Speakeasy":0x826a6c,
"Speaking of the Devil":0xa8415b,
"Spear Shaft":0x885500,
"Spearfish":0x5fb6bf,
"Spearmint":0x64bfa4,
"Spearmint Frosting":0x8dc2a8,
"Spearmint Ice":0xbfd3cb,
"Spearmint Stick":0xe8f0e2,
"Spearmint Water":0xb1eae8,
"Spearmints":0xbce3c9,
"Special Delivery":0xa5b2b7,
"Special Gray":0x7b787d,
"Special Ops":0x868b53,
"Species":0xdcd867,
"Speckled Easter Egg":0xd38798,
"Spectacular Purple":0xbb00ff,
"Spectra":0x375d4f,
"Spectra Green":0x009b8c,
"Spectra Yellow":0xf7b718,
"Spectral Green":0x008664,
"Spectrum Blue":0x3d3c7c,
"Speedboat":0x90bfd4,
"Speeding Ticket":0xf9f1d7,
"Speedwell":0x5a6272,
"Spell":0x5e4f50,
"Spell Caster":0x4a373e,
"Spelt Grain Brown":0xa38c6b,
"Spelunking":0x35465e,
"Sphagnales Moss":0xa5ad44,
"Sphagnum Moss":0x75693d,
"Sphere":0xf2e8cc,
"Sphinx":0xab9895,
"Spice":0x6c4f3f,
"Spice Bazaar":0x86613f,
"Spice Cake":0xb87243,
"Spice Cookie":0xf0ded3,
"Spice Delight":0xf3e9cf,
"Spice Garden":0xc9d6b4,
"Spice Girl":0xe1c2c1,
"Spice Is Nice":0xebd0a4,
"Spice Ivory":0xf4eedc,
"Spice of Life":0x86493f,
"Spice Route":0xb95b3f,
"Spiceberry":0x604941,
"Spiced Apple":0x783937,
"Spiced Beige":0xe9d2bb,
"Spiced Berry":0x85443f,
"Spiced Brandy":0xbb9683,
"Spiced Butternut":0xffd978,
"Spiced Carrot":0xa4624c,
"Spiced Cashews":0xd3b080,
"Spiced Cider":0x915b41,
"Spiced Cinnamon":0x805b48,
"Spiced Coral":0xd75c5d,
"Spiced Honey":0xa38061,
"Spiced Hot Chocolate":0x53433e,
"Spiced Latte":0x886c57,
"Spiced Mustard":0xb99563,
"Spiced Nectarine":0xffbb72,
"Spiced Nutmeg":0x927d6c,
"Spiced Orange":0xedc7b6,
"Spiced Plum":0x6d4773,
"Spiced Potpourri":0x905d5f,
"Spiced Pumpkin":0xd88d56,
"Spiced Red":0x8b4c3d,
"Spiced Rum":0xad8b6a,
"Spiced Tea":0xab6162,
"Spiced Up":0xb14b38,
"Spiced Vinegar":0xcdba99,
"Spiced Wine":0x664942,
"Spicy":0xff1111,
"Spicy Berry":0xcc3366,
"Spicy Cayenne":0x9b5b4f,
"Spicy Hue":0x994b35,
"Spicy Hummus":0xeebbaa,
"Spicy Mix":0x8b5f4d,
"Spicy Mustard":0x74640d,
"Spicy Orange":0xd73c26,
"Spicy Pink":0xff1cae,
"Spicy Red":0x97413e,
"Spicy Sweetcorn":0xf6ac00,
"Spicy Tomato":0xc75433,
"Spider Cotton":0xe2e8df,
"Spike":0x656271,
"Spiked Apricot":0xfdddb7,
"Spikey Red":0x600000,
"Spill the Beans":0x9b351b,
"Spilled Cappuccino":0xe4e1de,
"Spilt Milk":0xf4f4d1,
"Spinach Banana Smoothie":0xaaaa55,
"Spinach Dip":0xb1cdac,
"Spinach Green":0x909b4c,
"Spinach Soup":0x6e750e,
"Spinach White":0xe4e8da,
"Spindle":0xb3c4d8,
"Spindrift":0x73fcd6,
"Spinel Black":0x41435b,
"Spinel Grey":0x6a5662,
"Spinel Stone Black":0x272a3b,
"Spinel Violet":0x38283d,
"Spinnaker":0xa3e2dd,
"Spinning Blue":0x5b6a7c,
"Spinning Silk":0xf3ddbc,
"Spinning Wheel":0xf6edda,
"Spirit":0xb2bbc6,
"Spirit Dance":0x514b80,
"Spirit Mountain":0x6a8b98,
"Spirit Rock":0x5f534e,
"Spirit Warrior":0xd45341,
"Spirit Whisper":0xe3eebf,
"Spirited Away":0xfde7e3,
"Spirited Green":0xbddec7,
"Spirited Yellow":0xffdc83,
"Spiritstone Red":0xfd411e,
"Spiro Disco Ball":0x0fc0fc,
"Spirulina":0x5a665c,
"Spitsbergen Blue":0x6f757d,
"Splash":0xf1d79e,
"Splash Of Grenadine":0xf984e5,
"Splash of Honey":0xd8b98c,
"Splash Palace":0x5984b0,
"Splashing Wave":0x44ddff,
"Splashy":0x019196,
"Splatter":0xa9586c,
"Spleen Green":0xccee00,
"Splendiferous":0x806e7c,
"Splendor":0xf3dfcc,
"Splendor and Pride":0x5870a4,
"Splendor Gold":0xffb14e,
"Splinter":0xa3713f,
"Splish Splash":0x3194ce,
"Split Pea":0x9c9a40,
"Split Pea Soup":0xc8b165,
"Split Rail":0x8e6c51,
"Spoiled Egg":0xe8ff2a,
"Spoiled Rotten":0xb6bfe5,
"Sponge":0xa49775,
"Sponge Cake":0xfffe40,
"Spooky":0xd1d2bf,
"Spooky Ghost":0xd4d1d9,
"Spooky Graveyard":0x685e4f,
"Spooled White":0xf5eae3,
"Spoonful of Sugar":0xe7e9e3,
"Spores":0x7f8162,
"Sport Green":0x00a27d,
"Sport Yellow":0xefd678,
"Sporting Green":0x434c47,
"Sports Blue":0x399bb4,
"Sports Fan":0xe08119,
"Sports Field Green":0x4d8064,
"Sporty Blue":0x6a8aa4,
"Spotlight":0xfaeacd,
"Spotted Dove":0xbfbfbd,
"Spotted Snake Eel":0xb1d3e3,
"Spray":0x7ecddd,
"Spray Green":0xaea692,
"Spray of Mint":0xbdd0c3,
"Spreadsheet Green":0x007711,
"Sprig Muslin":0xd6c1c5,
"Sprig of Mint":0x8be0ba,
"Spring":0x00f900,
"Spring Blossom":0xe9edbd,
"Spring Bouquet":0x6dce87,
"Spring Boutique":0xd7b9cb,
"Spring Bud":0xa7fc00,
"Spring Burst":0xc9e0c8,
"Spring Buttercup":0xfff6c2,
"Spring Crocus":0xba69a1,
"Spring Day":0xdbd7b7,
"Spring Fever":0xe5e3bf,
"Spring Fields":0xb3cdac,
"Spring Fog":0xecf1ec,
"Spring Forest":0x67926f,
"Spring Forth":0x11bb22,
"Spring Frost":0x87ff2a,
"Spring Garden":0x558961,
"Spring Glow":0xd3e0b8,
"Spring Grass":0xd5cb7f,
"Spring Green":0x00ff7c,
"Spring Grey":0xc5c6b3,
"Spring Heat":0xfffddd,
"Spring Hill":0xc4cbb2,
"Spring Juniper":0x4a754a,
"Spring Kiss":0xe3efb2,
"Spring Leaves":0xa8c3aa,
"Spring Lilac":0xb1b3cb,
"Spring Lily":0xfcf4c8,
"Spring Lobster":0x640125,
"Spring Lobster Brown":0x6c2c2f,
"Spring Lobster Dye":0x7a4171,
"Spring Marsh":0xc0b05d,
"Spring Mist":0xd3e0de,
"Spring Morn":0xe5f0d5,
"Spring Moss":0xa99757,
"Spring Onion":0x596c3c,
"Spring Pink":0xdfbcc9,
"Spring Rain":0xa3bd9c,
"Spring Reflection":0xa1bfab,
"Spring Roll":0xc4a661,
"Spring Savor":0xcceecc,
"Spring Shoot":0xe2edc1,
"Spring Shower":0xabdcee,
"Spring Slumber Green":0xb8f8b8,
"Spring Song":0xfaccbf,
"Spring Sprig":0xa2c09b,
"Spring Sprout":0x86ba4a,
"Spring Storm":0xa9c6cb,
"Spring Stream":0x98beb2,
"Spring Sun":0xf1f1c6,
"Spring Thaw":0xd9dcdd,
"Spring Thyme":0xd8dcb3,
"Spring Valley":0xced7c5,
"Spring Walk":0xacb193,
"Spring Water Turquoise":0x7ab5ae,
"Spring Wheat":0xe0eed4,
"Spring White":0xecfcec,
"Spring Wisteria":0xcda4de,
"Spring Wood":0xe9e1d9,
"Spring Yellow":0xf2e47d,
"Springtide Green":0xc8cb8e,
"Springtime":0xe9e5b3,
"Springtime Bloom":0xdb88ac,
"Springtime Dew":0xffffef,
"Springtime Rain":0xebeef3,
"Springview Green":0x7ea15a,
"Sprinkle":0xebddea,
"Sprite Twist":0xb9dcc3,
"Spritzig":0x76c5e7,
"Sprout":0xb8ca9d,
"Sprout Green":0xcbd7d2,
"Spruce":0x0a5f38,
"Spruce Shade":0x91a49d,
"Spruce Stone":0x9fc09c,
"Spruce Tree Flower":0xb35e97,
"Spruce Woods":0x6e6a51,
"Spruce Yellow":0xbe8a4a,
"Spruced Up":0x9a7f28,
"Spumoni":0xbccfa4,
"Spun Cotton":0xf3ecdc,
"Spun Jute":0xf4e4cf,
"Spun Pearl":0xa2a1ac,
"Spun Sugar":0xfae2ed,
"Spun Wool":0xe3ded4,
"SQL Injection Purple":0x5e0092,
"Squant":0x666666,
"Squash":0xf2ab15,
"Squash Bisque":0xe7b17c,
"Squash Blossom":0xf8b438,
"Squeaky":0x6cc4da,
"Squeeze Toy Alien":0x11ee00,
"Squid Hat":0x6e2233,
"Squid Ink Powder":0x001133,
"Squid Pink":0xf5b4bd,
"Squid's Ink":0x4d4e5c,
"Squig Orange":0xaa4f44,
"Squirrel":0x8f7d6b,
"Squirrel's Nest":0x665e48,
"Squirt":0x95bcc5,
"Sriracha":0xf56961,
"St. Augustine":0xd0ddcc,
"St. Bart's":0x577c88,
"St. <NAME>":0xeedddd,
"St. Patrick":0x2b8c4e,
"St. Patrick's Blue":0x23297a,
"St. Petersburg":0xdee8f3,
"St. Tropez":0x325482,
"Stability":0xc1d0b2,
"Stable Hay":0xf6e0be,
"Stack":0x858885,
"Stacked Limestone":0xd1b992,
"Stacked Stone":0x918676,
"Stadium Grass":0xd5f534,
"Stadium Lawn":0x9af764,
"Stag Beetle":0x603b41,
"Stage Gold":0x9e6928,
"Stage Mauve":0xb081aa,
"Stagecoach":0x7f5a44,
"Stained Glass":0x556682,
"Stainless Steel":0xb4bdc7,
"Stairway to Heaven":0x67716e,
"Stalactite Brown":0xd4c4a7,
"Stalk":0x7cb26e,
"Stamina":0xb0a8ad,
"Stamp Pad Green":0x2ea18c,
"Stamped Concrete":0xa0a09a,
"Stand Out":0x7f8596,
"Standby Led":0xff0066,
"Standing Ovation":0xbfb9bd,
"Standing Waters":0x005599,
"Standish Blue":0x85979a,
"Stanford Green":0x658f7c,
"Stanford Stone":0xbcab9c,
"Stanger Red":0xa40000,
"Stanley":0x9bc2b4,
"Star":0xffe500,
"Star Anise":0x5c5042,
"Star Bright":0xe8ddae,
"Star City":0x5796a1,
"Star Command Blue":0x007bb8,
"Star Dust":0xf9f3dd,
"Star Fruit Yellow Green":0xbeaa4a,
"Star Grass":0x75dbc1,
"Star Magic":0xe4d8d8,
"Star Map":0xdae2e9,
"Star Mist":0xb3c6ce,
"Star of Gold":0xebe3c7,
"Star of Life":0x057bc1,
"Star of Morning":0xebbbbe,
"Star Sapphire":0x386192,
"Star Shine":0xf8f6e3,
"Star Spangled":0x3a5779,
"Star White":0xefefe8,
"Star-Studded":0xf7ebac,
"Starboard":0x016c4f,
"Starbright":0xf5ecc9,
"Starbur":0x6cb037,
"Starburst":0xdce7e5,
"Stardew":0xa6b2b5,
"Stardust":0xddd3ae,
"Stardust Ballroom":0xdacfd4,
"Stardust Evening":0xb8bfdc,
"Starfish":0xe5bca5,
"Starfleet Blue":0x0096ff,
"Starflower Blue":0x4e9ab0,
"Starfox":0xf0e8d5,
"Starfruit":0xe4d183,
"Stargate":0xb7c4d3,
"Stargate Shimmer":0x7777ff,
"Stargazer":0x39505c,
"Stargazing":0x414549,
"Starglider":0xfaeede,
"Stark White":0xd2c6b6,
"Starless Night":0x3e4855,
"Starlet":0x854e51,
"Starlet Pink":0xedc2db,
"Starlight":0xbcc0cc,
"Starlight Blue":0xb5ced4,
"Starling's Egg":0xe8dfd8,
"Starlit Eve":0x384351,
"Starlit Night":0x3b476b,
"Starry Night":0x286492,
"Starry Sky Blue":0x4f5e7e,
"Starset":0x758ba4,
"Starship":0xe3dd39,
"Starship Tonic":0xcce7e8,
"Starship Trooper":0x229966,
"Starstruck":0x4664a5,
"Startling Orange":0xe56131,
"Stately Frills":0xc5bdc4,
"Stately Stems":0x577a6c,
"Stately White":0xfaf9ea,
"Static":0xd5d3c3,
"Statue of Liberty":0x376d64,
"Statued":0xd0bcb1,
"Statuesque":0xe0dfd9,
"Status Bronze":0xdc8a30,
"Stay in Lime":0x9fac5c,
"Steadfast":0x4a5777,
"Steady Brown":0x8a6b4d,
"Stealth Jet":0x4b4844,
"Steam":0xdddddd,
"Steam Bath":0xccd0da,
"Steam Chestnut":0xebe1a9,
"Steam Engine":0xb2b2ad,
"Steam White":0xe8e9e5,
"Steamboat Geyser":0xd2ccb4,
"Steamed Chai":0xe0d4bd,
"Steamed Chestnut":0xd3b17d,
"Steamed Milk":0xead8be,
"Steamed Salmon":0xee8888,
"Steamy Dumpling":0xeae9b4,
"Steamy Spring":0xb1cfc7,
"Steel":0x797979,
"Steel Armor":0x767275,
"Steel Blue":0x4682b4,
"Steel Blue Eyes":0x7d94c6,
"Steel Blue Grey":0x436175,
"Steel Grey":0x43464b,
"Steel Legion Drab":0x7a744d,
"Steel Light Blue":0x5599b6,
"Steel Me":0xddd5ce,
"Steel Pan Mallet":0x71a6a1,
"Steel Pink":0xcc33cc,
"Steel Teal":0x5f8a8b,
"Steel Wool":0x777777,
"Steely Gray":0x90979b,
"Steeple Grey":0x827e7c,
"Stegadon Scale Green":0x074863,
"Steiglitz Fog":0x415862,
"Stella":0xf5d056,
"Stella Dora":0xf9daa5,
"Stellar":0x46647e,
"Stellar Explorer":0x002222,
"Stellar Light":0xfff4dd,
"Stellar Mist":0xab9d9c,
"Stem Green":0xabdf8f,
"Stencil Blue":0xb4ceda,
"Steppe Green":0x7d7640,
"Stepping Stone":0xa4a7a4,
"Stepping Stones":0xb2a18c,
"Sterling":0xd1d4d1,
"Sterling Blue":0xa2b9c2,
"Sterling Shadow":0xe9ebde,
"Sterling Silver":0x9eafc2,
"Stetson":0x9e7a58,
"Steveareno Beige":0xc5b5a4,
"Sticks & Stones":0xbaa482,
"Sticky Black Tarmac":0x112111,
"Sticky Toffee":0xcc8149,
"Stieglitz Silver":0x8d8f8e,
"Stil De Grain Yellow":0xfadb5e,
"Stiletto":0x323235,
"Stiletto Love":0xb6453e,
"Still":0xadaf9c,
"Still Fuchsia":0xc154c0,
"Still Grey":0xaba9a0,
"Still Moment":0xcbc4b2,
"Still Morning":0xfff8e1,
"Still Water":0x4a5d5f,
"Stillwater":0x70a4b0,
"Stillwater Lake":0xc2d0df,
"Stilted Stalks":0xa29a6a,
"Stinging Nettle":0x495d39,
"Stinging Wasabi":0xaefd6c,
"Stingray Grey":0xb0aba3,
"Stinkhorn":0x2a545c,
"Stirland Battlemire":0xae5a2c,
"Stirland Mud":0x492b00,
"Stirring Orange":0xf6b064,
"Stizza":0x900910,
"Stock Horse":0x806852,
"Stockade Green":0x104f4a,
"Stocking White":0xe9e5d8,
"Stockleaf":0x647b72,
"Stoic White":0xe0e0ff,
"Stolen Kiss":0xefdcd3,
"Stomy Shower":0x0088b0,
"Stone":0xada587,
"Stone Blue":0x829ca5,
"Stone Bridge":0x52706c,
"Stone Brown":0xb79983,
"Stone Craft":0x7d867c,
"Stone Creek":0x8f9183,
"Stone Cypress Green":0x5f7d6c,
"Stone Fence":0x929c9c,
"Stone Fruit":0xf2a28c,
"Stone Golem":0xc2cbd2,
"Stone Green":0x658e67,
"Stone Grey":0x9f9484,
"Stone Guardians":0xcaba97,
"Stone Harbour":0xe8e0d8,
"Stone Hearth":0x636869,
"Stone Lion":0xb3a491,
"Stone Mason":0x7a7b75,
"Stone Mill":0xb6b7ad,
"Stone Path":0xe4efe5,
"Stone Pillar":0xefe5d4,
"Stone Pine":0x665c46,
"Stone Quarry":0xece4dc,
"Stone Silver":0x8ba8ae,
"Stone Terrace":0xa09484,
"Stone Violet":0x4d404f,
"Stone Walkway":0xb5b09e,
"Stone Wall":0xefe1d8,
"Stone Walls":0xafa791,
"Stone Wash":0xe5d4c0,
"Stone's Throw":0x605c58,
"Stonebread":0xddcea7,
"Stonebriar":0xcba97e,
"Stonecrop":0xa08f6f,
"Stonegate":0x99917e,
"Stonehenge Greige":0xa79d8d,
"Stonelake":0xbab1a3,
"Stonetalon Mountains":0x8d7a4d,
"Stonewall":0x807661,
"Stonewall Grey":0xc1c1c1,
"Stonewash":0x74809a,
"Stonewashed":0xddd7c5,
"Stonewashed Brown":0xdcccc0,
"Stonewashed Pink":0xf4eee4,
"Stonish Beige":0xccb49a,
"Stony Creek":0x948f82,
"Stony Field":0x615547,
"Stop":0xc33a36,
"Storksbill":0xe5e1dd,
"Storksbill White":0xf2f2e2,
"Storm":0x444400,
"Storm Blue":0x507b9c,
"Storm Break":0x938988,
"Storm Cloud":0x808283,
"Storm Dust":0x65645f,
"Storm Front":0x787376,
"Storm Green":0x113333,
"Storm Grey":0x717486,
"Storm Lightning":0xf9e69c,
"Storm Petrel":0x7f95a5,
"Storm Red":0xa28a88,
"Storm Warning":0x696863,
"Storm's Coming":0xcfc9bc,
"Stormeye":0xe7b57f,
"Stormfang":0x80a7c1,
"Stormhost Silver":0xbbc6c9,
"Storms Mountain":0x8d9390,
"Stormvermin Fur":0x5c5954,
"Stormy":0xb0bcc3,
"Stormy Bay":0x9aafaf,
"Stormy Grey":0x7d7b7c,
"Stormy Horizon":0x777799,
"Stormy Mauve":0x71738c,
"Stormy Oceans":0x70818e,
"Stormy Pink":0xe3b5ad,
"Stormy Ridge":0x507b9a,
"Stormy Sea":0x6e8082,
"Stormy Strait Green":0x0f9b8e,
"Stormy Strait Grey":0x6b8ba4,
"Stormy Sunrise":0xc8a2c8,
"Stormy Weather":0x58646d,
"Stout":0x0f0b0a,
"Stowaway":0x7b8393,
"Straightforward Green":0x52a550,
"Straken Green":0x628026,
"Stranglethorn Ochre":0xdbb060,
"Stratford Blue":0x528a9a,
"Stratford Sage":0x8c8670,
"Stratos":0x000741,
"Stratos Blue":0x3799c8,
"Stratosphere":0x9ec1cc,
"Stratus":0x8193aa,
"Stravinsky":0x996e74,
"Stravinsky Pink":0x77515a,
"Straw":0xe4d96f,
"Straw Basket":0xd9c69a,
"Straw Gold":0xfcf679,
"Straw Harvest":0xdbc8a2,
"Straw Hat":0xf0d5a8,
"Straw Hut":0xbdb268,
"Straw Yellow":0xf0d696,
"Strawberry":0xfb2943,
"Strawberry Blonde":0xffdadc,
"Strawberry Confection":0xf4bfc6,
"Strawberry Cough":0x990011,
"Strawberry Cream":0xf4c3c4,
"Strawberry Daiquiri":0xa23d50,
"Strawberry Dreams":0xff88aa,
"Strawberry Dust":0xfff0ea,
"Strawberry Frappe":0xffa2aa,
"Strawberry Freeze":0xc677a8,
"Strawberry Frosting":0xff6ffc,
"Strawberry Glaze":0xdab7be,
"Strawberry Ice":0xe78b90,
"Strawberry Jam":0x86423e,
"Strawberry Jubilee":0xc08591,
"Strawberry Milkshake Red":0xd47186,
"Strawberry Mousse":0xa5647e,
"Strawberry Pink":0xf57f8e,
"Strawberry Pop":0xee2255,
"Strawberry Rhubarb":0xb96364,
"Strawberry Rose":0xe29991,
"Strawberry Shortcake":0xfa8e99,
"Strawberry Smash":0xee0055,
"Strawberry Smoothie":0xe79ea6,
"Strawberry Soap":0xf7879a,
"Strawberry Spinach Red":0xfa4224,
"Strawberry Surprise":0xb9758d,
"Strawberry Whip":0xf9d7cd,
"Strawberry Wine":0xcb6a6b,
"Strawberry Yogurt":0xe9b3b4,
"Strawflower":0xddbdba,
"Stream":0x495e7b,
"Streetwise":0xd8e2df,
"Stretch Limo":0x2b2c30,
"Streusel Cake":0xd7aa60,
"Strike a Pose":0x5a4659,
"Strike It Rich":0xd7b55f,
"Strikemaster":0x946a81,
"Striking":0x00667b,
"Striking Purple":0x944e87,
"Striking Red":0xc03543,
"String":0xaa9f96,
"String Ball":0xf1e8d8,
"String Cheese":0xfbf1dd,
"String Deep":0x7f7860,
"String of Pearls":0xebe3d8,
"Stromboli":0x406356,
"Strong Blue":0x0c06f7,
"Strong Cerise":0x960056,
"Strong Envy":0x782e2c,
"Strong Iris":0x5e5f7e,
"Strong Mocha":0x6f372d,
"Strong Mustard":0xa88905,
"Strong Olive":0x646756,
"Strong Pink":0xff0789,
"Strong Sage":0x2b6460,
"Strong Strawberry":0x8a3e34,
"Strong Tone Wash":0x454129,
"Strong Winds":0xa3a59b,
"Stroopwafel":0xa86f48,
"Struck by Lightning":0xf0e1e8,
"Structural Blue":0x0e9bd1,
"Stucco":0xa58d7f,
"Stucco Tan":0xe8dece,
"Stucco Wall":0xf1b19d,
"Stucco White":0xe2d3b9,
"Studer Blue":0x005577,
"Studio":0x724aa1,
"Studio Beige":0xc1b2a1,
"Studio Blue Green":0x6d817b,
"Studio Clay":0xd9ccb8,
"Studio Cream":0xebdbaa,
"Studio Mauve":0xc6b9b8,
"Studio Taupe":0xa59789,
"Studio White":0xe8dcd5,
"Stuffed Olive":0xadac7c,
"Stuffing":0xbf9b84,
"Stump Green":0x5e5f4d,
"Stunning Gold":0xda9a5d,
"Stunning Sapphire":0x185887,
"Stunning Shade":0x676064,
"<NAME>":0x9b856f,
"Sturgis Grey":0x57544d,
"Stylish":0xcec1a5,
"Su-Nezumi Grey":0x9fa0a0,
"Suave Grey":0xd1d8dd,
"Subaqueous":0x00576f,
"Subdue Red":0xccb8b3,
"Subdued Hue":0xc6b1ad,
"Subdued Sienna":0xcc896c,
"Sublime":0xecede0,
"Submarine":0x7a7778,
"Submarine Base":0x5566aa,
"Submarine Grey":0x4d585c,
"Submerged":0x4a7d82,
"Submersible":0x00576e,
"Subpoena":0xd8ccc6,
"Subterranean River":0x1f3b4d,
"Subtle Blue":0xd9e3e5,
"Subtle Green":0xb5cbbb,
"Subtle Night Sky":0x554b4f,
"Subtle Shadow":0xd8d8d0,
"Subtle Suede":0xd0bd94,
"Subtle Sunshine":0xe4d89a,
"Subtle Touch":0xdbdbd9,
"Subtle Turquoise":0x7a9693,
"Subtle Violet":0xb29e9e,
"Subway":0x87857c,
"Succinct Violet":0x513b6e,
"Succubus":0x990022,
"Succulent":0xdcdd65,
"Succulent Garden":0xbccbb2,
"Succulent Green":0x5e9b86,
"Succulent Leaves":0x658e64,
"Succulents":0x007744,
"Such Melodrama":0xc6c1c5,
"Sudan Brown":0xac6b29,
"Sudden Sapphire":0x6376a9,
"Suddenly Sapphire":0x1a5897,
"Suds":0xa6b4c5,
"Suede Beige":0xd9c7b9,
"Suede Grey":0x857f7a,
"Suede Indigo":0x585d6d,
"Suede Leather":0x896757,
"Suede Vest":0xd79043,
"Suffragette Yellow":0xecd0a1,
"Sugar Almond":0x935529,
"Sugar Beet":0x834253,
"Sugar Berry":0xe3d4cd,
"Sugar Cane":0xeeefdf,
"Sugar Cane Dahlia":0xf7c2bf,
"Sugar Chic":0xffccff,
"Sugar Coated Almond":0xbb6611,
"Sugar Cookie":0xf2e2a4,
"Sugar Coral":0xf56c73,
"Sugar Crystal":0xf8f4ff,
"Sugar Dust":0xf9ede3,
"Sugar Glaze":0xfff0e1,
"Sugar Glazed Cashew":0xcc9955,
"Sugar Grape":0x9437ff,
"Sugar Honey Cashew":0xddaa66,
"Sugar Maple":0x9c7647,
"Sugar Mint":0xc0e2c5,
"Sugar Pie":0xc7a77b,
"Sugar Pine":0x73776e,
"Sugar Plum":0x914e75,
"Sugar Pool":0xaed6d4,
"Sugar Poppy":0xe58281,
"Sugar Quill":0xebe5d7,
"Sugar Rush Peach Pepper":0xcfb599,
"Sugar Shack":0xeed5b6,
"Sugar Soap":0xefe8dc,
"Sugar Sweet":0xecc4dc,
"Sugar Swizzle":0xf3eee7,
"Sugar Tooth":0xd68f9f,
"Sugar Tree":0xa2999a,
"Sugar-Candied Peanuts":0x8b2e16,
"Sugared Almond":0xb49d7b,
"Sugared Peach":0xfddcc6,
"Sugared Pears":0xebd5b7,
"Sugarloaf Brown":0x554400,
"Sugarpills":0xffddff,
"Sugilite":0xa2999f,
"Suit Blue":0x2b3036,
"Suitable Brown":0x645a4b,
"Sulfur Pit":0xe5cc69,
"Sulfur Yellow":0xdbc058,
"Sulfuric Yellow":0xa79f5c,
"Sullen Gold":0xa58b34,
"Sullivan's Heart":0xf7c5d1,
"Sulphur":0xddb614,
"Sulphur Spring":0xd5d717,
"Sulphur Water":0xf2f3cf,
"Sulphur Yellow":0xccc050,
"Sultan Sand":0xe3c9be,
"Sultan's Silk":0x134558,
"Sultana":0x674668,
"Sultry Castle":0x948d84,
"Sultry Sea":0x506770,
"Sultry Smoke":0x73696f,
"Sultry Spell":0x716563,
"Sulu":0xc6ea80,
"Sumac dyed":0xe08a1e,
"Sumatra":0xf6e8cc,
"Sumatra Chicken":0x4f666a,
"Sumi Ink":0x595857,
"Sumire Violet":0x7058a3,
"Summer Air":0x3fafcf,
"Summer Beige":0xdbc2b9,
"Summer Birthday":0xbbd5ef,
"Summer Bliss":0xfcf1cf,
"Summer Bloom":0xd1beb4,
"Summer Blue":0x1880a1,
"Summer Blush":0xf6dfd6,
"Summer Breeze":0xd3e5db,
"Summer Citrus":0xf8822a,
"Summer Cloud":0xbbffee,
"Summer Clover":0xe5cfde,
"Summer Concrete":0x57595d,
"Summer Cosmos":0xfad1e0,
"Summer Crush":0xf2d6da,
"Summer Daffodil":0xffe078,
"Summer Day":0xeaaa62,
"Summer Dragonfly":0x83ada3,
"Summer Field":0xe2c278,
"Summer Fig":0xbe4b3b,
"Summer Forest Green":0x228b22,
"Summer Garden":0x7aac80,
"Summer Glow":0xeeaa44,
"Summer Green":0x8fb69c,
"Summer Harvest":0xffe69a,
"Summer Heat":0xaa5939,
"Summer Hill":0xc1a58d,
"Summer House":0xc8efe2,
"Summer Hue":0xffefc2,
"Summer in the City":0xcda168,
"Summer Jasmine":0xeeebd6,
"Summer Lake":0x0077a7,
"Summer Lily":0xf8d374,
"Summer Melon":0xead3ae,
"Summer Memory":0xdf856e,
"Summer Mist":0xcbeaee,
"Summer Moon":0xfdedcf,
"Summer Night":0x36576a,
"Summer Orange":0xffb653,
"Summer Pear":0xf5f0d1,
"Summer Rain":0xe1e8db,
"Summer Resort":0xf7efba,
"Summer Sandcastle":0xece4ce,
"Summer Sea":0x66a9b1,
"Summer Shade":0xd1d9d7,
"Summer Shower":0xe5ebe3,
"Summer Sky":0x38b0de,
"Summer Soft Blue":0x94d3d1,
"Summer Solstice":0xded1a3,
"Summer Storm":0xb0c5df,
"Summer Sun":0xffdc00,
"Summer Sunset":0xd88167,
"Summer Sunshine":0xf7e8c7,
"Summer Turquoise":0x008572,
"Summer Turquoise Blue":0x4b9cab,
"Summer Waters":0x215399,
"Summer Weasel":0xbb8e55,
"Summer White":0xf4e9d6,
"Summer's End":0xdc9367,
"Summer's Eve":0xa97069,
"Summer's Heat":0xf9e699,
"Summerday Blue":0x376698,
"Summertime":0xf2d178,
"Summertown":0x8cbc9e,
"Summerville Brown":0x997651,
"Summerwood":0xd4b28b,
"Summit":0x8bb6b8,
"Summit Gray":0x959491,
"Sumptuous Peach":0xe5b99b,
"Sun":0xef8e38,
"Sun Baked":0xd27f63,
"Sun Baked Earth":0xa36658,
"Sun Bleached Mint":0xe3efe1,
"Sun Bleached Ochre":0xe3ab7b,
"Sun Bleached Pink":0xfadadd,
"Sun City":0xfffed9,
"Sun Crete":0xff8c00,
"Sun Dance":0xc4aa4d,
"Sun Deck":0xf0dca0,
"Sun Dial":0xc79b36,
"Sun Drenched":0xffe7a3,
"Sun Dried":0xeabd5b,
"Sun Dried Tomato":0x752329,
"Sun Drops":0xeaaf11,
"Sun Dust":0xf6e0a4,
"Sun Glare":0xf1f4d1,
"Sun Glint":0xfaf3d9,
"Sun God":0xdfba5a,
"Sun Kiss":0xebd1bb,
"Sun Kissed":0xffeec2,
"Sun Orange":0xf48037,
"Sun Ray":0xffb219,
"Sun Salutation":0xe7c26f,
"Sun Shower":0xffde73,
"Sun Song":0xe9ad17,
"Sun Splashed":0xfbd795,
"Sun Surprise":0xfff2a0,
"Sun Touched":0xfad675,
"Sun Valley":0x698538,
"Sun Wukong's Crown":0xecc033,
"Sun Yellow":0xffdf22,
"Sun-Kissed Brick":0xb75e41,
"Sun's Glory":0xf6f2e5,
"Sun's Rage":0xa94e37,
"Suna White":0xdcd3b2,
"Sunbaked Adobe":0xab9a6e,
"Sunbeam":0xf5edb2,
"Sunbeam Yellow":0xf0d39d,
"Sunblast Yellow":0xfeff0f,
"Sunbleached":0xe5e0d7,
"Sunbound":0xf9d964,
"Sunburn":0xb37256,
"Sunburnt Cyclops":0xff404c,
"Sunburnt Toes":0xd79584,
"Sunburst":0xf6c289,
"Sunburst Yellow":0xffff99,
"Sundance":0xfac76c,
"Sunday Afternoon":0xf6c778,
"Sunday Best":0xfcc9c7,
"Sunday Drive":0xdcc9ae,
"Sunday Gloves":0xd7bad1,
"Sunday Niqab":0x3d4035,
"Sundaze":0xfae198,
"Sundew":0xe1cdae,
"Sundown":0xf5c99e,
"Sundress":0xebcf89,
"Sundried Tomato":0x692b2b,
"Sunezumi Brown":0x6e5f57,
"Sunflower":0xffc512,
"Sunflower Seed":0xffe3a9,
"Sunflower Yellow":0xffda03,
"Sunglo":0xc76155,
"Sunglow":0xffcc33,
"Sunglow Gecko":0xffcf48,
"Sunken Battleship":0x51574f,
"Sunken Gold":0xb29700,
"Sunken Pool":0xc8ddda,
"Sunken Ship":0x6b443d,
"Sunkissed Apricot":0xf2bda8,
"Sunkissed Peach":0xfed8bf,
"Sunkissed Yellow":0xffe9ba,
"Sunkist Coral":0xea6676,
"Sunlight":0xedd59e,
"Sunlit Allium":0x9787bb,
"Sunlit Kelp Green":0x7d7103,
"Sunlounge":0xda8433,
"Sunning Deck":0xe8d7b1,
"Sunny":0xf2f27a,
"Sunny Disposition":0xdba637,
"Sunny Festival":0xffc946,
"Sunny Gazebo":0xede1cc,
"Sunny Green":0xc5cd40,
"Sunny Honey":0xf8f0d8,
"Sunny Horizon":0xd0875a,
"Sunny Lime":0xdfef87,
"Sunny Mimosa":0xf5f5cc,
"Sunny Mood":0xf7c84a,
"Sunny Morning":0xf6d365,
"Sunny Pavement":0xd9d7d9,
"Sunny Side Up":0xffdc41,
"Sunny Summer":0xffc900,
"Sunny Summit":0xe3e9cf,
"Sunny Veranda":0xfedf94,
"Sunny Yellow":0xfff917,
"Sunnyside":0xf8d016,
"Sunporch":0xffd18c,
"Sunray":0xe3ab57,
"Sunray Venus":0xcfc5b6,
"Sunrise":0xf4bf77,
"Sunrise Glow":0xfef0c5,
"Sunrise Heat":0xcaa061,
"Sunrose Yellow":0xffdb67,
"Sunset":0xc0514a,
"Sunset Beige":0xd0a584,
"Sunset Cloud":0xbe916d,
"Sunset Cove":0xdcb397,
"Sunset Cruise":0xffbe94,
"Sunset Drive":0xeabba2,
"Sunset Gold":0xf7c46c,
"Sunset Horizon":0xba87aa,
"Sunset in Italy":0xf0c484,
"Sunset Meadow":0xa5a796,
"Sunset Orange":0xfd5e53,
"Sunset Papaya":0xfc7d64,
"Sunset Pink":0xfad6e5,
"Sunset Purple":0x6f456e,
"Sunset Red":0x7f5158,
"Sunset Riders":0xd70040,
"Sunset Serenade":0x594265,
"Sunset Strip":0xffbc00,
"Sunset Yellow":0xfa873d,
"Sunshade":0xfa9d49,
"Sunshine":0xfade85,
"Sunshine Surprise":0xfcb02f,
"Sunshine Yellow":0xfffd37,
"Sunshone Plum":0x886688,
"Sunstitch":0xfee2b2,
"Sunstone":0xc7887f,
"Suntan":0xd9b19f,
"Suntan Glow":0xbe8c74,
"Sunwashed Brick":0xe3c1b3,
"Suō":0x7e2639,
"Super Banana":0xfffe71,
"Super Black":0x221100,
"Super Gold":0xaa8822,
"Super Hero":0xca535b,
"Super Leaf Brown":0xba5e0f,
"Super Lemon":0xe4bf45,
"Super Pink":0xce6ba4,
"Super Rose Red":0xcb1028,
"Super Saiyan":0xffdd00,
"Super Sepia":0xffaa88,
"Super Silver":0xeeeeee,
"Superior Blue":0x3a5e73,
"Superior Bronze":0x786957,
"Superman Red":0xff1122,
"Supermint":0x00928c,
"Supernatural":0x313641,
"Supernova":0xfff8d9,
"Supernova Residues":0xd9ece9,
"Superstar":0xffcc11,
"Superstition":0x5b6e74,
"Superstitious":0xac91b5,
"Superwhite":0xe8eaea,
"Support Green":0x78a300,
"Supreme Green":0xcfddc7,
"Supreme Grey":0x86949f,
"Surati Pink":0xfc53fc,
"Surf":0xb8d4bb,
"Surf Crest":0xc3d6bd,
"Surf Green":0x427573,
"Surf Rider":0x0193c2,
"Surf Spray":0xb4c8c2,
"Surf the Web":0x203c7f,
"Surf Wash":0x87ceca,
"Surf'n'dive":0x374755,
"Surf's Surprise":0xc4d3e5,
"Surf's Up":0xc6e4eb,
"Surfboard Yellow":0xfcda89,
"Surfer":0x70b8ba,
"Surfer Girl":0xdb6484,
"Surfie Green":0x007b77,
"Surfin'":0x73c0d2,
"Surfside":0x9acad3,
"Surgeon Green":0x009f6b,
"Surprise":0xc9936f,
"Surprise Amber":0xefb57a,
"Surya Red":0x70191f,
"Sushi":0x7c9f2f,
"Sushi Rice":0xfff7df,
"Sussie":0x58bac2,
"Susu Green":0x887f7a,
"Susu-Take Bamboo":0x6f514c,
"Sutherland":0x859d95,
"Su<NAME>":0x888387,
"Suzu Grey":0x9ea1a3,
"<NAME>":0xaa4f37,
"<NAME>":0x8c4736,
"Svelte":0xb8a3bb,
"Svelte Sage":0xb2ac96,
"Swagger":0x19b6b5,
"Swallow Blue":0x154962,
"Swallow-Tailed Moth":0xece9dd,
"Swamp":0x7f755f,
"Swamp Fox":0xb79d69,
"Swamp Green":0x748500,
"Swamp Monster":0x005511,
"Swamp Mosquito":0x252f2f,
"Swamp Moss":0x698339,
"Swamp Mud":0x857947,
"Swamp of Sorrows":0x36310d,
"Swamp Shrub":0x6d753b,
"Swampland":0x226633,
"Swan Dive":0xe5e4dd,
"Swan Lake":0xc5e5e2,
"Swan Sea":0xa6c1bf,
"Swan White":0xf7f1e2,
"Swan Wing":0xf5f2e6,
"Swanky Gray":0xb5b1b5,
"Swanndri":0x5f7963,
"Swans Down":0xdae6dd,
"Sweat Bee":0x1d4e8f,
"Sweater Weather":0xccccc5,
"Swedish Blue":0x007eb1,
"Swedish Clover":0x7b8867,
"Swedish Green":0x184d43,
"Swedish Yellow":0xfce081,
"Sweet & Sour":0xc9aa37,
"Sweet 60":0xf29eab,
"Sweet Almond":0xcc9977,
"Sweet Alyssum":0xe7c2de,
"Sweet Angel":0xf5c8bb,
"Sweet Angelica":0xe8d08e,
"Sweet Annie":0x9c946e,
"Sweet Apricot":0xfcc0a6,
"Sweet Aqua":0xa7e8d3,
"Sweet Ariel":0xe5eae3,
"Sweet as Honey":0xffe9ac,
"Sweet Baby Rose":0xc24f40,
"Sweet Bianca":0xeedadd,
"Sweet Blue":0xaebed2,
"Sweet Breeze":0xc8dae3,
"Sweet Brown":0xa83731,
"Sweet Butter":0xfffcd7,
"Sweet Buttermilk":0xfceedd,
"Sweet Carrot":0xcc764f,
"Sweet Cashew":0xddaa77,
"Sweet Chamomile":0xffe186,
"Sweet Cherry":0x9f4f4d,
"Sweet Cherry | |
from typing import List, Type, Any, Dict, Tuple, Optional
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.comment import Comment
from facebook_business.api import FacebookAdsApi, FacebookSession
from facebook_business.adobjects import abstractcrudobject
from facebook_business.adobjects.campaign import Campaign
from facebook_business.adobjects.adset import AdSet
from facebook_business.adobjects.ad import Ad
from facebook_business.adobjects.adcreative import AdCreative
from facebook_business.adobjects.advideo import AdVideo
from facebook_business.adobjects.customaudience import CustomAudience
from facebook_business.exceptions import FacebookRequestError
from facebook_business.adobjects.page import Page
from facebook_business.adobjects.pagepost import PagePost
from common.enums.failure_bucket import FailureBucket
from oozer.common.enum import to_fb_model, ExternalPlatformJobStatus
from oozer.common.facebook_fields import collapse_fields_children
class PlatformApiContext:
    """
    A simple wrapper for Facebook SDK, using local API sessions so as not to
    pollute the global default API session with initialization.

    Intended to be used as a context manager::

        with PlatformApiContext(token) as ctx:
            model = ctx.to_fb_model(entity_id, entity_type)
    """

    # Access token supplied at construction time; None until __init__ runs.
    token: Optional[str] = None
    # Token-scoped API session; populated lazily by __enter__ so that merely
    # constructing the context does not touch the Facebook SDK.
    api: Optional['FacebookAdsApi'] = None

    def __init__(self, token: str):
        """Store the access token used to build the per-context API session.

        :param token: Facebook access token for this session.
        """
        self.token = token

    def __enter__(self) -> 'PlatformApiContext':
        # Build a dedicated API instance bound to this token, leaving the
        # process-wide default FacebookAdsApi session untouched.
        self.api = FacebookAdsApi(FacebookSession(access_token=self.token))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        We do not need to do anything specific yet
        """
        pass

    def to_fb_model(self, entity_id: str, entity_type: str):
        """
        Like stand-alone to_fb_model but removes the need to pass in the API
        instance manually.

        :param entity_id: ID of the platform entity to wrap.
        :param entity_type: Entity type understood by to_fb_model.
        :return: SDK model object bound to this context's API session.
        """
        return to_fb_model(entity_id, entity_type, self.api)
class FacebookApiErrorInspector:
    """
    A vehicle to store the information on distinct *types* of errors that FB
    can throw at us and that we're interested in.

    Wraps a single FacebookRequestError and classifies it into an
    (ExternalPlatformJobStatus, FailureBucket) pair.
    """

    # Maps (api_error_code, api_error_subcode) -> (job status, failure bucket).
    # A subcode of None matches errors that carry no subcode.
    ERROR_CODE_MAP: Dict[Tuple[int, Optional[int]], Tuple[int, int]] = {
        # Application request limit reached
        (4, None): (ExternalPlatformJobStatus.ApplicationThrottlingError, FailureBucket.ApplicationThrottling),
        # User request limit reached
        (17, None): (ExternalPlatformJobStatus.UserThrottlingError, FailureBucket.UserThrottling),
        # User request limit reached
        (17, 2446079): (ExternalPlatformJobStatus.UserThrottlingError, FailureBucket.UserThrottling),
        # AdAccount request limit reached
        (613, 1487742): (ExternalPlatformJobStatus.AdAccountThrottlingError, FailureBucket.AdAccountThrottling),
        # Too big a report
        (100, 1487534): (ExternalPlatformJobStatus.TooMuchData, FailureBucket.TooLarge),
        # Object does not exist, cannot be loaded due to missing permissions, or does not support this operation
        (100, 13): (ExternalPlatformJobStatus.InaccessibleObject, FailureBucket.InaccessibleObject),
        (100, 33): (ExternalPlatformJobStatus.InaccessibleObject, FailureBucket.InaccessibleObject),
    }

    # Fallback classification for errors that are distinguishable only by the
    # exact message text, keyed by (api_error_code, api_error_message).
    ERROR_MESSAGE_MAP: Dict[Tuple[int, str], Tuple[int, int]] = {
        (1, "Please reduce the amount of data you're asking for, then retry your request"): (
            ExternalPlatformJobStatus.TooMuchData,
            FailureBucket.TooLarge,
        )
    }

    # The exception instance under inspection.
    _exception: FacebookRequestError

    def __init__(self, exception: FacebookRequestError):
        """Store the exception we want to test."""
        self._exception = exception

    def get_status_and_bucket(self) -> Tuple[int, int]:
        """Extract status and bucket from the inspected exception.

        Lookup order: exact (code, subcode) match, then (code, message)
        match, then a generic platform-error fallback — so this always
        returns a concrete pair, never None.
        """
        code = self._exception.api_error_code()
        subcode = self._exception.api_error_subcode()
        error_message = self._exception.api_error_message()

        status_bucket = self.ERROR_CODE_MAP.get((code, subcode))
        if status_bucket is None:
            status_bucket = self.ERROR_MESSAGE_MAP.get((code, error_message))
        if status_bucket is None:
            # Unrecognized error: classify as generic so the caller still
            # gets a usable (status, bucket) pair.
            status_bucket = ExternalPlatformJobStatus.GenericPlatformError, FailureBucket.Other
        return status_bucket
_default_fields_map = {
AdAccount: collapse_fields_children(
[
AdAccount.Field.id,
AdAccount.Field.account_id,
AdAccount.Field.name,
AdAccount.Field.account_status,
AdAccount.Field.amount_spent,
AdAccount.Field.attribution_spec,
AdAccount.Field.can_create_brand_lift_study,
AdAccount.Field.capabilities,
AdAccount.Field.currency,
AdAccount.Field.end_advertiser,
AdAccount.Field.end_advertiser_name,
AdAccount.Field.owner,
AdAccount.Field.rf_spec,
AdAccount.Field.spend_cap,
AdAccount.Field.timezone_id,
AdAccount.Field.timezone_name,
AdAccount.Field.timezone_offset_hours_utc,
]
),
Campaign: collapse_fields_children(
[
Campaign.Field.account_id,
Campaign.Field.adlabels,
Campaign.Field.buying_type,
Campaign.Field.can_create_brand_lift_study,
Campaign.Field.can_use_spend_cap,
Campaign.Field.created_time,
Campaign.Field.effective_status,
Campaign.Field.id,
Campaign.Field.name,
Campaign.Field.objective,
Campaign.Field.spend_cap,
Campaign.Field.start_time,
Campaign.Field.status,
Campaign.Field.stop_time,
Campaign.Field.updated_time,
]
),
AdSet: collapse_fields_children(
[
AdSet.Field.account_id,
AdSet.Field.bid_amount,
AdSet.Field.bid_info,
AdSet.Field.billing_event,
AdSet.Field.budget_remaining,
AdSet.Field.campaign_id,
AdSet.Field.created_time,
AdSet.Field.daily_budget,
AdSet.Field.effective_status,
AdSet.Field.end_time,
AdSet.Field.id,
AdSet.Field.bid_strategy,
AdSet.Field.lifetime_budget,
AdSet.Field.name,
AdSet.Field.optimization_goal,
AdSet.Field.start_time,
AdSet.Field.status,
AdSet.Field.targeting,
AdSet.Field.updated_time,
]
),
Ad: collapse_fields_children(
[
Ad.Field.account_id,
Ad.Field.adset_id,
Ad.Field.campaign_id,
Ad.Field.created_time,
Ad.Field.effective_status,
Ad.Field.id,
Ad.Field.last_updated_by_app_id,
Ad.Field.name,
Ad.Field.source_ad_id,
Ad.Field.status,
Ad.Field.tracking_specs,
Ad.Field.updated_time,
(
Ad.Field.creative,
[
AdCreative.Field.effective_instagram_story_id,
AdCreative.Field.effective_object_story_id,
AdCreative.Field.id,
AdCreative.Field.name,
],
),
]
),
AdCreative: collapse_fields_children(
[
AdCreative.Field.id,
AdCreative.Field.account_id,
AdCreative.Field.actor_id,
AdCreative.Field.adlabels,
AdCreative.Field.applink_treatment,
AdCreative.Field.asset_feed_spec,
AdCreative.Field.body,
AdCreative.Field.branded_content_sponsor_page_id,
AdCreative.Field.call_to_action_type,
AdCreative.Field.effective_instagram_story_id,
AdCreative.Field.effective_object_story_id,
AdCreative.Field.image_crops,
AdCreative.Field.image_hash,
AdCreative.Field.image_url,
AdCreative.Field.instagram_actor_id,
AdCreative.Field.instagram_permalink_url,
AdCreative.Field.instagram_story_id,
AdCreative.Field.link_og_id,
AdCreative.Field.link_url,
AdCreative.Field.name,
AdCreative.Field.object_id,
AdCreative.Field.object_story_id,
AdCreative.Field.object_story_spec,
AdCreative.Field.object_type,
AdCreative.Field.object_url,
AdCreative.Field.platform_customizations,
AdCreative.Field.product_set_id,
AdCreative.Field.status,
AdCreative.Field.template_url,
AdCreative.Field.template_url_spec,
AdCreative.Field.thumbnail_url,
AdCreative.Field.title,
AdCreative.Field.url_tags,
AdCreative.Field.video_id,
]
),
AdVideo: collapse_fields_children(
[
AdVideo.Field.id,
AdVideo.Field.ad_breaks,
AdVideo.Field.backdated_time,
AdVideo.Field.backdated_time_granularity,
AdVideo.Field.content_tags,
AdVideo.Field.created_time,
AdVideo.Field.content_category,
AdVideo.Field.custom_labels,
AdVideo.Field.description,
AdVideo.Field.embed_html,
AdVideo.Field.embeddable,
AdVideo.Field.event,
AdVideo.Field.format,
AdVideo.Field.field_from,
AdVideo.Field.icon,
AdVideo.Field.is_crosspost_video,
AdVideo.Field.is_crossposting_eligible,
AdVideo.Field.is_instagram_eligible,
AdVideo.Field.length,
AdVideo.Field.live_status,
AdVideo.Field.permalink_url,
AdVideo.Field.picture,
AdVideo.Field.place,
AdVideo.Field.privacy,
AdVideo.Field.published,
AdVideo.Field.scheduled_publish_time,
AdVideo.Field.source,
AdVideo.Field.status,
AdVideo.Field.title,
AdVideo.Field.universal_video_id,
AdVideo.Field.updated_time,
]
),
CustomAudience: collapse_fields_children(
[
CustomAudience.Field.id,
CustomAudience.Field.account_id,
CustomAudience.Field.name,
CustomAudience.Field.approximate_count,
CustomAudience.Field.data_source,
CustomAudience.Field.delivery_status,
CustomAudience.Field.description,
CustomAudience.Field.rule_aggregation,
CustomAudience.Field.subtype,
CustomAudience.Field.external_event_source,
CustomAudience.Field.is_value_based,
CustomAudience.Field.lookalike_audience_ids,
CustomAudience.Field.lookalike_spec,
CustomAudience.Field.operation_status,
CustomAudience.Field.opt_out_link,
CustomAudience.Field.permission_for_actions,
CustomAudience.Field.pixel_id,
CustomAudience.Field.retention_days,
CustomAudience.Field.time_content_updated,
CustomAudience.Field.time_created,
CustomAudience.Field.time_updated,
# `rule` too large objects to download
# (for more information, see https://operam.atlassian.net/browse/PROD-4298)
# 'rule',
# These are Create/Update only fields
# 'allowed_domains',
# 'claim_objective',
# 'content_type',
# 'dataset_id',
# 'event_source_group',
# 'origin_audience_id',
# 'prefill',
# 'product_set_id',
# These fields are not part of the official api docs
# 'associated_audience_id',
# 'exclusions',
# 'inclusions',
# 'parent_audience_id',
# 'tags',
]
),
Page: collapse_fields_children(
[
Page.Field.about,
Page.Field.ad_campaign,
Page.Field.affiliation,
Page.Field.app_id,
# Page.Field.app_links, # quietly removed in v6.0
Page.Field.artists_we_like,
Page.Field.attire,
Page.Field.awards,
Page.Field.band_interests,
Page.Field.band_members,
# Page.Field.best_page', # requires Page Public Content Access
Page.Field.bio,
Page.Field.birthday,
Page.Field.booking_agent,
Page.Field.built,
Page.Field.business,
Page.Field.can_checkin,
Page.Field.can_post,
Page.Field.category,
Page.Field.category_list,
Page.Field.checkins,
Page.Field.company_overview,
Page.Field.connected_instagram_account,
Page.Field.contact_address,
# Page.Field.context', # silently lost access to this field on April 30, 2019 4AM
# Page.Field.copyright_attribution_insights', # A page access token is required to request this resource
# Page.Field.copyright_whitelisted_ig_partners', # A page access token is required to request this resource
Page.Field.country_page_likes,
Page.Field.cover,
Page.Field.culinary_team,
Page.Field.current_location,
Page.Field.description,
Page.Field.description_html,
Page.Field.directed_by,
Page.Field.display_subtext,
Page.Field.displayed_message_response_time,
Page.Field.emails,
Page.Field.engagement,
Page.Field.fan_count,
Page.Field.featured_video,
Page.Field.features,
Page.Field.food_styles,
Page.Field.founded,
Page.Field.general_info,
Page.Field.general_manager,
Page.Field.genre,
Page.Field.global_brand_page_name,
Page.Field.global_brand_root_id,
# Page.Field.has_added_app', # requires Page Public Content Access
Page.Field.has_whatsapp_number,
Page.Field.hometown,
Page.Field.hours,
Page.Field.id,
Page.Field.impressum,
Page.Field.influences,
Page.Field.instagram_business_account,
# Page.Field.instant_articles_review_status',
Page.Field.is_always_open,
Page.Field.is_chain,
Page.Field.is_community_page,
Page.Field.is_eligible_for_branded_content,
Page.Field.is_messenger_bot_get_started_enabled,
Page.Field.is_messenger_platform_bot,
Page.Field.is_owned,
Page.Field.is_permanently_closed,
Page.Field.is_published,
Page.Field.is_unclaimed,
Page.Field.is_verified,
Page.Field.is_webhooks_subscribed,
Page.Field.keywords,
Page.Field.link,
Page.Field.location,
Page.Field.members,
Page.Field.messenger_ads_default_icebreakers,
Page.Field.messenger_ads_default_page_welcome_message,
Page.Field.messenger_ads_default_quick_replies,
Page.Field.messenger_ads_quick_replies_type,
Page.Field.mission,
Page.Field.mpg,
Page.Field.name,
Page.Field.name_with_location_descriptor,
Page.Field.network,
Page.Field.new_like_count,
Page.Field.offer_eligible,
Page.Field.overall_star_rating,
Page.Field.page_token,
# Page.Field.parent_page', # requires Page Public Content Access
Page.Field.parking,
Page.Field.payment_options,
Page.Field.personal_info,
Page.Field.personal_interests,
Page.Field.pharma_safety_info,
Page.Field.phone,
Page.Field.place_type,
Page.Field.plot_outline,
# Page.Field.preferred_audience', # Error msg "Param account_linking_token is required"
Page.Field.press_contact,
Page.Field.price_range,
Page.Field.produced_by,
Page.Field.products,
Page.Field.promotion_eligible,
Page.Field.promotion_ineligible_reason,
Page.Field.public_transit,
Page.Field.rating_count,
# Page.Field.recipient', # Error message "(#100) Param account_linking_token is required"
Page.Field.record_label,
Page.Field.release_date,
Page.Field.restaurant_services,
Page.Field.restaurant_specialties,
Page.Field.schedule,
Page.Field.screenplay_by,
Page.Field.season,
Page.Field.single_line_address,
Page.Field.starring,
Page.Field.start_info,
# Page.Field.store_code', # Error message "(#200) The parent page should be whitelisted for store codes."
Page.Field.store_location_descriptor,
Page.Field.store_number,
Page.Field.studio,
# Page.Field.supports_instant_articles', # requires 'view instant articles"
Page.Field.talking_about_count,
Page.Field.unread_message_count,
Page.Field.unread_notif_count,
Page.Field.unseen_message_count,
Page.Field.username,
Page.Field.verification_status,
Page.Field.voip_info,
Page.Field.website,
Page.Field.were_here_count,
Page.Field.whatsapp_number,
Page.Field.written_by,
]
),
PagePost: collapse_fields_children(
[
# 'can_reply_privately', # requires READ_PAGE_MAILBOXES or PAGES_MESSAGING permission
PagePost.Field.admin_creator,
PagePost.Field.allowed_advertising_objectives,
PagePost.Field.application,
'attachments', # PagePost.Field.attachments, <- :( not official attribute
PagePost.Field.backdated_time,
PagePost.Field.call_to_action,
PagePost.Field.child_attachments,
PagePost.Field.comments_mirroring_domain,
PagePost.Field.coordinates,
PagePost.Field.created_time,
PagePost.Field.event,
PagePost.Field.expanded_height,
PagePost.Field.expanded_width,
PagePost.Field.feed_targeting,
PagePost.Field.field_from,
PagePost.Field.full_picture,
PagePost.Field.height,
PagePost.Field.icon,
PagePost.Field.id,
PagePost.Field.instagram_eligibility,
PagePost.Field.is_app_share,
PagePost.Field.is_eligible_for_promotion,
PagePost.Field.is_expired,
PagePost.Field.is_hidden,
PagePost.Field.is_instagram_eligible,
PagePost.Field.is_popular,
PagePost.Field.is_published,
PagePost.Field.is_spherical,
PagePost.Field.message,
PagePost.Field.message_tags,
PagePost.Field.multi_share_end_card,
PagePost.Field.multi_share_optimized,
PagePost.Field.parent_id,
PagePost.Field.permalink_url,
PagePost.Field.picture,
PagePost.Field.place,
PagePost.Field.privacy,
PagePost.Field.promotable_id,
PagePost.Field.promotion_status,
PagePost.Field.properties,
PagePost.Field.scheduled_publish_time,
PagePost.Field.shares,
PagePost.Field.status_type,
PagePost.Field.story,
PagePost.Field.story_tags,
PagePost.Field.subscribed,
PagePost.Field.target,
PagePost.Field.targeting,
PagePost.Field.timeline_visibility,
PagePost.Field.updated_time,
PagePost.Field.via,
PagePost.Field.video_buying_eligibility,
PagePost.Field.width,
]
),
Comment: collapse_fields_children(
[
Comment.Field.application,
Comment.Field.attachment,
Comment.Field.can_comment,
Comment.Field.can_like,
Comment.Field.can_remove,
Comment.Field.comment_count,
Comment.Field.created_time,
Comment.Field.field_from,
Comment.Field.id,
Comment.Field.is_hidden,
Comment.Field.is_private,
Comment.Field.like_count,
Comment.Field.live_broadcast_timestamp,
Comment.Field.message,
Comment.Field.message_tags,
Comment.Field.object,
Comment.Field.parent,
'parent_id', # Comment.Field.parent_id, # Not official?
Comment.Field.permalink_url,
Comment.Field.user_likes,
# 'can_reply_privately',
# 'can_hide', # Error message: "(#210) A page access token is required to request this resource."
# 'private_reply_conversation',
# Error message: "(#200) The page does not have READ_PAGE_MAILBOXES or PAGES_MESSAGING permission."
# Reactions edge traversal
'reactions.type(LIKE).summary(true).limit(0).as(reaction_like)',
'reactions.type(LOVE).summary(true).limit(0).as(reaction_love)',
'reactions.type(WOW).summary(true).limit(0).as(reaction_wow)',
'reactions.type(HAHA).summary(true).limit(0).as(reaction_haha)',
'reactions.type(SAD).summary(true).limit(0).as(reaction_sad)',
'reactions.type(ANGRY).summary(true).limit(0).as(reaction_angry)',
'reactions.type(THANKFUL).summary(true).limit(0).as(reaction_thankful)',
]
),
}
def get_default_fields(model_klass: Type['Model']) -> List[str]:
    """
    Return the default field list to fetch for a given entity type.

    The entity class must come from the Facebook SDK.  A curated list from
    ``_default_fields_map`` is preferred; otherwise every attribute declared
    on the class's ``Field`` container (minus dunders) is used.
    """
    assert issubclass(model_klass, abstractcrudobject.AbstractCrudObject)
    curated = _default_fields_map.get(model_klass)
    if curated is not None:
        return curated
    # Fall back to introspecting the SDK's Field container.
    return [
        getattr(model_klass.Field, attr_name)
        for attr_name in dir(model_klass.Field)
        if not attr_name.startswith('__')
    ]
# Page-size tuning for FB API fetches:
# FB defaults for most of these are some 20-25 items per page.  At that
# level, paging through tens of thousands of results is super painful (and
# long, obviously).  Hence the bumped page size for each type -- but the
# larger the size, the greater the chance that too much data on one page
# kills the request.
# So, if you start seeing chronic failures in fetches of a particular type
# of object across all AdAccounts, push the number lower.
# If you see problems with a particular AdAccount (one that likes to use
# lots of DMA / zip codes for targeting, for example), that would be the
# time for tuning page sizes per AdAccount, or speculatively adapting the
# page size in evidence of errors in prior attempts.
# There is also a nice side-effect to shifting this work to FB -- each
# outstanding request runs longer and allows greater concurrency locally.
_default_page_size = {
    Campaign: 400,
    AdSet: 200,  # super heavy object, mostly because of the Targeting spec. Keep it smallish
    Ad: 400,
    Comment: 100,
    CustomAudience: 50,
}
# NOTE(review): appears to cap page-access-token fetches; the use site is
# not visible in this chunk -- confirm there.
DEFAULT_PAGE_ACCESS_TOKEN_LIMIT = 250
def get_default_page_size(model_klass: Type['Model']) -> int:
    """
    Return the page size to request from the FB API for *model_klass*.

    FB's default paging size (usually some 25 items) is too small for the
    large collections we page through; unknown classes fall back to 100.
    """
    assert issubclass(model_klass, abstractcrudobject.AbstractCrudObject)
    try:
        return _default_page_size[model_klass]
    except KeyError:
        return 100
# Extra request params required per entity type.  Comments are fetched with
# filter=stream; NOTE(review): presumably this requests a flat comment
# stream rather than a threaded one -- confirm against the FB Graph API docs.
_default_additional_params = {Comment: {'filter': 'stream'}}
def get_additional_params(model_klass: Type['Model']) -> Dict[str, Any]:
    """
    Return extra FB API request parameters needed for *model_klass*.

    Most entity types need none, but some (i.e. Comments) take additional
    parameters that make fetching the data simpler.
    """
    assert issubclass(model_klass, abstractcrudobject.AbstractCrudObject)
    if model_klass in _default_additional_params:
        return _default_additional_params[model_klass]
    return {}
# By default ARCHIVED is filtered out
# Here we repeat all possible status values we get by default
# | |
from ansys.mapdl.core._commands import parse
def bsplin(self, p1="", p2="", p3="", p4="", p5="", p6="", xv1="", yv1="",
zv1="", xv6="", yv6="", zv6="", **kwargs) -> int:
"""Generate a single line from a spline fit to a series of keypoints.
APDL Command: BSPLIN
Parameters
----------
p1, p2, p3, p4, p5, p6
Keypoints through which a spline is fit. At least two
keypoints must be defined.
XV1, YV1, ZV1
Orientation point of an outward vector tangent to line at
P1. Vector coordinate system has its origin at the
keypoint. Coordinate interpretation corresponds to the
active coordinate system type, i.e., X is R for
cylindrical, etc. Defaults to zero curvature slope.
XV6, YV6, ZV6
Orientation point of an outward vector tangent to a line
at P6 (or the last keypoint specified if fewer than six
specified). Defaults to zero curvature slope.
Returns
-------
int
Line number of the spline generated from the spline fit.
Examples
--------
Generate a spline through ``(0, 0, 0)``, ``(0, 1, 0)`` and
``(1, 2, 0)``
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 0, 1, 0)
>>> k2 = mapdl.k("", 1, 2, 0)
>>> lnum = mapdl.bsplin(k0, k1, k2)
Notes
-----
One line is generated between keypoint P1 and the last
keypoint entered. The line will pass through each entered
keypoint. Solid modeling in a toroidal coordinate system is
not recommended.
"""
command = f"BSPLIN,{p1},{p2},{p3},{p4},{p5},{p6},{xv1},{yv1},{zv1},{xv6},{yv6},{zv6}"
return parse.parse_line_no(self.run(command, **kwargs))
def circle(self, pcent="", rad="", paxis="", pzero="", arc="", nseg="",
**kwargs) -> list:
"""Generate circular arc lines.
APDL Command: CIRCLE
Parameters
----------
pcent
Keypoint defining the center of the circle (in the plane
of the circle).
rad
Radius of the circle. If RAD is blank and PCENT = P, the
radius is the distance from PCENT to PZERO.
paxis
Keypoint defining axis of circle (along with PCENT). If
PCENT = P and PAXIS is omitted, the axis is normal to the
working plane.
pzero
Keypoint defining the plane normal to circle (along with
PCENT and PAXIS) and the zero degree location. Need not
be in the plane of the circle. This value is not required
if PAXIS is defined along the Y axis (that is, a circle in
the XZ plane).
arc
Arc length (in degrees). Positive follows right-hand rule
about PCENT-PAXIS vector. Defaults to 360 degrees.
nseg
Number of lines around circumference (defaults to minimum
required for 90 degrees-maximum arcs, i.e., 4 for 360 degrees). Number
of keypoints generated is NSEG for 360 degrees or NSEG + 1 for
less than 360 degrees.
Returns
-------
list
List of lines of the circular arcs generated from this
command.
Examples
--------
Create a full circle containing four circular arcs. Circle
centered at (0, 0, 0) and generated in the XY plane. Return
the lines generated from the circle.
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 0, 0, 1)
>>> carc0 = mapdl.circle(k0, 1, k1)
>>> carc0
[1, 2, 3, 4]
Notes
-----
Generates circular arc lines (and their corresponding
keypoints). Keypoints are generated at regular angular
locations (based on a maximum spacing of 90 degrees). Arc lines are
generated connecting the keypoints. Keypoint and line numbers
are automatically assigned, beginning with the lowest
available values [NUMSTR]. Adjacent lines use a common
keypoint. Line shapes are generated as arcs, regardless of
the active coordinate system. Line shapes are invariant with
coordinate system after they are generated.
"""
command = f"CIRCLE,{pcent},{rad},{paxis},{pzero},{arc},{nseg}"
return parse.parse_line_nos(self.run(command, **kwargs))
def l(self, p1="", p2="", ndiv="", space="", xv1="", yv1="", zv1="",
xv2="", yv2="", zv2="", **kwargs) -> int:
"""Define a line between two keypoints.
APDL Command: L
Parameters
----------
p1
Keypoint at the beginning of line.
p2
Keypoint at the end of line.
ndiv
Number of element divisions within this line. Normally
this field is not used; specifying divisions with LESIZE,
etc. is recommended.
space
Spacing ratio. Normally this field is not used, as
specifying spacing ratios with the LESIZE command is
recommended. If positive, space is the nominal ratio of
the last division size (at P2) to the first division size
(at P1). If the ratio is greater than 1, the division
sizes increase from P1 to P2, and if less than 1, they
decrease. If space is negative, then ``space`` is the
nominal ratio of the center division size to those at the
ends.
Returns
-------
int
The line number of the generated line.
Examples
--------
Create a line between the two keypoints (0, 0, 0) and (1, 0, 0)
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 1, 0, 0)
>>> lnum = mapdl.l(k0, k1)
>>> lnum
1
Notes
-----
Defines a line between two keypoints from P1 to P2. The line
shape may be generated as "straight" (in the active coordinate
system) or curved. The line shape is invariant with
coordinate system after it is generated. Note that solid
modeling in a toroidal coordinate system is not recommended.
A curved line is limited to 180 degrees. Lines may be redefined only
if not yet attached to an area.
"""
command = f"L,{p1},{p2},{ndiv},{space},{xv1},{yv1},{zv1},{xv2},{yv2},{zv2}"
return parse.parse_line_no(self.run(command, **kwargs))
def l2ang(self, nl1="", nl2="", ang1="", ang2="", phit1="", phit2="",
**kwargs) -> int:
"""Generates a line at an angle with two existing lines.
APDL Command: L2ANG
Parameters
----------
nl1
Number of the first line to be hit (touched by the end of
the new line). If negative, assume P1 (see below) is the
second keypoint of the line instead of the first.
nl2
Number of the second line to be hit. If negative, assume
P3 is the second keypoint of the line instead of the
first.
ang1
Angle of intersection (usually zero or 180) of generated
line with tangent to first line.
ang2
Angle of intersection (usually zero or 180) of generated
line with tangent to second line.
phit1
Number to be assigned to keypoint generated at hit
location on first line (defaults to lowest available
keypoint number [NUMSTR]).
phit2
Number to be assigned to keypoint generated at hit
location on second line (defaults to lowest available
keypoint number [NUMSTR]).
Returns
-------
int
Line number of the generated line.
Examples
--------
Create two circles and join them with a line.
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 0, 0, 1)
>>> k2 = mapdl.k("", 0, 0, 0.5)
>>> carc0 = mapdl.circle(k0, 1, k1)
>>> carc1 = mapdl.circle(k2, 1, k1)
>>> lnum = mapdl.l2ang(carc0[0], carc1[0], 90, 90)
>>> lnum
9
Notes
-----
Generates a straight line (PHIT1-PHIT2) at an angle (ANG1)
with an existing line NL1 (P1-P2) and which is also at an
angle (ANG2) with another existing line NL2 (P3-P4). If the
angles are zero the generated line is tangent to the two
lines. The PHIT1 and PHIT2 locations on the lines are
automatically calculated. Line P1-P2 becomes P1-PHIT1, P3-P4
becomes P3-PHIT2, and new lines PHIT1-P2, PHIT2-P4, and
PHIT1-PHIT2 are generated. Line divisions are set to zero
(use LESIZE, etc. to modify).
"""
command = f"L2ANG,{nl1},{nl2},{ang1},{ang2},{phit1},{phit2}"
msg = self.run(command, **kwargs)
if msg:
return parse.parse_line_no(msg)
def l2tan(self, nl1="", nl2="", **kwargs) -> int:
"""Generates a line tangent to two lines.
APDL Command: L2TAN
Parameters
----------
nl1
Number of the first line generated line is tangent to. If
negative, assume P1 (see below) is the second keypoint of
the line instead of the first.
nl2
Number of the second line generated line is tangent to.
If negative, assume P3 is the second keypoint of the line
instead of the first.
Returns
-------
int
Line number of the generated line.
Examples
--------
Create two circular arcs and connect them with a spline.
>>> k0 = mapdl.k("", 0, 0, 0)
>>> k1 = mapdl.k("", 0, 0, 1)
>>> k2 = mapdl.k("", -1.5, 1.5, 0)
>>> k3 = mapdl.k("", -1.5, 1.5, 1)
>>> carc0 = mapdl.circle(k0, 1, k1, arc=90)
>>> carc1 = mapdl.circle(k2, 1, k3, arc=90)
>>> lnum = | |
is not None:
payload = wallet.get_formatted_request_payload()
payload["network"] = network.value
payload["amount"] = amount
payload["recipient_address"] = recipient_address
payload["return_compiled_transaction"] = return_compiled_transaction
if sender_public_key is not None:
payload["sender_public_key"] = sender_public_key
if token_address is not None:
payload["token_address"] = token_address
if fee_payer_wallet is not None:
payload["fee_payer_wallet"] = fee_payer_wallet.get_formatted_request_payload()['wallet']
response = self._request(
payload=payload,
endpoint="solana/wallet/transfer",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response['transaction_signature']
def create_nft(
self,
wallet: SolanaWallet,
network: SolanaNetwork = SolanaNetwork.DEVNET,
mint_to_public_key: str = None,
name: Optional[str] = None,
symbol: Optional[str] = None,
description: Optional[str] = None,
uri: Optional[str] = None,
image_url: Optional[str] = None,
uri_metadata: Optional[dict] = None,
upload_method: SolanaNFTUploadMethod = SolanaNFTUploadMethod.S3,
creators: Optional[List[str]] = None,
share: Optional[List[int]] = None,
seller_fee_basis_points: int = 0,
is_mutable: bool = True,
is_master_edition: bool = True
) -> dict:
"""
More info:
https://docs.blockchainapi.com/#operation/solanaCreateNFT
:param wallet:
:param network:
:param mint_to_public_key: Assign ownership of the NFT after minting it
:param name: The name of the NFT
:param symbol: The symbol of the NFT
:param description: The description of the NFT
:param uri: The image of the NFT. Please see the description in the documentation
(docs.blockchainapi.com/#operation/solanaCreateNFT)
:param image_url: Please see the description in the documentation.
:param uri_metadata: The metadata of the NFT. Please see the description in the documentation.
:param upload_method: The upload method of the NFT. Please see the description in the documentation.
to the NFT
:param creators:
:param share:
:param seller_fee_basis_points:
:param is_mutable:
:param is_master_edition:
:return:
"""
wallet_payload = wallet.get_formatted_request_payload()
payload = {
"network": network.value,
"name": name,
"symbol": symbol,
"description": description,
"uri": uri,
"image_url": image_url,
"upload_method": upload_method.value,
"is_mutable": is_mutable,
"is_master_edition": is_master_edition,
"seller_fee_basis_points": seller_fee_basis_points
}
payload = {**payload, **wallet_payload}
if uri_metadata is not None:
payload['uri_metadata'] = uri_metadata
if creators is not None:
payload['creators'] = creators
if share is not None:
payload['share'] = share
if mint_to_public_key is not None:
payload['mint_to_public_key'] = mint_to_public_key
response = self._request(
payload=payload,
endpoint="solana/nft",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def search_nfts(
self,
update_authority: Optional[str] = None,
update_authority_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
mint_address: Optional[str] = None,
mint_address_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
nft_name: Optional[str] = None,
nft_name_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
nft_uri: Optional[str] = None,
nft_uri_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
symbol: Optional[str] = None,
symbol_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
payload = {
'network': network.value
}
if update_authority is not None:
payload['update_authority'] = update_authority
payload['update_authority_search_method'] = update_authority_search_method.value
if mint_address is not None:
payload['mint_address'] = mint_address
payload['mint_address_search_method'] = mint_address_search_method.value
if nft_uri is not None:
payload['uri'] = nft_uri
payload['uri_search_method'] = nft_uri_search_method.value
if symbol is not None:
payload['symbol'] = symbol
payload['symbol_search_method'] = symbol_search_method.value
if nft_name is not None:
payload['name'] = nft_name
payload['name_search_method'] = nft_name_search_method.value
response = self._request(
payload=payload,
endpoint="solana/nft/search",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def get_nft_metadata(
self,
mint_address: str,
network: SolanaNetwork = SolanaNetwork.DEVNET
) -> Optional[dict]:
"""
More info:
https://docs.blockchainapi.com/#operation/solanaGetNFT
:param mint_address:
:param network:
:return:
"""
response = self._request(
endpoint=f"solana/nft/{network.value}/{mint_address}",
request_method=self._RequestMethod.GET
)
if 'error_message' in response:
raise Exception(response['error_message'])
if isinstance(response, Response):
if response.status_code == 404:
return None
else:
raise Exception("Unknown error: ", response.status_code)
return response
def get_nft_mint_fee(
self
) -> dict:
"""
More info:
https://docs.blockchainapi.com/#operation/solanaGetNFTMintFee
:return:
"""
response = self._request(
endpoint="solana/nft/mint/fee",
request_method=self._RequestMethod.GET
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def get_airdrop(
self,
recipient_address: str
) -> str:
"""
Get an airdrop of 0.015 SOL on the devnet
More info:
https://docs.blockchainapi.com/#operation/solanaGetAirdrop
:param recipient_address:
:return: Transaction signature
"""
response = self._request(
payload={
"recipient_address": recipient_address
},
endpoint="solana/wallet/airdrop",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response['transaction_signature']
def get_candy_machine_metadata(
self,
candy_machine_id: Optional[str] = None,
config_address: Optional[str] = None,
uuid: Optional[str] = None,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
More Info:
https://docs.blockchainapi.com/#operation/solanaGetCandyMachineDetails
:param candy_machine_id: The candy_machine_id. Same as config_address in v2.
:param config_address: The config_address. Same as candy_machine_id in v2.
:param uuid: The first six characters of config_address. Sometimes, you can only find the uuid.
:param network: e.g., mainnet-beta, devnet
:return:
"""
payload = {
"network": network.value,
"candy_machine_contract_version": "v2"
}
if candy_machine_id is not None:
payload['candy_machine_id'] = candy_machine_id
if config_address is not None:
payload['config_address'] = config_address
if uuid is not None:
payload['uuid'] = uuid
response = self._request(
payload=payload,
endpoint="solana/nft/candy_machine/metadata",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def mint_from_candy_machine(
self,
config_address: str,
wallet: SolanaWallet,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
Mint Info:
https://docs.blockchainapi.com/#operation/solanaMintFromCandyMachine
:param config_address: The config address of the candy machine.
You can retrieve this if you have the candy machine ID using
this endpoint (https://docs.blockchainapi.com/#operation/solanaGetCandyMachineDetails)
and retrieving the config_address from the response..
:param wallet:
:param network:
:return: A task_id. Use the `get_task` function to retrieve the result once this task has completed processing.
You can poll the `get_task` function to see results.
"""
wallet_payload = wallet.get_formatted_request_payload()
payload = {
"network": network.value,
"config_address": config_address,
"candy_machine_contract_version": "v2"
}
payload = {**payload, **wallet_payload}
response = self._request(
payload=payload,
endpoint="solana/nft/candy_machine/mint",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response['transaction_signature']
def list_all_candy_machines(self):
"""
:return:
"""
response = self._request(
endpoint="solana/nft/candy_machine/list",
request_method=self._RequestMethod.GET
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def search_candy_machines(
self,
update_authority: Optional[str] = None,
update_authority_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
config_address: Optional[str] = None,
config_address_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
uuid: Optional[str] = None,
uuid_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
symbol: Optional[str] = None,
symbol_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
nft_name: Optional[str] = None,
nft_name_index: Optional[int] = None,
nft_name_search_method: SearchMethod = SearchMethod.EXACT_MATCH,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
payload = {
'network': network.value,
'candy_machine_contract_version': 'v2'
}
if update_authority is not None:
payload['update_authority'] = update_authority
payload['update_authority_search_method'] = update_authority_search_method.value
if config_address is not None:
payload['config_address'] = config_address
payload['config_address_search_method'] = config_address_search_method.value
if uuid is not None:
payload['uuid'] = uuid
payload['uuid_search_method'] = uuid_search_method.value
if symbol is not None:
payload['symbol'] = symbol
payload['symbol_search_method'] = symbol_search_method.value
if nft_name is not None:
payload['nft_name'] = nft_name
payload['nft_name_index'] = nft_name_index
payload['nft_name_search_method'] = nft_name_search_method.value
response = self._request(
payload=payload,
endpoint="solana/nft/candy_machine/search",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def create_test_candy_machine(
self,
wallet: SolanaWallet,
include_gatekeeper: bool = False,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
Mint Info:
https://docs.blockchainapi.com/#operation/solanaCreateTestCandyMachine
:param wallet:
:param network:
:param include_gatekeeper:
:return:
"""
wallet_payload = wallet.get_formatted_request_payload()
payload = {
"network": network.value,
"include_gatekeeper": include_gatekeeper
}
payload = {**payload, **wallet_payload}
response = self._request(
payload=payload,
endpoint="solana/nft/candy_machine",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response['candy_machine_id']
def get_solana_transaction(
self,
tx_signature: str,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
https://docs.blockchainapi.com/#operation/solanaGetTransaction
:param tx_signature:
:param network:
:return:
"""
response = self._request(
endpoint=f"solana/transaction/{network.value}/{tx_signature}",
request_method=self._RequestMethod.GET
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def get_all_nfts_from_candy_machine(
self,
candy_machine_id,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
Retrieves all NFTs from a Solana candy machine - both minted and unminted
See the returned attributes `all_nfts`, `unminted_nfts`, and `minted_nfts`
https://docs.blockchainapi.com/#operation/solanaGetAllNFTsFromCandyMachine
:param candy_machine_id:
:param network:
:return:
"""
response = self._request(
payload={},
endpoint=f"solana/nft/candy_machine/{network.value}/{candy_machine_id}/nfts",
request_method=self._RequestMethod.GET
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def get_candy_machine_id_from_nft(
self,
mint_address,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
https://docs.blockchainapi.com/#operation/solanaGetNFTsCandyMachineId
:param mint_address:
:param network:
:return:
"""
payload = {
"network": network.value,
"mint_address": mint_address
}
response = self._request(
payload=payload,
endpoint="solana/nft/candy_machine_id",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def get_account_info(
self,
public_key,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
https://docs.blockchainapi.com/#operation/solanaGetAccount
:param public_key:
:param network:
:return:
"""
response = self._request(
endpoint=f"solana/account/{network.value}/{public_key}",
request_method=self._RequestMethod.GET
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def get_spl_token(
self,
public_key,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
https://docs.blockchainapi.com/#operation/solanaGetAccount
:param public_key:
:param network:
:return:
"""
response = self._request(
endpoint=f"solana/spl-token/{network.value}/{public_key}",
request_method=self._RequestMethod.GET
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def get_nft_listing(
self,
mint_address: str,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
https://docs.blockchainapi.com/#operation/solanaGetAccount
"""
response = self._request(
endpoint=f"solana/nft/marketplaces/listing/{network.value}/{mint_address}",
request_method=self._RequestMethod.GET
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response
def list_nft(
self,
mint_address: str,
wallet: SolanaWallet,
nft_price: int,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
https://docs.blockchainapi.com/#operation/solanaGetAccount
"""
payload = wallet.get_formatted_request_payload()
payload['nft_price'] = nft_price
response = self._request(
payload=payload,
endpoint=f"solana/nft/marketplaces/magic-eden/list/{network.value}/{mint_address}",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response['transaction_signature']
def delist_nft(
self,
mint_address: str,
wallet: SolanaWallet,
network: SolanaNetwork = SolanaNetwork.DEVNET
):
"""
https://docs.blockchainapi.com/#operation/solanaGetAccount
"""
payload = wallet.get_formatted_request_payload()
response = self._request(
payload=payload,
endpoint=f"solana/nft/marketplaces/magic-eden/delist/{network.value}/{mint_address}",
request_method=self._RequestMethod.POST
)
if 'error_message' in response:
raise Exception(response['error_message'])
return response['transaction_signature']
def buy_nft(
self,
mint_address: str,
wallet: SolanaWallet,
nft_price: int,
network: SolanaNetwork = SolanaNetwork.DEVNET,
skip_checks: | |
# File: peakhood/hoodlib.py
#!/usr/bin/env python3
import gzip
import math
import os
import random
import re
import shutil
import statistics
import subprocess
import sys
import uuid
from distutils.spawn import find_executable

import matplotlib.pyplot as plt
# import plotly.express as px
import numpy as np
import pandas as pd
import seaborn as sns
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~ OPEN FOR BUSINESS ~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
AuthoR: uhlm [at] informatik.uni-freiburg.de
~~~~~~~~~~~~~
Run doctests
~~~~~~~~~~~~~
python3 -m doctest hoodlib.py
"""
################################################################################
def is_tool(name):
    """
    Check whether tool "name" is in PATH.

    Uses shutil.which instead of the deprecated
    distutils.spawn.find_executable (distutils was removed in Python 3.12).
    """
    return shutil.which(name) is not None
################################################################################
def dir_get_files(file_dir,
                  file_ending=False,
                  check=True):
    """
    Return sorted list of file names found directly in given file_dir.

    file_ending:
        Optional file extension (without the dot), e.g. file_ending="bed"
        to keep only .bed files.
    check:
        If True, assert that the directory / filtered result is non-empty.

    >>> test_dir = "test_data"
    >>> dir_get_files(test_dir, file_ending="bam")
    ['empty.bam']
    """
    from os import listdir
    from os.path import isfile, join
    dir_files = [f for f in listdir(file_dir) if isfile(join(file_dir, f))]
    if check:
        assert dir_files, "given directory \"%s\" contains no files" %(file_dir)
    # If filter for file ending true.
    if file_ending:
        # Fix: the old unanchored, unescaped regex (".+\.<ending>") also
        # matched names like "x.bam.txt" for ending "bam", and regex
        # metacharacters in file_ending matched literally anything.
        # Require the name to truly end with ".<ending>" and to have at
        # least one character before it.
        suffix = ".%s" %(file_ending)
        new_files = [df for df in dir_files
                     if df.endswith(suffix) and len(df) > len(suffix)]
        if check:
            assert new_files, "no files left after filtering by file ending \"%s\"" %(file_ending)
        return sorted(new_files)
    else:
        return sorted(dir_files)
################################################################################
def shutil_copy_file(from_file, to_file):
    """
    Copy from_file to to_file.
    Note that an existing to_file gets overwritten (!).
    """
    from shutil import copyfile
    # Source file has to be present.
    assert os.path.exists(from_file), "given file %s does not exist" %(from_file)
    copyfile(from_file, to_file)
################################################################################
def get_filter_lists(list_f1_filter, list_f2_filter,
                     valid_filters_dic=False):
    """
    Check and return the peakhood extract filter ID lists (f1, f2).

    valid_filters_dic maps filter ID -> filter level:
        1 : transcript level filter
        2 : exon level filter
    Falls back to default filter sets if the given lists are empty.
    """
    if not valid_filters_dic:
        valid_filters_dic = {"TSC": 1, "EIR": 2, "ISRN": 2,
                             "ISR": 1, "ISRFC": 1, "SEO": 1,
                             "FUCO": 1, "TCOV": 1, "TSL": 1}

    def _checked(id_list, opt_name):
        # Validate one user-supplied filter ID list and return a copy.
        assert not list_found_duplicates(id_list), "--%s list contains duplicates. Please provide each filter ID only once" % (opt_name)
        for fid in id_list:
            assert fid in valid_filters_dic, "invalid --%s ID given (%s)" % (opt_name, fid)
        return list(id_list)

    if list_f1_filter:
        f1_filters = _checked(list_f1_filter, "f1-filter")
    else:
        f1_filters = ['TSC']
    if list_f2_filter:
        f2_filters = _checked(list_f2_filter, "f2-filter")
    else:
        f2_filters = ['EIR', 'ISRN', 'ISR', 'ISRFC', 'SEO', 'FUCO', 'TCOV']
    return f1_filters, f2_filters
################################################################################
def get_tsl_score(tsl, gc_basic, ccds):
    """
    Compute a score from a transcript support level (TSL) flag.

    A TSL tag "assigned to previous version" gets a -2 penalty; the
    gc_basic and ccds quality tags add +3 and +5 respectively.

    Quality tags in (Ensembl) GTF:
    CCDS:
        Member of the consensus CDS gene set, confirming coding regions
        between ENSEMBL, UCSC, NCBI and HAVANA.
    basic:
        Identifies a subset of representative transcripts for each gene.

    >>> tsl = "3 (assigned to previous version 5)"
    >>> get_tsl_score(tsl, False, False)
    14
    >>> tsl = "1"
    >>> get_tsl_score(tsl, True, True)
    32
    >>> tsl = "NA"
    >>> get_tsl_score(tsl, False, False)
    4
    """
    assert tsl, "given tsl empty"
    # Base score per TSL tag.
    tag2sc_dic = {"1": 24, "2": 20, "3": 16, "4": 12, "5": 8, "NA": 4}
    if re.search('assigned', tsl):
        # Tag assigned to a previous transcript version: extract the
        # leading tag and apply a small penalty.
        m = re.search('(.+) \(assigned', tsl)
        tsl_sc = tag2sc_dic[m.group(1)] - 2
    else:
        tsl_sc = tag2sc_dic[tsl]
    if gc_basic:
        tsl_sc += 3
    if ccds:
        tsl_sc += 5
    return tsl_sc
################################################################################
def bed_extract_sequences_from_2bit(in_bed, out_fa, in_2bit,
                                    lc_repeats=False,
                                    convert_to_rna=False):
    """
    Extract sequences for in_bed regions from a genome .2bit file and
    store them in out_fa. The twoBitToFa executable needs to be in PATH.

    lc_repeats:
        If True, keep repeat regions lowercase (i.e. omit -noMask).
    convert_to_rna:
        If True, re-read the extracted sequences and write them out
        as RNA.
    """
    # twoBitToFa binary has to be available.
    assert is_tool("twoBitToFa"), "twoBitToFa not in PATH"
    # Assemble the command line.
    cmd_parts = ["twoBitToFa"]
    if not lc_repeats:
        cmd_parts.append("-noMask")
    cmd_parts.append("-bed=" + in_bed)
    cmd_parts.append(in_2bit)
    cmd_parts.append(out_fa)
    check_cmd = " ".join(cmd_parts)
    output = subprocess.getoutput(check_cmd)
    # twoBitToFa prints nothing on success, so any output means failure.
    assert not output, "twoBitToFa is complaining:\n%s\n%s" %(check_cmd, output)
    if convert_to_rna:
        # Reading the FASTA back in also converts sequences to RNA.
        seqs_dic = read_fasta_into_dic(out_fa)
        fasta_output_dic(seqs_dic, out_fa,
                         split=True)
################################################################################
def get_chromosome_lengths_from_2bit(in_2bit, out_lengths,
                                     std_chr_filter=False):
    """
    Get chromosome lengths from in_2bit .2bit file. Write lengths
    to out_lengths, with format:
    chr1	248956422
    chr10	133797422
    ...
    Also return a dictionary with key=chr_id and value=chr_length.
    The twoBitInfo executable needs to be in PATH.

    std_chr_filter:
        Filter / convert chromosome IDs with function check_convert_chr_id(),
        removing non-standard chromosomes, and convert IDs like 1,2,X,MT ..
        to chr1, chr2, chrX, chrM.
    """
    # Check for twoBitInfo.
    assert is_tool("twoBitInfo"), "twoBitInfo not in PATH"
    # Run twoBitInfo; it prints nothing on success, so any output is an error.
    check_cmd = "twoBitInfo " + in_2bit + " " + out_lengths
    output = subprocess.getoutput(check_cmd)
    assert not output, "twoBitInfo is complaining:\n%s\n%s" %(check_cmd, output)
    # Read in lengths into dictionary.
    chr_len_dic = {}
    with open(out_lengths) as f:
        for line in f:
            cols = line.strip().split("\t")
            # Skip empty / malformed lines (the old code crashed on them).
            if len(cols) < 2:
                continue
            chr_id = cols[0]
            chr_l = int(cols[1])
            # Optionally filter / convert chromosome IDs.
            if std_chr_filter:
                new_chr_id = check_convert_chr_id(chr_id)
                # If not standard chromosome ID or conversion failed, skip.
                if not new_chr_id:
                    continue
                chr_id = new_chr_id
            assert chr_id not in chr_len_dic, "non-unique chromosome ID \"%s\" encountered in \"%s\"" %(chr_id, out_lengths)
            chr_len_dic[chr_id] = chr_l
    assert chr_len_dic, "chr_len_dic empty (\"%s\" empty? Chromosome IDs filter activated?)" %(out_lengths)
    return chr_len_dic
################################################################################
def gtf_get_transcript_lengths(in_gtf,
                               tr2exc_dic=None):
    """
    Get transcript lengths (= sum of exon lengths, not unspliced length!)
    from GTF file (plain text, or gzipped if name ends in .gz).

    tr2exc_dic:
        Optionally provide a transcript ID to exon count dictionary for
        counting transcript exons.

    >>> in_gtf = "test_data/map_test_in.gtf"
    >>> gtf_get_transcript_lengths(in_gtf)
    {'ENST001': 2000, 'ENST002': 2000}
    """
    # Transcript ID to exonic length dictionary.
    tr2len_dic = {}
    # Open GTF either as .gz or as text file.
    if in_gtf.endswith(".gz"):
        f = gzip.open(in_gtf, 'rt')
    else:
        f = open(in_gtf, "r")
    # with-statement guarantees the handle is closed even if the
    # assertion below fires (the old code leaked it in that case).
    with f:
        for line in f:
            # Skip header.
            if line.startswith("#"):
                continue
            cols = line.strip().split("\t")
            feature = cols[2]
            if feature != "exon":
                continue
            feat_s = int(cols[3])
            feat_e = int(cols[4])
            infos = cols[8]
            # Extract transcript ID.
            m = re.search('transcript_id "(.+?)"', infos)
            assert m, "transcript_id entry missing in GTF file \"%s\", line \"%s\"" %(in_gtf, line)
            tr_id = m.group(1)
            # GTF coordinates are 1-based and inclusive.
            ex_len = feat_e - feat_s + 1
            tr2len_dic[tr_id] = tr2len_dic.get(tr_id, 0) + ex_len
            if tr2exc_dic is not None:
                tr2exc_dic[tr_id] = tr2exc_dic.get(tr_id, 0) + 1
    assert tr2len_dic, "No IDs read into dictionary (input file \"%s\" empty or malformatted?)" % (in_gtf)
    return tr2len_dic
################################################################################
def rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic,
id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic,
max_exb_dist=10,
exid2exnr_dic=False,
exid2trid_dic=False):
"""
Remove exon border pairs from id2ids_dic if the sites are too far
away from the matching exon borders (supporting the pair).
>>> id2ids_dic = {'id1': ['id2'], 'id2': ['id1']}
>>> id2exids_dic = {'id1': ['t1_e1'], 'id2': ['t1_e2']}
>>> id2gen_se_dic = {'id1': [1980, 1990], 'id2': [3005, 3025]}
>>> id2gen_cp_dic = {'id1': 1985, 'id2': 3015}
>>> exid2gen_se_dic = {'t1_e1': [1000, 2000], 't1_e2': [3000, 4000], 't1_e3': [5000, 6000]}
>>> exid2exnr_dic = {'t1_e1': 1, 't1_e2': 2, 't1_e3' : 3}
>>> exid2trid_dic = {'t1_e1': 't1', 't1_e2': 't1', 't1_e3': 't1'}
>>> rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic, id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic, max_exb_dist=10, exid2exnr_dic=exid2exnr_dic, exid2trid_dic=exid2trid_dic)
{'id1': ['id2'], 'id2': ['id1']}
>>> rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic, id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic, max_exb_dist=9, exid2exnr_dic=exid2exnr_dic, exid2trid_dic=exid2trid_dic)
{}
>>> id2exids_dic = {'id1': ['t1_e1'], 'id2': ['t1_e3']}
>>> rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic, id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic, max_exb_dist=10, exid2exnr_dic=exid2exnr_dic, exid2trid_dic=exid2trid_dic)
{}
>>> id2ids_dic = {'id1': ['id2', 'id3'], 'id2': ['id1'], 'id3': ['id1']}
>>> id2exids_dic = {'id1': ['t1_e1'], 'id2': ['t1_e2'], 'id3': ['t1_e1']}
>>> id2gen_se_dic = {'id1': [1980, 1990], 'id2': [3005, 3025], 'id2': [1970, 1980]}
>>> id2gen_cp_dic = {'id1': 1985, 'id2': 3015, 'id1': 1975}
>>> rem_exb_pairs_exb_dist(id2ids_dic, id2exids_dic, id2gen_se_dic, id2gen_cp_dic, exid2gen_se_dic, max_exb_dist=10, exid2exnr_dic=exid2exnr_dic, exid2trid_dic=exid2trid_dic)
{'id1': ['id2'], 'id2': ['id1']}
"""
assert exid2exnr_dic, "exid2exnr_dic empty"
assert exid2trid_dic, "exid2trid_dic empty"
rem_sids_list = []
for sid1 in id2ids_dic:
new_con_list = []
exl1 = id2exids_dic[sid1]
for sid2 in id2ids_dic[sid1]:
exl2 = id2exids_dic[sid2]
# Compare the two exon ID lists.
for exid1 in exl1:
exnr1 = exid2exnr_dic[exid1]
trid1 = exid2trid_dic[exid1]
for exid2 in | |
<reponame>erosson/pypoe-json
"""
Overview
===============================================================================
+----------+------------------------------------------------------------------+
| Path | PyPoE/poe/file/specification/data/stable.py |
+----------+------------------------------------------------------------------+
| Version | 1.0.0a0 |
+----------+------------------------------------------------------------------+
| Revision | $Id: bca717dbf9fd48029915b5d61aaf47c05ec55afd $ |
+----------+------------------------------------------------------------------+
| Author | Omega_K2 |
+----------+------------------------------------------------------------------+
Description
===============================================================================
Contains the specification for the stable version of the game.
Please see the following for more details:
:py:mod:`PyPoE.poe.file.specification.fields`
Information about the Field classes
:py:mod:`PyPoE.poe.file.specification`
Specification loader
Agreement
===============================================================================
See PyPoE/LICENSE
"""
# =============================================================================
# Imports
# =============================================================================
# 3rd-party
from PyPoE.poe.file.specification.fields import *
# self
# =============================================================================
# Globals
# =============================================================================
__all__ = ['specification', ]
specification = Specification({
'AbyssObjects.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='MetadataFile',
type='ref|string',
file_path=True,
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='Unknown13',
type='int',
),
Field(
name='Unknown14',
type='int',
),
Field(
name='Unknown15',
type='int',
),
Field(
name='Unknown16',
type='int',
),
Field(
name='Unknown17',
type='int',
),
Field(
name='Unknown18',
type='int',
),
Field(
name='Unknown19',
type='int',
),
Field(
name='Unknown20',
type='int',
),
),
),
'AbyssRegions.dat': File(
fields=(
),
),
'AbyssTheme.dat': File(
fields=(
),
),
'AccountQuestFlags.dat': File(
fields=(
),
),
'AchievementItemRewards.dat': File(
fields=(
Field(
name='AchievementItemsKey',
type='ulong',
key='AchievementItems.dat',
),
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
Field(
name='Message',
type='ref|string',
),
),
),
'AchievementItems.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Name',
type='ref|string',
),
Field(
name='CompletionsRequired',
type='int',
),
Field(
name='AchievementsKey',
type='ulong',
key='Achievements.dat',
),
# Todo some kind of flag related to "all"
Field(
name='Flag0',
type='bool',
),
# Added in ~3.4.x
Field(
name='Flag1',
type='bool',
),
Field(
name='Flag2',
type='bool',
),
),
),
'AchievementSetRewards.dat': File(
fields=(
Field(
name='AchievementSetsDisplayKey',
type='int',
key='AchievementSetsDisplay.dat',
key_id='Id',
key_offset=1,
),
Field(
name='Unknown1',
type='int',
),
Field(
name='BaseItemTypesKeys',
type='ref|list|ulong',
key='BaseItemTypes.dat',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Message',
type='ref|string',
),
Field(
name='DDSFile',
type='ref|string',
file_path=True,
file_ext='.dds',
),
),
),
'AchievementSets.dat': File(
fields=(
),
),
'AchievementSetsDisplay.dat': File(
fields=(
Field(
name='Id',
type='int',
unique=True,
),
Field(
name='Title',
type='ref|string',
),
),
),
'Achievements.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Description',
type='ref|string',
),
Field(
name='AchievementSetsDisplayKey',
type='int',
key='AchievementSetsDisplay.dat',
key_id='Id',
),
Field(
name='Objective',
type='ref|string',
),
Field(
name='UnknownUnique',
type='int',
unique=True,
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Flag1',
type='bool',
),
Field(
name='Flag2',
type='bool',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Flag3',
type='bool',
),
Field(
name='Flag4',
type='bool',
),
Field(
name='Unknown1',
type='ref|string',
),
Field(
name='Flag5',
type='bool',
),
Field(
name='Flag6',
type='bool',
),
Field(
name='Flag7',
type='bool',
),
Field(
name='Unknown2',
type='ref|string',
),
),
),
'ActiveSkillTargetTypes.dat': File(
fields=(
),
),
'ActiveSkillType.dat': File(
fields=(
),
),
'ActiveSkills.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='DisplayedName',
type='ref|string',
),
Field(
name='Description',
type='ref|string',
),
Field(
name='Index3',
type='ref|string',
),
Field(
name='Icon_DDSFile',
type='ref|string',
file_path=True,
file_ext='.dds',
),
# keys to (empty) ActiveSkillTargetTypes.dat with offset 1
Field(
name='ActiveSkillTargetTypes',
type='ref|list|uint',
),
# keys to (empty) ActiveSkillType.dat with offset 1
Field(
name='ActiveSkillTypes',
type='ref|list|uint',
),
Field(
name='WeaponRestriction_ItemClassesKeys',
type='ref|list|ulong',
key='ItemClasses.dat',
),
Field(
name='WebsiteDescription',
type='ref|string',
),
Field(
name='WebsiteImage',
type='ref|string',
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Unknown0',
type='ref|string',
),
Field(
name='Flag1',
type='bool',
),
Field(
name='SkillTotemId',
type='int',
description='This links to SkillTotems.dat, but the number mayexceed the number of entries; in that case it is player skill.',
),
# key = SkillTotems.dat
# key_offset = 1
Field(
name='IsManuallyCasted',
type='bool',
),
Field(
name='Input_StatKeys',
type='ref|list|ulong',
key='Stats.dat',
description='Stats that will modify this skill specifically',
),
Field(
name='Output_StatKeys',
type='ref|list|ulong',
key='Stats.dat',
description='Stat an input stat will be transformed into',
),
Field(
name='MinionActiveSkillTypes',
type='ref|list|int',
description='ActiveSkillTypes of skills of minions summoned by this skill',
),
Field(
name='Flag2',
type='bool',
),
Field(
name='Flag3',
type='bool',
),
Field(
name='Keys0',
type='ref|list|ulong',
),
# 3.8
Field(
name='Unknown1',
type='int',
),
# 3.9
Field(
name='Key0',
type='ulong',
),
# 3.10
Field(
name='Flag4',
type='bool',
),
# 3.11
Field(
name='AIFile',
type='ref|string',
file_path=True,
file_ext='.ai',
),
),
),
'AddBuffToTargetVarieties.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown1',
type='ref|list|int',
),
Field(
name='StatsKeys',
type='ref|list|ulong',
key='Stats.dat',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='ref|list|int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
# 3.11
Field(
name='Unknown7',
type='ref|list|int',
),
),
),
'AdditionalLifeScaling.dat': File(
fields=(
Field(
name='IntId',
type='int',
),
Field(
name='ID',
type='ref|string',
),
Field(
name='DatFile',
type='ref|string',
file_path=True,
file_ext='.dat',
),
),
),
'AdditionalLifeScalingPerLevel.dat': File(
fields=(
),
),
'AdditionalMonsterPacksFromStats.dat': File(
fields=(
Field(
name='StatsKey',
type='ulong',
key='Stats.dat',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='MonsterPacksKeys',
type='ref|list|ulong',
key='MonsterPacks.dat',
),
# TODO: Enum
Field(
name='AdditionalMonsterPacksStatMode',
type='int',
),
Field(
name='PackCountStatsKey',
type='ulong',
key='Stats.dat',
),
Field(
name='StatsKeys',
type='ref|list|ulong',
key='Stats.dat',
),
Field(
name='StatsValues',
type='ref|list|int',
),
Field(
name='Unknown7',
type='int',
),
),
),
'AdditionalMonsterPacksStatMode.dat': File(
fields=(
),
),
'AdvancedSkillsTutorial.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Keys1',
type='ref|list|ulong',
),
Field(
name='Key2',
type='ref|list|ulong',
),
Field(
name='Description',
type='ref|string',
),
Field(
name='International_BK2File',
type='ref|string',
file_path=True,
file_ext='.bk2',
),
Field(
name='Key0',
type='ulong',
),
Field(
name='China_BK2File',
type='ref|string',
file_path=True,
file_ext='.bk2',
),
Field(
name='CharactersKey',
type='ref|list|ulong',
key='Characters.dat',
),
),
),
'AfflictionBalancePerLevel.dat': File(
fields=(
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='float',
),
Field(
name='Unknown2',
type='float',
),
Field(
name='Unknown3',
type='float',
),
Field(
name='Unknown4',
type='float',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='Unknown13',
type='float',
),
Field(
name='Unknown14',
type='float',
),
),
),
'AfflictionEndgameWaveMods.dat': File(
fields=(
Field(
name='ModsKey',
type='ulong',
key='Mods.dat',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
),
),
'AfflictionFixedMods.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='ModsKey',
type='ulong',
key='Mods.dat',
),
),
),
'AfflictionRandomModCategories.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Unknown1',
type='byte',
),
),
),
'AfflictionRewardMapMods.dat': File(
fields=(
Field(
name='ModsKey',
type='ulong',
key='Mods.dat',
),
),
),
'AfflictionRewardTypeVisuals.dat': File(
fields=(
Field(
name='AfflictionRewardTypes',
type='int',
key='AfflictionRewardTypes.dat',
),
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Name',
type='ref|string',
),
),
),
'AfflictionRewardTypes.dat': File(
fields=(
),
),
'AfflictionSplitDemons.dat': File(
fields=(
Field(
name='Unknown0',
type='int',
),
Field(
name='MonsterVarietiesKey',
type='ulong',
key='MonsterVarieties.dat',
),
Field(
name='AfflictionRandomModCategoriesKey',
type='ulong',
key='AfflictionRandomModCategories.dat',
),
),
),
'AfflictionStartDialogue.dat': File(
fields=(
Field(
name='Key0',
type='ulong',
),
Field(
name='NPCTextAudioKey',
type='ulong',
key='NPCTextAudio.dat',
),
Field(
name='Unknown0',
type='ref|list|ulong',
),
),
),
'AlternateBehaviourTypes.dat': File(
fields=(
),
),
'AlternatePassiveAdditions.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='AlternateTreeVersionsKey',
type='ulong',
key='AlternateTreeVersions.dat',
),
Field(
name='SpawnWeight',
type='int',
),
Field(
name='StatsKeys',
type='ref|list|ulong',
key='Stats.dat',
),
Field(
name='Stat1Min',
type='int',
),
Field(
name='Stat1Max',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
Field(
name='Unknown8',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='PassiveType',
type='ref|list|int',
),
Field(
name='Unknown11',
type='int',
),
),
),
'AlternatePassiveSkills.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='AlternateTreeVersionsKey',
type='ulong',
key='AlternateTreeVersions.dat',
),
Field(
name='Name',
type='ref|string',
),
Field(
name='PassiveType',
type='ref|list|int',
),
Field(
name='StatsKeys',
type='ref|list|ulong',
key='Stats.dat',
),
Field(
name='Stat1Min',
type='int',
),
Field(
name='Stat1Max',
type='int',
),
Field(
name='Stat2Min',
type='int',
),
Field(
name='Stat2Max',
type='int',
),
Field(
name='Unknown9',
type='int',
),
Field(
name='Unknown10',
type='int',
),
Field(
name='Unknown11',
type='int',
),
Field(
name='Unknown12',
type='int',
),
Field(
name='Unknown13',
type='int',
),
Field(
name='Unknown14',
type='int',
),
Field(
name='Unknown15',
type='int',
),
Field(
name='Unknown16',
type='int',
),
Field(
name='SpawnWeight',
type='int',
),
Field(
name='Unknown18',
type='int',
),
Field(
name='RandomMin',
type='int',
),
Field(
name='RandomMax',
type='int',
),
Field(
name='FlavourText',
type='ref|string',
),
Field(
name='DDSIcon',
type='ref|string',
file_path=True,
file_ext='.dds',
),
Field(
name='AchievementItemsKeys',
type='ref|list|ulong',
key='AchievementItems.dat',
),
# 3.11
Field(
name='Unknown19',
type='int',
),
Field(
name='Unknown20',
type='int',
),
),
),
'AlternateQualityCurrencyDecayFactors.dat': File(
fields=(
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
Field(
name='Factor',
type='int',
),
),
),
'AlternateQualityTypes.dat': File(
fields=(
Field(
name='StatsKey',
type='ulong',
key='Stats.dat',
),
Field(
name='Description',
type='ref|string',
),
Field(
name='BaseItemTypesKey',
type='ulong',
key='BaseItemTypes.dat',
),
# TODO 3.12
# Verify that this is actually a mods key still and not tags
# or seomthing else
Field(
name='ModsKey',
type='ulong',
key='Mods.dat',
),
),
),
'AlternateSkillTargetingBehaviours.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Key0',
type='ulong',
),
Field(
name='Unknown0',
type='int',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='ref|list|int',
),
),
),
'AlternateTreePassiveSizes.dat': File(
fields=(
),
),
'AlternateTreeVersions.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='Flag0',
type='byte',
),
Field(
name='Flag1',
type='byte',
),
Field(
name='Unknown1',
type='int',
),
Field(
name='Unknown2',
type='int',
),
Field(
name='Unknown3',
type='int',
),
Field(
name='Unknown4',
type='int',
),
Field(
name='Unknown5',
type='int',
),
Field(
name='Unknown6',
type='int',
),
Field(
name='Unknown7',
type='int',
),
),
),
'Animation.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True
),
Field(
name='Flag0',
type='bool',
),
Field(
name='Flag1',
type='bool',
),
Field(
name='IntId',
type='int',
unique=True,
),
Field(
name='Flag2',
type='bool',
),
Field(
name='Mainhand_AnimationKey',
type='ref|string',
key='Animation.dat',
key_id='Id',
),
Field(
name='Offhand_AnimationKey',
type='ref|string',
key='Animation.dat',
key_id='Id',
),
),
),
'ApplyDamageFunctions.dat': File(
fields=(
Field(
name='Id',
type='ref|string',
unique=True,
),
Field(
name='StatsKeys',
type='ref|list|ulong',
key='Stats.dat'
),
Field(
name='Flag0',
type='bool',
),
| |
import glob
from pathlib import Path
import asyncio
import csv
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
import traceback
from django.contrib.staticfiles import finders
from praw import Reddit
from praw.const import API_PATH
from django.urls import reverse
from django import http
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import last_modified
from django.utils.decorators import method_decorator
from django.core.cache import cache
import time
from datetime import datetime, timezone
from pprint import pprint
from django.views import generic, View
from .models import Player, Settings, Priority
from .ajaxmixin import AJAXListMixin, AJAXSingleObjectMixin
from .sheets import GoogleSheetsData
from .imgur import Imgur
from .screenshot import Screenshot
from .tweet import Tweeter
from redditnfl.nfltools import nflteams
from redditnfl.nfltools import draft
from django.db import transaction
def add_common_context(context):
    """
    Add template context entries shared by all views.

    Adds player positions, team info sorted by mascot (with the AFC/NFC
    conference pseudo-teams removed), the Settings row, an empty message
    list and the next draft pick. Returns the (mutated) context dict.
    """
    settings = Settings.objects.all()[0]
    context['positions'] = Player.POSITIONS
    # Drop the AFC/NFC conference pseudo-teams and sort by mascot.
    context['teams'] = sorted(filter(lambda v: v[1]['short'] not in ('AFC', 'NFC'), nflteams.fullinfo.items()), key=lambda v: v[1]['mascot'])
    context['settings'] = settings
    context['msgs'] = []
    # Reuse the settings row fetched above instead of re-querying
    # (the old code hit the database a second time here).
    context['next_pick'] = draft.round_pick(settings.draft_year, min(256, settings.last_submitted_overall + 1))
    return context
def latest_update(*args, **kwargs):
    """Timestamp of the last data import (used for Last-Modified caching)."""
    current_settings = Settings.objects.all()[0]
    return current_settings.last_updated
@method_decorator(login_required, name='dispatch')
class IndexView(generic.TemplateView):
    """Landing page; its template receives the common draft context."""
    template_name = 'draftcardposter/index.html'

    def get_context_data(self, *args, **kwargs):
        base_context = super().get_context_data(*args, **kwargs)
        return add_common_context(base_context)
class MissingPhotos(generic.TemplateView):
    """List players without a photo file and photo files without a player."""
    template_name = 'draftcardposter/missingphotos.html'

    def get_context_data(self, *args, **kwargs):
        context = super(MissingPhotos, self).get_context_data(*args, **kwargs)
        # Fetch the Settings layout once (the old code re-queried
        # Settings for every player and again per static directory).
        layout = Settings.objects.all()[0].layout
        playerimgs = 'draftcardposter/' + layout + '/playerimgs'
        missing = []
        all_imgs = set()
        for player in Player.objects.all().order_by('name'):
            all_imgs.add(player.data['filename'])
            # Skip players with no buzz and no draft/rank data.
            if player.data.get('buzzscore', '0') == '0' and not player.data.get('draft.overall') and not player.data.get('RANK'):
                continue
            photo = playerimgs + '/' + player.data['filename'] + '.jpg'
            if not finders.find(photo):
                missing.append(player)
        context['missing'] = missing
        # Photo files on disk that no player references.
        surplus = []
        for basedir in map(Path, finders.searched_locations):
            d = basedir / 'draftcardposter' / layout / 'playerimgs'
            for f in map(Path, glob.glob("%s/*.jpg" % d)):
                if f.stem not in all_imgs:
                    surplus.append(f)
        context['surplus'] = sorted(surplus)
        return add_common_context(context)
class MissingCsv(generic.ListView):
    """CSV dump of all players (as text/plain) with a photo_found flag."""
    model = Player
    context_object_name = 'players'

    def get(self, *args, **kwargs):
        settings = Settings.objects.all()[0]
        # Build the photo path prefix once; the old code fetched
        # Settings again for every single player.
        playerimgs = 'draftcardposter/' + settings.layout + '/playerimgs'
        response = http.HttpResponse(content_type='text/plain')
        writer = csv.DictWriter(response, ['ds.playerid', 'name', 'position', 'college', 'jersey', 'filename', 'buzz', 'draft.overall', 'photo_found'])
        writer.writeheader()
        for player in self.get_queryset():
            photo = playerimgs + '/' + player.data['filename'] + '.jpg'
            found = finders.find(photo) is not None
            writer.writerow({
                'ds.playerid': player.data.get('ds.playerid', '0'),
                'name': player.name,
                'position': player.position,
                'college': player.college,
                'jersey': player.data.get('ds.jersey', ''),
                'filename': player.data.get('filename', ''),
                'buzz': player.data.get('buzzscore_rel', ''),
                'draft.overall': player.data.get('draft.overall', ''),
                'photo_found': found,
            })
        return response
class PhotoExists(View):
    """Answer with a tiny XML document whether a photo file exists."""

    def get(self, *args, **kwargs):
        settings = Settings.objects.all()[0]
        filename = self.kwargs.get('filename', 'xyzzy')
        photo = 'draftcardposter/%s/playerimgs/%s.jpg' % (settings.layout, filename)
        has_photo = finders.find(photo) is not None
        body = '<?xml version="1.0" encoding="UTF-8"?>'+"\n"+'<HasPhoto>%s</HasPhoto>' % has_photo
        return http.HttpResponse(body, content_type='text/xml')
@method_decorator(last_modified(latest_update), name='dispatch')
class HasPhoto(generic.DetailView):
    """Answer with XML whether the player with the given ds.playerid has a photo."""
    model = Player
    context_object_name = 'player'

    def get_object(self, *args, **kwargs):
        # NOTE(review): despite the name, this returns a bool (photo
        # present or not), not a model instance.
        wanted_id = self.kwargs.get('dsplayerid', None)
        for player in self.get_queryset():
            if player.data and player.data.get('ds.playerid', False) == wanted_id:
                settings = Settings.objects.all()[0]
                playerimgs = 'draftcardposter/' + settings.layout + '/playerimgs'
                # Players without a filename entry are skipped, so the
                # search continues with the next player.
                if 'filename' in player.data:
                    photo = playerimgs + '/' + player.data['filename'] + '.jpg'
                    return finders.find(photo) is not None
        raise http.Http404("Player does not exist")

    def get(self, *args, **kwargs):
        has_photo = self.get_object()
        body = '<?xml version="1.0" encoding="UTF-8"?>'+"\n"+'<HasPhoto>%s</HasPhoto>' % has_photo
        return http.HttpResponse(body, content_type='text/xml')
@method_decorator(last_modified(latest_update), name='dispatch')
class PlayerList(AJAXListMixin, generic.ListView):
    # List of all players; the AJAX mixin handles serialization.
    # Cached via Last-Modified from the Settings update timestamp.
    model = Player
    context_object_name = 'players'
@method_decorator(last_modified(latest_update), name='dispatch')
class PlayerDetail(AJAXSingleObjectMixin, generic.DetailView):
    # Single-player view; the AJAX mixin handles serialization.
    # Cached via Last-Modified from the Settings update timestamp.
    model = Player
    context_object_name = 'player'
def remove_na(d):
    """
    Delete placeholder entries (values "n/a" or "--", matched
    case-insensitively and ignoring surrounding whitespace) from
    dict d, in place. Returns d.
    """
    na_keys = [key for key, val in d.items()
               if val.lower().strip() in ('n/a', '--')]
    for key in na_keys:
        del d[key]
    return d
@method_decorator(login_required, name='dispatch')
class Picks(View):
    """JSON endpoint with the current draft year, next pick and all picks."""

    def get(self, request, *args, **kwargs):
        settings = Settings.objects.all()[0]
        # The previous ajax-only guard ("if not request.is_ajax() and
        # False:") was dead code and raised the nonexistent
        # http.Http400, so it has been removed.
        data = {
            'current_year': settings.draft_year,
            # Reuse the settings row fetched above instead of
            # re-querying the database.
            'next_pick': draft.round_pick(settings.draft_year, min(256, settings.last_submitted_overall + 1)),
            'picks': draft.drafts
        }
        return http.JsonResponse(data)
@method_decorator(transaction.atomic, name='dispatch')
class UpdatePlayers(View):
    """Re-import priorities and players from the configured Google Sheet.

    Both tables are wiped and refilled; the transaction wrapper ensures
    a failed import does not leave them half-empty.
    """
    def get(self, request, *args, **kwargs):
        context = add_common_context({})
        settings = Settings.objects.all()[0]
        sheets = GoogleSheetsData(settings.sheet_id, parseargs=False)
        # Full refresh: drop all priorities, then re-read them from the sheet.
        Priority.objects.all().delete()
        i = 0
        for prio in sheets.get_range_dict(settings.prio_range_def):
            # Sheet column headers become model field names (lowercased).
            lowercase_dict = dict([(k.lower(), v) for (k,v) in prio.items()])
            # Skip empty rows and rows with an empty position value.
            if len(lowercase_dict) == 0 or len(lowercase_dict['position']) == 0:
                continue
            p = Priority(**lowercase_dict)
            p.save()
            i += 1
        context['msgs'].append(('success', 'Updated %d priorities' % i))
        # Full refresh of the players table as well.
        Player.objects.all().delete()
        players = sheets.get_range_dict(settings.range_def)
        i = 0
        for player in players:
            p = Player(name=player['name'], position=player['pos'], college=player['college'])
            # Remaining columns (minus the model fields and N/A values)
            # are kept in the free-form data dict.
            del(player['name'])
            del(player['pos'])
            del(player['college'])
            player = remove_na(player)
            p.data = player
            p.save()
            i += 1
        context['msgs'].append(('success', 'Updated %d player%s' % (i, '' if i==1 else 's')))
        settings.last_updated = datetime.now(timezone.utc)
        settings.save()
        # Invalidate cached views so they pick up the new Last-Modified.
        cache.clear()
        return render(request, 'draftcardposter/index.html', context=context)
def player_if_found(name, college):
    """Return the Player uniquely matching name and college, else None."""
    matches = Player.objects.filter(name=name, college=college)
    if len(matches) != 1:
        return None
    return matches[0]
def render_template(type_, context):
    """
    Render the configured template of the given type (e.g. "imgur",
    "reddit_title", "tweet") with context. On any rendering error the
    traceback is printed and '' is returned (best effort by design).
    """
    s = Settings.objects.all()[0]
    try:
        template_name = getattr(s, type_ + "_template")
        rendered = render_to_string('draftcardposter/layout/' + template_name, context)
        return rendered.strip()
    except Exception:
        traceback.print_exc()
        return ''
@method_decorator(login_required, name='dispatch')
class SubmitView(View):
    """Final submission endpoint.

    Renders titles/captions from the configured templates, screenshots
    the card, then cross-posts it to Imgur, Reddit (link or native image
    post), a Reddit live thread and Twitter, as enabled in Settings.
    """
    def post(self, request, *args, **kwargs):
        """Validate the posted pick and fan it out to every outlet.

        Per-outlet failures are collected into context['msgs'] rather
        than aborting the request; the submission is recorded on the
        Settings row either way.
        """
        s = Settings.objects.all()[0]
        context = add_common_context({})
        url = request.POST.get('imageurl', None)
        overall = request.POST.get('overall', None)
        name = request.POST.get('name', None)
        college = request.POST.get('college', None)
        position = request.POST.get('position', None)
        # Raises KeyError for an unknown team short-code before the
        # validation below runs.
        team = nflteams.fullinfo[request.POST.get('team', None)]
        if not url or not overall or not team or not name or not college or not position:
            raise Exception("AAAAAAAAA")
        context['cardurl'] = url
        context['player'] = player_if_found(name, college)
        context['name'] = name
        context['college'] = college
        context['position'] = position
        context['team'] = team
        context['overall'] = int(overall)
        context['round'], context['pick'] = draft.round_pick(s.draft_year, int(overall))
        fn = 'temp-card.png'
        # Screenshot the .html variant of the card route (cached).
        image_data = get_and_cache_sshot(url.replace('.png', '.html'))
        with open(fn, 'wb') as fp:
            fp.write(image_data)
        try:
            if s.image_host == Settings.IMGUR:
                imgur_title = render_template("imgur", context)
                imgur_album = render_template("imgur_album", context)
                # NOTE(review): upload_to_imgur(album, title, fn) is
                # called with the card *URL* as its third argument —
                # confirm the Imgur wrapper accepts a URL where a
                # filename is expected.
                ret = self.upload_to_imgur(imgur_album, imgur_title, url)
                context['imagetitle'] = imgur_title
                context['imageurl'] = ret['link']
            permalink = None
            reddit_title = render_template("reddit_title", context)
            if s.posting_enabled and reddit_title:
                if s.image_host == Settings.REDDIT:
                    # Native Reddit image post from the local temp file.
                    submission = self.submit_img_to_reddit_as_pic(s.subreddit, reddit_title, fn)
                else:
                    # Link post pointing at the hosted (e.g. Imgur) image.
                    submission = self.submit_img_to_reddit(s.subreddit, reddit_title, context['imageurl'])
                permalink = submission._reddit.config.reddit_url + submission.permalink
                context['submission'] = submission
                context['permalink'] = permalink
                context['reddit_title'] = reddit_title
            reddit_live_msg = render_template("reddit_live", context)
            if s.live_thread_id and reddit_live_msg:
                reddit_live_thread = self.post_to_live_thread(s.live_thread_id, reddit_live_msg)
                context['reddit_live_msg'] = reddit_live_msg
                context['reddit_live_thread'] = reddit_live_thread
            tweet = render_template("tweet", context)
            if tweet:
                tweeturl = self.submit_twitter(tweet, image_data)
                context['tweet'] = tweet
                context['tweeturl'] = tweeturl
        except Exception as e:
            # Best-effort posting: surface the failure on the result page
            # but still record the submission below.
            context['msgs'].append(('danger', str(e)))
            context['msgs'].append(('danger', traceback.format_exc()))
            traceback.print_exc()
        s.last_submitted_overall = overall
        s.save()
        return render(request, 'draftcardposter/submit.html', context=context)
    def upload_to_imgur(self, album, title, fn):
        """Upload *fn* to Imgur under *album* with *title*; returns the
        API response dict (contains 'link')."""
        imgur = Imgur()
        return imgur.upload(fn, album, title)
    def submit_img_to_reddit(self, srname, title, url):
        """Submit a link post pointing at *url* to /r/*srname*."""
        r = Reddit('draftcardposter')
        sub = r.subreddit(srname)
        return sub.submit(title, url=url)
    def submit_img_to_reddit_as_pic(self, srname, title, fn):
        """Submit the local image file *fn* as a native Reddit image post."""
        r = Reddit('draftcardposter')
        sub = r.subreddit(srname)
        return sub.submit_image(title, fn, timeout=60)
    def post_to_live_thread(self, live_thread_id, body):
        """Append *body* as an update to the given Reddit live thread."""
        r = Reddit('draftcardposter')
        live_thread = r.live(live_thread_id)
        live_thread.contrib.add(body)
        return live_thread
    def submit_twitter(self, status, imagedata):
        """Tweet *status* with the card image attached; returns the
        tweet's public URL."""
        t = Tweeter()
        print("Tweeting: " + status)
        resp = t.tweet(status, imagedata)
        return "https://twitter.com/statuses/%s" % resp['id_str']
@method_decorator(login_required, name='dispatch')
class PreviewPost(View):
    """Preview endpoint: builds the same context as a real submission
    and renders every outlet template so the user can proof-read the
    post text before submitting for real."""
    def post(self, request, *args, **kwargs):
        """Validate the form, derive the overall pick number, pre-render
        all outlet templates and show the preview page.

        A missing/empty field short-circuits back to the index page with
        an error message.
        """
        settings = Settings.objects.all()[0]
        context = add_common_context({})
        for k in ('name', 'college', 'position', 'round', 'pick', 'team'):
            if k not in request.POST or not request.POST[k]:
                context['msgs'].append(('danger', 'You didn\'t set %s' % k))
                return render(request, 'draftcardposter/index.html', context=context)
            context[k] = request.POST[k]
        player = player_if_found(name=request.POST['name'], college=request.POST['college'])
        context['player'] = player
        team = nflteams.fullinfo[request.POST['team']]
        context['team'] = team
        # Convert (round, pick) to the overall selection number for this
        # draft year; None means the slot does not exist at all.
        overall = draft.overall(settings.draft_year, int(context['round']), int(context['pick']))
        if overall is None:
            raise Exception("Pick {round}.{pick} does not exist".format(**context))
        context['overall'] = overall
        # Placeholder permalink so templates that embed it render sensibly.
        context['permalink'] = 'https://reddit.com/r/'+settings.subreddit+'/comments/_____/'
        for type_ in ('tweet', 'reddit_live', 'reddit_title', 'imgur'):
            context[type_] = render_template(type_, context)
        # Warn about slots that likely don't exist (forfeited/moved) or
        # are merely unusual (compensatory / JC-2A).
        pick_type = draft.pick_type(settings.draft_year, int(context['round']), int(context['pick']))
        if pick_type and pick_type in (draft.FORFEITED, draft.UNKNOWN, draft.MOVED):
            context['msgs'].append(('warning', 'I don\'t think round {round} has a pick #{pick}. Are you sure? It has either been forfeited, moved or something else. This will probably mess up the overall pick.'.format(**context)))
        elif pick_type and pick_type in (draft.COMP, draft.JC2A):
            context['msgs'].append(('info', 'This is a compensatory or JC-2A pick. Just so you\'re aware'))
        url = reverse('player-card', kwargs={'overall':overall, 'team':team['short'], 'pos':context['position'], 'name':context['name'], 'college':context['college'], 'fmt':'png'})
        fullurl = request.build_absolute_uri(url)
        context['imageurl'] = fullurl
        return render(request, 'draftcardposter/preview.html', context=context)
def split_name(name):
    """Split a full name into [first, rest] at the first space only, so
    multi-word surnames stay together; a single-word name yields a
    one-element list."""
    return name.split(' ', maxsplit=1)
class RandomCard(View):
    """Redirect to a card for a random stored player, team and pick."""
    def get(self, request, *args, **kwargs):
        import random
        # order_by('?') asks the database for one random row.
        p = Player.objects.all().order_by('?')[0]
        t = random.choice(list(nflteams.mascots.keys()))
        # Must be a temporary (302) redirect: a permanent (301) redirect
        # is cached by browsers, which would pin every future visit to
        # the same "random" card.
        return redirect('player-card', permanent=False, **{
            'overall': str(random.randint(1, 256)),
            'team': t,
            'pos': p.position,
            'name': p.name,
            'college': p.college,
            'fmt': 'png'
        })
def subdivide_stats(data):
    """
    If a key contains a ., create a sub-dict with the first part as parent key

    E.g. {'passing.yards': 10, 'age': 22} ->
         {'passing': {'yards': 10}, 'age': 22}
    """
    ret = {}
    for key, value in data.items():
        if '.' in key:
            # Split on the FIRST dot only: 'a.b.c' -> parent 'a',
            # subkey 'b.c'.  (The previous maxsplit=2 produced three
            # parts for two-dot keys and raised ValueError on unpack.)
            parent, subkey = key.split('.', 1)
            ret.setdefault(parent, {})[subkey] = value
        else:
            ret[key] = value
    return ret
def current_time_in_millis():
    """Current wall-clock time as whole milliseconds since the epoch."""
    seconds = time.time()
    return int(round(seconds * 1000))
def get_and_cache_sshot(fullurl):
    """Return PNG bytes for *fullurl*, taking a fresh screenshot on a
    cache miss.

    Screenshots are cached for settings.cache_ttl seconds so repeated
    requests for the same card do not relaunch the headless browser.
    """
    settings = Settings.objects.all()[0]
    png = cache.get(fullurl)
    start = current_time_in_millis()
    if not png:
        print("PNG %s not cached, regenerating" % fullurl)

        async def take_screenshot(url):
            sshot = await Screenshot.create()
            image = await sshot.sshot_url_to_png(url, 0.3)
            await sshot.close()
            return image

        png = asyncio.run(take_screenshot(fullurl))
        cache.set(fullurl, png, settings.cache_ttl)
    print("Retrieved %s in %d ms" % (fullurl, (current_time_in_millis() - start)))
    return png
class PlayerCard(View):
def get(self, request, overall, team, pos, name, college, fmt, *args, **kwargs):
settings = Settings.objects.all()[0]
if fmt == 'png':
url = reverse('player-card', kwargs={'overall':overall, 'team':team, 'pos':pos, 'name':name, 'college':college, 'fmt':'html'})
fullurl = request.build_absolute_uri(url)
png = get_and_cache_sshot(fullurl)
return HttpResponse(png, content_type="image/png")
else:
player = player_if_found(name, college)
misprint = | |
'<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861820374':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u8bb8\u660c\u5e02')},
'861820375':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'86181657':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181655':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861828883':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u8fea\u5e86\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861825037':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u9f99\u5ca9\u5e02')},
'86181653':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'86181300':{'en': 'He<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861817758':{'en': 'Guigang, Guangxi', 'zh': u('\u5e7f\u897f\u8d35\u6e2f\u5e02')},
'861817759':{'en': 'Guigang, Guangxi', 'zh': u('\u5e7f\u897f\u8d35\u6e2f\u5e02')},
'861828886':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861817752':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861817753':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861817750':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861817751':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861817756':{'en': 'Guigang, Guangxi', 'zh': u('\u5e7f\u897f\u8d35\u6e2f\u5e02')},
'861817757':{'en': 'Guigang, Guangxi', 'zh': u('\u5e7f\u897f\u8d35\u6e2f\u5e02')},
'861817754':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861817755':{'en': 'Guigang, Guangxi', 'zh': u('\u5e7f\u897f\u8d35\u6e2f\u5e02')},
'861812694':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861812695':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861812696':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861812697':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861812690':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'861812691':{'en': 'Yunfu, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861812692':{'en': 'Shanwei, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5c3e\u5e02')},
'861812693':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861824071':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861824070':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861824073':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861824072':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861812698':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861812699':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861824077':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861824076':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'86181663':{'en': 'Chongqing', 'zh': u('\u91cd\u5e86\u5e02')},
'86181667':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'86181664':{'en': 'Chongqing', 'zh': u('\u91cd\u5e86\u5e02')},
'86181665':{'en': 'Chongqing', 'zh': u('\u91cd\u5e86\u5e02')},
'86181668':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')},
'86181669':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'861820088':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861815471':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861815470':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861815473':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861815472':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861815475':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861815474':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861815477':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861815476':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861815479':{'en': 'Guilin, Guangxi', 'zh': u('\u5e7f\u897f\u6842\u6797\u5e02')},
'861815478':{'en': 'Guilin, Guangxi', 'zh': u('\u5e7f\u897f\u6842\u6797\u5e02')},
'861829402':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5b9a\u897f\u5e02')},
'861824863':{'en': '<NAME>', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'861818559':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861818558':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861825298':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861825299':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861825292':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861825293':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861825290':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861825291':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861825296':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861825297':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861825294':{'en': 'Zhenjiang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861825295':{'en': 'Zhenjiang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'86182313':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861818551':{'en': 'Qiandongnan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'86182311':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'86182310':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'86182317':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'86182316':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'86182315':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861818550':{'en': 'Qiandongnan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'86182649':{'en': 'Linyi, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'86182648':{'en': 'TaiAn, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'86182319':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'86182318':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')},
'86182869':{'en': 'Qianxinan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861818553':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861818552':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861818555':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861818554':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861818557':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861818556':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861820902':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u54c8\u5bc6\u5730\u533a')},
'861820567':{'en': 'Bozhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u4eb3\u5dde\u5e02')},
'861820566':{'en': 'Chizhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861820565':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861820564':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861820563':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861820562':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861820561':{'en': 'Huaibei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')},
'861820560':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861820569':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861820568':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861811574':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811575':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811576':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811577':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811570':{'en': 'HuaiAn, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861811571':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811572':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811573':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811578':{'en': 'Ch<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811579':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861810735':{'en': 'Chenzhou, Hunan', 'zh': u('\u6e56\u5357\u7701\u90f4\u5dde\u5e02')},
'861810734':{'en': 'Hengyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u8861\u9633\u5e02')},
'861810737':{'en': 'Yiyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u76ca\u9633\u5e02')},
'861810736':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5e38\u5fb7\u5e02')},
'861810731':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861810730':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861810733':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861810732':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861820743':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u6e58\u897f\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861820742':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861820741':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861820740':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861810739':{'en': 'Sh<NAME>', 'zh': u('\u6e56\u5357\u7701\u90b5\u9633\u5e02')},
'861810738':{'en': 'L<NAME>', 'zh': u('\u6e56\u5357\u7701\u5a04\u5e95\u5e02')},
'861820745':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u6000\u5316\u5e02')},
'861820744':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u5f20\u5bb6\u754c\u5e02')},
'861827666':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u767e\u8272\u5e02')},
'861811862':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861820084':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861811863':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861811860':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861811861':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861811866':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861811867':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861820085':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861811864':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861811865':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861824455':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')},
'861824942':{'en': 'Shuangyashan, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u53cc\u9e2d\u5c71\u5e02')},
'861813428':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861816109':{'en': 'Yibin, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861813420':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861813421':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861813422':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861813423':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861816104':{'en': 'Mianyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861816105':{'en': 'Mianyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861816106':{'en': 'Mianyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861816107':{'en': 'Yibin, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861825047':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861825046':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861824947':{'en': 'Harbin, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861825044':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861825043':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861825042':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861825041':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861825040':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861825049':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861825048':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86182418':{'en': 'Fuxin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')},
'86182419':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'86182414':{'en': 'Benxi, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u672c\u6eaa\u5e02')},
'86182415':{'en': 'Dandong, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u4e39\u4e1c\u5e02')},
'86182416':{'en': 'Jinzhou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u9526\u5dde\u5e02')},
'86182417':{'en': 'Yingkou, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8425\u53e3\u5e02')},
'86182410':{'en': 'Tieling, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u94c1\u5cad\u5e02')},
'86182411':{'en': 'Dalian, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u5927\u8fde\u5e02')},
'86182412':{'en': 'Anshan, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u978d\u5c71\u5e02')},
'86182413':{'en': '<NAME>', 'zh': u('\u8fbd\u5b81\u7701\u629a\u987a\u5e02')},
'861824946':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861820504':{'en': 'Zhenjiang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861824949':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')},
'861824948':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')},
'861815868':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861815869':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861815866':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'861815867':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'861815864':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861815865':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'861815862':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861815863':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861815860':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861815861':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861826248':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861826249':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861826240':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861826241':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861826242':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861826243':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861826244':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861826245':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861826246':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861826247':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861812481':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861812480':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861812483':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861812482':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861812485':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861812484':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861812487':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861812486':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861812489':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861812488':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861821906':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5c3e\u5e02')},
'861821907':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861821904':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861821905':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861821902':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861810337':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861821900':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861821901':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861810488':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')},
'861821908':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861821909':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861810339':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861825629':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861825628':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861816029':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861810338':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')},
'861825623':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'86181854':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861825621':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861825620':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'86181851':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861825626':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'86181505':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'86181852':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'86182874':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u66f2\u9756\u5e02')},
'86182875':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u4fdd\u5c71\u5e02')},
'86182656':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'86182657':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861827993':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861827992':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861827991':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5409\u5b89\u5e02')},
'861827990':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5409\u5b89\u5e02')},
'861827997':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861827996':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861827995':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861827994':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861827999':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861827998':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861829405':{'en': '<NAME>', 'zh': u('\u7518\u8083\u7701\u5e73\u51c9\u5e02')},
'861815130':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861815131':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861815132':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861815133':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861815134':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861815135':{'en': 'Xuzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861815136':{'en': 'Suqian, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861815137':{'en': 'Suqian, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861815138':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861815139':{'en': 'Yancheng, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861824451':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861824450':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861820659':{'en': 'Binzhou, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6ee8\u5dde\u5e02')},
'861824457':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'86182637':{'en': 'Zaozhuang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u67a3\u5e84\u5e02')},
'861824456':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861814429':{'en': 'Nanchong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5357\u5145\u5e02')},
'861814428':{'en': 'Yibin, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861814425':{'en': 'Mianyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861811904':{'en': 'Altay, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u52d2\u6cf0\u5730\u533a')},
'861814427':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u9042\u5b81\u5e02')},
'861814426':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861814421':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861814420':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861814423':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861814422':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861818739':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u695a\u96c4\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861818738':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861818735':{'en': 'H<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861818734':{'en': 'L<NAME>', 'zh': u('\u4e91\u5357\u7701\u4e34\u6ca7\u5e02')},
'861818737':{'en': 'Honghe, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861818736':{'en': 'Honghe, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861818731':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861818730':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861818733':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7ea2\u6cb3\u54c8\u5c3c\u65cf\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861818732':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861816959':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861816958':{'en': 'Yinchuan, Ningxia', 'zh': u('\u5b81\u590f\u94f6\u5ddd\u5e02')},
'861810508':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861810509':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861810506':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861810507':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861810504':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861810505':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861816957':{'en': 'Wuzhong, Ningxia', 'zh': u('\u5b81\u590f\u5434\u5fe0\u5e02')},
'861816956':{'en': 'Guyuan, Ningxia', 'zh': u('\u5b81\u590f\u56fa\u539f\u5e02')},
'861816955':{'en': 'Zhongwei, Ningxia', 'zh': u('\u5b81\u590f\u4e2d\u536b\u5e02')},
'861810501':{'en': | |
membrane is not None:
membrane.build(input_shape)
# last layers output shape to models output shape
self.output_shape = input_shape
    def add_layer(self, layer: Layer, activation: Membrane = None):
        """Append *layer* to the network with an optional activation
        membrane.

        A membrane of None means the layer's raw linear output is
        forwarded unchanged (see forward_propagation).
        """
        self.__layers.append(layer)
        self.__membrane.append(activation)
def forward_propagation(self, X):
caches = []
A = X
for layer_idx in range(0, len(self.__layers)):
layer = self.__layers[layer_idx]
membrane = self.__membrane[layer_idx]
print(layer)
Z, linear_cache = layer.forward_propagate(A)
print("Z: " + str(np.amax(Z)))
if membrane is not None:
print(membrane)
A, activation_cache = membrane.activate(Z)
print("A: " + str(np.amax(A)))
cache = { 'linear_cache' : linear_cache,
'activation_cache' : activation_cache }
caches.append({ 'A': A,
'Z': Z,
'cache': cache})
else:
print("Z: " + str(np.amax(Z)))
A = Z
cache = { 'linear_cache' : linear_cache,
'activation_cache' : None }
caches.append({ 'A': None,
'Z': Z,
'cache': cache})
return A, caches
def compute_cost(self, A, Y):
return 0.5 * np.sum(np.power((A - Y), 2))
def compute_loss(self, A, Y):
# np.mean(np.square(Y - A), axis=-2) <- MSE loss
return Y - A
    def backward_propagation(self, AL, caches, Y):
        """Backpropagate the loss through the network.

        :param AL: final network output from forward propagation
        :param caches: per-layer cache list returned by forward_propagation
        :param Y: ground-truth labels
        :return: list of per-layer gradient records, front-most layer first;
            each record is a single-element list wrapping a dict with
            "dZ"/"dA"/"dW"/"db".
        """
        grads = []
        L = len(self.__layers)
        m = AL.shape[1] ## figure this out
        # gradients
        dZ, dW, db = (None, None, None)
        # derivative of activation in final layer
        dAL = self.compute_loss(AL, Y)
        # seed record for the output: only the loss gradient is known here
        grad = [
            {
                "dZ": None,
                "dA": dAL,
                "dW": None,
                "db": None
            }
        ]
        grads.insert(0, grad)
        # backwards propagating the loss
        # NOTE(review): range(L-1, 0, -1) never visits layer 0, so the first
        # layer's gradients are not computed — confirm this is intended.
        for layer_idx in range(L-1, 0, -1):
            layer = self.__layers[layer_idx]
            A, Z, cache = (caches[layer_idx]['A'], caches[layer_idx]['Z'], caches[layer_idx]['cache'])
            linear_cache, activation_cache = (cache['linear_cache'], cache['activation_cache'])
            membrane = self.__membrane[layer_idx]
            if membrane is not None:
                dZ = membrane.differentiate(dAL, activation_cache)
                dAL, dW, db = layer.backward_propagate(dZ, linear_cache)
            else:
                # NOTE(review): in this branch dZ keeps its value from a
                # previous iteration (or None) and is stored in the record
                # below — verify that stale dZ is acceptable.
                dAL, dW, db = layer.backward_propagate(dAL, linear_cache)
            grad = [
                {
                    "dZ":dZ,
                    "dA":dAL,
                    "dW":dW,
                    "db":db
                }
            ]
            grads.insert(0, grad)
        return grads
    def fit(self, X=None, Y=None, epochs=1, batch_size=None, learning_rate=0.002):
        """Train the network with mini-batch gradient descent.

        :param X: training samples; trailing axes must match self.input_shape,
            leading axes are treated as batch axes
        :param Y: labels; trailing axes must match self.output_shape
        :param epochs: number of passes over the training data
        :param batch_size: mini-batch size
        :param learning_rate: step size forwarded to update_parameters
        """
        # batch_size + (time, height, width, channel)
        num_input_dimensions = len(self.input_shape)
        num_output_dimensions = len(self.output_shape)
        batch_shape = X.shape[:-num_input_dimensions]
        batch_ndim = len(batch_shape)
        num_samples = math.prod(batch_shape)
        sample_shape = X.shape[-num_input_dimensions:]
        # NOTE(review): slicing Y by batch_ndim looks inconsistent with the
        # assert below; presumably num_output_dimensions was meant — confirm.
        sample_label_shape = Y.shape[-batch_ndim:]
        assert(sample_label_shape == self.output_shape)
        batch_samples = np.zeros(shape=tuple([batch_size]) + sample_shape)
        batch_samples_labels = np.zeros(shape=tuple([batch_size]) + sample_label_shape)
        # output from the operation
        output = np.zeros(shape=batch_shape+Y.shape)
        # run the training data an epochs number of times
        for epoch in range(epochs):
            # start processing and updating the network according to the batch size
            for train_start in SpikingNeuralNetwork.__traverse_batch(0, num_samples-batch_size, batch_size):
                # get the end index
                train_end = min(train_start+batch_size, num_samples)
                # prevent over indexing at the end of the array
                number_of_training_samples = train_end - train_start
                # NOTE(review): batch_indices is never populated, yet it is
                # used by np.take below — batch_labels will always be empty.
                batch_indices = []
                for train in range(number_of_training_samples):
                    batch_idx = np.unravel_index(train_start + train, batch_shape)
                    print(batch_idx)
                    # NOTE(review): samples are written at their global
                    # batch_idx rather than a 0..batch_size-1 slot — verify.
                    batch_samples[batch_idx] = X[batch_idx]
                    batch_samples_labels[batch_idx] = Y[batch_idx]
                batch_outputs, batch_cache = self.forward_propagation(batch_samples)
                final_cache = batch_cache[len(batch_cache)-1]
                cache = final_cache['cache']
                activation_cache = cache['activation_cache']
                # assumes the final activation cache exposes a membrane
                # potential under key 'Vp' — TODO confirm
                Vp = activation_cache['Vp']
                costs = self.compute_cost(Vp, batch_samples_labels)
                loss = self.compute_loss(Vp, batch_samples_labels)
                # this needs to be fixed
                AL = Vp
                grads = self.backward_propagation(AL, batch_cache, batch_samples_labels)
                parameters = self.update_parameters(batch_cache, grads, learning_rate)
                # select batch images
                batch_labels = np.take(Y, batch_indices)
                # --- commented-out scaffolding kept from the original draft ---
                # batch_spike_train = Model.__generate_model_spike_train(batch_size, train_labels[0])
                # generate batch activation outputs
                # batch_outputs = self.__generate_batch_outputs(batch_size, train_labels[0])
                # generate batch cache
                # batch_cache = self.__generate_batch_cache(batch_size)
                # generate batch gradients
                # batch_gradients = self.__generate_batch_gradients(batch_size)
                # costs
                # costs = np.zeros(batch_size)
                # run batch # potentially multi threaded
                # for i in range(0, batch_size):
                # select sample from batch
                #train_image = batch_images[i]
                # train_label = batch_labels[i]
                # convert to input to spike train
                # layer_spike_train = generate_layer_spike_train(train_image, self.spike_train_length)
                # propagate through network
                # batch_outputs[i], batch_cache[i] = self.forward_propagation(train_image)
                # dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
                # dA_prev_temp, dW_temp, db_temp
                # calculate the cost
                # costs[i] = self.compute_cost(Y, train_label)
                # backwards propagate to calculate the gradients in the network
                # grads = Model.backward_propagation(model, batch_outputs[i], Y, batch_cache[i])
                # batch_end_idx = batch_start_idx
                # update the network using the gradients from the batch
                # parameters = Model.update_parameters(model, batch_cache, batch_gradients, learning_rate)
def predict(self, X):
pass
def test_case_dropout():
    """Smoke-test Dropout: with drop probability p, roughly (1 - p) of the
    ones in a large all-ones input should survive forward propagation."""
    drop_probability = 0.2
    side = 10000
    shape = (side, side)
    dropout_layer = Dropout(drop_probability)
    dropout_layer.build(input_shape=shape)
    all_ones = np.ones(shape)
    expected = math.prod(shape) * (1 - drop_probability)
    print("expected: " + str(expected))
    surviving, _cache = dropout_layer.forward_propagate(all_ones)
    print("actual: " + str(np.sum(surviving)))
# Sanity-check the Dropout layer before building the full model.
test_case_dropout()
# Spike-train length / membrane time constant shared by all LIF membranes.
tau_m = 10
dropout_rate = 0.2
# LeNet-5-style architecture: conv -> pool -> conv -> pool -> dense -> dense,
# each parameterised layer followed by a leaky-integrate-and-fire membrane
# (the output layer's membrane does not fire).
LeNet5 = SpikingNeuralNetwork()
LeNet5.add_layer(Convolution2D(kernel_size=(5,5), strides=(1,1), number_of_filters=20), LeakyIntegrateAndFire(0, 1, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dropout(probability=dropout_rate))
LeNet5.add_layer(AveragePool2D(kernel_size=(2,2),strides=(2,2)), LeakyIntegrateAndFire(0, 0.75, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dropout(probability=dropout_rate))
LeNet5.add_layer(Convolution2D(kernel_size=(5,5), strides=(1,1), number_of_filters=50), LeakyIntegrateAndFire(0, 1, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dropout(probability=dropout_rate))
LeNet5.add_layer(AveragePool2D(kernel_size=(2,2),strides=(2,2)), LeakyIntegrateAndFire(0, 1, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dropout(probability=dropout_rate))
LeNet5.add_layer(Flatten())
LeNet5.add_layer(Dense(num_outputs=200), LeakyIntegrateAndFire(0, 1, tau_m, fire=True, leaky=True))
LeNet5.add_layer(Dense(num_outputs=10), LeakyIntegrateAndFire(0, 1, tau_m, fire=False, leaky=True))
input_shape = train_images[0].shape
# Encode the first five training images as spike trains of length tau_m.
input_images = np.array([generate_layer_spike_train(train_images[0], tau_m), generate_layer_spike_train(train_images[1], tau_m),
                        generate_layer_spike_train(train_images[2], tau_m), generate_layer_spike_train(train_images[3], tau_m),
                        generate_layer_spike_train(train_images[4], tau_m)])
LeNet5.build(input_images[0].shape)
lbl = train_labels[0:5,:]
LeNet5.fit(input_images, train_labels[0:5,:], batch_size=2,learning_rate=0.002)
# NOTE(review): predict is invoked with two arguments here — verify its
# signature accepts labels.
LeNet5.predict(test_images, test_labels)
class Model:
@staticmethod
def __generate_batch_outputs(batch_size, train_label):
return np.zeros(shape=(tuple([batch_size]) + train_label.shape))
    @staticmethod
    def __generate_batch_cache(model, batch_size):
        """Allocate one zeroed activation/membrane/spike cache per batch sample.

        Each per-sample dict holds, for every layer of the hand-wired model,
        arrays of shape (spike_train_length, *layer_output_dimensions).

        :param model: a Model instance whose per-layer output dimensions are read
        :param batch_size: number of per-sample cache dicts to create
        :return: list of batch_size cache dicts
        """
        cache = []
        # Leading time axis shared by every buffer.
        time = tuple([model.spike_train_length])
        for batch_idx in range(0, batch_size):
            cache.extend([{
                "l1_activation" : np.zeros(shape= time + model.__l1_output_dimensions),
                "l1_membrane": np.zeros(shape=time + model.__l1_output_dimensions),
                "l1_spike": np.zeros(shape=time + model.__l1_output_dimensions),
                "l2_activation": np.zeros(shape=time + model.__l2_output_dimensions),
                "l2_membrane": np.zeros(shape=time + model.__l2_output_dimensions),
                "l2_spike": np.zeros(shape=time + model.__l2_output_dimensions),
                "l3_activation": np.zeros(shape=time + model.__l3_output_dimensions),
                "l3_membrane": np.zeros(shape=time + model.__l3_output_dimensions),
                "l3_spike": np.zeros(shape=time + model.__l3_output_dimensions),
                "l4_activation": np.zeros(shape=time + model.__l4_output_dimensions),
                "l4_membrane": np.zeros(shape=time + model.__l4_output_dimensions),
                "l4_spike": np.zeros(shape=time + model.__l4_output_dimensions),
                "l5_output": np.zeros(shape=time + model.__l5_output_dimensions),
                "l6_activation": np.zeros(shape=time + model.__l6_output_dimensions),
                "l6_membrane": np.zeros(shape=time + model.__l6_output_dimensions),
                "l6_spike": np.zeros(shape=time + model.__l6_output_dimensions),
                "l7_activation": np.zeros(shape=time + model.__l7_output_dimensions),
                "l7_membrane": np.zeros(shape=time + model.__l7_output_dimensions),
                "l7_spike": np.zeros(shape=time + model.__l7_output_dimensions),
            }])
        return cache
@staticmethod
def __generate_batch_gradients(model, batch_size):
gradients = []
for i in range(batch_size):
gradients.extend([{
"l1_gradients": np.zeros(shape=model.__l1_filters.shape),
"l3_gradients": np.zeros(shape=model.__l1_filters.shape),
"l6_gradients": np.zeros(shape=model.__l6_weights.shape),
"l7_gradients": np.zeros(shape=model.__l7_weights.shape)
}])
return gradients
    def __init__(self, spike_train_length):
        """Hand-wired LeNet-5-style spiking model.

        Layers: conv(5x5x20) -> avgpool(2x2) -> conv(5x5x50) -> avgpool(2x2)
        -> flatten -> dense(200) -> dense(10), each with its own membrane.

        :param spike_train_length: number of time steps in the input spike train
        """
        self.spike_train_length = spike_train_length
        """Convoloution 1"""
        # Input is a single-channel 28x28 image (MNIST-sized).
        self.input_channels = 1
        self.input_y = 28
        self.input_x = 28
        self.__l1_num_channels = self.input_channels
        self.__l1_input_dim = (self.input_y, self.input_x, self.input_channels)
        self.__l1_num_filters = 20
        self.__l1_kernel_dimensions = (5, 5, self.__l1_num_channels, self.__l1_num_filters)
        self.__l1_stride_dimensions = (1, 1)
        self.__l1_padding_dimensions = 0
        self.__l1_output_dimensions = conv_output_size(self.__l1_input_dim,
                                                      self.__l1_kernel_dimensions,
                                                      self.__l1_stride_dimensions,
                                                      self.__l1_padding_dimensions)
        self.__l1_filters = generate_conv2d_filters(self.__l1_kernel_dimensions)
        self.__l1_membrane = generate_membrane(self.__l1_output_dimensions)
        """Average Pool 1"""
        self.__l2_input_dim = self.__l1_output_dimensions
        self.__l2_kernel_dimensions = (2, 2, 1, self.__l1_output_dimensions[2])
        self.__l2_stride_dimensions = (2, 2)
        self.__l2_padding_dimensions = 0
        self.__l2_output_dimensions = conv_output_size(self.__l2_input_dim,
                                                      self.__l2_kernel_dimensions,
                                                      self.__l2_stride_dimensions,
                                                      self.__l2_padding_dimensions)
        self.__l2_membrane = generate_membrane(self.__l2_output_dimensions)
        """Convoloution 2"""
        self.__l3_input_dim = self.__l2_output_dimensions
        self.__l3_num_channels = 50
        self.__l3_kernel_dimensions = (5, 5, self.__l3_input_dim[2], self.__l3_num_channels)
        self.__l3_stride_dimensions = (1, 1)
        self.__l3_padding_dimensions = 0
        self.__l3_output_dimensions = conv_output_size(self.__l3_input_dim,
                                                      self.__l3_kernel_dimensions,
                                                      self.__l3_stride_dimensions,
                                                      self.__l3_padding_dimensions)
        self.__l3_filters = generate_conv2d_filters(self.__l3_kernel_dimensions)
        self.__l3_membrane = generate_membrane(self.__l3_output_dimensions)
        """Average Pool 2"""
        self.__l4_input_dim = self.__l3_output_dimensions
        self.__l4_kernel_dimensions = (2, 2, 1, self.__l3_output_dimensions[2])
        self.__l4_stride_dimensions = (2, 2)
        self.__l4_padding_dimensions = 0
        self.__l4_output_dimensions = conv_output_size(self.__l4_input_dim,
                                                      self.__l4_kernel_dimensions,
                                                      self.__l4_stride_dimensions,
                                                      self.__l4_padding_dimensions)
        self.__l4_membrane = generate_membrane(self.__l4_output_dimensions)
        """Flatten"""
        self.__l5_input_dimensions = self.__l4_output_dimensions
        self.__l5_output_dimensions = tuple([math.prod(self.__l5_input_dimensions)])
        """Dense Layer 1"""
        self.__l6_input_dim = self.__l5_output_dimensions
        self.__l6_neurons = 200
        self.__l6_output_dimensions = tuple([self.__l6_neurons])
        self.__l6_weights = generate_dense_layer_weights(self.__l5_output_dimensions, self.__l6_neurons)
        self.__l6_membrane = generate_membrane(self.__l6_output_dimensions)
        """Dense Layer 2"""
        # NOTE(review): l7's input dim is set from l5's output, while its
        # weights are generated from l6's output — presumably l6 was meant
        # for both; confirm.
        self.__l7_input_dim = self.__l5_output_dimensions
        self.__l7_neurons = 10
        self.__l7_output_dimensions = tuple([self.__l7_neurons])
        self.__l7_weights = generate_dense_layer_weights(self.__l6_output_dimensions, self.__l7_neurons)
        self.__l7_membrane = generate_membrane(self.__l7_output_dimensions)
        self.reset()
    @staticmethod
    def forward_propagation(model, input, cache, output):
        """Propagate one spike-train input through the seven fixed layers.

        :param model: Model instance supplying filters/weights and strides
        :param input: input spike train
        :param cache: dict of pre-allocated per-layer buffers (mutated in place)
        :param output: pre-allocated output buffer
        :return: [output, cache] where output is the last-layer membrane
            potential averaged over the spike-train length
        """
        ## TODO: reset membrane
        ## TODO: fix conv2d, avgpool, lif and dense to not require input/output
        cache["l1_activation"] = conv2d(input, cache["l1_activation"], model.__l1_filters, stride=model.__l1_stride_dimensions)
        [cache["l1_membrane"], cache["l1_spike"]] = lif_neuron_pool(cache["l1_activation"], cache["l1_membrane"], cache["l1_spike"], Vth=1, fire=True, leaky=True)
        cache["l2_activation"] = avg_pool(cache["l1_spike"], cache["l2_activation"], kernel_size=model.__l2_kernel_dimensions, stride=model.__l2_stride_dimensions)
        [cache["l2_membrane"], cache["l2_spike"]] = lif_neuron_pool(cache["l2_activation"], cache["l2_membrane"], cache["l2_spike"], Vth=0.75, fire=True, leaky=False)
        # NOTE(review): unlike every other layer, this conv2d result is not
        # assigned back to cache["l3_activation"] — correct only if conv2d
        # writes into its output argument in place; confirm.
        conv2d(cache["l2_spike"], cache["l3_activation"], model.__l3_filters, stride=model.__l3_stride_dimensions)
        [cache["l3_membrane"], cache["l3_spike"]] = lif_neuron_pool(cache["l3_activation"], cache["l3_membrane"], cache["l3_spike"], Vth=1.0, fire=True, leaky=False)
        cache["l4_activation"] = avg_pool(cache["l3_spike"], cache["l4_activation"], kernel_size=model.__l4_kernel_dimensions, stride=model.__l4_stride_dimensions)
        [cache["l4_membrane"], cache["l4_spike"]] = lif_neuron_pool(cache["l4_activation"], cache["l4_membrane"], cache["l4_spike"], Vth=0.75, leaky=False)
        cache['l5_output'] = flatten(cache["l4_spike"], cache['l5_output'], 1)
        cache["l6_activation"] = dense_forward(cache['l5_output'], cache["l6_activation"], model.__l6_weights)
        [cache["l6_membrane"], cache["l6_spike"]] = lif_neuron_pool(cache["l6_activation"], cache["l6_membrane"], cache["l6_spike"], Vth=1, fire=True, leaky=True, time_index=0)
        cache["l7_activation"] = dense_forward(cache["l6_spike"], cache["l7_activation"], model.__l7_weights)
        [cache["l7_membrane"], cache["l7_spike"]] = lif_neuron_pool(cache["l7_activation"], cache["l7_membrane"], cache["l7_spike"], Vth=1, fire=False, leaky=False, time_index=0)
        # Rate-decode: average final membrane potential over the train length.
        output = np.divide(cache["l7_membrane"][-1], model.spike_train_length) # this may be problematic
        return [output, cache]
@staticmethod
def generate_grads():
pass
    @staticmethod
    def lif_backward_propagation(input_error_gradient, weights, layer_spike_train, Vth=1):
        """Backpropagate an error gradient through a LIF layer (unfinished).

        NOTE(review): this function references an undefined name ``loss``
        (likely ``input_error_gradient`` was meant), mutates its input spike
        train with hardcoded debug spikes, and returns nothing — it looks like
        an abandoned draft; do not call as-is.

        :param input_error_gradient: upstream error gradient
        :param weights: layer weight matrix (output_neurons x input_neurons)
        :param layer_spike_train: (time, neurons) spike history of the layer
        :param Vth: firing threshold used by the surrogate derivative
        """
        # activation derivative
        neurons = layer_spike_train.shape[1]
        time = layer_spike_train.shape[0]
        a_lif_derivative = np.zeros((neurons))
        spike_train = []
        # NOTE(review): hardcoded spike injections — debug leftovers; remove.
        layer_spike_train[1][0] = 1
        layer_spike_train[5][0] = 1
        layer_spike_train[10][0] = 1
        layer_spike_train[15][0] = 1
        layer_spike_train[18][0] = 1
        # Surrogate derivative of the LIF activation per neuron.
        for neuron_idx in range(0, neurons):
            spike_train = layer_spike_train[:,neuron_idx]
            a_lif_derivative[neuron_idx] = differentiate_spike_train(spike_train, Vth)
        output_neurons = weights.shape[0]
        input_neurons = weights.shape[1]
        # (weights * error gradient)
        for output_neuron_idx in range(0, output_neurons):
            error = 0
            for input_neuron_idx in range(0, input_neurons):
                ax = loss[input_neuron_idx] # <-- loss from this neuron; NOTE(review): `loss` is undefined here
                wx = weights[input_neuron_idx][output_neuron_idx] # <-- its connected weights
                error = error + ax * wx
            # (weights * error gradient) . alif_dw/dt
            # NOTE(review): the expression below is a no-op (value discarded).
            a_lif_derivative[neuron_idx]
@staticmethod
def backward_propagation(model, output: np.array, label: np.array, cache: dict):
grads = [{"dW7" : np.zeros(shape=model.__l7_weights.shape)},
{"dW6" : np.zeros(shape=model.__l7_weights.shape)},
{"dW3" : np.zeros(shape=model.__l7_weights.shape)},
{"dW1" : np.zeros(shape=model.__l7_weights.shape)}]
tau_m = len(cache["l7_spike"])
#d (a_lif / d_net) | |
Thumbprint of the server key.
:vartype thumbprint: str
:param auto_rotation_enabled: Key auto rotation opt-in flag. Either true or false.
:type auto_rotation_enabled: bool
"""
    # Fields marked readonly are populated by the service and rejected on send.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'kind': {'readonly': True},
        'uri': {'readonly': True},
        'thumbprint': {'readonly': True},
    }

    # Maps Python attribute names to REST payload keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'server_key_name': {'key': 'properties.serverKeyName', 'type': 'str'},
        'server_key_type': {'key': 'properties.serverKeyType', 'type': 'str'},
        'uri': {'key': 'properties.uri', 'type': 'str'},
        'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
        'auto_rotation_enabled': {'key': 'properties.autoRotationEnabled', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """Initialize the model; read-only fields are reset to ``None`` and
        caller-settable properties are taken from ``kwargs``."""
        super(ManagedInstanceEncryptionProtector, self).__init__(**kwargs)
        self.kind = None
        self.server_key_name = kwargs.get('server_key_name', None)
        self.server_key_type = kwargs.get('server_key_type', None)
        self.uri = None
        self.thumbprint = None
        self.auto_rotation_enabled = kwargs.get('auto_rotation_enabled', None)
class ManagedInstanceEncryptionProtectorListResult(msrest.serialization.Model):
    """A page of managed instance encryption protectors.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~azure.mgmt.sql.models.ManagedInstanceEncryptionProtector]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ManagedInstanceEncryptionProtector]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Both properties are server-populated, so they start as ``None``."""
        super(ManagedInstanceEncryptionProtectorListResult, self).__init__(**kwargs)
        for _name in ('value', 'next_link'):
            setattr(self, _name, None)
class ManagedInstanceExternalAdministrator(msrest.serialization.Model):
    """Properties of an active directory administrator.

    :param administrator_type: Type of the server administrator. Possible values include:
     "ActiveDirectory".
    :type administrator_type: str or ~azure.mgmt.sql.models.AdministratorType
    :param principal_type: Principal Type of the server administrator. Possible values include:
     "User", "Group", "Application".
    :type principal_type: str or ~azure.mgmt.sql.models.PrincipalType
    :param login: Login name of the server administrator.
    :type login: str
    :param sid: SID (object ID) of the server administrator.
    :type sid: str
    :param tenant_id: Tenant ID of the administrator.
    :type tenant_id: str
    :param azure_ad_only_authentication: Azure Active Directory only Authentication enabled.
    :type azure_ad_only_authentication: bool
    """

    _attribute_map = {
        'administrator_type': {'key': 'administratorType', 'type': 'str'},
        'principal_type': {'key': 'principalType', 'type': 'str'},
        'login': {'key': 'login', 'type': 'str'},
        'sid': {'key': 'sid', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'azure_ad_only_authentication': {'key': 'azureADOnlyAuthentication', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        """Every property is caller-settable; missing ones default to ``None``."""
        super(ManagedInstanceExternalAdministrator, self).__init__(**kwargs)
        for _name in ('administrator_type', 'principal_type', 'login', 'sid',
                      'tenant_id', 'azure_ad_only_authentication'):
            setattr(self, _name, kwargs.get(_name, None))
class ManagedInstanceFamilyCapability(msrest.serialization.Model):
    """The managed server family capability.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar name: Family name.
    :vartype name: str
    :ivar sku: SKU name.
    :vartype sku: str
    :ivar supported_license_types: List of supported license types.
    :vartype supported_license_types: list[~azure.mgmt.sql.models.LicenseTypeCapability]
    :ivar supported_vcores_values: List of supported virtual cores values.
    :vartype supported_vcores_values: list[~azure.mgmt.sql.models.ManagedInstanceVcoresCapability]
    :ivar status: The status of the capability. Possible values include: "Visible", "Available",
     "Default", "Disabled".
    :vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
    :param reason: The reason for the capability not being available.
    :type reason: str
    """

    _validation = {
        'name': {'readonly': True},
        'sku': {'readonly': True},
        'supported_license_types': {'readonly': True},
        'supported_vcores_values': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'str'},
        'supported_license_types': {'key': 'supportedLicenseTypes', 'type': '[LicenseTypeCapability]'},
        'supported_vcores_values': {'key': 'supportedVcoresValues', 'type': '[ManagedInstanceVcoresCapability]'},
        'status': {'key': 'status', 'type': 'str'},
        'reason': {'key': 'reason', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ManagedInstanceFamilyCapability, self).__init__(**kwargs)
        # Server-populated, read-only fields start empty.
        for _name in ('name', 'sku', 'supported_license_types',
                      'supported_vcores_values', 'status'):
            setattr(self, _name, None)
        # The only caller-settable property.
        self.reason = kwargs.get('reason', None)
class ManagedInstanceKey(ProxyResource):
    """A managed instance key.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar kind: Kind of encryption protector. This is metadata used for the Azure portal
     experience.
    :vartype kind: str
    :param server_key_type: The key type like 'ServiceManaged', 'AzureKeyVault'. Possible values
     include: "ServiceManaged", "AzureKeyVault".
    :type server_key_type: str or ~azure.mgmt.sql.models.ServerKeyType
    :param uri: The URI of the key. If the ServerKeyType is AzureKeyVault, then the URI is
     required.
    :type uri: str
    :ivar thumbprint: Thumbprint of the key.
    :vartype thumbprint: str
    :ivar creation_date: The key creation date.
    :vartype creation_date: ~datetime.datetime
    :ivar auto_rotation_enabled: Key auto rotation opt-in flag. Either true or false.
    :vartype auto_rotation_enabled: bool
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'kind': {'readonly': True},
        'thumbprint': {'readonly': True},
        'creation_date': {'readonly': True},
        'auto_rotation_enabled': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'server_key_type': {'key': 'properties.serverKeyType', 'type': 'str'},
        'uri': {'key': 'properties.uri', 'type': 'str'},
        'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'},
        'creation_date': {'key': 'properties.creationDate', 'type': 'iso-8601'},
        'auto_rotation_enabled': {'key': 'properties.autoRotationEnabled', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(ManagedInstanceKey, self).__init__(**kwargs)
        # Caller-settable properties.
        self.server_key_type = kwargs.get('server_key_type', None)
        self.uri = kwargs.get('uri', None)
        # Server-populated, read-only properties.
        for _name in ('kind', 'thumbprint', 'creation_date', 'auto_rotation_enabled'):
            setattr(self, _name, None)
class ManagedInstanceKeyListResult(msrest.serialization.Model):
    """A page of managed instance keys.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~azure.mgmt.sql.models.ManagedInstanceKey]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ManagedInstanceKey]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Both properties are server-populated, so they start as ``None``."""
        super(ManagedInstanceKeyListResult, self).__init__(**kwargs)
        for _name in ('value', 'next_link'):
            setattr(self, _name, None)
class ManagedInstanceListResult(msrest.serialization.Model):
    """A page of managed instances.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~azure.mgmt.sql.models.ManagedInstance]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ManagedInstance]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Both properties are server-populated, so they start as ``None``."""
        super(ManagedInstanceListResult, self).__init__(**kwargs)
        for _name in ('value', 'next_link'):
            setattr(self, _name, None)
class ManagedInstanceLongTermRetentionBackup(ProxyResource):
    """A long term retention backup for a managed database.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar managed_instance_name: The managed instance that the backup database belongs to.
    :vartype managed_instance_name: str
    :ivar managed_instance_create_time: The create time of the instance.
    :vartype managed_instance_create_time: ~datetime.datetime
    :ivar database_name: The name of the database the backup belongs to.
    :vartype database_name: str
    :ivar database_deletion_time: The delete time of the database.
    :vartype database_deletion_time: ~datetime.datetime
    :ivar backup_time: The time the backup was taken.
    :vartype backup_time: ~datetime.datetime
    :ivar backup_expiration_time: The time the long term retention backup will expire.
    :vartype backup_expiration_time: ~datetime.datetime
    :ivar backup_storage_redundancy: The storage redundancy type of the backup. Possible values
     include: "Geo", "Local", "Zone".
    :vartype backup_storage_redundancy: str or ~azure.mgmt.sql.models.BackupStorageRedundancy
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'managed_instance_name': {'readonly': True},
        'managed_instance_create_time': {'readonly': True},
        'database_name': {'readonly': True},
        'database_deletion_time': {'readonly': True},
        'backup_time': {'readonly': True},
        'backup_expiration_time': {'readonly': True},
        'backup_storage_redundancy': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'managed_instance_name': {'key': 'properties.managedInstanceName', 'type': 'str'},
        'managed_instance_create_time': {'key': 'properties.managedInstanceCreateTime', 'type': 'iso-8601'},
        'database_name': {'key': 'properties.databaseName', 'type': 'str'},
        'database_deletion_time': {'key': 'properties.databaseDeletionTime', 'type': 'iso-8601'},
        'backup_time': {'key': 'properties.backupTime', 'type': 'iso-8601'},
        'backup_expiration_time': {'key': 'properties.backupExpirationTime', 'type': 'iso-8601'},
        'backup_storage_redundancy': {'key': 'properties.backupStorageRedundancy', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Every declared property is server-populated, so all start as ``None``."""
        super(ManagedInstanceLongTermRetentionBackup, self).__init__(**kwargs)
        for _name in ('managed_instance_name', 'managed_instance_create_time',
                      'database_name', 'database_deletion_time', 'backup_time',
                      'backup_expiration_time', 'backup_storage_redundancy'):
            setattr(self, _name, None)
class ManagedInstanceLongTermRetentionBackupListResult(msrest.serialization.Model):
    """A page of long term retention backups for managed database(s).

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar value: Array of results.
    :vartype value: list[~azure.mgmt.sql.models.ManagedInstanceLongTermRetentionBackup]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ManagedInstanceLongTermRetentionBackup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Both properties are server-populated, so they start as ``None``."""
        super(ManagedInstanceLongTermRetentionBackupListResult, self).__init__(**kwargs)
        for _name in ('value', 'next_link'):
            setattr(self, _name, None)
class ManagedInstanceLongTermRetentionPolicy(ProxyResource):
"""A long term retention policy.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
| |
from starling_sim.basemodel.trace.events import *
from starling_sim.basemodel.agent.requests import UserStop, StopPoint, StationRequest
from starling_sim.utils.constants import PUBLIC_TRANSPORT_TYPE
class KPI:
    """
    Base structure of a key-performance-indicator computation.

    Subclasses fill a dict of named indicator values by
    processing traced simulation events.
    """

    #: **agentId**: id of the agent
    KEY_ID = "agentId"

    def __init__(self):
        """
        Create an empty indicator table.

        ``indicator_dict`` maps indicator names to their current values,
        and ``keys`` lists the names exposed by this KPI.
        """
        self.indicator_dict = None
        self.keys = []

        self.new_indicator_dict()

    def setup(self, simulation_model):
        """
        Hook called once during simulation setup; the base class does nothing.

        :param simulation_model:
        :return:
        """
        pass

    def new_indicator_dict(self):
        """
        Reset the indicator_dict before evaluating a new target.

        :return: None, resets directly the indicator_dict attribute
        """
        pass

    def update(self, event, agent):
        """
        Fold one traced event of *agent* into the indicator values.

        :param event: processed event
        :param agent: subject of the event
        :return:
        """
        if not isinstance(event, InputEvent):
            return
        self.indicator_dict[self.KEY_ID] = agent.id
class MoveKPI(KPI):
    """
    This KPI evaluates the distance and spent time for each one of the simulation modes.
    """

    #: **{mode}Distance**: distance travelled in <mode> [meters]
    SUFFIX_KEY_DISTANCE = "{mode}Distance"
    #: **{mode}Time**: time travelled in <mode> [seconds]
    SUFFIX_KEY_TIME = "{mode}Time"

    def __init__(self):
        self.modes = []

        # init of indicator dict
        super().__init__()

    def setup(self, simulation_model):
        self.modes = list(simulation_model.environment.topologies.keys())
        self.new_indicator_dict()

    def new_indicator_dict(self):
        """
        Initialize the time and distance values at 0 for the considered modes.

        BUG FIX: ``self.keys`` is rebuilt from scratch instead of appended to,
        so repeated resets (one per evaluated target) no longer accumulate
        duplicate key entries.

        :return:
        """
        base_dict = {}
        keys = []
        for mode in self.modes:
            distance_key = self.SUFFIX_KEY_DISTANCE.format(mode=mode)
            base_dict[distance_key] = 0
            keys.append(distance_key)
            time_key = self.SUFFIX_KEY_TIME.format(mode=mode)
            base_dict[time_key] = 0
            keys.append(time_key)
        self.keys = keys
        self.indicator_dict = base_dict

    def update(self, event, agent):
        """
        Add travelled distances and durations.

        :param agent:
        :param event:
        :return:
        """
        super().update(event, agent)

        if isinstance(event, MoveEvent):
            self.indicator_dict[self.SUFFIX_KEY_DISTANCE.format(mode=event.mode)] += event.distance
            self.indicator_dict[self.SUFFIX_KEY_TIME.format(mode=event.mode)] += event.duration
class WaitKPI(KPI):
    """
    This KPI accumulates the total time an agent spends waiting.
    """

    #: **waitTime**: total traced wait time [seconds]
    KEY_WAIT = "waitTime"

    def __init__(self):
        super().__init__()
        self.keys = [self.KEY_WAIT]

    def new_indicator_dict(self):
        self.indicator_dict = {self.KEY_WAIT: 0}

    def update(self, event, agent):
        """
        Accumulate the wait durations carried by request and wait events.

        :param agent:
        :param event:
        :return:
        """
        super().update(event, agent)

        total_wait = self.indicator_dict[self.KEY_WAIT]
        if isinstance(event, RequestEvent):
            total_wait += sum(event.request.waitSequence)
        if isinstance(event, WaitEvent):
            total_wait += event.waiting_time
        self.indicator_dict[self.KEY_WAIT] = total_wait
class OdtWaitsKPI(KPI):
    """
    This KPI evaluates the lateness in ODT requests.

    The three indicators are "-"-separated series of values, one entry per
    relevant stop event.
    """

    #: **odtPickupWait**: series of wait times at ODT pickups [seconds]
    KEY_PICKUP_WAIT = "odtPickupWait"
    #: **odtDetour**: series of ODT detour times [seconds]
    KEY_DETOUR = "odtDetour"
    #: **odtDirectTrip**: series of ODT direct trip times [seconds]
    KEY_DIRECT_TRIP = "odtDirectTrip"

    def __init__(self):
        super().__init__()
        self.keys = [self.KEY_PICKUP_WAIT, self.KEY_DETOUR, self.KEY_DIRECT_TRIP]

    def new_indicator_dict(self):
        self.indicator_dict = {self.KEY_PICKUP_WAIT: "", self.KEY_DETOUR: "", self.KEY_DIRECT_TRIP: ""}

    def update(self, event, agent):
        """
        Add wait durations of ODT requests to KPIs.

        :param event:
        :param agent:
        :return:
        """
        # BUG FIX: the base update was not called, unlike every sibling KPI,
        # so the agentId indicator was never recorded for this KPI.
        super().update(event, agent)

        # TODO : find a better condition
        if isinstance(event, StopEvent) and event.serviceVehicle.type != PUBLIC_TRANSPORT_TYPE:
            dropoff_agents = [request.agent.id for request in event.dropoffs]
            pickup_agents = [request.agent.id for request in event.pickups]

            if agent.id in dropoff_agents:
                request = event.dropoffs[dropoff_agents.index(agent.id)]
                if len(request.waitSequence) > 1:
                    # append to the "-"-separated detour/direct-trip series
                    if self.indicator_dict[self.KEY_DETOUR] != "":
                        self.indicator_dict[self.KEY_DETOUR] += "-"
                        self.indicator_dict[self.KEY_DIRECT_TRIP] += "-"
                    self.indicator_dict[self.KEY_DETOUR] += str(request.waitSequence[1])
                    self.indicator_dict[self.KEY_DIRECT_TRIP] += str(request.directTravelTime)
            elif agent.id in pickup_agents:
                request = event.pickups[pickup_agents.index(agent.id)]
                if len(request.waitSequence) > 0:
                    # append to the "-"-separated pickup-wait series
                    if self.indicator_dict[self.KEY_PICKUP_WAIT] != "":
                        self.indicator_dict[self.KEY_PICKUP_WAIT] += "-"
                    self.indicator_dict[self.KEY_PICKUP_WAIT] += str(request.waitSequence[0])
class GetVehicleKPI(KPI):
    """
    This KPI counts how many times the vehicle is taken.
    """

    #: **nbGetVehicle**: number of uses of the vehicle
    KEY_GET_VEHICLE = "nbGetVehicle"

    def __init__(self):
        super().__init__()
        self.keys = [self.KEY_GET_VEHICLE]

    def new_indicator_dict(self):
        self.indicator_dict = {self.KEY_GET_VEHICLE: 0}

    def update(self, event, agent):
        """
        Count one more use for each GetVehicleEvent.

        :param agent:
        :param event:
        :return:
        """
        super().update(event, agent)

        if isinstance(event, GetVehicleEvent):
            uses = self.indicator_dict[self.KEY_GET_VEHICLE]
            self.indicator_dict[self.KEY_GET_VEHICLE] = uses + 1
class SuccessKPI(KPI):
    """
    This KPI evaluates the number of failed/successful requests.
    """
    #: **nbFailedGet**: number of failed get requests
    KEY_FAILED_GET = "nbFailedGet"
    #: **nbSuccessGet**: number successful get requests
    KEY_SUCCESS_GET = "nbSuccessGet"
    #: **nbFailedPut**: number of failed put requests
    KEY_FAILED_PUT = "nbFailedPut"
    #: **nbSuccessPut**: number of successful put requests
    KEY_SUCCESS_PUT = "nbSuccessPut"
    #: **nbFailedRequest**: number of failed requests
    KEY_FAILED_REQUEST = "nbFailedRequest"
    #: **nbSuccessRequest**: number of successful requests
    KEY_SUCCESS_REQUEST = "nbSuccessRequest"

    def __init__(self, indicator_selection):
        super().__init__()
        # only the caller-selected indicators are exported
        self.keys = indicator_selection

    def new_indicator_dict(self):
        # all six counters are tracked even when only a subset is exported
        self.indicator_dict = {
            self.KEY_FAILED_GET: 0, self.KEY_SUCCESS_GET: 0,
            self.KEY_FAILED_PUT: 0, self.KEY_SUCCESS_PUT: 0,
            self.KEY_FAILED_REQUEST: 0, self.KEY_SUCCESS_REQUEST: 0,
        }

    def update(self, event, agent):
        """
        Add request events according to their success.

        :param agent:
        :param event:
        :return:
        """
        super().update(event, agent)
        if not isinstance(event, RequestEvent):
            return
        is_get = event.request.type == StationRequest.GET_REQUEST
        if event.request.success:
            self.indicator_dict[self.KEY_SUCCESS_REQUEST] += 1
            self.indicator_dict[self.KEY_SUCCESS_GET if is_get else self.KEY_SUCCESS_PUT] += 1
        else:
            self.indicator_dict[self.KEY_FAILED_REQUEST] += 1
            self.indicator_dict[self.KEY_FAILED_GET if is_get else self.KEY_FAILED_PUT] += 1
class StaffOperationKPI(KPI):
    """
    This KPI evaluates the number of staff operations.
    """
    #: **nbFailedGetStaff**: number of failed gets by staff
    KEY_FAILED_GET_STAFF = "nbFailedGetStaff"
    #: **nbSuccessGetStaff**: number of successful gets by staff
    KEY_SUCCESS_GET_STAFF = "nbSuccessGetStaff"
    #: **nbFailedPutStaff**: number of failed puts by staff
    KEY_FAILED_PUT_STAFF = "nbFailedPutStaff"
    #: **nbSuccessPutStaff**: number of successful puts by staff
    KEY_SUCCESS_PUT_STAFF = "nbSuccessPutStaff"

    def __init__(self):
        super().__init__()
        self.keys = [self.KEY_FAILED_GET_STAFF, self.KEY_SUCCESS_GET_STAFF,
                     self.KEY_FAILED_PUT_STAFF, self.KEY_SUCCESS_PUT_STAFF]

    def new_indicator_dict(self):
        # reset the four staff operation counters
        self.indicator_dict = {self.KEY_FAILED_GET_STAFF: 0,
                               self.KEY_SUCCESS_GET_STAFF: 0,
                               self.KEY_FAILED_PUT_STAFF: 0,
                               self.KEY_SUCCESS_PUT_STAFF: 0}

    def update(self, event, agent):
        """
        Add operations to the total.

        :param agent:
        :param event:
        :return:
        """
        super().update(event, agent)
        if not isinstance(event, StaffOperationEvent):
            return
        goal, total = event.goal, event.total
        if goal < 0:
            # negative goal: a GET operation — presumably |goal| items were
            # targeted and |total| actually taken; TODO confirm sign convention
            self.indicator_dict[self.KEY_SUCCESS_GET_STAFF] += abs(total)
            self.indicator_dict[self.KEY_FAILED_GET_STAFF] += total - goal
        elif goal > 0:
            # positive goal: a PUT operation
            self.indicator_dict[self.KEY_SUCCESS_PUT_STAFF] += total
            self.indicator_dict[self.KEY_FAILED_PUT_STAFF] += goal - total
class OccupationKPI(KPI):
    """
    This KPI evaluates the empty and full time and distance
    and the stock relative time/distance.

    Subclasses override ``get_capacity``/``get_initial_stock`` to extract the
    relevant values from their element type (see StationOccupationKPI).
    """
    def __init__(self):
        """Initialise indicator keys and the running stock-tracking state."""
        #: **emptyTime**: time spent empty [seconds]
        self.KEY_EMPTY_TIME = "emptyTime"
        #: **emptyDistance**: distance travelled empty [meters]
        self.KEY_EMPTY_DISTANCE = "emptyDistance"
        #: **fullTime**: time spent full [seconds]
        self.KEY_FULL_TIME = "fullTime"
        #: **fullDistance**: distance travelled full [meters]
        self.KEY_FULL_DISTANCE = "fullDistance"
        #: **stockTime**: stock relative time (stock*time) [seconds]
        self.KEY_STOCK_TIME = "stockTime"
        #: **stockDistance**: stock relative distance (stock*distance) [meters]
        self.KEY_STOCK_DISTANCE = "stockDistance"
        #: **maxStock**: maximum stock
        self.KEY_MAX_STOCK = "maxStock"
        super().__init__()
        self.keys = [self.KEY_EMPTY_TIME, self.KEY_EMPTY_DISTANCE, self.KEY_FULL_TIME, self.KEY_FULL_DISTANCE,
                     self.KEY_STOCK_TIME, self.KEY_STOCK_DISTANCE, self.KEY_MAX_STOCK]
        # running state, (re)initialised from the first InputEvent seen in update()
        self.capacity = None
        self.currentStock = None
        self.previousTime = 0
        self.currentDistance = None

    def new_indicator_dict(self):
        """
        Initialize the time and distance counts to 0.
        """
        self.indicator_dict = {self.KEY_EMPTY_TIME: 0, self.KEY_EMPTY_DISTANCE: 0,
                               self.KEY_FULL_TIME: 0, self.KEY_FULL_DISTANCE: 0,
                               self.KEY_STOCK_TIME: 0, self.KEY_STOCK_DISTANCE: 0,
                               self.KEY_MAX_STOCK: 0}
        # also reset the running state so counts from a previous element do not leak
        self.capacity = None
        self.currentStock = None
        self.previousTime = 0
        self.currentDistance = None

    def get_capacity(self, element):
        """
        Get the capacity of the agent, according to its type.

        Base implementation is a hook returning the current value unchanged;
        subclasses override it to read the capacity off their element type.

        :param element:
        :return: agent's capacity
        """
        return self.capacity

    def get_initial_stock(self, element):
        """
        Get the initial stock of the agent, according to its type.

        Base implementation is a hook returning the current value unchanged;
        subclasses override it to read the stock off their element type.

        :param element:
        :return: agent's initial stock
        """
        return self.currentStock

    def add_to_stock(self, value, timestamp):
        """
        Update the full and empty time and distance counts, according to the previous
        stock value, then updates the stock and time.

        :param value: stock change (negative for stock loss)
        :param timestamp: timestamp of the stock change event
        """
        # compute time spent with last stock
        duration = timestamp - self.previousTime
        # add time to relevant time count
        if self.currentStock == 0:
            self.indicator_dict[self.KEY_EMPTY_TIME] += duration
            # distance tracking only applies to moving elements (None = not tracked)
            if self.currentDistance is not None:
                self.indicator_dict[self.KEY_EMPTY_DISTANCE] += self.currentDistance
        elif self.currentStock == self.capacity:
            self.indicator_dict[self.KEY_FULL_TIME] += duration
            if self.currentDistance is not None:
                self.indicator_dict[self.KEY_FULL_DISTANCE] += self.currentDistance
        # add stock relative time and distance
        self.indicator_dict[self.KEY_STOCK_TIME] += duration * self.currentStock
        if self.currentDistance is not None:
            self.indicator_dict[self.KEY_STOCK_DISTANCE] += self.currentDistance * self.currentStock
        # update stock and current time
        self.currentStock += value
        self.previousTime = timestamp
        if self.currentStock > self.indicator_dict[self.KEY_MAX_STOCK]:
            self.indicator_dict[self.KEY_MAX_STOCK] = self.currentStock
        # reset distance count
        if self.currentDistance is not None:
            self.currentDistance = 0

    def update(self, event, agent):
        """
        Update the stock and time counts from traced events.

        :param agent:
        :param event:
        """
        super().update(event, agent)
        if isinstance(event, InputEvent):
            # element enters the simulation: capture capacity and starting stock
            self.capacity = self.get_capacity(event.element)
            self.currentStock = self.get_initial_stock(event.element)
            self.indicator_dict[self.KEY_MAX_STOCK] = self.currentStock
        if isinstance(event, LeaveSimulationEvent):
            # flush the final interval with a zero stock change
            self.add_to_stock(0, event.timestamp)
class StationOccupationKPI(OccupationKPI):
"""
This KPI evaluates the time spent in the empty and full states (of a station),
and the stock relative time spent in the station
"""
def __init__(self):
super().__init__()
self.keys = [self.KEY_EMPTY_TIME, self.KEY_FULL_TIME, self.KEY_STOCK_TIME]
def get_capacity(self, element):
return element.capacity
def get_initial_stock(self, element):
return element.initial_stock
def update(self, event, agent):
"""
Update the stock and time counts from request events
:param agent:
:param event:
"""
super().update(event, agent)
if isinstance(event, RequestEvent) and event.request.success:
request = event.request
# update time | |
if jsont.get('options') is not None:
node['data']['opts'] = jsont['options']
if jsont.get('status') is not None:
node['data']['status'] = jsont['status']
if jsont.get('path') is not None:
path_list = jsont['path'].split('/')[1:]
path = ''
for path_part in path_list:
path = '{}/{}'.format(path, path_part.split('?')[0])
node['data']['path'] = path
node['data']['sensor_path'] = re.sub(r'/[^:]+:', '/', path).replace('/', '/{}:'.format(module), 1)
if jsont['name'] != module and jsont.get('children') is None or len(jsont['children']) == 0:
node['icon'] = 'glyphicon glyphicon-leaf'
if jsont.get('path') is not None:
if augments:
node['a_attr']['href'] = "show_node/{}/{}".format(module, jsont['path'].replace('?', '%3F'))
else:
path_list = jsont['path'].split('/')[1:]
path = ''
for schema in enumerate(pass_on_schemas):
path = '{}{}%3F{}/'.format(path, path_list[schema[0]].split('?')[0], schema[1])
node['a_attr']['href'] = "show_node/{}/{}".format(module, path)
pass_on_schemas.pop()
node['a_attr']['class'] = 'nodeClass'
node['a_attr']['style'] = 'color: #00e;'
elif jsont.get('children') is not None:
node['children'] = []
for child in jsont['children']:
node['children'].append(build_tree(child, module, pass_on_schemas, augments))
if len(pass_on_schemas) != 0 and jsont.get('schema_type') not in ['choice', 'case']:
pass_on_schemas.pop()
return node
def get_doc(mod_obj):
    """
    Gets document-name and reference from input module object safely.

    :param mod_obj: module object (dict-like with a ``get`` method)
    :return: an HTML link when a reference exists, the bare document name when
        only that exists, otherwise 'N/A'.
    """
    # BUGFIX/cleanup: the original wrapped any failure in ``raise Exception(e)``,
    # which discarded the exception type and obscured the traceback while still
    # propagating — let the original exception raise directly instead.
    doc_name = mod_obj.get('document-name')
    ref = mod_obj.get('reference')
    # NOTE(review): ref/doc_name are interpolated into HTML unescaped — if they
    # can contain untrusted data this is an injection risk; confirm upstream.
    if ref and doc_name:
        return '<a href="' + ref + '">' + doc_name + '</a>'
    if ref:
        return '<a href="' + ref + '">' + ref + '</a>'
    if doc_name:
        return doc_name
    return 'N/A'
def get_parent(mod_obj):
    """
    Gets parent of module object if it exists.

    :param mod_obj: module object
    :return: the 'belongs-to' module name for a submodule, else the module's own name
    """
    try:
        belongs_to = mod_obj.get('belongs-to')
    except Exception:
        # defensive fallback kept from the original implementation
        return mod_obj.get('name')
    return belongs_to if belongs_to else mod_obj.get('name')
def is_submod(mod_obj):
    """
    Find out whether module has a parent or not.

    :param mod_obj: module object
    :return: True when the module declares a 'belongs-to' parent, else False
    """
    try:
        # truthy 'belongs-to' marks a submodule; any lookup failure means "no"
        return bool(mod_obj.get('belongs-to'))
    except Exception:
        return False
def build_graph(module, mod_obj, orgs, nodes, edges, edge_counts, nseen, eseen, alerts, show_rfcs, colors,
                recurse=0, nested=False, show_subm=True, show_dir='both'):
    """
    Builds graph for impact_analysis. takes module name, and mod_obj, which has all of the modules
    dependents and dependencies.
    Goes through both dependents and dependencies and adds them to output if they are
    eligible (organization filter, maturity/RFC filter, submodule visibility).

    :param module: module name
    :param mod_obj: module object
    :param orgs: organizations array used as a filter
    :param nodes: nodes for output (circles); mutated in place
    :param edges: lines for output; mutated in place
    :param edge_counts: number of edges per module; mutated in place
    :param nseen: dict of nodes already emitted (cycle guard)
    :param eseen: dict of edges already emitted (duplicate/loop guard)
    :param alerts: alerts to show when something has gone awry.
    :param show_rfcs: (bool) show rfcs or not
    :param colors: pool of preassigned colors consumed by color_gen
    :param recurse: recursion level
    :param nested: (bool) True on recursive calls
    :param show_subm: (bool) submodules visibility status
    :param show_dir: direction filter: 'both', 'dependents' or 'dependencies'
    :return: None; results accumulate in nodes/edges/edge_counts
    """
    global found_orgs, found_mats
    is_subm = False
    if not show_subm and nested:
        # collapse hidden submodules onto their parent module
        module = get_parent(mod_obj)
    elif show_subm:
        is_subm = is_submod(mod_obj)
    if nested and nseen.get(module) is not None:
        # node already emitted by a previous recursive visit
        return
    if mod_obj.get('organization') is not None:
        org = mod_obj.get('organization')
    else:
        org = 'independent'
    if nested > 0 and len(orgs) > 0 and not (len(orgs) == 1 and orgs[0] == ''):
        # the organization filter only applies to recursive (nested) visits
        if org not in orgs:
            return
        found_orgs[org] = True
    try:
        dependents = mod_obj.get('dependents')
        dependencies = mod_obj.get('dependencies')
        mmat = get_maturity(mod_obj)
        if nested and mmat.get('olevel') == 'RATIFIED' and not show_rfcs:
            return
        color = color_gen(org, colors)
        if found_mats.get(mmat['level']) is None:
            found_mats[mmat['level']] = [module]
        else:
            found_mats[mmat['level']].append(module)
        document = get_doc(mod_obj)
        upper_org = ''
        if org:
            upper_org = org.upper()
        nodes.append({'data': {'id': "mod_{}".format(module), 'name': module, 'objColor': color,
                               'document': document, 'sub_mod': is_subm, 'org': upper_org, 'mat': mmat['level']}})
        if edge_counts.get(module) is None:
            edge_counts[module] = 0
        nseen[module] = True
        if (show_dir == 'both' or show_dir == 'dependents') and dependents is not None:
            for moda in dependents:
                mod = moda['name']
                is_msubm = False
                mobj = get_rev_org_obj(mod, alerts)
                if mobj is None:
                    continue
                if not show_subm:
                    mod = get_parent(mobj)
                else:
                    is_msubm = is_submod(mobj)
                if eseen.get("mod_{}:mod_{}".format(module, mod)):
                    continue
                eseen["mod_{}:mod_{}".format(module, mod)] = True
                maturity = get_maturity(mobj)
                if maturity['olevel'] == 'RATIFIED' and not show_rfcs:
                    continue
                org = mobj.get('organization')
                if not org:
                    org = 'UNKNOWN'
                mcolor = color_gen(org, colors)
                if found_mats.get(maturity['level']) is None:
                    found_mats[maturity['level']] = [mod]
                else:
                    found_mats[maturity['level']].append(mod)
                if len(orgs) > 0:
                    if org not in orgs:
                        continue
                    found_orgs[org] = True
                if mmat['olevel'] == 'INITIAL' or mmat['olevel'] == 'ADOPTED':
                    edge_counts[module] += 1
                if "mod_{}".format(module) != "mod_{}".format(mod):
                    edges.append({'data': {'source': "mod_{}".format(module), 'target': "mod_{}".format(mod),
                                           'objColor': mcolor, 'org': org.upper(), 'mat': maturity['level']}})
                if recurse > 0 or recurse < 0:
                    r = recurse - 1
                    build_graph(mod, mobj, orgs, nodes, edges, edge_counts, nseen, eseen, alerts, show_rfcs,
                                colors, r, True, show_subm, show_dir)
                else:
                    document = get_doc(mobj)
                    nodes.append(
                        {'data': {'id': "mod_{}".format(mod), 'name': mod, 'objColor': mcolor, 'document': document,
                                  'sub_mod': is_msubm, 'org': org.upper(), 'mat': maturity['level']}})
        if (show_dir == 'both' or show_dir == 'dependencies') and dependencies:
            for moda in dependencies:
                mod = moda['name']
                is_msubm = False
                mobj = get_rev_org_obj(mod, alerts)
                # NOTE(review): unlike the dependents loop there is no
                # ``mobj is None`` guard here; a missing module raises and is
                # swallowed by the outer except — confirm whether intended.
                if show_subm:
                    is_msubm = is_submod(mobj)
                else:
                    # NOTE(review): identical to the if-branch; the dependents
                    # loop remaps ``mod = get_parent(mobj)`` when submodules are
                    # hidden — this looks like an oversight but is kept as-is.
                    is_msubm = is_submod(mobj)
                if is_msubm:
                    continue
                if eseen.get("mod_{}:mod_{}".format(mod, module)) is not None:
                    continue
                if eseen.get("mod_{}:mod_{}".format(module, mod)) is not None:
                    # BUGFIX: the message was appended without .format(), so a
                    # literal "Loop found {} <=> {}" string reached the user.
                    alerts.append("Loop found {} <=> {}".format(module, mod))
                eseen["mod_{}:mod_{}".format(mod, module)] = True
                maturity = get_maturity(mobj)
                if maturity.get('olevel') == 'RATIFIED' and not show_rfcs:
                    continue
                org = mobj.get('organization')
                if org == '':
                    org = 'UNKNOWN'
                if found_mats.get(maturity['level']) is None:
                    found_mats[maturity['level']] = [mod]
                else:
                    found_mats[maturity['level']].append(mod)
                if len(orgs) > 0:
                    if org not in orgs:
                        continue
                    found_orgs[org] = True
                mcolor = color_gen(org, colors)
                if maturity['olevel'] == 'INITIAL' or maturity['olevel'] == 'ADOPTED':
                    if not edge_counts.get(mod):
                        edge_counts[mod] = 1
                    else:
                        edge_counts[mod] += 1
                if not nested:
                    if "mod_{}".format(mod) != "mod_{}".format(module):
                        edges.append({'data': {'source': "mod_{}".format(mod), 'target': "mod_{}".format(module),
                                               'objColor': mcolor, 'org': org.upper(), 'mat': maturity['level']}})
                if recurse > 0:
                    r = recurse - 1
                    # NOTE(review): this recursive call does not forward
                    # show_subm/show_dir like the dependents branch does —
                    # confirm whether the defaults are intended here.
                    build_graph(mod, mobj, orgs, nodes, edges, edge_counts, nseen, eseen, alerts, show_rfcs, colors,
                                r, True)
                elif not nested:
                    document = get_doc(mobj)
                    nodes.append(
                        {'data': {'id': "mod_{}".format(mod), 'name': mod, 'objColor': mcolor, 'document': document,
                                  'sub_mod': is_msubm, 'org': org.upper(), 'mat': maturity['level']}})
    except Exception as e:
        alerts.append("Failed to read dependency data for {}, {}".format(module, e))
def get_maturity(mod_obj, alerts=None):
    """
    Get maturity level of given module object.

    :param mod_obj: module object
    :param alerts: unused; kept for interface compatibility with callers
    :return: dict with 'color', 'level' and 'olevel' keys
    """
    global MATURITY_UNKNOWN, MATURITY_MAP
    maturity = {'color': MATURITY_UNKNOWN, 'level': 'N/A', 'olevel': 'N/A'}
    # BUGFIX/cleanup: the original wrapped failures in ``raise Exception(e)``,
    # discarding the exception type and obscuring the traceback while still
    # propagating — let the original exception raise directly instead.
    if mod_obj.get('maturity-level'):
        mmat = mod_obj.get('maturity-level').upper()
    else:
        mmat = ''
    if MATURITY_MAP.get(mmat) is not None:
        maturity = {'color': MATURITY_MAP[mmat], 'level': mmat, 'olevel': mmat}
        # modules still in flight are downgraded when their compilation failed
        if mmat == 'INITIAL' or mmat == 'ADOPTED':
            cstatus = get_compile_status(mod_obj)
            if cstatus == 'failed':
                level = 'COMPILATION FAILED'
                maturity = {'color': MATURITY_MAP[level], 'level': level, 'olevel': mmat}
    return maturity
def get_compile_status(mod_obj):
    """
    Gets compilation status of given module object.

    :param mod_obj: module object
    :return: compilation status string, or '' when missing/unavailable
    """
    try:
        status = mod_obj.get('compilation-status')
    except Exception:
        # e.g. mod_obj is None or not dict-like — treat as "no status"
        return ''
    return '' if status is None else status
def color_gen(org, colors):
    """
    Color generator for impact_analysis website, dependent organization and it's arguments.
    Makes request to local database.

    Colors come from the ``colors`` pool first; once exhausted, colors are
    synthesized by stepping around the hue circle (HSV-like, full
    saturation/value), with the number of steps sized to the number of
    distinct organizations found in the modules index.

    :param org: organization (case-insensitive; cached upper-cased)
    :param colors: pool of preassigned colors, consumed with pop()
    :return: color as a '#rrggbb' string
    """
    global NUM_STEPS, CUR_STEP, ORG_CACHE
    if org:
        org = org.upper()
    # each organization gets exactly one color, cached for the process lifetime
    if ORG_CACHE.get(org) is not None:
        return ORG_CACHE[org]
    if NUM_STEPS == -1:
        # lazily size the hue wheel to the number of distinct organizations
        try:
            query = \
                {
                    "size": 0,
                    "aggs": {
                        "distinct_orgs": {
                            "cardinality": {
                                "field": "organization.keyword"
                            }
                        }
                    }
                }
            row = es.search(index='modules', doc_type='modules', body=query)['aggregations']['distinct_orgs']['value']
            NUM_STEPS = row + 1
        except Exception as e:
            # NOTE(review): the fallback of 33 steps only takes effect on the
            # NEXT call, because the exception is re-raised here (and re-wrapped,
            # losing the original type) — confirm whether that is intended.
            NUM_STEPS = 33
            raise Exception(e)
    if len(colors) != 0:
        # preassigned colors take priority while the pool lasts
        ORG_CACHE[org] = colors.pop()
    else:
        # hue-to-RGB conversion for hue h in [0, 1) at full saturation/value
        r = -1
        g = -1
        b = -1
        h = CUR_STEP / NUM_STEPS
        i = int(h * 6)
        f = h * 6 - i
        q = 1 - f
        result = i % 6
        if result == 0:
            r = 1
            g = f
            b = 0
        elif result == 1:
            r = q
            g = 1
            b = 0
        elif result == 2:
            r = 0
            g = 1
            b = f
        elif result == 3:
            r = 0
            g = q
            b = 1
        elif result == 4:
            r = f
            g = 0
            b = 1
        elif result == 5:
            r = 1
            g = 0
            b = q
        # build '#rrggbb'; the replace('x', '0') fixes single-digit channels
        # where hex() leaves its '0x' prefix inside the 2-char slice
        c = '#' + ('00' + hex(int(r * 255)))[-2:] + ('00' + hex(int(g * 255)))[-2:] + ('00' + hex(int(b * 255)))[-2:]
        c = c.replace('x', '0')
        ORG_CACHE[org] = c
    CUR_STEP += 1
    return ORG_CACHE[org]
def moduleFactoryFromSearch(search):
"""
Creates module based on api search. Used only | |
<filename>vericred_client/configuration.py
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s)
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import urllib3
import sys
import logging
from six import iteritems
from six.moves import http_client as httplib
def singleton(cls, *args, **kw):
    """
    Class decorator turning ``cls`` into a lazily-created singleton.

    The decorated name becomes a zero-argument factory: the first call
    instantiates ``cls`` with the captured ``args``/``kw``; every later call
    returns the same cached instance.
    """
    cache = {}

    def _singleton():
        # EAFP: the common path (instance already built) is a plain dict hit
        try:
            return cache[cls]
        except KeyError:
            cache[cls] = cls(*args, **kw)
            return cache[cls]

    return _singleton
@singleton
class Configuration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
    def __init__(self):
        """
        Constructor

        Populates all client settings with their defaults; callers mutate the
        singleton instance afterwards to configure authentication, logging
        and TLS behaviour.
        """
        # Default Base url
        self.host = "https://api.vericred.com/"
        # Default api client
        self.api_client = None
        # Temp file folder for downloading files
        self.temp_folder_path = None
        # Authentication Settings
        # dict to store API key(s), keyed by auth setting name
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""
        # Logging Settings
        self.logger = {}
        self.logger["package_logger"] = logging.getLogger("vericred_client")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Log format
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location; this assignment goes through the ``logger_file``
        # property setter defined on this class
        self.logger_file = None
        # Debug switch; presumably also a property with side effects — confirm
        self.debug = False
        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None
    @property
    def logger_file(self):
        """
        Gets the logger_file.

        :return: path of the debug log file, or None when file logging is disabled
        """
        return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""
Sets | |
<reponame>xclxxl414/rqalpha<filename>rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.model.base_position import BasePosition
from rqalpha.environment import Environment
from rqalpha.const import SIDE, POSITION_EFFECT, DEFAULT_ACCOUNT_TYPE
from rqalpha.utils import is_valid_price
from rqalpha.utils.i18n import gettext as _
class FuturePosition(BasePosition):
__abandon_properties__ = []
    def __init__(self, order_book_id):
        """Create an empty future position for ``order_book_id``."""
        super(FuturePosition, self).__init__(order_book_id)
        # holdings are tracked per direction (buy/sell); the old/today split
        # presumably separates positions carried over from previous trading
        # days from those opened today — confirm against trading rules
        self._buy_old_holding_list = []
        self._sell_old_holding_list = []
        self._buy_today_holding_list = []
        self._sell_today_holding_list = []
        # accumulated transaction costs, per direction
        self._buy_transaction_cost = 0.
        self._sell_transaction_cost = 0.
        # realized (closed-trade) pnl, per direction
        self._buy_realized_pnl = 0.
        self._sell_realized_pnl = 0.
        # average open price, per direction
        self._buy_avg_open_price = 0.
        self._sell_avg_open_price = 0.
    def __repr__(self):
        # full attribute dump; intended for debugging/logging only
        return 'FuturePosition({})'.format(self.__dict__)

    def get_state(self):
        """Serialize this position to a plain dict (counterpart of ``set_state``)."""
        return {
            'order_book_id': self._order_book_id,
            'buy_old_holding_list': self._buy_old_holding_list,
            'sell_old_holding_list': self._sell_old_holding_list,
            'buy_today_holding_list': self._buy_today_holding_list,
            'sell_today_holding_list': self._sell_today_holding_list,
            'buy_transaction_cost': self._buy_transaction_cost,
            'sell_transaction_cost': self._sell_transaction_cost,
            'buy_realized_pnl': self._buy_realized_pnl,
            'sell_realized_pnl': self._sell_realized_pnl,
            'buy_avg_open_price': self._buy_avg_open_price,
            'sell_avg_open_price': self._sell_avg_open_price,
            # margin rate may change
            'margin_rate': self.margin_rate,
        }
def set_state(self, state):
assert self._order_book_id == state['order_book_id']
self._buy_old_holding_list = state['buy_old_holding_list']
self._sell_old_holding_list = state['sell_old_holding_list']
self._buy_today_holding_list = state['buy_today_holding_list']
self._sell_today_holding_list = state['sell_today_holding_list']
self._buy_transaction_cost = state['buy_transaction_cost']
self._sell_transaction_cost = state['sell_transaction_cost']
self._buy_avg_open_price = state['buy_avg_open_price']
self._sell_avg_open_price = state['sell_avg_open_price']
    @property
    def type(self):
        # account type marker used by the framework to classify this position
        return DEFAULT_ACCOUNT_TYPE.FUTURE.name

    @property
    def margin_rate(self):
        """[float] margin rate: long margin ratio scaled by the configured multiplier."""
        env = Environment.get_instance()
        margin_info = env.data_proxy.get_margin_info(self.order_book_id)
        margin_multiplier = env.config.base.margin_multiplier
        return margin_info['long_margin_ratio'] * margin_multiplier

    @property
    def market_value(self):
        """[float] net market value: (buy qty - sell qty) * last price * contract multiplier."""
        return (self.buy_quantity - self.sell_quantity) * self.last_price * self.contract_multiplier

    @property
    def buy_market_value(self):
        """[float] market value of the buy (long) side."""
        return self.buy_quantity * self.last_price * self.contract_multiplier

    @property
    def sell_market_value(self):
        """[float] market value of the sell (short) side."""
        return self.sell_quantity * self.last_price * self.contract_multiplier

    # -- PNL related
    @property
    def contract_multiplier(self):
        """[float] contract multiplier of this instrument."""
        return Environment.get_instance().get_instrument(self.order_book_id).contract_multiplier

    @property
    def open_orders(self):
        """[list] open orders on this instrument, as reported by the broker."""
        return Environment.get_instance().broker.get_open_orders(self.order_book_id)
    @property
    def buy_holding_pnl(self):
        """
        [float] holding (mark-to-market) pnl of the buy side for the day
        """
        return (self.last_price - self.buy_avg_holding_price) * self.buy_quantity * self.contract_multiplier

    @property
    def sell_holding_pnl(self):
        """
        [float] holding (mark-to-market) pnl of the sell side for the day
        """
        return (self.sell_avg_holding_price - self.last_price) * self.sell_quantity * self.contract_multiplier

    @property
    def buy_realized_pnl(self):
        """
        [float] realized (closed-trade) pnl of the buy side
        """
        return self._buy_realized_pnl

    @property
    def sell_realized_pnl(self):
        """
        [float] realized (closed-trade) pnl of the sell side
        """
        return self._sell_realized_pnl

    @property
    def holding_pnl(self):
        """
        [float] holding pnl of the day (buy + sell sides)
        """
        return self.buy_holding_pnl + self.sell_holding_pnl

    @property
    def realized_pnl(self):
        """
        [float] realized pnl of the day (buy + sell sides)
        """
        return self.buy_realized_pnl + self.sell_realized_pnl

    @property
    def buy_daily_pnl(self):
        """
        [float] total pnl of the buy side for the day (holding + realized)
        """
        return self.buy_holding_pnl + self.buy_realized_pnl

    @property
    def sell_daily_pnl(self):
        """
        [float] total pnl of the sell side for the day (holding + realized)
        """
        return self.sell_holding_pnl + self.sell_realized_pnl

    @property
    def daily_pnl(self):
        """
        [float] total pnl of the day
        """
        return self.holding_pnl + self.realized_pnl

    @property
    def buy_pnl(self):
        """
        [float] cumulative pnl of the buy side (vs. average open price)
        """
        return (self.last_price - self._buy_avg_open_price) * self.buy_quantity * self.contract_multiplier

    @property
    def sell_pnl(self):
        """
        [float] cumulative pnl of the sell side (vs. average open price)
        """
        return (self._sell_avg_open_price - self.last_price) * self.sell_quantity * self.contract_multiplier

    @property
    def pnl(self):
        """
        [float] cumulative pnl (buy + sell sides)
        """
        return self.buy_pnl + self.sell_pnl
# -- Quantity related
@property
def buy_open_order_quantity(self):
    """
    [int] Unfilled quantity of buy-side OPEN orders.
    """
    return sum(order.unfilled_quantity for order in self.open_orders if
               order.side == SIDE.BUY and order.position_effect == POSITION_EFFECT.OPEN)

@property
def sell_open_order_quantity(self):
    """
    [int] Unfilled quantity of sell-side OPEN orders.
    """
    return sum(order.unfilled_quantity for order in self.open_orders if
               order.side == SIDE.SELL and order.position_effect == POSITION_EFFECT.OPEN)

@property
def buy_close_order_quantity(self):
    """
    [int] Unfilled quantity of buy-side CLOSE / CLOSE_TODAY orders.
    """
    return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
               order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])

@property
def sell_close_order_quantity(self):
    """
    [int] Unfilled quantity of sell-side CLOSE / CLOSE_TODAY orders.
    """
    return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
               order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
# Resolved unresolved git merge conflict: the HEAD hunk was empty and the
# upstream/master hunk only added these private helpers, so keeping the
# upstream side is strictly additive.
@property
def _buy_close_today_order_quantity(self):
    # Unfilled buy-side CLOSE_TODAY order quantity.
    return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
               order.position_effect == POSITION_EFFECT.CLOSE_TODAY)

@property
def _sell_close_today_order_quantity(self):
    # Unfilled sell-side CLOSE_TODAY order quantity.
    return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
               order.position_effect == POSITION_EFFECT.CLOSE_TODAY)

@property
def _closable_today_sell_quantity(self):
    # Today's sell holdings not already reserved by pending buy close-today orders.
    return self.sell_today_quantity - self._buy_close_today_order_quantity

@property
def _closable_today_buy_quantity(self):
    # Today's buy holdings not already reserved by pending sell close-today orders.
    return self.buy_today_quantity - self._sell_close_today_order_quantity
@property
def buy_old_quantity(self):
    """
    [int] Buy-side (long) position carried over from previous trading days.
    """
    return sum(amount for price, amount in self._buy_old_holding_list)

@property
def sell_old_quantity(self):
    """
    [int] Sell-side (short) position carried over from previous trading days.
    """
    return sum(amount for price, amount in self._sell_old_holding_list)

@property
def buy_today_quantity(self):
    """
    [int] Buy-side position opened today.
    """
    return sum(amount for price, amount in self._buy_today_holding_list)

@property
def sell_today_quantity(self):
    """
    [int] Sell-side position opened today.
    """
    return sum(amount for price, amount in self._sell_today_holding_list)

@property
def buy_quantity(self):
    """
    [int] Total buy-side position (carried-over + today).
    """
    return self.buy_old_quantity + self.buy_today_quantity

@property
def sell_quantity(self):
    """
    [int] Total sell-side position (carried-over + today).
    """
    return self.sell_old_quantity + self.sell_today_quantity

@property
def closable_buy_quantity(self):
    """
    [float] Buy-side quantity still closable, i.e. not already reserved
    by pending sell-side close orders.
    """
    return self.buy_quantity - self.sell_close_order_quantity

@property
def closable_sell_quantity(self):
    """
    [float] Sell-side quantity still closable, i.e. not already reserved
    by pending buy-side close orders.
    """
    return self.sell_quantity - self.buy_close_order_quantity
# -- Margin related
@property
def buy_margin(self):
    """
    [float] Margin held against the buy-side position.
    """
    return self._buy_holding_cost * self.margin_rate

@property
def sell_margin(self):
    """
    [float] Margin held against the sell-side position.
    """
    return self._sell_holding_cost * self.margin_rate

@property
def margin(self):
    """
    [float] Total margin.
    """
    # TODO: add handling for the single-side ("larger leg") margin rule,
    # where margin is only charged on the larger of the two sides.
    return self.buy_margin + self.sell_margin

@property
def buy_avg_holding_price(self):
    """
    [float] Average holding price of the buy side (0 when flat).
    """
    return 0 if self.buy_quantity == 0 else self._buy_holding_cost / self.buy_quantity / self.contract_multiplier

@property
def sell_avg_holding_price(self):
    """
    [float] Average holding price of the sell side (0 when flat).
    """
    return 0 if self.sell_quantity == 0 else self._sell_holding_cost / self.sell_quantity / self.contract_multiplier

@property
def _buy_holding_cost(self):
    # Notional cost of the buy-side lots: sum(price * amount * multiplier).
    return sum(p * a * self.contract_multiplier for p, a in self.buy_holding_list)

@property
def _sell_holding_cost(self):
    # Notional cost of the sell-side lots.
    return sum(p * a * self.contract_multiplier for p, a in self.sell_holding_list)

@property
def buy_holding_list(self):
    # All buy-side (price, amount) lots: carried-over plus today's.
    return self._buy_old_holding_list + self._buy_today_holding_list

@property
def sell_holding_list(self):
    # All sell-side (price, amount) lots: carried-over plus today's.
    return self._sell_old_holding_list + self._sell_today_holding_list

@property
def buy_avg_open_price(self):
    # [float] Volume-weighted average open price of the buy side.
    return self._buy_avg_open_price

@property
def sell_avg_open_price(self):
    # [float] Volume-weighted average open price of the sell side.
    return self._sell_avg_open_price

@property
def buy_transaction_cost(self):
    # [float] Accumulated buy-side transaction costs (reset at settlement).
    return self._buy_transaction_cost

@property
def sell_transaction_cost(self):
    # [float] Accumulated sell-side transaction costs (reset at settlement).
    return self._sell_transaction_cost

@property
def transaction_cost(self):
    # [float] Total transaction costs.
    return self._buy_transaction_cost + self._sell_transaction_cost
# -- Function
# Resolved unresolved git merge conflict: the HEAD hunk was empty, the
# upstream/master hunk added is_de_listed(); keeping upstream is additive.
def is_de_listed(self):
    """
    Return True if the contract has expired, i.e. the current trading
    date has reached the instrument's de-listing date.
    """
    # NOTE(review): uses self._order_book_id while sibling methods use the
    # public self.order_book_id -- presumably the same value; confirm.
    instrument = Environment.get_instance().get_instrument(self._order_book_id)
    current_date = Environment.get_instance().trading_dt
    if instrument.de_listed_date is not None and current_date >= instrument.de_listed_date:
        return True
    return False
def cal_close_today_amount(self, trade_amount, trade_side):
    """
    Portion of a closing trade that closes *today's* position.

    Carried-over (old) holdings are consumed first, so only the amount
    exceeding the old quantity closes today's holdings. Never negative.
    A SELL closes buy-side holdings and vice versa.
    """
    old_quantity = (
        self.buy_old_quantity if trade_side == SIDE.SELL else self.sell_old_quantity
    )
    return max(trade_amount - old_quantity, 0)
def apply_settlement(self):
    """
    Daily settlement: mark both sides to the settlement price, merge all
    holdings into the carried-over ("old") lists, and reset today's
    holdings, transaction costs and realized PnL.

    Raises RuntimeError when the settlement price is not a valid price.
    """
    # Resolved unresolved git merge conflict: kept the upstream/master hunk,
    # which passes the trading datetime directly and validates the price.
    env = Environment.get_instance()
    settle_price = env.data_proxy.get_settle_price(self.order_book_id, env.trading_dt)
    if not is_valid_price(settle_price):
        raise RuntimeError(_("Settlement price {settle_price} of {order_book_id} is invalid".format(
            settle_price=settle_price, order_book_id=self.order_book_id
        )))
    # Everything becomes "old" holdings priced at the settlement price.
    self._buy_old_holding_list = [(settle_price, self.buy_quantity)]
    self._sell_old_holding_list = [(settle_price, self.sell_quantity)]
    self._buy_today_holding_list = []
    self._sell_today_holding_list = []
    # Costs and realized PnL are per-day figures; start fresh.
    self._buy_transaction_cost = 0.
    self._sell_transaction_cost = 0.
    self._buy_realized_pnl = 0.
    self._sell_realized_pnl = 0.
def _margin_of(self, quantity, price):
    """Margin required to hold ``quantity`` contracts at ``price``."""
    instrument = Environment.get_instance().data_proxy.instruments(self.order_book_id)
    notional = quantity * instrument.contract_multiplier * price
    return notional * self.margin_rate
def apply_trade(self, trade):
    """
    Apply a trade and return the cash delta it produces.
    (Resolved merge conflict: the conflicting hunk only added this
    docstring, so it is kept.)

    Opening:
        delta_cash = -1 * margin
                   = -1 * quantity * contract_multiplier * price * margin_rate
    Closing:
        delta_cash = old_margin - margin + delta_realized_pnl

    :param trade: rqalpha.model.trade.Trade
    :return: float
    """
    trade_quantity = trade.last_quantity
    if trade.side == SIDE.BUY:
        if trade.position_effect == POSITION_EFFECT.OPEN:
            # Opening long: update the volume-weighted average open price,
            # record the new lot, and pay margin for the opened contracts.
            self._buy_avg_open_price = (self._buy_avg_open_price * self.buy_quantity +
                                        trade_quantity * trade.last_price) / (self.buy_quantity + trade_quantity)
            self._buy_transaction_cost += trade.transaction_cost
            self._buy_today_holding_list.insert(0, (trade.last_price, trade_quantity))
            return -1 * self._margin_of(trade_quantity, trade.last_price)
        else:
            # Buying to close a short position: release margin, realize PnL.
            old_margin = self.margin
            self._sell_transaction_cost += trade.transaction_cost
            delta_realized_pnl = self._close_holding(trade)
            self._sell_realized_pnl += delta_realized_pnl
            return old_margin - self.margin + delta_realized_pnl
    else:
        if trade.position_effect == POSITION_EFFECT.OPEN:
            # Opening short: mirror image of the long-open branch.
            self._sell_avg_open_price = (self._sell_avg_open_price * self.sell_quantity +
                                         trade_quantity * trade.last_price) / (self.sell_quantity + trade_quantity)
            self._sell_transaction_cost += trade.transaction_cost
            self._sell_today_holding_list.insert(0, (trade.last_price, trade_quantity))
            return -1 * self._margin_of(trade_quantity, trade.last_price)
        else:
            # Selling to close a long position.
            old_margin = self.margin
            self._buy_transaction_cost += trade.transaction_cost
            delta_realized_pnl = self._close_holding(trade)
            self._buy_realized_pnl += delta_realized_pnl
            return old_margin - self.margin + delta_realized_pnl
def _close_holding(self, trade):
<<<<<<< HEAD
=======
"""
应用平仓,并计算平仓盈亏
买平:
delta_realized_pnl = sum of ((trade_price - cost_price)* quantity) of closed trades * contract_multiplier
卖平:
delta_realized_pnl = sum of ((cost_price - trade_price)* quantity) of closed trades * contract_multiplier
:param trade: rqalpha.model.trade.Trade
:return: float
"""
>>>>>>> upstream/master
left_quantity = trade.last_quantity
delta = 0
if trade.side == SIDE.BUY:
# 先平昨仓
<<<<<<< HEAD
if len(self._sell_old_holding_list) != 0:
=======
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._sell_old_holding_list) != 0:
>>>>>>> upstream/master
old_price, old_quantity = self._sell_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
<<<<<<< HEAD
# 再平进仓
=======
# 再平今仓
>>>>>>> upstream/master
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._sell_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
else:
# 先平昨仓
<<<<<<< HEAD
if len(self._buy_old_holding_list) != 0:
=======
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._buy_old_holding_list) != 0:
>>>>>>> upstream/master
old_price, old_quantity = self._buy_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = | |
else:
self._config._debug_trace("cachemiss", len(view), a_end-a_beg, 1)
data = bytearray(a_end - a_beg)
self._accessor.read(seg, a_beg, data)
self._cached_data = (seg, a_beg, data)
view[:] = data[offset-a_beg:offset-a_beg+len(view)]
def xx_read(self, in_offset, in_size, *, usagehint=UsageHint.Unknown):
    """Read in_size bytes starting at in_offset, spanning segments as needed."""
    self._validate_read(in_offset, in_size)
    parts = self._split_by_segment([(in_offset, in_size)])
    buffer = bytearray(in_size)
    out = memoryview(buffer)
    highest = max(seg for seg, _, _, _ in parts)
    if highest >= len(self._sizes):
        # This should only happen in white box unit tests; the higher ZGY
        # levels should already have checked for EOF. But seismic store can
        # return a really obscure error message and/or hang in a retry loop
        # if we don't do this check.
        raise ZgyEndOfFile("Attempt to read from segment " + str(highest))
    for seg, seg_offset, nbytes, outpos in parts:
        self._cached_read(seg, seg_offset, out[outpos:outpos + nbytes])
    return buffer
def xx_readv(self, requests, *, parallel_ok=False, immutable_ok=False, transient_ok=False, usagehint=UsageHint.Unknown):
    """
    Read multiple (offset, size, delivery) requests in one operation,
    handling both brick consolidation and multi threading.

    This implementation will issue a single readv() request to the
    seismic store wrapper, wait for all threads to complete, and
    then deliver all the results. For this reason it needs to
    allocate a buffer to hold the entire data to be read.

    In the future it might be possible to have the seismic store
    wrapper support delivery callbacks and for it to allocate
    the result buffers itself. This saves some memory and also
    allows data to be decompressed if needed and copied out to
    user space as the bricks become available. Caveat: requests
    may need to be split if they cross a segment boundary.
    This means that we need support for partial delivery.
    Which would complicate things a lot.
    """
    self._validate_readv(requests)
    # I don't really like this kind of short cut since it creates
    # a lot of corner cases to test for. But, if the naive caching
    # is in effect then it is needed to make caching work.
    # If multiple requests then the cache won't work anyway,
    # and we might as well clear any data it contains.
    # TODO-Performance, can I move this test after consolidate
    # and split? Otherwise it will probably only work for the headers
    # and when the application really did fetch just one brick at
    # time. It might be good enough for Petrel though.
    if self._config.aligned and len(requests) == 1:
        for offset, size, delivery in requests:
            # Non-virtual call to the base implementation (goes through
            # the single-entry cache).
            delivery(SeismicStoreFile.xx_read(self, offset, size, usagehint=usagehint))
        return
    self._cached_data = None
    # For debugging / logging only.
    asked = sum([e[1] for e in requests])
    # Merge adjacent / nearby requests into fewer, larger ones.
    new_requests = self._consolidate_requests(requests,
                                              max_hole=self._config.maxhole,
                                              max_size=self._config.maxsize,
                                              force_align=self._config.aligned,
                                              eof=self.xx_eof)
    # Split any request that crosses a segment boundary.
    work = self._split_by_segment(new_requests)
    # TODO-Low: For robustness scan work[] to get realsize. As the
    # C++ code in impl/file_sd.cpp SeismicStoreFile::xx_readv() does.
    realsize = work[-1][2] + work[-1][3] if work else 0
    data = bytearray(realsize)
    view = memoryview(data)
    eof = sum(self._sizes)
    # Read bulk data from seismic store using multiple threads.
    self._config._debug_trace("readv", asked, len(view), len(work))
    self._accessor.readv(work, data, self._config.threads)
    # Deliver result to caller, clipping each slice to EOF.
    pos = 0
    for offset, size, delivery in new_requests:
        size = max(0, min(size, eof - offset))
        delivery(view[pos:pos+size])
        pos += size
def xx_write(self, data, offset, *, usagehint=UsageHint.Unknown):
    """
    Write one segment to seismic store and return the number of bytes
    written. Only two kinds of writes are legal: appending a new segment
    exactly at the current EOF, or rewriting an existing segment with
    exactly the same size.
    """
    self._validate_write(data, offset)
    current_eof = SeismicStoreFile.xx_eof.__get__(self) # nonvirtual call
    #print("SeismicStoreFile.xx_write(offset={0}, size={1}, current EOF is {2}".format(offset, len(data), current_eof))
    if offset == current_eof:
        # Sequential write from current EOF.
        # Specific limitation for ZGY, for performance reasons only.
        # This means we don't need to fetch sizes for all segments
        # when opening a file for read. Note that since the last
        # segment can have any size we won't discover a violation
        # until the following read.
        if len(self._sizes) >= 3 and self._sizes[-1] != self._sizes[1]:
            raise ZgyUserError("Cannot write arbitrarily sized segment.")
        self._config._debug_trace("append", len(data), len(data), 1)
        self._accessor.write(len(self._sizes), data, False)
        self._sizes.append(len(data))
    elif offset < current_eof:
        # Rewrite existing block. Resizing not allowed.
        # Walk the segments; "offset" is repeatedly reduced so it becomes
        # relative to the start of the segment being examined.
        seg = 0
        for segsize in self._sizes:
            if offset == 0:
                if len(data) == segsize:
                    self._config._debug_trace("write", len(data), len(data), 1)
                    self._accessor.write(seg, data, True)
                    break
                else:
                    raise ZgySegmentIsClosed("Cannot write resized segment.")
            elif offset < segsize:
                # Start of the write falls inside, not at, this segment.
                raise ZgySegmentIsClosed("Cannot write part of segment.")
            seg += 1
            offset -= segsize
    else:
        # Attempting to write sparse data.
        raise ZgyUserError("Cannot write segments out of order.")
    return len(data)
# If I want to disable threading, possibly also consolidation:
#xx_readv = FileADT._forward_consolidated_readv
#xx_readv = FileADT._forward_readv

@property
def threadsafe(self):
    # Only files opened read-only ("rb") are safe to share between threads.
    # BUGFIX: the original tested `self._mode in ("rb")`, which is a
    # substring test against the string "rb" (so modes "r" and "b" also
    # matched); a one-element tuple was clearly intended.
    return self._mode in ("rb",)

@property
def xx_iscloud(self):
    # This file type talks to seismic store, i.e. cloud storage.
    return True
class SeismicStoreFileDelayedWrite(FileADT):
    """
    Improve on SeismicStoreFile, have it buffer large chunks of data before
    writing it out to a new segment.

    * Writes starting at EOF are allowed, and will buffer data in the
      "open segment" until explicitly flushed.
    * Writes starting past EOF, signifying a hole in the data, are not
      allowed.
    * Writes fully inside the open segment are allowed.
    * Writes starting before the open segment are only allowed if
      offset,size exactly matches a previous write. This will cause that
      segment to be rewritten. As a corollary, writes cannot span the
      closed segment / open segment boundary.
    * Possible future extension: For the last segment only offset
      needs to match. This means the last segment may be resized.
      Why we might want this: On opening a file with existing
      data bricks we might choose to read the last segment and
      turn it into an open segment. Then delete (in memory only)
      the last segment. When it is time to flush the data it gets
      rewritten. This allows adding bricks to a file, while still
      ensuring that all segments except first and last need to be
      the same size. Note that there are other tasks such as
      incrementally updating statistics and histogram that might
      turn out to be a lot of work.
    * When used to create ZGY files, caller must honor the convention
      that all segments except the first and last must have the same size.
    * Caveat: The fact that random writes are sometimes allowed, sometimes
      not depending on the segment number violates the principle of
      least surprise. And makes for more elaborate testing. For ZGY
      it is quite useful though. ZGY can recover from a ZgySegmentIsClosed
      exception by abandoning (leaking) the current block and write it
      to a new location. With a typical access pattern this will happen
      only occasionally.
    """
def __init__(self, filename, mode, iocontext):
    """Open the underlying seismic store file and set up the write buffer."""
    super().__init__(filename, mode, iocontext)
    # All actual segment I/O is delegated to a plain SeismicStoreFile.
    self._relay = SeismicStoreFile(filename, mode, iocontext)
    self._mode = mode
    # Data buffered but not yet written out as a segment.
    self._open_segment = bytearray()
    self._usage_hint = None
    # Share the relay's configuration object.
    self._config = self._relay._config
def __enter__(self):
    # Context manager protocol: nothing extra to do on entry.
    return self

def __exit__(self, type, value, traceback):
    # Close (via self.xx_close) when the with-block exits.
    self.xx_close()
def _flush_part(self, this_segsize):
    """
    Flush "this_segsize" pending bytes. Leave any residual data
    in the open segment buffer.
    """
    assert this_segsize <= len(self._open_segment)
    assert len(self._open_segment) > 0
    # memoryview lets us slice the buffer without copying it.
    flushme = memoryview(self._open_segment)
    nbytes = self._relay.xx_write(flushme[:this_segsize],
                                  self._relay.xx_eof,
                                  usagehint=self._usage_hint)
    if nbytes != this_segsize:
        raise ZgyInternalError("Short write to seismic store")
    # Keep whatever did not fit in this segment.
    self._open_segment = bytearray(flushme[this_segsize:])
def _flush(self, final):
    """
    Flush pending writes, but only when one or more complete segments
    have accumulated, or unconditionally when the file is being closed.
    The last segment is allowed to be smaller than the others.
    """
    segsize = self._config.segsize
    if segsize > 0:
        # Emit as many full-size segments as the buffer currently holds.
        while len(self._open_segment) >= segsize:
            self._flush_part(segsize)
    if final and self._open_segment:
        # Closing the file: write out the (possibly short) remainder.
        self._flush_part(len(self._open_segment))
    if not self._open_segment:
        self._usage_hint = None
@property
def xx_eof(self):
    """
    Current size of the zgy file: bytes already in seismic store plus
    bytes still buffered in the open segment.
    """
    return len(self._open_segment) + self._relay.xx_eof
def xx_write(self, data, offset, *, usagehint=UsageHint.Unknown):
"""
Write data to seismic store, buffering the writes to get larger
segment sizes. Writes are only allowed at offset 0 and at EOF.
This is less general then the parent type which lets us rewrite
any segment as long as its size does not change.
Segment 0 contains just the headers and is always written in one
operation, so this is not buffered. Segment 0 can be both smaller
and larger than segsize. Which is another reason to bypass the
buffering code. Also, if we are rewriting data we bypass the
buffering and require that the caller updates the entire segment.
ZGY will currently only rewrite segment 0.
If segsize is zero no buffering is | |
None:
"""
- Use both caches
- Execute and cache
- Reset the mem cache
- Execute again
- Check that the cached value is found in the disk cache
"""
# Use both memory and disk cache
f, cf = self._get_f_cf_functions(use_mem_cache=True, use_disk_cache=True)
# 1) Verify that it is executed.
_LOG.debug("\n%s", hprint.frame("Execute the 1st time"))
self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="no_cache")
# 2) Verify that it is not executed, since it's cached in memory.
_LOG.debug("\n%s", hprint.frame("Execute the 2nd time"))
self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="mem")
# 3) Reset memory cache.
hcache.clear_global_cache("mem", self.cache_tag)
# 4) Verify that it is not executed, since it's in the disk cache.
_LOG.debug("\n%s", hprint.frame("Execute the 3rd time"))
self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="disk")
# ////////////////////////////////////////////////////////////////////////////
def test_redefined_function(self) -> None:
    """
    If the cached function is redefined, but it's still the same, then the
    intrinsic function should not be recomputed.
    """
    # Define the function inline imitating working in a notebook.
    _LOG.debug("\n%s", hprint.frame("Define function"))
    add = _get_add_function()
    cached_add = hcache._Cached(add, tag=self.cache_tag)
    # 1) Execute the first time: cache miss, the function runs.
    _LOG.debug("\n%s", hprint.frame("Execute the 1st time"))
    self._execute_and_check_state(
        add, cached_add, 1, 2, exp_cf_state="no_cache"
    )
    # 2) Execute the second time. Must use memory cache.
    _LOG.debug("\n%s", hprint.frame("Execute the 2nd time"))
    self._execute_and_check_state(add, cached_add, 1, 2, exp_cf_state="mem")
    # 3) Redefine the function inline (identical code, so the cached value
    #    stays valid).
    _LOG.debug("\n%s", hprint.frame("Redefine function"))
    add = _get_add_function()
    cached_add = hcache._Cached(add, tag=self.cache_tag)
    # 4) Execute the third time. Should still use memory cache.
    _LOG.debug("\n%s", hprint.frame("Execute the 3rd time"))
    self._execute_and_check_state(add, cached_add, 1, 2, exp_cf_state="mem")
    # 5) Execute the fourth time. Should still use memory cache.
    _LOG.debug("\n%s", hprint.frame("Execute the 4th time"))
    self._execute_and_check_state(add, cached_add, 1, 2, exp_cf_state="mem")
    # 6) Check that a call with other arguments misses the cache.
    _LOG.debug("\n%s", hprint.frame("Execute the 5th time"))
    self._execute_and_check_state(
        add, cached_add, 3, 4, exp_cf_state="no_cache"
    )
def test_changed_function(self) -> None:
    """
    If the function is redefined, but the code is not the same, then the
    intrinsic function should be recomputed.
    """
    # Define the function imitating working in a notebook.
    _LOG.debug("\n%s", hprint.frame("Define function"))

    def add(x: int, y: int) -> int:
        add.executed = True  # type: ignore[attr-defined]
        return x + y

    cached_add = hcache._Cached(add, tag=self.cache_tag)
    # 1) Execute the first time: cache miss, the function runs.
    _LOG.debug("\n%s", hprint.frame("Execute the 1st time"))
    self._execute_and_check_state(
        add, cached_add, 1, 2, exp_cf_state="no_cache"
    )
    # 2) Execute the second time. Must use memory cache.
    _LOG.debug("\n%s", hprint.frame("Execute the 2nd time"))
    self._execute_and_check_state(add, cached_add, 1, 2, exp_cf_state="mem")
    # 3) Redefine the function with different code.
    _LOG.debug("\n%s", hprint.frame("Redefine function"))
    # pylint: disable=function-redefined
    def add(x: int, y: int) -> int:  # type: ignore[no-redef]
        add.executed = True  # type: ignore[attr-defined]
        z = x + y
        return z

    cached_add = hcache._Cached(add, tag=self.cache_tag)
    # 4) Execute the third time. The code changed, so the function must be
    #    re-executed (the original comment wrongly said "memory cache").
    _LOG.debug("\n%s", hprint.frame("Execute the 3rd time"))
    self._execute_and_check_state(
        add, cached_add, 1, 2, exp_cf_state="no_cache"
    )
    # 5) Execute the fourth time. The new value is now in the memory cache.
    _LOG.debug("\n%s", hprint.frame("Execute the 4th time"))
    self._execute_and_check_state(add, cached_add, 1, 2, exp_cf_state="mem")
    # 6) Check that a call with other arguments misses the cache.
    _LOG.debug("\n%s", hprint.frame("Execute the 5th time"))
    self._execute_and_check_state(
        add, cached_add, 3, 4, exp_cf_state="no_cache"
    )
# #############################################################################
class _ResetFunctionSpecificCacheHelper(_ResetGlobalCacheHelper):
    """Test helper that also provides a function-specific disk cache dir."""

    def setUp(self) -> None:
        super().setUp()
        # Create a temp directory to store the function-specific cache.
        self.disk_cache_dir = tempfile.mkdtemp()
        # Clear the global cache so tests start from a known state.
        hcache.clear_global_cache("all", tag=self.cache_tag)
class TestFunctionSpecificCache1(_ResetFunctionSpecificCacheHelper):
    """Exercise the function-specific (per-function) disk cache."""

    def test_with_caching1(self) -> None:
        """
        - Use a function-specific disk cache (no memory cache)
        - Execute and cache, then execute again hitting the disk cache
        - Clear the global cache and check the function-specific cache survives
        """
        _LOG.debug("\n%s", hprint.frame("Starting"))
        _LOG.debug(
            "# get_global_cache_info()=\n%s",
            hcache.get_global_cache_info(tag=self.cache_tag),
        )
        f, cf = self._get_f_cf_functions(
            use_mem_cache=False,
            use_disk_cache=True,
            disk_cache_path=self.disk_cache_dir,
        )
        _LOG.debug(
            "# cf.get_function_cache_info()=\n%s", cf.get_function_cache_info()
        )
        # 1) Execute and verify that it is executed.
        _LOG.debug("\n%s", hprint.frame("Execute the 1st time"))
        self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="no_cache")
        # 2) Execute and verify that it is not executed, since it's cached on disk.
        _LOG.debug("\n%s", hprint.frame("Execute the 2nd time"))
        self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="disk")
        # 3) Clear the global cache: the function-specific cache is unaffected.
        _LOG.debug("\n%s", hprint.frame("clear_global_cache"))
        hcache.clear_global_cache("all")
        # 4) Execute and verify that it is not executed, since it's cached on
        #    disk. (Fixed the log label: this is the 3rd execution, the
        #    original message said "2nd".)
        _LOG.debug("\n%s", hprint.frame("Execute the 3rd time"))
        self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="disk")

    def test_with_caching2(self) -> None:
        """
        - Use the function-specific disk cache
        - Disable it, switching to the global cache
        - Re-enable it and check the previously cached values are still there
        """
        # Use only the per-function disk cache.
        f, cf = self._get_f_cf_functions(
            use_mem_cache=False, disk_cache_path=self.disk_cache_dir
        )
        # 1) Execute and verify that it is executed.
        _LOG.debug("\n%s", hprint.frame("Execute the 1st time"))
        self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="no_cache")
        # 2) Clear the global cache: the function-specific cache is unaffected.
        _LOG.debug("\n%s", hprint.frame("clear_global_cache"))
        hcache.clear_global_cache("all")
        # 3) Execute and verify that it is not executed.
        _LOG.debug("\n%s", hprint.frame("Execute the 2nd time"))
        self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="disk")
        # 4) Switch to the global cache.
        _LOG.debug(
            "\n%s", hprint.frame("Disable function cache and use global cache")
        )
        cf.set_function_cache_path(None)
        # 5) Execute and verify that the function is executed with the global cache.
        _LOG.debug("\n%s", hprint.frame("Execute the 3rd time"))
        self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="no_cache")
        # 6) Execute. Now we get the value from the memory cache, since
        #    disabling the function cache re-enables the memory cache.
        _LOG.debug("\n%s", hprint.frame("Execute the 4th time"))
        self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="mem")
        # 7) Restore the function-specific cache.
        _LOG.debug("\n%s", hprint.frame("Restore function cache"))
        cf.set_function_cache_path(self.disk_cache_dir)
        # 8) Verify that it is *NOT* executed: the old disk cache is used.
        _LOG.debug("\n%s", hprint.frame("Execute the 5th time"))
        self._execute_and_check_state(f, cf, 3, 4, exp_cf_state="disk")
# #############################################################################
class TestCachePerformance(_ResetGlobalCacheHelper):
    """Measure the overhead and benefit of the memory and disk caches."""

    def test_performance_dataframe(self) -> None:
        """
        Test performance of the cache over a pandas DataFrame.
        """
        # Create a somewhat big DataFrame with random data.
        df = pd.DataFrame(
            np.random.randint(0, 100, size=(100, 4)), columns=list("ABCD")
        )
        print("testing pandas dataframe, with sample size", df.shape)
        self._test_performance(df)

    def test_performance_series(self) -> None:
        """
        Test performance of the cache over a pandas Series.
        """
        # Create a somewhat big Series with random data.
        s = pd.Series(np.random.randint(0, 100, size=100))
        print("testing pandas series, with sample size", s.shape)
        self._test_performance(s)

    @staticmethod
    # pylint: disable=unused-argument
    def _computation(*args: Any) -> None:
        """
        Simulate work.

        :param args: throw-away arguments
        """
        # Emulate a small quantity of work.
        time.sleep(0.01)

    @staticmethod
    def _timeit(func: Callable, *args: Any) -> float:
        """
        Get a performance measure of the call to `func` with `args`.
        (Docstring fixed: it referred to a non-existent parameter `fn`.)

        :param func: callable function
        :param args: any arguments to pass to `func`
        :return: elapsed wall-clock time in seconds
        """
        perf_start = time.perf_counter()
        func(*args)
        perf_diff = time.perf_counter() - perf_start
        return perf_diff

    def _test_performance(self, val: Any) -> None:
        """
        Test performance of the cache over some argument `val`.

        :param val: any hashable argument
        """
        # Create cached versions of the computation function.
        _mem_cached_computation = hcache._Cached(
            self._computation,
            tag=self.cache_tag,
            use_mem_cache=True,
            use_disk_cache=False,
        )
        _disk_cached_computation = hcache._Cached(
            self._computation,
            tag=self.cache_tag,
            use_mem_cache=False,
            use_disk_cache=True,
        )
        # First step: no cache.
        no_cache_ct = self._timeit(lambda: self._computation(val))
        print("no cache run time=%f" % no_cache_ct)
        # Second step: memory cache (first call misses, second call hits).
        memory_no_cache_ct = self._timeit(lambda: _mem_cached_computation(val))
        print("empty memory cache run time=%f" % memory_no_cache_ct)
        print(
            "empty memory cache overhead=%f" % (memory_no_cache_ct - no_cache_ct)
        )
        memory_cache_ct = self._timeit(lambda: _mem_cached_computation(val))
        print("hot memory cache run time=%f" % memory_cache_ct)
        print("hot memory cache benefit=%f" % (no_cache_ct - memory_cache_ct))
        # Third step: disk cache (first call misses, second call hits).
        disk_no_cache_ct = self._timeit(lambda: _disk_cached_computation(val))
        print("empty disk cache run time=%f" % disk_no_cache_ct)
        print("empty disk cache overhead=%f" % (disk_no_cache_ct - no_cache_ct))
        disk_cache_ct = self._timeit(lambda: _disk_cached_computation(val))
        print("hot disk cache run time=%f" % disk_cache_ct)
        print("hot disk cache benefit=%f" % (no_cache_ct - disk_cache_ct))
# #############################################################################
class TestCacheDecorator(_ResetGlobalCacheHelper):
    """Test the public caching decorator."""

    def test_decorated_function(self) -> None:
        """
        Test the decorator with both caches enabled.
        """
        # Define the function inline imitating working in a notebook.
        @hcache.cache(tag=self.cache_tag)
        def add(x: int, y: int) -> int:
            add.__wrapped__.executed = True
            return x + y

        # Execute the first time: cache miss, the function runs.
        self._execute_and_check_state(
            add.__wrapped__, add, 1, 2, exp_cf_state="no_cache"
        )
        # Execute the second time. Must use memory cache.
        self._execute_and_check_state(
            add.__wrapped__, add, 1, 2, exp_cf_state="mem"
        )

    def test_decorated_function_no_mem(self) -> None:
        """
        Test the decorator with only the disk cache enabled.
        """
        # Define the function inline imitating working in a notebook.
        @hcache.cache(tag=self.cache_tag, use_mem_cache=False)
        def add(x: int, y: int) -> int:
            add.__wrapped__.executed = True
            return x + y

        # Execute the first time: cache miss, the function runs.
        self._execute_and_check_state(
            add.__wrapped__, add, 1, 2, exp_cf_state="no_cache"
        )
        # Execute the second time. Must use disk cache.
        self._execute_and_check_state(
            add.__wrapped__, add, 1, 2, exp_cf_state="disk"
        )
# #############################################################################
class TestAmpTask1407(_ResetGlobalCacheHelper):
def test1(self) -> None:
"""
A class method can't be | |
import numpy as np
import time
from .constants import log
from . import util
from . import convex
from . import nsphere
from . import grouping
from . import triangles
from . import transformations
try:
from scipy import spatial
from scipy import optimize
except ImportError:
log.warning('Scipy import failed!')
def oriented_bounds_2D(points, qhull_options='QbB'):
    """
    Find an oriented bounding box for a set of 2D points.

    Parameters
    ----------
    points: (n,2) float, 2D points
    qhull_options: str, options passed through to qhull when computing
                   the convex hull of the input points

    Returns
    ----------
    transform: (3,3) float, homogeneous 2D transformation matrix to move the
               input points so that the axis aligned bounding box
               is CENTERED AT THE ORIGIN
    rectangle: (2,) float, size of extents once input points are transformed
               by transform
    """
    # make sure input is a numpy array
    points = np.asanyarray(points)

    # create a convex hull object of our points
    # 'QbB' is a qhull option which has it scale the input to unit box
    # to avoid precision issues with very large/small meshes
    convex = spatial.ConvexHull(points,
                                qhull_options=qhull_options)

    # (n,2,3) line segments
    hull_edges = convex.points[convex.simplices]
    # (n,2) points on the convex hull
    hull_points = convex.points[convex.vertices]

    # direction of the edges of the hull polygon
    edge_vectors = np.diff(hull_edges, axis=1).reshape((-1, 2))
    # unitize vectors
    edge_vectors /= np.linalg.norm(edge_vectors, axis=1).reshape((-1, 1))

    # create a set of perpendicular vectors
    perp_vectors = np.fliplr(edge_vectors) * [-1.0, 1.0]

    # find the projection of every hull point on every edge vector
    # this does create a potentially gigantic n^2 array in memory,
    # and there is the 'rotating calipers' algorithm which avoids this
    # however, we have reduced n with a convex hull and numpy dot products
    # are extremely fast so in practice this usually ends up being pretty
    # reasonable
    x = np.dot(edge_vectors, hull_points.T)
    y = np.dot(perp_vectors, hull_points.T)

    # reduce the projections to maximum and minimum per edge vector
    bounds = np.column_stack((x.min(axis=1),
                              y.min(axis=1),
                              x.max(axis=1),
                              y.max(axis=1)))

    # calculate the extents and area for each edge vector pair
    extents = np.diff(bounds.reshape((-1, 2, 2)),
                      axis=1).reshape((-1, 2))
    # np.prod: np.product was deprecated and removed in NumPy 2.0
    area = np.prod(extents, axis=1)
    area_min = area.argmin()

    # (2,) float of smallest rectangle size
    rectangle = extents[area_min]

    # find the (3,3) homogeneous transformation which moves the input
    # points to have a bounding box centered at the origin
    offset = -bounds[area_min][:2] - (rectangle * .5)
    theta = np.arctan2(*edge_vectors[area_min][::-1])
    transform = transformations.planar_matrix(offset,
                                              theta)

    # we would like to consistently return an OBB with
    # the largest dimension along the X axis
    if np.less(*rectangle):
        # a 90 degree rotation
        flip = transformations.planar_matrix(theta=np.pi / 2)
        # apply the rotation
        transform = np.dot(flip, transform)
        # switch X and Y in the OBB extents
        rectangle = np.roll(rectangle, 1)

    return transform, rectangle
def oriented_bounds(obj, angle_digits=1):
    """
    Find the oriented bounding box for a Trimesh.

    Parameters
    ----------
    obj: Trimesh object, (n,3) or (n,2) float set of points
    angle_digits: int, number of digits the spherical coordinates of the
                  candidate hull normals are rounded to before
                  deduplication. Fewer digits check fewer candidate
                  orientations (faster but coarser); the returned extents
                  always cover the mesh, albeit with larger than minimal
                  volume for coarse settings.

    Returns
    ----------
    to_origin: (4,4) float, transformation matrix which will move the center of the
               bounding box of the input mesh to the origin.
    extents: (3,) float, the extents of the mesh once transformed with to_origin
    """
    # extract a set of convex hull vertices and normals from the input
    # we bother to do this to avoid recomputing the full convex hull if
    # possible
    if hasattr(obj, 'convex_hull'):
        # if we have been passed a mesh, use its existing convex hull to pull from
        # cache rather than recomputing. This version of the cached convex hull has
        # normals pointing in arbitrary directions (straight from qhull)
        # using this avoids having to compute the expensive corrected normals
        # that mesh.convex_hull uses since normal directions don't matter here
        vertices = obj.convex_hull.vertices
        hull_normals = obj.convex_hull.face_normals
    elif util.is_sequence(obj):
        points = np.asanyarray(obj)
        if util.is_shape(points, (-1, 2)):
            return oriented_bounds_2D(points)
        elif util.is_shape(points, (-1, 3)):
            hull_obj = spatial.ConvexHull(points)
            vertices = hull_obj.points[hull_obj.vertices]
            hull_normals, valid = triangles.normals(
                hull_obj.points[hull_obj.simplices])
        else:
            raise ValueError('Points are not (n,3) or (n,2)!')
    else:
        raise ValueError(
            'Oriented bounds must be passed a mesh or a set of points!')

    # convert face normals to spherical coordinates on the upper hemisphere
    # the vector_hemisphere call effectively merges negative but otherwise
    # identical vectors
    spherical_coords = util.vector_to_spherical(
        util.vector_hemisphere(hull_normals))
    # the unique_rows call on merge angles gets unique spherical directions to check
    # we get a substantial speedup in the transformation matrix creation
    # inside the loop by converting to angles ahead of time
    spherical_unique = grouping.unique_rows(spherical_coords,
                                            digits=angle_digits)[0]

    min_volume = np.inf
    tic = time.time()
    for spherical in spherical_coords[spherical_unique]:
        # a matrix which will rotate each hull normal to [0,0,1]
        to_2D = np.linalg.inv(transformations.spherical_matrix(*spherical))
        # apply the transform here
        projected = np.dot(to_2D, np.column_stack(
            (vertices, np.ones(len(vertices)))).T).T[:, :3]
        # np.ptp: ndarray.ptp was removed in NumPy 2.0
        height = np.ptp(projected[:, 2])
        rotation_2D, box = oriented_bounds_2D(projected[:, 0:2])
        # np.prod: np.product was deprecated and removed in NumPy 2.0
        volume = np.prod(box) * height
        if volume < min_volume:
            # record everything derived from THIS candidate; the 3D rotation
            # must be built from the best candidate's 2D rotation, not from
            # whichever candidate happens to be last in the loop
            min_volume = volume
            min_extents = np.append(box, height)
            min_2D = to_2D.copy()
            rotation_2D[0:2, 2] = 0.0
            rotation_Z = transformations.planar_matrix_to_3D(rotation_2D)

    # combine the 2D OBB transformation with the 2D projection transform
    to_origin = np.dot(rotation_Z, min_2D)

    # transform points using our matrix to find the translation for the
    # transform
    transformed = transformations.transform_points(vertices,
                                                   to_origin)
    box_center = (transformed.min(axis=0) + np.ptp(transformed, axis=0) * .5)
    to_origin[0:3, 3] = -box_center

    log.debug('oriented_bounds checked %d vectors in %0.4fs',
              len(spherical_unique),
              time.time() - tic)
    return to_origin, min_extents
def minimum_cylinder(obj, sample_count=10, angle_tol=.001):
    """
    Find the approximate minimum volume cylinder which contains a mesh or
    list of points.

    Samples a hemisphere then uses scipy.optimize to pick the
    final orientation of the cylinder.

    A nice discussion about better ways to implement this is here:
    https://www.staff.uni-mainz.de/schoemer/publications/ALGO00.pdf

    Parameters
    ----------
    obj: Trimesh object OR
         (n,3) float, points in space
    sample_count: int, how densely should we sample the hemisphere.
                  Angular spacing is 180 degrees / this number
    angle_tol: float, convergence tolerance (in radians) handed to the
               final scipy.optimize refinement of the axis direction

    Returns
    ----------
    result: dict, with keys:
                'radius'    : float, radius of cylinder
                'height'    : float, height of cylinder
                'transform' : (4,4) float, transform from the origin
                              to centered cylinder
    """
    def volume_from_angles(spherical, return_data=False):
        """
        Takes spherical coordinates and calculates the volume of a cylinder
        along that vector

        Parameters
        ---------
        spherical: (2,) float, theta and phi
        return_data: bool, flag for returned

        Returns
        --------
        if return_data:
            transform ((4,4) float)
            radius (float)
            height (float)
        else:
            volume (float)
        """
        to_2D = transformations.spherical_matrix(*spherical, axes='rxyz')
        projected = transformations.transform_points(hull, matrix=to_2D)
        # np.ptp: ndarray.ptp was removed in NumPy 2.0
        height = np.ptp(projected[:, 2])

        try:
            center_2D, radius = nsphere.minimum_nsphere(projected[:, 0:2])
        except BaseException:
            # in degenerate cases return as infinite volume
            return np.inf

        volume = np.pi * height * (radius ** 2)
        if return_data:
            center_3D = np.append(center_2D, projected[
                :, 2].min() + (height * .5))
            transform = np.dot(np.linalg.inv(to_2D),
                               transformations.translation_matrix(center_3D))
            return transform, radius, height
        return volume

    # reduce the input to a convex hull of 3D points
    hull = convex.hull_points(obj)
    if not util.is_shape(hull, (-1, 3)):
        raise ValueError('Input must be reducable to 3D points!')

    # sample a hemisphere so local hill climbing can do its thing
    samples = util.grid_linspace([[0, 0], [np.pi, np.pi]], sample_count)
    # add the principal inertia vectors if we have a mesh
    if hasattr(obj, 'principal_inertia_vectors'):
        samples = np.vstack(
            (samples, util.vector_to_spherical(
                obj.principal_inertia_vectors)))

    tic = [time.time()]
    # the projected volume at each sample
    volumes = np.array([volume_from_angles(i) for i in samples])
    # the best vector in (2,) spherical coordinates
    best = samples[volumes.argmin()]
    tic.append(time.time())

    # since we already explored the global space, set the bounds to be
    # just around the sample that had the lowest volume
    step = 2 * np.pi / sample_count
    bounds = [(best[0] - step, best[0] + step),
              (best[1] - step, best[1] + step)]
    # run the optimization
    r = optimize.minimize(volume_from_angles,
                          best,
                          tol=angle_tol,
                          method='SLSQP',
                          bounds=bounds)

    tic.append(time.time())
    log.info('Performed search in %f and minimize in %f', *np.diff(tic))

    # actually chunk the information about the cylinder
    transform, radius, height = volume_from_angles(r['x'], return_data=True)
    result = {'transform': transform,
              'radius': radius,
              'height': height}
    return result
def corners(bounds):
"""
Given a pair of axis aligned bounds, return all
8 corners of the bounding box.
Parameters
----------
bounds: (2,3) or (2,2) float, axis aligned bounds
Returns
----------
corners: (8,3) float, corner vertices of the cube
"""
bounds = | |
<filename>nimare/meta/cbma/ale.py
"""CBMA methods from the activation likelihood estimation (ALE) family."""
import logging
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from tqdm.auto import tqdm
from nimare import references
from nimare.due import due
from nimare.meta.cbma.base import CBMAEstimator, PairwiseCBMAEstimator
from nimare.meta.kernel import ALEKernel
from nimare.stats import null_to_p, nullhist_to_p
from nimare.transforms import p_to_z
from nimare.utils import _check_ncores, tqdm_joblib, use_memmap
LGR = logging.getLogger(__name__)
@due.dcite(references.ALE1, description="Introduces ALE.")
@due.dcite(
references.ALE2,
description="Modifies ALE algorithm to eliminate within-experiment "
"effects and generate MA maps based on subject group "
"instead of experiment.",
)
@due.dcite(
references.ALE3,
description="Modifies ALE algorithm to allow FWE correction and to "
"more quickly and accurately generate the null "
"distribution for significance testing.",
)
class ALE(CBMAEstimator):
    """Activation likelihood estimation.

    Parameters
    ----------
    kernel_transformer : :obj:`~nimare.meta.kernel.KernelTransformer`, optional
        Kernel with which to convolve coordinates from dataset.
        Default is ALEKernel.
    null_method : {"approximate", "montecarlo"}, optional
        Method by which to determine uncorrected p-values. The available options are

        ======================= =================================================================
        "approximate" (default) Build a histogram of summary-statistic values and their
                                expected frequencies under the assumption of random spatial
                                association between studies, via a weighted convolution.
                                This method is much faster, but slightly less accurate.
        "montecarlo"            Perform a large number of permutations, in which the coordinates
                                in the studies are randomly drawn from the Estimator's brain mask
                                and the full set of resulting summary-statistic values are
                                incorporated into a null distribution (stored as a histogram for
                                memory reasons).
                                This method is much slower, and is only slightly more accurate.
        ======================= =================================================================

    n_iters : :obj:`int`, optional
        Number of iterations to use to define the null distribution.
        This is only used if ``null_method=="montecarlo"``.
        Default is 10000.
    n_cores : :obj:`int`, optional
        Number of cores to use for parallelization.
        This is only used if ``null_method=="montecarlo"``.
        If <=0, defaults to using all available cores.
        Default is 1.
    **kwargs
        Keyword arguments. Arguments for the kernel_transformer can be assigned here,
        with the prefix ``kernel__`` in the variable name.
        Another optional argument is ``mask``.

    Attributes
    ----------
    masker : :class:`~nilearn.input_data.NiftiMasker` or similar
        Masker object.
    inputs_ : :obj:`dict`
        Inputs to the Estimator. For CBMA estimators, there is only one key: coordinates.
        This is an edited version of the dataset's coordinates DataFrame.
    null_distributions_ : :obj:`dict` of :class:`numpy.ndarray`
        Null distributions for the uncorrected summary-statistic-to-p-value conversion and any
        multiple-comparisons correction methods.
        Entries are added to this attribute if and when the corresponding method is applied.

        If ``null_method == "approximate"``:

            - ``histogram_bins``: Array of bin centers for the null distribution histogram,
              ranging from zero to the maximum possible summary statistic value for the Dataset.
            - ``histweights_corr-none_method-approximate``: Array of weights for the null
              distribution histogram, with one value for each bin in ``histogram_bins``.

        If ``null_method == "montecarlo"``:

            - ``histogram_bins``: Array of bin centers for the null distribution histogram,
              ranging from zero to the maximum possible summary statistic value for the Dataset.
            - ``histweights_corr-none_method-montecarlo``: Array of weights for the null
              distribution histogram, with one value for each bin in ``histogram_bins``.
              These values are derived from the full set of summary statistics from each
              iteration of the Monte Carlo procedure.
            - ``histweights_level-voxel_corr-fwe_method-montecarlo``: Array of weights for the
              voxel-level FWE-correction null distribution, with one value for each bin in
              ``histogram_bins``. These values are derived from the maximum summary statistic
              from each iteration of the Monte Carlo procedure.

        If :meth:`correct_fwe_montecarlo` is applied:

            - ``values_level-voxel_corr-fwe_method-montecarlo``: The maximum summary statistic
              value from each Monte Carlo iteration. An array of shape (n_iters,).
            - ``values_desc-size_level-cluster_corr-fwe_method-montecarlo``: The maximum cluster
              size from each Monte Carlo iteration. An array of shape (n_iters,).
            - ``values_desc-mass_level-cluster_corr-fwe_method-montecarlo``: The maximum cluster
              mass from each Monte Carlo iteration. An array of shape (n_iters,).

    Notes
    -----
    The ALE algorithm was originally developed in :footcite:t:`turkeltaub2002meta`,
    then updated in :footcite:t:`turkeltaub2012minimizing` and
    :footcite:t:`eickhoff2012activation`.

    The ALE algorithm is also implemented as part of the GingerALE app provided by the BrainMap
    organization (https://www.brainmap.org/ale/).

    Available correction methods: :meth:`~nimare.meta.cbma.ale.ALE.correct_fwe_montecarlo`.

    References
    ----------
    .. footbibliography::
    """

    def __init__(
        self,
        kernel_transformer=ALEKernel,
        null_method="approximate",
        n_iters=10000,
        n_cores=1,
        **kwargs,
    ):
        if not (isinstance(kernel_transformer, ALEKernel) or kernel_transformer == ALEKernel):
            LGR.warning(
                f"The KernelTransformer being used ({kernel_transformer}) is not optimized "
                f"for the {type(self).__name__} algorithm. "
                "Expect suboptimal performance and beware bugs."
            )

        # Add kernel transformer attribute and process keyword arguments
        super().__init__(kernel_transformer=kernel_transformer, **kwargs)
        self.null_method = null_method
        self.n_iters = n_iters
        self.n_cores = _check_ncores(n_cores)
        self.dataset = None

    def _compute_summarystat_est(self, ma_values):
        """Compute ALE scores: the probabilistic union of MA values across experiments."""
        stat_values = 1.0 - np.prod(1.0 - ma_values, axis=0)
        return stat_values

    def _determine_histogram_bins(self, ma_maps):
        """Determine histogram bins for null distribution methods.

        Parameters
        ----------
        ma_maps : list of imgs or numpy.ndarray
            MA maps.

        Notes
        -----
        This method adds one entry to the null_distributions_ dict attribute: "histogram_bins".
        """
        if isinstance(ma_maps, list):
            ma_values = self.masker.transform(ma_maps)
        elif isinstance(ma_maps, np.ndarray):
            ma_values = ma_maps
        else:
            raise ValueError(f"Unsupported data type '{type(ma_maps)}'")

        # Determine bins for null distribution histogram
        # Remember that numpy histogram bins are bin edges, not centers
        # Assuming values of 0, .001, .002, etc., bins are -.0005-.0005, .0005-.0015, etc.
        INV_STEP_SIZE = 100000
        step_size = 1 / INV_STEP_SIZE
        max_ma_values = np.max(ma_values, axis=1)
        # round up based on resolution
        max_ma_values = np.ceil(max_ma_values * INV_STEP_SIZE) / INV_STEP_SIZE
        max_poss_ale = self._compute_summarystat(max_ma_values)
        # create bin centers
        hist_bins = np.round(np.arange(0, max_poss_ale + (1.5 * step_size), step_size), 5)
        self.null_distributions_["histogram_bins"] = hist_bins

    def _compute_null_approximate(self, ma_maps):
        """Compute uncorrected ALE null distribution using approximate solution.

        Parameters
        ----------
        ma_maps : list of imgs or numpy.ndarray
            MA maps.

        Notes
        -----
        This method adds two entries to the null_distributions_ dict attribute:

            - "histogram_bins"
            - "histweights_corr-none_method-approximate"
        """
        if isinstance(ma_maps, list):
            ma_values = self.masker.transform(ma_maps)
        elif isinstance(ma_maps, np.ndarray):
            ma_values = ma_maps
        else:
            raise ValueError(f"Unsupported data type '{type(ma_maps)}'")

        assert "histogram_bins" in self.null_distributions_.keys()

        def just_histogram(*args, **kwargs):
            """Collect the first output (weights) from numpy histogram."""
            return np.histogram(*args, **kwargs)[0].astype(float)

        # Derive bin edges from histogram bin centers for numpy histogram function
        bin_centers = self.null_distributions_["histogram_bins"]
        step_size = bin_centers[1] - bin_centers[0]
        inv_step_size = 1 / step_size
        bin_edges = bin_centers - (step_size / 2)
        # BUG FIX: append the final edge to the shifted *edges* array.
        # The previous code appended to ``bin_centers``, which discarded the
        # half-bin shift computed on the line above and mis-aligned every
        # histogram bin by half a step.
        bin_edges = np.append(bin_edges, bin_edges[-1] + step_size)

        ma_hists = np.apply_along_axis(just_histogram, 1, ma_values, bins=bin_edges, density=False)

        # Normalize MA histograms to get probabilities
        ma_hists /= ma_hists.sum(1)[:, None]

        # Convolve the per-experiment histograms into a single ALE null histogram.
        ale_hist = ma_hists[0, :].copy()
        for i_exp in range(1, ma_hists.shape[0]):
            exp_hist = ma_hists[i_exp, :]

            # Find histogram bins with nonzero values for each histogram.
            ale_idx = np.where(ale_hist > 0)[0]
            exp_idx = np.where(exp_hist > 0)[0]

            # Compute output MA values, ale_hist indices, and probabilities
            ale_scores = (
                1 - np.outer((1 - bin_centers[exp_idx]), (1 - bin_centers[ale_idx])).ravel()
            )
            score_idx = np.floor(ale_scores * inv_step_size).astype(int)
            probabilities = np.outer(exp_hist[exp_idx], ale_hist[ale_idx]).ravel()

            # Reset histogram and set probabilities.
            # Use at() instead of setting values directly (ale_hist[score_idx] = probabilities)
            # because there can be redundant values in score_idx.
            ale_hist = np.zeros(ale_hist.shape)
            np.add.at(ale_hist, score_idx, probabilities)

        self.null_distributions_["histweights_corr-none_method-approximate"] = ale_hist
class ALESubtraction(PairwiseCBMAEstimator):
"""ALE subtraction analysis.
.. versionchanged:: 0.0.12
- Use memmapped array for null distribution and remove ``memory_limit`` parameter.
- Support parallelization and add progress bar.
- Add ALE-difference (stat) and -log10(p) (logp) maps to results.
.. versionchanged:: 0.0.8
* [FIX] Assume non-symmetric null distribution.
.. versionchanged:: 0.0.7
* [FIX] Assume a zero-centered and symmetric null distribution.
Parameters
----------
kernel_transformer : :obj:`~nimare.meta.kernel.KernelTransformer`, optional
Kernel with which to convolve coordinates from dataset.
Default is ALEKernel.
n_iters : :obj:`int`, optional
Default is 10000.
n_cores : :obj:`int`, optional
Number of processes to use for meta-analysis. If -1, use all available cores.
Default is 1.
.. versionadded:: 0.0.12
**kwargs
Keyword arguments. Arguments for the kernel_transformer can be assigned here,
with the prefix ``kernel__`` in the variable name. Another optional argument is ``mask``.
Attributes
----------
masker : :class:`~nilearn.input_data.NiftiMasker` or similar
Masker object.
inputs_ : :obj:`dict`
Inputs to the Estimator. For CBMA estimators, there is only one key: coordinates.
This is an edited version of the dataset's coordinates DataFrame.
Notes
-----
This method was originally developed in :footcite:t:`laird2005ale` and refined in
:footcite:t:`eickhoff2012activation`.
The ALE subtraction algorithm is also implemented as part of the GingerALE app provided by the
BrainMap organization (https://www.brainmap.org/ale/).
The voxel-wise null distributions used by this Estimator are very large, so they are not
retained as Estimator attributes.
Warnings
--------
This implementation contains one key difference from the original version.
In the original version, group 1 > group 2 difference values are only evaluated for voxels
significant in the group 1 | |
hikari.SnowflakeishOr[hikari.Command]]] = None,
) -> Client:
"""Build a `Client` from a `hikari.traits.RESTBotAware` instance.
Notes
-----
* This sets type dependency injectors for the hikari traits present in
`bot` (including `hikari.traits.RESTBotAware`).
* The endpoint used by `declare_global_commands` has a strict ratelimit
which, as of writing, only allows for 2 requests per minute (with that
ratelimit either being per-guild if targeting a specific guild
otherwise globally).
Parameters
----------
bot : hikari.traits.RESTBotAware
The bot client to build from.
Other Parameters
----------------
declare_global_commands : typing.Union[hikari.SnowflakeishSequenceOr[hikari.PartialGuild], hikari.SnowflakeishOr[hikari.PartialGuild], bool]
Whether or not to automatically set global slash commands when this
client is first started. Defaults to `False`.
If one or more guild objects/IDs are passed here then the registered
global commands will be set on the specified guild(s) at startup rather
than globally. This can be useful for testing/debug purposes as slash
commands may take up to an hour to propagate globally but will
immediately propagate when set on a specific guild.
set_global_commands : typing.Union[hikari.SnowflakeishOr[hikari.PartialGuild], bool]
Deprecated as of v2.1.1a1 alias of `declare_global_commands`.
command_ids : typing.Optional[collections.abc.Mapping[str, hikari.SnowflakeishOr[hikari.Command]]]
If provided, a mapping of top level command names to IDs of the commands to update.
This field is complementary to `declare_global_commands` and, while it
isn't necessarily required, this will in some situations help avoid
permissions which were previously set for a command from being lost
after a rename.
This currently isn't supported when multiple guild IDs are passed for
`declare_global_commands`.
""" # noqa: E501 - line too long
return cls(
rest=bot.rest,
server=bot.interaction_server,
declare_global_commands=declare_global_commands,
set_global_commands=set_global_commands,
command_ids=command_ids,
_stack_level=1,
).set_hikari_trait_injectors(bot)
async def __aenter__(self) -> Client:
    # Async context manager entry: start the client before yielding it.
    await self.open()
    return self

async def __aexit__(
    self,
    exception_type: typing.Optional[type[BaseException]],
    exception: typing.Optional[BaseException],
    exception_traceback: typing.Optional[types.TracebackType],
) -> None:
    # Async context manager exit: always close, even when an exception
    # is propagating out of the `async with` block.
    await self.close()

def __repr__(self) -> str:
    return f"CommandClient <{type(self).__name__!r}, {len(self._components)} components, {self._prefixes}>"
@property
def defaults_to_ephemeral(self) -> bool:
    # <<inherited docstring from tanjun.abc.Client>>.
    return self._defaults_to_ephemeral

@property
def message_accepts(self) -> MessageAcceptsEnum:
    """Type of message create events this command client accepts for execution."""
    return self._accepts

@property
def is_human_only(self) -> bool:
    """Whether this client is only executing for non-bot/webhook users messages."""
    # The human-only behaviour is implemented as a standard check; its
    # presence in the registered checks is what flags the mode.
    return typing.cast("checks.InjectableCheck", _check_human) in self._checks

@property
def cache(self) -> typing.Optional[hikari.api.Cache]:
    # <<inherited docstring from tanjun.abc.Client>>.
    return self._cache

@property
def checks(self) -> collections.Collection[tanjun_abc.CheckSig]:
    """Collection of the top level `tanjun.abc.Context` checks registered to this client.

    .. note::
        These may be taking advantage of the standard dependency injection.
    """
    # Unwrap the injectable check wrappers back to the raw callbacks.
    return tuple(check.callback for check in self._checks)

@property
def components(self) -> collections.Collection[tanjun_abc.Component]:
    # <<inherited docstring from tanjun.abc.Client>>.
    # Copied first so callers cannot mutate the client's component mapping.
    return self._components.copy().values()

@property
def events(self) -> typing.Optional[hikari.api.EventManager]:
    # <<inherited docstring from tanjun.abc.Client>>.
    return self._events

@property
def listeners(
    self,
) -> collections.Mapping[type[hikari.Event], collections.Collection[tanjun_abc.ListenerCallbackSig]]:
    """Mapping of event types to the listener callbacks registered for them."""
    # CastedView lazily unwraps the stored injectable listeners back to the
    # plain callbacks without copying the underlying mapping.
    return utilities.CastedView(
        self._listeners,
        lambda x: [typing.cast(tanjun_abc.ListenerCallbackSig, callback.callback) for callback in x],
    )

@property
def hooks(self) -> typing.Optional[tanjun_abc.AnyHooks]:
    """Top level `tanjun.abc.AnyHooks` set for this client.

    These are called during both message and interaction command execution.

    Returns
    -------
    typing.Optional[tanjun.abc.AnyHooks]
        The top level `tanjun.abc.Context` based hooks set for this
        client if applicable, else `None`.
    """
    return self._hooks

@property
def slash_hooks(self) -> typing.Optional[tanjun_abc.SlashHooks]:
    """Top level `tanjun.abc.SlashHooks` set for this client.

    These are only called during interaction command execution.
    """
    return self._slash_hooks

@property
def is_alive(self) -> bool:
    # <<inherited docstring from tanjun.abc.Client>>.
    # A loop is only held while the client is open, so it doubles as the
    # "is running" flag.
    return self._loop is not None

@property
def loop(self) -> typing.Optional[asyncio.AbstractEventLoop]:
    # <<inherited docstring from tanjun.abc.Client>>.
    return self._loop

@property
def message_hooks(self) -> typing.Optional[tanjun_abc.MessageHooks]:
    """Top level `tanjun.abc.MessageHooks` set for this client.

    These are only called during message command execution.
    """
    return self._message_hooks

@property
def metadata(self) -> collections.MutableMapping[typing.Any, typing.Any]:
    # <<inherited docstring from tanjun.abc.Client>>.
    return self._metadata

@property
def prefix_getter(self) -> typing.Optional[PrefixGetterSig]:
    """Prefix getter method set for this client.

    For more information on this callback's signature see `PrefixGetter`.
    """
    return typing.cast(PrefixGetterSig, self._prefix_getter.callback) if self._prefix_getter else None

@property
def prefixes(self) -> collections.Collection[str]:
    """Collection of the standard prefixes set for this client."""
    # Copied so callers cannot mutate the client's prefix set.
    return self._prefixes.copy()

@property
def rest(self) -> hikari.api.RESTClient:
    # <<inherited docstring from tanjun.abc.Client>>.
    return self._rest

@property
def server(self) -> typing.Optional[hikari.api.InteractionServer]:
    # <<inherited docstring from tanjun.abc.Client>>.
    return self._server

@property
def shards(self) -> typing.Optional[hikari_traits.ShardAware]:
    # <<inherited docstring from tanjun.abc.Client>>.
    return self._shards

@property
def voice(self) -> typing.Optional[hikari.api.VoiceComponent]:
    # <<inherited docstring from tanjun.abc.Client>>.
    return self._voice

async def _on_starting_event(self, _: hikari.StartingEvent, /) -> None:
    # Open the client when the linked hikari bot starts.
    await self.open()

async def _on_stopping_event(self, _: hikari.StoppingEvent, /) -> None:
    # Close the client when the linked hikari bot stops.
    await self.close()
async def clear_application_commands(
    self,
    *,
    application: typing.Optional[hikari.SnowflakeishOr[hikari.PartialApplication]] = None,
    guild: hikari.UndefinedOr[hikari.SnowflakeishOr[hikari.PartialGuild]] = hikari.UNDEFINED,
) -> None:
    # <<inherited docstring from tanjun.abc.Client>>.
    # Resolve the application ID, preferring the cached value and only
    # hitting the REST API when nothing is cached yet.
    if application is None:
        cached = self._cached_application_id
        application = cached if cached else await self.fetch_rest_application_id()

    # Declaring an empty command set removes every existing command.
    await self._rest.set_application_commands(application, (), guild=guild)
async def set_global_commands(
    self,
    *,
    application: typing.Optional[hikari.SnowflakeishOr[hikari.PartialApplication]] = None,
    guild: hikari.UndefinedOr[hikari.SnowflakeishOr[hikari.PartialGuild]] = hikari.UNDEFINED,
    force: bool = False,
) -> collections.Sequence[hikari.Command]:
    """Alias of `Client.declare_global_commands`.

    .. deprecated:: v2.1.1a1
        Use `Client.declare_global_commands` instead.
    """
    # Warn at the caller's frame, then delegate to the replacement method.
    deprecation_message = (
        "The `Client.set_global_commands` method has been deprecated since v2.1.1a1. "
        "Use `Client.declare_global_commands` instead."
    )
    warnings.warn(deprecation_message, DeprecationWarning, stacklevel=2)
    return await self.declare_global_commands(application=application, guild=guild, force=force)
async def declare_global_commands(
    self,
    command_ids: typing.Optional[collections.Mapping[str, hikari.SnowflakeishOr[hikari.Command]]] = None,
    *,
    application: typing.Optional[hikari.SnowflakeishOr[hikari.PartialApplication]] = None,
    guild: hikari.UndefinedOr[hikari.SnowflakeishOr[hikari.PartialGuild]] = hikari.UNDEFINED,
    force: bool = False,
) -> collections.Sequence[hikari.Command]:
    # <<inherited docstring from tanjun.abc.Client>>.
    # Gather every global slash command registered across all components.
    global_commands = [
        command
        for component in self._components.values()
        for command in component.slash_commands
        if command.is_global
    ]
    return await self.declare_application_commands(
        global_commands, command_ids, application=application, guild=guild, force=force
    )
async def declare_application_command(
    self,
    command: tanjun_abc.BaseSlashCommand,
    /,
    command_id: typing.Optional[hikari.Snowflakeish] = None,
    *,
    application: typing.Optional[hikari.SnowflakeishOr[hikari.PartialApplication]] = None,
    guild: hikari.UndefinedOr[hikari.SnowflakeishOr[hikari.PartialGuild]] = hikari.UNDEFINED,
) -> hikari.Command:
    # <<inherited docstring from tanjun.abc.Client>>.
    builder = command.build()
    # Resolve the target application once; both branches need it.
    app = application or self._cached_application_id or await self.fetch_rest_application_id()
    declare_kwargs = {
        "guild": guild,
        "name": builder.name,
        "description": builder.description,
        "options": builder.options,
    }

    # With a known ID we update in place, otherwise create a new command.
    if command_id:
        response = await self._rest.edit_application_command(app, command_id, **declare_kwargs)
    else:
        response = await self._rest.create_application_command(app, **declare_kwargs)

    if not guild:
        command.set_tracked_command(response)  # TODO: is this fine?

    return response
async def declare_application_commands(
    self,
    commands: collections.Iterable[tanjun_abc.BaseSlashCommand],
    /,
    command_ids: typing.Optional[collections.Mapping[str, hikari.SnowflakeishOr[hikari.Command]]] = None,
    *,
    application: typing.Optional[hikari.SnowflakeishOr[hikari.PartialApplication]] = None,
    guild: hikari.UndefinedOr[hikari.SnowflakeishOr[hikari.PartialGuild]] = hikari.UNDEFINED,
    force: bool = False,
) -> collections.Sequence[hikari.Command]:
    # <<inherited docstring from tanjun.abc.Client>>.
    command_ids = command_ids or {}
    names_to_commands: dict[str, tanjun_abc.BaseSlashCommand] = {}
    conflicts: set[str] = set()
    builders: dict[str, hikari.api.CommandBuilder] = {}
    # Build one declaration builder per command, recording duplicate names
    # as conflicts so they can all be reported in a single error below.
    for command in commands:
        names_to_commands[command.name] = command
        if command.name in builders:
            conflicts.add(command.name)

        builder = command.build()
        # Pin the builder to a known command ID when one was supplied so
        # Discord treats the declaration as an update rather than a re-create.
        if command_id := command_ids.get(command.name):
            builder.set_id(hikari.Snowflake(command_id))

        builders[command.name] = builder

    if conflicts:
        raise ValueError(
            "Couldn't declare commands due to conflicts. The following command names have more than one command "
            "registered for them " + ", ".join(conflicts)
        )

    if len(builders) > 100:
        raise ValueError("You can only declare up to 100 top level commands in a guild or globally")

    if not application:
        application = self._cached_application_id or await self.fetch_rest_application_id()

    # Used only for log messages: either "global" or the target guild's ID.
    target_type = "global" if guild is hikari.UNDEFINED else f"guild {int(guild)}"

    # Unless forced, skip the bulk declare when the remote state already
    # matches what we are about to declare (the bulk endpoint is heavily
    # rate limited, so this check is worth the extra fetch).
    if not force:
        registered_commands = await self._rest.fetch_application_commands(application, guild=guild)
        if len(registered_commands) == len(builders) and all(
            _cmp_command(builders.get(command.name), command) for command in registered_commands
        ):
            _LOGGER.info("Skipping bulk declare for %s slash commands since they're already declared", target_type)
            return registered_commands

    _LOGGER.info("Bulk declaring %s %s slash commands", len(builders), target_type)
    responses = await self._rest.set_application_commands(application, list(builders.values()), guild=guild)

    for response in responses:
        if not guild:
            # Only globally declared commands are tracked on the command object.
            names_to_commands[response.name].set_tracked_command(response)  # TODO: is this fine?

        # Warn when Discord returned a different ID than the caller expected:
        # permissions configured against the old ID will have been dropped.
        if (expected_id := command_ids.get(response.name)) and hikari.Snowflake(expected_id) != response.id:
            _LOGGER.warning(
                "ID mismatch found for %s command %r, expected %s but got %s. "
                "This suggests that any previous permissions set for this command will have been lost.",
                target_type,
                response.name,
                expected_id,
                response.id,
            )

    _LOGGER.info("Successfully declared %s (top-level) %s commands", len(responses), target_type)
    if _LOGGER.isEnabledFor(logging.DEBUG):
        _LOGGER.debug(
            "Declared %s command ids; %s",
            target_type,
            ", ".join(f"{response.name}: {response.id}" for response in responses),
        )
    return responses
def set_auto_defer_after(self: _ClientT, time: typing.Optional[float], /) -> _ClientT:
    """Set when this client should automatically defer execution of commands.

    .. warning::
        If `time` is set to `None` then automatic deferrals will be disabled.
        This may lead to unexpected behaviour.

    Parameters
    ----------
    time : typing.Optional[float]
        The time in seconds to defer interaction command responses after.
    """
    if time is None:
        self._auto_defer_after = None
    else:
        self._auto_defer_after = float(time)
    return self
def set_ephemeral_default(self: _ClientT, state: bool, /) -> _ClientT:
"""Set whether slash contexts spawned by this client should default to ephemeral responses.
Parameters
----------
bool
Whether slash command contexts executed in this component should
should default to ephemeral.
This will be overridden by any response calls which specify flags
and defaults to `False`.
Returns
-------
SelfT
This component to enable | |
""" inventory is an obspy Inventory object """
import six
import logging
from collections import OrderedDict
import datetime
from obspy import UTCDateTime
from sqlalchemy import text
from .schema import Abbreviation, Format, Unit, Channel, Station, SimpleResponse, AmpParms, CodaParms, Sensitivity
from .schema import PZ, PZ_Data, Poles_Zeros, StaCorrection
# when active_only is true, only load currently active stations/channels
# this can be toggled to True by adding the keyword argument active=True
# to the main inventory2db function
# when active_only is true, only load currently active stations/channels
# this can be toggled to True by adding the keyword argument active=True
# to the main inventory2db function
ACTIVE_ONLY = False
# the PZ loading part is still buggy, make loading them optional
INCLUDE_PZ = False
# station or channel end-date when none has been provided
DEFAULT_ENDDATE = datetime.datetime(3000,1,1)
# noise level in m/s used for determining cutoff level for Md
CUTOFF_GM = 1.7297e-7
# units for seismic channels
SEISMIC_UNITS = ['M/S', 'm/s', 'M/S**2', 'm/s**2', 'M/S/S', 'm/s/s', 'CM/S', 'cm/s', 'CM/S**2', 'cm/s**2', 'CM/S/S', 'cm/s/s', 'M', 'm', 'CM', 'cm']
# simple_response DU/M/S or DU/M/S**2 or counts/(cm/sec) counts/(cm/sec2)
# maps an input (StationXML) unit spelling to the gain-unit label stored in the DB
GAIN_UNITS = {'M/S' : 'DU/M/S',
'm/s' : 'DU/M/S',
'M/S**2' : 'DU/M/S**2',
'm/s**2' : 'DU/M/S**2',
'M/S/S' : 'DU/M/S**2',
'm/s/s' : 'DU/M/S**2',
'CM/S' : 'counts/(cm/sec)',
'cm/s': 'counts/(cm/sec)',
'CM/S**2' : 'counts/(cm/sec2)',
'cm/s**2' : 'counts/(cm/sec2)',
'CM/S/S' : 'counts/(cm/sec2)',
'cm/s/s' : 'counts/(cm/sec2)',
'M' : 'DU/M',
'm' : 'DU/M',
'CM' : 'counts/cm',
'cm' : 'counts/cm'
}
# keep track of successful and failed commits
# (module-level mutable state: each entry is a list of identifiers,
# appended to by the various *2db helper functions)
# NOTE(review): there is a "clip_bad" entry but no "clip_good" counterpart — confirm intended.
commit_metrics = OrderedDict()
commit_metrics["stations_good"] = []
commit_metrics["stations_bad"] = []
commit_metrics["channels_good"] = []
commit_metrics["channels_bad"] = []
commit_metrics["response_good"] = []
commit_metrics["response_bad"] = []
commit_metrics["codaparms_good"]= []
commit_metrics["codaparms_bad"] = []
commit_metrics["ampparms_good"] = []
commit_metrics["ampparms_bad"] = []
commit_metrics["clip_bad"] = []
commit_metrics["sensitivity_good"] = []
commit_metrics["sensitivity_bad"] = []
commit_metrics["pz_good"] = []
commit_metrics["pz_bad"] = []
commit_metrics["poles_zeros_good"] = []
commit_metrics["poles_zeros_bad"] = []
def inventory2db(session, inventory, active=False, include_pz=False):
    """Load an obspy Inventory into the database.

    ``active`` and ``include_pz`` are propagated to the helper functions
    through the module-level ACTIVE_ONLY / INCLUDE_PZ flags (ugly kluge).
    """
    global ACTIVE_ONLY
    global INCLUDE_PZ
    ACTIVE_ONLY = active
    INCLUDE_PZ = include_pz
    if not inventory.networks:
        logging.warning("This inventory has no networks, doing nothing.")
        return
    _networks2db(session, inventory.networks, inventory.source)
def _networks2db(session, networks, source):
    """Insert every network in ``networks`` into the database."""
    for net in networks:
        _network2db(session, net, source)
def _network2db(session, network, source):
    """Insert a single network: its stations if any, otherwise only its description."""
    if network.stations:
        success, failed = _stations2db(session, network, source)
        logging.info("\n Success: {} stations, failure: {} stations.\n".format(success, failed))
        return
    # No stations: only insert an entry into D_Abbreviation.
    net_id = _get_net_id(session, network)
    if not net_id:
        logging.warning("Did not add network description to database")
def _get_net_id(session, network):
    """
    Given an obspy Network object, get the id from d_abbreviation with the
    same description (creates a new entry if none exists yet).

    Returns None when a new entry cannot be committed.
    """
    result = session.query(Abbreviation).filter_by(description=network.description).first()
    if result:
        return result.id
    network_entry = Abbreviation(description=network.description)
    session.add(network_entry)
    try:
        session.commit()
        result = session.query(Abbreviation).filter_by(description=network.description).first()
        return result.id
    except Exception:
        # BUG FIX: was a bare ``except:``; also roll back the failed
        # transaction so the session stays usable for later operations.
        session.rollback()
        logging.error("Not able to commit abbreviation and get net_id")
        return None
def _get_inid(session, channel):
    """
    Get the id from d_abbreviation with the same instrument type
    description (creates a new entry if none exists yet).

    Returns None when a new entry cannot be committed.
    """
    result = session.query(Abbreviation).filter_by(description=channel.sensor.description).first()
    if result:
        return result.id
    instr_entry = Abbreviation(description=channel.sensor.description)
    session.add(instr_entry)
    try:
        session.commit()
        result = session.query(Abbreviation).filter_by(description=channel.sensor.description).first()
        return result.id
    except Exception:
        # BUG FIX: was a bare ``except:``; roll back so the session stays usable.
        session.rollback()
        logging.error("Not able to commit abbreviation and get inid")
        return None
def _get_unit(session, unit_name, unit_description):
    """
    Get the id from d_unit with the same unit name/description
    (creates a new entry if none exists yet).

    Returns None when a new entry cannot be committed.
    """
    result = session.query(Unit).filter_by(name=unit_name, description=unit_description).first()
    if result:
        return result.id
    instr_entry = Unit(name=unit_name, description=unit_description)
    session.add(instr_entry)
    try:
        session.commit()
        result = session.query(Unit).filter_by(name=unit_name, description=unit_description).first()
        return result.id
    except Exception:
        # BUG FIX: was a bare ``except:``; roll back so the session stays usable.
        session.rollback()
        logging.error("Not able to commit abbreviation and get unit id")
        return None
def _get_format_id(session, format_name=None):
    """
    Get the id from d_format for ``format_name`` (defaults to "UNKNOWN");
    creates a new entry if none exists yet.

    Returns None when a new entry cannot be committed.
    """
    if not format_name:
        format_name="UNKNOWN"
    result = session.query(Format).filter_by(name=format_name).first()
    if result:
        return result.id
    instr_entry = Format(name=format_name)
    session.add(instr_entry)
    try:
        session.commit()
        result = session.query(Format).filter_by(name=format_name).first()
        return result.id
    except Exception:
        # BUG FIX: was a bare ``except:``; roll back so the session stays usable.
        session.rollback()
        logging.error("Not able to commit format_name and get format id")
        return None
def _remove_station(session, network, station):
    """
    Removes this station from station_data and will remove
    its channels as well. See remove_channels.

    ``network``/``station`` may be obspy objects or plain code strings.
    Returns the accumulated number of deleted rows.
    """
    try:
        # obspy objects?
        network_code = network.code
        station_code = station.code
    except Exception as e:
        # no, then assume regular strings
        logging.info("Station: {}.{}".format(network,station))
        network_code = network
        station_code = station
    status = 0
    try:
        status = session.query(Station).filter_by(net=network_code,sta=station_code).delete()
    except Exception as e:
        logging.error("remove_station: {}".format(e))
    try:
        session.commit()
        logging.info("Removed {}.{} from {}".format(network_code,station_code,Station.__tablename__))
    except Exception as e:
        logging.error("Unable to delete station {}.{} from {}: {}".format(network_code,station_code,Station.__tablename__,e))
        # BUG FIX: the original called sys.exit() but ``sys`` was never
        # imported, so this path raised NameError instead of exiting.
        # raise SystemExit reproduces sys.exit()'s behaviour without the import.
        raise SystemExit
    if hasattr(station,"channels") and len(station.channels) > 0:
        status = status + _remove_channels(session, network_code, station)
    else:
        # no need to construct channels, just using string should work:
        status = status + _remove_channels(session, network_code, station_code)
    return status
def _remove_channels(session, network_code, station):
    """
    Remove all channels (and the associated response rows) for a station.
    ``station`` may be an obspy Station object or a plain station-code string.
    """
    try:
        # obspy object?
        station_code = station.code
    except Exception as e:
        # if not, assume a regular string
        station_code = station
    status = 0
    # remove all channels for this station, not just the ones in the XML file
    try:
        status = session.query(Channel).filter_by(net=network_code,sta=station_code).delete()
    except Exception as e:
        logging.error("Unable to delete channels: {}.{}: {}".format(network_code,station_code,e))
    # NOTE(review): each helper call below overwrites ``status`` rather than
    # accumulating it; callers only use the value for a log message.
    try:
        status = _remove_simple_responses(session, network_code, station_code)
    except Exception as e:
        logging.error("Unable to delete responses: {}.{}: {}".format(network_code,station_code,e))
    try:
        status = _remove_sensitivity(session, network_code, station_code)
    except Exception as e:
        logging.error("Unable to delete overall sensitivity: {}.{}: {}".format(network_code,station_code,e))
    try:
        status = _remove_poles_zeros(session, network_code, station_code)
    except Exception as e:
        logging.error("Unable to delete poles and zeros: {}.{}: {}".format(network_code,station_code,e))
    try:
        session.commit()
        logging.info("Successfully removed channels and instrument response for {}.{}".format(network_code,station_code))
    except Exception as e:
        # BUG FIX: the original format string had no placeholder, so the
        # exception passed to .format() was silently dropped from the log.
        logging.error("Unable to commit deletions from channels and response tables: {}".format(e))
    return status
def _remove_simple_responses(session, network_code, station_code):
    """
    Delete simple_response, codaparms and ampparms rows for a station.
    Returns the row count of the last successful delete (0 if all failed).
    """
    # BUG FIX: initialize ``status`` so a failure in the first query cannot
    # leave it unbound at the return statement (UnboundLocalError).
    status = 0
    try:
        status = session.query(SimpleResponse).filter_by(net=network_code,sta=station_code).delete()
    except Exception as e:
        logging.error("remove_simple_responses: {}.{}: {}".format(network_code,station_code,e))
    try:
        status = session.query(CodaParms).filter_by(net=network_code,sta=station_code).delete()
    except Exception as er:
        logging.error("remove_simple_responses, codaparms: {}.{}: {}".format(network_code,station_code,er))
    try:
        status = session.query(AmpParms).filter_by(net=network_code,sta=station_code).delete()
    except Exception as error:
        logging.error("remove_simple_responses,ampparms: {}.{}: {}".format(network_code,station_code,error))
    return status
def _remove_sensitivity(session, network_code, station_code):
    """Delete overall sensitivity rows for a station; returns the deleted row count."""
    # BUG FIX: initialize ``status`` so a failed query cannot leave it
    # unbound at the return statement (UnboundLocalError).
    status = 0
    try:
        status = session.query(Sensitivity).filter_by(net=network_code,sta=station_code).delete()
    except Exception as e:
        logging.error("remove_sensitivity: {}.{}: {}".format(network_code,station_code,e))
    return status
def _remove_poles_zeros(session, network_code, station_code):
    """
    Removes any rows in poles_zeros for this station. Will also remove
    the PZ and PZ_Data entries if there are no other poles_zeros rows that
    refer to them, to limit the number of obsolete PZ,PZ_Data rows in the
    database.
    """
    pz_keys = set()
    # -1 sentinel: distinguishes "delete never ran" from "deleted 0 rows"
    status = -1
    logging.debug("In _remove_poles_zeros, for station {}.{}".format(network_code,station_code))
    try:
        # NOTE(review): .all() on a single-column query returns Row objects,
        # so ``key`` below is a Row, not a scalar pz_key — confirm the
        # Poles_Zeros.pz_key==key comparison behaves as intended.
        all_in_list = session.query(Poles_Zeros.pz_key).filter_by(net=network_code,sta=station_code).all()
        for key in all_in_list:
            pz_keys.add(key)
        logging.debug("Retrieved {} unique pole zero keys for {}.{}\n".format(len(pz_keys),network_code,station_code))
        status = session.query(Poles_Zeros).filter_by(net=network_code,sta=station_code).delete()
        logging.debug("Deleting poles_zeros entries: {}".format(status))
    except Exception as e:
        logging.error(e)
    for key in pz_keys:
        # do other poles_zeros entries using this key? yes, keep, no, remove.
        # NOTE(review): ``net != X AND sta != Y`` excludes rows that share
        # either the net OR the sta with this station; if the intent is
        # "rows belonging to any other station", the condition should be
        # NOT (net == X AND sta == Y) — verify against the schema usage.
        rows_returned = session.query(Poles_Zeros.pz_key).filter(Poles_Zeros.pz_key==key, Poles_Zeros.net != network_code, Poles_Zeros.sta != station_code).all()
        logging.debug("PZ KEY: {}. Number of other poles_zeros that use this set of poles and zeros: {}".format(key,len(rows_returned)))
        if len(rows_returned) > 0:
            logging.debug("PZ and PZ_Data in use, not removing")
        else:
            # remove as well.
            status = status + session.query(PZ).filter_by(key=key).delete()
            status1 = session.query(PZ_Data).filter_by(key=key).delete()
            logging.debug("Removed {} PZ and PZ_data entries".format(status))
    return status
def _remove_channel(session, network_code, station_code, channel):
    """
    Removes this channel from channel_data and will remove
    its response as well. See remove_simple_response.

    Returns the number of deleted channel rows.
    """
    status = 0
    try:
        status = session.query(Channel).filter_by(net=network_code,sta=station_code \
        ,seedchan=channel.code,location=fix(channel.location_code)).delete()
    except Exception as e:
        logging.error("remove_channel: {}".format(e))
    try:
        session.commit()
    except Exception as e:
        logging.error("Unable to delete channel {}.{}.{}.{}: {}".format(network_code,station_code,channel.code, channel.location_code,e))
    if channel.response:
        # BUG FIX: ``my_status`` must be bound before the call, otherwise the
        # except-handler below raised NameError whenever the call failed.
        my_status = -1
        try:
            my_status = _remove_simple_response(session, network_code, station_code, channel.code, channel.location_code)
        except Exception as e:
            logging.error("remove_channel ({}): {}".format(my_status, e))
    return status
def _remove_simple_response(session, network_code, station_code, channel_code, location_code):
    """
    Delete simple_response, codaparms and ampparms rows for one channel.
    Returns the row count of the last successful delete (0 if all failed).
    """
    # BUG FIX: initialize ``status`` so a failure in the first query cannot
    # leave it unbound at the return statement (UnboundLocalError).
    status = 0
    try:
        status = session.query(SimpleResponse).filter_by(net=network_code,sta=station_code \
        ,seedchan=channel_code,location=fix(location_code)).delete()
    except Exception as e:
        logging.error("remove_simple_response: {}".format(e))
    try:
        status = session.query(CodaParms).filter_by(net=network_code,sta=station_code \
        ,seedchan=channel_code,location=fix(location_code)).delete()
    except Exception as er:
        logging.error("remove_simple_response, codaparms: {}".format(er))
    try:
        status = session.query(AmpParms).filter_by(net=network_code,sta=station_code \
        ,seedchan=channel_code,location=fix(location_code)).delete()
    except Exception as error:
        logging.error("remove_simple_response,ampparms: {}".format(error))
    return status
def _station2db(session, network, station, source):
net_id = _get_net_id(session,network)
network_code = network.code
station_code = station.code
default_enddate = datetime.datetime(3000,1,1)
# first remove any prior meta-data associated with Net-Sta and Net-Sta-Chan-Loc
try:
status = _remove_station(session,network,station)
logging.info("Removed {} channels for station {}".format(status-1,station_code))
except Exception as e:
logging.error("Exception: {}".format(e))
db_station = Station(net=network.code, sta=station.code, ondate=station.start_date.datetime)
db_station.net_id = net_id
if hasattr(station,"end_date") and station.end_date:
db_station.offdate = station.end_date.datetime
else:
db_station.offdate = DEFAULT_ENDDATE
# return if ACTIVE_ONLY is true and the station's offdate pre-dates today
if ACTIVE_ONLY and db_station.offdate < UTCDateTime():
logging.info("Station {}.{} not active, not adding".format(network.code,station.code))
return
session.add(db_station)
db_station.lat = station.latitude
db_station.lon = station.longitude
db_station.elev = station.elevation
db_station.staname = station.site.name
try:
session.commit()
commit_metrics["stations_good"].append(station_code)
except Exception as e:
logging.error("Cannot save station_data: {}".format(e))
commit_metrics["stations_bad"].append(station_code)
if station.channels:
_channels2db(session, network_code, station_code, station.channels, source)
# magnitude station corrections
# only add default values if there is no entry in stacorrections for this station yet!
try:
logging.debug("Querying for station correction entries for {}.{}".format(network_code,station_code))
stacors = session.query(StaCorrection).filter_by(net=network_code,sta=station_code).all()
logging.debug("Number of station corrections: {}".format(len(stacors)))
if len(stacors) == 0:
# add default values for | |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""Prepare a project to run."""
from __future__ import absolute_import
from __future__ import print_function
from abc import ABCMeta, abstractmethod
import os
from copy import deepcopy
from anaconda_project.internal.metaclass import with_metaclass
from anaconda_project.internal.simple_status import SimpleStatus
from anaconda_project.internal.toposort import toposort_from_dependency_info
from anaconda_project.internal import conda_api
from anaconda_project.internal.py2_compat import is_string
from anaconda_project.local_state_file import LocalStateFile
from anaconda_project.provide import (_all_provide_modes, PROVIDE_MODE_DEVELOPMENT)
from anaconda_project.requirements_registry.provider import ProvideContext
from anaconda_project.requirements_registry.requirement import Requirement, EnvVarRequirement, UserConfigOverrides
from anaconda_project.requirements_registry.requirements.conda_env import CondaEnvRequirement
def _update_environ(dest, src):
"""Overwrite ``environ`` with any additions from the prepared environ.
Does not remove any variables from ``environ``.
"""
# updating os.environ can be a memory leak, so we only update
# those values that actually changed.
for key, value in src.items():
if key not in dest or dest[key] != value:
dest[key] = value
class PrepareResult(with_metaclass(ABCMeta)):
    """Abstract class describing the result of preparing the project to run."""

    def __init__(self, statuses, environ, overrides, env_spec_name):
        """Construct an abstract PrepareResult."""
        # tuple() so the status list cannot be mutated after construction
        self._statuses = tuple(statuses)
        self._environ = environ
        self._overrides = overrides
        self._env_spec_name = env_spec_name

    def __bool__(self):
        """True if we were successful."""
        return not self.failed

    def __nonzero__(self):
        """True if we were successful."""
        # Python 2 truthiness hook; delegates to __bool__.
        return self.__bool__()  # pragma: no cover (py2 only)

    @property
    @abstractmethod
    def failed(self):
        """True if we failed to do what this stage was intended to do.

        If ``execute()`` returned non-None, the failure may not be fatal; stages
        can continue to be executed and may resolve the issue.
        """
        pass  # pragma: no cover

    @property
    def statuses(self):
        """Get latest RequirementStatus if available.

        If we failed before we even checked statuses, this will be an empty list.
        """
        return self._statuses

    def status_for(self, env_var_or_class):
        """Get status for the given env var or class, or None if unknown."""
        for status in self.statuses:
            if is_string(env_var_or_class):
                # string lookup: match by the requirement's environment variable
                if isinstance(status.requirement, EnvVarRequirement) and \
                   status.requirement.env_var == env_var_or_class:
                    return status
            elif isinstance(status.requirement, env_var_or_class):
                # class lookup: match by requirement type
                return status
        return None

    @property
    def environ(self):
        """Computed environment variables for the project.

        If ``failed`` is True, this environ dict may be unmodified
        from the original provided to the prepare function.
        """
        return self._environ

    @property
    def overrides(self):
        """Override object which was passed to prepare()."""
        return self._overrides

    @property
    def errors(self):
        """Get lines of error output."""
        raise NotImplementedError()  # pragma: no cover

    @property
    def env_spec_name(self):
        """The env spec name we used for the prepare.

        If the project was broken or the user provided bad input
        before we could ask CondaEnvRequirement for the env spec
        name, at the moment we sort of take a guess at the right
        name in order to guarantee this is never None. The
        guessing is a little bit broken. But it would be a very
        obscure scenario where it matters.
        """
        return self._env_spec_name

    @property
    def env_prefix(self):
        """The prefix of the prepared env, or None if none was created."""
        status = self.status_for(CondaEnvRequirement)
        if status is None:
            return None
        # the conda env requirement publishes its prefix via its env var
        varname = status.requirement.env_var
        return self._environ.get(varname, None)
class PrepareSuccess(PrepareResult):
    """Result object describing a prepare that completed successfully."""

    def __init__(self, statuses, command_exec_info, environ, overrides, env_spec_name):
        """Construct a PrepareSuccess indicating a successful prepare stage."""
        super(PrepareSuccess, self).__init__(statuses, environ, overrides, env_spec_name)
        self._command_exec_info = command_exec_info
        assert self.env_spec_name is not None

    @property
    def failed(self):
        """Always False for a successful prepare."""
        return False

    @property
    def command_exec_info(self):
        """``CommandExecInfo`` instance if available, None if not."""
        return self._command_exec_info

    @property
    def errors(self):
        """A successful prepare has no error lines."""
        return []

    def update_environ(self, environ):
        """Merge the prepared environ into ``environ`` without removing any variables."""
        _update_environ(environ, self._environ)
class PrepareFailure(PrepareResult):
    """Result object describing a prepare that failed."""

    def __init__(self, statuses, errors, environ, overrides, env_spec_name=None):
        """Construct a PrepareFailure indicating a failed prepare stage."""
        super(PrepareFailure, self).__init__(statuses, environ, overrides, env_spec_name)
        self._errors = errors

    @property
    def failed(self):
        """Always True for a failed prepare."""
        return True

    @property
    def errors(self):
        """The recorded error lines (non-empty)."""
        return self._errors
class ConfigurePrepareContext(object):
    """Information needed to configure a stage."""

    def __init__(self, environ, local_state_file, default_env_spec_name, overrides, statuses):
        """Construct a ConfigurePrepareContext."""
        self.environ = environ
        self.local_state_file = local_state_file
        self.default_env_spec_name = default_env_spec_name
        self.overrides = overrides
        self.statuses = statuses
        if statuses:
            # sanity check the element type (import deferred to avoid a cycle)
            from anaconda_project.requirements_registry.requirement import RequirementStatus
            assert isinstance(statuses[0], RequirementStatus)
# Pure abstract interface: every member below is overridden by the concrete
# stage implementations (_FunctionPrepareStage, _AndThenPrepareStage).
class PrepareStage(with_metaclass(ABCMeta)):
    """A step in the project preparation process."""

    @property
    @abstractmethod
    def description_of_action(self):
        """Get a user-visible description of what happens if this step is executed."""
        pass  # pragma: no cover

    @property
    @abstractmethod
    def failed(self):
        """Synonym for result.failed, only available after ``execute()``."""
        pass  # pragma: no cover

    @abstractmethod
    def configure(self):
        """Get a ``ConfigurePrepareContext`` or None if no configuration is needed.

        Configuration should be done before execute().

        Returns:
            a ``ConfigurePrepareContext`` or None
        """
        pass  # pragma: no cover

    @abstractmethod
    def execute(self):
        """Run this step and return a new stage, or None if we are done or failed."""
        pass  # pragma: no cover

    @property
    @abstractmethod
    def result(self):
        """The ``PrepareResult`` (only available if ``execute()`` has been called)."""
        pass  # pragma: no cover

    @property
    @abstractmethod
    def environ(self):
        """The latest environment variables (from the result if any, otherwise the pre-execute ones)."""
        pass  # pragma: no cover

    @property
    @abstractmethod
    def overrides(self):
        """User overrides."""
        pass  # pragma: no cover

    @property
    @abstractmethod
    def statuses_before_execute(self):
        """``RequirementStatus`` list before execution.

        This list includes all known requirements and their statuses, while the list
        in the ``configure()`` context only includes those that should be configured
        prior to this stage's execution.
        """
        pass  # pragma: no cover

    @property
    @abstractmethod
    def statuses_after_execute(self):
        """``RequirementStatus`` list after execution.

        This list includes all known requirements and their statuses, as changed
        by ``execute()``. This property cannot be read prior to ``execute()``.
        """
        pass  # pragma: no cover
# This is defined to keep the same requirements from old_statuses
# in the refreshed list, even if they are missing from
# rechecked_statuses, and it does not add any new requirements
# from rechecked_statuses to the refreshed list.
def _refresh_status_list(old_statuses, rechecked_statuses):
new_by_req = dict()
for status in rechecked_statuses:
new_by_req[status.requirement] = status
updated = []
for status in old_statuses:
updated.append(new_by_req.get(status.requirement, status))
return updated
class _FunctionPrepareStage(PrepareStage):
    """A stage chain where the description and the execute function are passed in to the constructor."""

    def __init__(self, environ, overrides, description, statuses, execute, config_context=None):
        assert isinstance(environ, dict)
        assert config_context is None or isinstance(config_context, ConfigurePrepareContext)
        self._environ = environ
        self._overrides = overrides
        # the execute function is supposed to set these two (via accessor)
        self._result = None
        self._statuses_after_execute = None
        self._description = description
        self._statuses_before_execute = statuses
        self._execute = execute
        self._config_context = config_context

    # def __repr__(self):
    #     return "_FunctionPrepareStage(%r)" % (self._description)

    @property
    def description_of_action(self):
        return self._description

    @property
    def failed(self):
        # raises RuntimeError (via ``result``) if execute() hasn't run yet
        return self.result.failed

    def configure(self):
        return self._config_context

    def execute(self):
        # delegate to the injected callable; it must call set_result()
        return self._execute(self)

    @property
    def result(self):
        if self._result is None:
            raise RuntimeError("result property isn't available until after execute()")
        return self._result

    @property
    def environ(self):
        # before execute(): the initial environ; after: the result's environ
        if self._result is None:
            return self._environ
        else:
            return self.result.environ

    @property
    def overrides(self):
        return self._overrides

    @property
    def statuses_before_execute(self):
        return self._statuses_before_execute

    @property
    def statuses_after_execute(self):
        if self._statuses_after_execute is None:
            raise RuntimeError("statuses_after_execute isn't available until after execute()")
        return self._statuses_after_execute

    def set_result(self, result, rechecked_statuses):
        # Called by the execute function: records the result and refreshes
        # the pre-execute status list against the rechecked statuses.
        assert result is not None
        self._statuses_after_execute = _refresh_status_list(self._statuses_before_execute, rechecked_statuses)
        self._result = result
class _AndThenPrepareStage(PrepareStage):
    """A stage chain which runs an ``and_then`` function after it executes successfully."""

    def __init__(self, stage, and_then):
        # ``stage`` is the wrapped PrepareStage; ``and_then`` maps the
        # updated status list to a follow-up stage (or None).
        self._stage = stage
        self._and_then = and_then

    # def __repr__(self):
    #     return "_AndThenPrepareStage(%r, %r)" % (self._stage, self._and_then)

    @property
    def description_of_action(self):
        return self._stage.description_of_action

    @property
    def failed(self):
        return self._stage.failed

    def configure(self):
        return self._stage.configure()

    def execute(self):
        next = self._stage.execute()
        if next is None:
            # wrapped stage finished: invoke and_then only on success
            if self._stage.failed:
                return None
            else:
                return self._and_then(self._stage.statuses_after_execute)
        else:
            # wrapped stage produced a follow-up stage; keep wrapping so
            # and_then still fires after the whole chain succeeds
            return _AndThenPrepareStage(next, self._and_then)

    @property
    def result(self):
        return self._stage.result

    @property
    def environ(self):
        return self._stage.environ

    @property
    def overrides(self):
        return self._stage.overrides

    @property
    def statuses_before_execute(self):
        return self._stage.statuses_before_execute

    @property
    def statuses_after_execute(self):
        return self._stage.statuses_after_execute
def _after_stage_success(stage, and_then):
    """Run and_then function after stage executes successfully.

    and_then may return another stage, or None. It takes
    the current list of updated statuses as a parameter.
    """
    assert stage is not None
    # Wrap so and_then fires only after the (possibly multi-step) stage succeeds.
    return _AndThenPrepareStage(stage, and_then)
def _sort_statuses(environ, local_state, statuses, missing_vars_getter):
def get_node_key(status):
# If we add a Requirement that isn't an EnvVarRequirement,
# we can simply return the requirement object here as its
# own key I believe. But for now that doesn't happen.
assert hasattr(status.requirement, | |
# Repository: Syler1984/seismo-ml-phase-picker
from obspy.core.utcdatetime import UTCDateTime
import re
import os
from .converter import date_str
from obspy import read
def process_stations_file(path):
    """Placeholder parser for a stations file; currently always returns None."""
    return None
def group_archives(archives):
    """
    Takes archive definitions list and groups them together by stations and channel types.

    Records sharing station, the first two channel characters, network code
    and location code form one group. Groups are returned in order of first
    appearance, records in original order (same output as before, but O(n)
    instead of the previous O(n^2) pairwise scan).

    :param archives: list of records [station, channel, code, location, ...]
    :return: list of groups (lists of records)
    """
    groups = {}  # tag -> group; plain dict preserves insertion order (Python 3.7+)
    for record in archives:
        tag = record[0] + record[1][:2] + record[2] + record[3]
        groups.setdefault(tag, []).append(record)
    return list(groups.values())
def filter_by_channel(archives, allowed_channels):
    """
    Keep only archive groups whose channel multiset matches one of the
    entries in ``allowed_channels``.
    """
    from collections import Counter

    # a group can only match an allowed set of the same size
    allowed_lengths = {len(channels) for channels in allowed_channels}
    filtered = []
    for group in archives:
        if len(group) not in allowed_lengths:
            continue
        group_channels = Counter(record[1] for record in group)
        if any(group_channels == Counter(channels) for channels in allowed_channels):
            filtered.append(group)
    return filtered
def process_seisan_def(path, allowed_channels):
    """
    Read a SEISAN.DEF file and return grouped station records restricted to
    the allowed channel sets.
    """
    tag = 'ARC_CHAN'
    records = []
    with open(path, 'r') as f:
        for line in f:
            if not line.startswith(tag):
                continue
            fields = line[len(tag):].split()
            records.append([
                line[40:45].strip(),                      # station
                line[45:48],                              # channel
                line[48:50],                              # network code
                line[50:52],                              # location
                fields[2] if len(fields) >= 3 else None,  # archive start date
                fields[3] if len(fields) >= 4 else None,  # archive end date
            ])
    return filter_by_channel(group_archives(records), allowed_channels)
def group_by(lst, column, comp_margin=None):
    """
    Groups list entities by column values.

    :param column: index of the column to compare
    :param comp_margin: when given, only the first ``comp_margin`` characters
        of the column value are compared
    :return: list of groups (lists of entries) in order of first appearance

    Same output as the previous implementation, but O(n) instead of the
    O(n^2) nested rescan. Assumes the compared column values are hashable
    (they are strings everywhere in this module).
    """
    groups = {}  # key -> group; plain dict preserves insertion order (Python 3.7+)
    for entry in lst:
        key = entry[column][0:comp_margin]
        groups.setdefault(key, []).append(entry)
    return list(groups.values())
def process_archives_list(lst):
    """
    Processes output of parse_seisan_def: combines entries into
    three-channeled groups.

    Entries are grouped by station, then by the two-character channel type,
    then by location; only complete three-channel groups are kept.
    """
    result = []
    for station_group in group_by(lst, 0):
        for channel_group in group_by(station_group, 1, 2):
            for location_group in group_by(channel_group, 3):
                if len(location_group) == 3:
                    result.append(location_group)
    return result
def process_seisan_def_mulplt(path, mulplt_data = None, allowed_channels = None):
    """
    Parses seisan.def file and returns grouped lists like:
    [station, channel, network_code, location_code, archive start date, archive end date (or None)].
    """
    data = []
    if mulplt_data is not None:
        # NOTE(review): keys are built from x[0]+x[1]+x[2] but the membership
        # test below uses only station + channel (two components) — confirm
        # whether x[2] is expected to be empty in mulplt_data.
        stations_channels = [x[0] + x[1] + x[2] for x in mulplt_data]
    with open(path, "r") as f:
        lines = f.readlines()
    tag = "ARC_CHAN"
    for line in lines:
        if line[:len(tag)] == tag:
            # fixed-width fields per the SEISAN.DEF ARC_CHAN line layout
            entry = line[len(tag):].split()
            station = line[40:45].strip()
            channel = line[45:48]
            code = line[48:50]
            location = line[50:52]
            start_date = entry[2] if len(entry) >= 3 else None
            end_date = entry[3] if len(entry) >= 4 else None
            if mulplt_data is not None:
                if station + channel not in stations_channels:
                    continue
            parsed_line = [station, channel, code, location, start_date, end_date]
            if allowed_channels:
                # allowed_channels entries are treated as channel-name prefixes
                is_channel_allowed = False
                for ch in allowed_channels:
                    if ch == channel[:len(ch)]:
                        is_channel_allowed = True
                if is_channel_allowed:
                    data.append(parsed_line)
            else:
                data.append(parsed_line)
    return process_archives_list(data)
def parse_s_file(path, params):
    """
    Parses s-file and returns all its events readings.

    :param path: path to the s-file.
    :param params: dict with filter settings: 'min_magnitude', 'max_depth',
        'max_distance' and 'debug'.
    :return: list of event dicts, or None when the file is unreadable or
        empty, has no events table, uses an unsupported locale type, or one
        of its events fails a filter.
    """
    try:
        with open(path, 'r') as f:
            lines = f.readlines()
    except (UnicodeDecodeError, FileNotFoundError):
        # Unreadable or missing file: nothing to parse.
        return
    if not lines:
        # Guard against empty files before touching lines[0].
        return
    head = lines[0]
    # Find events table; remember the 1-based line number of its first row.
    table_head = ' STAT SP IPHASW D HRMM SECON CODA AMPLIT PERI AZIMU VELO AIN AR TRES W DIS CAZ7'
    events_table = None
    events_table_line_num = None
    for i, l in enumerate(lines):
        if l[:len(table_head)] == table_head:
            events_table = lines[i + 1:]
            events_table_line_num = i + 1 + 1  # + 1 - because number should start with 1
    if events_table is None:
        # BUG FIX: previously this fell through with 'events_table' unbound and
        # raised NameError when the table header line was missing.
        return
    # Parse head: magnitude and depth are fixed-column, optional fields.
    magnitude = head[55:59].strip()
    magnitude = float(magnitude) if len(magnitude) else None
    magnitude_type = head[59]  # parsed but currently unused downstream
    loc = head[21]
    if loc != 'L':
        print(f'In file "{path}": unsupported locale type "{loc}"! Skipping..')
        return
    depth = head[38:43].strip()  # in Km
    depth = float(depth) if len(depth) else None
    # Parse ID: accept only an "ID:" token sitting exactly at columns 57-60.
    event_id = None
    q_id = re.compile(r'\bID:')
    for l in lines:
        f_iter = q_id.finditer(l)
        found = False
        for match in f_iter:
            span = match.span()
            if span == (57, 60):
                found = True
                break
        if not found:
            continue
        event_id = l[span[1] : span[1] + 14]
        break
    year = None
    month = None
    day = None
    if event_id:
        year = int(event_id[:4])
        month = int(event_id[4:6])
        day = int(event_id[6:8])
    # Parse events
    events = []
    for i, l in enumerate(events_table):
        if not len(l.strip()):
            continue
        try:
            # Fixed-column s-file phase-line layout.
            station = l[1:6].strip()
            instrument = l[6]
            channel = l[7]
            phase = l[10:14].strip()
            hour = int(l[18:20].strip())
            minute = int(l[20:22].strip())
            second = float(l[22:28].strip())
            distance = float(l[70:75].strip())
        except ValueError:
            # Malformed phase line: skip it.
            continue
        # Normalize seconds >= 60 by carrying into minutes.
        if second >= 60.:
            minute_add = second // 60
            second = (second % 60)
            minute += minute_add
            minute = int(minute)
        # Carry minute overflow into hours; clamp at the end of the day.
        if minute >= 60:
            if hour != 23:
                minute = 0
                hour += 1
            else:
                minute = 59
        minute = int(minute)
        hour = int(hour)
        if hour >= 24:
            continue
        utc_datetime = UTCDateTime(date_str(year, month, day, hour, minute, second))
        # Events filtering
        # TODO: Replace all min/max code for something better, that does full min and max support and checks for None values
        if params['min_magnitude'] and (not magnitude or magnitude < float(params['min_magnitude'])):
            if params['debug']:
                print(f'DEBUG: Skipping event in {path}. Reason: low magnitude ({magnitude}).')
            return
        if params['max_depth'] and (not depth or depth > float(params['max_depth'])):
            if params['debug']:
                print(f'DEBUG: Skipping event in {path}. Reason: high depth ({depth}).')
            return
        if params['max_distance'] and (not distance or distance > float(params['max_distance'])):
            if params['debug']:
                print(f'DEBUG: Skipping event in {path}. Reason: high distance ({distance}).')
            return
        events.append({'station': station,
                       'instrument': instrument,
                       'channel': channel,
                       'phase': phase,
                       'year': year,
                       'month': month,
                       'day': day,
                       'hour': hour,
                       'minute': minute,
                       'second': second,
                       'distance': distance,
                       'magnitude': magnitude,
                       'depth': depth,
                       's_path': path,
                       'utc_datetime': utc_datetime,
                       'id': event_id,
                       'line_number': events_table_line_num + i})
    return events
def group_events(events):
    """
    Groups events by ID, station and instrument code.

    :param events: list of event dicts with 'id', 'station', 'instrument'.
    :return: list of groups, in order of first occurrence; events keep their
        original relative order inside each group.
    """
    # Use a tuple key instead of the previous string concatenation: one O(n)
    # pass instead of O(n^2), no false merges from concatenation ambiguity
    # ('ab'+'c' == 'a'+'bc'), and no TypeError when 'id' is None.
    groups = {}
    for e in events:
        key = (e['id'], e['station'], e['instrument'])
        groups.setdefault(key, []).append(e)
    return list(groups.values())
def filter_events(events, stations, db = False):
    """
    Filters out phase lines with stations not defined in stations list. Also adds code and location to events.

    :param events: grouped event lists (output of group_events).
    :param stations: grouped station definitions.
    :param db: when True, prints the computed station tags for debugging.
    :return: event groups whose first event matches a known station/instrument
        pair, each event enriched with 'code', 'location' and 'algorithm'.
    """
    stations_tags = []
    for s in stations:
        # Station tag = [station_name, instrument, code, location, algorithm]
        # BUG FIX: 'algorithm' was initialized as "'' ," which bound the tuple
        # ('',) instead of the empty string.
        algorithm = ''
        if len(s[0][1]) == 3:
            # Three-character channel code: the middle letter names the algorithm.
            algorithm = s[0][1][1]
        stations_tags.append([s[0][0], s[0][1][0], s[0][2], s[0][3], algorithm])
    if db:
        print('TAGS:')
        for x in stations_tags:
            print(x)
        print('')
    filtered = []
    for group in events:
        event = group[0]
        for tag in stations_tags:
            if event['station'] == tag[0] and event['instrument'] == tag[1]:
                filtered.append(group)
                # Add code and location to every phase line of the group.
                for e in group:
                    e['code'] = tag[2]
                    e['location'] = tag[3]
                    e['algorithm'] = tag[4]
                break
    return filtered
def parse_s_dir(path, stations, params):
"""
Scans path directory, parses all s-files and returns filtered events_list grouped by stations.
:param path - path to the directory
:param stations - grouped stations list to filter out all stations which are not in this list.
"""
if path[-1] != '/':
path += '/'
try:
files = os.listdir(path)
files = [f'{path}{f}' for f in files]
except FileNotFoundError:
return
all_events = []
for f in files:
events = parse_s_file(f, params)
# TODO: remove this debug output
d_path = '02-0422-22D.S201601'
h_path = f.split('/')[-1]
if d_path == h_path and events:
print(f'\n\nFOUND LENGTH = {len(events)}')
if not events:
continue
events = group_events(events)
if d_path == h_path:
print(f'GROUP LENGTH {len(events)}')
if d_path == h_path:
print('\nSTATIONS:')
for x in stations:
print(x)
print('')
if d_path | |
64),
(8, "hybrid", 64),
(9, "hybrid", 64),
(0, "forward", 64),
(1, "forward", 64),
(2, "forward", 64),
(3, "forward", 64),
(4, "forward", 64),
(5, "forward", 64),
(6, "forward", 64),
(7, "forward", 64),
(8, "forward", 64),
(9, "forward", 64),
(0, "ibp", 64),
(1, "ibp", 64),
(2, "ibp", 64),
(3, "ibp", 64),
(4, "ibp", 64),
(5, "ibp", 64),
(6, "ibp", 64),
(7, "ibp", 64),
(8, "ibp", 64),
(9, "ibp", 64),
(0, "hybrid", 16),
(1, "hybrid", 16),
(2, "hybrid", 16),
(3, "hybrid", 16),
(4, "hybrid", 16),
(5, "hybrid", 16),
(6, "hybrid", 16),
(7, "hybrid", 16),
(8, "hybrid", 16),
(9, "hybrid", 16),
(0, "forward", 16),
(1, "forward", 16),
(2, "forward", 16),
(3, "forward", 16),
(4, "forward", 16),
(5, "forward", 16),
(6, "forward", 16),
(7, "forward", 16),
(8, "forward", 16),
(9, "forward", 16),
(0, "ibp", 16),
(1, "ibp", 16),
(2, "ibp", 16),
(3, "ibp", 16),
(4, "ibp", 16),
(5, "ibp", 16),
(6, "ibp", 16),
(7, "ibp", 16),
(8, "ibp", 16),
(9, "ibp", 16),
],
)
def test_DecomonConcatenate_1D_box(n0, mode, floatx):
    """Check DecomonConcatenate output bounds on 1D box domains for each mode."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    # float16 needs a looser epsilon and fewer decimals of agreement.
    decimal = 2 if floatx == 16 else 5
    if floatx == 16:
        K.set_epsilon(1e-2)
    decomon_op = DecomonConcatenate(axis=-1, dc_decomp=False, mode=mode)
    inputs_0 = get_tensor_decomposition_1d_box(dc_decomp=False)
    inputs_1 = get_tensor_decomposition_1d_box(dc_decomp=False)
    inputs_0_ = get_standart_values_1d_box(n0, dc_decomp=False)
    inputs_1_ = get_standart_values_1d_box(n0, dc_decomp=False)
    x0, y0, z0, u_c0, W_u0, b_u0, l_c0, W_l0, b_l0 = inputs_0
    x1, y1, z1, u_c1, W_u1, b_u1, l_c1, W_l1, b_l1 = inputs_1
    x0_, y0_, z0_, u_c0_, W_u0_, b_u0_, l_c0_, W_l0_, b_l0_ = inputs_0_
    x1_, y1_, z1_, u_c1_, W_u1_, b_u1_, l_c1_, W_l1_, b_l1_ = inputs_1_
    # Select the tensors the layer expects for the current propagation mode.
    mode_inputs = {
        "hybrid": inputs_0[2:] + inputs_1[2:],
        "forward": [z0, W_u0, b_u0, W_l0, b_l0, z1, W_u1, b_u1, W_l1, b_l1],
        "ibp": [u_c0, l_c0, u_c1, l_c1],
    }
    output_decomon = decomon_op(mode_inputs[mode])
    model = Model(inputs_0[2:] + inputs_1[2:], output_decomon)
    y_ = np.concatenate([y0_, y1_], -1)
    output_ = model.predict(inputs_0_[2:] + inputs_1_[2:])
    # Unpack according to mode; pieces a mode does not produce stay None.
    u_ = w_u_ = b_u_ = l_ = w_l_ = b_l_ = None
    z_ = z0_
    if mode == "hybrid":
        z_, u_, w_u_, b_u_, l_, w_l_, b_l_ = output_
    elif mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_ = output_
    elif mode == "ibp":
        u_, l_ = output_
    assert_output_properties_box(
        inputs_0_[0], y_, None, None, z_[:, 0], z_[:, 1], u_, w_u_, b_u_, l_, w_l_, b_l_, name="add", decimal=decimal
    )
    K.set_epsilon(eps)
    K.set_floatx("float32")
@pytest.mark.parametrize(
    "n0, mode, floatx",
    [
        (n0, mode, floatx)
        for floatx in (32, 64, 16)
        for mode in ("hybrid", "forward", "ibp")
        for n0 in (0, 1)
    ],
)
def test_DecomonAdd_multiD_box(n0, mode, floatx):
    """Check DecomonAdd output bounds on multi-D box domains for each mode."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    # float16 needs a looser epsilon and fewer decimals of agreement.
    decimal = 2 if floatx == 16 else 5
    if floatx == 16:
        K.set_epsilon(1e-2)
    decomon_op = DecomonAdd(dc_decomp=False, mode=mode)
    inputs_0 = get_tensor_decomposition_multid_box(n0, dc_decomp=False)
    inputs_1 = get_tensor_decomposition_multid_box(n0, dc_decomp=False)
    inputs_0_ = get_standard_values_multid_box(n0, dc_decomp=False)
    inputs_1_ = get_standard_values_multid_box(n0, dc_decomp=False)
    x0, y0, z0, u_c0, W_u0, b_u0, l_c0, W_l0, b_l0 = inputs_0
    x1, y1, z1, u_c1, W_u1, b_u1, l_c1, W_l1, b_l1 = inputs_1
    x0_, y0_, z0_, u_c0_, W_u0_, b_u0_, l_c0_, W_l0_, b_l0_ = inputs_0_
    x1_, y1_, z1_, u_c1_, W_u1_, b_u1_, l_c1_, W_l1_, b_l1_ = inputs_1_
    # Select the tensors the layer expects for the current propagation mode.
    mode_inputs = {
        "hybrid": inputs_0[2:] + inputs_1[2:],
        "forward": [z0, W_u0, b_u0, W_l0, b_l0, z1, W_u1, b_u1, W_l1, b_l1],
        "ibp": [u_c0, l_c0, u_c1, l_c1],
    }
    output_decomon = decomon_op(mode_inputs[mode])
    model = Model(inputs_0[2:] + inputs_1[2:], output_decomon)
    y_ = y0_ + y1_
    output_ = model.predict(inputs_0_[2:] + inputs_1_[2:])
    # Unpack according to mode; pieces a mode does not produce stay None.
    u_ = w_u_ = b_u_ = l_ = w_l_ = b_l_ = None
    z_ = z0_
    if mode == "hybrid":
        z_, u_, w_u_, b_u_, l_, w_l_, b_l_ = output_
    elif mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_ = output_
    elif mode == "ibp":
        u_, l_ = output_
    assert_output_properties_box(
        inputs_0_[0], y_, None, None, z_[:, 0], z_[:, 1], u_, w_u_, b_u_, l_, w_l_, b_l_, name="add", decimal=decimal
    )
    K.set_epsilon(eps)
    K.set_floatx("float32")
@pytest.mark.parametrize(
    "n0, mode, floatx",
    [
        (n0, mode, floatx)
        for floatx in (32, 64, 16)
        for mode in ("hybrid", "forward", "ibp")
        for n0 in (0, 1)
    ],
)
def test_DecomonSubstract_multiD_box(n0, mode, floatx):
    """Check DecomonSubtract output bounds on multi-D box domains for each mode."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    # float16 needs a looser epsilon and fewer decimals of agreement.
    decimal = 2 if floatx == 16 else 5
    if floatx == 16:
        K.set_epsilon(1e-2)
    decomon_op = DecomonSubtract(dc_decomp=False, mode=mode)
    inputs_0 = get_tensor_decomposition_multid_box(n0, dc_decomp=False)
    inputs_1 = get_tensor_decomposition_multid_box(n0, dc_decomp=False)
    inputs_0_ = get_standard_values_multid_box(n0, dc_decomp=False)
    inputs_1_ = get_standard_values_multid_box(n0, dc_decomp=False)
    x0, y0, z0, u_c0, W_u0, b_u0, l_c0, W_l0, b_l0 = inputs_0
    x1, y1, z1, u_c1, W_u1, b_u1, l_c1, W_l1, b_l1 = inputs_1
    x0_, y0_, z0_, u_c0_, W_u0_, b_u0_, l_c0_, W_l0_, b_l0_ = inputs_0_
    x1_, y1_, z1_, u_c1_, W_u1_, b_u1_, l_c1_, W_l1_, b_l1_ = inputs_1_
    # Select the tensors the layer expects for the current propagation mode.
    mode_inputs = {
        "hybrid": inputs_0[2:] + inputs_1[2:],
        "forward": [z0, W_u0, b_u0, W_l0, b_l0, z1, W_u1, b_u1, W_l1, b_l1],
        "ibp": [u_c0, l_c0, u_c1, l_c1],
    }
    output_decomon = decomon_op(mode_inputs[mode])
    model = Model(inputs_0[2:] + inputs_1[2:], output_decomon)
    y_ = y0_ - y1_
    output_ = model.predict(inputs_0_[2:] + inputs_1_[2:])
    # Unpack according to mode; pieces a mode does not produce stay None.
    u_ = w_u_ = b_u_ = l_ = w_l_ = b_l_ = None
    z_ = z0_
    if mode == "hybrid":
        z_, u_, w_u_, b_u_, l_, w_l_, b_l_ = output_
    elif mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_ = output_
    elif mode == "ibp":
        u_, l_ = output_
    assert_output_properties_box(
        inputs_0_[0], y_, None, None, z_[:, 0], z_[:, 1], u_, w_u_, b_u_, l_, w_l_, b_l_, name="add", decimal=decimal
    )
    K.set_floatx("float32")
    K.set_epsilon(eps)
@pytest.mark.parametrize(
    "n0, mode, floatx",
    [
        (n0, mode, floatx)
        for floatx in (32, 64, 16)
        for mode in ("hybrid", "forward", "ibp")
        for n0 in (0, 1)
    ],
)
def test_DecomonAverage_multiD_box(n0, mode, floatx):
    """Check DecomonAverage output bounds on multi-D box domains for each mode."""
    K.set_floatx("float{}".format(floatx))
    eps = K.epsilon()
    # float16 needs a looser epsilon and fewer decimals of agreement.
    decimal = 2 if floatx == 16 else 5
    if floatx == 16:
        K.set_epsilon(1e-2)
    decomon_op = DecomonAverage(dc_decomp=False, mode=mode)
    inputs_0 = get_tensor_decomposition_multid_box(n0, dc_decomp=False)
    inputs_1 = get_tensor_decomposition_multid_box(n0, dc_decomp=False)
    inputs_0_ = get_standard_values_multid_box(n0, dc_decomp=False)
    inputs_1_ = get_standard_values_multid_box(n0, dc_decomp=False)
    x0, y0, z0, u_c0, W_u0, b_u0, l_c0, W_l0, b_l0 = inputs_0
    x1, y1, z1, u_c1, W_u1, b_u1, l_c1, W_l1, b_l1 = inputs_1
    x0_, y0_, z0_, u_c0_, W_u0_, b_u0_, l_c0_, W_l0_, b_l0_ = inputs_0_
    x1_, y1_, z1_, u_c1_, W_u1_, b_u1_, l_c1_, W_l1_, b_l1_ = inputs_1_
    # Select the tensors the layer expects for the current propagation mode.
    mode_inputs = {
        "hybrid": inputs_0[2:] + inputs_1[2:],
        "forward": [z0, W_u0, b_u0, W_l0, b_l0, z1, W_u1, b_u1, W_l1, b_l1],
        "ibp": [u_c0, l_c0, u_c1, l_c1],
    }
    output_decomon = decomon_op(mode_inputs[mode])
    model = Model(inputs_0[2:] + inputs_1[2:], output_decomon)
    y_ = (y0_ + y1_) / 2.0
    output_ = model.predict(inputs_0_[2:] + inputs_1_[2:])
    # Unpack according to mode; pieces a mode does not produce stay None.
    u_ = w_u_ = b_u_ = l_ = w_l_ = b_l_ = None
    z_ = z0_
    if mode == "hybrid":
        z_, u_, w_u_, b_u_, l_, w_l_, b_l_ = output_
    elif mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_ = output_
    elif mode == "ibp":
        u_, l_ = output_
    assert_output_properties_box(
        inputs_0_[0], y_, None, None, z_[:, 0], z_[:, 1], u_, w_u_, b_u_, l_, w_l_, b_l_, name="add", decimal=decimal
    )
    K.set_epsilon(eps)
    K.set_floatx("float32")
@pytest.mark.parametrize(
"n0, mode, floatx",
[
(0, "hybrid", 32),
(1, "hybrid", 32),
(0, "forward", 32),
(1, "forward", 32),
(0, "ibp", 32),
(1, "ibp", 32),
(0, "hybrid", 64),
(1, "hybrid", 64),
(0, "forward", 64),
(1, "forward", 64),
(0, "ibp", 64),
(1, "ibp", 64),
(0, "hybrid", 16),
(1, "hybrid", 16),
(0, "forward", 16),
(1, "forward", 16),
(0, "ibp", 16),
(1, "ibp", 16),
],
)
def test_DecomonMaximum_multiD_box(n0, mode, floatx):
K.set_floatx("float{}".format(floatx))
eps = K.epsilon()
decimal = 5
if floatx == 16:
K.set_epsilon(1e-2)
decimal = 2
decomon_op = DecomonMaximum(dc_decomp=False, mode=mode)
inputs_0 = get_tensor_decomposition_multid_box(n0, dc_decomp=False)
inputs_1 = get_tensor_decomposition_multid_box(n0, dc_decomp=False)
inputs_0_ = get_standard_values_multid_box(n0, dc_decomp=False)
inputs_1_ = get_standard_values_multid_box(n0, dc_decomp=False)
x0, y0, z0, u_c0, W_u0, b_u0, l_c0, W_l0, b_l0 = inputs_0
x1, y1, z1, u_c1, W_u1, b_u1, l_c1, W_l1, b_l1 = inputs_1
x0_, y0_, z0_, u_c0_, W_u0_, | |
0.023, 0.022, 0.022, 0.023, 0.025, 0.027,
0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019, 0.021, 0.024,
0.027, 0.031, 0.037, 0.046, 0.058, 0.074, 0.088, 0.101, 0.114,
0.128, 0.142, 0.155, 0.321, 0.525, 0.742 ]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
    def test_stall4_cm(self):
        """3D stall correction on polar2 (zero cm input): cl/cd match the
        regression curves and cm remains identically zero."""
        # Representative blade geometry: mid-span station of a 5 m rotor.
        R = 5.0
        r = 0.5 * R
        chord = 0.5
        Omega = 100 * pi / 30  # 100 rpm in rad/s
        Uinf = 10.0
        tsr = Omega * R / Uinf  # tip-speed ratio
        newpolar = self.polar2.correction3D(
            r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
        )
        # Expected 3D-corrected lift curve (regression values).
        cl_3d = [-0.82374342, -0.73635957, -0.62607561, -0.51973994, -0.41893189,
                 -0.32049281, -0.22363306, -0.13151125, -0.05044467, 0.04878406,
                 0.2230304 , 0.33726265, 0.43491207, 0.55309262, 0.68390771,
                 0.72549134, 0.78523713, 0.86314507, 0.93631506, 1.00790573,
                 1.07791708, 1.12423867, 1.16266366, 1.20345763, 1.22293081,
                 1.09157913, 1.05893482, 1.043     , 1.043     , 1.048     ,
                 0.9595    , 0.8195    , 0.7]
        # Expected 3D-corrected drag curve (regression values).
        cd_3d = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025, 0.027,
                 0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019, 0.021, 0.024,
                 0.027, 0.031, 0.037, 0.046, 0.058, 0.074, 0.088, 0.101, 0.114,
                 0.128, 0.142, 0.155, 0.321, 0.525, 0.742]
        # cm started at zero, so the correction must leave it at zero.
        cm_zeros = np.zeros(len(cd_3d))
        # test equality
        np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
        np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
        np.testing.assert_allclose(newpolar.cm, cm_zeros, atol=1e-3)
class TestExtrap(unittest.TestCase):
    """Regression tests for Polar.extrapolate at several flat-plate cdmax values.

    The three tests previously duplicated the 54-point angle-of-attack grid
    and the interpolate/assert scaffolding verbatim; both are now shared via
    ALPHA_EXTRAP and _check_extrapolation. Public test names are unchanged.
    """

    # Angle-of-attack grid (deg) on which every extrapolated polar is compared.
    ALPHA_EXTRAP = [
        -180, -170, -160, -150, -140, -130, -120, -110, -100, -90,
        -80, -70, -60, -50, -40, -30, -20,
        -10.1, -8.2, -6.1, -4.1, -2.1, 0.1, 2, 4.1, 6.2, 8.1, 10.2,
        11.3, 12.1, 13.2, 14.2, 15.3, 16.3, 17.1, 18.1, 19.1, 20.1,
        30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150,
        160, 170, 180,
    ]

    def setUp(self):
        """Build the measured polar fixtures (with and without cm data)."""
        # Measured polar over the attached-flow and stall range (deg).
        alpha = [
            -10.1, -8.2, -6.1, -4.1, -2.1, 0.1, 2, 4.1, 6.2, 8.1, 10.2,
            11.3, 12.1, 13.2, 14.2, 15.3, 16.3, 17.1, 18.1, 19.1, 20.1,
        ]
        cl = [
            -0.6300, -0.5600, -0.6400, -0.4200, -0.2100, 0.0500, 0.3000,
            0.5400, 0.7900, 0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100,
            1.0200, 1.0000, 0.9400, 0.8500, 0.7000, 0.6600,
        ]
        cd = [
            0.0390, 0.0233, 0.0131, 0.0134, 0.0119, 0.0122, 0.0116, 0.0144,
            0.0146, 0.0162, 0.0274, 0.0303, 0.0369, 0.0509, 0.0648, 0.0776,
            0.0917, 0.0994, 0.2306, 0.3142, 0.3186,
        ]
        cm = [
            -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346, -0.0405,
            -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284, -0.0322,
            -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242, -0.1155,
        ]
        cm_zeros = np.zeros(len(cm))
        Re = 1
        self.polar = Polar(Re, alpha, cl, cd, cm)
        self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)

    def _check_extrapolation(self, cdmax, cl_extrap, cd_extrap, cm_extrap):
        """Extrapolate self.polar with the given cdmax and compare the curves.

        Re-interpolates onto ALPHA_EXTRAP because the extrapolated polar uses
        its own angle-of-attack spacing.
        """
        newpolar = self.polar.extrapolate(cdmax=cdmax)
        cl = np.interp(self.ALPHA_EXTRAP, newpolar.alpha, newpolar.cl)
        cd = np.interp(self.ALPHA_EXTRAP, newpolar.alpha, newpolar.cd)
        cm = np.interp(self.ALPHA_EXTRAP, newpolar.alpha, newpolar.cm)
        np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
        np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
        np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)

    def test_extrap1(self):
        """Extrapolation with cdmax = 1.29 (regression values)."""
        cl_extrap = [
            0.0000, 0.2299, 0.4597, 0.4907, 0.5053, 0.4805, 0.4102, 0.2985,
            0.1565, 0.0000, -0.1565, -0.2985, -0.4102, -0.4805, -0.5053,
            -0.4907, -0.4637,
            -0.6300, -0.5600, -0.6400, -0.4200, -0.2100, 0.0500, 0.3000,
            0.5400, 0.7900, 0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100,
            1.0200, 1.0000, 0.9400, 0.8500, 0.7000, 0.6600,
            0.7010, 0.7219, 0.6864, 0.5860, 0.4264, 0.2235, 0.0000, -0.1565,
            -0.2985, -0.4102, -0.4805, -0.5053, -0.4907, -0.4597, -0.2299,
            0.0000,
        ]
        cd_extrap = [
            0.1770, 0.2132, 0.3173, 0.4758, 0.6686, 0.8708, 1.0560, 1.1996,
            1.2818, 1.2900, 1.2818, 1.1996, 1.0560, 0.8708, 0.6686, 0.4758,
            0.3158,
            0.0390, 0.0233, 0.0131, 0.0134, 0.0119, 0.0122, 0.0116, 0.0144,
            0.0146, 0.0162, 0.0274, 0.0303, 0.0369, 0.0509, 0.0648, 0.0776,
            0.0917, 0.0994, 0.2306, 0.3142, 0.3186,
            0.4758, 0.6686, 0.8708, 1.0560, 1.1996, 1.2818, 1.2900, 1.2818,
            1.1996, 1.0560, 0.8708, 0.6686, 0.4758, 0.3173, 0.2132, 0.1770,
        ]
        cm_extrap = [
            0.0000, 0.4000, 0.2431, 0.2568, 0.2865, 0.3185, 0.3458, 0.3632,
            0.3672, 0.3559, 0.3443, 0.3182, 0.2808, 0.2362, 0.1886, 0.1414,
            0.0942,
            -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346, -0.0405,
            -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284, -0.0322,
            -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242, -0.1155,
            -0.1710, -0.2202, -0.2637, -0.3002, -0.3284, -0.3471, -0.3559,
            -0.3672, -0.3632, -0.3458, -0.3185, -0.2865, -0.2568, -0.2431,
            -0.5000, 0.0000,
        ]
        self._check_extrapolation(1.29, cl_extrap, cd_extrap, cm_extrap)

    def test_extrap2(self):
        """Extrapolation with cdmax = 1.0 (regression values)."""
        cl_extrap = [
            0.0000, 0.2299, 0.4597, 0.4411, 0.4287, 0.3943, 0.3297, 0.2364,
            0.1225, 0.0000, -0.1225, -0.2364, -0.3297, -0.3943, -0.4287,
            -0.4411, -0.4637,
            -0.6300, -0.5600, -0.6400, -0.4200, -0.2100, 0.0500, 0.3000,
            0.5400, 0.7900, 0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100,
            1.0200, 1.0000, 0.9400, 0.8500, 0.7000, 0.6600,
            0.6302, 0.6124, 0.5633, 0.4710, 0.3378, 0.1750, 0.0000, -0.1225,
            -0.2364, -0.3297, -0.3943, -0.4287, -0.4411, -0.4597, -0.2299,
            0.0000,
        ]
        cd_extrap = [
            0.2135, 0.2404, 0.3176, 0.4349, 0.5767, 0.7241, 0.8568, 0.9560,
            1.0069, 1.0000, 1.0069, 0.9560, 0.8568, 0.7241, 0.5767, 0.4349,
            0.3158,
            0.0390, 0.0233, 0.0131, 0.0134, 0.0119, 0.0122, 0.0116, 0.0144,
            0.0146, 0.0162, 0.0274, 0.0303, 0.0369, 0.0509, 0.0648, 0.0776,
            0.0917, 0.0994, 0.2306, 0.3142, 0.3186,
            0.4349, 0.5767, 0.7241, 0.8568, 0.9560, 1.0069, 1.0000, 1.0069,
            0.9560, 0.8568, 0.7241, 0.5767, 0.4349, 0.3176, 0.2404, 0.2135,
        ]
        cm_extrap = [
            0.0000, 0.4000, 0.2432, 0.2354, 0.2500, 0.2695, 0.2864, 0.2961,
            0.2956, 0.2834, 0.2776, 0.2603, 0.2337, 0.2013, 0.1663, 0.1310,
            0.0942,
            -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346, -0.0405,
            -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284, -0.0322,
            -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242, -0.1155,
            -0.1577, -0.1930, -0.2239, -0.2494, -0.2683, -0.2798, -0.2834,
            -0.2956, -0.2961, -0.2864, -0.2695, -0.2500, -0.2354, -0.2432,
            -0.5000, 0.0000,
        ]
        self._check_extrapolation(1.0, cl_extrap, cd_extrap, cm_extrap)

    def test_extrap3(self):
        """Extrapolation with cdmax = 1.5 (regression values)."""
        cl_extrap = [
            0.0000, 0.2299, 0.4597, 0.5266, 0.5608, 0.5429, 0.4685, 0.3434,
            0.1810, 0.0000, -0.1810, -0.3434, -0.4685, -0.5429, -0.5608,
            -0.5266, -0.4637,
            -0.6300, -0.5600, -0.6400, -0.4200, -0.2100, 0.0500, 0.3000,
            0.5400, 0.7900, 0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100,
            1.0200, 1.0000, 0.9400, 0.8500, 0.7000, 0.6600,
            0.7523, 0.8012, 0.7756, 0.6693, 0.4906, 0.2586, 0.0000, -0.1810,
            -0.3434, -0.4685, -0.5429, -0.5608, -0.5266, -0.4597, -0.2299,
            0.0000,
        ]
        cd_extrap = [
            0.1506, 0.1936, 0.3170, 0.5054, 0.7351, 0.9771, 1.2003, 1.3760,
            1.4809, 1.5000, 1.4809, 1.3760, 1.2003, 0.9771, 0.7351, 0.5054,
            0.3158,
            0.0390, 0.0233, 0.0131, 0.0134, 0.0119, 0.0122, 0.0116, 0.0144,
            0.0146, 0.0162, 0.0274, 0.0303, 0.0369, 0.0509, 0.0648, 0.0776,
            0.0917, 0.0994, 0.2306, 0.3142, 0.3186,
            0.5054, 0.7351, 0.9771, 1.2003, 1.3760, 1.4809, 1.5000, 1.4809,
            1.3760, 1.2003, 0.9771, 0.7351, 0.5054, 0.3170, 0.1936, 0.1506,
        ]
        cm_extrap = [
            0.0000, 0.4000, 0.2431, 0.2723, 0.3130, 0.3540, 0.3888, 0.4118,
            0.4190, 0.4084, 0.3926, 0.3602, 0.3148, 0.2614, 0.2049, 0.1488,
            0.0942,
            -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346, -0.0405,
            -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284, -0.0322,
            -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242, -0.1155,
            -0.1807, -0.2399, -0.2925, -0.3370, -0.3719, -0.3959, -0.4084,
            -0.4190, -0.4118, -0.3888, -0.3540, -0.3130, -0.2723, -0.2431,
            -0.5000, 0.0000,
        ]
        self._check_extrapolation(1.5, cl_extrap, cd_extrap, cm_extrap)
class TestMisc(unittest.TestCase):
    def setUp(self):
        """Build two Polar fixtures: one with measured cm, one with zero cm."""
        # Angle of attack (deg) across the attached-flow and stall range.
        alpha = [
            -10.1, -8.2, -6.1, -4.1, -2.1, 0.1, 2, 4.1, 6.2, 8.1, 10.2,
            11.3, 12.1, 13.2, 14.2, 15.3, 16.3, 17.1, 18.1, 19.1, 20.1,
        ]
        # Measured lift coefficients.
        cl = [
            -0.6300, -0.5600, -0.6400, -0.4200, -0.2100, 0.0500, 0.3000,
            0.5400, 0.7900, 0.9000, 0.9300, 0.9200, 0.9500, 0.9900, 1.0100,
            1.0200, 1.0000, 0.9400, 0.8500, 0.7000, 0.6600,
        ]
        # Measured drag coefficients.
        cd = [
            0.0390, 0.0233, 0.0131, 0.0134, 0.0119, 0.0122, 0.0116, 0.0144,
            0.0146, 0.0162, 0.0274, 0.0303, 0.0369, 0.0509, 0.0648, 0.0776,
            0.0917, 0.0994, 0.2306, 0.3142, 0.3186,
        ]
        # Measured moment coefficients.
        cm = [
            -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346, -0.0405,
            -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284, -0.0322,
            -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242, -0.1155,
        ]
        cm_zeros = np.zeros(len(cm))
        Re = 1
        # self.polar carries real cm data; self.polar2 has cm forced to zero.
        self.polar = Polar(Re, alpha, cl, cd, cm)
        self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)
    def test_unsteady(self):
        """Regression-check the unsteady-aero parameters derived from the polar."""
        alpha0, alpha1, alpha2, cnSlope, cn1, cn2, cd0, cm0 = self.polar.unsteadyParams()
        # Zero-lift angle and the positive/negative critical angles (deg).
        np.testing.assert_allclose(alpha0, -0.32307692307692304)
        np.testing.assert_allclose(alpha1, 9.260783831245934)
        np.testing.assert_allclose(alpha2, -6.779334979177289)
        # Normal-force curve slope and critical normal-force coefficients.
        np.testing.assert_allclose(cnSlope, 6.4380618436681765)
        np.testing.assert_allclose(cn1, 0.9201540372961516)
        np.testing.assert_allclose(cn2, -0.6377683435797556)
        # Zero-lift drag and moment coefficients.
        np.testing.assert_allclose(cd0, 0.012142307692307694)
        np.testing.assert_allclose(cm0, -0.03336923076923077)
def test_fully_separated(self):
cl_fs, f_st = self.polar.cl_fully_separated()
| |
# LDA/starter.py
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial
# Gradient descent optimization
# The learning rate is specified by eta
class GDOptimizer(object):
    """Plain gradient-descent optimizer with a fixed learning rate eta."""

    def __init__(self, eta):
        self.eta = eta

    def initialize(self, layers):
        """Vanilla gradient descent keeps no per-layer state."""
        pass

    def update(self, layers, g, a):
        """Apply one gradient-descent step to every layer.

        layers: dense layers of the network
        g: gradient flowing into each layer (before the nonlinear activation)
        a: activations of the previous layer feeding each layer
        """
        num_samples = a[0].shape[1]
        step = -self.eta / num_samples
        for layer, grad_in, act_in in zip(layers, g, a):
            layer.updateWeights(step * np.dot(grad_in, act_in.T))
            layer.updateBias(step * np.sum(grad_in, 1).reshape(layer.b.shape))
# Cost function used to compute prediction errors
class QuadraticCost(object):
    """Squared-error cost used to measure prediction errors."""

    # Compute the squared error between the prediction yp and the observation y
    # This method should compute the cost per element such that the output is the
    # same shape as y and yp
    @staticmethod
    def fx(y, yp):
        return 0.5 * np.square(yp - y)

    # Derivative of the cost function with respect to yp
    @staticmethod
    def dx(y, yp):
        # NOTE(review): for the declared argument order this is -(d fx / d yp).
        # Model.train in this file calls it with the arguments swapped
        # (dx(yp, y.T)), so the two sign flips cancel and the optimizer
        # receives the correct gradient. Do not "fix" the sign in isolation.
        return y - yp
# Sigmoid function fully implemented as an example
class SigmoidActivation(object):
    """Logistic sigmoid activation: fx(z) = 1 / (1 + exp(-z))."""

    @staticmethod
    def fx(z):
        """Element-wise sigmoid of z."""
        return 1 / (1 + np.exp(-z))

    @staticmethod
    def dx(z):
        """Element-wise derivative sigma(z) * (1 - sigma(z)).

        Evaluates the sigmoid once instead of twice as the original did.
        """
        s = SigmoidActivation.fx(z)
        return s * (1 - s)
# Hyperbolic tangent function
class TanhActivation(object):
    """Hyperbolic tangent activation."""

    @staticmethod
    def fx(z):
        """Element-wise tanh of z."""
        return np.tanh(z)

    @staticmethod
    def dx(z):
        """Derivative 1 - tanh(z)^2, from a single tanh evaluation."""
        t = np.tanh(z)
        return 1 - t * t
# Rectified linear unit
class ReLUActivation(object):
    """Rectified linear unit activation."""

    @staticmethod
    def fx(z):
        """Element-wise max(0, z)."""
        return np.maximum(0, z)

    @staticmethod
    def dx(z):
        """Sub-gradient: 1.0 where z > 0, else 0.0."""
        return np.where(z > 0, 1.0, 0.0)
# Linear activation
class LinearActivation(object):
    """Identity activation."""

    @staticmethod
    def fx(z):
        """Return z unchanged."""
        return z

    @staticmethod
    def dx(z):
        """Constant derivative of 1 for every element."""
        return np.full(z.shape, 1.0)
# This class represents a single hidden or output layer in the neural network
class DenseLayer(object):
    """A single fully-connected hidden or output layer of the network."""

    # numNodes: number of hidden units in the layer
    # activation: the activation function to use in this layer
    def __init__(self, numNodes, activation):
        self.numNodes = numNodes
        self.activation = activation

    def getNumNodes(self):
        """Return the number of units in this layer."""
        return self.numNodes

    # Initialize the weight matrix of this layer based on the size of the matrix W
    def initialize(self, fanIn, scale=1.0):
        """Randomly initialize weights and biases; fanIn is the previous layer's size."""
        # NOTE(review): uses a normal draw with the Glorot scale
        # sqrt(6 / (fanIn + fanOut)); classic Glorot uses a uniform draw on
        # [-s, s] -- presumably intentional here, confirm with the author.
        s = scale * np.sqrt(6.0 / (self.numNodes + fanIn))
        self.W = np.random.normal(0, s, (self.numNodes, fanIn))
        self.b = np.random.uniform(-1, 1, (self.numNodes, 1))

    # Apply the activation function of the layer on the input z
    def a(self, z):
        return self.activation.fx(z)

    # Compute the linear part of the layer
    # The input a is an n x k matrix where n is the number of samples
    # and k is the dimension of the previous layer (or the input to the network)
    def z(self, a):
        return self.W.dot(a) + self.b  # Note, this is implemented where we assume a is k x n

    # Compute the derivative of the layer's activation function with respect to z
    # where z is the output of the above function.
    # This derivative does not contain the derivative of the matrix multiplication
    # in the layer. That part is computed below in the model class.
    def dx(self, z):
        return self.activation.dx(z)

    # Update the weights of the layer by adding dW to the weights
    def updateWeights(self, dW):
        self.W = self.W + dW

    # Update the bias of the layer by adding db to the bias
    def updateBias(self, db):
        self.b = self.b + db
# This class handles stacking layers together to form the completed neural network
class Model(object):
    """Feed-forward neural network assembled from DenseLayer objects.

    inputSize: the dimension of the inputs that go into the network.
    """

    def __init__(self, inputSize):
        self.layers = []
        self.inputSize = inputSize

    def addLayer(self, layer):
        """Append a layer to the end of the network."""
        self.layers.append(layer)

    def getLayerSize(self, index):
        """Return the output size of the layer at the given index.

        Negative indices refer to the network input; indices past the last
        layer refer to the final layer's output.
        """
        if index >= len(self.layers):
            return self.layers[-1].getNumNodes()
        elif index < 0:
            return self.inputSize
        return self.layers[index].getNumNodes()

    def initialize(self, cost, initializeLayers=True):
        """Initialize all layer weights and set the cost function for training."""
        self.cost = cost
        if initializeLayers:
            # Each layer's fan-in is the output size of the previous layer
            # (or the network input size for the first layer). The original
            # if/else here had two identical branches; collapsed into one.
            for i in range(len(self.layers)):
                self.layers[i].initialize(self.getLayerSize(i - 1))

    def evaluate(self, x):
        """Feed the n x k input matrix x forward through the network.

        Returns:
            yp - output of the network (last layer's activation)
            a  - list of inputs for each layer (a[i] is the input to layer i)
            z  - list of pre-activation values for each layer
        """
        curA = x.T  # layers operate on k x n (samples as columns)
        a = [curA]
        z = []
        for layer in self.layers:
            z.append(layer.z(curA))
            curA = layer.a(z[-1])
            a.append(curA)
        yp = a.pop()  # final activation is the prediction, not a layer input
        return yp, a, z

    def predict(self, a):
        """Return the network output for the n x k input matrix a."""
        yp, _, _ = self.evaluate(a)
        return yp.T

    def train(self, x, y, numEpochs, optimizer):
        """Train on inputs x and observations y for numEpochs full-batch steps.

        Returns a list with the mean cost at the end of each epoch.
        """
        hist = []
        optimizer.initialize(self.layers)
        for epoch in range(numEpochs):
            # Feed forward; a holds the input to every layer.
            yp, a, z = self.evaluate(x)
            # Derivative of the cost at the output. NOTE: arguments are passed
            # swapped relative to the cost's (y, yp) signature; combined with
            # QuadraticCost.dx returning y - yp this yields the correct
            # gradient dC/dyp.
            d = self.cost.dx(yp, y.T)
            grad = []
            # Backpropagate the error through the layers.
            for layer, curZ in zip(reversed(self.layers), reversed(z)):
                # dC/dz for the current layer; the optimizer applies the
                # matrix-multiplication part of the chain rule itself.
                grad.insert(0, np.multiply(d, layer.dx(curZ)))
                d = np.dot(layer.W.T, grad[0])
            # Apply the gradient step.
            optimizer.update(self.layers, grad, a)
            # Record the mean cost at the end of the epoch.
            yh = self.predict(x)
            hist.append(np.mean(self.cost.fx(yh, y)))
        return hist

    def trainBatch(self, x, y, batchSize, numEpochs, optimizer):
        """Train with mini-batch SGD for numEpochs epochs.

        Returns a list with the mean per-batch cost of each epoch.
        """
        # Copy the data so shuffling does not reorder the caller's arrays.
        x = x.copy()
        y = y.copy()
        hist = []
        n = x.shape[0]
        for epoch in range(numEpochs):
            # Shuffle the data. BUG FIX: the permutation was created but never
            # shuffled, so batches were identical every epoch.
            r = np.arange(0, x.shape[0])
            np.random.shuffle(r)
            x = x[r, :]
            y = y[r, :]
            e = []
            # Split the data into chunks and run one SGD step per chunk.
            for i in range(0, n, batchSize):
                end = min(i + batchSize, n)
                e += self.train(x[i:end, :], y[i:end, :], 1, optimizer)
            hist.append(np.mean(e))
        return hist
########################################################################
######### Part b #######################################################
########################################################################
########################################################################
######### Gradient Computing and MLE ##################################
########################################################################
def compute_gradient_of_likelihood(single_obj_loc, sensor_loc, single_distance, noise=1):
    """
    Compute the gradient of the loglikelihood function for part a.
    Input:
    single_obj_loc: 1 * d numpy array.
    Location of the single object.
    sensor_loc: k * d numpy array.
    Location of sensor.
    single_distance: k dimensional numpy array.
    Observed distance of the object.
    Output:
    grad: d-dimensional numpy array.
    """
    # Offsets from each sensor to the object, shape k * d.
    offsets = single_obj_loc - sensor_loc
    # Predicted distances (Euclidean norms of the offsets), shape k.
    predicted = np.linalg.norm(offsets, axis=1)
    # Relative residual per sensor: (predicted - observed) / predicted.
    residual_ratio = (predicted - single_distance) / predicted
    # Weighted sum of offsets gives the d-dimensional gradient.
    return -(residual_ratio[:, None] * offsets).sum(axis=0) / noise**2
########################################################################
######### Part c #################################################
########################################################################
def log_likelihood(obj_loc, sensor_loc, distance, noise=1):
    """
    This function computes the log likelihood (as expressed in Part a).
    Input:
    obj_loc: shape [1,2]
    sensor_loc: shape [7,2]
    distance: shape [7]
    Output:
    The log likelihood function value.
    """
    # Predicted sensor-to-object distances (Euclidean norm per sensor).
    predicted = np.linalg.norm(sensor_loc - obj_loc, axis=1)
    residual = predicted - distance
    # Gaussian log likelihood up to an additive constant.
    return -np.sum(residual ** 2) / (2 * noise ** 2)
########################################################################
######### Part e, f, g #################################################
########################################################################
########################################################################
######### Gradient Computing and MLE ##################################
########################################################################
def compute_grad_likelihood_part_e(sensor_loc, obj_loc, distance, noise=1):
"""
| |
# -*- coding: utf-8 -*-
import codecs
from itertools import takewhile
from parglare import Parser
from parglare import termui as t
from .parser import SHIFT, REDUCE, ACCEPT, pos_to_line_col, Token
from .common import replace_newlines as _, position_context
from .export import dot_escape
from .termui import prints, h_print, a_print
def no_colors(f):
    """
    Decorator for trace methods that switches ANSI colors off while the
    wrapped method runs, so color escape codes never leak into the
    trace dot output, then restores the parser's configured setting.
    """
    def wrapper(*args, **kwargs):
        self = args[0]
        t.colors = False
        result = f(*args, **kwargs)
        # Restore whatever the parser instance was configured with.
        t.colors = self.debug_colors
        return result
    return wrapper
class GLRParser(Parser):
"""
A Tomita-style GLR parser.
"""
def __init__(self, *args, **kwargs):
table = kwargs.get('table', None)
lexical_disambiguation = kwargs.get('lexical_disambiguation', None)
if table is None:
# The default for GLR is not to use any strategy preferring shifts
# over reduce thus investigating all possibilities.
# These settings are only applicable if parse table is not computed
# yet. If it is, then leave None values to avoid
# "parameter overriden" warnings.
prefer_shifts = kwargs.get('prefer_shifts', None)
prefer_shifts_over_empty = kwargs.get('prefer_shifts_over_empty',
None)
prefer_shifts = False \
if prefer_shifts is None else prefer_shifts
prefer_shifts_over_empty = False \
if prefer_shifts_over_empty is None \
else prefer_shifts_over_empty
if lexical_disambiguation is None:
lexical_disambiguation = False
kwargs['prefer_shifts'] = prefer_shifts
kwargs['prefer_shifts_over_empty'] = prefer_shifts_over_empty
kwargs['lexical_disambiguation'] = lexical_disambiguation
super(GLRParser, self).__init__(*args, **kwargs)
def _check_parser(self):
"""
Conflicts in table are allowed with GLR.
"""
pass
    def parse(self, input_str, position=0, file_name=None, extra=None):
        """
        Parses the given input string.
        Args:
            input_str(str): A string to parse.
            position(int): Position to start from.
            file_name(str): File name if applicable. Used in error reporting.
            extra: An object that keeps custom parsing state. If not given
                initialized to dict.
        Returns:
            A list of results, one per successful (possibly ambiguous) parse.
        Raises:
            The last registered error if no head is accepted and error
            recovery is disabled or fails.
        """
        if self.debug:
            a_print("*** PARSING STARTED\n")
            self.debug_step = 0
            if self.debug_trace:
                self.dot_trace = ""
        self.input_str = input_str
        self.file_name = file_name
        self.extra = {} if extra is None else extra
        # Error reporting and recovery
        self.errors = []
        self.in_error_reporting = False
        self.expected = set()
        self.tokens_ahead = []
        self.last_shifted_heads = []
        # A stack of heads being reduced. Contains tuples (head, list of
        # pending reductions). Used to perform reductions in a depth-first
        # manner.
        self.reducing_stack = []
        # For optimization, keep only state ids for quick check before
        # searching.
        self.reducing_stack_states = []
        # Heads that are fully reduced and thus are candidates for the next
        # shifting or accepting. Fully reduced heads (heads without any pending
        # reduction) from reducing_stack are merged to these heads.
        self.reduced_heads = {}
        # Heads created during shift operations.
        self.shifted_heads = []
        # Accepted (finished) heads
        self.accepted_heads = []
        # We start with a single parser head in state 0.
        start_head = GSSNode(self, self.table.states[0], 0, position,
                             number_of_trees=1)
        self._init_dynamic_disambiguation(start_head)
        self.shifted_heads.append(start_head)
        if self.debug and self.debug_trace:
            self._trace_head(start_head)
        # The main loop: alternate reduce and shift phases until every head
        # is either accepted or dead, interleaving error reporting/recovery.
        while True:
            if not self.in_error_reporting:
                self.last_shifted_heads = list(self.shifted_heads)
            self._do_reductions()
            if self.in_error_reporting:
                # Expected symbols are only those that can cause reduced head
                # to shift.
                self.expected = set([
                    h.token_ahead.symbol for h in self.reduced_heads
                    if h.token_ahead.symbol in h.state.actions
                    and SHIFT in [action.action
                                  for action
                                  in h.state.actions[h.token_ahead.symbol]]])
                if self.debug:
                    a_print("*** LEAVING ERROR REPORTING MODE.",
                            new_line=True)
                    h_print("Tokens expected:",
                            ', '.join([t.name for t in self.expected]),
                            level=1)
                    h_print("Tokens found:", self.tokens_ahead, level=1)
                self.reduced_heads = {}
                self.in_error_reporting = False
                # After leaving error reporting mode, register error and try
                # recovery if enabled
                context = self.last_shifted_heads[0]
                self.errors.append(
                    self._create_error(
                        context, self.expected,
                        tokens_ahead=self.tokens_ahead,
                        symbols_before=list(
                            {h.state.symbol
                             for h in self.last_shifted_heads}),
                        last_heads=self.last_shifted_heads))
                if self.error_recovery:
                    if self.debug:
                        a_print("*** STARTING ERROR RECOVERY.",
                                new_line=True)
                    if self._do_recovery():
                        # Error recovery succeeded
                        if self.debug:
                            a_print(
                                "*** ERROR RECOVERY SUCCEEDED. CONTINUING.",
                                new_line=True)
                        continue
                    else:
                        break
                else:
                    break
            else:
                self._do_shifts_accepts()
                # No live heads at all: switch to error reporting to collect
                # the expected-token information before recovery.
                if not self.shifted_heads and not self.accepted_heads:
                    if self.debug:
                        a_print("*** ENTERING ERROR REPORTING MODE.",
                                new_line=True)
                    self._enter_error_reporting()
                    continue
                if not self.shifted_heads:
                    break
        if self.debug and self.debug_trace:
            self._export_dot_trace()
        if self.accepted_heads:
            # Return results
            results = [x.results for head in self.accepted_heads
                       for x in head.parents]
            if self.debug:
                a_print("*** {} sucessful parse(s).".format(len(results)))
            self._remove_transient_state()
            return results
        else:
            # Report error
            self._remove_transient_state()
            raise self.errors[-1]
    def _do_reductions(self):
        """
        Perform all possible reductions for this shift level.

        First determines lookahead tokens for all shifted heads (splitting
        heads on lexical ambiguity), then repeatedly reduces heads in a
        depth-first manner via self.reducing_stack, merging fully reduced
        heads into self.reduced_heads.
        """
        debug = self.debug
        if debug:
            a_print("** REDUCING", new_line=True)
            self._debug_active_heads(self.shifted_heads)
        if not self.in_error_reporting:
            # First we shall find lookaheads for all shifted heads and split
            # heads on lexical ambiguity.
            shifted_heads = []
            while self.shifted_heads:
                head = self.shifted_heads.pop()
                if head.token_ahead is not None:
                    # This might happen if this head is produced by error
                    # recovery
                    shifted_heads.append(head)
                    continue
                if debug:
                    h_print("Finding lookaheads", new_line=True)
                self._skipws(head, self.input_str)
                tokens = self._next_tokens(head)
                if debug:
                    self._debug_context(
                        head.position,
                        head.layout_content_ahead,
                        lookahead_tokens=tokens,
                        expected_symbols=head.state.actions.keys())
                if tokens:
                    while tokens:
                        # For lexical ambiguity create a new head for each new
                        # token recognized ahead.
                        shifted_heads.append(head.for_token(tokens.pop()))
                else:
                    # Can't find lookahead. This head can't progress; it is
                    # dropped by simply not adding it to shifted_heads.
                    if debug:
                        h_print('No lookaheads found. Killing head.')
        else:
            # In error reporting mode lookaheads were already determined.
            shifted_heads = self.shifted_heads
        while shifted_heads:
            head = shifted_heads.pop()
            self._prepare_reductions(head)
            while self.reducing_stack:
                while self.reducing_stack[-1][1]:
                    reduction = self.reducing_stack[-1][1].pop()
                    new_head = self._reduce(head, reduction)
                    if new_head is not None:
                        # The reduction produced a genuinely new head; descend
                        # into it depth-first before finishing this one.
                        head = new_head
                        self._prepare_reductions(head)
                # No more reduction for top of the stack head.
                # Pop of the stack and merge to reduced heads.
                head = self.reducing_stack.pop()[0]
                self.reducing_stack_states.pop()
                if self.debug:
                    h_print('No more reductions for head:', str(head),
                            level=1, new_line=True)
                reduced_head = self.reduced_heads.get(head, None)
                if reduced_head is None:
                    if self.debug:
                        h_print('Adding head to reduced heads.', level=1)
                    self.reduced_heads[head] = head
                else:
                    # An equivalent head was already fully reduced; merge the
                    # GSS parents instead of keeping a duplicate.
                    reduced_head.merge_head(head, self)
def _do_shifts_accepts(self):
"""
Do shifts and accepts of the reduced heads
"""
debug = self.debug
if debug:
a_print("** SHIFTING", new_line=True)
self._debug_active_heads(self.reduced_heads.values())
while self.reduced_heads:
head, __ = self.reduced_heads.popitem()
actions = head.state.actions.get(head.token_ahead.symbol)
action = actions[0] if actions else None
if action is None or action.action == REDUCE:
if debug:
a_print("Can't shift head: ", str(head), new_line=True)
else:
if action.action == ACCEPT:
if debug:
a_print('**ACCEPTING HEAD: ', str(head))
self.accepted_heads.append(head)
else:
self._shift(head, action.state)
def _prepare_reductions(self, head):
"""
Finds all possible reduction for the given head and make a new stack
entry with pending reductions.
"""
debug = self.debug
if debug:
a_print("Preparing reductions for head: ", str(head),
new_line=True)
productions = []
symbol_actions = head.state.actions.get(head.token_ahead.symbol, [])
for symbol_action in symbol_actions:
action = symbol_action.action
if action is REDUCE:
productions.append(symbol_action.prod)
if debug:
h_print("\tProductions:\n\t\t",
'\n\t\t'.join([str(p) for p in productions]))
reductions = []
for production in productions:
if debug:
h_print('Processing production:', str(production),
level=1, new_line=True)
prod_len = len(production.rhs)
if prod_len == 0:
# Special case, empty reduction
reductions.append((head, production, [],
head.position, head.position))
else:
# Find roots of possible reductions by going backwards for
# prod_len steps following all possible paths. Collect
# subresults along the way to be used with semantic actions
to_process = [(head, [], prod_len, None)]
if debug:
h_print("Calculate reduction paths of length {}:"
.format(prod_len), level=1)
h_print("start node=",
"[{}], symbol={}, "
"length={}".format(head, head.state.symbol,
prod_len), level=2)
while to_process:
(node,
results,
length,
last_parent) = to_process.pop()
length = length - 1
if debug:
h_print("node = {}".format(node), level=2,
new_line=True)
h_print("backpath length = {}{}"
.format(prod_len - length,
" - ROOT" if not length else ""),
level=2)
first_parent = None
for parent in node.parents:
if debug:
h_print("", str(parent.head), level=3)
new_results = [parent.results] + results
if first_parent is None:
first_parent = parent
if last_parent is None:
last_parent = parent
if length:
to_process.append((parent.parent, new_results,
length, last_parent))
else:
reductions.append((parent.parent,
production,
new_results,
first_parent.start_position,
last_parent.end_position))
first_parent = parent
if debug:
h_print("Reduction paths = ", len(reductions), level=1,
new_line=True)
for idx, reduction in enumerate(reductions):
if debug:
h_print("Reduction {}:".format(idx + 1),
reductions,
level=1)
self.reducing_stack.append((head, reductions))
self.reducing_stack_states.append(head.state.state_id)
def _reduce(self, head, reduction):
"""
Executes the given reduction.
"""
root_head, production, results, \
start_position, end_position = reduction
if start_position is None:
start_position = end_position = root_head.position
state = root_head.state.gotos[production.symbol]
if self.debug:
self.debug_step += 1
a_print("{}. REDUCING head ".format(self.debug_step), str(head),
new_line=True)
a_print("by prod ", production, level=1)
a_print("to state {}:{}".format(state.state_id,
state.symbol), level=1)
a_print("root is ", root_head, level=1)
a_print("Position span: {} - {}".format(start_position,
end_position), level=1)
new_head = GSSNode(self, state, head.position,
head.shift_level,
number_of_trees=head.number_of_trees,
token_ahead=head.token_ahead)
parent = GSSNodeParent(root_head, new_head, results,
start_position, end_position,
production=production)
if not self.dynamic_filter or \
self._call_dynamic_filter(parent, head.state, state,
REDUCE, production, results):
parent.results = self._call_reduce_action(parent, results)
# Check for possible automata loops for the newly reduced head.
# Handle loops by creating GSS loops for empty reduction loops or
# rejecting cyclic reductions for non-empty reductions.
if self.debug:
h_print('Check loops. Reduce stack states:',
self.reducing_stack_states,
level=1)
if new_head.state.state_id in | |
import os
import vtk
import ctk
import qt
import slicer
from EditOptions import HelpButton
import LabelEffect
__all__ = [
'WandEffectOptions',
'WandEffectTool',
'WandEffectLogic',
'WandEffect'
]
#
# This defines the hooks to be come an editor effect.
#
#
# WandEffectOptions - see LabelEffect, EditOptions and Effect for superclasses
#
class WandEffectOptions(LabelEffect.LabelEffectOptions):
  """WandEffect-specific gui.

  Exposes the wand (flood fill) parameters -- tolerance, maximum pixels
  filled per click, and fill mode (current plane vs. whole volume) --
  and keeps them in sync with the editor MRML parameter node.
  """
  def __init__(self, parent=0):
    super(WandEffectOptions,self).__init__(parent)
    # self.attributes should be tuple of options:
    # 'MouseTool' - grabs the cursor
    # 'Nonmodal' - can be applied while another is active
    # 'Disabled' - not available
    # BUG FIX: ('MouseTool') is just a parenthesized string, not a tuple;
    # the trailing comma makes it the documented one-element tuple so
    # membership tests can't match arbitrary substrings.
    self.attributes = ('MouseTool',)
    self.displayName = 'Wand Effect'
  def __del__(self):
    super(WandEffectOptions,self).__del__()
  def create(self):
    """Build the tolerance, max-pixels and fill-mode widgets."""
    super(WandEffectOptions,self).create()
    self.toleranceFrame = qt.QFrame(self.frame)
    self.toleranceFrame.setLayout(qt.QHBoxLayout())
    self.frame.layout().addWidget(self.toleranceFrame)
    self.widgets.append(self.toleranceFrame)
    self.toleranceLabel = qt.QLabel("Tolerance:", self.toleranceFrame)
    self.toleranceLabel.setToolTip("Set the tolerance of the wand in terms of background pixel values")
    self.toleranceFrame.layout().addWidget(self.toleranceLabel)
    self.widgets.append(self.toleranceLabel)
    self.toleranceSpinBox = ctk.ctkDoubleSpinBox(self.toleranceFrame)
    self.toleranceSpinBox.setToolTip("Set the tolerance of the wand in terms of background pixel values")
    self.toleranceSpinBox.minimum = 0
    self.toleranceSpinBox.maximum = 1000
    self.toleranceSpinBox.suffix = ""
    self.toleranceFrame.layout().addWidget(self.toleranceSpinBox)
    self.widgets.append(self.toleranceSpinBox)
    self.maxPixelsFrame = qt.QFrame(self.frame)
    self.maxPixelsFrame.setLayout(qt.QHBoxLayout())
    self.frame.layout().addWidget(self.maxPixelsFrame)
    self.widgets.append(self.maxPixelsFrame)
    self.maxPixelsLabel = qt.QLabel("Max Pixels per click:", self.maxPixelsFrame)
    self.maxPixelsLabel.setToolTip("Set the maxPixels for each click")
    self.maxPixelsFrame.layout().addWidget(self.maxPixelsLabel)
    self.widgets.append(self.maxPixelsLabel)
    self.maxPixelsSpinBox = ctk.ctkDoubleSpinBox(self.maxPixelsFrame)
    self.maxPixelsSpinBox.setToolTip("Set the maxPixels for each click")
    self.maxPixelsSpinBox.minimum = 1
    self.maxPixelsSpinBox.maximum = 100000
    self.maxPixelsSpinBox.suffix = ""
    self.maxPixelsFrame.layout().addWidget(self.maxPixelsSpinBox)
    self.widgets.append(self.maxPixelsSpinBox)
    self.fillModeFrame = qt.QFrame(self.frame)
    self.fillModeFrame.setLayout(qt.QHBoxLayout())
    self.frame.layout().addWidget(self.fillModeFrame)
    self.widgets.append(self.fillModeFrame)
    self.fillModeCheckBox = qt.QCheckBox(self.fillModeFrame)
    self.fillModeCheckBox.text = "Fill Volume"
    self.fillModeCheckBox.setToolTip("Fill in 3D when checked, else fill plane")
    self.fillModeFrame.layout().addWidget(self.fillModeCheckBox)
    self.widgets.append(self.fillModeCheckBox)
    HelpButton(self.frame, "Use this tool to label all voxels that are within a tolerance of where you click")
    # don't connect the signals and slots directly - instead, add these
    # to the list of connections so that gui callbacks can be cleanly
    # disabled while the gui is being updated. This allows several gui
    # elements to be interlinked with signal/slots but still get updated
    # as a unit to the new value of the mrml node.
    self.connections.append(
      (self.toleranceSpinBox, 'valueChanged(double)', self.onToleranceSpinBoxChanged) )
    self.connections.append(
      (self.maxPixelsSpinBox, 'valueChanged(double)', self.onMaxPixelsSpinBoxChanged) )
    self.connections.append(
      (self.fillModeCheckBox, 'clicked()', self.onFillModeClicked) )
    # Add vertical spacer
    self.frame.layout().addStretch(1)
  def destroy(self):
    super(WandEffectOptions,self).destroy()
  # note: this method needs to be implemented exactly as-is
  # in each leaf subclass so that "self" in the observer
  # is of the correct type
  def updateParameterNode(self, caller, event):
    node = self.editUtil.getParameterNode()
    if node != self.parameterNode:
      if self.parameterNode:
        node.RemoveObserver(self.parameterNodeTag)
      self.parameterNode = node
      self.parameterNodeTag = node.AddObserver(vtk.vtkCommand.ModifiedEvent, self.updateGUIFromMRML)
  def setMRMLDefaults(self):
    """Write default parameter values into the node without firing events."""
    super(WandEffectOptions,self).setMRMLDefaults()
    disableState = self.parameterNode.GetDisableModifiedEvent()
    self.parameterNode.SetDisableModifiedEvent(1)
    defaults = (
      ("tolerance", "20"),
      ("maxPixels", "200"),
      ("fillMode", "Plane"),
    )
    for d in defaults:
      param = "WandEffect,"+d[0]
      pvalue = self.parameterNode.GetParameter(param)
      if pvalue == '':
        self.parameterNode.SetParameter(param, d[1])
    self.parameterNode.SetDisableModifiedEvent(disableState)
  def updateGUIFromMRML(self,caller,event):
    params = ("tolerance", "maxPixels",)
    for p in params:
      if self.parameterNode.GetParameter("WandEffect,"+p) == '':
        # don't update if the parameter node has not got all values yet
        return
    # BUG FIX: the superclass update was previously invoked both before and
    # after the completeness guard, refreshing the shared widgets twice per
    # event; invoke it exactly once, after the guard.
    super(WandEffectOptions,self).updateGUIFromMRML(caller,event)
    self.disconnectWidgets()
    self.toleranceSpinBox.setValue( float(self.parameterNode.GetParameter("WandEffect,tolerance")) )
    self.maxPixelsSpinBox.setValue( float(self.parameterNode.GetParameter("WandEffect,maxPixels")) )
    self.fillModeCheckBox.checked = self.parameterNode.GetParameter("WandEffect,fillMode") == "Volume"
    # Tolerance is irrelevant while threshold painting is active.
    # NOTE(review): thresholdPaint is presumably created by the superclass
    # gui -- confirm against LabelEffectOptions.
    self.toleranceFrame.setHidden( self.thresholdPaint.checked )
    self.connectWidgets()
  def onToleranceSpinBoxChanged(self,value):
    if self.updatingGUI:
      return
    self.updateMRMLFromGUI()
  def onMaxPixelsSpinBoxChanged(self,value):
    if self.updatingGUI:
      return
    self.updateMRMLFromGUI()
  def onFillModeClicked(self):
    if self.updatingGUI:
      return
    self.updateMRMLFromGUI()
  def updateMRMLFromGUI(self):
    """Push widget values into the parameter node, batching the events."""
    disableState = self.parameterNode.GetDisableModifiedEvent()
    self.parameterNode.SetDisableModifiedEvent(1)
    super(WandEffectOptions,self).updateMRMLFromGUI()
    self.parameterNode.SetParameter( "WandEffect,tolerance", str(self.toleranceSpinBox.value) )
    self.parameterNode.SetParameter( "WandEffect,maxPixels", str(self.maxPixelsSpinBox.value) )
    fillMode = "Volume" if self.fillModeCheckBox.checked else "Plane"
    self.parameterNode.SetParameter( "WandEffect,fillMode", fillMode )
    self.parameterNode.SetDisableModifiedEvent(disableState)
    if not disableState:
      self.parameterNode.InvokePendingModifiedEvent()
#
# WandEffectTool
#
class WandEffectTool(LabelEffect.LabelEffectTool):
  """
  One instance of this will be created per-view when the effect
  is selected. It is responsible for implementing feedback and
  label map changes in response to user input.
  This class observes the editor parameter node to configure itself
  and queries the current view for background and label volume
  nodes to operate on.
  """
  def __init__(self, sliceWidget):
    super(WandEffectTool,self).__init__(sliceWidget)
    self.logic = WandEffectLogic(self.sliceWidget.sliceLogic())
  def cleanup(self):
    super(WandEffectTool,self).cleanup()
  def processEvent(self, caller=None, event=None):
    """
    handle events from the render window interactor
    """
    # Give the superclass first crack at the event.
    if super(WandEffectTool,self).processEvent(caller,event):
      return
    # Only a left click triggers the wand; everything else is ignored.
    if event != "LeftButtonPressEvent":
      return
    clickXY = self.interactor.GetEventPosition()
    # Rebuild the logic with the current slice logic so the fill operates
    # on whatever the view shows right now.
    self.logic = WandEffectLogic(self.sliceWidget.sliceLogic())
    self.logic.undoRedo = self.undoRedo
    self.logic.apply(clickXY)
    self.abortEvent(event)
#
# WandEffectLogic
#
class WandEffectLogic(LabelEffect.LabelEffectLogic):
  """
  This class contains helper methods for a given effect
  type. It can be instanced as needed by an WandEffectTool
  or WandEffectOptions instance in order to compute intermediate
  results (say, for user feedback) or to implement the final
  segmentation editing operation. This class is split
  from the WandEffectTool so that the operations can be used
  by other code without the need for a view context.
  """
  def __init__(self,sliceLogic):
    super(WandEffectLogic,self).__init__(sliceLogic)
    self.sliceLogic = sliceLogic
    # fill mode for apply(); can be 'Plane' or 'Volume'
    self.fillMode = 'Plane' # can be Plane or Volume
  def apply(self,xy):
    """Flood fill the label volume starting at viewport coordinate xy.

    Reads tolerance/maxPixels/fillMode from the editor parameter node and
    performs a breadth-first flood fill over the background volume,
    writing the current label into connected pixels whose background
    values fall within the tolerance (or threshold) window.
    """
    #
    # get the parameters from MRML
    #
    node = self.editUtil.getParameterNode()
    tolerance = float(node.GetParameter("WandEffect,tolerance"))
    maxPixels = float(node.GetParameter("WandEffect,maxPixels"))
    self.fillMode = node.GetParameter("WandEffect,fillMode")
    paintOver = int(node.GetParameter("LabelEffect,paintOver"))
    paintThreshold = int(node.GetParameter("LabelEffect,paintThreshold"))
    thresholdMin = float(node.GetParameter("LabelEffect,paintThresholdMin"))
    thresholdMax = float(node.GetParameter("LabelEffect,paintThresholdMax"))
    #
    # get the label and background volume nodes
    #
    labelLogic = self.sliceLogic.GetLabelLayer()
    labelNode = labelLogic.GetVolumeNode()
    backgroundLogic = self.sliceLogic.GetBackgroundLayer()
    backgroundNode = backgroundLogic.GetVolumeNode()
    #
    # get the ijk location of the clicked point
    # by projecting through patient space back into index
    # space of the volume. Result is sub-pixel, so round it
    # (note: bg and lb will be the same for volumes created
    # by the editor, but can be different if the use selected
    # different bg nodes, but that is not handled here).
    #
    xyToIJK = labelLogic.GetXYToIJKTransform()
    ijkFloat = xyToIJK.TransformDoublePoint(xy+(0,))
    ijk = []
    for element in ijkFloat:
      try:
        intElement = int(round(element))
      except ValueError:
        # non-numeric transform output falls back to index 0
        intElement = 0
      ijk.append(intElement)
    # numpy arrays are indexed k,j,i (slowest to fastest varying axis)
    ijk.reverse()
    ijk = tuple(ijk)
    #
    # Get the numpy array for the bg and label
    #
    import vtk.util.numpy_support, numpy
    backgroundImage = backgroundNode.GetImageData()
    labelImage = labelNode.GetImageData()
    shape = list(backgroundImage.GetDimensions())
    shape.reverse()
    backgroundArray = vtk.util.numpy_support.vtk_to_numpy(backgroundImage.GetPointData().GetScalars()).reshape(shape)
    labelArray = vtk.util.numpy_support.vtk_to_numpy(labelImage.GetPointData().GetScalars()).reshape(shape)
    if self.fillMode == 'Plane':
      # select the plane corresponding to current slice orientation
      # for the input volume
      ijkPlane = self.sliceIJKPlane()
      i,j,k = ijk
      if ijkPlane == 'JK':
        backgroundDrawArray = backgroundArray[:,:,k]
        labelDrawArray = labelArray[:,:,k]
        ijk = (i, j)
      if ijkPlane == 'IK':
        backgroundDrawArray = backgroundArray[:,j,:]
        labelDrawArray = labelArray[:,j,:]
        ijk = (i, k)
      if ijkPlane == 'IJ':
        backgroundDrawArray = backgroundArray[i,:,:]
        labelDrawArray = labelArray[i,:,:]
        ijk = (j, k)
    elif self.fillMode == 'Volume':
      backgroundDrawArray = backgroundArray
      labelDrawArray = labelArray
    #
    # do a recursive search for pixels to change
    #
    self.undoRedo.saveState()
    value = backgroundDrawArray[ijk]
    label = self.editUtil.getLabel()
    if paintThreshold:
      # threshold painting: fixed window from the editor settings
      lo = thresholdMin
      hi = thresholdMax
    else:
      # wand tolerance: window centered on the clicked pixel's value
      lo = value - tolerance
      hi = value + tolerance
    pixelsSet = 0
    # NOTE(review): list.pop(0) below makes this a FIFO (breadth-first)
    # visit but is O(n) per pop; a collections.deque would be faster.
    toVisit = [ijk,]
    # Create a map that contains the location of the pixels
    # that have been already visited (added or considered to be added).
    # This is required if paintOver is enabled because then we reconsider
    # all pixels (not just the ones that have not labelled yet).
    if paintOver:
      labelDrawVisitedArray = numpy.zeros(labelDrawArray.shape,dtype='bool')
    while toVisit != []:
      location = toVisit.pop(0)
      try:
        l = labelDrawArray[location]
        b = backgroundDrawArray[location]
      except IndexError:
        # neighbor outside the array: skip it
        continue
      if (not paintOver and l != 0):
        # label filled already and not painting over, leave it alone
        continue
      if (paintOver and l == label):
        # label is the current one, but maybe it was filled with another high/low value,
        # so we have to visit it once (and only once) in this session, too
        if labelDrawVisitedArray[location]:
          # visited already, so don't try to fill it again
          continue
        else:
          # we'll visit this pixel now, so mark it as visited
          labelDrawVisitedArray[location] = True
      if b < lo or b > hi:
        # background value outside the fill window
        continue
      labelDrawArray[location] = label
      if l != label:
        # only count those pixels that were changed (to allow step-by-step growing by multiple mouse clicks)
        pixelsSet += 1
      if pixelsSet > maxPixels:
        # hit the budget: clear the queue to stop the fill
        toVisit = []
      else:
        if self.fillMode == 'Plane':
          # add the 4 neighbors to the stack
          toVisit.append((location[0] - 1, location[1]     ))
          toVisit.append((location[0] + 1, location[1]     ))
          toVisit.append((location[0]    , location[1] - 1 ))
          toVisit.append((location[0]    , location[1] + 1 ))
        elif self.fillMode == 'Volume':
          # add the 6 neighbors to the stack
          toVisit.append((location[0] - 1, location[1]    , location[2]    ))
          toVisit.append((location[0] + 1, location[1]    , location[2]    ))
          toVisit.append((location[0]    , location[1] - 1, location[2]    ))
          toVisit.append((location[0]    , location[1] + 1, location[2]    ))
          toVisit.append((location[0]    , location[1]    , location[2] - 1))
          toVisit.append((location[0]    , location[1]    , location[2] + 1))
    # signal to slicer that the label needs to be updated
    self.editUtil.markVolumeNodeAsModified(labelNode)
#
# The WandEffect class definition
#
class WandEffect(LabelEffect.LabelEffect):
"""Organizes the Options, Tool, and Logic classes into a single instance
that can be managed by the EditBox
"""
def | |
or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a service.SetInstanceAcceleratorRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.SetInstanceAcceleratorRequest):
request = service.SetInstanceAcceleratorRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_instance_accelerator]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
instance.Instance,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
def set_instance_machine_type(
self,
request: Union[service.SetInstanceMachineTypeRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates the machine type of a single Instance.
Args:
request (Union[google.cloud.notebooks_v1beta1.types.SetInstanceMachineTypeRequest, dict]):
The request object. Request for setting instance machine
type.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.notebooks_v1beta1.types.Instance`
The definition of a notebook instance.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a service.SetInstanceMachineTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.SetInstanceMachineTypeRequest):
request = service.SetInstanceMachineTypeRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.set_instance_machine_type
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
instance.Instance,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
def set_instance_labels(
self,
request: Union[service.SetInstanceLabelsRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates the labels of an Instance.
Args:
request (Union[google.cloud.notebooks_v1beta1.types.SetInstanceLabelsRequest, dict]):
The request object. Request for setting instance labels.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.notebooks_v1beta1.types.Instance`
The definition of a notebook instance.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a service.SetInstanceLabelsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.SetInstanceLabelsRequest):
request = service.SetInstanceLabelsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_instance_labels]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
instance.Instance,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
def delete_instance(
    self,
    request: Union[service.DeleteInstanceRequest, dict] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Deletes a single Instance.

    Args:
        request (Union[google.cloud.notebooks_v1beta1.types.DeleteInstanceRequest, dict]):
            The request object. Request for deleting a notebook
            instance.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            A long-running operation whose result, on success, is
            :class:`google.protobuf.empty_pb2.Empty` — the generic empty
            message (JSON representation ``{}``) used when an API method
            has no meaningful payload to return.
    """
    # Coerce a dict (or None) into the protobuf request type; no flattened
    # fields exist, so the caller's input is never mutated.
    if not isinstance(request, service.DeleteInstanceRequest):
        request = service.DeleteInstanceRequest(request)

    # The transport pre-wraps each method with retry/timeout defaults and
    # friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.delete_instance]

    # Route the call server-side by resource name via the gRPC routing header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )
    all_metadata = tuple(metadata) + (routing_header,)

    # Issue the RPC, then wrap the raw long-running operation in an
    # operation future whose result type is Empty.
    raw_response = rpc(request, retry=retry, timeout=timeout, metadata=all_metadata)
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        empty_pb2.Empty,
        metadata_type=service.OperationMetadata,
    )
def start_instance(
    self,
    request: Union[service.StartInstanceRequest, dict] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Starts a notebook instance.

    Args:
        request (Union[google.cloud.notebooks_v1beta1.types.StartInstanceRequest, dict]):
            The request object. Request for starting a notebook
            instance
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            A long-running operation whose result, on success, is a
            :class:`google.cloud.notebooks_v1beta1.types.Instance`
            (the definition of a notebook instance).
    """
    # Coerce a dict (or None) into the protobuf request type; no flattened
    # fields exist, so the caller's input is never mutated.
    if not isinstance(request, service.StartInstanceRequest):
        request = service.StartInstanceRequest(request)

    # The transport pre-wraps each method with retry/timeout defaults and
    # friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.start_instance]

    # Route the call server-side by resource name via the gRPC routing header.
    routing_header = gapic_v1.routing_header.to_grpc_metadata(
        (("name", request.name),)
    )
    all_metadata = tuple(metadata) + (routing_header,)

    # Issue the RPC, then wrap the raw long-running operation in an
    # operation future that resolves to an Instance.
    raw_response = rpc(request, retry=retry, timeout=timeout, metadata=all_metadata)
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        instance.Instance,
        metadata_type=service.OperationMetadata,
    )
def stop_instance(
self,
request: Union[service.StopInstanceRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Stops a notebook instance.
Args:
request (Union[google.cloud.notebooks_v1beta1.types.StopInstanceRequest, dict]):
The request object. Request for stopping a notebook
instance
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.notebooks_v1beta1.types.Instance`
The definition of a notebook instance.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a service.StopInstanceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, service.StopInstanceRequest):
request = service.StopInstanceRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.stop_instance]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
instance.Instance,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
| |
"""Annotation spreadsheet import, parsing, and validation."""
from __future__ import absolute_import, division, print_function, unicode_literals
import csv
import logging
import os
from collections import OrderedDict
from copy import copy
from openpyxl import Workbook, load_workbook
from openpyxl.styles import Font, Protection
from openpyxl.worksheet.datavalidation import DataValidation
__all__ = ('FileImporter', 'FileExporter', )
logger = logging.getLogger(__name__)

# TODO: Construct these constants from the descriptor schema yaml files.

# Core columns: sample identity and raw read files.
BASIC = [
    'SAMPLE_NAME',
    'READS_1',
    'READS_2',
    'BARCODES_FILE',
    'SEQ_TYPE',
]
# Sample-level biological metadata.
SAMPLE_INFO = [
    'ANNOTATOR',
    'ORGANISM',
    'SOURCE',
    'CELL_TYPE',
    'STRAIN',
    'GENOTYPE',
    'MOLECULE',
    'DESCRIPTION',
]
# Wet-lab protocol descriptions.
PROTOCOLS = [
    'GROWTH_PROTOCOL',
    'TREATMENT_PROTOCOL',
    'EXTRACT_PROTOCOL',
    'LIBRARY_PREP',
    'FRAGMENTATION_METHOD',
]
SEQ_DATA = [
    'SEQ_DATE',
    'BARCODE_REMOVED',
]
READS_DATA = [
    'BARCODE',
    'INSTRUMENT_TYPE',
    'FACILITY',
]
ANTIBODY = ['ANTIBODY']
OPTIONAL = [
    'AGE',
    'LIBRARY_STRATEGY',
    'TISSUE',
    'OTHER_CHAR_1',
    'OTHER_CHAR_2',
]
# Full spreadsheet header, in column order.
COLUMNS = (
    BASIC
    + SAMPLE_INFO
    + PROTOCOLS
    # + SEQ_DATA  # TODO: Validation incompatible with Resolwe
    + READS_DATA
    + ANTIBODY
    + OPTIONAL
)
# Allowed values for the restricted columns below.
ORGANISM = {
    'Homo sapiens',
    'Mus musculus',
    'Dictyostelium discoideum',
    'Rattus norvegicus',
}
MOLECULE = {
    'total RNA',
    'polyA RNA',
    'cytoplasmic RNA',
    'nuclear RNA',
    'genomic DNA',
    'protein',
    'other',
}
SEQ_TYPE = {
    'RNA-Seq',
    'Chemical mutagenesis',
    'miRNA-Seq',
    # BUG FIX: a missing comma previously concatenated the next two
    # literals into the single invalid option 'ncRNA-SeqRNA-Seq (CAGE)',
    # silently dropping both valid sequencing types.
    'ncRNA-Seq',
    'RNA-Seq (CAGE)',
    'RNA-Seq (RACE)',
    'ChIP-Seq',
    'ChIPmentation',
    'ChIP-Rx',
    'MNase-Seq',
    'MBD-Seq',
    'MRE-Seq',
    'Bisulfite-Seq',
    'Bisulfite-Seq (reduced representation)',
    'MeDIP-Seq',
    'DNase-Hypersensitivity',
    'Tn-Seq',
    'FAIRE-seq',
    'SELEX',
    'RIP-Seq',
    'ChIA-PET',
    # NOTE(review): 'eClIP' spelling presumably mirrors the descriptor
    # schema -- confirm against the schema before "correcting" to 'eCLIP'.
    'eClIP',
    'OTHER',
}
# Values treated as "no data" in any cell.
EMPTY = [
    '',
    'N/A',
    'NONE',
    None,
]
# Columns that must be filled in for every sample.
REQUIRED = {
    'SAMPLE_NAME',
    'SEQ_TYPE',
    'ANNOTATOR',
    'ORGANISM',
    'SOURCE',
    'MOLECULE',
}
# Columns restricted to a fixed option set (spreadsheet dropdowns).
LIMITED = {
    'SEQ_TYPE': SEQ_TYPE,
    'ORGANISM': ORGANISM,
    'MOLECULE': MOLECULE,
    'BARCODE_REMOVED': {'1', '0'},
}
class FileImporter(object):
    """Import annotation spreadsheet.

    Reads an Excel or tab-separated samplesheet, parses every row, and
    partitions the result into valid ``Sample`` objects and invalid names.

    :param str annotation_path: path to a sample annotation spreadsheet.
    """

    def __init__(self, annotation_path):
        """Validate the annotation sheet and create the sample list."""
        self._is_file(annotation_path)
        entries = self._populate_entries(annotation_path)
        self.valid_samples, self.invalid_names = self._create_all_samples(entries)
        if self.invalid_names:
            logger.error(
                "\nInvalid annotations were provided for the following samples: %s."
                "\nPlease fill in all bolded columns of the template "
                "generated by the `export_annotation` method of"
                " your collection.",
                ', '.join(self.invalid_names)
            )

    def _is_file(self, path):
        """Raise ``OSError`` unless ``path`` names an existing file."""
        if os.path.isfile(path):
            return
        raise OSError(
            "The provided annotation file '{}' "
            "does not exist.".format(path)
        )

    def _get_spreadsheet_extension(self, path):
        """Find spreadsheet file extension."""
        _, extension = os.path.splitext(path)
        return extension

    def _read_xls(self, path):
        """Read Excel spreadsheet annotation file."""
        rows = load_workbook(path).active.rows
        # First row is the header; the rest are sample entries.
        header = [cell.value for cell in next(rows)]
        return [self._parse_row(header, row) for row in rows]

    def _parse_row(self, header, row):
        """Convert spreadsheet row into sample entry."""
        return {
            head: self._parse_cell(cell)
            for head, cell in zip(header, row)
        }

    def _parse_cell(self, cell):
        """Interpret spreadsheet cell."""
        value = cell.value
        # Numeric cells come back as floats; keep them as strings.
        if isinstance(value, float):
            return str(value)
        if value in EMPTY:
            return ''
        return value

    def _read_text_file(self, path):
        """Read simple spreadsheet annotation file."""
        # NOTE(review): binary mode matches the Python 2 csv API; Python 3's
        # csv expects text mode -- confirm the target interpreter.
        with open(path, 'rb') as sample_sheet:
            reader = csv.DictReader(sample_sheet, delimiter='\t')
            return list(reader)

    def _populate_entries(self, path):
        """Check the format of annotation file and assign read function."""
        extension = self._get_spreadsheet_extension(path)
        if extension in ('.xls', '.xlsx', '.xlsm'):
            return self._read_xls(path)
        if extension in ('.txt', '.tab', '.tsv'):
            return self._read_text_file(path)
        raise TypeError(
            "Annotation spreadsheet extension '{}' not recognised. Options"
            " are: '.xls', '.xlsx', '.xlsm', '.txt', '.tab', "
            "'.tsv'.".format(extension)
        )

    def _create_all_samples(self, entries):
        """Create a sample from each samplesheet entry."""
        valid = OrderedDict()
        invalid = set()
        for entry in entries:
            self._create_sample(entry, valid, invalid)
        return valid, invalid

    def _create_sample(self, entry, valid_samples, invalid_names):
        """Create a sample from a samplesheet entry, if unique."""
        name = entry['SAMPLE_NAME']
        if name in invalid_names or name in valid_samples:
            # A repeated name invalidates every occurrence, including the
            # one already accepted.
            logger.error(
                "The sample name '%s' is duplicated. Please use unique "
                "sample names.",
                name
            )
            invalid_names.add(name)
            valid_samples.pop(name, None)
            return
        try:
            valid_samples[name] = Sample(entry)
        except ValueError as ex:
            invalid_names.add(name)
            logger.error(ex)
class FileExporter(object):
    """Export annotation spreadsheet.

    Builds a protected spreadsheet template (bold required headers, dropdown
    validation for restricted columns), appends one annotation row per
    sample, and saves it to ``export_path``.

    :param sample_list: a list of resdk sample objects
    :param str export_path: path to write the sample annotation spreadsheet
    """

    def __init__(self, sample_list=(), export_path=None):
        """Initialize the samplesheet template.

        ``sample_list`` defaults to an immutable empty tuple (the previous
        mutable ``[]`` default is a classic shared-default pitfall); any
        iterable of samples is accepted.
        """
        self.path = export_path
        self._samples = sample_list
        self._template = self._create_template(COLUMNS)
        for sample in sample_list:
            self._add_entry(sample)
        self._template.save(filename=self.path)

    def _create_template(self, headers):
        """Construct a template samplesheet."""
        template = Workbook()
        sheet = template.active
        # Add headers and protect the sheet; the data-entry body of each
        # column is unlocked again in _apply_xlsm_formats.
        sheet.append(headers)
        sheet.protection.sheet = True
        # Apply formats to every header cell / column.
        for cell in sheet[1]:
            self._apply_xlsm_formats(sheet, cell)
        return template

    def _apply_xlsm_formats(self, sheet, cell):
        """Apply column-specific, macro-enabled spreadsheet formats."""
        # Create styles
        normal = Font(name='Arial')
        bold = copy(normal)
        bold.bold = True
        # Acquire indices and headers
        header = cell.value
        col_id = cell.column
        col = sheet.column_dimensions[col_id]
        col.font = normal
        col.width = self._get_column_width(header)
        # Unlock the column body so users can type into it; the header cell
        # itself stays locked by the sheet-level protection above.
        col.protection = Protection(locked=False)
        cell.font = normal  # Required for locking (openpyxl bug?)
        # Required columns are highlighted in bold.
        if header in REQUIRED:
            cell.font = bold
        # Columns with a fixed option set get a dropdown validation.
        try:
            options = '"{}"'.format(','.join(LIMITED[header]))
            valid = DataValidation(type="list", formula1=options)
            valid.error = "Invalid {}.".format(header)
            sheet.add_data_validation(valid)
            valid.add(self._get_column_body(col_id))
            col.width = self._get_column_width(LIMITED[header])
        except KeyError:
            # Column is free-text; nothing to restrict.
            pass
        # The date column gets date-type validation.
        if header == 'SEQ_DATE':
            valid_date = DataValidation(type="date")
            valid_date.error = "Invalid date."
            sheet.add_data_validation(valid_date)
            valid_date.add(self._get_column_body(col_id))

    def _get_column_body(self, column):
        """Give the indices for the entire column, minus the header."""
        return '{0}2:{0}1048576'.format(column)

    def _get_column_width(self, words, factor=1.7, limits=(8, 20)):
        """Choose a column width based on the given list of words.

        A single string is wrapped in a list first. ``type(u'')`` also
        matches unicode strings on Python 2 — this module imports
        ``unicode_literals``, so every header constant is unicode there; the
        previous bare ``isinstance(words, str)`` missed them, iterated the
        string character-by-character, and collapsed every column to the
        minimum width.
        """
        if isinstance(words, (str, type(u''))):
            words = [words]
        width = factor * max(len(word) for word in words)
        if width > limits[1]:
            width = limits[1]
        elif width < limits[0]:
            width = limits[0]
        return width

    def _add_entry(self, sample):
        """Add a sample as an entry to the samplesheet."""
        sheet = self._template.active
        annotation = self._extract_descriptors(sample)
        entry = [annotation.get(header, '') for header in COLUMNS]
        sheet.append(entry)

    def _extract_descriptors(self, sample):
        """Extract all sample annotation info as a dictionary."""
        # Populate the sample info
        info = {'SAMPLE_NAME': sample.name}
        if sample.descriptor:
            info.update(sample.descriptor['sample'])
            info.update(self._extract_optional(info.pop('optional_char', [])))
        # Populate the raw sequencing characteristics
        try:
            reads = sample.data.filter(type='data:reads')
            info.update(self._extract_seqinfo(reads[0].descriptor))
        except IndexError:
            logger.warning("No reads found for sample '%s'.", sample.name)
        except KeyError:
            logger.warning("Sample '%s' reads not annotated.", sample.name)
        # Eliminate null values and capitalize headers
        return {key.upper(): '' if val in EMPTY else val for key, val in info.items()}

    def _extract_optional(self, char_list):
        """Convert a list of optional characteristics into a dictionary."""
        return dict(char.split(':') for char in char_list)

    def _extract_seqinfo(self, info):
        """Extract reads annotation info from a sample."""
        entry = {'SEQ_TYPE': info['experiment_type']}
        if 'reads_info' in info:
            entry.update(_dict_upper(info['reads_info']))
        if 'protocols' in info:
            entry.update(_dict_upper(info['protocols']))
        return entry
class Sample(object):
"""Create a Sample like object.
:param dict entry: a dictionary containing header:data pairs generated from
an annotation spreadsheet
"""
# TODO: Abstract this to handle other descriptor schema types.
def __init__(self, entry):
    """Validate the entry and construct the sample descriptor.

    :param dict entry: header -> value mapping for one spreadsheet row
    :raises KeyError: for missing/unrecognized headers (from ``validate``)
    :raises ValueError: for empty or disallowed required values
    """
    # validate() raises for malformed entries; whatever it returns is
    # recorded as-is on the instance.
    self.valid = self.validate(entry)
    self._build_descriptors(entry)
    self.community_tag = self._get_community_tag(self.seq_type.lower())
def _build_descriptors(self, entry):
    """Extract the sample meta-data.

    Splits one spreadsheet row into ``self.reads_annotation`` (protocols and
    sequencing info) and ``self.sample_annotation`` (the sample descriptor),
    plus plain attributes (paths, barcode, ...) used elsewhere.
    """
    self.name = entry['SAMPLE_NAME']
    self.path = entry['READS_1']
    self.path2 = entry['READS_2']
    self.path3 = entry['BARCODES_FILE']
    self.seq_type = entry['SEQ_TYPE']
    self.barcode = entry['BARCODE']
    # Build reads descriptor
    protocols = {char.lower(): entry[char] for char in PROTOCOLS}
    antibody = {
        'antibody_information': {'manufacturer': entry['ANTIBODY']}
    }
    protocols.update(antibody)
    reads_info = {char.lower(): entry[char] for char in READS_DATA}
    self.reads_annotation = {'protocols': protocols, 'reads_info': reads_info}
    # TODO: Fix format incompatibility between openpyxl and Resolwe
    # for char in SEQ_DATA:
    #     if entry[char]:
    #         self.reads_annotation['reads_info'][char.lower()] = entry[char]
    # Build remaining sample descriptor
    self.molecule = entry['MOLECULE']
    self.organism = entry['ORGANISM']
    self.annotator = entry['ANNOTATOR']
    self.source = entry['SOURCE']
    self.sample_annotation = {
        'sample': {
            'cell_type': entry['CELL_TYPE'],
            'strain': entry['STRAIN'],
            'genotype': entry['GENOTYPE'],
            'description': entry['DESCRIPTION'],
        }
    }
    # Include only if they are non-empty, to not override error-checking
    if self.seq_type:
        self.reads_annotation['experiment_type'] = self.seq_type
    fields = [
        ('organism', self.organism),
        ('molecule', self.molecule),
        ('annotator', self.annotator),
        ('source', self.source),
    ]
    reqfields = {label: info for label, info in fields if info}
    self.sample_annotation['sample'].update(reqfields)
    # Include optional columns
    # (sorted for a deterministic "KEY:value" list in the descriptor)
    optional = [
        '{0}:{1}'.format(char, entry[char])
        for char in sorted(OPTIONAL) if entry[char]
    ]
    self.sample_annotation['sample']['optional_char'] = optional
def validate(self, entry):
    """Validate the annotation spreadsheet entry.

    :param dict entry: header -> value mapping for one spreadsheet row
    :returns: ``True`` when the entry passes all checks
    :raises KeyError: if headers are missing or unrecognized
    :raises ValueError: if a required value is empty or not an allowed option
    """
    # Check column headers
    diff1 = set(COLUMNS) - set(entry.keys())
    diff2 = set(entry.keys()) - set(COLUMNS)
    err_head = (
        "Headers '{}' {}. You should use the headers generated by"
        " the `export_annotation` method of your collection."
    )
    if diff1:
        raise KeyError(
            err_head.format("', '".join(diff1), "are missing")
        )
    if diff2:
        raise KeyError(
            err_head.format("', '".join(diff2), "not recognized")
        )
    # Check required, restricted values
    err_req = "For the sample, '{}', '{}' is not a valid {}."
    restricted = [
        ('organism', ORGANISM),
        ('molecule', MOLECULE),
        ('seq_type', SEQ_TYPE),
    ]
    for var_name, options in restricted:
        var = entry[var_name.upper()]
        if var not in options:
            raise ValueError(
                err_req.format(
                    entry['SAMPLE_NAME'], var, var_name.upper()
                )
            )
    # Check required, unrestricted values
    for var_name in ['sample_name', 'annotator', 'source']:
        var = entry[var_name.upper()]
        if var.upper() in EMPTY:
            raise ValueError(
                err_req.format(
                    entry['SAMPLE_NAME'], var, var_name.upper()
                )
            )
    # BUG FIX: this method previously fell off the end and returned None,
    # so `self.valid = self.validate(entry)` was always falsy even for
    # valid entries. Signal success explicitly.
    return True
def _get_community_tag(self, experiment):
"""Prepare community tags."""
if 'rna' in | |
"""
Tests for the BNMF Gibbs sampler.
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../"
sys.path.append(project_location)
import numpy, math, pytest, itertools
from BNMTF.code.models.bnmf_gibbs_optimised import bnmf_gibbs_optimised
""" Test constructor """
def test_init():
    """Constructor validation and attribute setup.

    Malformed R/M/lambdaU/lambdaV shapes and fully unobserved rows/columns
    must raise AssertionError with an exact message; a valid call must store
    all inputs, broadcasting scalar lambda priors to full matrices.
    """
    # Test getting an exception when R and M are different sizes, and when R is not a 2D array.
    R1 = numpy.ones(3)
    M = numpy.ones((2,3))
    I,J,K = 5,3,1
    lambdaU = numpy.ones((I,K))
    lambdaV = numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R1,M,K,priors)
    assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 1-dimensional."
    R2 = numpy.ones((4,3,2))
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R2,M,K,priors)
    assert str(error.value) == "Input matrix R is not a two-dimensional array, but instead 3-dimensional."
    R3 = numpy.ones((3,2))
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R3,M,K,priors)
    assert str(error.value) == "Input matrix R is not of the same size as the indicator matrix M: (3, 2) and (2, 3) respectively."
    # Similarly for lambdaU, lambdaV
    R4 = numpy.ones((2,3))
    lambdaU = numpy.ones((2+1,1))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R4,M,K,priors)
    assert str(error.value) == "Prior matrix lambdaU has the wrong shape: (3, 1) instead of (2, 1)."
    lambdaU = numpy.ones((2,1))
    lambdaV = numpy.ones((3+1,1))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R4,M,K,priors)
    assert str(error.value) == "Prior matrix lambdaV has the wrong shape: (4, 1) instead of (3, 1)."
    # Test getting an exception if a row or column is entirely unknown
    lambdaU = numpy.ones((2,1))
    lambdaV = numpy.ones((3,1))
    M1 = [[1,1,1],[0,0,0]]
    M2 = [[1,1,0],[1,0,0]]
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R4,M1,K,priors)
    assert str(error.value) == "Fully unobserved row in R, row 1."
    with pytest.raises(AssertionError) as error:
        bnmf_gibbs_optimised(R4,M2,K,priors)
    assert str(error.value) == "Fully unobserved column in R, column 2."
    # Finally, a successful case
    I,J,K = 3,2,2
    R5 = 2*numpy.ones((I,J))
    lambdaU = numpy.ones((I,K))
    lambdaV = numpy.ones((J,K))
    M = numpy.ones((I,J))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    BNMF = bnmf_gibbs_optimised(R5,M,K,priors)
    assert numpy.array_equal(BNMF.R,R5)
    assert numpy.array_equal(BNMF.M,M)
    assert BNMF.I == I
    assert BNMF.J == J
    assert BNMF.K == K
    assert BNMF.size_Omega == I*J
    assert BNMF.alpha == alpha
    assert BNMF.beta == beta
    assert numpy.array_equal(BNMF.lambdaU,lambdaU)
    assert numpy.array_equal(BNMF.lambdaV,lambdaV)
    # And when lambdaU and lambdaV are integers
    # (scalar priors must be broadcast to full (I,K) / (J,K) matrices)
    I,J,K = 3,2,2
    R5 = 2*numpy.ones((I,J))
    lambdaU = 3.
    lambdaV = 4.
    M = numpy.ones((I,J))
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    BNMF = bnmf_gibbs_optimised(R5,M,K,priors)
    assert numpy.array_equal(BNMF.R,R5)
    assert numpy.array_equal(BNMF.M,M)
    assert BNMF.I == I
    assert BNMF.J == J
    assert BNMF.K == K
    assert BNMF.size_Omega == I*J
    assert BNMF.alpha == alpha
    assert BNMF.beta == beta
    assert numpy.array_equal(BNMF.lambdaU,lambdaU*numpy.ones((I,K)))
    assert numpy.array_equal(BNMF.lambdaV,lambdaV*numpy.ones((J,K)))
""" Test initialing parameters """
def test_initialise():
    """initialise() seeds tau, U, V.

    'random' init only guarantees nonnegative draws; 'exp' init sets each
    U entry to the exponential-prior mean 1/lambdaU (=1/2) and each V entry
    to 1/lambdaV (=1/3).
    """
    I,J,K = 5,3,2
    R = numpy.ones((I,J))
    M = numpy.ones((I,J))
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    # First do a random initialisation - we can then only check whether values are correctly initialised
    init = 'random'
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.initialise(init)
    assert BNMF.tau >= 0.0
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert BNMF.U[i,k] >= 0.0
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert BNMF.V[j,k] >= 0.0
    # Then initialise with expectation values
    init = 'exp'
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.initialise(init)
    assert BNMF.tau >= 0.0
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert BNMF.U[i,k] == 1./2.
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert BNMF.V[j,k] == 1./3.
    #assert BNMF.tau == 3./1.
""" Test computing values for alpha, beta, mu, tau. """
I,J,K = 5,3,2
R = numpy.ones((I,J))
M = numpy.ones((I,J))
M[0,0], M[2,2], M[3,1] = 0, 0, 0
lambdaU = 2*numpy.ones((I,K))
lambdaV = 3*numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
init = 'exp' #U=1/2,V=1/3
def test_alpha_s():
    """alpha_s() adds 6 (half the 12 observed entries) to the prior alpha."""
    model = bnmf_gibbs_optimised(R, M, K, priors)
    model.initialise(init)
    expected = alpha + 6.
    assert model.alpha_s() == expected
def test_beta_s():
    """beta_s() adds half the sum of squared residuals under the exp init."""
    model = bnmf_gibbs_optimised(R, M, K, priors)
    model.initialise(init)
    # R is all ones and U.V^T is 1/3 everywhere, so each of the 12 observed
    # residuals is 2/3.
    expected = beta + .5 * (12 * (2. / 3.) ** 2)
    assert abs(model.beta_s() - expected) < 0.000000000000001
def test_tauU():
    """tauU(k) is tau times the per-row sums of V^2 over observed entries."""
    model = bnmf_gibbs_optimised(R, M, K, priors)
    model.initialise(init)
    model.tau = 3.
    # V^2 entries are all 1/9; rows 0, 2, 3 each have one masked entry (two
    # observed terms, 2/9), rows 1 and 4 have three (1/3).
    expected = 3.*numpy.array([[2./9.,2./9.],[1./3.,1./3.],[2./9.,2./9.],[2./9.,2./9.],[1./3.,1./3.]])
    for i, k in itertools.product(xrange(0, I), xrange(0, K)):
        assert model.tauU(k)[i] == expected[i, k]
def test_muU():
    """muU combines the data term and the lambdaU prior, scaled by 1/tauU."""
    model = bnmf_gibbs_optimised(R, M, K, priors)
    model.initialise(init)
    model.tau = 3.
    # Residual excluding component k is 5/6 for every observed entry; rows
    # with a masked entry contribute two terms, full rows three.
    precision = 3.*numpy.array([[2./9.,2./9.],[1./3.,1./3.],[2./9.,2./9.],[2./9.,2./9.],[1./3.,1./3.]])
    expected = 1./precision * ( 3. * numpy.array([[2.*(5./6.)*(1./3.),10./18.],[15./18.,15./18.],[10./18.,10./18.],[10./18.,10./18.],[15./18.,15./18.]]) - lambdaU )
    tolerance = 0.000000000000001
    for i, k in itertools.product(xrange(0, I), xrange(0, K)):
        assert abs(model.muU(precision[:, k], k)[i] - expected[i, k]) < tolerance
def test_tauV():
    """tauV(k) is tau times the per-column sums of U^2 over observed entries."""
    model = bnmf_gibbs_optimised(R, M, K, priors)
    model.initialise(init)
    model.tau = 3.
    # U^2 entries are all 1/4; every column has exactly 4 observed entries,
    # so each per-column sum is 1.
    expected = 3. * numpy.ones((J, K))
    for j, k in itertools.product(xrange(0, J), xrange(0, K)):
        assert model.tauV(k)[j] == expected[j, k]
def test_muV():
    """muV combines the data term and the lambdaV prior, scaled by 1/tauV."""
    model = bnmf_gibbs_optimised(R, M, K, priors)
    model.initialise(init)
    model.tau = 3.
    # Residual excluding component k is 5/6 for every observed entry; each
    # column has 4 observed entries with U entries of 1/2.
    precision = 3. * numpy.ones((J, K))
    data_term = 4.*(5./6.)*(1./2.)
    expected = 1./precision * ( 3. * numpy.array([[data_term, data_term],[data_term, data_term],[data_term, data_term]]) - lambdaV )
    for j, k in itertools.product(xrange(0, J), xrange(0, K)):
        assert model.muV(precision[:, k], k)[j] == expected[j, k]
""" Test some iterations, and that the values have changed in U and V. """
def test_run():
    """run() stores one draw per iteration and moves U, V, tau off their init values."""
    I,J,K = 10,5,2
    R = numpy.ones((I,J))
    M = numpy.ones((I,J))
    M[0,0], M[2,2], M[3,1] = 0, 0, 0
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    init = 'exp' #U=1/2,V=1/3
    U_prior = numpy.ones((I,K))/2.
    V_prior = numpy.ones((J,K))/3.
    iterations = 15
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    BNMF.initialise(init)
    (Us,Vs,taus) = BNMF.run(iterations)
    assert BNMF.all_U.shape == (iterations,I,K)
    assert BNMF.all_V.shape == (iterations,J,K)
    assert BNMF.all_tau.shape == (iterations,)
    # After one Gibbs sweep the sampled values should differ from the
    # deterministic 'exp' initialisation (equality has probability zero).
    for i,k in itertools.product(xrange(0,I),xrange(0,K)):
        assert Us[0,i,k] != U_prior[i,k]
    for j,k in itertools.product(xrange(0,J),xrange(0,K)):
        assert Vs[0,j,k] != V_prior[j,k]
    assert taus[1] != alpha/float(beta)
""" Test approximating the expectations for U, V, tau """
def test_approx_expectation():
    """approx_expectation() averages the draws kept after burn-in with thinning.

    With burn_in=2, thinning=3 over 10 draws, samples at indices 2, 5, 8
    (m = 3, 6, 9) are averaged.
    """
    burn_in = 2
    thinning = 3 # so index 2,5,8 -> m=3,m=6,m=9
    (I,J,K) = (5,3,2)
    Us = [numpy.ones((I,K)) * 3*m**2 for m in range(1,10+1)] #first is 1's, second is 4's, third is 9's, etc.
    Vs = [numpy.ones((J,K)) * 2*m**2 for m in range(1,10+1)]
    taus = [m**2 for m in range(1,10+1)]
    expected_exp_tau = (9.+36.+81.)/3.
    expected_exp_U = numpy.array([[9.+36.+81.,9.+36.+81.],[9.+36.+81.,9.+36.+81.],[9.+36.+81.,9.+36.+81.],[9.+36.+81.,9.+36.+81.],[9.+36.+81.,9.+36.+81.]])
    expected_exp_V = numpy.array([[(9.+36.+81.)*(2./3.),(9.+36.+81.)*(2./3.)],[(9.+36.+81.)*(2./3.),(9.+36.+81.)*(2./3.)],[(9.+36.+81.)*(2./3.),(9.+36.+81.)*(2./3.)]])
    R = numpy.ones((I,J))
    M = numpy.ones((I,J))
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    # Inject synthetic chains directly instead of running the sampler.
    BNMF.all_U = Us
    BNMF.all_V = Vs
    BNMF.all_tau = taus
    (exp_U, exp_V, exp_tau) = BNMF.approx_expectation(burn_in,thinning)
    assert expected_exp_tau == exp_tau
    assert numpy.array_equal(expected_exp_U,exp_U)
    assert numpy.array_equal(expected_exp_V,exp_V)
""" Test computing the performance of the predictions using the expectations """
def test_predict():
    """predict() scores the M_test entries of the expectation-based predictions.

    Synthetic chains are injected so the expected MSE, R^2 and Pearson Rp
    can be computed by hand (values in the inline comments below).
    """
    burn_in = 2
    thinning = 3 # so index 2,5,8 -> m=3,m=6,m=9
    (I,J,K) = (5,3,2)
    Us = [numpy.ones((I,K)) * 3*m**2 for m in range(1,10+1)] #first is 1's, second is 4's, third is 9's, etc.
    Vs = [numpy.ones((J,K)) * 2*m**2 for m in range(1,10+1)]
    Us[2][0,0] = 24 #instead of 27 - to ensure we do not get 0 variance in our predictions
    taus = [m**2 for m in range(1,10+1)]
    R = numpy.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15]],dtype=float)
    M = numpy.ones((I,J))
    lambdaU = 2*numpy.ones((I,K))
    lambdaV = 3*numpy.ones((J,K))
    alpha, beta = 3, 1
    priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
    #expected_exp_U = numpy.array([[125.,126.],[126.,126.],[126.,126.],[126.,126.],[126.,126.]])
    #expected_exp_V = numpy.array([[84.,84.],[84.,84.],[84.,84.]])
    #R_pred = numpy.array([[21084.,21084.,21084.],[ 21168.,21168.,21168.],[21168.,21168.,21168.],[21168.,21168.,21168.],[21168.,21168.,21168.]])
    M_test = numpy.array([[0,0,1],[0,1,0],[0,0,0],[1,1,0],[0,0,0]]) #R->3,5,10,11, P_pred->21084,21168,21168,21168
    MSE = (444408561. + 447872569. + 447660964. + 447618649) / 4.
    R2 = 1. - (444408561. + 447872569. + 447660964. + 447618649) / (4.25**2+2.25**2+2.75**2+3.75**2) #mean=7.25
    Rp = 357. / ( math.sqrt(44.75) * math.sqrt(5292.) ) #mean=7.25,var=44.75, mean_pred=21147,var_pred=5292, corr=(-4.25*-63 + -2.25*21 + 2.75*21 + 3.75*21)
    BNMF = bnmf_gibbs_optimised(R,M,K,priors)
    # Inject synthetic chains directly instead of running the sampler.
    BNMF.all_U = Us
    BNMF.all_V = Vs
    BNMF.all_tau = taus
    performances = BNMF.predict(M_test,burn_in,thinning)
    assert performances['MSE'] == MSE
    assert performances['R^2'] == R2
    assert performances['Rp'] == Rp
""" Test the evaluation measures MSE, R^2, Rp """
def test_compute_statistics():
R = numpy.array([[1,2],[3,4]],dtype=float)
M = numpy.array([[1,1],[0,1]])
I, J, K = 2, 2, 3
lambdaU = 2*numpy.ones((I,K))
lambdaV = 3*numpy.ones((J,K))
alpha, beta = 3, 1
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
BNMF = bnmf_gibbs_optimised(R,M,K,priors)
R_pred = numpy.array([[500,550],[1220,1342]],dtype=float)
M_pred = numpy.array([[0,0],[1,1]])
MSE_pred = (1217**2 + 1338**2) / 2.0
R2_pred = 1. - (1217**2+1338**2)/(0.5**2+0.5**2) #mean=3.5
Rp_pred = 61. / ( math.sqrt(.5) * math.sqrt(7442.) ) #mean=3.5,var=0.5,mean_pred=1281,var_pred=7442,cov=61
| |
# Source repository: dgh2466/public_tools
#!/usr/bin/env python
"""
olevba.py
olevba is a script to parse OLE and OpenXML files such as MS Office documents
(e.g. Word, Excel), to extract VBA Macro code in clear text, deobfuscate
and analyze malicious macros.
Supported formats:
- Word 97-2003 (.doc, .dot), Word 2007+ (.docm, .dotm)
- Excel 97-2003 (.xls), Excel 2007+ (.xlsm, .xlsb)
- PowerPoint 2007+ (.pptm, .ppsm)
- Word 2003 XML (.xml)
- Word/Excel Single File Web Page / MHTML (.mht)
Author: <NAME> - http://www.decalage.info
License: BSD, see source code or documentation
olevba is part of the python-oletools package:
http://www.decalage.info/python/oletools
olevba is based on source code from officeparser by <NAME>
https://github.com/unixfreak0037/officeparser
"""
# === LICENSE ==================================================================
# olevba is copyright (c) 2014-2015 <NAME> (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# olevba contains modified source code from the officeparser project, published
# under the following MIT License (MIT):
#
# officeparser is copyright (c) 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#------------------------------------------------------------------------------
# CHANGELOG:
# 2014-08-05 v0.01 PL: - first version based on officeparser code
# 2014-08-14 v0.02 PL: - fixed bugs in code, added license from officeparser
# 2014-08-15 PL: - fixed incorrect value check in PROJECTHELPFILEPATH Record
# 2014-08-15 v0.03 PL: - refactored extract_macros to support OpenXML formats
# and to find the VBA project root anywhere in the file
# 2014-11-29 v0.04 PL: - use olefile instead of OleFileIO_PL
# 2014-12-05 v0.05 PL: - refactored most functions into a class, new API
# - added detect_vba_macros
# 2014-12-10 v0.06 PL: - hide first lines with VB attributes
# - detect auto-executable macros
# - ignore empty macros
# 2014-12-14 v0.07 PL: - detect_autoexec() is now case-insensitive
# 2014-12-15 v0.08 PL: - improved display for empty macros
# - added pattern extraction
# 2014-12-25 v0.09 PL: - added suspicious keywords detection
# 2014-12-27 v0.10 PL: - added OptionParser, main and process_file
# - uses xglob to scan several files with wildcards
# - option -r to recurse subdirectories
# - option -z to scan files in password-protected zips
# 2015-01-02 v0.11 PL: - improved filter_vba to detect colons
# 2015-01-03 v0.12 PL: - fixed detect_patterns to detect all patterns
# - process_file: improved display, shows container file
# - improved list of executable file extensions
# 2015-01-04 v0.13 PL: - added several suspicious keywords, improved display
# 2015-01-08 v0.14 PL: - added hex strings detection and decoding
# - fixed issue #2, decoding VBA stream names using
# specified codepage and unicode stream names
# 2015-01-11 v0.15 PL: - added new triage mode, options -t and -d
# 2015-01-16 v0.16 PL: - fix for issue #3 (exception when module name="text")
# - added several suspicious keywords
# - added option -i to analyze VBA source code directly
# 2015-01-17 v0.17 PL: - removed .com from the list of executable extensions
# - added scan_vba to run all detection algorithms
# - decoded hex strings are now also scanned + reversed
# 2015-01-23 v0.18 PL: - fixed issue #3, case-insensitive search in code_modules
# 2015-01-24 v0.19 PL: - improved the detection of IOCs obfuscated with hex
# strings and StrReverse
# 2015-01-26 v0.20 PL: - added option --hex to show all hex strings decoded
# 2015-01-29 v0.21 PL: - added Dridex obfuscation decoding
# - improved display, shows obfuscation name
# 2015-02-01 v0.22 PL: - fixed issue #4: regex for URL, e-mail and exe filename
# - added Base64 obfuscation decoding (contribution from
# @JamesHabben)
# 2015-02-03 v0.23 PL: - triage now uses VBA_Scanner results, shows Base64 and
# Dridex strings
# - exception handling in detect_base64_strings
# 2015-02-07 v0.24 PL: - renamed option --hex to --decode, fixed display
# - display exceptions with stack trace
# - added several suspicious keywords
# - improved Base64 detection and decoding
# - fixed triage mode not to scan attrib lines
# 2015-03-04 v0.25 PL: - added support for Word 2003 XML
# 2015-03-22 v0.26 PL: - added suspicious keywords for sandboxing and
# virtualisation detection
# 2015-05-06 v0.27 PL: - added support for MHTML files with VBA macros
# (issue #10 reported by Greg from SpamStopsHere)
# 2015-05-24 v0.28 PL: - improved support for MHTML files with modified header
# (issue #11 reported by <NAME>)
# 2015-05-26 v0.29 PL: - improved MSO files parsing, taking into account
# various data offsets (issue #12)
# - improved detection of MSO files, avoiding incorrect
# parsing errors (issue #7)
# 2015-05-29 v0.30 PL: - added suspicious keywords suggested by @ozhermit,
# <NAME> (issue #9), issue #13
# 2015-06-16 v0.31 PL: - added generic VBA expression deobfuscation (chr,asc,etc)
# 2015-06-19 PL: - added options -a, -c, --each, --attr
# 2015-06-21 v0.32 PL: - always display decoded strings which are printable
# - fix VBA_Scanner.scan to return raw strings, not repr()
# 2015-07-09 v0.40 PL: - removed usage of sys.stderr which causes issues
# 2015-07-12 PL: - added Hex function decoding to VBA Parser
# 2015-07-13 PL: - added Base64 function decoding to VBA Parser
# 2015-09-06 PL: - improved VBA_Parser, refactored the main functions
# 2015-09-13 PL: - moved main functions to a class VBA_Parser_CLI
# - fixed issue when analysis was done twice
# 2015-09-15 PL: - remove duplicate IOCs from results
# 2015-09-16 PL: - join long VBA lines ending with underscore before scan
# - disabled unused option --each
# 2015-09-22 v0.41 PL: - added new option --reveal
# - added suspicious strings for PowerShell.exe options
__version__ = '0.41'  # olevba release version (kept in sync with the changelog above)
#------------------------------------------------------------------------------
# TODO:
# + option --fast to disable VBA expressions parsing
# + do not use logging, but a provided logger (null logger by default)
# + setup logging (common with other oletools)
# + add xor bruteforcing like bbharvest
# + options -a and -c should imply -d
# TODO later:
# + performance improvement: instead of searching each keyword separately,
# first split vba code into a list of words (per line), then check each
# word against a dict. (or put vba words into a set/dict?)
# + for regex, maybe combine them into a single re with named groups?
# + add Yara support, include sample rules? plugins like balbuzard?
# + add balbuzard support
# + output to file (replace print by file.write, sys.stdout by default)
# + look for VBA in embedded documents (e.g. Excel in Word)
# + support SRP streams (see Lenny's article + links and sample)
# - python 3.x support
# - add support | |
# -*- coding: utf-8 -*-
"""
"""
from instr import container
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import chirp
import matplotlib.pyplot as plt
from math import ceil, fsum
#import pandas as pd
#import qgrid
from copy import deepcopy
from struct import pack
import re
def IEEE_block_format(block):
    """Wrap *block* in an IEEE 488.2 definite-length block header: #<n><len><data>."""
    payload_length = str(len(block))
    return '#{0}{1}{2}'.format(len(payload_length), payload_length, block)
class sequence:
    """Ordered list of sequencer steps, each stored as a dict.

    Each step records a waveform name, repeat count, wait/trigger flag, jump
    target and its [start, end) time span.  The sequence can be serialised
    either as a Tektronix MAGIC 3002 sequence file or as Tektronix SCPI
    commands.
    """
    def __init__(self, name = 'seq0'):
        self.data = []       # per-step dicts, in playback order
        self.name = name     # base name used to derive default step names
        self.last_comp = 0.  # pulse time of the last charge compensation
    def append(self, **kwargs):
        """Append one step; its start time continues from the previous step's end."""
        dictionary = {}
        dictionary['name'] = kwargs.get('name', '{name}_{index:03d}'.format(name = self.name, index = len(self.data)))
        dictionary['repeat'] = kwargs.get('repeat', 1)
        # 'repeat' may be inf (loop forever): the hardware formats encode that
        # as 0 ('repeat_0'), while time bookkeeping counts it once ('repeat_1').
        dictionary['repeat_0'] = int(round(dictionary['repeat'],0)) if not np.isinf(dictionary['repeat']) else 0
        dictionary['repeat_1'] = int(round(dictionary['repeat'],0)) if not np.isinf(dictionary['repeat']) else 1
        dictionary['wait'] = kwargs.get('wait', False)
        dictionary['go_to'] = kwargs.get('go_to', None)
        if kwargs.get('relative', False) and dictionary ['go_to'] is not None:
            # relative jump targets are offsets from the current step index
            dictionary['go_to'] += len(self.data)
        dictionary['target'] = kwargs.get('target', 0)
        dictionary['seq?'] = kwargs.get('seq?', False)  # step is a sub-sequence, not a waveform
        dictionary['start'] = self.data[-1]['end'] if self.data else 0.
        dictionary['end'] = kwargs.get('end')
        self.data.append(dictionary)
    def undo_append(self, **kwargs):
        """Drop the most recently appended step."""
        self.data.pop()
    def format_pandas(self):
        # placeholder: tabular (pandas/qgrid) view not implemented
        pass
    def interact(self):
        # placeholder: interactive editing not implemented
        pass
    def format_MAGIC3002(self, terminator = '\n', id_list = ['ch1', 'ch2']):
        """Render the sequence as the body of a Tektronix MAGIC 3002 sequence file."""
        line_list = []
        for dictionary in deepcopy(self.data):
            format_string = ''
            for ch_id in id_list:
                format_string += '"{name}.seq",' if dictionary['seq?'] else '"{name}_%s.wfm",'%(ch_id)
            format_string += '{repeat_0:.0f},{wait:.0f},{go_to:.0f},{target:.0f}'
            # MAGIC jump indices are 1-based; 0 means "no goto"
            dictionary['go_to'] = dictionary['go_to']+1 if dictionary['go_to'] is not None else 0
            line_list.append(format_string.format(**dictionary))
        optional_info = ['JUMP_MODE SOFTWARE', 'JUMP_TIMING SYNC']
        return terminator.join(['MAGIC 3002','LINES %d'%len(line_list)] + line_list + optional_info)
    def format_TekSCPI(self, id_list = ['ch1', 'ch2']):
        """Render the sequence as a list of Tektronix SCPI SEQ:* commands."""
        commands = ['SEQ:LENG 0','SEQ:LENG %d'%len(self.data)]
        for n, dictionary in enumerate(deepcopy(self.data)):
            dictionary.update({'index': n+1})  # SCPI element indices are 1-based
            cmds = []
            if dictionary['seq?']:
                cmds = ['SEQ:ELEM{index}:SUBS "{name}"',]
            else:
                for ch_id in id_list:
                    # the trailing digits of the channel id select the WAVn slot
                    cmds.append('SEQ:ELEM{index}:WAV%s "{name}_%s"'%(re.findall('^.*?([0-9]+)$',ch_id)[-1], ch_id))
            cmds.append('SEQ:ELEM{index}:LOOP:'+ ('INF 1' if np.isinf(dictionary['repeat']) else 'COUN {repeat:.0f}'))
            cmds.append('SEQ:ELEM{index}:TWA {wait:.0f}')
            if dictionary['go_to'] is not None:
                dictionary['go_to'] += 1  # 1-based index on the instrument
                cmds += ['SEQ:ELEM{index}:GOTO:IND {go_to:.0f}', 'SEQ:ELEM{index}:GOTO:STAT 1']
            commands += [cmd.format(**dictionary) for cmd in cmds]
        return commands
def major_channel(wfm_ch_list):
    """Pick the channel whose sequence should drive shared timing settings.

    All channels must carry compatible sequences (same segmentation, repeats
    and jump structure); otherwise an Exception is raised.
    """
    count = len(wfm_ch_list)
    if count == 1:
        return wfm_ch_list[0]
    elif count == 2:
        first, second = wfm_ch_list
        seq_a, seq_b = first.seq.data, second.seq.data
        # a channel with no sequence data defers to the other one
        if not seq_a:
            return second
        elif not seq_b:
            return first
        if len(seq_a) != len(seq_b):
            raise Exception('Waveforms are not compatible.')
        votes = []  # per-step votes (0 = first channel wins, 1 = second)
        for step_a, step_b in zip(seq_a, seq_b):
            if step_a['repeat_0'] != step_b['repeat_0'] or step_a['seq?'] != step_b['seq?']:
                raise Exception('Waveforms are not compatible.')
            if step_a['start'] != step_b['start'] or step_a['end'] != step_b['end']:
                raise Exception('Waveforms are not compatible.')
            if step_a['wait'] != step_b['wait']:
                votes.append(0 if step_a['wait'] else 1)
            if step_a['go_to'] is not None or step_b['go_to'] is not None:
                if step_a['go_to'] != step_b['go_to']:
                    raise Exception('Waveforms are not compatible.')
                votes.append(0 if step_a['go_to'] is not None else 1)
        if not votes:
            return first
        if max(votes) == min(votes):
            return wfm_ch_list[votes[0]]
        else:
            raise Exception('Waveforms are not compatible.')
    else:
        # reduce pairwise from the right, then recurse on the shorter list
        return major_channel(wfm_ch_list[:-2] + [major_channel(wfm_ch_list[-2:]),])
class waveform_channel(object):
    """One output channel of an AWG: accumulates pulse breakpoints and bursts,
    splits them into sequencer sections, and renders sampled waveforms."""
    def __init__(self, instr, **kwargs):
        self.instr = instr  # parent instrument; owns the sample period t_sample
        self.default_value = kwargs.get('default_value', 0.)  # idle output level
        self.default_frequency = kwargs.get('default_frequency', 0.)
        self.ch_list = [self,]
        self.ch_id = kwargs.get('ch_id', 0)
        self.name = ""
        self.scale = kwargs.get('scale', 1) # scale = 0.5 for a 6 dB loss in the line
        self.refresh()  # initialise empty pulse/burst buffers and a fresh sequence
    @property
    def t_sample(self):
        # sample period is owned by the instrument; this is a pass-through
        return self.instr.t_sample
    @t_sample.setter
    def t_sample(self, newval):
        self.instr.t_sample = newval
    @property
    def pulse_time(self):
        # end time of the pulse built so far (0. when nothing has been added)
        return self._pulse[-1][0] if self._pulse else 0.
    @pulse_time.setter
    def pulse_time(self, newval):
        # effectively read-only: the setter only tolerates the current value
        if not newval == self.pulse_time:
            raise Exception('You are not supposed to change pulse_time.')
    def dwell(self, **kwargs):
        """Hold the output at a level (kwarg 'at') for the given duration."""
        duration, pos, bur = self._check_inputs(**kwargs)
        if len(pos) == 0:#if the position is not specified, keep the last level
            pos = [self._pulse[-1][1] if len(self._pulse) else self.default_value]
        if len(self._pulse) > 0 and np.isnan(self._pulse[-1][1]):#previously ramping to nowhere
            # resolve the open-ended ramp: it lands on the dwell level
            self._pulse[-1] = (self._pulse[-1][0], pos[0])
        else:#the last position is given
            self._pulse.append((self.pulse_time, pos[0]))
        self._pulse.append((self.pulse_time + duration, pos[0]))
        self._phase = 'dwell'
    def ramp(self, **kwargs):
        """Ramp linearly over *duration* towards 'to' (or "to nowhere" if omitted).

        A ramp without a target leaves a NaN endpoint that the next dwell/ramp
        resolves.  Kwarg '_from' first dwells (0 s) at the given start level.
        """
        if '_from' in kwargs:
            self.dwell(duration = 0, at = kwargs.pop('_from'))
        duration, pos, bur = self._check_inputs(**kwargs)
        if len(self._pulse) == 0: #First segment
            # NOTE(review): the first-segment branch only seeds the origin
            # point; confirm a ramp from an empty pulse is intended to behave
            # this way.
            self._pulse.append((0, self.default_value))
        elif np.isnan(self._pulse[-1][1]): #if the previous segment is also a ramp
            self._pulse[-1] = (self.pulse_time + duration, pos[0]) #make an unified ramp segment
        else:
            self._pulse.append((self.pulse_time + duration, pos[0] if pos else np.nan))
        self._phase = 'ramp'
def excurse(self, **kwargs):
duration, pos, bur = self._check_inputs(**kwargs)
self.dwell(duration = duration, at = pos[0])
self.ramp(duration = 0., to = self.default_value)
    def compensate(self, **kwargs):
        """Append a dwell that nulls the accumulated area offset since the last compensation.

        Returns the compensation level that was applied.
        """
        duration, pos, bur = self._check_inputs(**kwargs)
        # level the compensated waveform should average to (default_value if omitted)
        target = pos[0] if len(pos) == 1 else self.default_value
        if np.isnan(self._pulse[-1][1]):
            raise Exception("Cannot compensate while ramping to nowhere.")
        # temporarily close the current section so the samples can be integrated
        self.section(division = False, repeat = 1)
        seq_indx = [i for i, seq_dict in enumerate(self.seq.data) if seq_dict['end'] > self.seq.last_comp][0]
        tarr, wfm_list = self.processed_wave(start = self.seq.last_comp)
        # integrated sample weight since the last compensation, repeats included
        wfm_weight = fsum([fsum(wfm)*seq_dict['repeat_0'] for wfm, seq_dict in zip(wfm_list, self.seq.data[seq_indx:])])
        self.seq.undo_append()  # drop the temporary section again
        cval = (self.time_global()* target - wfm_weight*float(self.t_sample))/duration
        self.dwell(duration = duration, at = cval)
        self._phase = 'compensated'
        self.seq.last_comp = self.pulse_time
        return cval
    def burst(self, **kwargs):
        """Register an RF burst (amp, phase, freq, envelope) over the coming interval."""
        if self._phase == 'ramp':
            raise Exception("Cannot burst while ramping to nowhere.")
        duration, pos, bur = self._check_inputs(**kwargs)
        if duration > 0.:
            amp, phase, freq, env = bur
            if np.isnan(amp) or np.isnan(freq):
                raise Exception('Amp and freq cannot be omitted.')
            self._burst.append(((self.pulse_time, self.pulse_time + duration), bur))
        if kwargs.get('auto_dwell', True):
            # by default the pulse level dwells underneath the burst
            self.dwell(**kwargs)
        self._phase = 'burst'
    def time_global(self, pulse_time = None):
        """Convert a local pulse time to global playback time, expanding section repeats."""
        pulse_time = self.pulse_time if pulse_time is None else pulse_time
        pre_secs = [_dict for _dict in self.seq.data if _dict['end'] <= pulse_time]
        # total played time of all fully elapsed sections, each counted repeat_1 times
        seq_gtime = fsum([(_dict['end']-_dict['start'])*_dict['repeat_1'] for _dict in pre_secs])
        seq_ctime = pre_secs[-1]['end'] if pre_secs else 0.
        return pulse_time - seq_ctime + seq_gtime
    def dividable(self):
        """True if the pending section is constant (no bursts, no level change) and can be repeat-divided."""
        start = self.seq.data[-1]['end'] if self.seq.data else 0
        end = self.pulse_time
        # ignore the breakpoint that merely closes the previous section
        degeneracy = max((0, len([True for t, val in self._pulse if t == start])-1))
        pulse_vals = [val for t, val in self._pulse if start <= t <= end][degeneracy:]
        burst_not_in_range = all([(end <= seg[0][0] or seg[0][1] <= start) for seg in self._burst])
        pulse_val_changes = len(pulse_vals) and max(pulse_vals) != min(pulse_vals)
        return not pulse_val_changes and burst_not_in_range
def keep_up(self, time):
to_go = time - self.time_global()
if to_go > 0.:
self.dwell(duration = to_go)
def section(self,**kwargs):
repeat = kwargs.pop('repeat', 1)
start = self.seq.data[-1]['end'] if self.seq.data else 0
end = self.pulse_time
if start == end:
return
if kwargs.get('division', True if repeat == 1 else False) and self.dividable():
degeneracy = len([True for t, val in self._pulse if t == start])
pulse_vals = [val for t, val in self._pulse if start <= t <= end][degeneracy:]
unit, rep = auto_division((end-start)/self.t_sample)
if rep > 1:
end = start + unit * self.t_sample
repeat *= rep
self._pulse = self._pulse[:-len(pulse_vals)]
self._pulse.append((start, pulse_vals[0]))
self._pulse.append((end, pulse_vals[0]))
if start < self.seq.last_comp and self._phase != 'compensated':
print 'Warning: the section continues after compensation.'
self.seq.append(end = end, repeat = repeat, **kwargs)
    def refresh(self):
        """Reset the channel: clear pulse/burst data, rendered waveforms and the sequence."""
        self._pulse, self._burst = [], [] #Pulse is pulse, burst is burst.
        self.scaled_waveform_list, self.waveform_list, self.t_array_list = [], [], []
        self._phase, self.seq = 'new', sequence()
    def flatten_waves(self, scaled = False):
        """Concatenate the per-section waveforms (with repeats) into one array.

        Returns (time_array, samples).
        """
        wfm_flatten = np.zeros(0)
        # NOTE(review): scaled=True selects waveform_list while scaled=False
        # selects scaled_waveform_list -- this looks inverted; confirm intent.
        wfm_list = self.waveform_list if scaled else self.scaled_waveform_list
        for wfm, seq_dict in zip(wfm_list, self.seq.data):
            # expand each section by its bookkeeping repeat count
            wfm_flatten = np.append(wfm_flatten, [wfm]*seq_dict['repeat_1'])
        tarr_flatten = np.arange(0.5, len(wfm_flatten), 1.)*self.t_sample
        return tarr_flatten, wfm_flatten
    def compose(self, **kwargs):
        """Finalise the remaining pulse data into a section and render all waveforms."""
        # optional absolute end time: only force a new section if we are behind it
        to_go = kwargs['time'] - self.time_global() if 'time' in kwargs else 0.
        if not self.seq.data or to_go > 0.:
            self.section(new = True, **kwargs)
        if np.isnan(self._pulse[-1][1]):
            raise Exception("Cannot compose while ramping to nowhere.")
        self.t_array_list, self.scaled_waveform_list = self.processed_wave(**kwargs)
        self.waveform_list = []
        for wfm in self.scaled_waveform_list:
            # divide out the line attenuation to get instrument output values
            self.waveform_list.append(wfm/float(self.scale))
        self._phase = 'composed'
    def processed_wave(self, **kwargs):
        """Sample every sequence section, optionally clipped to [start, end].

        Returns (list_of_time_arrays, list_of_waveform_arrays), one entry per
        section that overlaps the requested window.
        """
        tarr_list, wfm_list = [], []
        arg_start, arg_end = kwargs.pop('start', 0.), kwargs.pop('end', np.inf)
        for _dict in self.seq.data:
            if arg_start < _dict['end'] and _dict['start'] < arg_end:  # section overlaps window
                start, end = max(_dict['start'], arg_start), min(_dict['end'], arg_end)
                tarr, rawwave = self.raw_wave_concat(end = end, t_resolution = self.t_sample,
                                                     start = start, **kwargs)
                tarr_list.append(tarr)
                wave = rawwave #process here for further calibration, preamplification etc.
                wfm_list.append(wave)
        return tarr_list, wfm_list
    def _processed_wave(self, **kwargs):
        # deprecated variant: renders the whole pulse in one array and then
        # slices per section -- replaced because it could exhaust memory for
        # very long pulses
        tarr, rawwave = self.raw_wave_concat(end = self.pulse_time, t_resolution = self.t_sample, **kwargs)
        wave = rawwave
        #process here for calibration, preamplification etc
        tarr_list, wfm_list = [], []
        for _dict in self.seq.data:
            start, end = _dict['start'], _dict['end']
            rng = np.logical_and(start <= tarr, tarr < end)
            tarr_list.append(tarr[rng])
            wfm_list.append(wave[rng])
        return tarr_list, wfm_list
def raw_wave_concat(self, end, t_resolution, start = 0.):
tarr = np.linspace(start, end, round((end-start)/t_resolution)+1.)[:-1]+0.5*t_resolution
#Pulse is pulse.
ts = [segment[0] for segment in self._pulse]
vals = [segment[1] for segment in self._pulse]
pfunc = interp1d(ts, vals, bounds_error = False, assume_sorted = True)
pulseraw = pfunc(tarr)
#Burst is burst.
| |
given by fastest framerate possible (by exposure time and readout).
"XI_ACQ_TIMING_MODE_FRAME_RATE": c_uint(1), #Selects a mode when sensor frame acquisition frequency is set to XI_PRM_FRAMERATE
"XI_ACQ_TIMING_MODE_FRAME_RATE_LIMIT": c_uint(2), #Selects a mode when sensor frame acquisition frequency is limited by XI_PRM_FRAMERATE
}
#Enumerator for data target modes
XI_TRANSPORT_DATA_TARGET_MODE = {
    "XI_TRANSPORT_DATA_TARGET_CPU_RAM": c_uint(0), #Selects a CPU RAM as target for delivered data from the camera.
    "XI_TRANSPORT_DATA_TARGET_GPU_RAM": c_uint(1), #Selects a GPU RAM as target for delivered data from the camera.
}
#Enumeration for XI_PRM_GPI_SELECTOR for CB cameras.
XI_GPI_SEL_CB = {
    "XI_GPI_SEL_CB_IN1": c_uint(1), #Input1 - Pin3 (Opto Isolated).
    "XI_GPI_SEL_CB_IN2": c_uint(2), #Input2 - Pin4 (Opto Isolated).
    "XI_GPI_SEL_CB_INOUT1": c_uint(3), #Input/Output1 - Pin6
    "XI_GPI_SEL_CB_INOUT2": c_uint(4), #Input/Output2 - Pin7
    "XI_GPI_SEL_CB_INOUT3": c_uint(5), #Input/Output3 - Pin11
    "XI_GPI_SEL_CB_INOUT4": c_uint(6), #Input/Output4 - Pin12
}
#Enumeration for XI_PRM_GPO_SELECTOR for CB cameras.
XI_GPO_SEL_CB = {
    "XI_GPO_SEL_CB_OUT1": c_uint(1), #Output1 - Pin8 (Opto Isolated).
    "XI_GPO_SEL_CB_OUT2": c_uint(2), #Output2 - Pin9 (Opto Isolated).
    "XI_GPO_SEL_CB_INOUT1": c_uint(3), #Input/Output1 - Pin6
    "XI_GPO_SEL_CB_INOUT2": c_uint(4), #Input/Output2 - Pin7
    "XI_GPO_SEL_CB_INOUT3": c_uint(5), #Input/Output3 - Pin11
    "XI_GPO_SEL_CB_INOUT4": c_uint(6), #Input/Output4 - Pin12
}
#structure containing information about GPI functionality
XI_GPI_MODE = {
    "XI_GPI_OFF": c_uint(0), #Input off. In this mode the input level can be get using parameter XI_PRM_GPI_LEVEL.
    "XI_GPI_TRIGGER": c_uint(1), #Trigger input
    "XI_GPI_EXT_EVENT": c_uint(2), #External signal input. It is not implemented yet.
}
#Enumerator for GPI port selection.
XI_GPI_SELECTOR = {
    "XI_GPI_PORT1": c_uint(1), #GPI port 1
    "XI_GPI_PORT2": c_uint(2), #GPI port 2
    "XI_GPI_PORT3": c_uint(3), #GPI port 3
    "XI_GPI_PORT4": c_uint(4), #GPI port 4
    "XI_GPI_PORT5": c_uint(5), #GPI port 5
    "XI_GPI_PORT6": c_uint(6), #GPI port 6
}
#structure containing information about GPO functionality
XI_GPO_MODE = {
    "XI_GPO_OFF": c_uint(0), #Output off
    "XI_GPO_ON": c_uint(1), #Logical level.
    "XI_GPO_FRAME_ACTIVE": c_uint(2), #On from exposure started until read out finished.
    "XI_GPO_FRAME_ACTIVE_NEG": c_uint(3), #Off from exposure started until read out finished.
    "XI_GPO_EXPOSURE_ACTIVE": c_uint(4), #On during exposure(integration) time
    "XI_GPO_EXPOSURE_ACTIVE_NEG": c_uint(5), #Off during exposure(integration) time
    "XI_GPO_FRAME_TRIGGER_WAIT": c_uint(6), #On when sensor is ready for next trigger edge.
    "XI_GPO_FRAME_TRIGGER_WAIT_NEG": c_uint(7), #Off when sensor is ready for next trigger edge.
    "XI_GPO_EXPOSURE_PULSE": c_uint(8), #Short On/Off pulse on start of each exposure.
    "XI_GPO_EXPOSURE_PULSE_NEG": c_uint(9), #Short Off/On pulse on start of each exposure.
    "XI_GPO_BUSY": c_uint(10), #ON when camera is busy (trigger mode - starts with trigger reception and ends with end of frame transfer from sensor; freerun - active when acq active)
    "XI_GPO_BUSY_NEG": c_uint(11), #OFF when camera is busy (trigger mode - starts with trigger reception and ends with end of frame transfer from sensor; freerun - active when acq active)
    "XI_GPO_HIGH_IMPEDANCE": c_uint(12), #Hi impedance of output (if three state logic is used).
    "XI_GPO_FRAME_BUFFER_OVERFLOW": c_uint(13), #Frame buffer overflow status.
}
#Enumerator for GPO port selection.
XI_GPO_SELECTOR = {
    "XI_GPO_PORT1": c_uint(1), #GPO port 1
    "XI_GPO_PORT2": c_uint(2), #GPO port 2
    "XI_GPO_PORT3": c_uint(3), #GPO port 3
    "XI_GPO_PORT4": c_uint(4), #GPO port 4
    "XI_GPO_PORT5": c_uint(5), #GPO port 5
    "XI_GPO_PORT6": c_uint(6), #GPO port 6
}
#structure containing information about LED functionality
XI_LED_MODE = {
    "XI_LED_HEARTBEAT": c_uint(0), #Blinking (1Hz) if all is OK (CURRERA-R only).
    "XI_LED_TRIGGER_ACTIVE": c_uint(1), #On if trigger detected (CURRERA-R only).
    "XI_LED_EXT_EVENT_ACTIVE": c_uint(2), #On if external signal detected (CURRERA-R only)
    "XI_LED_LINK": c_uint(3), #On if link is OK (Currera-R only)
    "XI_LED_ACQUISITION": c_uint(4), #On if data streaming is on
    "XI_LED_EXPOSURE_ACTIVE": c_uint(5), #On if sensor is integrating
    "XI_LED_FRAME_ACTIVE": c_uint(6), #On if frame is active (exposure or readout)
    "XI_LED_OFF": c_uint(7), #Off
    "XI_LED_ON": c_uint(8), #On
    "XI_LED_BLINK": c_uint(9), #Blinking (1Hz)
}
#Enumerator for LED selection.
XI_LED_SELECTOR = {
    "XI_LED_SEL1": c_uint(1), #LED 1
    "XI_LED_SEL2": c_uint(2), #LED 2
    "XI_LED_SEL3": c_uint(3), #LED 3
    "XI_LED_SEL4": c_uint(4), #LED 4
}
#structure contains frames counter
XI_COUNTER_SELECTOR = {
    "XI_CNT_SEL_TRANSPORT_SKIPPED_FRAMES": c_uint(0), #Number of skipped frames on transport layer (e.g. when image gets lost while transmission). Occur when capacity of transport channel does not allow to transfer all data.
    "XI_CNT_SEL_API_SKIPPED_FRAMES": c_uint(1), #Number of skipped frames on API layer. Occur when application does not process the images as quick as they are received from the camera.
    "XI_CNT_SEL_TRANSPORT_TRANSFERRED_FRAMES": c_uint(2), #Number of successfully transferred frames on transport layer.
    "XI_CNT_SEL_FRAME_MISSED_TRIGGER_DUETO_OVERLAP": c_uint(3), #Number of missed triggers due to overlap.
    "XI_CNT_SEL_FRAME_MISSED_TRIGGER_DUETO_FRAME_BUFFER_OVR": c_uint(4), #Number of missed triggers due to frame buffer full.
    "XI_CNT_SEL_FRAME_BUFFER_OVERFLOW": c_uint(5), #Frame buffer full counter.
}
#structure containing information about time stamp reset arming
XI_TS_RST_MODE = {
    "XI_TS_RST_ARM_ONCE": c_uint(0), #TimeStamp reset is armed once, after execution engine is disabled
    "XI_TS_RST_ARM_PERSIST": c_uint(1), #TimeStamp reset is armed permanently if source is selected
}
#structure containing information about possible timestamp reset sources
XI_TS_RST_SOURCE = {
    "XI_TS_RST_OFF": c_uint(0), #No source selected, timestamp reset effectively disabled
    "XI_TS_RST_SRC_GPI_1": c_uint(1), #TimeStamp reset source selected GPI1 (after de bounce)
    "XI_TS_RST_SRC_GPI_2": c_uint(2), #TimeStamp reset source selected GPI2 (after de bounce)
    "XI_TS_RST_SRC_GPI_3": c_uint(3), #TimeStamp reset source selected GPI3 (after de bounce)
    "XI_TS_RST_SRC_GPI_4": c_uint(4), #TimeStamp reset source selected GPI4 (after de bounce)
    "XI_TS_RST_SRC_GPI_1_INV": c_uint(5), #TimeStamp reset source selected GPI1 inverted (after de bounce)
    "XI_TS_RST_SRC_GPI_2_INV": c_uint(6), #TimeStamp reset source selected GPI2 inverted (after de bounce)
    "XI_TS_RST_SRC_GPI_3_INV": c_uint(7), #TimeStamp reset source selected GPI3 inverted (after de bounce)
    "XI_TS_RST_SRC_GPI_4_INV": c_uint(8), #TimeStamp reset source selected GPI4 inverted (after de bounce)
    "XI_TS_RST_SRC_GPO_1": c_uint(9), #TimeStamp reset source selected GPO1 (after de bounce)
    "XI_TS_RST_SRC_GPO_2": c_uint(10), #TimeStamp reset source selected GPO2 (after de bounce)
    "XI_TS_RST_SRC_GPO_3": c_uint(11), #TimeStamp reset source selected GPO3 (after de bounce)
    "XI_TS_RST_SRC_GPO_4": c_uint(12), #TimeStamp reset source selected GPO4 (after de bounce)
    "XI_TS_RST_SRC_GPO_1_INV": c_uint(13), #TimeStamp reset source selected GPO1 inverted (after de bounce)
    "XI_TS_RST_SRC_GPO_2_INV": c_uint(14), #TimeStamp reset source selected GPO2 inverted (after de bounce)
    "XI_TS_RST_SRC_GPO_3_INV": c_uint(15), #TimeStamp reset source selected GPO3 inverted (after de bounce)
    "XI_TS_RST_SRC_GPO_4_INV": c_uint(16), #TimeStamp reset source selected GPO4 inverted (after de bounce)
    "XI_TS_RST_SRC_TRIGGER": c_uint(17), #TimeStamp reset source selected TRIGGER (signal for sensor)
    "XI_TS_RST_SRC_TRIGGER_INV": c_uint(18), #TimeStamp reset source selected TRIGGER (signal for sensor)
    "XI_TS_RST_SRC_SW": c_uint(19), #TimeStamp reset source selected software (has immediate effect and is self cleared)
    "XI_TS_RST_SRC_EXPACTIVE": c_uint(20), #TimeStamp reset source selected exposure active
    "XI_TS_RST_SRC_EXPACTIVE_INV": c_uint(21), #TimeStamp reset source selected exposure active
    "XI_TS_RST_SRC_FVAL": c_uint(22), #TimeStamp reset source selected frame valid signal from sensor
    "XI_TS_RST_SRC_FVAL_INV": c_uint(23), #TimeStamp reset source selected frame valid inverted signal from sensor
}
#structure containing information about parameters type
#NOTE(review): xiTypeEnum/Boolean/Command share value 0 with xiTypeInteger;
#this mirrors the values used here for integer-like parameter handling --
#confirm against the upstream xiApi header before changing.
XI_PRM_TYPE = {
    "xiTypeInteger": c_uint(0), #integer parameter type
    "xiTypeFloat": c_uint(1), #float parameter type
    "xiTypeString": c_uint(2), #string parameter type
    "xiTypeEnum": c_uint(0), #enumerator parameter type
    "xiTypeBoolean": c_uint(0), #boolean parameter type
    "xiTypeCommand": c_uint(0), #command parameter type
}
#Turn parameter On/Off
XI_SWITCH = {
    "XI_OFF": c_uint(0), #Turn parameter off
    "XI_ON": c_uint(1), #Turn parameter on
}
#Temperature selector
XI_TEMP_SELECTOR = {
    "XI_TEMP_IMAGE_SENSOR_DIE_RAW": c_uint(0), #Not calibrated temperature of image sensor die (silicon) - e.g. sensor register value
    "XI_TEMP_IMAGE_SENSOR_DIE": c_uint(1), #Calibrated temperature of image sensor die (silicon) - in degrees of Celsius
    "XI_TEMP_SENSOR_BOARD": c_uint(2), #Sensor board temperature
    "XI_TEMP_INTERFACE_BOARD": c_uint(3), #Interface board temperature
    "XI_TEMP_FRONT_HOUSING": c_uint(4), #Front part of camera housing temperature
    "XI_TEMP_REAR_HOUSING": c_uint(5), #Rear part of camera housing temperature
    "XI_TEMP_TEC1_COLD": c_uint(6), #TEC1 cold side temperature
    "XI_TEMP_TEC1_HOT": c_uint(7), #TEC1 hot side temperature
}
#Temperature selector
XI_TEMP_CTRL_MODE_SELECTOR = {
    "XI_TEMP_CTRL_MODE_OFF": c_uint(0), #Temperature controlling is disabled (no fan or TEC (peltier) is enabled)
    "XI_TEMP_CTRL_MODE_AUTO": c_uint(1), #Automated temperature controlling is enabled - based on selected thermomether and target temperature.
    "XI_TEMP_CTRL_MODE_MANUAL": c_uint(2), #Manual controlling of temperature elements is enabled. Application can control the elements.
}
#Temperature element selector
XI_TEMP_ELEMENT_SELECTOR = {
    "XI_TEMP_ELEM_TEC1": c_uint(11), #Temperature element TEC1 (peltier closest to sensor)
    "XI_TEMP_ELEM_TEC2": c_uint(12), #Temperature element TEC2 (peltier)
    "XI_TEMP_ELEM_FAN1": c_uint(31), #Temperature element fan current or rotation
    "XI_TEMP_ELEM_FAN1_THRS_TEMP": c_uint(32), #Temperature element fan start rotation threshold temperature
}
#Data packing(grouping) types.
XI_OUTPUT_DATA_PACKING_TYPE = {
    "XI_DATA_PACK_XI_GROUPING": c_uint(0), #Data grouping (10g160, 12g192, 14g224).
    "XI_DATA_PACK_PFNC_LSB_PACKING": c_uint(1), #Data packing (10p, 12p)
}
#Downsampling types
XI_DOWNSAMPLING_TYPE = {
    "XI_BINNING": c_uint(0), #Downsampling is using binning
    "XI_SKIPPING": c_uint(1), #Downsampling is using skipping
}
#Image correction function
XI_IMAGE_CORRECTION_SELECTOR = {
"XI_CORRECTION_TYPE_SELECTOR": c_uint(0), #Correction Type selected see XI_TYPE_CORRECTION_SELECTOR
"XI_DEFECT_ID": c_uint(1), #Select defect id
"XI_DEFECTS_COUNT_BY_TYPE": c_uint(2), #Count of defects selected by current XI_DEFECT_TYPE
"XI_DEFECT_TYPE": c_uint(3), #Type of defect see XI_IMAGE_DEFECT_TYPE
"XI_DEFECT_SUB_TYPE": c_uint(4), #Defect sub type see XI_IMAGE_DEFECT_SUB_TYPE
"XI_DEFECT_POS_X": c_uint(5), #Defect position x
"XI_DEFECT_POS_Y": c_uint(6), #Defect position y
"XI_DEFECT_CMD_ADD": c_uint(7), #Write cached defect to the list
"XI_DEFECT_CMD_DELETE": c_uint(8), #Delete defect to the list
"XI_DEFECT_CMD_APPLY_CHANGES": c_uint(9), #Apply changes
"XI_DEFECT_CMD_LIST_CLEAR": c_uint(10), #Clear list
"XI_DEFECT_CMD_LISTS_CLEAR": c_uint(11), #Clear lists
"XI_DEFECT_CMD_SAVE": c_uint(12), #Save list to device
"XI_CORRECTION_TYPE_ENABLED": c_uint(13), #Enable or disable correction type
| |
val
def get_coolant_loops(self) -> List[CoolantView]:
if self.coolant_connection == ComponentCoolantCnxn.DISCONNECTED:
return []
elif self.coolant_connection == ComponentCoolantCnxn.HAB_ONE:
return [self._parent.coolant_loops[0]]
elif self.coolant_connection == ComponentCoolantCnxn.HAB_TWO:
return [self._parent.coolant_loops[1]]
elif self.coolant_connection == ComponentCoolantCnxn.HAB_BOTH:
return [self._parent.coolant_loops[0],
self._parent.coolant_loops[1]]
elif self.coolant_connection == ComponentCoolantCnxn.AYSE_ONE:
return [self._parent.coolant_loops[2]]
    @property
    def coolant_connection(self) -> int:
        # Field 5 of this component's slice in the flat engineering array
        # holds the coolant-connection value (stored as a float, read as int).
        return int(self._array[self._n * _N_COMPONENT_FIELDS + 5])
    @coolant_connection.setter
    def coolant_connection(self, val: ComponentCoolantCnxn):
        log.info(f'setting coolant {self._n} to {float(val)}')
        # Cast to float because the backing array holds floats.
        self._array[self._n * _N_COMPONENT_FIELDS + 5] = float(val)
class RadiatorView:
    """View over a single Radiator's slice of the engineering array.

    Should not be instantiated outside of EngineeringState.
    Useful function: get_coolant_loop()! Gives the coolant loop this
    radiator is attached to, e.g.
    physics_state.engineering.radiator[RAD2].get_coolant_loops()[0].coolant_temp
    """
    def __init__(self, parent: 'EngineeringState', array_rep: np.ndarray, radiator_n: int):
        """Called by an EngineeringState factory.

        parent: EngineeringState used to resolve the attached coolant loop.
        array_rep: array whose element 0 starts the data for all radiators.
        radiator_n: zero-based index of the radiator this view wraps.
        """
        self._parent = parent
        self._array = array_rep
        self._n = radiator_n

    def name(self):
        """Human-readable name of this radiator."""
        return strings.RADIATOR_NAMES[self._n]

    def get_coolant_loop(self) -> CoolantView:
        """The coolant loop this radiator feeds (field is 1-based)."""
        loop_index = self.attached_to_coolant_loop - 1
        return self._parent.coolant_loops[loop_index]

    @property
    def attached_to_coolant_loop(self) -> int:
        # Field 0 of this radiator's slice.
        base = self._n * _N_RADIATOR_FIELDS
        return int(self._array[base + 0])

    @attached_to_coolant_loop.setter
    def attached_to_coolant_loop(self, val: int):
        base = self._n * _N_RADIATOR_FIELDS
        self._array[base + 0] = val

    @property
    def functioning(self) -> bool:
        # Field 1 of this radiator's slice, stored as a float flag.
        base = self._n * _N_RADIATOR_FIELDS
        return bool(self._array[base + 1])

    @functioning.setter
    def functioning(self, val: bool):
        base = self._n * _N_RADIATOR_FIELDS
        self._array[base + 1] = val
class EngineeringState:
    """Wrapper around protos.EngineeringState.
    Access with physics_state.engineering, e.g.
    eng_state = physics_state.engineering
    eng_state.master_alarm = True
    print(eng_state.components[AUXCOM].resistance)
    eng_state.components[LOS].connected = True
    eng_state.radiators[RAD2].functioning = False
    eng_state.radiators[RAD2].get_coolant_loop().coolant_temp = 50
    """
    # Total number of array slots needed for every component, coolant loop,
    # and radiator field.
    N_ENGINEERING_FIELDS = (
        _N_COMPONENTS * _N_COMPONENT_FIELDS +
        _N_COOLANT_LOOPS * _N_COOLANT_FIELDS +
        _N_RADIATORS * _N_RADIATOR_FIELDS
    )
    # Layout of the flat array: [components | coolant loops | radiators].
    _COMPONENT_START_INDEX = 0
    _COOLANT_START_INDEX = _COMPONENT_START_INDEX + _N_COMPONENTS * _N_COMPONENT_FIELDS
    _RADIATOR_START_INDEX = _COOLANT_START_INDEX + _N_COOLANT_LOOPS * _N_COOLANT_FIELDS
    class ComponentList:
        """Allows engineering.components[LOS] style indexing."""
        def __init__(self, owner: 'EngineeringState'):
            self._owner = owner
        def __getitem__(self, index: Union[str, int]) -> ComponentView:
            # Accepts a component name or a numeric index. Raising IndexError
            # for out-of-range ints also terminates legacy __getitem__-based
            # iteration (used by the zips in as_proto).
            if isinstance(index, str):
                index = strings.COMPONENT_NAMES.index(index)
            elif index >= _N_COMPONENTS:
                raise IndexError()
            return ComponentView(
                self._owner,
                self._owner._array[self._owner._COMPONENT_START_INDEX:self._owner._COOLANT_START_INDEX],
                index
            )
        # Use list slicing (with strides, so there's two colons) to get a list of
        # all values of each quantity for each Component.
        # We only define this accessor for fields we use in _derive.
        def Temperature(self) -> np.ndarray:
            return self._owner._array[self._owner._COMPONENT_START_INDEX+1:self._owner._COOLANT_START_INDEX:_N_COMPONENT_FIELDS]
        def Resistance(self) -> np.ndarray:
            return self._owner._array[self._owner._COMPONENT_START_INDEX+2:self._owner._COOLANT_START_INDEX:_N_COMPONENT_FIELDS]
        def Voltage(self) -> np.ndarray:
            return self._owner._array[self._owner._COMPONENT_START_INDEX+3:self._owner._COOLANT_START_INDEX:_N_COMPONENT_FIELDS]
        def Current(self) -> np.ndarray:
            return self._owner._array[self._owner._COMPONENT_START_INDEX+4:self._owner._COOLANT_START_INDEX:_N_COMPONENT_FIELDS]
    class CoolantLoopList:
        """Allows engineering.coolant_loops[LP1] style indexing."""
        def __init__(self, owner: 'EngineeringState'):
            self._owner = owner
        def __getitem__(self, index: Union[str, int]) -> CoolantView:
            if isinstance(index, str):
                index = strings.COOLANT_LOOP_NAMES.index(index)
            elif index >= _N_COOLANT_LOOPS:
                raise IndexError()
            # Note: CoolantView takes only (array, index) -- no owner arg,
            # unlike ComponentView/RadiatorView.
            return CoolantView(
                self._owner._array[self._owner._COOLANT_START_INDEX:self._owner._RADIATOR_START_INDEX],
                index
            )
        # As above, list slicing with strides.
        def CoolantTemp(self) -> np.ndarray:
            return self._owner._array[self._owner._COOLANT_START_INDEX+0:self._owner._RADIATOR_START_INDEX:_N_COOLANT_FIELDS]
    class RadiatorList:
        """Allows engineering.radiators[RAD1] style indexing."""
        def __init__(self, owner: 'EngineeringState'):
            self._owner = owner
        def __getitem__(self, index: Union[str, int]) -> RadiatorView:
            if isinstance(index, str):
                index = strings.RADIATOR_NAMES.index(index)
            elif index >= _N_RADIATORS:
                raise IndexError()
            return RadiatorView(
                self._owner,
                self._owner._array[self._owner._RADIATOR_START_INDEX:],
                index
            )
        # And as above, list slicing with strides.
        def Functioning(self) -> np.ndarray:
            return self._owner._array[self._owner._RADIATOR_START_INDEX+1::_N_RADIATOR_FIELDS]
    def __init__(self,
                 array_rep: np.ndarray, proto_state: protos.EngineeringState, *,
                 parent_state: 'PhysicsState', populate_array: bool):
        """Called by a PhysicsState on creation.

        array_rep: a sufficiently-sized array to store all component, coolant,
            and radiator data. EngineeringState has full control over
            contents, starting at element 0.
        proto_state: the underlying proto we're wrapping.
        parent_state: provides a way for EngineeringState to mirror a couple
            pieces of data from the parent, e.g. hab fuel.
        populate_array: flag that is set when we need to fill array_rep with data.
        """
        assert len(proto_state.components) == _N_COMPONENTS
        assert len(proto_state.coolant_loops) == _N_COOLANT_LOOPS
        assert len(proto_state.radiators) == _N_RADIATORS
        self.components = self.ComponentList(self)
        self.coolant_loops = self.CoolantLoopList(self)
        self.radiators = self.RadiatorList(self)
        self._array = array_rep
        self._proto_state = proto_state
        self._parent_state = parent_state
        if populate_array:
            # We've been asked to populate the data array.
            # The order of data in the array is of course important.
            write_marker = 0
            # Is this loop janky? I would say yes! Could this result in
            # out-of-bounds writes? I hope not!
            # Field order comes from each message's DESCRIPTOR; it must agree
            # with the per-field offsets hard-coded in the *View classes.
            for proto_list, descriptor in [
                (proto_state.components, protos.EngineeringState.Component.DESCRIPTOR),
                (proto_state.coolant_loops, protos.EngineeringState.CoolantLoop.DESCRIPTOR),
                (proto_state.radiators, protos.EngineeringState.Radiator.DESCRIPTOR),
            ]:
                for proto in proto_list:
                    for field in descriptor.fields:
                        array_rep[write_marker] = getattr(proto, field.name)
                        write_marker += 1
    # Fuel values are mirrored from the parent PhysicsState's entities.
    @property
    def habitat_fuel(self):
        return self._parent_state[strings.HABITAT].fuel
    @property
    def ayse_fuel(self):
        return self._parent_state[strings.AYSE].fuel
    # The alarm/gnome flags below live on the proto, not the flat array.
    @property
    def master_alarm(self) -> bool:
        return self._proto_state.master_alarm
    @master_alarm.setter
    def master_alarm(self, val: bool):
        self._proto_state.master_alarm = val
    @property
    def radiation_alarm(self) -> bool:
        return self._proto_state.radiation_alarm
    @radiation_alarm.setter
    def radiation_alarm(self, val: bool):
        self._proto_state.radiation_alarm = val
    @property
    def asteroid_alarm(self) -> bool:
        return self._proto_state.asteroid_alarm
    @asteroid_alarm.setter
    def asteroid_alarm(self, val: bool):
        self._proto_state.asteroid_alarm = val
    @property
    def hab_reactor_alarm(self) -> bool:
        return self._proto_state.hab_reactor_alarm
    @hab_reactor_alarm.setter
    def hab_reactor_alarm(self, val: bool):
        self._proto_state.hab_reactor_alarm = val
    @property
    def ayse_reactor_alarm(self) -> bool:
        return self._proto_state.ayse_reactor_alarm
    @ayse_reactor_alarm.setter
    def ayse_reactor_alarm(self, val: bool):
        self._proto_state.ayse_reactor_alarm = val
    @property
    def hab_gnomes(self) -> bool:
        return self._proto_state.hab_gnomes
    @hab_gnomes.setter
    def hab_gnomes(self, val: bool):
        self._proto_state.hab_gnomes = val
    # TODO(patrick): Make sure this is also represented in the proto, and array rep.
    @property
    def rad_shield_percentage(self) -> int:
        return self._proto_state.rad_shield_percentage
    @rad_shield_percentage.setter
    def rad_shield_percentage(self, val: int):
        self._proto_state.rad_shield_percentage = val
    def as_proto(self) -> protos.EngineeringState:
        """Returns a deep copy of this EngineeringState as a protobuf."""
        constructed_protobuf = protos.EngineeringState()
        constructed_protobuf.CopyFrom(self._proto_state)
        # The copied proto's repeated fields are stale; refresh every field
        # from the live array views.
        for component_data, component in zip(self.components, constructed_protobuf.components):
            (
                component.connected, component.temperature,
                component.resistance, component.voltage,
                component.current, component.coolant_connection
            ) = (
                component_data.connected, component_data.temperature,
                component_data.resistance, component_data.voltage,
                component_data.current, component_data.coolant_connection
            )
        for coolant_data, coolant in zip(self.coolant_loops, constructed_protobuf.coolant_loops):
            (
                coolant.coolant_temp, coolant.primary_pump_on,
                coolant.secondary_pump_on
            ) = (
                coolant_data.coolant_temp, coolant_data.primary_pump_on,
                coolant_data.secondary_pump_on
            )
        for radiator_data, radiator in zip(self.radiators, constructed_protobuf.radiators):
            (
                radiator.attached_to_coolant_loop, radiator.functioning,
            ) = (
                radiator_data.attached_to_coolant_loop, radiator_data.functioning,
            )
        return constructed_protobuf
class PhysicsState:
"""The physical state of the system for use in solve_ivp and elsewhere.
The following operations are supported:
# Construction without a y-vector, taking all data from a PhysicalState
PhysicsState(None, protos.PhysicalState)
# Faster Construction from a y-vector and protos.PhysicalState
PhysicsState(ivp_solution.y, protos.PhysicalState)
# Access of a single Entity in the PhysicsState, by index or Entity name
my_entity: Entity = PhysicsState[0]
my_entity: Entity = PhysicsState['Earth']
# Iteration over all Entitys in the PhysicsState
for entity in my_physics_state:
print(entity.name, entity.pos)
# Convert back to a protos.PhysicalState (this almost never happens)
my_physics_state.as_proto()
Example usage:
y = PhysicsState(y_1d, physical_state)
entity = y[0]
y[HABITAT] = habitat
scipy.solve_ivp(y.y0())
See help(PhysicsState.__init__) for how to initialize. Basically, the first
`y` instantiated in the lifetime of the program will be created by a call to
PhysicsState.__init__. But for the program to have good performance,
PhysicsState.__init__ should have both parameters filled if it's being
called more than once a second while OrbitX is running normally.
"""
class NoEntityError(ValueError):
"""Raised when an entity is not found."""
pass
# For if an entity is not landed to anything
NO_INDEX = -1
# The number of single-element values at the end of the y-vector.
# Currently just SRB_TIME and TIME_ACC are appended to the end. If there
# are more values appended to the end, increment this and follow the same
# code for .srb_time and .time_acc
N_SINGULAR_ELEMENTS = 2
ENTITY_START_INDEX = 0
ENGINEERING_START_INDEX = -(EngineeringState.N_ENGINEERING_FIELDS)
SRB_TIME_INDEX = ENGINEERING_START_INDEX - 2
TIME_ACC_INDEX = SRB_TIME_INDEX + 1
# Datatype of internal y-vector
DTYPE = np.float64
def __init__(self,
y: Optional[np.ndarray],
proto_state: protos.PhysicalState):
"""Collects data from proto_state and y, when y is not None.
There are two kinds of values we care about:
1) values that change during simulation (like position, velocity, etc)
2) values that do not change (like mass, radius, name, etc)
If both proto_state and y are given, 1) is taken from y and
2) is taken from proto_state. This is a very quick operation.
If y is None, both 1) and 2) are taken from proto_state, and a new
y vector is generated. This is a somewhat expensive operation."""
assert isinstance(proto_state, protos.PhysicalState)
assert isinstance(y, np.ndarray) or y is None
# self._proto_state will have positions, velocities, etc for all
# entities. DO NOT USE THESE they will be stale. Use the accessors of
# this class instead!
self._proto_state = protos.PhysicalState()
self._proto_state.CopyFrom(proto_state)
self._n = len(proto_state.entities)
self._entity_names = \
[entity.name for entity in self._proto_state.entities]
self._array_rep: np.ndarray
if y is None:
# We rely on having an internal array representation we can refer
# to, so we have to build up this array representation.
self._array_rep = np.empty(
len(proto_state.entities) | |
MPQ (Ctrl+Alt+O)', [NORMAL,DISABLED][FOLDER]),
('save', self.save, 'Save (Ctrl+S)', DISABLED),
('saveas', self.saveas, 'Save As (Ctrl+Alt+A)', DISABLED),
('savempq', self.savempq, 'Save MPQ (Ctrl+Alt+M)', DISABLED),
('close', self.close, 'Close (Ctrl+W)', DISABLED),
10,
('undo', self.undo, 'Undo (Ctrl+Z)', DISABLED),
('redo', self.redo, 'Redo (Ctrl+Y)', DISABLED),
10,
('order', self.order, 'File Order', NORMAL, True),
('idsort', self.idsort, 'Sort by ID', NORMAL, True),
('bwsort', self.bwsort, 'Sory by BroodWar', NORMAL, True),
('flagsort', self.flagsort, 'Sort by Flags', NORMAL, True),
('stringsort', self.stringsort, 'Sort by String', NORMAL, True),
10,
('register', self.register, 'Set as default *.bin editor (Windows Only)', [DISABLED,NORMAL][win_reg]),
('help', self.help, 'Help (F1)', NORMAL),
('about', self.about, 'About PyAI', NORMAL),
10,
('exit', self.exit, 'Exit (Alt+F4)', NORMAL)
],
[
('add', self.add, 'Add Blank Script (Insert)', DISABLED),
('remove', self.remove, 'Remove Scripts (Delete)', DISABLED),
4,
('find', self.find, 'Find Scripts (Ctrl+F)', DISABLED),
10,
('export', self.export, 'Export Scripts (Ctrl+Alt+E)', DISABLED),
('import', self.iimport, 'Import Scripts (Ctrl+Alt+I)', DISABLED),
('listimport', self.listimport, 'Import a List of Files (Ctrl+L)', DISABLED),
4,
('reference', self.reference, 'Print Reference when Decompiling', NORMAL, False),
('saveextra', self.extrainfo, 'Save Information Comments and Labels', NORMAL, False),
10,
('codeedit', self.codeedit, 'Edit AI Script (Ctrl+E)', DISABLED),
('edit', self.edit, 'Edit AI ID, String, and Extra Info. (Ctrl+I)', DISABLED),
('flags', self.editflags, 'Edit Flags (Ctrl+G)', DISABLED),
10,
('extdef', self.extdef, 'Manage External Definition Files (Ctrl+X)', NORMAL),
('tbl', self.managetbl, 'Manage TBL file (Ctrl+T)', NORMAL),
('asc3topyai', self.managedat, 'Manage MPQ and DAT Settings (Ctrl+U)', NORMAL),
4,
('openset', self.openset, 'Open TBL and DAT Settings', NORMAL),
('saveset', self.saveset, 'Save TBL and DAT Settings', NORMAL),
]
]
self.buttons = {}
for pad,bar,buttons in zip([2,1],[Frame(self),Frame(self)],bars):
for btn in buttons:
if isinstance(btn, tuple):
image = get_img(btn[0])
if len(btn) == 4:
button = Button(bar, image=image, width=20, height=20, command=btn[1], state=btn[3])
elif btn[4]:
button = Radiobutton(bar, image=image, width=20, height=20, command=btn[1], state=btn[3], indicatoron=0, variable=self.sort, value=btn[0])
else:
button = Checkbutton(bar, image=image, width=20, height=20, state=btn[3], indicatoron=0, variable=btn[1])
button.image = image
button.tooltip = Tooltip(button, btn[2], couriernew)
button.pack(side=LEFT)
if button.winfo_reqwidth() > 26:
button['width'] = 18
if button.winfo_reqheight() > 26:
button['height'] = 18
self.buttons[btn[0]] = button
else:
Frame(bar, width=btn).pack(side=LEFT)
bar.pack(side=TOP, fill=X, padx=2, pady=pad)
self.sort.set('order')
#Listbox
listframe = Frame(self, bd=2, relief=SUNKEN)
scrollbar = Scrollbar(listframe)
self.listbox = Listbox(listframe, selectmode=EXTENDED, font=couriernew, activestyle=DOTBOX, width=1, height=1, bd=0, highlightthickness=0, yscrollcommand=scrollbar.set, exportselection=0)
bind = [
('<MouseWheel>', self.scroll),
('<Home>', lambda a,i=0: self.move(a,i)),
('<End>', lambda a,i=END: self.move(a,i)),
('<Up>', lambda a,i=-1: self.move(a,i)),
('<Left>', lambda a,i=-1: self.move(a,i)),
('<Down>', lambda a,i=1: self.move(a,i)),
('<Right>', lambda a,i=-1: self.move(a,i)),
('<Prior>', lambda a,i=-10: self.move(a,i)),
('<Next>', lambda a,i=10: self.move(a,i)),
]
for b in bind:
self.bind(*b)
self.listbox.bind('<ButtonRelease-3>', self.popup)
self.listbox.bind('<Double-Button-1>', self.codeedit)
self.listbox.tooltip = ListboxTooltip(self.listbox, couriernew)
self.listbox.get_entry = self.get_entry
scrollbar.config(command=self.listbox.yview)
scrollbar.pack(side=RIGHT, fill=Y)
self.listbox.pack(side=LEFT, fill=BOTH, expand=1)
listframe.pack(fill=BOTH, padx=2, pady=2, expand=1)
listmenu = [
('Add Blank Script (Insert)', self.add, 4), # 0
('Remove Scripts (Delete)', self.remove, 0), # 1
None,
('Export Scripts (Ctrl+Alt+E)', self.export, 0), # 3
('Import Scripts (Ctrl+Alt+I)', self.iimport, 0), # 4
None,
('Edit AI Script (Ctrl+E)', self.codeedit, 5), #6
('Edit Script ID, String, and AI Info (Ctrl+I)', self.edit, 8), # 7
('Edit Flags (Ctrl+G)', self.editflags, 8), # 8
]
self.listmenu = Menu(self, tearoff=0)
for m in listmenu:
if m:
l,c,u = m
self.listmenu.add_command(label=l, command=c, underline=u)
else:
self.listmenu.add_separator()
#Statusbar
self.status = StringVar()
self.scriptstatus = StringVar()
statusbar = Frame(self)
Label(statusbar, textvariable=self.status, bd=1, relief=SUNKEN, anchor=W).pack(side=LEFT, expand=1, padx=1, fill=X)
image = get_img('save')
self.editstatus = Label(statusbar, image=image, bd=0, state=DISABLED)
self.editstatus.image = image
self.editstatus.pack(side=LEFT, padx=1, fill=Y)
Label(statusbar, textvariable=self.scriptstatus, bd=1, relief=SUNKEN, anchor=W).pack(side=LEFT, expand=1, padx=1, fill=X)
self.status.set('Load your files or create new ones.')
statusbar.pack(side=BOTTOM, fill=X)
if 'window' in self.settings:
loadsize(self, self.settings, 'window', True)
self.mpqhandler = MPQHandler(self.settings.get('mpqs',[]))
if not 'mpqs' in self.settings:
self.mpqhandler.add_defaults()
e = self.open_files()
if guifile:
self.open(aiscript=guifile)
start_new_thread(check_update, (self,))
if e:
self.managedat(err=e)
def open_files(self):
self.mpqhandler.open_mpqs()
err = None
try:
unitsdat = DAT.UnitsDAT()
upgradesdat = DAT.UpgradesDAT()
techdat = DAT.TechDAT()
unitsdat.load_file(self.mpqhandler.get_file(self.settings['unitsdat']))
upgradesdat.load_file(self.mpqhandler.get_file(self.settings['upgradesdat']))
techdat.load_file(self.mpqhandler.get_file(self.settings['techdatadat']))
if not self.tbl:
file = self.select_file('Open a stat_txt.tbl first', True, '.tbl', [('TBL Files','*.tbl'),('All Files','*')])
if not file:
r = True
tbl = TBL.TBL()
tbl.load_file(file)
self.stat_txt = file
self.tbl = tbl
except PyMSError, e:
err = e
else:
self.unitsdatdat = unitsdat
self.upgrades = upgradesdat
self.techdat = techdat
if self.ai:
self.ai.unitsdat = unitsdat
self.ai.upgradesdat = upgradesdat
self.ai.techdat = techdat
self.mpqhandler.close_mpqs()
return err
# Misc. functions
def title(self, text=None):
global LONG_VERSION
if not text:
text = self.titletext
Tk.title(self,'PyAI %s (%s)' % (LONG_VERSION, text))
self.titletext = text
	def get_entry(self, index):
		"""Parse a listbox row back into (id, is_bw, flags, string_index, info).

		Rows are produced by entry_text(): 4-char ID, spacing, '  ' or 'BW',
		spacing, 3 flag bits, spacing, then the string/extra-info text.
		"""
		match = re.match('(....)\s{5}(\s\s|BW)\s{5}([01]{3})\s{5}(.+)', self.listbox.get(index))
		id = match.group(1)
		# self.ai.ais[id][1] is the script's TBL string index.
		return (id, match.group(2) == 'BW', match.group(3), self.ai.ais[id][1], match.group(4))
def entry_text(self, id, bw, flags, string):
if isinstance(string, int):
string = TBL.decompile_string(self.ai.tbl.strings[string])
if len(string) > 50:
string = string[:47] + '...'
aiinfo = ''
if id in self.ai.aiinfo:
aiinfo = self.ai.aiinfo[id][0]
return '%s %s %s %s%s%s' % (id, [' ','BW'][bw], flags, string, ' ' * (55-len(string)), aiinfo)
def set_entry(self, index, id, bw, flags, string):
if index != END:
self.listbox.delete(index)
self.listbox.insert(index, self.entry_text(id, bw, flags, string))
def resort(self):
{'order':self.order,'idsort':self.idsort,'bwsort':self.bwsort,'flagsort':self.flagsort,'stringsort':self.stringsort}[self.sort.get()]()
def select_file(self, title, open=True, ext='.bin', filetypes=[('AI Scripts','*.bin'),('All Files','*')], parent=None):
if parent == None:
parent = self
path = self.settings.get('lastpath', BASE_DIR)
file = [tkFileDialog.asksaveasfilename,tkFileDialog.askopenfilename][open](parent=parent, title=title, defaultextension=ext, filetypes=filetypes, initialdir=path)
if file:
self.settings['lastpath'] = os.path.dirname(file)
return file
def add_undo(self, type, data):
max = self.settings.get('undohistory', 10)
if not max:
return
if self.redos:
self.redos = []
self.buttons['redo']['state'] = DISABLED
self.menus['Edit'].entryconfig(1, state=DISABLED)
if not self.undos:
self.buttons['undo']['state'] = NORMAL
self.menus['Edit'].entryconfig(0, state=NORMAL)
self.undos.append((type, data))
if len(self.undos) > max:
del self.undos[0]
	def action_states(self):
		"""Enable/disable menu entries and toolbar buttons to match state.

		File-level actions need a loaded AI (self.ai); selection-level
		actions additionally need a listbox selection.
		"""
		# [NORMAL,DISABLED][cond] picks DISABLED when cond is true.
		file = [NORMAL,DISABLED][not self.ai]
		select = [NORMAL,DISABLED][not self.listbox.curselection()]
		# Menu entry indices are positional; they must track the menu layout.
		for entry in [4,5,6,7]:
			self.menus['File'].entryconfig(entry, state=file)
		for entry in [3,4,9,10,11,14,19,20]:
			self.menus['Edit'].entryconfig(entry, state=file)
		for btn in ['save','saveas','close','add','import','listimport','codeedit']:
			self.buttons[btn]['state'] = file
		if not FOLDER:
			# presumably FOLDER means MPQ support is unavailable -- confirm.
			self.buttons['savempq']['state'] = file
		for entry in [5,6,8,15,16]:
			self.menus['Edit'].entryconfig(entry, state=select)
		for btn in ['remove','find','export','edit','flags']:
			self.buttons[btn]['state'] = select
	def unsaved(self):
		"""Prompt to save any unsaved TBL and AI changes.

		Returns True when the user cancels (callers abort their action);
		otherwise returns None/falsy, saving first if the user chose to.
		"""
		if self.tbledited:
			save = askquestion(parent=self, title='Save Changes?', message="Save changes to '%s'?" % self.stat_txt, default=YES, type=YESNOCANCEL)
			if save != 'no':
				if save == 'cancel':
					return True
				self.tbl.compile(self.stat_txt)
				self.tbledited = False
		if self.ai and self.edited:
			# Unsaved files: show the default names in the prompt text only.
			aiscript = self.aiscript
			if not aiscript:
				aiscript = 'aiscript.bin'
			bwscript = self.bwscript
			if not bwscript:
				bwscript = 'bwscript.bin'
			save = askquestion(parent=self, title='Save Changes?', message="Save changes to '%s' and '%s'?" % (aiscript, bwscript), default=YES, type=YESNOCANCEL)
			if save != 'no':
				if save == 'cancel':
					return True
				if self.aiscript:
					self.save()
				else:
					# Never saved: saveas() returns True if the user cancels.
					return self.saveas()
def edittbl(self, edited=None):
if edited == None:
return self.tbledited
self.tbledited = edited
def stattxt(self, file=None):
if file == None:
return self.stat_txt
self.stat_txt = file
def popup(self, e):
if self.ai:
if not self.listbox.curselection():
s = DISABLED
else:
s = NORMAL
for i in [1,3,7,8]:
self.listmenu.entryconfig(i, state=s)
self.listmenu.post(e.x_root, e.y_root)
def scroll(self, e):
if e.delta > 0:
self.listbox.yview('scroll', -2, 'units')
else:
self.listbox.yview('scroll', 2, 'units')
	def move(self, e, a):
		"""Keyboard navigation: move the selection.

		`a` is 0 (Home), END (End), or a relative offset (e.g. -1, +10).
		Does nothing when there is no current selection.
		"""
		if self.listbox.curselection():
			if a == END:
				# NOTE(review): size()-2 lands on the second-to-last row;
				# size()-1 would be the last row -- confirm intent.
				a = self.listbox.size()-2
			elif a not in [0,END]:
				# Clamp the new index into [0, size-1].
				a = max(min(self.listbox.size()-1, int(self.listbox.curselection()[0]) + a),0)
			self.listbox.select_clear(0,END)
			self.listbox.select_set(a)
			self.listbox.see(a)
	# Actions
	def new(self, key=None):
		"""Start editing a fresh, empty aiscript.bin/bwscript.bin pair."""
		if not self.unsaved():
			# First AIBIN arg is the bwscript source (see open()); False
			# means none, so a blank BWBIN is attached below instead.
			self.ai = AIBIN.AIBIN(False, self.unitsdat, self.upgradesdat, self.techdat, self.tbl)
			self.ai.bwscript = AIBIN.BWBIN(self.unitsdat, self.upgradesdat, self.techdat, self.tbl)
			self.ai.bwscript.tbl = self.tbl
			self.strings = {}
			self.aiscript = None
			self.bwscript = None
			self.edited = False
			self.editstatus['state'] = DISABLED
			self.undos = []
			self.redos = []
			self.title('aiscript.bin, bwscript.bin')
			self.status.set('Editing new file!')
			self.listbox.delete(0, END)
			self.action_states()
			self.scriptstatus.set('aiscript.bin: 0 (0 B) bwscript.bin: 0 (0 B)')
	def open(self, key=None, aiscript=None, bwscript=None):
		"""Open aiscript.bin (and optionally bwscript.bin), prompting when
		no paths are given. Paths may also be file-like objects (open_mpq).
		"""
		if not self.unsaved():
			if not aiscript:
				aiscript = self.select_file('Open aiscript.bin')
				if not aiscript:
					return
			if not bwscript:
				bwscript = self.select_file('Open bwscript.bin (Cancel to only open aiscript.bin)')
			warnings = []
			try:
				ai = AIBIN.AIBIN(bwscript, self.unitsdat, self.upgradesdat, self.techdat, self.tbl)
				warnings.extend(ai.warnings)
				warnings.extend(ai.load_file(aiscript, True))
			except PyMSError, e:
				ErrorDialog(self, e)
				return
			self.ai = ai
			# Build TBL-string-index -> [script IDs] reverse lookup.
			self.strings = {}
			for id,ai in self.ai.ais.iteritems():
				if not ai[1] in self.strings:
					self.strings[ai[1]] = []
				self.strings[ai[1]].append(id)
			self.aiscript = aiscript
			self.bwscript = bwscript
			self.edited = False
			self.editstatus['state'] = DISABLED
			self.undos = []
			self.redos = []
			if not bwscript:
				# Display the default name when no bwscript was opened.
				bwscript = 'bwscript.bin'
			self.title('%s, %s' % (aiscript,bwscript))
			self.status.set('Load Successful!')
			self.resort()
			self.action_states()
			s = 'aiscript.bin: %s (%s B) ' % (len(self.ai.ais),sum(self.ai.aisizes.values()))
			if self.ai.bwscript:
				s += ' bwscript.bin: %s (%s B)' % (len(self.ai.bwscript.ais),sum(self.ai.bwscript.aisizes.values()))
			self.scriptstatus.set(s)
			if warnings:
				WarningDialog(self, warnings)
def open_default(self, key=None):
self.open(key, os.path.join(BASE_DIR, 'Libs','MPQ','Scripts','aiscript.bin'),os.path.join(BASE_DIR, 'Libs','MPQ','Scripts','bwscript.bin'))
def open_mpq(self):
file = self.select_file('Open MPQ', True, '.mpq', [('MPQ Files','*.mpq'),('Embedded MPQ Files','*.exe'),('All Files','*')])
if not file:
return
h = SFileOpenArchive(file)
if SFInvalidHandle(h):
ErrorDialog(self, PyMSError('Open','Could not open MPQ "%s"' % file))
return
ai = SFile(file='scripts\\aiscript.bin')
bw = SFile(file='scripts\\bwscirpt.bin')
for t in ['ai','bw']:
f = SFileOpenFileEx(h, 'scripts\\%sscript.bin' % t)
if f in [None,-1]:
if t == 'ai':
SFileCloseArchive(h)
ErrorDialog(self, PyMSError('Open','Could not find aiscript.bin in the MPQ.'))
return
bw = None
continue
r = SFileReadFile(f)
SFileCloseFile(f)
if t == 'ai':
ai.text = r[0]
else:
bw.text = r[0]
SFileCloseArchive(h)
self.open(None,ai,bw)
	def save(self, key=None, ai=None, bw=None):
		"""Save to the current paths, delegating to saveas() when unset.

		ai/bw override the target paths; bw is only written when the
		bwscript actually has scripts. Offers to save an edited
		stat_txt.tbl first. NOTE(review): always returns None, even on
		success -- saveas() tests this return value; confirm.
		"""
		if key and self.buttons['save']['state'] != NORMAL:
			# Invoked via keyboard shortcut while the action is disabled.
			return
		if ai == None:
			ai = self.aiscript
		if bw == None and self.ai.bwscript.ais:
			bw = self.bwscript
		if ai == None:
			# Never saved before: prompt for a path instead.
			self.saveas()
			return
		if self.tbledited:
			file = self.select_file("Save stat_txt.tbl (Cancel doesn't stop bin saving)", False, '.tbl', [('TBL Files','*.tbl'),('All Files','*')])
			if file:
				self.stat_txt = file
				try:
					self.tbl.compile(file, extra=self.extrainfo.get())
				except PyMSError, e:
					ErrorDialog(self, e)
					return
				self.tbledited = False
		try:
			self.ai.compile(ai, bw, extra=self.extrainfo.get())
			self.aiscript = ai
			if bw != None:
				self.bwscript = bw
			self.status.set('Save Successful!')
		except PyMSError, e:
			ErrorDialog(self, e)
	def saveas(self, key=None):
		"""Prompt for new path(s) and save.

		Returns True when the user cancels the aiscript dialog.
		"""
		if key and self.buttons['saveas']['state'] != NORMAL:
			return
		aiscript = self.select_file('Save aiscript.bin As', False)
		if not aiscript:
			return True
		bwscript = None
		if self.ai.bwscript.ais:
			bwscript = self.select_file('Save bwscript.bin As (Cancel to save aiscript.bin only)', False)
		# NOTE(review): save() always returns None, so this branch never
		# runs and the title isn't refreshed after a save-as -- confirm.
		if self.save(ai=aiscript, bw=bwscript):
			self.tbledited = False
			self.title('%s, %s' % (self.aiscript,self.bwscript))
	def savempq(self, key=None):
		"""Compile the scripts and write them into an MPQ (or SEMPQ .exe)."""
		file = self.select_file('Save MPQ to...', False, '.mpq', [('MPQ Files','*.mpq'),('Self-executing MPQ','*.exe'),('All Files','*')], self)
		if file:
			if file.endswith('%sexe' % os.extsep):
				if os.path.exists(file):
					h = MpqOpenArchiveForUpdate(file, MOAU_OPEN_ALWAYS | MOAU_MAINTAIN_LISTFILE)
				else:
					# New self-executing MPQ: start from the bundled stub.
					try:
						copy(os.path.join(BASE_DIR,'Libs','Data','SEMPQ.exe'), file)
						h = MpqOpenArchiveForUpdate(file, MOAU_OPEN_ALWAYS | MOAU_MAINTAIN_LISTFILE)
					except:
						h = -1
			else:
				h = MpqOpenArchiveForUpdate(file, MOAU_OPEN_ALWAYS | MOAU_MAINTAIN_LISTFILE)
			if h == -1:
				ErrorDialog(self, PyMSError('Saving','Could not open %sMPQ "%s".' % (['','SE'][file.endswith('%sexe' % os.extsep)],file)))
				return
			# Compile into in-memory SFile buffers, then add both to the MPQ.
			ai = SFile()
			bw = SFile()
			try:
				self.ai.compile(ai, bw, self.extrainfo.get())
			except PyMSError, e:
				# NOTE(review): no return here, so empty buffers may still be
				# written to the archive after a compile error -- confirm.
				ErrorDialog(self, e)
			undone = []
			for f,s in [('ai',ai),('bw',bw)]:
				try:
					MpqAddFileFromBuffer(h, s.text, 'scripts\\%sscript.bin' % f, MAFA_COMPRESS | MAFA_REPLACE_EXISTING)
				except:
					undone.append('scripts\\%sscript.bin' % f)
			MpqCloseUpdatedArchive(h)
			if undone:
				askquestion(parent=self, title='Save problems', message='%s could not be saved to the MPQ.' % ' and '.join(undone), type=OK)
def close(self, key=None):
if key and self.buttons['close']['state'] != NORMAL:
return
if not self.unsaved():
self.ai = None
self.strings = {}
self.aiscript = None
self.bwscript = None
self.edited = False
self.editstatus['state'] = DISABLED
self.undos = []
self.redos = []
self.title('No files loaded')
self.status.set('Load your files or create new ones.')
self.listbox.delete(0, END)
self.action_states()
self.scriptstatus.set('')
def register(self, e=None):
try:
register_registry('PyAI','AI','bin',os.path.join(BASE_DIR, 'PyAI.pyw'),os.path.join(BASE_DIR,'Images','PyAI.ico'))
except PyMSError, e:
ErrorDialog(self, e)
def help(self, e=None):
webbrowser.open('file:///%s' % os.path.join(BASE_DIR, 'Docs', 'PyAI.html'))
	def about(self):
		"""Show the About dialog, crediting testers and contributors."""
		thanks = [
			('bajadulce',"Testing, support, and hosting! I can't thank you enough!"),
			('ashara','Lots of help with beta testing and ideas'),
			('MamiyaOtaru','Found lots of bugs, most importantly ones on Mac and Linux.'),
			('Heinerman','File specs and command information'),
			('modmaster50','Lots of ideas, testing, and support, thanks a lot!')
		]
		AboutDialog(self, 'PyAI', LONG_VERSION, thanks)
def exit(self, e=None):
if not self.unsaved():
savesize(self, self.settings)
self.settings['stat_txt'] = self.stat_txt
self.settings['highlights'] = self.highlights
self.settings['reference'] = self.reference.get()
self.settings['extrainfo'] = self.extrainfo.get()
self.settings['imports'] = self.imports
self.settings['extdefs'] = self.extdefs
try:
f = file(os.path.join(BASE_DIR,'Settings','PyAI.txt'),'w')
f.write(pprint(self.settings))
f.close()
except:
pass
self.destroy()
	def order(self, key=None):
		"""Rebuild the listbox in file order, preserving the selection."""
		if self.ai:
			sel = []
			if self.listbox.size():
				# Remember the IDs of the selected rows so they can be
				# re-selected after the rebuild.
				for index in self.listbox.curselection():
					try:
						sel.append(self.get_entry(index)[0])
					except:
						pass
			self.listbox.delete(0, END)
			for id,ai in self.ai.ais.iteritems():
				# ai[0] is the non-BW flag, ai[1] the TBL string index,
				# ai[2] the raw flags -- presumably; confirm against AIBIN.
				self.set_entry(END, id, not ai[0], AIBIN.convflags(ai[2]), ai[1])
				if sel and id in sel:
					self.listbox.select_set(END)
			if not sel:
				self.listbox.select_set(0)
def idsort(self, key=None):
if self.ai:
sel = []
if self.listbox.size():
for index in self.listbox.curselection():
try:
sel.append(self.get_entry(index)[0])
except:
pass
self.listbox.delete(0, END)
ais = list(self.ai.ais.keynames)
ais.sort()
for id in ais:
ai | |
<gh_stars>1-10
"""IMAP Query builder"""
import datetime
import itertools
import functools
from collections import UserString
from typing import Iterable, Optional, Dict, Any, List, Union
from .consts import SHORT_MONTH_NAMES
from .utils import clean_uids, quote
class Header:
    """A 'Header-name: value' pair used for IMAP HEADER search criteria."""
    __slots__ = ('name', 'value')

    def __init__(self, name: str, value: str):
        # Validate both arguments up front, then quote them for the query.
        for label, arg in (('Header-name', name), ('Header-value', value)):
            if not isinstance(arg, str):
                raise TypeError('{} expected str value, "{}" received'.format(label, type(arg)))
        self.name = quote(name)
        self.value = quote(value)

    def __str__(self):
        return '{0.name}: {0.value}'.format(self)
class UidRange:
    """
    * - represents the largest number in use.
    x:y - represents sequence range, example: 4:*
    NOTE: UID range of <value>:* always includes the UID of the last message in the mailbox,
    even if <value> is higher than any assigned UID value ->
    any UID range with * indicates at least one message (with the highest numbered UID), unless the mailbox is empty.
    """
    __slots__ = ('start', 'end')

    def __init__(self, start: str, end: Optional[str] = None):
        self.start = self._cleaned_bound('start', start)
        self.end = None if end is None else self._cleaned_bound('end', end)

    @staticmethod
    def _cleaned_bound(label, raw) -> str:
        # A bound is a (whitespace-stripped) run of digits or the literal '*'.
        bound = str(raw).strip()
        if not (bound.isdigit() or bound == '*'):
            raise TypeError('UidRange {} arg must be str with digits or *'.format(label))
        return bound

    def __str__(self):
        # end is either None or a validated non-empty string.
        if self.end is None:
            return self.start
        return '{}:{}'.format(self.start, self.end)
class LogicOperator(UserString):
    """Base class for IMAP logic operators (AND / OR / NOT).

    Accepts already-converted criteria strings positionally plus keyword
    search params; keywords are converted through ParamConverter. The final
    combined criteria string (combine_params) becomes the UserString payload,
    so instances behave like strings.
    """
    def __init__(
            self,
            *converted_strings,
            answered: Optional[bool] = None,
            seen: Optional[bool] = None,
            flagged: Optional[bool] = None,
            draft: Optional[bool] = None,
            deleted: Optional[bool] = None,
            keyword: Optional[Union[str, List[str]]] = None,
            no_keyword: Optional[Union[str, List[str]]] = None,
            from_: Optional[Union[str, List[str]]] = None,
            to: Optional[Union[str, List[str]]] = None,
            subject: Optional[Union[str, List[str]]] = None,
            body: Optional[Union[str, List[str]]] = None,
            text: Optional[Union[str, List[str]]] = None,
            bcc: Optional[Union[str, List[str]]] = None,
            cc: Optional[Union[str, List[str]]] = None,
            date: Optional[Union[datetime.date, List[datetime.date]]] = None,
            date_gte: Optional[Union[datetime.date, List[datetime.date]]] = None,
            date_lt: Optional[Union[datetime.date, List[datetime.date]]] = None,
            sent_date: Optional[Union[datetime.date, List[datetime.date]]] = None,
            sent_date_gte: Optional[Union[datetime.date, List[datetime.date]]] = None,
            sent_date_lt: Optional[Union[datetime.date, List[datetime.date]]] = None,
            size_gt: Optional[int] = None,
            size_lt: Optional[int] = None,
            new: Optional[bool] = None,
            old: Optional[bool] = None,
            recent: Optional[bool] = None,
            all: Optional[bool] = None,  # noqa
            uid: Optional[Union[str, Iterable[str], UidRange]] = None,
            header: Optional[Header] = None,
            gmail_label: Optional[Union[str, List[str]]] = None):  # todo newline after drop 3.5
        self.converted_strings = converted_strings
        for val in converted_strings:
            if not any(isinstance(val, t) for t in (str, UserString)):
                raise TypeError('Unexpected type "{}" for converted part, str like obj expected'.format(type(val)))
        # NOTE: relies on locals() -- every keyword argument above whose name
        # is listed in SEARCH_KEYS (module-level constant, defined elsewhere
        # in this file) and is not None gets picked up for conversion. Do not
        # introduce extra local variables before this line.
        unconverted_dict = {k: v for k, v in locals().items() if k in SEARCH_KEYS and v is not None}
        self.converted_params = ParamConverter(unconverted_dict).convert()
        if not any((self.converted_strings, self.converted_params)):
            raise ValueError('{} expects params'.format(self.__class__.__name__))
        super().__init__(self.combine_params())
    def combine_params(self) -> str:
        """combine self.converted_strings and self.converted_params to IMAP search criteria format"""
        raise NotImplementedError
    @staticmethod
    def prefix_join(operator: str, params: Iterable[str]) -> str:
        """Join params by prefix notation rules, enclose in parenthesis"""
        # e.g. operator='OR ', params=[a, b, c] -> '(OR OR a b c)'
        return '({})'.format(functools.reduce(lambda a, b: '{}{} {}'.format(operator, a, b), params))
class AND(LogicOperator):
    """When multiple keys are specified, the result is the intersection of all the messages that match those keys."""
    def combine_params(self) -> str:
        # IMAP ANDs criteria implicitly, so the parts are simply juxtaposed
        # (empty operator prefix).
        all_parts = itertools.chain(self.converted_strings, self.converted_params)
        return self.prefix_join('', all_parts)
class OR(LogicOperator):
    """OR <search-key1> <search-key2> Messages that match either search key."""
    def combine_params(self) -> str:
        # Prefix each join step with 'OR ' per RFC 3501 prefix notation.
        all_parts = itertools.chain(self.converted_strings, self.converted_params)
        return self.prefix_join('OR ', all_parts)
class NOT(LogicOperator):
    """NOT <search-key> Messages that do not match the specified search key."""
    def combine_params(self) -> str:
        # The inner parts are AND-joined into one group, then negated whole.
        grouped = self.prefix_join('', itertools.chain(self.converted_strings, self.converted_params))
        return 'NOT {}'.format(grouped)
class ParamConverter:
"""Convert search params to IMAP format"""
    # Param names that may carry a list of values; each value in the list
    # becomes its own converted criterion (see _gen_values / convert).
    multi_key_allowed = (
        'keyword', 'no_keyword', 'from_', 'to', 'subject', 'body', 'text', 'bcc', 'cc',
        'date', 'date_gte', 'date_lt', 'sent_date', 'sent_date_gte', 'sent_date_lt',
        'header', 'gmail_label',
    )
    def __init__(self, params: Dict[str, Any]):
        # Raw search params: name -> value (single value, or an iterable of
        # values for names in multi_key_allowed).
        self.params = params
def _gen_values(self, key: str, value: Any) -> Iterable[Any]:
"""Values generator"""
# single value
if key not in self.multi_key_allowed or isinstance(value, str):
yield value
else:
try:
# multiple values
for i in iter(value):
yield i
except TypeError:
# single value
yield value
def convert(self) -> List[str]:
"""
:return: params in IMAP format
"""
converted = []
for key, raw_val in sorted(self.params.items(), key=lambda x: x[0]):
for val in self._gen_values(key, raw_val):
convert_func = getattr(self, 'convert_{}'.format(key), None)
if not convert_func:
raise KeyError('"{}" is an invalid parameter.'.format(key))
converted.append(convert_func(key, val))
return converted
@classmethod
def format_date(cls, value: datetime.date) -> str:
"""To avoid locale affects"""
return '{}-{}-{}'.format(value.day, SHORT_MONTH_NAMES[value.month - 1], value.year)
    # --- value validators: each checks one param value and returns it
    # normalized, raising TypeError with the offending key on mismatch. ---
    @staticmethod
    def cleaned_str(key: str, value: str) -> str:
        # Exact-type check: subclasses and coercible types are rejected.
        if type(value) is not str:
            raise TypeError('"{}" expected str value, "{}" received'.format(key, type(value)))
        return str(value)
    @staticmethod
    def cleaned_date(key: str, value: datetime.date) -> datetime.date:
        # type() (not isinstance) deliberately rejects datetime.datetime,
        # which subclasses date but would carry unwanted time information.
        if type(value) is not datetime.date:
            raise TypeError('"{}" expected datetime.date value, "{}" received'.format(key, type(value)))
        return value
    @staticmethod
    def cleaned_bool(key: str, value: bool) -> bool:
        if type(value) is not bool:
            raise TypeError('"{}" expected bool value, "{}" received'.format(key, type(value)))
        return bool(value)
    @staticmethod
    def cleaned_true(key: str, value: bool) -> bool:
        # For flags that only make sense when enabled (e.g. ALL): the literal
        # True is the only accepted value.
        if value is not True:
            raise TypeError('"{}" expected "True", "{}" received'.format(key, type(value)))
        return True
    @staticmethod
    def cleaned_uint(key: str, value: int) -> int:
        if type(value) is not int or int(value) < 0:
            raise TypeError('"{}" expected int value >= 0, "{}" received'.format(key, type(value)))
        return int(value)
    @staticmethod
    def cleaned_uid(key: str, value: Union[str, Iterable[str], UidRange]) -> str:
        # range
        if isinstance(value, UidRange):
            return str(value)
        # set
        try:
            return clean_uids(value)
        except TypeError as e:
            # NOTE(review): re-raised without "from e", so the original
            # context is replaced by this message-only TypeError.
            raise TypeError('{} parse error: {}'.format(key, str(e)))
    @staticmethod
    def cleaned_header(key: str, value: Header) -> Header:
        if not isinstance(value, Header):
            raise TypeError('"{}" expected Header (H) value, "{}" received'.format(key, type(value)))
        return value
    # --- criterion builders: each maps one validated param value to a single
    # RFC 3501 SEARCH criterion string. Flag and keyword atoms are emitted
    # bare; free-text fields are quoted via utils.quote. ---
    def convert_answered(self, key, value) -> str:
        """Messages [with/without] the Answered flag set. (ANSWERED, UNANSWERED)"""
        return 'ANSWERED' if self.cleaned_bool(key, value) else 'UNANSWERED'
    def convert_seen(self, key, value) -> str:
        """Messages that [have/do not have] the Seen flag set. (SEEN, UNSEEN)"""
        return 'SEEN' if self.cleaned_bool(key, value) else 'UNSEEN'
    def convert_flagged(self, key, value) -> str:
        """Messages [with/without] the Flagged flag set. (FLAGGED, UNFLAGGED)"""
        return 'FLAGGED' if self.cleaned_bool(key, value) else 'UNFLAGGED'
    def convert_draft(self, key, value) -> str:
        """Messages that [have/do not have] the Draft flag set. (DRAFT, UNDRAFT)"""
        return 'DRAFT' if self.cleaned_bool(key, value) else 'UNDRAFT'
    def convert_deleted(self, key, value) -> str:
        """Messages that [have/do not have] the Deleted flag set. (DELETED, UNDELETED)"""
        return 'DELETED' if self.cleaned_bool(key, value) else 'UNDELETED'
    def convert_keyword(self, key, value) -> str:
        """Messages with the specified keyword flag set. (KEYWORD)"""
        # Keyword flags are IMAP atoms, hence not quoted.
        return 'KEYWORD {}'.format(self.cleaned_str(key, value))
    def convert_no_keyword(self, key, value) -> str:
        """Messages that do not have the specified keyword flag set. (UNKEYWORD)"""
        return 'UNKEYWORD {}'.format(self.cleaned_str(key, value))
    def convert_from_(self, key, value) -> str:
        """Messages that contain the specified string in the envelope structure's FROM field."""
        return 'FROM {}'.format(quote(self.cleaned_str(key, value)))
    def convert_to(self, key, value) -> str:
        """Messages that contain the specified string in the envelope structure's TO field."""
        return 'TO {}'.format(quote(self.cleaned_str(key, value)))
    def convert_subject(self, key, value) -> str:
        """Messages that contain the specified string in the envelope structure's SUBJECT field."""
        return 'SUBJECT {}'.format(quote(self.cleaned_str(key, value)))
    def convert_body(self, key, value) -> str:
        """Messages that contain the specified string in the body of the message."""
        return 'BODY {}'.format(quote(self.cleaned_str(key, value)))
    def convert_text(self, key, value) -> str:
        """Messages that contain the specified string in the header or body of the message."""
        return 'TEXT {}'.format(quote(self.cleaned_str(key, value)))
    def convert_bcc(self, key, value) -> str:
        """Messages that contain the specified string in the envelope structure's BCC field."""
        return 'BCC {}'.format(quote(self.cleaned_str(key, value)))
    def convert_cc(self, key, value) -> str:
        """Messages that contain the specified string in the envelope structure's CC field."""
        return 'CC {}'.format(quote(self.cleaned_str(key, value)))
    def convert_date(self, key, value) -> str:
        """
        Messages whose internal date (disregarding time and timezone)
        is within the specified date. (ON)
        """
        return 'ON {}'.format(self.format_date(self.cleaned_date(key, value)))
    def convert_date_gte(self, key, value) -> str:
        """
        Messages whose internal date (disregarding time and timezone)
        is within or later than the specified date. (SINCE)
        """
        return 'SINCE {}'.format(self.format_date(self.cleaned_date(key, value)))
    def convert_date_lt(self, key, value) -> str:
        """
        Messages whose internal date (disregarding time and timezone)
        is earlier than the specified date. (BEFORE)
        """
        return 'BEFORE {}'.format(self.format_date(self.cleaned_date(key, value)))
    def convert_sent_date(self, key, value) -> str:
        """
        Messages whose [RFC-2822] Date: header (disregarding time and timezone)
        is within the specified date. (SENTON)
        """
        return 'SENTON {}'.format(self.format_date(self.cleaned_date(key, value)))
    def convert_sent_date_gte(self, key, value) -> str:
        """
        Messages whose [RFC-2822] Date: header (disregarding time and timezone)
        is within or later than the specified date. (SENTSINCE)
        """
        return 'SENTSINCE {}'.format(self.format_date(self.cleaned_date(key, value)))
def convert_sent_date_lt(self, | |
<filename>tensor2tensor/models/video/next_frame_glow.py
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental testbed for nfg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import common_video
from tensor2tensor.layers import modalities
from tensor2tensor.models.research import glow
from tensor2tensor.models.research import glow_ops
from tensor2tensor.utils import registry
import tensorflow as tf
import tensorflow_probability as tfp
arg_scope = tf.contrib.framework.arg_scope
@registry.register_hparams
def next_frame_glow_hparams():
  """Hparams for next_frame_glow."""
  hparams = glow.glow_hparams()
  # Possible modes are conditional and unconditional
  hparams.add_hparam("gen_mode", "conditional")
  hparams.add_hparam("learn_top_scale", False)
  hparams.add_hparam("condition_all_levels", True)
  # For each video, substitutes "num_input_frames + num_output_frames" with a
  # randomly sampled patch of length "num_train_frames" during training.
  # -1 indicates that the entire video is used for training.
  hparams.add_hparam("num_train_frames", -1)
  # The following are hparams that model the latent transitions.
  # Encoder that maps the latents to a Gaussian distribution.
  # This function is used to model the prior over z_{t}. Can be,
  # Pointwise -> point-wise multiplication of z_{t-1}.
  # conv_net -> one-layer convolution over z_{t-1} .. z_{t - num_cond_latents}
  hparams.add_hparam("latent_dist_encoder", "conv_net")
  # Number of latents used in the encoder above.
  hparams.add_hparam("num_cond_latents", 1)
  hparams.add_hparam("latent_architecture", "glow_resnet")
  hparams.add_hparam("latent_apply_dilations", False)
  hparams.add_hparam("latent_dilation_rates", [1, 3])
  hparams.add_hparam("model_input", False)
  # Additionally condition on the first frame's latent (see get_cond_latents).
  hparams.add_hparam("cond_first_frame", False)
  # Use latent skip connections: mu(z_t) = z_{t-1} + latent_encoder(z_cond).
  hparams.add_hparam("latent_skip", True)
  hparams.add_hparam("latent_encoder_depth", 2)
  hparams.add_hparam("latent_encoder_width", 512)
  hparams.add_hparam("latent_dropout", 0.0)
  hparams.add_hparam("latent_pre_output_channels", 512)
  hparams.add_hparam("latent_activation", "relu")
  # Pretrains the glow encoder for "pretrain_steps" number of steps.
  # By default, don't pretrain and learn end-to-end
  hparams.add_hparam("pretrain_steps", -1)
  hparams.modality = {
      "inputs": modalities.VideoModalityL1Raw,
      "targets": modalities.VideoModalityL1Raw,
  }
  hparams.init_batch_size = 256
  hparams.batch_size = 32
  # Possible options are prev_frame, single_conv and normal
  hparams.top_prior = "single_conv"
  return hparams
@registry.register_hparams
def frame_glow_hparams():
  """Unconditional generation on video-frames."""
  # Start from the conditional defaults, then switch to unconditional,
  # single-frame training.
  base = next_frame_glow_hparams()
  base.num_train_frames = 1
  base.gen_mode = "unconditional"
  return base
def get_cond_latents(all_latents=None, hparams=None):
  """Get z^{cond}_{t} given z^{1..t-1}.

  Args:
    all_latents: list of list of tensors,
                 outer-size equals no.of time_steps-1
                 inner-size equals hparams.n_levels.
    hparams: See next_frame_glow_hparams.
  Returns:
    condition: bool tensor, whether to condition on cond_latents at all
               (False while the encoder is still pretraining).
    cond_latents: conditional latents at time-step t (None if unavailable).
  """
  cond_latents = None
  if hparams.gen_mode == "conditional":
    encoder = hparams.latent_dist_encoder
    if encoder in ("conv_net", "conv3d_net"):
      # Need num_cond_latents past frames, plus one more slot when the
      # first frame is prepended as extra conditioning.
      required = hparams.num_cond_latents + int(hparams.cond_first_frame)
      if len(all_latents) >= required:
        cond_latents = all_latents[-hparams.num_cond_latents:]
        if hparams.cond_first_frame:
          cond_latents = [all_latents[0]] + cond_latents
    elif encoder in ("pointwise", "conv_lstm"):
      if all_latents:
        cond_latents = all_latents[-1]
    # Conditioning only kicks in once the glow encoder pretraining is done.
    step = tf.train.get_or_create_global_step()
    condition = tf.greater(step, hparams.pretrain_steps)
  else:
    condition = tf.constant(False, dtype=tf.bool)
  return condition, cond_latents
@registry.register_model
class NextFrameGlow(glow.Glow):
"""Extend Glow for video."""
def init_preprocess_single(self, features):
for label in ["inputs", "targets"]:
features[label] = common_layers.convert_rgb_to_real(features[label])
return features
  def init_preprocess(self, features):
    """Preprocessing as per the input modality.
    Equivalent to calling self.bottom(features).
    Args:
      features: dict of strings to tensors.
    Returns:
      features: dict of strings to tensors.
    """
    # NOTE(review): despite the docstring, `features` must support `.map`
    # (e.g. a tf.data.Dataset of feature dicts), not a plain dict --
    # confirm against the data-init call site.
    return features.map(self.init_preprocess_single)
def preprocess(self, x):
"""Converts x from [0, 1] to [-0.5, 0.5].
All inputs are already normalized to be in the range [0, 1] through the
VideoModalityL1Raw modality.
Args:
x: 4-D Tensor.
Returns:
x: Scaled such that x lies in-between -0.5 and 0.5
"""
return x - 0.5
  def infer(self, features, *args, **kwargs):  # pylint: disable=arguments-differ
    """Autoregressively sample target frames from the learned prior.

    Builds the model graph once on the given features, then reuses its
    variables to decode num_target_frames frames one at a time, feeding each
    step's top latent through the latent-transition prior.

    Args:
      features: dict with "inputs" and "infer_targets" tensors.
    Returns:
      dict with "outputs" (predicted video), plus zeroed "targets"/"scores"
      to satisfy the video-decode API.
    """
    del args, kwargs
    # Make a copy of features that can be used in the call to self
    # that builds the graph.
    new_features = {}
    new_features["inputs"] = features["inputs"]
    new_features["targets"] = features["infer_targets"]
    _, _ = self(new_features)  # pylint: disable=not-callable
    if self.hparams.gen_mode == "unconditional":
      num_target_frames = 1
    else:
      num_target_frames = self.hparams.video_num_target_frames
    # Re-enter the scope built by the call above with reuse=True so sampling
    # shares the trained weights; init=False disables data-dependent actnorm
    # initialization during decoding.
    ops = [glow_ops.get_variable_ddi, glow_ops.actnorm]
    var_scope = tf.variable_scope("next_frame_glow/body", reuse=True)
    all_frames = []
    # If eps=None, images are sampled from the prior.
    with arg_scope(ops, init=False), var_scope:
      for target_frame in range(1, num_target_frames + 1):
        # subscript -> timestep, superscript -> level.
        # self.z_sample equals z^0_{t} (top-level latent)
        # (X_{t}, z^{1..l}_{t}) = Glow(z^0_{t}, z^{1..l}_{t-1})
        # Get current set of cond_latents.
        cond_level, cond_level_latents = get_cond_latents(
            self.all_level_latents, self.hparams)
        glow_vals = glow_ops.encoder_decoder(
            "codec", self.z_sample, self.hparams, eps=None, reverse=True,
            cond_latents=cond_level_latents, states=self.level_states,
            condition=cond_level, temperature=self.temperature)
        predicted_frame, _, curr_latents, self.level_states = glow_vals
        all_frames.append(predicted_frame)
        self.all_level_latents.append(curr_latents)
        # Compute z^0_{t+1} = f(z^0_{t})
        if target_frame < num_target_frames:
          cond_top, cond_top_latents = get_cond_latents(
              self.all_top_latents, self.hparams)
          prior_dist = self.top_prior(
              condition=cond_top, cond_latents=cond_top_latents)
          self.z_sample = prior_dist.sample()
          self.all_top_latents.append(self.z_sample)
    # Stack along time (axis 0), then swap to batch-major layout.
    all_frames = tf.stack(all_frames)
    predicted_video = common_video.swap_time_and_batch_axes(all_frames)
    # The video-decode API requires the predicted video to be the same shape
    # as the target-video. Hence, for unconditional generation,
    # tile across time to ensure same shape.
    if self.hparams.gen_mode == "unconditional":
      predicted_video = tf.tile(
          predicted_video, [1, self.hparams.video_num_target_frames, 1, 1, 1])
    # NOTE(review): self.scale (defined elsewhere in the class) presumably
    # maps model range back to pixel values -- confirm before relying on it.
    predicted_video = self.scale(predicted_video)
    # Output of a single decode / sample.
    output_features = {}
    output_features["targets"] = tf.zeros_like(predicted_video)
    output_features["outputs"] = predicted_video
    output_features["scores"] = tf.zeros_like(predicted_video)
    return output_features
  def get_squeeze_prior(self):
    """Model the prior over z_{t} as a function of X_{t-1}.

    Returns:
      objective: float, log-likelihood.
      dist: instance of tfp.distributions.Normal.
    Raises:
      ValueError: If input_height is not equal to input_width, not even
                  or if the image width is smaller than the latent width.
    """
    _, prior_height, _, prior_channels = self.z_top_shape
    _, input_height, input_width, _ = common_layers.shape_list(self.input_frame)
    if input_height != input_width:
      raise ValueError("input height should be equal to input width")
    if input_height % 2 != 0:
      raise ValueError("input height should be even")
    if input_height < prior_height:
      raise ValueError("input should be larger than the prior.")
    # mean, log_std = NN(X_0)
    # Reduce the spatial dimension by a factor of "squeeze_factor".
    # and convolve with a stride of 2
    # Combined, squeeze (factor input_height // (2 * prior_height)) followed
    # by the stride-2 conv brings the frame down to exactly prior_height.
    squeeze_factor = input_height // (2 * prior_height)
    x = glow_ops.squeeze(
        "prior_squeeze", self.input_frame, factor=squeeze_factor, reverse=False)
    # Zero-initialized conv: the prior starts as a standard Normal
    # (mean 0, log-scale 0 -> scale 1) before training.
    mean_and_log_std = glow_ops.conv(
        "prior_conv", x, 2*prior_channels, stride=[2, 2], apply_actnorm=False,
        conv_init="zeros")
    mean, log_scale = tf.split(mean_and_log_std, num_or_size_splits=2, axis=-1)
    return tfp.distributions.Normal(mean, tf.exp(log_scale))
  def top_cond_prior(self, name, cond_top_latents):
    """Maps the conditional top latents to a distribution.

    Args:
      name: variable scope.
      cond_top_latents: Tensor or a list of tensors.
                        Latent variables at the previous time-step.
                        If "pointwise", this is a single tensor.
                        If "conv_net", this is a list of tensors with length
                        equal to hparams.num_cond_latents.
    Returns:
      cond_dist: tfp.distributions.Normal
    Raises:
      ValueError: If cond_top_latents are not of the expected length.
    """
    # AUTO_REUSE: this is called once per decoded frame; all frames share
    # the same encoder weights.
    with tf.variable_scope("top", reuse=tf.AUTO_REUSE):
      if self.hparams.latent_dist_encoder == "pointwise":
        # Point-wise rescaling of the previous latent.
        last_latent = cond_top_latents
        top = glow_ops.scale_gaussian_prior(
            name, cond_top_latents, trainable=self.hparams.learn_top_scale)
      elif self.hparams.latent_dist_encoder == "conv_net":
        num_cond_latents = (self.hparams.num_cond_latents +
                            int(self.hparams.cond_first_frame))
        if len(cond_top_latents) != num_cond_latents:
          raise ValueError(
              "Expected length of cond_top_latents %d, got %d"
              % (num_cond_latents, len(cond_top_latents)))
        last_latent = cond_top_latents[-1]
        # Concatenate the conditioning latents along channels, then map the
        # stack to a distribution with the latent's channel count.
        output_channels = common_layers.shape_list(last_latent)[-1]
        cond_top_latents = tf.concat(cond_top_latents, axis=-1)
        # Maps the latent-stack to a distribution.
        top = glow_ops.latent_to_dist(
            name, cond_top_latents, hparams=self.hparams,
            output_channels=output_channels)
      elif self.hparams.latent_dist_encoder == "conv_lstm":
        last_latent = cond_top_latents
        output_channels = common_layers.shape_list(cond_top_latents)[-1]
        # (h_t, c_t) = LSTM(z_{t-1}; (h_{t-1}, c_{t-1}))
        # (mu_t, sigma_t) = conv(h_t)
        # NOTE: mutates self.top_state, carried across decoded frames.
        _, self.top_state = common_video.conv_lstm_2d(
            cond_top_latents, self.top_state, self.hparams.latent_encoder_width,
            kernel_size=3, name="conv_lstm")
        top = glow_ops.single_conv_dist(
            name, self.top_state.h, output_channels=output_channels)
      elif self.hparams.latent_dist_encoder == "conv3d_net":
        last_latent = cond_top_latents[-1]
        # Stack conditioning latents along a new time axis for the 3-D conv.
        top = glow_ops.temporal_latent_to_dist(
            "conv3d", tf.stack(cond_top_latents, axis=1), self.hparams)
      # mu(z_{t}) = z_{t-1} + latent_encoder(z_{cond})
      if self.hparams.latent_skip:
        top = tfp.distributions.Normal(last_latent + top.loc, top.scale)
    return top
def uncond_top_dist(self):
"""Get an unconditional prior distribution on the top latent."""
prior_dist = glow_ops.top_prior(
"unconditional", self.z_top_shape, learn_prior="single_conv")
return prior_dist.loc, prior_dist.scale
def cond_top_dist(self, cond_latents):
"""Get a conditional prior distribution on the top latent."""
prior_dist = self.top_cond_prior("conditional", cond_latents)
return prior_dist.loc, prior_dist.scale
  def top_prior(self, condition=False, cond_latents=None):
    """Objective based on the prior over latent z.

    Args:
      condition: Whether or not to condition on cond_latents.
      cond_latents: tensor or list of tensors depending on
                    hparams.latent_dist_encoder
    Returns:
      objective: float, log-likelihood of z under the prior.
      dist: instance of tfp.distributions.Normal, prior distribution.
    Raises:
      ValueError: If input is smaller than the prior, uneven height
                  or rectangular.
    """
    # Normalize to a bool tensor; a graph-mode tf.cond needs a tensor pred.
    if isinstance(condition, bool):
      condition = tf.constant(condition, dtype=tf.bool)
    # Book-keeping of every conditioning decision made during the build.
    self._all_conds.append(condition)
    if self.hparams.gen_mode == "conditional":
      # cond_top_latents is None when
      # latent_dist_encoder is a lstm and frame_ind == 0.
      # latent_dist_encoder is conv_net and frame_ind < num_cond_frames.
      # The marginal (unconditional) dist is built unconditionally so its
      # variables exist on every code path.
      marginal_mean, marginal_scale = self.uncond_top_dist()
      if cond_latents is None:
        mean, scale = marginal_mean, marginal_scale
      else:
        # tf.cond switches at runtime between the transition prior and the
        # marginal prior (the latter while pretraining).
        cond_mean, cond_scale = self.cond_top_dist(cond_latents)
        mean, scale = tf.cond(
            condition, lambda: (cond_mean, cond_scale),
            lambda: (marginal_mean, marginal_scale))
      return glow_ops.TemperedNormal(mean, scale, self.temperature)
    if self.hparams.top_prior == "prev_frame":
      return self.get_squeeze_prior()
    else:
      return super(NextFrameGlow, self).top_prior()
def get_z_top_shape(self, init=False):
"""Get latent shape | |
<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DashboardArgs', 'Dashboard']
# NOTE: tfgen-generated bridge class (see file header) -- keep hand edits to
# documentation only; behavioral changes belong in the upstream generator.
@pulumi.input_type
class DashboardArgs:
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 dashboard_properties: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a Dashboard resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to
               create the dashboard.
        :param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Shared Dashboard. This should be be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        # Only resource_group_name is required; every other input is
        # set on the instance only when explicitly provided.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if dashboard_properties is not None:
            pulumi.set(__self__, "dashboard_properties", dashboard_properties)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group in which to
        create the dashboard.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="dashboardProperties")
    def dashboard_properties(self) -> Optional[pulumi.Input[str]]:
        """
        JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
        """
        return pulumi.get(self, "dashboard_properties")
    @dashboard_properties.setter
    def dashboard_properties(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dashboard_properties", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Shared Dashboard. This should be be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
# NOTE: tfgen-generated bridge class (see file header) -- keep hand edits to
# documentation only; behavioral changes belong in the upstream generator.
@pulumi.input_type
class _DashboardState:
    def __init__(__self__, *,
                 dashboard_properties: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering Dashboard resources.
        :param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Shared Dashboard. This should be be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to
               create the dashboard.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        # All state inputs are optional: this class is used for lookups and
        # filtering, so each is set only when explicitly provided.
        if dashboard_properties is not None:
            pulumi.set(__self__, "dashboard_properties", dashboard_properties)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="dashboardProperties")
    def dashboard_properties(self) -> Optional[pulumi.Input[str]]:
        """
        JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
        """
        return pulumi.get(self, "dashboard_properties")
    @dashboard_properties.setter
    def dashboard_properties(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dashboard_properties", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Shared Dashboard. This should be be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource group in which to
        create the dashboard.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
class Dashboard(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dashboard_properties: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a shared dashboard in the Azure Portal.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
config = pulumi.Config()
md_content = config.get("mdContent")
if md_content is None:
md_content = "# Hello all :)"
video_link = config.get("videoLink")
if video_link is None:
video_link = "https://www.youtube.com/watch?v=......"
current = azure.core.get_subscription()
my_group = azure.core.ResourceGroup("my-group", location="West Europe")
my_board = azure.dashboard.Dashboard("my-board",
resource_group_name=my_group.name,
location=my_group.location,
tags={
"source": "managed",
},
dashboard_properties=f\"\"\"{{
"lenses": {{
"0": {{
"order": 0,
"parts": {{
"0": {{
"position": {{
"x": 0,
"y": 0,
"rowSpan": 2,
"colSpan": 3
}},
"metadata": {{
"inputs": [],
"type": "Extension/HubsExtension/PartType/MarkdownPart",
"settings": {{
"content": {{
"settings": {{
"content": "{md_content}",
"subtitle": "",
"title": ""
}}
}}
}}
}}
}},
"1": {{
"position": {{
"x": 5,
"y": 0,
"rowSpan": 4,
"colSpan": 6
}},
"metadata": {{
"inputs": [],
"type": "Extension/HubsExtension/PartType/VideoPart",
"settings": {{
"content": {{
"settings": {{
"title": "Important Information",
"subtitle": "",
"src": "{video_link}",
"autoplay": true
}}
}}
}}
}}
}},
"2": {{
"position": {{
"x": 0,
"y": 4,
"rowSpan": 4,
"colSpan": 6
}},
"metadata": {{
"inputs": [
{{
"name": "ComponentId",
"value": "/subscriptions/{current.subscription_id}/resourceGroups/myRG/providers/microsoft.insights/components/myWebApp"
}}
],
"type": "Extension/AppInsightsExtension/PartType/AppMapGalPt",
"settings": {{}},
"asset": {{
"idInputName": "ComponentId",
"type": "ApplicationInsights"
}}
}}
}}
}}
}}
}},
"metadata": {{
"model": {{
"timeRange": {{
"value": {{
"relative": {{
"duration": 24,
"timeUnit": 1
}}
}},
"type": "MsPortalFx.Composition.Configuration.ValueTypes.TimeRange"
}},
"filterLocale": {{
"value": "en-us"
}},
"filters": {{
"value": {{
"MsPortalFx_TimeRange": {{
"model": {{
"format": "utc",
"granularity": "auto",
"relative": "24h"
}},
"displayCache": {{
"name": "UTC Time",
"value": "Past 24 hours"
}},
"filteredPartIds": [
"StartboardPart-UnboundPart-ae44fef5-76b8-46b0-86f0-2b3f47bad1c7"
]
}}
}}
}}
}}
}}
}}
\"\"\")
```
It is recommended to follow the steps outlined
[here](https://docs.microsoft.com/en-us/azure/azure-portal/azure-portal-dashboards-create-programmatically#fetch-the-json-representation-of-the-dashboard) to create a Dashboard in the Portal and extract the relevant JSON to use in this resource. From the extracted JSON, the contents of the `properties: {}` object can be used. Variables can be injected as needed - see above example.
## Import
Dashboards can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:dashboard/dashboard:Dashboard my-board /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg1/providers/Microsoft.Portal/dashboards/00000000-0000-0000-0000-000000000000
```
Note the URI in the above sample can be found using the Resource Explorer tool in the Azure Portal.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] dashboard_properties: JSON data representing dashboard body. See above for details on how to obtain this from the Portal.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Shared Dashboard. This should be 64 chars max, only alphanumeric and hyphens (no spaces). For a more friendly display name, add the `hidden-title` tag.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to
create the dashboard.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DashboardArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a shared dashboard in the Azure Portal.
## Example Usage
```python
import pulumi
import pulumi_azure | |
as applied by the Moses toolkit.
"""
def __init__(self,alignment_file):
    """Remember the alignment-file path and initialize the frequency counters.

    word_pairs[src][target] counts co-occurrences of an aligned word pair;
    word_source / word_target hold the marginal frequency of each word.
    """
    self.alignment_file = alignment_file
    self.word_pairs = defaultdict(lambda: defaultdict(int))
    self.word_source = defaultdict(int)
    self.word_target = defaultdict(int)
def load_word_pairs(self,src_lang,target_lang):
    """main function. overwrite this to import data in different format.

    Reads ' ||| '-separated byte lines from the alignment file and updates
    the pair and marginal counters. src_lang and target_lang are unused by
    this implementation; overriding importers may need them.
    """
    fileobj = handle_file(self.alignment_file,'open','r')
    for line in fileobj:
        # fields: source ||| target ||| ... (byte strings)
        line = line.split(b' ||| ')
        # a trailing ' |||' means the last field is empty; normalize it
        if line[-1].endswith(b' |||'):
            line[-1] = line[-1][:-4]
            line.append(b'')
        src = line[0]
        target = line[1]
        self.word_pairs[src][target] += 1
        self.word_source[src] += 1
        self.word_target[target] += 1
def dot_product(a,b):
    """Return the dot product of two equal-length numeric sequences.

    Kept as an explicit index-tracking loop on purpose: this form is much
    faster under PyPy than enumerate()/map()-based variants.
    """
    total = 0
    pos = 0
    for value in a:
        total += value * b[pos]
        pos += 1
    return total
def priority_sort_models(models):
    """Order models so primary models come before supplementary ones.

    Each input item is a (model, priority) pair; the output items are
    (model, priority, original_index) triples sorted by priority, so the
    caller knows which weight belongs to which model.
    """
    by_priority = sorted(enumerate(models), key=lambda pair: pair[1][1])
    return [(model, priority, idx) for idx, (model, priority) in by_priority]
def cross_entropy(model_interface,reference_interface,weights,score,mode,flags):
    """calculate cross entropy given all necessary information.
    don't call this directly, but use one of the Combine_TMs methods.

    Accumulates per-feature cross-entropy over the reference phrase pairs.
    In 'compare_cross-entropies' mode one result column is produced per
    model; otherwise a single combined column is computed.
    """
    weights = normalize_weights(weights,mode,flags)
    if 'compare_cross-entropies' in flags and flags['compare_cross-entropies']:
        num_results = len(model_interface.models)
    else:
        num_results = 1
    # cross_entropies[feature][result-column]; the counters below are per column
    cross_entropies = [[0]*num_results for i in range(model_interface.number_of_features)]
    oov = [0]*num_results
    oov2 = 0
    other_translations = [0]*num_results
    ignored = [0]*num_results
    n = [0]*num_results
    total_pairs = 0
    for src in reference_interface.word_pairs:
        for target in reference_interface.word_pairs[src]:
            # c is the frequency of this (src, target) pair in the reference set
            c = reference_interface.word_pairs[src][target]
            for i in range(num_results):
                if src in model_interface.phrase_pairs and target in model_interface.phrase_pairs[src]:
                    if ('compare_cross-entropies' in flags and flags['compare_cross-entropies']) or ('intersected_cross-entropies' in flags and flags['intersected_cross-entropies']):
                        if 0 in model_interface.phrase_pairs[src][target][0][0]: #only use intersection of models for comparability
                            # update unknown words statistics
                            if model_interface.phrase_pairs[src][target][0][0][i]:
                                ignored[i] += c
                            elif src in model_interface.phrase_source and model_interface.phrase_source[src][i]:
                                other_translations[i] += c
                            else:
                                oov[i] += c
                            continue
                        # per-model one-hot weights when comparing; shared weights when intersecting
                        if ('compare_cross-entropies' in flags and flags['compare_cross-entropies']):
                            tmp_weights = [[0]*i+[1]+[0]*(num_results-i-1)]*model_interface.number_of_features
                        elif ('intersected_cross-entropies' in flags and flags['intersected_cross-entropies']):
                            tmp_weights = weights
                        features = score(tmp_weights,src,target,model_interface,flags)
                    else:
                        features = score(weights,src,target,model_interface,flags)
                    #if weight is so low that feature gets probability zero
                    if 0 in features:
                        #sys.stderr.write('Warning: 0 probability in model {0}: source phrase: {1!r}; target phrase: {2!r}\n'.format(i,src,target))
                        #sys.stderr.write('Possible reasons: 0 probability in phrase table; very low (or 0) weight; recompute lexweight and different alignments\n')
                        #sys.stderr.write('Phrase pair is ignored for cross_entropy calculation\n\n')
                        continue
                    n[i] += c
                    for j in range(model_interface.number_of_features):
                        cross_entropies[j][i] -= log(features[j],2)*c
                elif src in model_interface.phrase_source and not ('compare_cross-entropies' in flags and flags['compare_cross-entropies']):
                    other_translations[i] += c
                else:
                    # oov2 is incremented once per result column, hence divided below
                    oov2 += c
            total_pairs += c
    oov2 = int(oov2/num_results)
    # normalize accumulated entropies by the matched frequency mass
    for i in range(num_results):
        try:
            for j in range(model_interface.number_of_features):
                cross_entropies[j][i] /= n[i]
        except ZeroDivisionError:
            sys.stderr.write('Warning: no matching phrase pairs between reference set and model\n')
            for j in range(model_interface.number_of_features):
                cross_entropies[j][i] = 0
    if 'compare_cross-entropies' in flags and flags['compare_cross-entropies']:
        return [tuple([ce[i] for ce in cross_entropies]) + (other_translations[i],oov[i],ignored[i],n[i],total_pairs) for i in range(num_results)], (n[0],total_pairs,oov2)
    else:
        return tuple([ce[0] for ce in cross_entropies]) + (other_translations[0],oov2,total_pairs)
def cross_entropy_light(model_interface,reference_interface,weights,score,mode,flags,cache):
    """Accumulate per-feature cross-entropy over cached reference phrase pairs.

    Speed-optimized variant of cross_entropy(): no summary statistics, no
    final normalization, and the relevant (src, target, frequency) triples
    come pre-computed in `cache`. Don't call directly; used by the weight
    optimizers.
    """
    weights = normalize_weights(weights,mode,flags)
    n_features = model_interface.number_of_features
    entropies = [0] * n_features
    for src, target, freq in cache:
        features = score(weights,src,target,model_interface,flags,cache=True)
        # a zero probability would make log() blow up; skip such pairs,
        # exactly as cross_entropy() does
        if 0 in features:
            continue
        for idx in range(n_features):
            entropies[idx] -= log(features[idx], 2) * freq
    return entropies
def _get_reference_cache(reference_interface,model_interface):
    """Collect the reference phrase pairs that also occur in the model.

    Returns (cache, n): cache is a list of (src, target, frequency) triples
    restricted to pairs known to the model, and n is the total frequency
    mass, used later to normalize cross-entropy.
    """
    cache = []
    total = 0
    known = model_interface.phrase_pairs
    for src, targets in reference_interface.word_pairs.items():
        if src not in known:
            continue
        candidates = known[src]
        for target, freq in targets.items():
            if target in candidates:
                cache.append((src, target, freq))
                total += freq
    return cache, total
def _get_lexical_filter(reference_interface,model_interface):
    """Gather the word pairs needed for perplexity optimization.

    Returns (e2f_filter, f2e_filter): for every reference phrase pair that
    is also present in the model, the aligned word pairs in both directions.
    These dicts let callers load fewer lexical data into memory.
    """
    e2f_filter = defaultdict(set)
    f2e_filter = defaultdict(set)
    phrase_pairs = model_interface.phrase_pairs
    for src, targets in reference_interface.word_pairs.items():
        for target in targets:
            if src not in phrase_pairs or target not in phrase_pairs[src]:
                continue
            e2f_alignment, f2e_alignment = model_interface.get_word_alignments(src, target)
            for s_word, t_words in e2f_alignment:
                e2f_filter[s_word].update(t_words)
            for t_word, s_words in f2e_alignment:
                f2e_filter[t_word].update(s_words)
    return e2f_filter, f2e_filter
def _hillclimb_move(weights,stepsize,mode,flags):
    """Yield neighbour weight vectors: each weight nudged up, then down, by stepsize.

    Downward moves that would push a weight below 1e-10 are skipped; every
    candidate vector is normalized before being yielded.
    """
    for pos in range(len(weights)):
        raised = weights[:pos] + [weights[pos] + stepsize] + weights[pos+1:]
        yield normalize_weights(raised, mode, flags)
    for pos in range(len(weights)):
        lowered = weights[pos] - stepsize
        if lowered >= 1e-10:
            yield normalize_weights(weights[:pos] + [lowered] + weights[pos+1:], mode, flags)
def _hillclimb(scores,best_weights,objective,model_interface,reference_interface,score_function,mode,flags,precision,cache,n):
    """first (deprecated) implementation of iterative weight optimization.

    Greedy neighbourhood search: probe all single-weight moves of the
    current stepsize; halve the stepsize when no move improves, double it
    after 10 consecutive productive iterations. `scores` memoizes the
    cross-entropy of every weight vector tried so far.
    """
    best = objective(best_weights)
    i = 0 #counts number of iterations with same stepsize: if greater than 10, it is doubled
    stepsize = 512 # initial stepsize
    move = 1 #whether we found a better set of weights in the current iteration. if not, it is halfed
    sys.stderr.write('Hillclimb: step size: ' + str(stepsize))
    while stepsize > 0.0078:
        if not move:
            # no improvement at this stepsize: refine the search
            stepsize /= 2
            sys.stderr.write(' ' + str(stepsize))
            i = 0
            move = 1
            continue
        move = 0
        for w in _hillclimb_move(list(best_weights),stepsize,mode,flags):
            weights_tuple = tuple(w)
            if weights_tuple in scores:
                continue
            scores[weights_tuple] = cross_entropy_light(model_interface,reference_interface,[w for m in range(model_interface.number_of_features)],score_function,mode,flags,cache)
            # require improvement beyond `precision` to avoid oscillation
            if objective(weights_tuple)+precision < best:
                best = objective(weights_tuple)
                best_weights = weights_tuple
                move = 1
        if i and not i % 10:
            sys.stderr.write('\nIteration '+ str(i) + ' with stepsize ' + str(stepsize) + '. current cross-entropy: ' + str(best) + '- weights: ' + str(best_weights) + ' ')
            # many productive iterations in a row: search more coarsely again
            stepsize *= 2
            sys.stderr.write('\nIncreasing stepsize: '+ str(stepsize))
            i = 0
        i += 1
    return best_weights
def optimize_cross_entropy_hillclimb(model_interface,reference_interface,initial_weights,score_function,mode,flags,precision=0.000001):
    """find weights that minimize cross-entropy on a tuning set
    deprecated (default is now L-BFGS (optimize_cross_entropy)), but left in for people without SciPy

    Fix: the per-feature objective lambdas previously closed over the loop
    variable `i` late; under Python 3 comprehension scoping every objective
    would read the final value of `i`. Binding `i` as a default argument
    makes each objective score its own feature in both Python 2 and 3.
    """
    scores = {}
    best_weights = tuple(initial_weights[0])
    cache,n = _get_reference_cache(reference_interface,model_interface)
    # each objective is a triple: which score to minimize from cross_entropy(),
    # which weights to update accordingly, and a comment that is printed.
    # `i=i` binds the feature index at lambda-creation time (late-binding fix).
    objectives = [(lambda x, i=i: scores[x][i]/n,
                   [i],
                   'minimize cross-entropy for feature {0}'.format(i))
                  for i in range(model_interface.number_of_features)]
    scores[best_weights] = cross_entropy_light(model_interface,reference_interface,initial_weights,score_function,mode,flags,cache)
    final_weights = initial_weights[:]
    final_cross_entropy = [0]*model_interface.number_of_features
    for i,(objective, features, comment) in enumerate(objectives):
        # restart each feature's search from the best memoized vector so far
        best_weights = min(scores,key=objective)
        sys.stderr.write('Optimizing objective "' + comment +'"\n')
        best_weights = _hillclimb(scores,best_weights,objective,model_interface,reference_interface,score_function,feature_specific_mode(mode,i,flags),flags,precision,cache,n)
        sys.stderr.write('\nCross-entropy:' + str(objective(best_weights)) + ' - weights: ' + str(best_weights)+'\n\n')
        for j in features:
            final_weights[j] = list(best_weights)
            final_cross_entropy[j] = objective(best_weights)
    return final_weights,final_cross_entropy
def optimize_cross_entropy(model_interface,reference_interface,initial_weights,score_function,mode,flags):
    """find weights that minimize cross-entropy on a tuning set
    Uses L-BFGS optimization and requires SciPy

    Fix: the objective lambdas previously closed over the loop variable `i`
    late; under Python 3 comprehension scoping every objective would read
    the final value of `i`. Binding `i` as a default argument makes each
    objective score its own feature in both Python 2 and 3.
    """
    if optimizer != 'l-bfgs':
        # SciPy import failed at module load time; degrade gracefully
        sys.stderr.write('SciPy is not installed. Falling back to naive hillclimb optimization (instead of L-BFGS)\n')
        return optimize_cross_entropy_hillclimb(model_interface,reference_interface,initial_weights,score_function,mode,flags)
    cache,n = _get_reference_cache(reference_interface,model_interface)
    # each objective minimizes the cross-entropy of one feature; the first
    # model's weight is pinned to 1 and the remaining weights are optimized
    # (normalize_weights is applied afterwards). `i=i` is the late-binding fix.
    objectives = [(lambda w, i=i: cross_entropy_light(model_interface,reference_interface,[[1]+list(w) for m in range(model_interface.number_of_features)],score_function,feature_specific_mode(mode,i,flags),flags,cache)[i],
                   [i],
                   'minimize cross-entropy for feature {0}'.format(i))
                  for i in range(model_interface.number_of_features)] #optimize cross-entropy for p(s|t)
    final_weights = initial_weights[:]
    final_cross_entropy = [0]*model_interface.number_of_features
    for i,(objective, features, comment) in enumerate(objectives):
        sys.stderr.write('Optimizing objective "' + comment +'"\n')
        initial_values = [1]*(len(model_interface.models)-1) # we leave value of first model at 1 and optimize all others (normalized of course)
        best_weights, best_point, data = fmin_l_bfgs_b(objective,initial_values,approx_grad=True,bounds=[(0.000000001,None)]*len(initial_values))
        best_weights = normalize_weights([1]+list(best_weights),feature_specific_mode(mode,i,flags),flags)
        sys.stderr.write('Cross-entropy after L-BFGS optimization: ' + str(best_point/n) + ' - weights: ' + str(best_weights)+'\n')
        for j in features:
            final_weights[j] = list(best_weights)
            final_cross_entropy[j] = best_point/n
    return final_weights,final_cross_entropy
def feature_specific_mode(mode,i,flags):
    """Return the effective combination mode for feature i.

    In mode 'counts' only the four default Moses features (e2f, e2f_lex,
    f2e, f2e_lex) can be recomputed from raw frequencies; every other
    feature falls back to interpolation. This function mostly serves
    cosmetic purposes (normalizing a single weight vector for logging),
    since normalize_weights also handles a mix of interpolated and
    recomputed features.
    """
    countable = (flags['i_e2f'], flags['i_e2f_lex'], flags['i_f2e'], flags['i_f2e_lex'])
    if mode == 'counts' and i not in countable:
        return 'interpolate'
    return mode
def redistribute_probability_mass(weights,src,target,interface,flags,mode='interpolate'):
"""the conditional probability p(x|y) is undefined for cases where p(y) = 0
this function redistributes the probability mass to only consider models for which p(y) > 0
"""
i_e2f = flags['i_e2f']
i_e2f_lex = | |
#!/usr/bin/env python
# python version: 2.7.10
import os
import sys,urllib,string,re
import urllib2
import webbrowser
from time import localtime, strftime
import json
global DEBUG
DEBUG = 0
FILENAME_SCRIPT_START_DAYTIME = '.script_starttime.conf'
# KEY name list
SUBJECT_KEY = 'subject'
_NUMBER_KEY = '_number'
OWNER_KEY = 'owner'
NAME_KEY = 'name'
EMAIL_KEY = 'email'
CHANGE_ID_KEY = 'change_id'
UPDATED_KEY = 'updated'
BRANCH_KEY = 'branch'
PROJECT_KEY = 'project'
# gerrit server url
URL_COMMIT_PRE = 'http://diana.devops.letv.com/#/c/'
def log(text):
if DEBUG:
print text
def exit(tempFileName):
    """Remove the temp file (unless debugging), record the script start time,
    and terminate.

    NOTE: intentionally shadows the builtin exit(); always terminates the
    process with status 1, even on a normal run.
    """
    if DEBUG != 1 and len(tempFileName) > 0:
        try:
            os.remove(tempFileName)
        except OSError, e:
            # best effort: the file may be locked or already gone
            print 'Temp file:' + tempFileName + ' delete maybe failed. Please remove it manually!'
    # saveScriptStartTime() is defined elsewhere in this script; it persists
    # the current run's start time for the next incremental query
    saveScriptStartTime()
    print 'End'
    sys.exit(1)
# file handler is returned by this function
def getNetJsonData(url):
print url
log('Using firefox cookie to open this url:{!s}'.format(url))
cj = firefox()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0');
req.add_header('Accept', 'application/json');
req.add_header('Accept-Language', 'aen-US,en;q=0.5');
req.add_header('Accept-Encoding', 'deflate');
req.add_header('Content-Type', 'application/json; charset=UTF-8');
req.add_header('x-content-type-options', 'nosniff');
fHandler = opener.open(req)
return fHandler
# format net json data. all json items are in one line after get it from network.
# we need formatted it to one temp file and parse it
def saveNetJsonData(netDataFileHandler, tempJsonFileName):
    """Copy the JSON body from the network handle into a temp file.

    Drops Gerrit's ")]}'" anti-XSSI prefix line, which would otherwise break
    the JSON parser. Returns the number of lines written.
    """
    # save formated data to one temporary file result.json
    jsonFileHandler = open(tempJsonFileName, 'w')
    jsonLines = 0
    while 1:
        line = netDataFileHandler.readline()
        if len(line) == 0:
            break
        # sometimes ")]}'" exited in first line and it's blocked json parser
        if jsonLines == 0 and line.find('[') < 0:
            log("')]}' header exited. remove it.")
            continue
        jsonFileHandler.write(line)
        jsonLines += 1
    jsonFileHandler.close()
    # fix: the old message had no {0} placeholder, so the line count was
    # never actually printed; also removed the unused json_items_number local
    log('json lines:{0}'.format(jsonLines))
    return jsonLines
# 1. remove unicode format
# 2. remove not used '"'
# 3. remove space in start and end of the string
# 4. remove ','
def formatString(stringData):
    """Normalize a JSON-extracted string for the hand-rolled CSV output."""
    # Park escaped quotes in a sentinel first. Under Python 2 (this script's
    # target, see the header), '\u0022' in a plain byte-string literal is the
    # literal six characters "\u0022", NOT an escape -- so the round-trip below
    # protects embedded quotes while the outer quotes are stripped.
    stringData = stringData.replace('\\"','\u0022')
    # drop surrounding whitespace, then the outer quotes
    stringData = stringData.strip()
    stringData = stringData.strip('"')
    # restore the parked quotes and decode common JSON \uXXXX escapes
    stringData = stringData.replace('\u0022','"')
    stringData = stringData.replace('\u003e','>')
    stringData = stringData.replace('\u0027','\'')
    # commas would break the comma-separated output format
    stringData = stringData.replace(',',' ')
    return stringData
def itemInList(itemList, item):
    """Return 1 when *item* contains any entry of *itemList* as a
    case-insensitive substring, else 0."""
    needle = item.strip('\n').strip().lower()
    for candidate in itemList:
        candidate = candidate.strip('\n').strip().lower()
        #print 'i:' + candidate + '-item:' + needle
        if needle.find(candidate) >= 0:
            return 1
    return 0
NAME_LIST_FILE_NAME = 'namelist.txt'
def loadNameList():
try:
print '\nLoading name list...\n'
fileH = open(NAME_LIST_FILE_NAME, 'r')
lines = fileH.readlines()
fileH.close()
except IOError, e:
lines = []
return lines
def writeDataToExcel(dictList, fileName, filterByNameOrMailList):
    """Write the parsed Gerrit change dicts to a comma-separated file.

    dictList: list of change dicts as returned by the Gerrit REST API.
    fileName: output CSV path.
    filterByNameOrMailList: '1' enables filtering by namelist.txt entries
    (owner name or e-mail); any other value exports every change.
    Returns the number of rows written.
    """
    print 'saving data to file:' + fileName
    filterList = []
    if filterByNameOrMailList == '1':
        #load name or mail list
        filterList = loadNameList()
    excelFileHandler = open(fileName, 'w')
    excelFileHandler.write('Title,Link,Branch,Project, Updated,owner,Comments\n')
    resultCount = 0
    for item in dictList:
        #print item
        # progress indicator: one dot per change
        print '.',
        sys.stdout.flush()
        subject = item[SUBJECT_KEY]
        subject = formatString(subject)
        gerritNumber = item[_NUMBER_KEY]
        branch = item[BRANCH_KEY]
        project = item[PROJECT_KEY]
        # convert int value to string
        if isinstance(gerritNumber, int) == True:
            gerritNumber = str(gerritNumber)
        change_id = item[CHANGE_ID_KEY]
        change_id = change_id.strip()
        # drop a trailing quote left over from the raw JSON text
        if change_id[-1:] == '"':
            change_id = change_id[:-1]
        updated = item[UPDATED_KEY]
        updated = updated.strip()
        # keep only the first 19 chars: 'YYYY-MM-DD HH:MM:SS'
        updated = updated[:19]
        ownerName = ''
        ownerEmail = ''
        isInList = 0
        try:
            #print 'owner info:',
            ownerInfo = item[OWNER_KEY]
            #print ownerInfo
            ownerName = ownerInfo[NAME_KEY]
            ownerEmail = ownerInfo[EMAIL_KEY]
        except KeyError, e:
            print 'No owner info found for gerrit number:' + gerritNumber
        if len(filterList) > 0:
            try:
                # match by owner name first, then fall back to e-mail
                isInList = itemInList(filterList, ownerName)
                log('filter by name result:{0}'.format(isInList))
                if isInList != 1:
                    isInList = itemInList(filterList, ownerEmail)
                    log('filter by email result:{0}'.format(isInList))
                # if item is not match filter, ignore it.
                if isInList != 1:
                    continue
            except KeyError, e:
                print 'No owner info found!'
                continue
        if len(subject) > 0 and len(gerritNumber) > 0:
            pid = ''
            assertValue = ''
            link = URL_COMMIT_PRE + gerritNumber
            #pValue = getPID(gerritNumber)
            #sepIndex = pValue.find(':')
            #if sepIndex > 0:
            #    pid = pValue[:sepIndex]
            #    assertValue = pValue[sepIndex + 1:]
            #log(pValue)
            # hand-rolled CSV row; only the subject is quoted because it may
            # contain commas (formatString strips commas from it anyway)
            lineData = '"' + subject + '"'
            lineData += ',' + link
            lineData += ',' + branch
            lineData += ',' + project
            lineData += ',' + updated
            lineData += ',' + ownerEmail
            lineData += ',' + assertValue # comments. empty first
            lineData += '\n'
            log(lineData)
            resultCount += 1
            excelFileHandler.write(lineData.encode("utf-8"))
        else:
            print 'Error dict'
            print item
            print ' '
    excelFileHandler.close()
    return resultCount
def parserJsonFile(fileName,excelFileName,filterByNameOrMailList):
    """Load the saved JSON list of changes and export it to the CSV report.

    Returns the number of exported rows.
    """
    handle = open(fileName, 'r')
    changes = json.load(handle)
    handle.close()
    log('json items count:{0}'.format(len(changes)))
    log('parsing ended.')
    # the json file is parsed; write the corresponding data to the CSV file
    log('Begin get patch info for every commit')
    return writeDataToExcel(changes, excelFileName, filterByNameOrMailList)
BRANCH_COND_HISTORY_SAVING_FILE_NAME = '.branch.history'
def readHistoryBranchCondition():
    """Return previously used branch names (one per line); [] when no
    history file exists yet."""
    try:
        historyFile = open(BRANCH_COND_HISTORY_SAVING_FILE_NAME, 'r')
        history = historyFile.readlines()
        historyFile.close()
    except IOError:
        history = []
    return history
def writeQueryBranchCondition(branch, isCreateNew):
    """Persist *branch* into the branch-history file unless already recorded.

    isCreateNew == 1 truncates the history file; any other value appends.
    """
    branch = branch.strip('\n').strip()
    if isCreateNew == 1:
        flag = 'w'
    else:
        flag = 'a+'
    # skip duplicates already present in the history
    for line in readHistoryBranchCondition():
        if line.strip('\n').strip() == branch:
            return
    fileH = open(BRANCH_COND_HISTORY_SAVING_FILE_NAME, flag)
    # fix: the old code pointlessly bound write()'s None return to `line`
    fileH.write(branch + '\n')
    fileH.close()
PROJECT_COND_HISTORY_SAVING_FILE_NAME = '.project.history'
def readHistoryProjectCondition():
    """Return previously used project (git) names (one per line); [] when no
    history file exists yet."""
    try:
        historyFile = open(PROJECT_COND_HISTORY_SAVING_FILE_NAME, 'r')
        history = historyFile.readlines()
        historyFile.close()
    except IOError:
        history = []
    return history
def writeQueryProjectCondition(project, isCreateNew):
    """Persist *project* into the project-history file unless already recorded.

    isCreateNew == 1 truncates the history file; any other value appends.
    """
    project = project.strip('\n').strip()
    if isCreateNew == 1:
        flag = 'w'
    else:
        flag = 'a+'
    # skip duplicates already present in the history
    for line in readHistoryProjectCondition():
        if line.strip('\n').strip() == project:
            return
    fileH = open(PROJECT_COND_HISTORY_SAVING_FILE_NAME, flag)
    # fix: the old code pointlessly bound write()'s None return to `line`
    fileH.write(project + '\n')
    fileH.close()
# check if user want to exit
def checkIfExit(inputValue):
    """Terminate via exit('') when the user typed one of the quit keywords;
    otherwise do nothing."""
    if not inputValue:
        return
    if inputValue in ('e', 'E', 'exit', 'quit', 'q'):
        exit('')
def inputQueryCondition():
branch = ''
project = ''
status = ''
owner = ''
projectCondition = ''
branchCondition = ''
statusCondition = ''
ownerCondition = ''
ret = []
print '\nPlease input query condition following guide!'
while 1:
if len(branch) <= 0:
historyBranchs = readHistoryBranchCondition()
if len(historyBranchs) > 0:
print '\nPlease select git name in below list or input it manually.'
else:
print '\nPlease input git name.'
index = 1
for line in historyBranchs:
if len(line) == 0:
continue
line = line.strip()
line = line.strip('\n')
print '{0} : {1}'.format(index, line)
index += 1
if index > 1:
prompt = '\nbranch(1):'
else:
prompt = '\nbranch(ruby_dev_leui):'
branch = raw_input(prompt)
branch = branch.strip()
checkIfExit(branch)
value = -1
if len(branch) > 0:
if len(historyBranchs) > 0:
if branch.isdigit():
value = string.atoi(branch)
if value > len(historyBranchs):
value = 1
elif len(historyBranchs) > 0:
value = 1
if len(historyBranchs) > 0 and value > 0 and value <= len(historyBranchs):
branch = historyBranchs[value -1]
branch = branch.strip('\n').strip()
#value = 1
#if len(historyBranchs) > 0 and len(branch) > 0 and branch.isdigit():
# value = string.atoi(branch)
#if len(historyBranchs) > 0 and value > 0 and value <= len(historyBranchs):
# branch = historyBranchs[value - 1]
# branch = branch.strip('\n').strip()
#else:
# branch = 'l-mr1-yukonodm'
branchCondition = URL_BRANCH_PRE + branch
#if len(branch) > 0:
# branchCondition = URL_BRANCH_PRE + branch
#elif len(historyBranch) > 0:
# branch = historyBranch
#
print '\nbranch:' + branch
if len(project) <= 0:
lines = readHistoryProjectCondition()
if len(lines) > 0:
print '\nPlease select git name in below list or input it manually.'
else:
print '\nPlease input git name.'
index = 1
for line in lines:
if len(line) == 0:
continue
line = line.strip()
line = line.strip('\n')
print '{0} : {1}'.format(index, line)
index += 1
if index > 1:
prompt = '\ngit name(1):'
else:
prompt = '\ngit name(ruby/platform/packages/services/Telephony):'
project = raw_input(prompt)
project = project.strip()
checkIfExit(project)
value = -1
if len(project) > 0:
if len(lines) > 0:
if project.isdigit():
value = string.atoi(project)
if value > len(lines):
value = 1
elif len(lines) > 0:
value = 1
if len(lines) > 0 and value > 0 and value <= len(lines):
project = lines[value -1]
project = project.strip('\n').strip()
if len(project) == 0:
project = 'ruby/platform/packages/services/Telephony'
#if project == '1':
# project = 'platform/packages/apps/InCallUI'
#elif project == '2':
# project = 'platform/packages/services/Telecomm'
#elif project == '3':
# project = 'platform/packages/services/Telephony'
projectCondition = URL_PROJECT_PRE + project
print '\ngit name:' + project
if len(status) <= 0:
status = raw_input('\nstatus(merged):')
status = status.strip()
if len(status) <= 0:
status = 'merged'
statusCondition = URL_STATUS_PRE + status
print '\nstatus:' + status
if len(owner) <= 0:
print '\nOwner filter:'
print 'Enter : Not filter by owner.'
print '1 : Filter by name | |
"""
=========
numpy_ext
=========
An extension library for NumPy_ that implements common array operations not present in NumPy.
.. _numpy: https://numpy.org/
Installation
------------
**Regular installation**::
pip install numpy_ext
**Installation for development**::
git clone https://github.com/3jane/numpy_ext.git
cd numpy_ext
pip install -e .[dev] # note: make sure you are using pip>=20
Window operations
-----------------
- :func:`numpy_ext.expanding`
- :func:`numpy_ext.expanding_apply`
- :func:`numpy_ext.rolling`
- :func:`numpy_ext.rolling_apply`
Operations with nans
--------------------
- :func:`numpy_ext.nans`
- :func:`numpy_ext.drop_na`
- :func:`numpy_ext.fill_na`
- :func:`numpy_ext.fill_not_finite`
- :func:`numpy_ext.prepend_na`
Others
------
- :func:`numpy_ext.apply_map`
- :func:`numpy_ext.expstep_range`
Functions
---------
"""
from functools import partial
from typing import Callable, Any, Union, Generator, Tuple, List
import numpy as np
from joblib import Parallel, delayed
Number = Union[int, float]
def expstep_range(
    start: Number,
    end: Number,
    min_step: Number = 1,
    step_mult: Number = 1,
    round_func: Callable = None
) -> np.ndarray:
    """
    Return values spaced over [start, end) with a step that grows by `step_mult` each iteration.

    Parameters
    ----------
    start : int or float
        Start of interval, inclusive.
    end : int or float
        End of interval, exclusive.
    min_step : int or float, optional
        Lower bound for the step; must be positive. Default is 1.
    step_mult : int or float, optional
        Factor applied to the step after every emitted value; must be positive. Default is 1.
    round_func : Callable, optional
        Vectorized rounding function (np.ceil, np.floor, ...). When given, values are
        rounded and de-duplicated while keeping their original order. Default is None.

    Returns
    -------
    np.ndarray
        Array of exponentially spaced values.

    Examples
    --------
    >>> expstep_range(1, 100, min_step=1, step_mult=1.5, round_func=np.ceil)
    array([ 1.,  2.,  4.,  6., 10., 15., 22., 34., 51., 76.])
    """
    if step_mult <= 0:
        raise ValueError('mult_step should be bigger than 0')
    if min_step <= 0:
        raise ValueError('min_step should be bigger than 0')

    # direction of travel; a descending range is produced when start > end
    direction = 1 if start < end else -1
    collected = []
    current = start
    step = min_step
    # (end - current) * direction > 0  <=>  current has not yet reached end
    while (end - current) * direction > 0:
        collected.append(current)
        current += max(step, min_step) * direction
        step = abs(step * step_mult)

    result = np.array(collected)
    if not round_func:
        return result

    # round, then drop duplicates while preserving first-occurrence order
    result = np.array(round_func(result))
    _, first_pos = np.unique(result, return_index=True)
    return result[np.sort(first_pos)]
def apply_map(func: Callable[[Any], Any], array: Union[List, np.ndarray]) -> np.ndarray:
    """
    Apply `func` to every element of `array` and return the resulting array.

    Parameters
    ----------
    func : Callable[[Any], Any]
        One-argument function returning a single value.
    array : Union[List, np.ndarray]
        Input array or (nested) list; lists are converted to np.ndarray first.

    Returns
    -------
    np.ndarray
        Array of the same shape with `func` applied element-wise (results are
        cast to the input array's dtype).

    Examples
    --------
    >>> apply_map(lambda x: 0 if x < 3 else 1, [[2, 2], [3, 3]])
    array([[0, 0],
           [1, 1]])
    """
    result = np.array(array)
    # np.array() copies into a C-contiguous buffer, so reshape(-1) is a view;
    # assigning through it casts results to the array's dtype, as before
    flat_view = result.reshape(-1)
    flat_view[:] = [func(element) for element in flat_view]
    return result
#############################
# Operations with nans
#############################
def nans(shape: Union[int, Tuple[int]], dtype=np.float64) -> np.ndarray:
    """
    Return a new array of a given shape and type, filled with np.nan values.

    Integer dtypes cannot represent np.nan, so they are promoted to float64.

    Parameters
    ----------
    shape : int or tuple of ints
        Shape of the new array, e.g., (2, 3) or 2.
    dtype : data-type, optional
        Desired dtype; integer dtypes are replaced with float64.

    Returns
    -------
    np.ndarray
        Array of np.nans of the given shape.

    Examples
    --------
    >>> nans(3)
    array([nan, nan, nan])
    >>> nans((2, 2))
    array([[nan, nan],
           [nan, nan]])
    """
    if np.issubdtype(dtype, np.integer):
        # fix: `np.float` (an alias of builtin float) was removed in
        # NumPy 1.24, raising AttributeError; use the concrete float64
        dtype = np.float64
    arr = np.empty(shape, dtype=dtype)
    arr.fill(np.nan)
    return arr
def drop_na(array: np.ndarray) -> np.ndarray:
    """
    Return the non-nan entries of `array` (result is flattened by the
    boolean selection).

    Parameters
    ----------
    array : np.ndarray
        Input array.

    Returns
    -------
    np.ndarray
        New array without nans.

    Examples
    --------
    >>> drop_na(np.array([np.nan, 1, 2]))
    array([1., 2.])
    """
    keep = ~np.isnan(array)
    return array[keep]
def fill_na(array: np.ndarray, value: Any) -> np.ndarray:
    """
    Return a copy of `array` with every nan replaced by `value`.

    Parameters
    ----------
    array : np.ndarray
        Input array.
    value : Any
        Replacement for nan entries (cast to the array's dtype).

    Returns
    -------
    np.ndarray
        Copy of `array` with nans replaced.

    Examples
    --------
    >>> fill_na(np.array([np.nan, 1, 2]), -1)
    array([-1.,  1.,  2.])
    """
    result = array.copy()
    nan_mask = np.isnan(result)
    result[nan_mask] = value
    return result
def fill_not_finite(array: np.ndarray, value: Any = 0) -> np.ndarray:
    """
    Return a copy of array with nans and infs replaced with a given value.

    Parameters
    ----------
    array : np.ndarray
        Input array (left unmodified).
    value : Any, optional
        Value to replace nans and +/-infs with. Default is 0.

    Returns
    -------
    np.ndarray
        A copy of array with non-finite entries replaced.

    Examples
    --------
    >>> fill_not_finite(np.array([np.nan, np.inf, 1, 2]), 99)
    array([99., 99.,  1.,  2.])
    """
    result = array.copy()
    bad_mask = ~np.isfinite(array)
    result[bad_mask] = value
    return result
def prepend_na(array: np.ndarray, n: int) -> np.ndarray:
    """
    Return a copy of array with n nans inserted at the beginning.

    Parameters
    ----------
    array : np.ndarray
        Input array.
    n : int
        Number of nan elements to insert.

    Returns
    -------
    np.ndarray
        New array with nans prepended.

    Examples
    --------
    >>> prepend_na(np.array([1, 2]), 2)
    array([nan, nan,  1.,  2.])
    """
    # Match the element dtype when the input exposes one (numpy scalars);
    # plain Python lists fall back to the default float64 padding.
    if len(array) and hasattr(array[0], 'dtype'):
        padding = nans(n, array[0].dtype)
    else:
        padding = nans(n)
    return np.hstack((padding, array))
#############################
# window operations
#############################
def rolling(
        array: np.ndarray,
        window: int,
        skip_na: bool = False,
        as_array: bool = False
) -> Union[Generator[np.ndarray, None, None], np.ndarray]:
    """
    Roll a fixed-width window over an array.

    The result is either a 2-D array or a generator of slices, controlled
    by the `as_array` parameter.

    Parameters
    ----------
    array : np.ndarray
        Input array.
    window : int
        Size of the rolling window.
    skip_na : bool, optional
        If False, the sequence starts with (window-1) nan-padded windows.
        If True, those are omitted. Default is False.
    as_array : bool, optional
        If True, return a 2-D array. Otherwise, return a generator of
        slices. Default is False.

    Returns
    -------
    np.ndarray or Generator[np.ndarray, None, None]
        Rolling window matrix or generator.

    Raises
    ------
    TypeError
        If `window` is not an integer.
    ValueError
        If `array` has fewer elements than `window`.

    Examples
    --------
    >>> rolling(np.array([1, 2, 3, 4, 5]), 2, as_array=True)
    array([[nan,  1.],
           [ 1.,  2.],
           [ 2.,  3.],
           [ 3.,  4.],
           [ 4.,  5.]])
    """
    # isinstance with a tuple replaces the previous any()-over-list check.
    if not isinstance(window, (int, np.integer)):
        raise TypeError(f'Wrong window type ({type(window)}) int expected')
    window = int(window)
    if array.size < window:
        raise ValueError('array.size should be bigger than window')

    def rows_gen():
        if not skip_na:
            # Leading partial windows, left-padded with nans to full width.
            yield from (prepend_na(array[:i + 1], (window - 1) - i)
                        for i in np.arange(window - 1))
        starts = np.arange(array.size - (window - 1))
        yield from (array[start:end] for start, end in zip(starts, starts + window))

    # list() instead of a redundant [row for row in ...] comprehension.
    return np.array(list(rows_gen())) if as_array else rows_gen()
def rolling_apply(func: Callable, window: int, *arrays: np.ndarray, n_jobs: int = 1, **kwargs) -> np.ndarray:
"""
Roll a fixed-width window over an array or a group of arrays, producing slices.
Apply a function to each slice / group of slices, transforming them into a value.
Perform computations in parallel, optionally.
Return a new np.ndarray with the resulting values.
Parameters
----------
func : Callable
The function to apply to each slice or a group of slices.
window : int
Window size.
*arrays : list
List of input arrays.
n_jobs : int, optional
Parallel tasks count for joblib. If 1, joblib won't be used. Default is 1.
**kwargs : dict
Input parameters (passed to func, must be named).
Returns
-------
np.ndarray
Examples
--------
>>> arr = np.array([1, 2, 3, 4, 5])
>>> rolling_apply(sum, 2, arr)
array([nan, 3., 5., 7., 9.])
>>> arr2 = np.array([1.5, 2.5, 3.5, 4.5, 5.5])
>>> func = lambda a1, a2, k: (sum(a1) + max(a2)) * k
>>> rolling_apply(func, 2, arr, arr2, k=-1)
array([ nan, -5.5, -8.5, -11.5, -14.5])
"""
if not any(isinstance(window, t) for t in [int, np.integer]):
raise TypeError(f'Wrong window type ({type(window)}) int expected')
window = int(window)
if max(len(x.shape) for x in arrays) != 1:
raise ValueError('Wrong array shape. Supported only 1D arrays')
if len({array.size for array in arrays}) != 1:
raise ValueError('Arrays must be the same length')
def _apply_func_to_arrays(idxs):
return func(*[array[idxs[0]:idxs[-1] + 1] for array in arrays], **kwargs)
array = arrays[0]
rolls = rolling(
| |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="list_level.py">
# Copyright (c) 2021 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import datetime
import six
import json
class ListLevel(object):
    """DTO container with a document list level.
    """

    # NOTE(review): the bare string below is a no-op statement, not a
    # docstring (only the literal above is attached to the class). Kept
    # byte-identical because this file is Swagger-generated.
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared Swagger type, consumed by the
    # generated (de)serialization helpers.
    swagger_types = {
        'link': 'WordsApiLink',
        'alignment': 'str',
        'font': 'Font',
        'is_legal': 'bool',
        'linked_style': 'Style',
        'number_format': 'str',
        'number_position': 'float',
        'number_style': 'str',
        'restart_after_level': 'int',
        'start_at': 'int',
        'tab_position': 'float',
        'text_position': 'float',
        'trailing_character': 'str'
    }

    # Python attribute name -> JSON key in the REST API payload.
    attribute_map = {
        'link': 'Link',
        'alignment': 'Alignment',
        'font': 'Font',
        'is_legal': 'IsLegal',
        'linked_style': 'LinkedStyle',
        'number_format': 'NumberFormat',
        'number_position': 'NumberPosition',
        'number_style': 'NumberStyle',
        'restart_after_level': 'RestartAfterLevel',
        'start_at': 'StartAt',
        'tab_position': 'TabPosition',
        'text_position': 'TextPosition',
        'trailing_character': 'TrailingCharacter'
    }
def __init__(self, link=None, alignment=None, font=None, is_legal=None, linked_style=None, number_format=None, number_position=None, number_style=None, restart_after_level=None, start_at=None, tab_position=None, text_position=None, trailing_character=None):  # noqa: E501
    """ListLevel - a model defined in Swagger"""  # noqa: E501
    # Create the private backing field for every swagger-declared
    # attribute (declaration order of swagger_types is preserved).
    for attr_name in self.swagger_types:
        setattr(self, '_' + attr_name, None)
    self.discriminator = None

    # Route each supplied (non-None) argument through its property setter
    # so validation (e.g. alignment / number_style) still runs.
    supplied = dict(
        link=link, alignment=alignment, font=font, is_legal=is_legal,
        linked_style=linked_style, number_format=number_format,
        number_position=number_position, number_style=number_style,
        restart_after_level=restart_after_level, start_at=start_at,
        tab_position=tab_position, text_position=text_position,
        trailing_character=trailing_character,
    )
    for attr_name, attr_value in supplied.items():
        if attr_value is not None:
            setattr(self, attr_name, attr_value)
@property
def link(self):
    """WordsApiLink: the link to the document.

    :return: The link of this ListLevel.
    """
    return self._link

@link.setter
def link(self, link):
    """Store the document link.

    :param link: WordsApiLink value to assign.
    """
    self._link = link
@property
def alignment(self):
    """Gets the alignment of this ListLevel.  # noqa: E501

    Gets or sets the justification of the actual number of the list item.  # noqa: E501

    :return: The alignment of this ListLevel.  # noqa: E501
    :rtype: str
    """
    return self._alignment

@alignment.setter
def alignment(self, alignment):
    """Sets the alignment of this ListLevel.

    Gets or sets the justification of the actual number of the list item.  # noqa: E501

    :param alignment: The alignment of this ListLevel. Accepts either an
        enum name from allowed_values or a digit string that indexes
        into allowed_values.  # noqa: E501
    :type: str
    """
    allowed_values = ["Left", "Center", "Right"]  # noqa: E501
    if not alignment.isdigit():
        if alignment not in allowed_values:
            raise ValueError(
                "Invalid value for `alignment` ({0}), must be one of {1}"  # noqa: E501
                .format(alignment, allowed_values))
        self._alignment = alignment
    else:
        # Digit strings are treated as an index into allowed_values.
        # NOTE(review): an out-of-range index raises IndexError here --
        # presumably never produced by the service; confirm upstream.
        self._alignment = allowed_values[int(alignment) if six.PY3 else long(alignment)]
@property
def font(self):
    """Font: character formatting used for the list label.

    :return: The font of this ListLevel.
    """
    return self._font

@font.setter
def font(self, font):
    """Store the character formatting used for the list label.

    :param font: Font value to assign.
    """
    self._font = font
@property
def is_legal(self):
    """bool: whether the level turns all inherited numbers to Arabic
    (False preserves their number style).

    :return: The is_legal flag of this ListLevel.
    """
    return self._is_legal

@is_legal.setter
def is_legal(self, is_legal):
    """Store the legal-numbering flag.

    :param is_legal: bool value to assign.
    """
    self._is_legal = is_legal
@property
def linked_style(self):
    """Style: the paragraph style linked to this list level.

    :return: The linked_style of this ListLevel.
    """
    return self._linked_style

@linked_style.setter
def linked_style(self, linked_style):
    """Store the paragraph style linked to this list level.

    :param linked_style: Style value to assign.
    """
    self._linked_style = linked_style
@property
def number_format(self):
    """str: the number format for the list level.

    :return: The number_format of this ListLevel.
    """
    return self._number_format

@number_format.setter
def number_format(self, number_format):
    """Store the number format for the list level.

    :param number_format: str value to assign.
    """
    self._number_format = number_format
@property
def number_position(self):
    """float: position (in points) of the number or bullet for the level.

    :return: The number_position of this ListLevel.
    """
    return self._number_position

@number_position.setter
def number_position(self, number_position):
    """Store the position (in points) of the number or bullet.

    :param number_position: float value to assign.
    """
    self._number_position = number_position
@property
def number_style(self):
    """Gets the number_style of this ListLevel.  # noqa: E501

    Gets or sets the number style for this list level.  # noqa: E501

    :return: The number_style of this ListLevel.  # noqa: E501
    :rtype: str
    """
    return self._number_style

@number_style.setter
def number_style(self, number_style):
    """Sets the number_style of this ListLevel.

    Gets or sets the number style for this list level.  # noqa: E501

    :param number_style: The number_style of this ListLevel. Accepts
        either an enum name from allowed_values or a digit string that
        indexes into allowed_values.  # noqa: E501
    :type: str
    """
    allowed_values = ["Arabic", "UppercaseRoman", "LowercaseRoman", "UppercaseLetter", "LowercaseLetter", "Ordinal", "Number", "OrdinalText", "Hex", "ChicagoManual", "Kanji", "KanjiDigit", "AiueoHalfWidth", "IrohaHalfWidth", "ArabicFullWidth", "ArabicHalfWidth", "KanjiTraditional", "KanjiTraditional2", "NumberInCircle", "DecimalFullWidth", "Aiueo", "Iroha", "LeadingZero", "Bullet", "Ganada", "Chosung", "GB1", "GB2", "GB3", "GB4", "Zodiac1", "Zodiac2", "Zodiac3", "TradChinNum1", "TradChinNum2", "TradChinNum3", "TradChinNum4", "SimpChinNum1", "SimpChinNum2", "SimpChinNum3", "SimpChinNum4", "HanjaRead", "HanjaReadDigit", "Hangul", "Hanja", "Hebrew1", "Arabic1", "Hebrew2", "Arabic2", "HindiLetter1", "HindiLetter2", "HindiArabic", "HindiCardinalText", "ThaiLetter", "ThaiArabic", "ThaiCardinalText", "VietCardinalText", "NumberInDash", "LowercaseRussian", "UppercaseRussian", "None", "Custom"]  # noqa: E501
    if not number_style.isdigit():
        if number_style not in allowed_values:
            raise ValueError(
                "Invalid value for `number_style` ({0}), must be one of {1}"  # noqa: E501
                .format(number_style, allowed_values))
        self._number_style = number_style
    else:
        # Digit strings are treated as an index into allowed_values.
        self._number_style = allowed_values[int(number_style) if six.PY3 else long(number_style)]
@property
def restart_after_level(self):
"""Gets the restart_after_level of this ListLevel. # | |
ranging from 0 to 100 representing the
percentage of values as respect to maximum. The value
above that threshold will be 0 (Default is None)
Returns
-------
float data : numpy array |
numpy array (dim number of subject X number of network)
representing the connectivity of each network with other networks
'''
with open(self.net_label_txt) as f:
net=f.read().splitlines()
self.all_conn = []
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
if make_symmetric==True:
self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
else:
self.matrix = self.matrix
self.max=np.max(self.matrix.flatten())
if upper_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
if lower_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
np.fill_diagonal(self.matrix,0)
self.nodes_ranges = np.arange(len(self.labels_dic['nodes']))
for network in net:
self.outer_idx = np.setdiff1d(self.nodes_ranges, self.labels_dic[network])
self.subj_matrix = self.matrix[self.labels_dic[network]]
self.subj_matrix = self.subj_matrix[:,self.outer_idx]
self.streamlines_sum = np.sum(np.sum(self.subj_matrix))
self.conn_measure = self.streamlines_sum/self.outer_idx.shape[0]
self.all_conn.append(self.conn_measure)
self.all_conn = np.array(self.all_conn)
self.all_conn = self.all_conn.reshape(len(self.matrices_files), len(net))
return self.all_conn
def net_ranking(self, sbj_number, nodes_number, make_symmetric=True,
                upper_threshold=None, lower_threshold=None,
                percentage_value=False):
    '''
    computing how much each node is connected with the other network

    Parameters
    ----------
    sbj_number: int |
        number of subjects
    nodes_number: int|
        number of nodes
    make_symmetric: Boolean|
        True indicate that the matrix is either upper
        or lower triangular and need to be symmetrize
        False indicate that the matrix is a full matrix already
    upper_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        under that threshold will be 0 (Default is None)
    lower_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        above that threshold will be 0 (Default is None)
    percentage_value: Boolean|
        True return values expressed as percentages
        False return raw values

    Returns
    -------
    float data : numpy array |
        a 3D numpy array (number of subjects X number of networks X
        number of networks) representing the connectivity of each
        network with all the networks
    '''
    # One network label per line in the net label file.
    with open(self.net_label_txt) as f:
        net=f.read().splitlines()
    # Per-subject node-level ranking (subjects X nodes X networks),
    # thresholded exactly as requested here.
    self.all_conn = self.node_ranking(sbj_number, nodes_number, len(net), make_symmetric=make_symmetric, upper_threshold=upper_threshold, lower_threshold=lower_threshold)
    self.all_conn_rank = np.zeros([sbj_number, len(net), len(net)])
    for subj in range(len(self.matrices_files)):
        self.subj2use = self.all_conn[subj,:,:]
        for network in net:
            # Rows belonging to this network's nodes.
            self.net2use = self.subj2use[self.labels_dic[network],:]
            if percentage_value==False:
                # Average over the network's nodes.
                self.all_conn_rank[subj, net.index(network), :] = np.mean(self.net2use, axis=0)
            else:
                # Normalize so each network's row sums to 100%.
                self.all_conn_rank[subj, net.index(network), :] = 100* np.mean(self.net2use, axis=0)/np.sum(np.mean(self.net2use, axis=0))
    return self.all_conn_rank
def all_standard_metrics(self, sbj_number, nodes_number, networks_number,
                         make_symmetric=True, upper_threshold=None,
                         lower_threshold=None, percentage_value=False):
    '''
    convenience wrapper: compute every standard connectivity metric of
    this class with a single call and identical thresholding settings.

    Parameters are forwarded unchanged to the individual metric methods
    (see their docstrings for details).

    Returns
    -------
    dict : one entry per metric method, keyed by the method name, each
        value being that method's numpy-array result
    '''
    # Each call re-reads and re-thresholds the matrices; results are
    # also cached on self.metrics_dict as a side effect.
    self.metrics_dict = {
        "nodes_overall_conn": self.nodes_overall_conn(make_symmetric=make_symmetric, upper_threshold=upper_threshold, lower_threshold=lower_threshold),
        "node_inner_conn": self.node_inner_conn(sbj_number, nodes_number, make_symmetric=make_symmetric, upper_threshold=upper_threshold, lower_threshold=lower_threshold),
        "node_outer_conn": self.node_outer_conn(sbj_number, nodes_number, make_symmetric=make_symmetric, upper_threshold=upper_threshold, lower_threshold=lower_threshold),
        "node_ranking": self.node_ranking(sbj_number, nodes_number, networks_number, make_symmetric=make_symmetric, upper_threshold=upper_threshold, lower_threshold=lower_threshold),
        "net_inner_conn": self.net_inner_conn(make_symmetric=make_symmetric, upper_threshold=upper_threshold, lower_threshold=lower_threshold),
        "net_outer_conn": self.net_outer_conn(make_symmetric=make_symmetric, upper_threshold=upper_threshold, lower_threshold=lower_threshold),
        "net_ranking": self.net_ranking(sbj_number, nodes_number, make_symmetric=make_symmetric, upper_threshold=upper_threshold, lower_threshold=lower_threshold, percentage_value=percentage_value)
    }
    return self.metrics_dict
class Graph_Theory(object):
    """Graph-theory metrics computed from structural connectivity matrices.
    """

    def __init__(self, matrices_files, net_label_txt, labels_dic):
        """
        Parameters
        ----------
        matrices_files : list of paths to per-subject connectivity
            matrices (space-separated text files, one per subject)
        net_label_txt : path to a text file with one network label per line
        labels_dic : dict mapping network label -> node indices
        """
        self.matrices_files = matrices_files
        self.net_label_txt = net_label_txt
        self.labels_dic = labels_dic
def nodal_degree(self, sbj_number, nodes_number, make_symmetric=True,
                 upper_threshold=None, lower_threshold=None, binarize=False):
    '''
    computing graph theory node measures regardless of network affiliation

    Parameters
    ----------
    sbj_number: int |
        number of subjects
    nodes_number: int|
        number of nodes
    make_symmetric: Boolean|
        True indicate that the matrix is either upper
        or lower triangular and need to be symmetrize
        False indicate that the matrix is a full matrix already
    upper_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        under that threshold will be 0 (Default is None)
    lower_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        above that threshold will be 0 (Default is None)
    binarize: Boolean|
        True will make the connectivity matrix binary
        Default is False

    Returns
    -------
    dict: dictonary with the following keys |
        degree: int | Number of links connected to the node
        node_strength_undir: int | sum of weights of links connected to the node
    '''
    # Only the undirected measures are computed; the directed variants
    # were disabled and their dead code has been removed. The docstring
    # now matches the keys actually returned.
    self.all_nodal_degree = {
        "degree": np.zeros([sbj_number, nodes_number]),
        "node_strength_undir": np.zeros([sbj_number, nodes_number])
    }
    for subj in range(len(self.matrices_files)):
        self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
        self.matrix = np.array(self.matrix)
        if make_symmetric:
            # Mirror the triangular matrix; subtract the diagonal once
            # so it is not counted twice.
            self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
        self.max = np.max(self.matrix.flatten())
        # Thresholds are expressed as a percentage of the matrix maximum.
        if upper_threshold is not None:
            self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
        if lower_threshold is not None:
            self.matrix[self.matrix > lower_threshold*self.max/100 ] = 0
        if binarize:
            self.matrix = bct.algorithms.binarize(self.matrix)
        # Self-connections are not meaningful for degree/strength.
        np.fill_diagonal(self.matrix, 0)
        self.deg = bct.algorithms.degrees_und(self.matrix)
        self.all_nodal_degree['degree'][subj] = self.deg
        self.nodestr_undir = bct.algorithms.strengths_und(self.matrix)
        self.all_nodal_degree['node_strength_undir'][subj] = self.nodestr_undir
    return self.all_nodal_degree
def network_level_degree(self, sbj_number, nodes_number, label_dic,
                         make_symmetric=True, upper_threshold=None,
                         lower_threshold=None, binarize=False,):
    '''
    computing graph theory node measures specific for each network

    Parameters
    ----------
    sbj_number: int |
        number of subjects
    nodes_number: int|
        number of nodes
    label_dic: dict |
        dictonary computed using files.labels()
    make_symmetric: Boolean|
        True indicate that the matrix is either upper
        or lower triangular and need to be symmetrize
        False indicate that the matrix is a full matrix already
    upper_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        under that threshold will be 0 (Default is None)
    lower_threshold: int |
        an integer value ranging from 0 to 100 representing the
        percentage of values as respect to maximum. The value
        above that threshold will be 0 (Default is None)
    binarize: Boolean|
        True will make the connectivity matrix binary
        Default is False

    Returns
    -------
    dict: dictonary with one entry per key produced by nodal_degree()
        (currently `degree` and `node_strength_undir`), each value a
        2D array (number of subjects X number of networks) holding the
        measure averaged over each network's nodes
    '''
    with open(self.net_label_txt) as f:
        net=f.read().splitlines()
    # Node-level measures for every subject, thresholded as requested.
    self.degree = self.nodal_degree(sbj_number, nodes_number, make_symmetric=make_symmetric, upper_threshold=upper_threshold, lower_threshold=lower_threshold, binarize=binarize)
    self.values = np.zeros([sbj_number, len(self.degree.keys()), len(net)])
    self.list = list(self.degree.keys())
    for subject in range(sbj_number):
        for key in self.list:
            for network in net:
                # Average the nodal measure over this network's nodes.
                self.values[subject, self.list.index(key), net.index(network)] = np.mean(self.degree[key][subject][label_dic[network]])
    # Re-pack the 3D buffer as {measure -> subjects X networks}.
    self.d = {}
    for i in self.degree.keys():
        self.d[i] = self.values[:, self.list.index(i), :]
    return self.d
def physical_connectivity(self, sbj_number, networks_number, label_dic,
make_symmetric=True, upper_threshold=None,
lower_threshold=None, binarize=False):
'''
Density is the fraction of present connections to possible connections.
Parameters
----------
sbj_number: int |
number of subjects
networks_number: int|
number of networks
label_dic: dict |
dictonary computed using files.labels()
make_symmetric: Boolean|
True indicate that the matrix is either upper
or lower triangular and need to be symmetrize
False indicate that the matrix is a full matrix already
upper_threshold: int |
an integer value ranging from 0 to 100 representing the
percentage of values as respect to maximum. The value
under that threshold will be 0 (Default is None)
lower_threshold: int |
an integer value ranging from 0 to 100 representing the
percentage of values as respect to maximum. The value
above that threshold will be 0 (Default is None)
binarize= Boolean|
True will make the connectivity matrix binary
Default is False
Returns
-------
dict: : dictonary with the following keys |
Density_und: int | Density is the fraction of present connections
to possible connections
'''
with open(self.net_label_txt) as f:
net=f.read().splitlines()
self.physical_connectivity = {
"Density_und": np.zeros([sbj_number, networks_number]),
}
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
if make_symmetric==True:
self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
else:
self.matrix = self.matrix
self.max=np.max(self.matrix.flatten())
if upper_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix < upper_threshold*self.max/100 ] = 0
if lower_threshold==None:
self.matrix= self.matrix
else:
self.matrix[self.matrix | |
<filename>samsungctl/remote_websocket.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import base64
import logging
import threading
import ssl
import websocket
import requests
import time
import json
import socket
from . import exceptions
from . import application
from . import websocket_base
from . import wake_on_lan
from .utils import LogIt, LogItWithReturn
logger = logging.getLogger('samsungctl')

# Remote-control websocket endpoints: port 8001 serves plain ws,
# port 8002 serves wss (token-authenticated).
URL_FORMAT = "ws://{}:{}/api/v2/channels/samsung.remote.control?name={}"
SSL_URL_FORMAT = "wss://{}:{}/api/v2/channels/samsung.remote.control?name={}"
class RemoteWebsocket(websocket_base.WebSocketBase):
    """Object for remote control connection."""

    @LogIt
    def __init__(self, config):
        """
        :param config: connection configuration object (host, port, name,
            token, paired flag, ...) consumed by WebSocketBase.
        """
        # Serializes socket access across open/close/control calls.
        self.receive_lock = threading.Lock()
        # Used as a simple pacing timer between sends.
        self.send_event = threading.Event()
        websocket_base.WebSocketBase.__init__(self, config)
@property
@LogItWithReturn
def has_ssl(self):
    """Probe the TV's REST API for token (SSL) auth support.

    :return: True/False when the TV answered and reported
        TokenAuthSupport; False when the response was not the expected
        JSON; None when the TV could not be reached (likely powered off).
    """
    try:
        response = requests.get(
            # Fixed: the URL literal used to start with a stray space.
            'http://{0}:8001/api/v2/'.format(self.config.host),
            timeout=3
        )
        return (
            json.loads(response.content.decode('utf-8'))['device']['TokenAuthSupport']
        )
    except (ValueError, KeyError):
        # TV answered, but the payload was not the expected document.
        return False
    except requests.exceptions.RequestException:
        # Covers HTTPError, ConnectTimeout AND plain ConnectionError,
        # which previously escaped and crashed callers when the TV was
        # unreachable. None means "unknown / TV off".
        return None
@LogIt
def open(self):
    """Open (and if necessary pair) the websocket connection to the TV.

    Returns True on success, False when the TV is unreachable and we
    were already paired. Raises RuntimeError on pairing/auth failure.
    """
    # Already connected: nothing to do.
    if self.sock is not None:
        return True
    self._starting = True
    with self.receive_lock:
        power = self.power
        # First-time pairing requires the TV to be on; try to wake it.
        if not self.config.paired and not power:
            self.power = True
            if not self.power:
                raise RuntimeError(
                    'Unable to pair with TV.. Is the TV off?!?'
                )
        if self.sock is not None:
            self.close()
        # Prefer the SSL (token) channel when the TV supports it.
        if self.config.port == 8002 or self.has_ssl:
            self.config.port = 8002
            if self.config.token:
                logger.debug('using saved token: ' + self.config.token)
                token = "&token=" + self.config.token
            else:
                token = ''
            # The TV uses a self-signed certificate.
            sslopt = {"cert_reqs": ssl.CERT_NONE}
            url = SSL_URL_FORMAT.format(
                self.config.host,
                self.config.port,
                self._serialize_string(self.config.name)
            ) + token
        else:
            self.config.port = 8001
            sslopt = {}
            url = URL_FORMAT.format(
                self.config.host,
                self.config.port,
                self._serialize_string(self.config.name)
            )
        try:
            self.sock = websocket.create_connection(url, sslopt=sslopt)
        except:
            # NOTE(review): bare except kept as-is (legacy behavior).
            if not self.config.paired:
                raise RuntimeError('Unable to connect to the TV')
            if not self._running:
                logger.info('Is the TV on?!?')
            self._starting = False
            return False
        auth_event = threading.Event()

        def unauthorized_callback(_):
            # TV rejected us: stop waiting and tear down the connect
            # callback, then either retry over SSL or give up.
            auth_event.set()
            self.unregister_receive_callback(
                auth_callback,
                'event',
                'ms.channel.connect'
            )
            if self.config.port == 8001:
                logger.debug(
                    "Websocket connection failed. Trying ssl connection"
                )
                self.config.port = 8002
                self.open()
            else:
                self.close()
                raise RuntimeError('Authentication denied')

        def auth_callback(data):
            # TV accepted the connection; capture a new token if one was
            # issued.
            if 'data' in data and 'token' in data["data"]:
                self.config.token = data['data']["token"]
                logger.debug('new token: ' + self.config.token)
            logger.debug("Access granted.")
            auth_event.set()
            self.unregister_receive_callback(
                unauthorized_callback,
                'event',
                'ms.channel.unauthorized'
            )
            # NOTE(review): this token-capture block duplicates the one
            # above -- looks like leftover from a merge; confirm before
            # removing.
            if 'data' in data and 'token' in data["data"]:
                self.config.token = data['data']["token"]
                logger.debug('new token: ' + self.config.token)
                logger.debug("Access granted.")
            # If we woke the TV only to pair, put it back to sleep.
            if not power and not self.config.paired:
                self.power = False
            self.config.paired = True
            if self.config.path:
                self.config.save()
            auth_event.set()

        self.register_receive_callback(
            auth_callback,
            'event',
            'ms.channel.connect'
        )
        self.register_receive_callback(
            unauthorized_callback,
            'event',
            'ms.channel.unauthorized'
        )
        if not self._running:
            # Start the receive loop thread.
            self._thread = threading.Thread(target=self.loop)
            self._thread.start()
            # Pairing needs user interaction on the TV, so wait longer.
            if self.config.paired:
                auth_event.wait(5.0)
            else:
                auth_event.wait(30.0)
            if not auth_event.isSet():
                if not self.config.paired and self.config.port == 8001:
                    logger.debug(
                        "Websocket connection failed. Trying ssl connection"
                    )
                    self.config.port = 8002
                    return self.open()
                else:
                    self.close()
                    raise RuntimeError('Auth Failure')
            self._starting = False
            self.send_event.wait(0.5)
            return True
        else:
            self._starting = False
            return True
@LogIt
def send(self, method, **params):
    """Serialize and send a websocket request to the TV.

    :param method: API method name (e.g. 'ms.remote.control').
    :param params: payload fields for the request.
    """
    if self.sock is None:
        # Remote-key presses are not worth powering the TV on for;
        # anything else triggers a reconnect attempt when the TV is on.
        if method != 'ms.remote.control' and self.power:
            self.open()
        if self.sock is None:
            # Fixed: previously fell through and raised AttributeError
            # on self.sock.send(None-socket); now we log and bail out.
            logger.info('Is the TV on?!?')
            return
    payload = dict(
        method=method,
        params=params
    )
    self.sock.send(json.dumps(payload))
    # Brief pacing delay between consecutive sends.
    self.send_event.wait(0.3)
@LogIt
def power(self, value):
    """Setter: turn the TV on (wake-on-lan) or off (KEY_POWEROFF).

    :param value: True to power on, False to power off.
    """
    # Event is used purely as an interruptible sleep.
    event = threading.Event()
    if value and not self.power:
        if self.mac_address:
            count = 0
            wake_on_lan.send_wol(self.mac_address)
            event.wait(1.0)
            # Retry WOL for up to ~20 seconds, reopening the socket as
            # soon as the receive loop is down.
            while not self.power and count < 20:
                if not self._running:
                    try:
                        self.open()
                    except:
                        # Best effort: the TV may not be up yet.
                        pass
                wake_on_lan.send_wol(self.mac_address)
                event.wait(1.0)
                count += 1
            if count == 20:
                logger.error(
                    'Unable to power on the TV, '
                    'check network connectivity'
                )
        else:
            # NOTE(review): uses the root logging module, not `logger`.
            logging.error('Unable to get TV\'s mac address')
    elif not value and self.power:
        if self.sock is None:
            self.open()
        count = 0
        power_off = dict(
            Cmd='Click',
            DataOfCmd='KEY_POWEROFF',
            Option="false",
            TypeOfRemote="SendRemoteKey"
        )
        power = dict(
            Cmd='Click',
            DataOfCmd='KEY_POWER',
            Option="false",
            TypeOfRemote="SendRemoteKey"
        )
        # Some models respond to KEY_POWER, others to KEY_POWEROFF;
        # send both.
        logger.info("Sending control command: " + str(power))
        self.send("ms.remote.control", **power)
        logger.info("Sending control command: " + str(power_off))
        self.send("ms.remote.control", **power_off)
        # Give the TV up to ~10 seconds to report itself off.
        while self.power and count < 10:
            event.wait(1.0)
            count += 1
        if count == 10:
            logger.info('Unable to power off the TV')

# Rebind `power` as a property: getter from the base class, setter above.
power = property(fget=websocket_base.WebSocketBase.power, fset=power)
@LogIt
def control(self, key, cmd='Click'):
    """
    Send a control command.

    cmd can be one of the following
        'Click'
        'Press'
        'Release'
    """
    # Power keys are routed through the `power` property so wake-on-lan
    # and the off-handshake logic apply.
    if key == 'KEY_POWERON':
        if not self.power:
            self.power = True
        return
    elif key == 'KEY_POWEROFF':
        if self.power:
            self.power = False
        return
    elif key == 'KEY_POWER':
        # Toggle.
        self.power = not self.power
        return
    elif self.sock is None:
        if not self.power:
            logger.info('Is the TV on?!?')
            return
        self.open()
    with self.receive_lock:
        params = dict(
            Cmd=cmd,
            DataOfCmd=key,
            Option="false",
            TypeOfRemote="SendRemoteKey"
        )
        logger.info("Sending control command: " + str(params))
        self.send("ms.remote.control", **params)

# Minimum delay between successive key presses (seconds).
_key_interval = 0.5
@LogItWithReturn
def get_application(self, pattern):
for app in self.applications:
if pattern in (app.app_id, app.name):
return app
@property
@LogItWithReturn
def applications(self):
eden_event = threading.Event()
installed_event = threading.Event()
eden_data = []
installed_data = []
@LogIt
def eden_app_get(data):
logger.debug('eden apps: ' + str(data))
if 'data' in data:
eden_data.extend(data['data']['data'])
eden_event.set()
@LogIt
def installed_app_get(data):
logger.debug('installed apps: ' + str(data))
if 'data' in data:
installed_data.extend(data['data']['data'])
installed_event.set()
self.register_receive_callback(
eden_app_get,
'event',
'ed.edenApp.get'
)
self.register_receive_callback(
installed_app_get,
'event',
'ed.installedApp.get'
)
for event in ['ed.edenApp.get', 'ed.installedApp.get']:
params = dict(
data='',
event=event,
to='host'
)
self.send('ms.channel.emit', **params)
eden_event.wait(10.0)
installed_event.wait(10.0)
self.unregister_receive_callback(
eden_app_get,
'event',
'ed.edenApp.get'
)
self.unregister_receive_callback(
installed_app_get,
'data',
None
)
if not eden_event.isSet():
logger.debug('ed.edenApp.get timed out')
if not installed_event.isSet():
logger.debug('ed.installedApp.get timed out')
if eden_data and installed_data:
updated_apps = []
for eden_app in eden_data[:]:
for installed_app in installed_data[:]:
if eden_app['appId'] == installed_app['appId']:
installed_data.remove(installed_app)
eden_data.remove(eden_app)
eden_app.update(installed_app)
updated_apps += [eden_app]
break
else:
updated_apps = []
updated_apps += eden_data + installed_data
for app in updated_apps[:]:
updated_apps.remove(app)
updated_apps += [application.Application(self, **app)]
logger.debug('applications returned: ' + str(updated_apps))
return updated_apps
@LogIt
def register_receive_callback(self, callback, key, data):
self._registered_callbacks += [[callback, key, data]]
@LogIt
def unregister_receive_callback(self, callback, key, data):
if [callback, key, data] in self._registered_callbacks:
self._registered_callbacks.remove([callback, key, data])
    @LogIt
    def on_message(self, message):
        """Dispatch an incoming websocket message to a registered callback.

        Callbacks are one-shot: the first matching entry is invoked and
        then removed from ``self._registered_callbacks``.
        """
        response = json.loads(message)
        logger.debug('incoming message: ' + message)
        # Iterate over a copy so removal during iteration is safe.
        for callback, key, data in self._registered_callbacks[:]:
            # data is None acts as a wildcard for the value under ``key``.
            if key in response and (data is None or response[key] == data):
                callback(response)
                self._registered_callbacks.remove([callback, key, data])
                break
        else:
            # No direct match: d2d service messages carry a nested JSON
            # payload whose own 'event' field is matched against the
            # registered *key* instead.
            if 'params' in response and 'event' in response['params']:
                event = response['params']['event']
                if event == 'd2d_service_message':
                    data = json.loads(response['params']['data'])
                    if 'event' in data:
                        for callback, key, _ in self._registered_callbacks[:]:
                            if key == data['event']:
                                callback(data)
                                # NOTE(review): assumes such callbacks were
                                # registered with data=None -- confirm.
                                self._registered_callbacks.remove(
                                    [callback, key, None]
                                )
                                break
@property
def artmode(self):
"""
{
"method":"",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"request\":\"get_artmode_status\",
\"id\":\"30852acd-1b7d-4496-8bef-53e1178fa839\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}"
"""
params = dict(
clientIp=socket.gethostbyname(socket.gethostname()),
data=json.dumps(
dict(
request='get_artmode_status',
id=self.config.id
)
),
deviceName=self._serialize_string(self.config.name),
event='art_app_request',
to='host'
)
response = []
event = threading.Event()
def artmode_callback(data):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"127.0.0.1",
"data":"{
\"id\":\"259320d8-f368-48a4-bf03-789f24a22c0f\",
\"event\":\"artmode_status\",
\"value\":\"off\",
\"target_client_id\":\"84b12082-5f28-461e-8e81-b98ad1c1ffa\"
}",
"deviceName":"Smart Device",
"event":"d2d_service_message",
"to":"84b12082-5f28-461e-8e81-b98ad1c1ffa"
}
}
"""
if data['value'] == 'on':
response.append(True)
else:
response.append(False)
event.set()
self.register_receive_callback(
artmode_callback,
'artmode_status',
None
)
self.send('ms.channel.emit', **params)
event.wait(2.0)
self.unregister_receive_callback(
artmode_callback,
'artmode_status',
None
)
if not event.isSet():
logging.debug('get_artmode_status: timed out')
else:
return response[0]
@artmode.setter
def artmode(self, value):
"""
{
"method":"ms.channel.emit",
"params":{
"clientIp":"192.168.1.20",
"data":"{
\"id\":\"545fc0c1-bd9b-48f5-8444-02f9c519aaec\",
\"value\":\"on\",
\"request\":\"set_artmode_status\"
}",
"deviceName":"W1Bob25lXWlQaG9uZQ==",
"event":"art_app_request",
"to":"host"
}
}
"""
if value:
value = 'on'
else:
value = 'off'
params = dict(
clientIp=socket.gethostbyname(socket.gethostname()),
data=json.dumps(
dict(
request='set_artmode_status',
value=value,
id=self.config.id
)
),
deviceName=self._serialize_string(self.config.name),
event='art_app_request',
to='host'
)
self.send('ms.channel.emit', **params)
@LogIt
def input_text(self, text):
params = dict(
Cmd=self._serialize_string(text),
TypeOfRemote="SendInputString",
DataOfCmd="base64"
)
self.send('ms.remote.control', **params)
@LogIt
def start_voice_recognition(self):
"""Activates voice recognition."""
with self.receive_lock:
event = threading.Event()
def voice_callback(_):
event.set()
self.register_receive_callback(
voice_callback,
'event',
'ms.voiceApp.standby'
)
params = dict(
Cmd='Press',
DataOfCmd='KEY_BT_VOICE',
Option="false",
TypeOfRemote="SendRemoteKey"
)
logger.info("Sending control command: " + str(params))
self.send("ms.remote.control", **params)
event.wait(2.0)
self.unregister_receive_callback(
voice_callback,
'event',
'ms.voiceApp.standby'
)
if not event.isSet():
logger.debug('ms.voiceApp.standby timed out')
@LogIt
def stop_voice_recognition(self):
"""Activates voice recognition."""
with self.receive_lock:
event = threading.Event()
def voice_callback(_):
event.set()
self.register_receive_callback(
voice_callback,
'event',
'ms.voiceApp.hide'
)
params = dict(
Cmd='Release',
DataOfCmd='KEY_BT_VOICE',
Option="false",
TypeOfRemote="SendRemoteKey"
)
logger.info("Sending control command: " + str(params))
self.send("ms.remote.control", **params)
event.wait(2.0)
self.unregister_receive_callback(
voice_callback,
'event',
'ms.voiceApp.hide'
)
if not event.isSet():
logger.debug('ms.voiceApp.hide timed out')
@staticmethod
def _serialize_string(string):
if isinstance(string, str):
string = str.encode(string)
return base64.b64encode(string).decode("utf-8")
    @property
    @LogItWithReturn
    def mouse(self):
        """Return a fresh Mouse command queue bound to this remote."""
        return Mouse(self)
class Mouse(object):
    """Queues pointer commands for a remote and replays them on demand."""
    @LogIt
    def __init__(self, remote):
        """Create an idle command queue bound to *remote*."""
        self._remote = remote
        # True while the queued commands are being replayed by run().
        self._is_running = False
        # Queued command payloads (see _send) and waits (see add_wait).
        self._commands = []
        # Synchronisation events used by run()/stop().
        self._ime_start_event = threading.Event()
        self._ime_update_event = threading.Event()
        self._touch_enable_event = threading.Event()
        self._send_event = threading.Event()
    @property
    @LogItWithReturn
    def is_running(self):
        """True while the queued commands are being replayed."""
        return self._is_running
@LogIt
def clear(self):
if not self.is_running:
del self._commands[:]
@LogIt
def _send(self, cmd, **kwargs):
"""Send a control command."""
if not self._remote.connection:
raise exceptions.ConnectionClosed()
if not self.is_running:
params = {
"Cmd": cmd,
"TypeOfRemote": "ProcessMouseDevice"
}
params.update(kwargs)
payload = json.dumps({
"method": "ms.remote.control",
"params": params
})
self._commands += [payload]
    @LogIt
    def left_click(self):
        """Queue a left mouse-button click."""
        self._send('LeftClick')
    @LogIt
    def right_click(self):
        """Queue a right mouse-button click."""
        self._send('RightClick')
@LogIt
def move(self, x, y):
position = dict(
x=x,
y=y,
Time=str(time.time())
)
self._send('Move', Position=position)
@LogIt
def add_wait(self, wait):
if self._is_running:
self._commands += [wait]
@LogIt
def stop(self):
if self.is_running:
self._send_event.set()
self._ime_start_event.set()
self._ime_update_event.set()
self._touch_enable_event.set()
@LogIt
def run(self):
if self._remote.sock is None:
logger.error('Is the TV on??')
return
if not self.is_running:
self._send_event.clear()
self._ime_start_event.clear()
self._ime_update_event.clear()
self._touch_enable_event.clear()
self._is_running = True
with self._remote.receive_lock:
@LogIt
def ime_start(_):
self._ime_start_event.set()
@LogIt
def ime_update(_):
self._ime_update_event.set()
@LogIt
def touch_enable(_):
self._touch_enable_event.set()
| |
of this logical network matches
the specified value.
:return: A list of dictionaries each stands for a matched logical network
"""
if id:
# specify id ,, find it,, filter it
lgnetkey = LogicalNetwork.default_key(id)
return await self._dumpone(lgnetkey,kwargs)
else:
if physicalnetwork:
# specify physicalnetwork , find it in physicalnetwork, filter it
self._reqid += 1
reqid = ('viperflow',self._reqid)
physicalnetwork_map_key = PhysicalNetworkMap.default_key(physicalnetwork)
def walk_map(key,value,walk,save):
if value is None:
return
for weakobj in value.logicnetworks.dataset():
lgnet_key = weakobj.getkey()
with suppress(WalkKeyNotRetrieved):
lgnet_obj = walk(lgnet_key)
if all(getattr(lgnet_obj,k,None) == v for k ,v in kwargs.items()):
save(lgnet_key)
with request_context(reqid, self.app_routine):
_, values = await call_api(self.app_routine,'objectdb','walk',
{'keys':[physicalnetwork_map_key],
'walkerdict':{physicalnetwork_map_key:walk_map},
'requestid':reqid})
return [dump(r) for r in values]
else:
# find it in all set , filter it
self._reqid += 1
reqid = ('viperflow',self._reqid)
lgnet_set_key = LogicalNetworkSet.default_key()
def walk_set(key,value,walk,save):
if value is None:
return
for weakobj in value.set.dataset():
lgnet_key = weakobj.getkey()
with suppress(WalkKeyNotRetrieved):
lgnet_obj = walk(lgnet_key)
if all(getattr(lgnet_obj,k,None) == v for k,v in kwargs.items()):
save(lgnet_key)
with request_context(reqid, self.app_routine):
_, values = await call_api(self.app_routine,"objectdb","walk",
{'keys':[lgnet_set_key],
"walkerdict":{lgnet_set_key:walk_set},"requestid":reqid})
return [dump(r) for r in values]
async def createlogicalport(self, logicalnetwork: str,
id: (str, None) = None,
subnet: (str, None) = None,
**kwargs: {"?mac_address": mac_address_type,
"?ip_address": ip_address_type,
"?hostname": (str, None),
"?extra_dhcp_options": dhcp_options_type}):
"""
Create logical port
:param logicalnetwork: logical network containing this port
:param id: logical port id. If omitted an UUID is created.
:param subnet: subnet containing this port
:param \*\*kwargs: customized options for creating logical ports.
Common options are:
mac_address
port MAC address
ip_address
port IP address
:return: a dictionary for the logical port
"""
if not id:
id = str(uuid1())
if subnet:
port = {'logicalnetwork':logicalnetwork,'id':id,'subnet':subnet}
else:
port = {'logicalnetwork':logicalnetwork,'id':id}
port.update(kwargs)
return await self.createlogicalports([port])
    @checked
    async def createlogicalports(self, ports: [{"?id": str,
                                                "logicalnetwork": str,
                                                "?subnet": str,
                                                "?mac_address": mac_address_type,
                                                "?ip_address": ip_address_type,
                                                "?hostname": (str, None),
                                                "?extra_dhcp_options": dhcp_options_type}]):
        """
        Create multiple logical ports in a transaction

        :param ports: list of port dictionaries; each must name a
                      ``logicalnetwork`` and may carry an ``id``,
                      ``subnet`` and extra attributes.

        :return: dictionaries for the created ports

        :raises ValueError: on duplicate ids, missing networks/subnets,
                            or unusable/exhausted IP addresses.
        """
        parameter_dict = OrderedDict()
        keys = set()
        # Validate input and collect every key the transaction touches.
        for port in ports:
            port = copy.deepcopy(port)
            if 'id' not in port:
                port['id'] = str(uuid1())
            key = LogicalPort.default_key(port['id'])
            if key in parameter_dict:
                raise ValueError("Repeated ID: "+ port['id'])
            if 'logicalnetwork' not in port:
                raise ValueError("must specify logicalnetwork ID")
            keys.add(key)
            keys.add(LogicalNetwork.default_key(port['logicalnetwork']))
            keys.add(LogicalNetworkMap.default_key(port['logicalnetwork']))
            if 'subnet' in port:
                keys.add(SubNet.default_key(port['subnet']))
                keys.add(SubNetMap.default_key(port['subnet']))
            parameter_dict[key] = port
        keys.add(LogicalPortSet.default_key())
        def walker(walk, write):
            # Process logical ports with specified IP address first,
            # so the automatically allocated IPs do not conflict
            # with specified IPs
            for key, parameters in sorted(parameter_dict.items(),
                                          key=lambda x: 'ip_address' in x[1],
                                          reverse=True):
                with suppress(WalkKeyNotRetrieved):
                    value = walk(key)
                    # set_new raises if the port already exists.
                    value = set_new(value, LogicalPort.create_instance(parameters['id']))
                    with suppress(WalkKeyNotRetrieved):
                        lognet_id = parameters['logicalnetwork']
                        lognet = walk(LogicalNetwork.default_key(lognet_id))
                        if not lognet:
                            raise ValueError("Logical network " + lognet_id + " not exists")
                        value.network = lognet.create_reference()
                        logmap = walk(LogicalNetworkMap.default_key(lognet_id))
                        logmap.ports.dataset().add(value.create_weakreference())
                        write(logmap.getkey(), logmap)
                        if 'subnet' in parameters:
                            subnet_id = parameters['subnet']
                            subnet = walk(SubNet.default_key(subnet_id))
                            if not subnet:
                                raise ValueError("Subnet " + subnet_id + " not exists")
                            if subnet.create_weakreference() not in logmap.subnets.dataset():
                                raise ValueError("Specified subnet " + subnet_id + " is not in logical network " + lognet_id)
                            subnet_map = walk(SubNetMap.default_key(subnet_id))
                            value.subnet = subnet.create_reference()
                            if 'ip_address' in parameters:
                                ip_address = parse_ip4_address(parameters['ip_address'])
                                value.ip_address = ip4_addr.formatter(ip_address)
                                # check ip_address in cidr
                                start = parse_ip4_address(subnet.allocated_start)
                                end = parse_ip4_address(subnet.allocated_end)
                                try:
                                    assert start <= ip_address <= end
                                    if hasattr(subnet, 'gateway'):
                                        assert ip_address != parse_ip4_address(subnet.gateway)
                                except Exception:
                                    raise ValueError("Specified ip_address " + parameters['ip_address'] + " is not an usable IP address in subnet " + subnet_id)
                                # allocated_ips is keyed by str(int-form address)
                                if str(ip_address) not in subnet_map.allocated_ips:
                                    subnet_map.allocated_ips[str(ip_address)] = value.create_weakreference()
                                else:
                                    raise ValueError("IP address " + parameters['ip_address'] + " has been used in subnet " + subnet_id)
                            else:
                                # allocated ip_address from cidr
                                start = parse_ip4_address(subnet.allocated_start)
                                end = parse_ip4_address(subnet.allocated_end)
                                gateway = None
                                if hasattr(subnet, "gateway"):
                                    gateway = parse_ip4_address(subnet.gateway)
                                # First free address in [start, end] that is
                                # not the gateway.
                                for ip_address in range(start, end + 1):
                                    if str(ip_address) not in subnet_map.allocated_ips and ip_address != gateway:
                                        value.ip_address = ip4_addr.formatter(ip_address)
                                        subnet_map.allocated_ips[str(ip_address)] = value.create_weakreference()
                                        break
                                else:
                                    raise ValueError("Cannot allocate an available IP address from subnet " + subnet_id)
                            write(subnet_map.getkey(), subnet_map)
                        # Process other parameters
                        for k,v in parameters.items():
                            if k not in ('id', 'logicalnetwork', 'subnet', 'ip_address'):
                                setattr(value, k, v)
                        write(key, value)
                        with suppress(WalkKeyNotRetrieved):
                            logport_set = walk(LogicalPortSet.default_key())
                            logport_set.set.dataset().add(value.create_weakreference())
                            write(logport_set.getkey(), logport_set)
        await call_api(self.app_routine, 'objectdb', 'writewalk', {'keys': keys, 'walker': walker})
        return await self._dumpkeys(parameter_dict)
async def updatelogicalport(self, id: str, **kwargs: {"?mac_address": mac_address_type,
"?ip_address": ip_address_type,
"?hostname": (str, None),
"?extra_dhcp_options": dhcp_options_type}):
"Update attributes of the specified logical port"
if not id :
raise ValueError("must specify id")
port = {"id":id}
port.update(kwargs)
return await self.updatelogicalports([port])
    @checked
    async def updatelogicalports(self, ports: [{"id": str,
                                                "?mac_address": mac_address_type,
                                                "?ip_address": ip_address_type,
                                                "?hostname": (str, None),
                                                "?extra_dhcp_options": dhcp_options_type}]):
        """Update multiple logical ports in a transaction.

        The logical network and subnet of a port cannot be changed.
        """
        # ports [{"id":id,...},{...}]
        parameter_dict = OrderedDict()
        for port in ports:
            port = copy.deepcopy(port)
            key = LogicalPort.default_key(port['id'])
            if key in parameter_dict:
                raise ValueError("Repeated ID: " + port['id'])
            if 'logicalnetwork' in port:
                raise ValueError("logical network cannot be changed")
            if 'network' in port:
                raise ValueError("logical network cannot be changed")
            if 'subnet' in port:
                raise ValueError("subnet cannot be changed")
            parameter_dict[key] = port
        def walker(walk, write):
            # Must deallocate all IP addresses before allocating new
            deallocated_all = True
            for key, parameters in parameter_dict.items():
                try:
                    value = walk(key)
                    if value is None:
                        raise ValueError("Logical port " + parameters['id'] + " not exists")
                    if 'ip_address' in parameters and hasattr(value, 'subnet') and hasattr(value, 'ip_address'):
                        # Subnet is needed when allocating IP address
                        ensure_keys(walk, value.subnet.getkey())
                        subnet_map = walk(SubNetMap._subnet.leftkey(value.subnet))
                        del subnet_map.allocated_ips[str(parse_ip4_address(value.ip_address))]
                        write(subnet_map.getkey(), subnet_map)
                except WalkKeyNotRetrieved:
                    # Key not cached yet; the walk will be retried by objectdb.
                    deallocated_all = False
            if not deallocated_all:
                return
            # Update processing
            for key, parameters in parameter_dict.items():
                value = walk(key)
                if 'ip_address' in parameters and hasattr(value, 'subnet'):
                    with suppress(WalkKeyNotRetrieved):
                        ensure_keys(walk, value.subnet.getkey(),
                                    SubNetMap._subnet.leftkey(value.subnet))
                        try:
                            subnet = walk(value.subnet.getkey())
                        except WalkKeyNotRetrieved:
                            # Also retrieve subnet map to prevent another try
                            ensure_keys(walk, SubNetMap._subnet.leftkey(value.subnet))
                            raise
                        subnet_map = walk(SubNetMap._subnet.leftkey(value.subnet))
                        ip_address = parse_ip4_address(parameters['ip_address'])
                        start = parse_ip4_address(subnet.allocated_start)
                        end = parse_ip4_address(subnet.allocated_end)
                        try:
                            assert start <= ip_address <= end
                            if hasattr(subnet,"gateway"):
                                assert ip_address != parse_ip4_address(subnet.gateway)
                        except Exception:
                            raise ValueError("Specified ip_address " + parameters['ip_address'] + " is not an usable IP address in subnet " + subnet.id)
                        if str(ip_address) not in subnet_map.allocated_ips:
                            subnet_map.allocated_ips[str(ip_address)] = value.create_weakreference()
                            write(subnet_map.getkey(), subnet_map)
                        else:
                            raise ValueError("Cannot allocate an available IP address from subnet " + subnet.id)
                # Apply remaining attribute updates.
                for k, v in parameters.items():
                    if k not in ('id',):
                        setattr(value, k, v)
                write(key, value)
        await call_api(self.app_routine, 'objectdb', 'writewalk', {'keys': tuple(parameter_dict.keys()),
                                                                   'walker': walker})
        return await self._dumpkeys(parameter_dict)
async def deletelogicalport(self, id: str):
"Delete logical port"
p = {"id":id}
return await self.deletelogicalports([p])
    @checked
    async def deletelogicalports(self, ports: [{"id": str}]):
        """Delete multiple logical ports in a transaction.

        Releases the port's allocated IP address and removes it from its
        logical network map and the global port set.
        """
        parameter_dict = OrderedDict()
        for port in ports:
            key = LogicalPort.default_key(port['id'])
            if key in parameter_dict:
                raise ValueError("Repeated ID: " + port['id'])
            parameter_dict[key] = port
        def walker(walk, write):
            for key, parameters in parameter_dict.items():
                with suppress(WalkKeyNotRetrieved):
                    value = walk(key)
                    if value is None:
                        raise ValueError("Logical port " + parameters['id'] + " not exists")
                    with suppress(WalkKeyNotRetrieved):
                        # Detach from the owning logical network.
                        lognet_map = walk(LogicalNetworkMap._network.leftkey(value.network))
                        lognet_map.ports.dataset().discard(value.create_weakreference())
                        write(lognet_map.getkey(), lognet_map)
                    if hasattr(value, 'subnet'):
                        with suppress(WalkKeyNotRetrieved):
                            # Free the IP allocation in the subnet map.
                            subnet_map = walk(SubNetMap._subnet.leftkey(value.subnet))
                            del subnet_map.allocated_ips[str(parse_ip4_address(value.ip_address))]
                            write(subnet_map.getkey(), subnet_map)
                    with suppress(WalkKeyNotRetrieved):
                        logport_set = walk(LogicalPortSet.default_key())
                        logport_set.set.dataset().discard(value.create_weakreference())
                        write(logport_set.getkey(), logport_set)
                    # Writing None deletes the port object itself.
                    write(key, None)
        await call_api(self.app_routine, 'objectdb', 'writewalk',{'keys': tuple(parameter_dict) + (LogicalPortSet.default_key(),),
                                                                  'walker': walker})
        return {"status":'OK'}
async def listlogicalports(self,id = None,logicalnetwork = None,**kwargs):
"""
Query logical port
:param id: If specified, returns only logical port with this ID.
:param logicalnetwork: If specified, returns only logical ports in this network.
:param \*\*kwargs: customzied filters
:return: return matched logical ports
"""
if id:
# specify id , find it ,, filter it
lgportkey = LogicalPort.default_key(id)
return await self._dumpone(lgportkey,kwargs)
else:
if logicalnetwork:
# specify logicalnetwork , find in logicalnetwork map , filter it
lgnet_map_key = LogicalNetworkMap.default_key(logicalnetwork)
self._reqid += 1
reqid = ('viperflow',self._reqid)
def walk_map(key,value,walk,save):
if value is None:
return
for weakobj in value.ports.dataset():
lgportkey = weakobj.getkey()
with suppress(WalkKeyNotRetrieved):
lgport_obj = walk(lgportkey)
if all(getattr(lgport_obj,k,None) == v for k,v in kwargs.items()):
save(lgportkey)
with request_context(reqid, self.app_routine):
_, values = await call_api(self.app_routine,'objectdb','walk',
{'keys':[lgnet_map_key],
'walkerdict':{lgnet_map_key:walk_map},
'requestid':reqid})
return [dump(r) for r in values]
else:
logport_set_key = LogicalPortSet.default_key()
self._reqid += 1
reqid = ('viperflow',self._reqid)
def walk_set(key,value,walk,save):
if value is None:
return
for weakobj in value.set.dataset():
lgportkey = weakobj.getkey()
try:
lgport_obj = walk(lgportkey)
except KeyError:
pass
else:
if all(getattr(lgport_obj,k,None) == | |
0x23, 0xe4, 0x21,
0x01, 0x22, 0x1a, 0x81, 0xb5, 0xb4, 0x21, 0x22,
0x86, 0x48, 0x41, 0xa6, 0x52, 0x22, 0x2c, 0xa2,
0x18, 0x2c, 0x21, 0x85, 0xe2, 0x41, 0x08, 0x11,
0x26, 0x38, 0x21, 0x3a, 0x31, 0x21, 0xc1, 0x8a,
0x14, 0xa2, 0x35, 0xa8, 0x5e, 0x32, 0x28, 0x16,
0x62, 0x28, 0x43, 0x54, 0x28, 0xf0, 0x51, 0xe7,
0x60, 0x42, 0x84, 0x26, 0x02, 0xc1, 0x45, 0x31,
0x28, 0x47, 0x41, 0x5a, 0x01, 0x8f, 0x23, 0x42,
0x5a, 0x62, 0x85, 0x58, 0x26, 0xf0, 0x13, 0x42,
0xa4, 0x63, 0x21, 0xf4, 0x18, 0x29, 0x1a, 0x06,
0x52, 0x20, 0x58, 0x32, 0x46, 0xc2, 0x14, 0x48,
0x47, 0x13, 0x4a, 0x82, 0x71, 0x53, 0x11, 0x29,
0x44, 0x03, 0x18, 0x10, 0x81, 0x37, 0x14, 0xab,
0x41, 0x8d, 0x1c, 0x48, 0x42, 0x2d, 0x48, 0x85,
0x8c, 0x26, 0xb8, 0x22, 0x94, 0x42, 0x2a, 0xbc,
0x44, 0x61, 0x86, 0x20, 0x37, 0x44, 0x8f, 0x9c,
0x07, 0x44, 0x22, 0x44, 0x22, 0x20, 0x02, 0x20,
0x21, 0x42, 0x08, 0x40, 0x12, 0x04, 0x41, 0x2c,
0x12, 0xc4, 0x22, 0x41, 0x83, 0x14, 0x84, 0x12,
0x04, 0x80, 0x02, 0x42, 0x00, 0x18, 0x00, 0x10,
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
0x01, 0x16, 0x08, 0x84, 0x40, 0x08, 0xdf, 0x17,
0x09, 0x22, 0x4f, 0x43, 0xf1, 0x34, 0x16, 0x3f,
0x11, 0x61, 0x41, 0x4f, 0x63, 0xf3, 0x1a, 0x1c,
0xc5, 0xec, 0x46, 0xf6, 0x24, 0x26, 0x2b, 0x57,
0x8d, 0x1a, 0x3e, 0x18, 0xcd, 0x1c, 0x2b, 0x11,
0x2f, 0x41, 0xf1, 0x58, 0x1c, 0xcf, 0x65, 0xf1,
0x53, 0x31, 0x4f, 0x61, 0xd1, 0x57, 0xf5, 0x2c,
0x74, 0x5f, 0x17, 0xf5, 0x64, 0x64, 0x5f, 0x52,
0x02, 0x5f, 0x51, 0xf4, 0x74, 0x66, 0x7f, 0x13,
0xa3, 0x36, 0x1f, 0x14, 0xe4, 0x25, 0xf5, 0x36,
0x36, 0x22, 0x1f, 0x1e, 0xee, 0x24, 0xa4, 0xab,
0x16, 0xf2, 0x37, 0x37, 0x2b, 0x21, 0x5f, 0x73,
0xb3, 0x52, 0xf6, 0x31, 0x33, 0x2b, 0x77, 0x5f,
0x77, 0xb3, 0x72, 0xf7, 0x21, 0x23, 0xaf, 0x84,
0x74, 0x41, 0xb3, 0x42, 0x74, 0x84, 0x76, 0x82,
0xf2, 0x56, 0x46, 0xab, 0xcc, 0xcf, 0x46, 0xa3,
0x11, 0xcf, 0xc3, 0xb3, 0x38, 0xf1, 0x3c, 0x24,
0x5d, 0xbf, 0xf0, 0x34, 0x34, 0x4f, 0xc3, 0xf7,
0x34, 0x36, 0x3f, 0x13, 0xf3, 0x34, 0x14, 0x3e,
0x26, 0xef, 0x83, 0xd1, 0xcc, 0xf1, 0x74, 0x74,
0x4f, 0x61, 0xf1, 0x7e, 0x5c, 0xaf, 0xa3, 0xf1,
0x32, 0x38, 0x4f, 0xe1, 0xf1, 0x16, 0x34, 0x2f,
0x61, 0xf1, 0x5a, 0x1c, 0xcf, 0xc7, 0xf7, 0x77,
0x65, 0x7a, 0xf7, 0x33, 0x39, 0x6f, 0xc2, 0xf7,
0x55, 0x51, 0x2b, 0x76, 0x1f, 0x12, 0xf2, 0x84,
0x86, 0x3f, 0x35, 0xf5, 0x56, 0x56, 0x3f, 0x51,
0xa7, 0x73, 0x1f, 0x16, 0xe6, 0x37, 0xf6, 0x26,
0x66, 0x2b, 0x67, 0x9f, 0x9e, 0xaa, 0x66, 0xba,
0xef, 0x23, 0xf3, 0x37, 0x37, 0x2b, 0x33, 0x5f,
0x73, 0xb3, 0x72, 0xf7, 0x71, 0x73, 0x2b, 0x77,
0x5f, 0x77, 0xf7, 0x76, 0x74, 0x1f, 0x36, 0xb6,
0x6a, 0xf6, 0xc9, 0x83, 0x2b, 0xec, 0x4f, 0x6d,
0xfd, 0x92, 0xd2, 0x6f, 0x65, 0xb5, 0xda, 0xfd,
0x7c, 0x74, 0x8f, 0x83, 0xf3, 0x7e, 0x3c, 0x8b,
0x77, 0xef, 0x47, 0xf7, 0x2c, 0x7e, 0xf0, 0x34,
0x34, 0x3a, 0xe5, 0x23, 0xf1, 0x37, 0x15, 0xcf,
0x81, 0xf3, 0x34, 0x36, 0xef, 0xc1, 0xf1, 0x18,
0x1c, 0x6e, 0x64, 0x4f, 0x63, 0xf3, 0x72, 0x5c,
0xaf, 0x81, 0xf1, 0x12, 0x3a, 0x2f, 0x81, 0xf1,
0x16, 0x14, 0x16, 0xf2, 0x5e, 0x58, 0x8f, 0x27,
0xf1, 0x77, 0x35, 0x7e, 0x72, 0x3f, 0x93, 0xf5,
0x3e, 0x78, 0x1f, 0x55, 0xa5, 0x76, 0x1f, 0x12,
0xc2, 0x82, 0x5f, 0x75, 0xf5, 0x74, 0x72, 0x7f,
0x53, 0xa7, 0x77, 0x1f, 0x14, 0xe4, 0x33, 0xf7,
0x36, 0x36, 0x2a, 0xf2, 0xe1, 0xe9, 0x6e, 0x62,
0xba, 0xef, 0x23, 0xf3, 0x37, 0x37, 0x2b, 0x33,
0x5f, 0x73, 0xb3, 0x72, 0xf7, 0x71, 0x73, 0x2b,
0x57, 0x5f, 0x77, 0xf7, 0x62, 0x64, 0x1f, 0x36,
0xb6, 0x42, 0xf6, 0x41, 0x43, 0x2b, 0x44, 0x4f,
0x64, 0xb5, 0x72, 0xf9, 0x54, 0x46, 0x2f, 0x8f,
0xfc, 0x74, 0x74, 0x8f, 0x83, 0xf3, 0x36, 0x34,
0x3e, 0x38, 0x4f, 0x46, 0xf3, 0xc8, 0xa6, 0xa0,
0x33, 0xcf, 0x42, 0xe3, 0x23, 0xf3, 0x37, 0x35,
0xcf, 0xc3, 0xf3, 0x34, 0x32, 0x6f, 0xc3, 0xd3,
0x8c, 0xf1, 0x54, 0x54, 0x4f, 0x63, 0xf3, 0x7a,
0x7c, 0xaf, 0x83, 0xf3, 0x12, 0x38, 0x4f, 0xe1,
0xb1, 0x32, 0xe1, 0x21, 0xf1, 0x5e, 0x5a, 0xef,
0x47, 0xf7, 0x73, 0x73, 0x2b, 0x77, 0x7f, 0xd7,
0xf3, 0x74, 0x78, 0x1f, 0x55, 0x85, 0x54, 0x55,
0x4d, 0x86, 0x3f, 0x15, 0xe5, 0x67, 0xf7, 0x37,
0x55, 0x3a, 0xf7, 0x71, 0x71, 0x7e, 0x62, 0x6f,
0x63, 0xb3, 0x32, 0xf2, 0xe1, 0xe9, 0x6e, 0x62,
0xbe, 0xb2, 0x3a, 0xf3, 0x37, 0x37, 0x2b, 0x33,
0x5f, 0x73, 0xb3, 0x72, 0xf7, 0x71, 0x73, 0xab,
0x67, 0x5f, 0x76, 0xf6, 0x62, 0x44, 0x1f, 0x36,
0xf6, 0x4a, 0x68, 0x9f, 0xbc, 0xbc, 0x62, 0xfc,
0xd4, 0x56, 0x2f, 0x2d, 0xfd, 0x56, 0x56, 0x2f,
0x8d, 0xfd, 0x74, 0x74, 0x3a, 0xf3, 0x7c, 0x7c,
0x7e, 0x68, 0x4f, 0x46, 0xf7, 0xf8, 0xb5, 0x00,
0x90, 0x14, 0x10, 0x44, 0x98, 0x14, 0x16, 0x08,
0x20, 0x22, 0x01, 0x12, 0x20, 0x41, 0xa4, 0x41,
0x64, 0x12, 0x42, 0x20, 0x04, 0x00, 0x44, 0x21,
0x42, 0x18, 0x42, 0x00, 0x2b, 0x12, 0x42, 0x00,
0x00, 0x00, 0x20, 0x21, 0x02, 0x22, 0x00, 0x48,
0x80, 0x44, 0x82, 0x04, 0x12, 0x83, 0x24, 0x21,
0x04, 0x1f, 0xaf, 0x06, 0x18, 0x44, 0x00, 0x18,
0x00, 0x10, 0x04, 0x18, 0x12, 0x21, 0x84, 0x2d,
0x14, 0x10, 0x42, 0x18, 0x92, 0x44, 0x81, 0xc0,
0x14, 0x40, 0x04, 0x12, 0x48, 0x44, 0x20, 0x41,
0x82, 0x82, 0x04, 0x24, 0x1a, 0x42, 0x82, 0xa2,
0x12, 0xa0, 0x16, 0x10, 0x88, 0x24, 0x04, 0xc0,
0x48, 0x42, 0x80, 0x08, 0xe0, 0x82, 0x24, 0x24,
0x02, 0x26, 0xe9, 0x5e, 0x0b, 0x2c, 0xf4, 0x11,
0x24, 0x85, 0xf3, 0x11, 0x24, 0x83, 0xf4, 0x11,
0x24, 0xa7, 0x14, 0x1f, 0x41, 0xb6, 0x4a, 0xd1,
0x41, 0xb2, 0x4a, 0x99, 0x21, 0xaf, 0x24, 0x99,
0x45, 0xab, 0x94, 0x53, 0xf2, 0x4a, 0x93, 0x43,
0xf2, 0x42, 0x93, 0x47, 0x82, 0xaf, 0x14, 0xf9,
0x24, 0x48, 0x1c, 0xf9, 0x24, 0x4a, 0x1c, 0xf8,
0x24, 0x4a, 0x16, 0xf1, 0x24, 0x4a, 0xd6, 0xf1,
0x24, 0x4a, 0x96, 0xc5, 0xca, 0x9e, 0x24, 0xac,
0xf5, 0x91, 0x24, 0x2c, 0xf4, 0x91, 0x24, 0xa5,
0xf3, 0x91, 0x24, 0x83, 0xf4, 0x91, 0x24, 0xeb,
0x84, 0x1f, 0x48, 0xb2, 0x4a, 0xd1, 0x41, 0xb2,
0x4a, 0x99, 0x21, 0xab, 0x94, 0x51, 0xaf, 0x44,
0x39, 0x24, 0xab, 0x94, 0x43, 0xf2, 0x42, 0x92,
0x47, 0x8a, 0x29, 0xf9, 0x24, 0x48, 0x98, 0x4f,
0xa2, 0xf4, 0xaa, 0x72, 0xc0, 0x42, 0x1f, 0x81,
0x14, 0xb8, 0x11, 0x34, 0x48, 0x17, 0x41, 0xa3,
0x54, 0x41, 0xab, 0x16, 0x1d, 0x24, 0x2f, 0x44,
0x99, 0x21, 0xaf, 0x44, 0x19, 0xb1, 0x48, 0x3b,
0x21, 0xa9, 0x19, 0x81, 0x59, 0x81, 0x98, 0x33,
0x82, 0xfb, 0x22, 0x5a, 0x1c, 0xea, 0xa2, 0x6d,
0x11, 0x4f, 0xa2, 0xec, 0x19, 0xc2, 0x4a, 0x16,
0xc1, 0x4b, 0x8a, 0xc2, 0x4b, 0x1e, 0x2c, 0x1c,
0xf4, 0x11, 0x6c, 0x85, 0xa2, 0x69, 0x87, 0x14,
0x1a, 0x72, 0x4a, 0xb1, 0x81, 0xb2, 0x48, 0xa1,
0x21, 0xaf, 0x54, 0x99, 0x21, 0xaf, 0x44, 0x1b,
0xb1, 0x48, 0x03, 0xab, 0x84, 0xb0, 0x42, 0x28,
0x92, 0x92, 0x2b, 0x42, 0x98, 0x2f, 0x23, 0xf4,
0x98, 0xc3, 0x40, 0x08, 0x00, 0x00, 0x00, 0x00,
0x40, 0x04, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x80, 0x42, 0x08, 0x00,
0x00, 0x10, 0x08, 0xd0, 0x12, 0x07, 0x49, 0x01,
0x00, 0x23, 0xe6, 0x14, 0x61, 0x46, 0x00, 0x85,
0x42, 0xc2, 0x12, 0x15, 0x08, 0x94, 0x40, 0x88,
0x31, 0x24, 0x18, 0xa0, 0x42, 0x80, 0x88, 0x02,
0x16, 0x14, 0xc8, 0x84, 0x48, 0x84, 0x90, 0x22,
0x40, 0xa4, 0x81, 0x10, 0x12, 0xc2, 0x43, 0xb0,
0x24, 0x11, 0x28, 0x71, 0x12, 0x31, 0x18, 0x89,
0x41, 0x48, 0xa2, 0x18, 0x10, 0x02, 0x85, 0x92,
0x28, 0x00, 0x00, 0x5f, 0x5d, 0x05, 0x1c, 0x02,
0x2d, 0x28, 0x2e, 0x18, 0x12, 0x50, 0x12, 0x24,
0x4d, 0x11, 0x13, 0x62, 0x91, 0x2a, 0x08, 0x89,
0x58, 0x42, 0x1a, 0x02, 0x2d, 0x88, 0x2b, 0x48,
0x11, 0x2b, 0x94, 0x11, 0x22, 0x13, 0x14, 0x02,
0x80, 0x72, 0x42, 0x6a, 0x68, 0x80, 0x84, 0x01,
0x36, 0x02, 0x21, 0x47, 0x18, 0xa0, 0x18, 0x00,
0x31, 0xce, 0x24, 0x90, 0xa1, 0x48, 0x82, 0x21,
0x4d, 0x4a, 0xa0, 0x21, 0x81, 0x1a, 0xc8, 0x44,
0x18, 0x26, 0x78, 0x1c, 0x61, 0xc4, 0x7f, 0x6c,
0x08, 0x17, 0x41, 0x46, 0xc2, 0x28, 0x28, 0x2b,
0x43, 0x30, 0x61, 0x30, 0x11, 0x13, 0x52, 0x86,
0x2e, 0x82, 0x13, 0xc2, 0x88, 0x35, 0x88, 0x03,
0x1e, 0x82, 0x4e, 0x24, 0x12, 0x2f, 0x21, 0x94,
0x61, 0x20, 0x89, 0x22, 0x28, 0xc1, 0x22, 0x86,
0x7c, 0x42, 0x04, 0x15, 0x82, 0x02, 0x89, 0x71,
0x92, 0x62, 0x24, 0x86, 0x04, | |
polygon.factory()
obj_.build(child_)
self.polygon.append(obj_)
elif nodeName_ == 'wire':
obj_ = wire.factory()
obj_.build(child_)
self.wire.append(obj_)
elif nodeName_ == 'text':
obj_ = text.factory()
obj_.build(child_)
self.text.append(obj_)
elif nodeName_ == 'pin':
obj_ = pin.factory()
obj_.build(child_)
self.pin.append(obj_)
elif nodeName_ == 'circle':
obj_ = circle.factory()
obj_.build(child_)
self.circle.append(obj_)
elif nodeName_ == 'rectangle':
obj_ = rectangle.factory()
obj_.build(child_)
self.rectangle.append(obj_)
elif nodeName_ == 'frame':
obj_ = frame.factory()
obj_.build(child_)
self.frame.append(obj_)
# end class symbol
class deviceset(GeneratedsSuper):
    """Binding for the EAGLE ``<deviceset>`` XML element.

    NOTE(review): appears to be generateDS.py-generated code -- manual
    edits are typically lost on regeneration; confirm before refactoring.
    """
    # Hook for substituting a user subclass in factory(); None by default.
    subclass = None
    superclass = None
    def __init__(self, uservalue=None, prefix=None, name=None, description=None, gates=None, devices=None):
        # XML attributes of <deviceset>.
        self.uservalue = _cast(None, uservalue)
        self.prefix = _cast(None, prefix)
        self.name = _cast(None, name)
        # Child elements.
        self.description = description
        self.gates = gates
        self.devices = devices
def factory(*args_, **kwargs_):
if deviceset.subclass:
return deviceset.subclass(*args_, **kwargs_)
else:
return deviceset(*args_, **kwargs_)
factory = staticmethod(factory)
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_gates(self): return self.gates
def set_gates(self, gates): self.gates = gates
def get_devices(self): return self.devices
def set_devices(self, devices): self.devices = devices
def get_uservalue(self): return self.uservalue
def set_uservalue(self, uservalue): self.uservalue = uservalue
def get_prefix(self): return self.prefix
def set_prefix(self, prefix): self.prefix = prefix
def get_name(self): return self.name
def set_name(self, name): self.name = name
    def export(self, outfile, level, namespace_='t:', name_='deviceset', namespacedef_=''):
        """Write this element as XML to *outfile*, indented to *level*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='deviceset')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No children: emit a self-closing tag.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='t:', name_='deviceset'):
        """Write XML attributes; *already_processed* prevents duplicates."""
        if self.uservalue is not None and 'uservalue' not in already_processed:
            already_processed.append('uservalue')
            outfile.write(' uservalue=%s' % (self.gds_format_string(quote_attrib(self.uservalue).encode(ExternalEncoding), input_name='uservalue'), ))
        if self.prefix is not None and 'prefix' not in already_processed:
            already_processed.append('prefix')
            outfile.write(' prefix=%s' % (self.gds_format_string(quote_attrib(self.prefix).encode(ExternalEncoding), input_name='prefix'), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='t:', name_='deviceset', fromsubclass_=False):
if self.description is not None:
self.description.export(outfile, level, namespace_, name_='description')
if self.gates is not None:
self.gates.export(outfile, level, namespace_, name_='gates', )
if self.devices is not None:
self.devices.export(outfile, level, namespace_, name_='devices', )
def hasContent_(self):
if (
self.description is not None or
self.gates is not None or
self.devices is not None
):
return True
else:
return False
    def exportLiteral(self, outfile, level, name_='deviceset'):
        # Emit this node as Python-literal constructor text (not XML)
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # Write each non-None attribute as a 'key = "value",' line
        if self.uservalue is not None and 'uservalue' not in already_processed:
            already_processed.append('uservalue')
            showIndent(outfile, level)
            outfile.write('uservalue = "%s",\n' % (self.uservalue,))
        if self.prefix is not None and 'prefix' not in already_processed:
            already_processed.append('prefix')
            showIndent(outfile, level)
            outfile.write('prefix = "%s",\n' % (self.prefix,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            showIndent(outfile, level)
            outfile.write('name = "%s",\n' % (self.name,))
    def exportLiteralChildren(self, outfile, level, name_):
        # Recurse into present children, wrapping each as model_.<name>(...)
        if self.description is not None:
            showIndent(outfile, level)
            outfile.write('description=model_.description(\n')
            self.description.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.gates is not None:
            showIndent(outfile, level)
            outfile.write('gates=model_.gates(\n')
            self.gates.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.devices is not None:
            showIndent(outfile, level)
            outfile.write('devices=model_.devices(\n')
            self.devices.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        # Populate this object from an lxml/ElementTree node: attributes
        # first, then one buildChildren call per child element.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any namespace prefix from the tag before dispatching
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Copy recognized XML attributes onto the instance (first one wins)
        value = find_attr_value_('uservalue', node)
        if value is not None and 'uservalue' not in already_processed:
            already_processed.append('uservalue')
            self.uservalue = value
        value = find_attr_value_('prefix', node)
        if value is not None and 'prefix' not in already_processed:
            already_processed.append('prefix')
            self.prefix = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.append('name')
            self.name = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Instantiate the matching child binding class and recurse into it
        if nodeName_ == 'description':
            obj_ = description.factory()
            obj_.build(child_)
            self.set_description(obj_)
        elif nodeName_ == 'gates':
            obj_ = gates.factory()
            obj_.build(child_)
            self.set_gates(obj_)
        elif nodeName_ == 'devices':
            obj_ = devices.factory()
            obj_.build(child_)
            self.set_devices(obj_)
# end class deviceset
class device(GeneratedsSuper):
    # generateDS-style binding for the <device> element: two XML attributes
    # (name, package) and two child elements (connects, technologies).
    # `subclass` allows callers to register a replacement class that
    # factory() will instantiate instead of device.
    subclass = None
    superclass = None
    def __init__(self, name=None, package=None, connects=None, technologies=None):
        self.name = _cast(None, name)
        self.package = _cast(None, package)
        self.connects = connects
        self.technologies = technologies
    def factory(*args_, **kwargs_):
        # Build the registered subclass if installed, else a plain device
        if device.subclass:
            return device.subclass(*args_, **kwargs_)
        else:
            return device(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_connects(self): return self.connects
    def set_connects(self, connects): self.connects = connects
    def get_technologies(self): return self.technologies
    def set_technologies(self, technologies): self.technologies = technologies
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_package(self): return self.package
    def set_package(self, package): self.package = package
    def export(self, outfile, level, namespace_='t:', name_='device', namespacedef_=''):
        # Serialize as XML; self-closing tag when there are no children
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='device')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='t:', name_='device'):
        # Write each non-None attribute once (name, package)
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
        if self.package is not None and 'package' not in already_processed:
            already_processed.append('package')
            outfile.write(' package=%s' % (self.gds_format_string(quote_attrib(self.package).encode(ExternalEncoding), input_name='package'), ))
    def exportChildren(self, outfile, level, namespace_='t:', name_='device', fromsubclass_=False):
        # Delegate to each present child element in schema order
        if self.connects is not None:
            self.connects.export(outfile, level, namespace_, name_='connects')
        if self.technologies is not None:
            self.technologies.export(outfile, level, namespace_, name_='technologies')
    def hasContent_(self):
        # True when any child element exists (controls self-closing tag)
        if (
            self.connects is not None or
            self.technologies is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='device'):
        # Emit this node as Python-literal constructor text (not XML)
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            showIndent(outfile, level)
            outfile.write('name = "%s",\n' % (self.name,))
        if self.package is not None and 'package' not in already_processed:
            already_processed.append('package')
            showIndent(outfile, level)
            outfile.write('package = "%s",\n' % (self.package,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.connects is not None:
            showIndent(outfile, level)
            outfile.write('connects=model_.connects(\n')
            self.connects.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.technologies is not None:
            showIndent(outfile, level)
            outfile.write('technologies=model_.technologies(\n')
            self.technologies.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        # Populate from an XML node: attributes, then each child element
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.append('name')
            self.name = value
        value = find_attr_value_('package', node)
        if value is not None and 'package' not in already_processed:
            already_processed.append('package')
            self.package = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Instantiate the matching child binding class and recurse into it
        if nodeName_ == 'connects':
            obj_ = connects.factory()
            obj_.build(child_)
            self.set_connects(obj_)
        elif nodeName_ == 'technologies':
            obj_ = technologies.factory()
            obj_.build(child_)
            self.set_technologies(obj_)
# end class device
class bus(GeneratedsSuper):
    # generateDS-style binding for the <bus> element: one XML attribute
    # (name) and one child element (segment).
    subclass = None
    superclass = None
    def __init__(self, name=None, segment=None):
        self.name = _cast(None, name)
        self.segment = segment
    def factory(*args_, **kwargs_):
        # Build the registered subclass if installed, else a plain bus
        if bus.subclass:
            return bus.subclass(*args_, **kwargs_)
        else:
            return bus(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_segment(self): return self.segment
    def set_segment(self, segment): self.segment = segment
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def export(self, outfile, level, namespace_='t:', name_='bus', namespacedef_=''):
        # Serialize as XML; self-closing tag when there is no segment
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='bus')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='t:', name_='bus'):
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
    def exportChildren(self, outfile, level, namespace_='t:', name_='bus', fromsubclass_=False):
        if self.segment is not None:
            self.segment.export(outfile, level, namespace_, name_='segment', )
    def hasContent_(self):
        # True when the single child element exists
        if (
            self.segment is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='bus'):
        # Emit this node as Python-literal constructor text (not XML)
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            showIndent(outfile, level)
            outfile.write('name = "%s",\n' % (self.name,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.segment is not None:
            showIndent(outfile, level)
            outfile.write('segment=model_.segment(\n')
            self.segment.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        # Populate from an XML node: attributes, then each child element
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.append('name')
            self.name = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'segment':
            obj_ = segment.factory()
            obj_.build(child_)
            self.set_segment(obj_)
# end class bus
class net(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, name=None, classxx=None, segment=None):
self.name = _cast(None, name)
self.classxx = _cast(None, classxx)
self.segment = segment
def factory(*args_, **kwargs_):
if net.subclass:
return net.subclass(*args_, **kwargs_)
else:
return net(*args_, **kwargs_)
factory = staticmethod(factory)
def get_segment(self): return self.segment
def set_segment(self, segment): self.segment = segment
def get_name(self): return self.name
def | |
''' Utilities for handling "databases" for QUAD4M analyses.

DESCRIPTION:
This module helps create and manage "databases" of:
    * Geometries (db_geoms)
    * Earthquakes (db_accs)
    * Non-linear properties (db_nonlins)
    * Random fields (db_rfs)

MAIN FUNCTIONS:
This module contains the following functions:
    * update_db_accs
    * update_db_geoms
    * get_unique_accs
    * search
    * get_db
'''
# ------------------------------------------------------------------------------
# Import Modules
# ------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import os
import warnings
# LLGEO
import llgeo.quad4m.geometry as q4m_geom
import llgeo.utilities.files as llgeo_fls
# ------------------------------------------------------------------------------
# Main Functions
# ------------------------------------------------------------------------------
def update_db_accs(path_db, file_db, acc, tstep):
    ''' Placeholder: acceleration "database" updates are not implemented.

    The ground motions for current projects were processed ahead of time and
    saved directly, so there has been no need for this helper yet. It is also
    unclear that a generic implementation is possible, since the raw text-file
    formats differ from project to project; processing motions individually
    per project may remain the better approach.

    Parameters
    ----------
    path_db, file_db, acc, tstep
        Accepted for interface symmetry with the other update_db_* functions;
        currently unused.

    Returns
    -------
    bool
        Always False, signalling that nothing was updated.
    '''
    return False
def update_db_geoms(path_db, file_db, path_DXF, new_DXF_files, path_check):
    ''' Adds new entries to database of geometries

    Purpose
    -------
    Given a list of dxf files, this:
        * Processes new entries by generating elems and nodes dataframes and
          getting sizes of mesh.
        * Saves a pkl for each new geometry with all info.
        * Updates the summary file "file_db" with new entries and returns it.
        * Returns a list of dicts with the geometry info that was saved.

    Each processed geometry dictionary contains the following keys:
        *id     | entry id
        *name   | entry name
        *fname  | name of file where dfs are saved (includes extension .pkl)
        *W      | maximum width of the overall mesh
        *H      | maximum height of the overall mesh
        *nelm   | number of elements in the mesh
        *welm   | average width of all elements in mesh
        *helm   | average height of all elements in mesh
        nodes   | dataframe with node info (see llgeo/quad4m/geometry.py)
        elems   | dataframe with element info (see llgeo/quad4m/geometry.py)
        readme  | short description of file
    (Items marked with * are included in the summary file)

    Parameters
    ----------
    path_db : str
        directory containing geometry "database".
    file_db : str
        name of "database" summary file (usually ending in .pkl).
    path_DXF : str
        directory containing new DXF files to be processed.
    new_DXF_files : list of str
        list of dxf file names (usually ending in .dxf).
    path_check : str or False
        directory where "check" DXFs will be printed out.
        If the directory doesn't exist, this raises an error.
        If set to False, no check DXFs are printed.

    Returns
    -------
    db_geoms : dataframe
        "database" summary file, now including information on new_DXF_files.
    geom_dicts : list of dictionaries
        One element per DXF file in "new_DXF_files"; each is a dict with the
        geometry info described above (existing entries are read, not redone).
    '''
    # Get the current database summary (created empty if it doesn't exist yet)
    db_geoms = get_db(path_db, file_db, db_type = 'geoms')

    # Determine the current id based on the database contents
    if len(db_geoms) > 0:
        i = np.max(db_geoms['id'])
    else:
        i = 0

    # Readme to be included in new entries
    readme = ''' This geometry was processed using llgeo/quad4m/db_utils.
                 It contains dataframes of elems and nodes, and some summary
                 info. Will be used to probabilistically run ground response
                 analyses using QUAD4MU.'''

    # Loop through new files and process them
    geom_dicts = []
    for new_DXF_file in new_DXF_files:

        # Name of entry to be processed
        name = new_DXF_file.replace('.dxf', '')

        # If name already exists, read existing data and continue to next one
        if name in db_geoms['name'].tolist():

            # Warn user that no new data is being processed
            # (fixed typo: "alread" -> "already")
            mssg = 'Entry already exists: {:10s}'.format(name)
            mssg += '\n Reading (not creating) data'
            warnings.showwarning(mssg, UserWarning, 'db_utils.py', '')

            # Determine file name of the existing entry
            f_exist = db_geoms.loc[db_geoms['name'] == name, 'fname'].item()

            # Read existing file and add to output list
            geom_dicts += [llgeo_fls.read_pkl(path_db, f_exist)]
            continue

        # Otherwise, process new entry
        i += 1  # Update entry ID
        nodes, elems = q4m_geom.dxf_to_dfs(path_DXF, new_DXF_file)
        W, H, N, w, h = q4m_geom.get_mesh_sizes(nodes, elems)

        # Save new entry to pickle in database directory
        fname = '{i:03d}_{name}.pkl'.format(i = i, name = name)
        out_data = {'id': i, 'name': name, 'fname': fname, 'W': W, 'H': H,
                    'nelm': N, 'welm': w, 'helm': h, 'nodes': nodes,
                    'elems': elems, 'readme': readme}
        llgeo_fls.save_pkl(path_db, fname, out_data, True)

        # Make sure check directory exists (if needed)
        if path_check and not os.path.exists(path_check):
            err = 'DXF check directory does not exist\n'
            err += 'Create it, or set path_check = False'
            raise Exception(err)

        # Output DXFs as a check (if path_check is not False)
        elif path_check:
            file_check = fname.replace('.pkl', '.dxf')
            q4m_geom.dfs_to_dxf(path_check, file_check, nodes, elems)

        # Add summary info to db_geoms.
        # pd.concat replaces DataFrame.append, which was deprecated in
        # pandas 1.4 and removed in pandas 2.0.
        cols = list(db_geoms)
        new_row = pd.DataFrame([[i, name, fname, W, H, N, w, h]],
                               columns = cols)
        db_geoms = pd.concat([db_geoms, new_row], ignore_index = True)

        # Add new data for list export
        geom_dicts += [out_data]

    # Save db_geoms summary file (os.path.join avoids relying on path_db
    # ending with a separator, which "path_db + file_db" silently required)
    db_geoms.to_pickle(os.path.join(path_db, file_db))

    return db_geoms, geom_dicts
def get_unique_accs(db_accs, cols = ['T', 'type', 'name']):
    ''' Drop duplicate earthquake records, keeping the first occurrence.

    The acceleration database sometimes holds the same earthquake and return
    period more than once (different orientations). Uniqueness is judged only
    on the columns listed in ``cols``; which duplicate survives is arbitrary
    (simply the first one encountered).

    Note that ``db_accs`` is modified in place and also returned.
    '''
    # In-place de-duplication, re-numbering the index afterwards
    db_accs.drop_duplicates(subset = cols, keep = 'first',
                            inplace = True, ignore_index = True)
    return db_accs
# ------------------------------------------------------------------------------
# Helper Functions
# ------------------------------------------------------------------------------
def search(db, conditions, return_col = 'all'):
    ''' Returns entries from db that meet desired conditions

    Purpose
    -------
    Given a "database" summary file (db), this returns the entries that match
    the conditions specified in the dictionary "conditions".

    Parameters
    ----------
    db : dataframe
        Database summary file
    conditions : dict
        Conditions to be met. Ex: {'T': 2475} will return db entries in which
        the column T has a value of 2475. So far, only equality is checked
        (no > or <).
    return_col : list of str (or str) (optional)
        list of column names to return, or a single string for one column.
        If a single column is given, the return will be a numpy array (not a
        dataframe series). Otherwise, the return will be a DataFrame.
        Defaults to returning all columns.

    Returns
    -------
    result : numpy array or dataframe
        db entries that match the conditions, with output columns dictated by
        return_col. If return_col is a single string, result is a np array,
        otherwise it is a dataframe.

    Notes
    -----
    * TODO-wishlist: could this include > and < at some point?
    '''
    # AND together one boolean mask per condition (equality only)
    row_mask = np.ones(len(db), dtype = bool)
    for col, val in conditions.items():
        row_mask &= np.asarray(db[col] == val)

    # Returning 'all' means every column in the dataframe
    if return_col == 'all':
        return_col = list(db)

    # Extract matching rows and the requested columns
    result = db.loc[row_mask, return_col]

    # A single (non-list) column request comes back as a numpy array
    if not isinstance(return_col, list):
        result = result.values

    return result
def get_db(path_db, file_db, db_type = False, reset = False):
''' Gets the summary dataframe of available geometries.
Purpose
-------
This function gets the dataframe that contains summary information of the
available geometries in the "database" stored in "path_db".
If path_db + file_db does not exist:
An empty DF will be created, saved as pkl, and returned.
If path_db + file_db already exists and reset = False:
Nothing will be created/saved. Existing pkl will be read and returned.
(BE CAREFUL WITH THIS USE)
If path_db + file_db already exists and reset = True:
An empty DF will be created, saved as pkl, and returned.
CAREFUL: this will override existing file.
(Not generally used directly)
Parameters
----------
path_db : str
path to the geometry "database".
file_db : str
name of "database" summary file (usually ending in .pkl).
db_type : str
type of dataframe to get. One of: geoms | accs | nonlins | rfs |
only needed if database is being created for the first time.
reset : bool (optional)
set TRUE | |
<filename>lib/core/dump.py
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import cgi
import hashlib
import os
import re
import shutil
import tempfile
import threading
from lib.core.common import Backend
from lib.core.common import checkFile
from lib.core.common import dataToDumpFile
from lib.core.common import dataToStdout
from lib.core.common import getSafeExString
from lib.core.common import getUnicode
from lib.core.common import isListLike
from lib.core.common import normalizeUnicode
from lib.core.common import openFile
from lib.core.common import prioritySortColumns
from lib.core.common import randomInt
from lib.core.common import safeCSValue
from lib.core.common import unicodeencode
from lib.core.common import unsafeSQLIdentificatorNaming
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.dicts import DUMP_REPLACEMENTS
from lib.core.enums import CONTENT_STATUS
from lib.core.enums import CONTENT_TYPE
from lib.core.enums import DBMS
from lib.core.enums import DUMP_FORMAT
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapValueException
from lib.core.exception import SqlmapSystemException
from lib.core.replication import Replication
from lib.core.settings import DUMP_FILE_BUFFER_SIZE
from lib.core.settings import HTML_DUMP_CSS_STYLE
from lib.core.settings import IS_WIN
from lib.core.settings import METADB_SUFFIX
from lib.core.settings import MIN_BINARY_DISK_DUMP_SIZE
from lib.core.settings import TRIM_STDOUT_DUMP_SIZE
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import WINDOWS_RESERVED_NAMES
from thirdparty.magic import magic
from extra.safe2bin.safe2bin import safechardecode
class Dump(object):
"""
This class defines methods used to parse and output the results
of SQL injection actions
"""
    def __init__(self):
        # Log file path/handle; populated lazily by setOutputFile()
        self._outputFile = None
        self._outputFP = None
        # Serializes log-file writes when sqlmap runs multi-threaded
        self._lock = threading.Lock()
    def _write(self, data, newline=True, console=True, content_type=None):
        # Core output primitive: route data to the API layer, the console,
        # and the session log file, in that order.
        # In API mode, hand off to the REST layer and skip file/console I/O
        if conf.api:
            dataToStdout(data, content_type=content_type, status=CONTENT_STATUS.COMPLETE)
            return
        text = "%s%s" % (data, "\n" if newline else " ")
        if console:
            dataToStdout(text)
        # Guard the shared log-file handle against concurrent writers
        if kb.get("multiThreadMode"):
            self._lock.acquire()
        try:
            self._outputFP.write(text)
        except IOError, ex:  # NOTE(review): Python 2 except syntax
            errMsg = "error occurred while writing to log file ('%s')" % getSafeExString(ex)
            raise SqlmapGenericException(errMsg)
        if kb.get("multiThreadMode"):
            self._lock.release()
        # Flag that at least some output was produced this run
        kb.dataOutputFlag = True
    def flush(self):
        # Best-effort flush of the log file; flush errors are ignored
        if self._outputFP:
            try:
                self._outputFP.flush()
            except IOError:
                pass
    def setOutputFile(self):
        # Open <outputPath>/log, appending unless --flush-session was given
        self._outputFile = os.path.join(conf.outputPath, "log")
        try:
            self._outputFP = openFile(self._outputFile, "ab" if not conf.flushSession else "wb")
        except IOError, ex:  # NOTE(review): Python 2 except syntax
            errMsg = "error occurred while opening log file ('%s')" % getSafeExString(ex)
            raise SqlmapGenericException(errMsg)
    def getOutputFile(self):
        # Path of the session log file (None before setOutputFile())
        return self._outputFile
    def singleString(self, data, content_type=None):
        # Emit data with no "header:" prefix
        self._write(data, content_type=content_type)
    def string(self, header, data, content_type=None, sort=True):
        # Print "header: data"; list-likes are delegated to lister(), and
        # multi-line values are fenced between "---" markers.
        kb.stickyLevel = None
        if conf.api:
            self._write(data, content_type=content_type)
            return
        if isListLike(data):
            self.lister(header, data, content_type, sort)
        elif data is not None:
            _ = getUnicode(data)
            # Strip a single trailing newline (CRLF or LF)
            if _.endswith("\r\n"):
                _ = _[:-2]
            elif _.endswith("\n"):
                _ = _[:-1]
            if _.strip(' '):
                _ = _.strip(' ')
            if "\n" in _:
                self._write("%s:\n---\n%s\n---" % (header, _))
            else:
                # Quote only values that were strings to begin with
                self._write("%s: %s" % (header, ("'%s'" % _) if isinstance(data, basestring) else _))
        else:
            self._write("%s:\tNone" % header)
    def lister(self, header, elements, content_type=None, sort=True):
        # Print "header [N]:" followed by one "[*] element" line per item.
        # When sort is requested, elements are also de-duplicated via set();
        # any failure (e.g. unhashable items) silently keeps original order.
        if elements and sort:
            try:
                elements = set(elements)
                elements = list(elements)
                elements.sort(key=lambda x: x.lower() if isinstance(x, basestring) else x)
            except:
                pass
        if conf.api:
            self._write(elements, content_type=content_type)
            return
        if elements:
            self._write("%s [%d]:" % (header, len(elements)))
        for element in elements:
            if isinstance(element, basestring):
                self._write("[*] %s" % element)
            elif isListLike(element):
                # Nested sequences are joined on one comma-separated line
                self._write("[*] " + ", ".join(getUnicode(e) for e in element))
        if elements:
            self._write("")
    # --- Thin wrappers: each tags one enumeration result with its header
    # --- and API content type, then delegates to string()/lister().
    def banner(self, data):
        self.string("banner", data, content_type=CONTENT_TYPE.BANNER)
    def currentUser(self, data):
        self.string("current user", data, content_type=CONTENT_TYPE.CURRENT_USER)
    def currentDb(self, data):
        # Wording varies per DBMS: some back-ends expose schemas, not databases
        if Backend.isDbms(DBMS.MAXDB):
            self.string("current database (no practical usage on %s)" % Backend.getIdentifiedDbms(), data, content_type=CONTENT_TYPE.CURRENT_DB)
        elif Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.PGSQL, DBMS.HSQLDB):
            self.string("current schema (equivalent to database on %s)" % Backend.getIdentifiedDbms(), data, content_type=CONTENT_TYPE.CURRENT_DB)
        else:
            self.string("current database", data, content_type=CONTENT_TYPE.CURRENT_DB)
    def hostname(self, data):
        self.string("hostname", data, content_type=CONTENT_TYPE.HOSTNAME)
    def dba(self, data):
        self.string("current user is DBA", data, content_type=CONTENT_TYPE.IS_DBA)
    def users(self, users):
        self.lister("database management system users", users, content_type=CONTENT_TYPE.USERS)
    def userSettings(self, header, userSettings, subHeader, content_type=None):
        # Print per-user settings (e.g. privileges); userSettings may be
        # either a dict, or a (dict, admin-set) pair marking administrators.
        self._areAdmins = set()
        if isinstance(userSettings, (tuple, list, set)):
            self._areAdmins = userSettings[1]
            userSettings = userSettings[0]
        # NOTE(review): Python 2 — dict.keys() returns a sortable list here
        users = userSettings.keys()
        users.sort(key=lambda x: x.lower() if isinstance(x, basestring) else x)
        if conf.api:
            self._write(userSettings, content_type=content_type)
            return
        if userSettings:
            self._write("%s:" % header)
        for user in users:
            settings = userSettings[user]
            if settings is None:
                stringSettings = ""
            else:
                stringSettings = " [%d]:" % len(settings)
            if user in self._areAdmins:
                self._write("[*] %s (administrator)%s" % (user, stringSettings))
            else:
                self._write("[*] %s%s" % (user, stringSettings))
            if settings:
                settings.sort()
                for setting in settings:
                    self._write("    %s: %s" % (subHeader, setting))
        if userSettings:
            self.singleString("")
    def dbs(self, dbs):
        self.lister("available databases", dbs, content_type=CONTENT_TYPE.DBS)
    def dbTables(self, dbTables):
        # Render {db: [tables]} as one ASCII box per database, padded to the
        # longest table name across all databases.
        if isinstance(dbTables, dict) and len(dbTables) > 0:
            if conf.api:
                self._write(dbTables, content_type=CONTENT_TYPE.TABLES)
                return
            maxlength = 0
            for tables in dbTables.values():
                for table in tables:
                    # Some entries arrive as 1-tuples; unwrap before measuring
                    if table and isListLike(table):
                        table = table[0]
                    maxlength = max(maxlength, len(unsafeSQLIdentificatorNaming(normalizeUnicode(table) or unicode(table))))
            lines = "-" * (int(maxlength) + 2)
            for db, tables in dbTables.items():
                tables.sort()
                self._write("Database: %s" % unsafeSQLIdentificatorNaming(db) if db else "Current database")
                if len(tables) == 1:
                    self._write("[1 table]")
                else:
                    self._write("[%d tables]" % len(tables))
                self._write("+%s+" % lines)
                for table in tables:
                    if table and isListLike(table):
                        table = table[0]
                    table = unsafeSQLIdentificatorNaming(table)
                    blank = " " * (maxlength - len(normalizeUnicode(table) or unicode(table)))
                    self._write("| %s%s |" % (table, blank))
                self._write("+%s+\n" % lines)
        elif dbTables is None or len(dbTables) == 0:
            self.singleString("No tables found", content_type=CONTENT_TYPE.TABLES)
        else:
            # Fallback for non-dict payloads (e.g. plain string)
            self.string("tables", dbTables, content_type=CONTENT_TYPE.TABLES)
    def dbTableColumns(self, tableColumns, content_type=None):
        # Render {db: {table: {column: type}}} as one ASCII box per table;
        # the Type column is included only when at least one type is known.
        if isinstance(tableColumns, dict) and len(tableColumns) > 0:
            if conf.api:
                self._write(tableColumns, content_type=content_type)
                return
            for db, tables in tableColumns.items():
                if not db:
                    db = "All"
                for table, columns in tables.items():
                    # First pass: column-name and type widths for padding
                    maxlength1 = 0
                    maxlength2 = 0
                    colType = None
                    # NOTE(review): Python 2 — keys() is a sortable list
                    colList = columns.keys()
                    colList.sort(key=lambda x: x.lower() if isinstance(x, basestring) else x)
                    for column in colList:
                        colType = columns[column]
                        column = unsafeSQLIdentificatorNaming(column)
                        maxlength1 = max(maxlength1, len(column or ""))
                        maxlength2 = max(maxlength2, len(colType or ""))
                    maxlength1 = max(maxlength1, len("COLUMN"))
                    lines1 = "-" * (maxlength1 + 2)
                    if colType is not None:
                        maxlength2 = max(maxlength2, len("TYPE"))
                        lines2 = "-" * (maxlength2 + 2)
                    self._write("Database: %s\nTable: %s" % (unsafeSQLIdentificatorNaming(db) if db else "Current database", unsafeSQLIdentificatorNaming(table)))
                    if len(columns) == 1:
                        self._write("[1 column]")
                    else:
                        self._write("[%d columns]" % len(columns))
                    # Header row (one or two columns depending on type info)
                    if colType is not None:
                        self._write("+%s+%s+" % (lines1, lines2))
                    else:
                        self._write("+%s+" % lines1)
                    blank1 = " " * (maxlength1 - len("COLUMN"))
                    if colType is not None:
                        blank2 = " " * (maxlength2 - len("TYPE"))
                    if colType is not None:
                        self._write("| Column%s | Type%s |" % (blank1, blank2))
                        self._write("+%s+%s+" % (lines1, lines2))
                    else:
                        self._write("| Column%s |" % blank1)
                        self._write("+%s+" % lines1)
                    # Second pass: one padded row per column
                    for column in colList:
                        colType = columns[column]
                        column = unsafeSQLIdentificatorNaming(column)
                        blank1 = " " * (maxlength1 - len(column))
                        if colType is not None:
                            blank2 = " " * (maxlength2 - len(colType))
                            self._write("| %s%s | %s%s |" % (column, blank1, colType, blank2))
                        else:
                            self._write("| %s%s |" % (column, blank1))
                    if colType is not None:
                        self._write("+%s+%s+\n" % (lines1, lines2))
                    else:
                        self._write("+%s+\n" % lines1)
    def dbTablesCount(self, dbTables):
        # Render per-table row counts as an ASCII box per database.
        # Input shape here is {db: {count: [tables]}} (count keyed, then
        # listed per table), highest counts first.
        if isinstance(dbTables, dict) and len(dbTables) > 0:
            if conf.api:
                self._write(dbTables, content_type=CONTENT_TYPE.COUNT)
                return
            maxlength1 = len("Table")
            maxlength2 = len("Entries")
            for ctables in dbTables.values():
                for tables in ctables.values():
                    for table in tables:
                        maxlength1 = max(maxlength1, len(normalizeUnicode(table) or unicode(table)))
            for db, counts in dbTables.items():
                self._write("Database: %s" % unsafeSQLIdentificatorNaming(db) if db else "Current database")
                lines1 = "-" * (maxlength1 + 2)
                blank1 = " " * (maxlength1 - len("Table"))
                lines2 = "-" * (maxlength2 + 2)
                blank2 = " " * (maxlength2 - len("Entries"))
                self._write("+%s+%s+" % (lines1, lines2))
                self._write("| Table%s | Entries%s |" % (blank1, blank2))
                self._write("+%s+%s+" % (lines1, lines2))
                # NOTE(review): Python 2 — keys() is a sortable list
                sortedCounts = counts.keys()
                sortedCounts.sort(reverse=True)
                for count in sortedCounts:
                    tables = counts[count]
                    if count is None:
                        count = "Unknown"
                    tables.sort(key=lambda x: x.lower() if isinstance(x, basestring) else x)
                    for table in tables:
                        blank1 = " " * (maxlength1 - len(normalizeUnicode(table) or unicode(table)))
                        # NOTE(review): counts wider than "Entries" (7 digits)
                        # would make this padding negative — confirm intended
                        blank2 = " " * (maxlength2 - len(str(count)))
                        self._write("| %s%s | %d%s |" % (table, blank1, count, blank2))
                self._write("+%s+%s+\n" % (lines1, lines2))
        else:
            logger.error("unable to retrieve the number of entries for any table")
def dbTableValues(self, tableValues):
replication = None
rtable = None
dumpFP = None
appendToFile = False
warnFile = False
if tableValues is None:
return
db = tableValues["__infos__"]["db"]
if not db:
db = "All"
table = tableValues["__infos__"]["table"]
if conf.api:
self._write(tableValues, content_type=CONTENT_TYPE.DUMP_TABLE)
return
dumpDbPath = os.path.join(conf.dumpPath, unsafeSQLIdentificatorNaming(db))
if conf.dumpFormat == DUMP_FORMAT.SQLITE:
replication = Replication(os.path.join(conf.dumpPath, "%s.sqlite3" % unsafeSQLIdentificatorNaming(db)))
elif conf.dumpFormat in (DUMP_FORMAT.CSV, DUMP_FORMAT.HTML):
if not os.path.isdir(dumpDbPath):
try:
os.makedirs(dumpDbPath, 0755)
except:
warnFile = True
_ = unicodeencode(re.sub(r"[^\w]", "_", unsafeSQLIdentificatorNaming(db)))
dumpDbPath = os.path.join(conf.dumpPath, "%s-%s" % (_, hashlib.md5(unicodeencode(db)).hexdigest()[:8]))
if not os.path.isdir(dumpDbPath):
try:
os.makedirs(dumpDbPath, 0755)
except Exception, ex:
try:
tempDir = tempfile.mkdtemp(prefix="sqlmapdb")
except IOError, _:
errMsg = "unable to write to the temporary directory ('%s'). " % _
errMsg += "Please make sure that your disk is not full and "
errMsg += "that you have sufficient write permissions to "
errMsg += "create temporary files and/or directories"
raise SqlmapSystemException(errMsg)
warnMsg = "unable to create dump directory "
warnMsg += "'%s' (%s). " % (dumpDbPath, getSafeExString(ex))
warnMsg += "Using temporary directory '%s' instead" % tempDir
logger.warn(warnMsg)
| |
<gh_stars>10-100
# Copyright (C) 2010-2012 Canonical Ltd.
# Licenced under the txaws licence available at /LICENSE in the txaws source.
import os
from twisted.trial.unittest import TestCase
from txaws.wsdl import (
WSDLParseError, LeafSchema, NodeSchema, NodeItem, SequenceSchema,
SequenceItem, WSDLParser, etree)
class WsdlBaseTestCase(TestCase):
    # Shared base for WSDL tests: setting the class-level `skip` attribute
    # makes twisted.trial skip every test when lxml (etree) is unavailable.
    if not etree:
        skip = "lxml is either not installed or broken on your system."
class NodeSchemaTestCase(WsdlBaseTestCase):
    # Unit tests for NodeSchema: creation/validation errors, min_occurs
    # constraints, and XML dumping of NodeItem instances.

    def test_create_with_bad_tag(self):
        """
        L{NodeSchema.create} raises an error if the tag of the given element
        doesn't match the expected one.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        root = etree.fromstring("<egg><bar>spam</bar></egg>")
        error = self.assertRaises(WSDLParseError, schema.create, root)
        self.assertEqual("Expected response with tag 'foo', but got "
                         "'egg' instead", error.args[0])

    def test_add_with_invalid_min(self):
        """
        L{NodeSchema.add} allows the C{min_occurs} parameter to only be
        C{None}, zero or one.
        """
        schema = NodeSchema("foo")
        self.assertRaises(RuntimeError, schema.add, LeafSchema("bar"),
                          min_occurs=-1)
        self.assertRaises(RuntimeError, schema.add, LeafSchema("bar"),
                          min_occurs=2)

    def test_dump(self):
        """
        L{NodeSchema.dump} creates an L{etree.Element} out of a L{NodeItem}.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        foo = NodeItem(schema)
        foo.bar = "spam"
        self.assertEqual("<foo><bar>spam</bar></foo>",
                         etree.tostring(schema.dump(foo)))

    def test_dump_with_multiple_children(self):
        """
        L{NodeSchema.dump} supports multiple children.
        """
        schema = NodeSchema("foo", [LeafSchema("bar"), LeafSchema("egg")])
        foo = NodeItem(schema)
        foo.bar = "spam1"
        foo.egg = "spam2"
        self.assertEqual("<foo><bar>spam1</bar><egg>spam2</egg></foo>",
                         etree.tostring(schema.dump(foo)))

    def test_dump_with_missing_attribute(self):
        """
        L{NodeSchema.dump} ignores missing attributes if C{min_occurs} is zero.
        """
        schema = NodeSchema("foo")
        schema.add(LeafSchema("bar"), min_occurs=0)
        foo = NodeItem(schema)
        self.assertEqual("<foo/>", etree.tostring(schema.dump(foo)))
class NodeItemTestCase(WsdlBaseTestCase):
    """
    Tests for L{NodeItem}: reading and writing the child tags of a
    schema-backed XML node through plain Python attribute access.
    """
    def test_get(self):
        """
        The child leaf elements of a L{NodeItem} can be accessed as attributes.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        root = etree.fromstring("<foo><bar>egg</bar></foo>")
        foo = schema.create(root)
        self.assertEqual("egg", foo.bar)
    def test_get_with_many_children(self):
        """
        Multiple children are supported.
        """
        schema = NodeSchema("foo", [LeafSchema("bar"), LeafSchema("egg")])
        root = etree.fromstring("<foo><bar>spam1</bar><egg>spam2</egg></foo>")
        foo = schema.create(root)
        self.assertEqual("spam1", foo.bar)
        self.assertEqual("spam2", foo.egg)
    def test_get_with_namespace(self):
        """
        The child leaf elements of a L{NodeItem} can be accessed as
        attributes even when the element is qualified with an XML namespace.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        root = etree.fromstring("<foo xmlns=\"spam\"><bar>egg</bar></foo>")
        foo = schema.create(root)
        self.assertEqual("egg", foo.bar)
    def test_get_with_unknown_tag(self):
        """
        An error is raised when trying to access an attribute not in the
        schema.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        root = etree.fromstring("<foo><bar>egg</bar><spam>boom</spam></foo>")
        foo = schema.create(root)
        error = self.assertRaises(WSDLParseError, getattr, foo, "spam")
        self.assertEqual("Unknown tag 'spam'", error.args[0])
    def test_get_with_duplicate_tag(self):
        """
        An error is raised when trying to access an attribute associated
        with a tag that appears more than once.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        root = etree.fromstring("<foo><bar>spam1</bar><bar>spam2</bar></foo>")
        item = schema.create(root)
        error = self.assertRaises(WSDLParseError, getattr, item, "bar")
        self.assertEqual("Duplicate tag 'bar'", error.args[0])
    def test_get_with_missing_required_tag(self):
        """
        An error is raised when trying to access a required attribute and
        the associated tag is missing.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        root = etree.fromstring("<foo></foo>")
        item = schema.create(root)
        error = self.assertRaises(WSDLParseError, getattr, item, "bar")
        self.assertEqual("Missing tag 'bar'", error.args[0])
    def test_get_with_empty_required_tag(self):
        """
        An error is raised if an expected required tag is found but has an
        empty value.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        root = etree.fromstring("<foo><bar/></foo>")
        item = schema.create(root)
        error = self.assertRaises(WSDLParseError, getattr, item, "bar")
        self.assertEqual("Missing tag 'bar'", error.args[0])
    def test_get_with_non_required_tag(self):
        """
        No error is raised if a tag is missing and its min count is zero.
        """
        schema = NodeSchema("foo")
        schema.add(LeafSchema("bar"), min_occurs=0)
        root = etree.fromstring("<foo></foo>")
        foo = schema.create(root)
        self.assertIdentical(None, foo.bar)
    def test_get_with_reserved_keyword(self):
        """
        Attributes associated to tags named after reserved Python keywords
        can be accessed appending a '_' to the name.
        """
        schema = NodeSchema("foo", [LeafSchema("return")])
        root = etree.fromstring("<foo><return>true</return></foo>")
        foo = schema.create(root)
        # 'return' is a keyword, so the accessor is 'return_'
        self.assertEqual("true", foo.return_)
    def test_get_with_nested(self):
        """
        It is possible to access nested nodes.
        """
        schema = NodeSchema("foo", [NodeSchema("bar", [LeafSchema("egg")])])
        root = etree.fromstring("<foo><bar><egg>spam</egg></bar></foo>")
        foo = schema.create(root)
        self.assertEqual("spam", foo.bar.egg)
    def test_get_with_non_required_nested(self):
        """
        It is possible to access a non-required nested node that has no
        associated element in the XML yet, in that case a new element is
        created for it.
        """
        schema = NodeSchema("foo")
        schema.add(NodeSchema("bar", [LeafSchema("egg")]), min_occurs=0)
        root = etree.fromstring("<foo/>")
        foo = schema.create(root)
        foo.bar.egg = "spam"
        self.assertEqual("<foo><bar><egg>spam</egg></bar></foo>",
                         etree.tostring(schema.dump(foo)))
    def test_set_with_unknown_tag(self):
        """
        An error is raised when trying to set an attribute not in the schema.
        """
        schema = NodeSchema("foo")
        foo = schema.create()
        error = self.assertRaises(WSDLParseError, setattr, foo, "bar", "egg")
        self.assertEqual("Unknown tag 'bar'", error.args[0])
    def test_set_with_duplicate_tag(self):
        """
        An error is raised when trying to set an attribute associated
        with a tag that appears more than once.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        root = etree.fromstring("<foo><bar>spam1</bar><bar>spam2</bar></foo>")
        foo = schema.create(root)
        error = self.assertRaises(WSDLParseError, setattr, foo, "bar", "egg")
        self.assertEqual("Duplicate tag 'bar'", error.args[0])
    def test_set_with_required_tag(self):
        """
        An error is raised when trying to set a required attribute to C{None}.
        """
        schema = NodeSchema("foo", [LeafSchema("bar")])
        root = etree.fromstring("<foo><bar>spam</bar></foo>")
        foo = schema.create(root)
        error = self.assertRaises(WSDLParseError, setattr, foo, "bar", None)
        self.assertEqual("Missing tag 'bar'", error.args[0])
        # the failed assignment must leave the original value untouched
        self.assertEqual("spam", foo.bar)
    def test_set_with_non_required_tag(self):
        """
        It is possible to set a non-required tag value to C{None}, in that
        case the element will be removed if present.
        """
        schema = NodeSchema("foo")
        schema.add(LeafSchema("bar"), min_occurs=0)
        root = etree.fromstring("<foo><bar>spam</bar></foo>")
        foo = schema.create(root)
        foo.bar = None
        self.assertEqual("<foo/>", etree.tostring(schema.dump(foo)))
    def test_set_with_non_leaf_tag(self):
        """
        An error is raised when trying to set a non-leaf attribute to
        a value other than C{None}.
        """
        schema = NodeSchema("foo", [NodeSchema("bar", [LeafSchema("egg")])])
        root = etree.fromstring("<foo><bar><egg>spam</egg></bar></foo>")
        foo = schema.create(root)
        error = self.assertRaises(WSDLParseError, setattr, foo, "bar", "yo")
        self.assertEqual("Can't set non-leaf tag 'bar'", error.args[0])
    def test_set_with_optional_node_tag(self):
        """
        It is possible to set an optional node tag to C{None}, in that
        case it will be removed from the tree.
        """
        schema = NodeSchema("foo")
        schema.add(NodeSchema("bar", [LeafSchema("egg")]), min_occurs=0)
        root = etree.fromstring("<foo><bar><egg>spam</egg></bar></foo>")
        foo = schema.create(root)
        foo.bar = None
        self.assertEqual("<foo/>", etree.tostring(schema.dump(foo)))
    def test_set_with_sequence_tag(self):
        """
        It is possible to set a sequence tag to C{None}, in that case
        all its children will be removed
        """
        schema = NodeSchema("foo")
        schema.add(SequenceSchema("bar",
                                  NodeSchema("item", [LeafSchema("egg")])))
        # NOTE: "</foo>" is split oddly across adjacent literals below;
        # the concatenated result is still well-formed XML.
        root = etree.fromstring("<foo>"
                                "<bar><item><egg>spam</egg></item></bar><"
                                "/foo>")
        foo = schema.create(root)
        foo.bar = None
        self.assertEqual("<foo><bar/></foo>", etree.tostring(schema.dump(foo)))
    def test_set_with_required_non_leaf_tag(self):
        """
        An error is raised when trying to set a required non-leaf tag
        to C{None}.
        """
        schema = NodeSchema("foo", [NodeSchema("bar", [LeafSchema("egg")])])
        root = etree.fromstring("<foo><bar><egg>spam</egg></bar></foo>")
        foo = schema.create(root)
        error = self.assertRaises(WSDLParseError, setattr, foo, "bar", None)
        self.assertEqual("Missing tag 'bar'", error.args[0])
        # the node must survive the rejected assignment
        self.assertTrue(hasattr(foo, "bar"))
class SequenceSchemaTestCase(WsdlBaseTestCase):

    def test_create_with_bad_tag(self):
        """
        L{SequenceSchema.create} rejects elements whose tag differs from
        the one declared by the schema.
        """
        spec = SequenceSchema("foo", NodeSchema("item", [LeafSchema("bar")]))
        element = etree.fromstring("<spam><item><bar>egg</bar></item></spam>")
        err = self.assertRaises(WSDLParseError, spec.create, element)
        self.assertEqual(
            "Expected response with tag 'foo', but got 'spam' instead",
            err.args[0])

    def test_set_with_leaf(self):
        """
        A leaf node is not an acceptable child for a L{SequenceSchema}.
        """
        spec = SequenceSchema("foo")
        err = self.assertRaises(RuntimeError, spec.set, LeafSchema("bar"))
        self.assertEqual("Sequence can't have leaf children", str(err))

    def test_set_with_previous_child(self):
        """
        A sequence holds exactly one child schema; setting a second one
        fails.
        """
        spec = SequenceSchema("foo", NodeSchema("item", [LeafSchema("bar")]))
        err = self.assertRaises(RuntimeError, spec.set, NodeSchema("egg"))
        self.assertEqual("Sequence has already a child", str(err))

    def test_set_with_no_min_or_max(self):
        """
        Both C{min_occurs} and C{max_occurs} are mandatory when setting
        the sequence child.
        """
        spec = SequenceSchema("foo")
        child = NodeSchema("item", [LeafSchema("bar")])
        for bounds in ({"min_occurs": 0, "max_occurs": None},
                       {"min_occurs": None, "max_occurs": 1}):
            err = self.assertRaises(RuntimeError, spec.set, child, **bounds)
            self.assertEqual("Sequence node without min or max", str(err))

    def test_dump(self):
        """
        L{SequenceSchema.dump} serializes a L{SequenceItem} back into an
        L{etree.Element}.
        """
        spec = SequenceSchema("foo", NodeSchema("item", [LeafSchema("bar")]))
        sequence = SequenceItem(spec)
        sequence.append().bar = "egg"
        self.assertEqual("<foo><item><bar>egg</bar></item></foo>",
                         etree.tostring(spec.dump(sequence)))

    def test_dump_with_many_items(self):
        """
        Serialization handles sequences holding several child items.
        """
        spec = SequenceSchema("foo", NodeSchema("item", [LeafSchema("bar")]))
        sequence = SequenceItem(spec)
        sequence.append().bar = "spam0"
        sequence.append().bar = "spam1"
        self.assertEqual("<foo>"
                         "<item><bar>spam0</bar></item>"
                         "<item><bar>spam1</bar></item>"
                         "</foo>",
                         etree.tostring(spec.dump(sequence)))
class SequenceItemTestCase(WsdlBaseTestCase):
def test_get(self):
"""
The child elements of a L{SequenceItem} can be accessed as attributes.
"""
schema = SequenceSchema("foo", NodeSchema("item", [LeafSchema("bar")]))
root = etree.fromstring("<foo><item><bar>egg</bar></item></foo>")
foo = schema.create(root)
self.assertEqual("egg", foo[0].bar)
def test_get_items(self):
"""L{SequenceItem} supports elements with many child items."""
schema = SequenceSchema("foo", NodeSchema("item", [LeafSchema("bar")]))
root = etree.fromstring("<foo>"
"<item><bar>egg0</bar></item>"
"<item><bar>egg1</bar></item>"
"</foo>")
foo = schema.create(root)
self.assertEqual("egg0", foo[0].bar)
self.assertEqual("egg1", foo[1].bar)
def test_get_with_namespace(self):
"""
The child elements of a L{SequenceItem} can be accessed as attributes.
"""
schema = SequenceSchema("foo", NodeSchema("item", [LeafSchema("bar")]))
root = etree.fromstring("<foo xmlns=\"spam\">"
"<item><bar>egg</bar></item>"
"</foo>")
foo = schema.create(root)
self.assertEqual("egg", foo[0].bar)
def test_get_with_non_existing_index(self):
"""An error is raised when trying to access a non existing item."""
schema = SequenceSchema("foo", NodeSchema("item", [LeafSchema("bar")]))
root = etree.fromstring("<foo><item><bar>egg</bar></item></foo>")
foo = schema.create(root)
error = self.assertRaises(WSDLParseError, foo.__getitem__, 1)
self.assertEqual("Non existing item in tag 'foo'", error.args[0])
def test_get_with_index_higher_than_max(self):
"""
An error is raised when trying to access an item above the allowed
max value.
"""
schema = SequenceSchema("foo")
schema.set(NodeSchema("item", [LeafSchema("bar")]), min_occurs=0,
max_occurs=1)
root = etree.fromstring("<foo>"
"<item><bar>egg0</bar></item>"
"<item><bar>egg1</bar></item>"
"</foo>")
foo = schema.create(root)
error = self.assertRaises(WSDLParseError, | |
stop_date, start_time, stop_time, asHistoryBuffer=False,
decimate=False, notNone=False, N=0, cache=True, fallback=True,
schemas = None, lasts = True):
sch = [s for s in self.is_attribute_archived(
attribute, preferent = True, start = start_time, stop = stop_time)
if (not schemas or s in schemas)]
if schemas is not None:
schemas = fn.toList(schemas) if schemas is not None else []
if not sch:
self.log.warning('In get_attribute_values_from_any(%s): '
'No valid schema at %s'%(attribute,start_date))
return []
self.log.info('In get_attribute_values_from_any(%s, %s, %s, %s)' % (
attribute, sch, start_date, stop_date))
rd = Schemas.getReader(sch.pop(0))
#@debug
self.log.debug('Using %s schema at %s'%(rd.schema,start_date))
## @TODO, this if is True if attribute is archived on alias only
# all this double-checks are slowing down queries, a solution
# must be found (is_attribute_archived on list?)
if not rd.is_attribute_archived(attribute):
# Stored in preferred schema via alias
attr = self.get_attribute_alias(attribute)
attr = self.get_attribute_model(attr)
if attr!=attribute:
self.log.info('%s => %s' % (attribute, attr))
attribute = attr
#@TODO, implemented classes should have polimorphic methods
values = rd.get_attribute_values(attribute,start_date,stop_date,
asHistoryBuffer=asHistoryBuffer,decimate=decimate,
notNone=notNone,N=N)
if len(values):
self.log.debug('%d values: %s,...'
% (len(values),str(values[0])))
# If no data, it just tries the next database
if fallback:
if (values is None or not len(values)):
gaps = [(start_time,stop_time)]
else:
r = max((300,.1*(stop_time-start_time)))
gaps = get_gaps(values,r,
start = start_time if not N else 0,
stop = stop_time if not N else 0)
self.log.debug('get_gaps(%d): %d gaps' % (len(values),len(gaps)))
fallback = []
for gap0,gap1 in gaps:
prev = rd.schema #every iter searches through all schemas on each gap
sch = [s for s in self.is_attribute_archived(attribute,
start = gap0, stop = gap1, preferent=False)
if (s != prev and (not schemas or s in schemas))]
if not sch:
break
self.log.warning('trying fallbacks: %s' % str(sch))
gapvals = []
while not len(gapvals) and len(sch):
self.log.info(#'In get_attribute_values(%s,%s,%s)(%s): '
'fallback to %s as %s returned no data in (%s,%s)'%(
#attribute,gap0,gap1,prev,
sch[0],rd.schema,time2str(gap0),time2str(gap1)))
gapvals = self.configs[sch[0]
].get_attribute_values(attribute,gap0,gap1,N=N,
asHistoryBuffer=asHistoryBuffer,decimate=decimate)
prev,sch = sch[0],sch[1:]
if len(gapvals):
fallback.extend(gapvals)
if len(fallback):
tf = fn.now()
values = sorted(values+fallback)
self.log.debug('Adding %d values from fallback took '
'%f seconds' % (len(fallback),fn.now()-tf))
# Loading last values to fill initial gap
if decimate:
gap = start_time + (decimate if fn.isNumber(decimate)
else (stop_time-start_time)/utils.MAX_RESOLUTION)
else:
gap = start_time + 60.
if lasts and (not len(values) or not len(values[0]) or values[0][0] > gap):
self.log.warning('No %s values at %s, loading previous values' % (
attribute, fn.time2str(start_time)))
lasts = self.load_last_values(attribute, epoch=start_time)
lasts = [v for k,v in lasts.items() if
k not in ('hdb','tdb') and v is not None and len(v)]
lasts = sorted(t for t in lasts if t and len(t))
if len(lasts):
values.insert(0,tuple(lasts[-1][
:len(values[0]) if values else 2]))
values = self.decimate_values(values, decimate)
return values
    def get_attribute_values_from_hdb(self, attribute, db,
            start_date, stop_date, decimate, asHistoryBuffer,
            N, notNone, GET_LAST):
        """
        Query MySQL HDB/TDB databases to extract the attribute data

        :param attribute: attribute name to query
        :param db: database object providing get_attribute_values()
        :param start_date: begin of the queried interval
        :param stop_date: end of the queried interval
        :param decimate: not used by this method (decimation is applied
            by the callers)
        :param asHistoryBuffer: not used by this method; kept for
            signature symmetry with the other get_attribute_values_* calls
        :param N: if N>0 limit the number of values returned by the query
        :param notNone: drop rows whose read value is None when casting
        :param GET_LAST: if True, query with no stop bound (latest values)
        :return: list of (epoch, value) tuples, [] if nothing was found
        """
        # CHOOSING DATABASE METHODS
        if not self.is_hdbpp:
            self.log.debug('get_attribute_values_from_hdb(%s,%s)' %
                (attribute, db))
            try:
                full_name,ID,data_type,data_format,writable = \
                    db.get_attribute_descriptions(attribute)[0]
            except Exception,e:
                raise Exception('%s_AttributeNotArchived: %s'
                    %(attribute,e))
            # map the numeric Tango type/format codes into a python type
            # and a PyTango.AttrDataFormat value
            data_type,data_format = (utils.cast_tango_type(
                PyTango.CmdArgType.values[data_type]),
                PyTango.AttrDataFormat.values[data_format])
            self.log.debug('%s, ID=%s, data_type=%s, data_format=%s'
                %(attribute,ID,data_type,data_format))
            table = get_table_name(ID)
            method = db.get_attribute_values
        else:
            # HDB++: the attribute name itself addresses the data; values
            # are treated as scalar floats here (presumably cast
            # downstream -- TODO confirm against the HDB++ db layer)
            table = attribute
            method = db.get_attribute_values
            data_type = float
            data_format = PyTango.AttrDataFormat.SCALAR
        #######################################################################
        # QUERYING THE DATABASE
        #@TODO: This retrying should be moved down to ArchivingDB class instead
        retries,t0,s0,s1 = 0,time.time(),start_date,stop_date
        MAX_RETRIES = 2
        # retry loop is also time-bounded: give up 10 seconds after t0
        while retries<MAX_RETRIES and t0>(time.time()-10):
            if retries:
                #(reshape/retry to avoid empty query bug in python-mysql)
                self.log.debug('\t%s Query (%s,%s,%s) returned 0 values, '
                    'retrying ...' % (self.schema,attribute,s0,s1))
                # widen the queried window by 30s on each side and retry
                s0,s1 = epoch2str(str2epoch(s0)-30),epoch2str(str2epoch(s1)+30)
            result = method(table,s0,s1 if not GET_LAST else None,
                N=N,unixtime=True)
            if len(result):
                if retries:
                    # window was widened; filter back to the requested bounds
                    result = [r for r in result if start_date<=r[0]<=stop_date]
                break
            retries+=1
        if not result:
            self.log.warning('Empty %s query after %d retries? (%s) = [0] in %ss'
                % (self.schema,retries,str((table,start_date,stop_date,GET_LAST,N,0)),
                time.time()-t0))
            return []
        l0 = len(result)
        t1 = time.time()
        #@debug
        self.log.info('\tQuery(%s,%s,%s,%s,%s) = [%d] in %s s'
            %(table,start_date,stop_date,GET_LAST,N,l0,t1-t0))
        # remember first/last timestamps actually read (presumably used by
        # callers for caching -- TODO confirm)
        self.last_reads = result and (result[0][0],result[-1][0]) or (1e10,1e10)
        try:
            values = self.extract_mysql_data(result,
                data_type,data_format,notNone)
            values = patch_booleans(values)
        except Exception,e:
            self.log.info(traceback.format_exc())
            raise Exception('Reader.UnableToConvertData(%s,format=%s)'
                % (attribute,data_format),str(e))
        self.log.info('get_from_db(%s)' % str(len(values) and values[0]))
        return values
def extract_mysql_data(self, result, data_type, data_format, notNone):
# CASTING DATATYPES AND DECIMATION
#Returning a list of (epoch,value) tuples
values = []
t1 = time.time()
## The following queries are optimized for performance
#getting read_value index (w/out dimension)
ix = 1 if len(result[0])<4 else 2
#THIS CAST METHODS ARE USED WHEN PARSING DATA FROM SPECTRUMS
if data_type is bool:
cast_type = mysql2bool
elif data_type is int:
#Because int cannot parse '4.0'
cast_type = lambda x:int(float(x))
else:
cast_type = data_type
self.log.debug(str(data_type)+' '+str(notNone))
if data_format==PyTango.AttrDataFormat.SPECTRUM:
dt,df = (cast_type,0.0) if data_type in (int,bool) \
else (data_type,None)
if notNone:
values = [(w[0],mysql2array(w[ix],dt,df))
for w in result if w[ix] is not None]
else:
values = [(w[0],mysql2array(w[ix],dt,df)
if w[ix] else None) for w in result]
#SCALAR values, queries are optimized for performance
elif data_type in (bool,) and notNone:
values = [(w[0],cast_type(w[ix]))
for w in result if w is not None]
elif data_type in (bool,):
values = [(w[0],cast_type(w[ix])) for w in result]
elif notNone:
values = [(w[0],w[ix]) for w in result if w[ix] is not None]
else:
values = [(w[0],w[ix]) for w in result]
#@debug
self.log.info('\tParsed [%d] in %s s'%(len(values),time.time()-t1))
return values
def decimate_values(self, values, decimate):
"""
proxy method to parse arguments for utils.decimation
Removal of None values is always done
Decimation by data_has_changed is done always
Decimation on window is only done if decimate is callable (pickfirst)
"""
l0 = len(values)
if len(values) > 128 and decimate:
decimate,window = decimate if isSequence(decimate) \
else (decimate,'0')
if isString(decimate):
try:
decimate = eval(decimate)
except:
self.log.info('Decimation(%s)?: %s'
% (decimate, traceback.format_exc()))
values = utils.decimation(values, decimate, window=window,
logger_obj=self.log)
self.log.debug('decimate([%d],%s):[%d]' % (l0,decimate,len(values)))
return values
    def get_attributes_values(self,attributes,start_date,stop_date=None,
            asHistoryBuffer=False,decimate=False,notNone=False,N=0,
            cache=True,fallback=True,schemas=None,
            correlate=False, trace = False, text = False, subprocess=True,
            lasts=False):
        """
        This method reads values for a list of attributes between specified dates.
        :param attributes: list of attributes
        :param start_date: timestamp of the first value
        :param stop_date: timestamp of the last value
        :param correlate: group values by time using first attribute timestamps
        :param asHistoryBuffer: return a history buffer object instead of a list (for trends)
        :param text: return a tabulated text instead of a dictionary of values
        :param N: if N>0, only the last N values will be returned
        :param trace: print out the values obtained
        :param decimate: decimation argument forwarded to each per-attribute query
        :param notNone: forwarded; drop None values from each attribute's results
        :param cache: forwarded to get_attribute_values
        :param fallback: forwarded; allow falling back to other schemas
        :param schemas: forwarded; restrict the query to these schemas
        :param subprocess: forwarded to get_attribute_values
        :param lasts: forwarded; load previous values to fill the initial gap
        :return: a dictionary with the values of each attribute or (if text=True) a text with tabulated columns
        """
        if not attributes:
            raise Exception('Empty List!')
        start = time.time()
        start_date,start_time,stop_date,stop_time = \
            self.get_time_interval(start_date,stop_date)
        # one independent query per attribute; all keyword options are
        # forwarded unchanged to get_attribute_values
        values = dict([(attr,
            self.get_attribute_values(attr, start_date, stop_date,
                asHistoryBuffer, decimate, notNone, N,
                cache, fallback, schemas, subprocess=subprocess,
                lasts=lasts))
            for attr in attributes])
        self.log.debug('Query finished in %d milliseconds'%(1000*(time.time()-start)))
        if correlate or text:
            if len(attributes)>1:
                # group the per-attribute series on a common time base
                table = self.correlate_values(values,str2time(stop_date),
                    resolution=(correlate if correlate is not True
                    and fn.isNumber(correlate) else None))
            else:
                table = values
            if trace or text:
                csv = self.export_to_text(table,order=list(attributes))
                if text: return csv
                elif trace: print(csv)
            return table
        else:
            if trace: print(values)
            return values
@staticmethod
def export_to_text(table,order=None,**kwargs):
"""
It will convert a [(timestamp,value)] array in a CSV-like text.
Order will be used to set the order to data columns (date and timestamp will be always first and second).
Other parameters are available:
sep : character to split values in each row
arrsep : character to split array values in a data column
linesep : characters to insert between lines
"""
sep = kwargs.get('sep','\t')
arrsep = kwargs.get('arrsep',kwargs.get('separator',', '))
linesep = kwargs.get('linesep','\n')
start = time.time()
if not hasattr(table,'keys'):
table = {'attribute':table}
if not order or not all(k in order for k in table):
keys = list(sorted(table.keys()))
else:
keys = sorted(table.keys(),key=order.index)
csv = sep.join(['date','time']+keys)+linesep
def value_to_text(s):
v = (str(s) if not fandango.isSequence(s)
else arrsep.join(map(str,s))).replace('None','')
return v
time_to_text = | |
# -*- coding: utf-8 -*-
"""
A Theil-Sen Estimator for Multiple Linear Regression Model
"""
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import warnings
from itertools import combinations
import numpy as np
from scipy import linalg
from scipy.special import binom
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import check_random_state
from ..utils import check_X_y
from ..utils._joblib import Parallel, delayed, effective_n_jobs
from ..exceptions import ConvergenceWarning
_EPSILON = np.finfo(np.double).eps
def _modified_weiszfeld_step(X, x_old):
    """Perform a single step of the modified Weiszfeld algorithm.

    One iteration refining the current approximation of the spatial
    median (L1 median); the scheme is a form of iteratively re-weighted
    least squares.

    Parameters
    ----------
    X : array, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    x_old : array, shape = [n_features]
        Current start vector.

    Returns
    -------
    x_new : array, shape = [n_features]
        New iteration step.

    References
    ----------
    - On Computation of Spatial Median for Robust Data Mining, 2005
      http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
    """
    residuals = X - x_old
    distances = np.sqrt(np.sum(residuals ** 2, axis=1))
    far_enough = distances >= _EPSILON

    # x_old coincides (numerically) with one of the samples exactly when
    # at least one distance collapses below _EPSILON.
    coincides = int(far_enough.sum() < X.shape[0])

    residuals = residuals[far_enough]
    distances = distances[far_enough][:, np.newaxis]
    quotient_norm = linalg.norm(np.sum(residuals / distances, axis=0))

    if quotient_norm > _EPSILON:  # guard against division by zero
        new_direction = (np.sum(X[far_enough, :] / distances, axis=0)
                         / np.sum(1 / distances, axis=0))
    else:
        new_direction = 1.
        quotient_norm = 1.

    return (max(0., 1. - coincides / quotient_norm) * new_direction
            + min(1., coincides / quotient_norm) * x_old)
def _spatial_median(X, max_iter=300, tol=1.e-3):
    """Estimate the spatial median (L1 median) of the rows of X.

    The spatial median belongs to the family of M-estimators: given p
    points in an n-dimensional space, it is the point minimizing the sum
    of Euclidean distances to all of them.  It is approximated here with
    the modified Weiszfeld iteration.

    Parameters
    ----------
    X : array, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    max_iter : int, optional
        Maximum number of iterations.  Default is 300.

    tol : float, optional
        Stop the algorithm if spatial_median has converged. Default is 1.e-3.

    Returns
    -------
    n_iter : int
        Number of iterations needed.

    spatial_median : array, shape = [n_features]
        Spatial median.

    References
    ----------
    - On Computation of Spatial Median for Robust Data Mining, 2005
      http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
    """
    if X.shape[1] == 1:
        # One-dimensional data: the spatial median is the plain median.
        return 1, np.median(X.ravel())

    tol **= 2  # convergence is tested on the squared norm of the step
    estimate = np.mean(X, axis=0)
    for n_iter in range(max_iter):
        refined = _modified_weiszfeld_step(X, estimate)
        if np.sum((estimate - refined) ** 2) < tol:
            break
        estimate = refined
    else:
        # loop exhausted without hitting the break: no convergence
        warnings.warn("Maximum number of iterations {max_iter} reached in "
                      "spatial median for TheilSen regressor."
                      "".format(max_iter=max_iter), ConvergenceWarning)
    return n_iter, refined
def _breakdown_point(n_samples, n_subsamples):
"""Approximation of the breakdown point.
Parameters
----------
n_samples : int
Number of samples.
n_subsamples : int
Number of subsamples to consider.
Returns
-------
breakdown_point : float
Approximation of breakdown point.
"""
return 1 - (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) +
n_subsamples - 1) / n_samples
def _lstsq(X, y, indices, fit_intercept):
"""Least Squares Estimator for TheilSenRegressor class.
This function calculates the least squares method on a subset of rows of X
and y defined by the indices array. Optionally, an intercept column is
added if intercept is set to true.
Parameters
----------
X : array, shape = [n_samples, n_features]
Design matrix, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector, where n_samples is the number of samples.
indices : array, shape = [n_subpopulation, n_subsamples]
Indices of all subsamples with respect to the chosen subpopulation.
fit_intercept : bool
Fit intercept or not.
Returns
-------
weights : array, shape = [n_subpopulation, n_features + intercept]
Solution matrix of n_subpopulation solved least square problems.
"""
fit_intercept = int(fit_intercept)
n_features = X.shape[1] + fit_intercept
n_subsamples = indices.shape[1]
weights = np.empty((indices.shape[0], n_features))
X_subpopulation = np.ones((n_subsamples, n_features))
# gelss need to pad y_subpopulation to be of the max dim of X_subpopulation
y_subpopulation = np.zeros((max(n_subsamples, n_features)))
lstsq, = get_lapack_funcs(('gelss',), (X_subpopulation, y_subpopulation))
for index, subset in enumerate(indices):
X_subpopulation[:, fit_intercept:] = X[subset, :]
y_subpopulation[:n_subsamples] = y[subset]
weights[index] = lstsq(X_subpopulation,
y_subpopulation)[1][:n_features]
return weights
class TheilSenRegressor(LinearModel, RegressorMixin):
"""Theil-Sen Estimator: robust multivariate regression model.
The algorithm calculates least square solutions on subsets with size
n_subsamples of the samples in X. Any value of n_subsamples between the
number of features and samples leads to an estimator with a compromise
between robustness and efficiency. Since the number of least square
solutions is "n_samples choose n_subsamples", it can be extremely large
and can therefore be limited with max_subpopulation. If this limit is
reached, the subsets are chosen randomly. In a final step, the spatial
median (or L1 median) is calculated of all least square solutions.
Read more in the :ref:`User Guide <theil_sen_regression>`.
Parameters
----------
fit_intercept : boolean, optional, default True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
max_subpopulation : int, optional, default 1e4
Instead of computing with a set of cardinality 'n choose k', where n is
the number of samples and k is the number of subsamples (at least
number of features), consider only a stochastic subpopulation of a
given maximal size if 'n choose k' is larger than max_subpopulation.
For other than small problem sizes this parameter will determine
memory usage and runtime if n_subsamples is not changed.
n_subsamples : int, optional, default None
Number of samples to calculate the parameters. This is at least the
number of features (plus 1 if fit_intercept=True) and the number of
samples as a maximum. A lower number leads to a higher breakdown
point and a low efficiency while a high number leads to a low
breakdown point and a high efficiency. If None, take the
minimum number of subsamples leading to maximal robustness.
If n_subsamples is set to n_samples, Theil-Sen is identical to least
squares.
max_iter : int, optional, default 300
Maximum number of iterations for the calculation of spatial median.
tol : float, optional, default 1.e-3
Tolerance when calculating spatial median.
random_state : int, RandomState instance or None, optional, default None
A random number generator instance to define the state of the random
permutations generator. If int, random_state is the seed used by the
random number generator; If RandomState instance, random_state is the
random number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
n_jobs : int or None, optional (default=None)
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (median of distribution).
intercept_ : float
Estimated intercept of regression model.
breakdown_ : float
Approximated breakdown point.
n_iter_ : int
Number of iterations needed for the spatial median.
n_subpopulation_ : int
Number of combinations taken into account from 'n choose k', where n is
the number of samples and k is the number of subsamples.
Examples
--------
>>> from sklearn.linear_model import TheilSenRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(
... n_samples=200, n_features=2, noise=4.0, random_state=0)
>>> reg = TheilSenRegressor(random_state=0).fit(X, y)
>>> reg.score(X, y) # doctest: +ELLIPSIS
0.9884...
>>> reg.predict(X[:1,])
array([-31.5871...])
References
----------
- Theil-Sen Estimators in a Multiple Linear Regression Model, 2009
<NAME>, <NAME>, <NAME> and <NAME>
http://home.olemiss.edu/~xdang/papers/MTSE.pdf
"""
    def __init__(self, fit_intercept=True, copy_X=True,
                 max_subpopulation=1e4, n_subsamples=None, max_iter=300,
                 tol=1.e-3, random_state=None, n_jobs=None, verbose=False):
        # Store the constructor parameters as-is; per scikit-learn
        # convention, validation happens in fit(), not here.
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        # NOTE(review): coercing to int here deviates from the sklearn rule
        # that __init__ should not modify its parameters — confirm intended.
        self.max_subpopulation = int(max_subpopulation)
        self.n_subsamples = n_subsamples
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.verbose = verbose
| |
"""
xorp.py: defines routing services provided by the XORP routing suite.
"""
import logging
from core.services.coreservices import CoreService
class XorpRtrmgr(CoreService):
    """
    XORP router manager service builds a config.boot file based on other
    enabled XORP services, and launches necessary daemons upon startup.
    """
    name = "xorp_rtrmgr"
    executables = ("xorp_rtrmgr",)
    group = "XORP"
    dirs = ("/etc/xorp",)
    configs = ("/etc/xorp/config.boot",)
    startup = (
        "xorp_rtrmgr -d -b %s -l /var/log/%s.log -P /var/run/%s.pid"
        % (configs[0], name, name),
    )
    shutdown = ("killall xorp_rtrmgr",)
    validate = ("pidof xorp_rtrmgr",)
    @classmethod
    def generate_config(cls, node, filename):
        """
        Return config.boot configuration file text.

        Other services that depend on this one contribute their
        configuration through their generatexorpconfig() hooks, which are
        invoked here. The filename argument is currently ignored.
        """
        cfg = "interfaces {\n"
        for ifc in node.netifs():
            cfg += " interface %s {\n" % ifc.name
            cfg += "\tvif %s {\n" % ifc.name
            cfg += "".join(map(cls.addrstr, ifc.addrlist))
            cfg += cls.lladdrstr(ifc)
            cfg += "\t}\n"
            cfg += " }\n"
        cfg += "}\n\n"
        for s in node.services:
            # bug fix: the previous implementation used list.index() and
            # treated the resulting ValueError as control flow, logging a
            # spurious exception for every service that simply does not
            # depend on xorp_rtrmgr. A membership test expresses the intent.
            if cls.name in s.dependencies:
                cfg += s.generatexorpconfig(node)
        return cfg
    @staticmethod
    def addrstr(x):
        """
        Map an "addr/prefixlen" string to a XORP address config statement.
        """
        addr, plen = x.split("/")
        cfg = "\t address %s {\n" % addr
        cfg += "\t\tprefix-length: %s\n" % plen
        cfg += "\t }\n"
        return cfg
    @staticmethod
    def lladdrstr(ifc):
        """
        Build a link-local address entry (required by OSPFv3) from the
        interface hardware address.
        """
        cfg = "\t address %s {\n" % ifc.hwaddr.tolinklocal()
        cfg += "\t\tprefix-length: 64\n"
        cfg += "\t }\n"
        return cfg
class XorpService(CoreService):
    """
    Parent class for XORP services. Defines properties and methods
    common to XORP's routing daemons.
    """
    name = None
    executables = ("xorp_rtrmgr",)
    group = "XORP"
    dependencies = ("xorp_rtrmgr",)
    dirs = ()
    configs = ()
    startup = ()
    shutdown = ()
    meta = "The config file for this service can be found in the xorp_rtrmgr service."
    @staticmethod
    def fea(forwarding):
        """
        Helper to add a forwarding engine entry to the config file.
        """
        return "".join(
            [
                "fea {\n",
                " %s {\n" % forwarding,
                "\tdisable:false\n",
                " }\n",
                "}\n",
            ]
        )
    @staticmethod
    def mfea(forwarding, ifcs):
        """
        Helper to add a multicast forwarding engine entry to the config file.
        """
        # collect every non-control interface plus the PIM register vif
        names = [
            ifc.name
            for ifc in ifcs
            if not (hasattr(ifc, "control") and ifc.control is True)
        ]
        names.append("register_vif")
        parts = ["plumbing {\n", " %s {\n" % forwarding]
        for vif in names:
            parts.append("\tinterface %s {\n" % vif)
            parts.append("\t vif %s {\n" % vif)
            parts.append("\t\tdisable: false\n")
            parts.append("\t }\n")
            parts.append("\t}\n")
        parts.append(" }\n")
        parts.append("}\n")
        return "".join(parts)
    @staticmethod
    def policyexportconnected():
        """
        Helper to add a policy statement for exporting connected routes.
        """
        return "".join(
            [
                "policy {\n",
                " policy-statement export-connected {\n",
                "\tterm 100 {\n",
                "\t from {\n",
                '\t\tprotocol: "connected"\n',
                "\t }\n",
                "\t}\n",
                " }\n",
                "}\n",
            ]
        )
    @staticmethod
    def routerid(node):
        """
        Helper to return the first IPv4 address of a node as its router ID.
        """
        for ifc in node.netifs():
            if getattr(ifc, "control", False) is True:
                continue
            for a in ifc.addrlist:
                if "." in a:
                    return a.split("/")[0]
        # no IPv4 address found; fall back to a null router ID
        return "0.0.0.0"
    @classmethod
    def generate_config(cls, node, filename):
        # XORP services do not write their own files; see generatexorpconfig
        return ""
    @classmethod
    def generatexorpconfig(cls, node):
        # hook for subclasses to contribute to the unified config.boot
        return ""
class XorpOspfv2(XorpService):
    """
    The OSPFv2 service provides IPv4 routing for wired networks. It does
    not build its own configuration file but has hooks for adding to the
    unified XORP configuration file.
    """
    name = "XORP_OSPFv2"
    @classmethod
    def generatexorpconfig(cls, node):
        """Emit an ospf4 protocol block with every non-control IPv4 address."""
        parts = [
            cls.fea("unicast-forwarding4"),
            "\nprotocols {\n",
            " ospf4 {\n",
            "\trouter-id: %s\n" % cls.routerid(node),
            "\tarea 0.0.0.0 {\n",
        ]
        for ifc in node.netifs():
            if getattr(ifc, "control", False) is True:
                continue
            parts.append("\t interface %s {\n" % ifc.name)
            parts.append("\t\tvif %s {\n" % ifc.name)
            for a in ifc.addrlist:
                if "." not in a:
                    continue
                parts.append("\t\t address %s {\n" % a.split("/")[0])
                parts.append("\t\t }\n")
            parts.append("\t\t}\n")
            parts.append("\t }\n")
        parts.append("\t}\n")
        parts.append(" }\n")
        parts.append("}\n")
        return "".join(parts)
class XorpOspfv3(XorpService):
    """
    The OSPFv3 service provides IPv6 routing. It does
    not build its own configuration file but has hooks for adding to the
    unified XORP configuration file.
    """
    name = "XORP_OSPFv3"
    @classmethod
    def generatexorpconfig(cls, node):
        """Emit an ospf6 protocol block for every non-control interface."""
        parts = [
            cls.fea("unicast-forwarding6"),
            "\nprotocols {\n",
            " ospf6 0 { /* Instance ID 0 */\n",
            "\trouter-id: %s\n" % cls.routerid(node),
            "\tarea 0.0.0.0 {\n",
        ]
        for ifc in node.netifs():
            if getattr(ifc, "control", False) is True:
                continue
            parts.append("\t interface %s {\n" % ifc.name)
            parts.append("\t\tvif %s {\n" % ifc.name)
            parts.append("\t\t}\n")
            parts.append("\t }\n")
        parts.append("\t}\n")
        parts.append(" }\n")
        parts.append("}\n")
        return "".join(parts)
class XorpBgp(XorpService):
    """
    IPv4 inter-domain routing. AS numbers and peers must be customized.
    """
    name = "XORP_BGP"
    custom_needed = True
    @classmethod
    def generatexorpconfig(cls, node):
        """Emit a sample bgp protocol block; AS numbers/peers are placeholders."""
        return "".join(
            [
                "/* This is a sample config that should be customized with\n",
                " appropriate AS numbers and peers */\n",
                cls.fea("unicast-forwarding4"),
                cls.policyexportconnected(),
                "\nprotocols {\n",
                " bgp {\n",
                "\tbgp-id: %s\n" % cls.routerid(node),
                "\tlocal-as: 65001 /* change this */\n",
                '\texport: "export-connected"\n',
                "\tpeer 10.0.1.1 { /* change this */\n",
                "\t local-ip: 10.0.1.1\n",
                "\t as: 65002\n",
                "\t next-hop: 10.0.0.2\n",
                "\t}\n",
                " }\n",
                "}\n",
            ]
        )
class XorpRip(XorpService):
    """
    RIP IPv4 unicast routing.
    """
    name = "XORP_RIP"
    @classmethod
    def generatexorpconfig(cls, node):
        """Emit a rip protocol block covering every non-control IPv4 address."""
        parts = [
            cls.fea("unicast-forwarding4"),
            cls.policyexportconnected(),
            "\nprotocols {\n",
            " rip {\n",
            '\texport: "export-connected"\n',
        ]
        for ifc in node.netifs():
            if getattr(ifc, "control", False) is True:
                continue
            parts.append("\tinterface %s {\n" % ifc.name)
            parts.append("\t vif %s {\n" % ifc.name)
            for a in ifc.addrlist:
                if "." not in a:
                    continue
                parts.append("\t\taddress %s {\n" % a.split("/")[0])
                parts.append("\t\t disable: false\n")
                parts.append("\t\t}\n")
            parts.append("\t }\n")
            parts.append("\t}\n")
        parts.append(" }\n")
        parts.append("}\n")
        return "".join(parts)
class XorpRipng(XorpService):
    """
    RIP NG IPv6 unicast routing.
    """
    name = "XORP_RIPNG"
    @classmethod
    def generatexorpconfig(cls, node):
        """Emit a ripng protocol block using each interface's link-local address."""
        parts = [
            cls.fea("unicast-forwarding6"),
            cls.policyexportconnected(),
            "\nprotocols {\n",
            " ripng {\n",
            '\texport: "export-connected"\n',
        ]
        for ifc in node.netifs():
            if getattr(ifc, "control", False) is True:
                continue
            parts.append("\tinterface %s {\n" % ifc.name)
            parts.append("\t vif %s {\n" % ifc.name)
            # RIPng runs over the link-local address derived from the
            # hardware address, not the configured global addresses.
            parts.append("\t\taddress %s {\n" % ifc.hwaddr.tolinklocal())
            parts.append("\t\t disable: false\n")
            parts.append("\t\t}\n")
            parts.append("\t }\n")
            parts.append("\t}\n")
        parts.append(" }\n")
        parts.append("}\n")
        return "".join(parts)
class XorpPimSm4(XorpService):
"""
PIM Sparse Mode IPv4 multicast routing.
"""
name = "XORP_PIMSM4"
@classmethod
def generatexorpconfig(cls, node):
cfg = cls.mfea("mfea4", node.netifs())
cfg += "\nprotocols {\n"
cfg += " igmp {\n"
names = []
for ifc in node.netifs():
if hasattr(ifc, "control") and ifc.control is True:
continue
names.append(ifc.name)
cfg += "\tinterface %s {\n" % ifc.name
cfg += "\t vif %s {\n" % ifc.name
cfg += "\t\tdisable: false\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += " }\n"
cfg += "}\n"
cfg += "\nprotocols {\n"
cfg += " pimsm4 {\n"
names.append("register_vif")
for name in names:
cfg += "\tinterface %s {\n" % name
cfg += "\t vif %s {\n" % name
cfg += "\t\tdr-priority: 1\n"
cfg += "\t }\n"
cfg += "\t}\n"
cfg += "\tbootstrap {\n"
cfg += "\t cand-bsr {\n"
cfg += "\t\tscope-zone 172.16.58.3/4 {\n"
cfg += '\t\t cand-bsr-by-vif-name: "%s"\n' % names[0]
cfg += "\t\t}\n"
cfg += "\t }\n"
cfg += "\t cand-rp {\n"
| |
import os
import sys
import unittest
class TestNormalizer(unittest.TestCase):
assets_dir = './test/assets'
tokenizer_filenames = [
'tokenizer_basic_ci.xml',
'tokenizer_basic_cs.xml',
'tokenizer_bypass.xml',
'tokenizer_ci_child_of_ci_parent.xml',
'tokenizer_ci_child_of_cs_parent.xml',
'tokenizer_cs_child_of_ci_parent.xml',
'tokenizer_cs_child_of_cs_parent.xml',
'tokenizer_grandchild_ci_of_ci_ci.xml',
'tokenizer_grandchild_ci_of_ci_cs.xml',
'tokenizer_grandchild_ci_of_cs_ci.xml',
'tokenizer_grandchild_ci_of_cs_cs.xml',
'tokenizer_grandchild_cs_of_ci_ci.xml',
'tokenizer_grandchild_cs_of_ci_cs.xml',
'tokenizer_grandchild_cs_of_cs_ci.xml',
'tokenizer_grandchild_cs_of_cs_cs.xml',
'tokenizer_parent_ci.xml',
'tokenizer_parent_cs.xml',
'tokenizer_split_extension.xml',
'tokenizer_split_replace.xml'
]
def normalize(self, config_filename, string):
builder = sic.Builder()
worker = builder.build_normalizer('%s/%s' % (self.assets_dir, config_filename))
word_separator = ' '
options = {0: 'normal', 1: 'list', 2: 'set'}
return (worker.name, {options[x]: worker.normalize(string, word_separator, x) for x in [0, 1, 2]})
def assert_normalization(self, tokenizer_filename, tokenizer_name, testcases):
for testcase in testcases:
name, result = (self.normalize(tokenizer_filename, testcase['original']))
assert name == tokenizer_name, 'Unexpected tokenizer name (expected "%s", got "%s" instead).' % (tokenizer_name, name)
for option in ['normal', 'list', 'set']:
assert result[option] == testcase['expected'][option], 'Unexpected normalization result for %s (option "%s"): "%s" => "%s" (expected "%s").' % (name, option, testcase['original'], result[option], testcase['expected'][option])
return True
def assert_map(self, config_filename, testcases):
for testcase in testcases:
builder = sic.Builder()
worker = builder.build_normalizer('%s/%s' % (self.assets_dir, config_filename))
_ = worker.normalize(testcase['original'], testcase['word_separator'], testcase['option'])
result = worker.result
assert result['original'] == testcase['original'], 'Case "%s": Original strings in input and output do not match for %s (word_separator="%s", option="%s": "%s" => "%s" (expected "%s").' % (testcase['original'], worker.name, testcase['word_separator'], testcase['option'], testcase['original'], result['original'], testcase['original'])
assert result['normalized'] == testcase['normalized'], 'Case "%s": Unexpected normalization result for %s (word_separator="%s", option "%s"): "%s" => "%s" (expected "%s").' % (testcase['original'], worker.name, testcase['word_separator'], testcase['option'], testcase['original'], result['normalized'], testcase['normalized'])
assert len(result['map']) == len(testcase['map']), 'Case "%s": Unexpected map length for %s: expected %d, got %d.' % (testcase['original'], worker.name, len(testcase['map']), len(result['map']))
if testcase['option'] == 0:
assert len(result['map']) == len(result['normalized']), 'Case "%s": Legth of map does not match length of normalized string (config %s, expected %d; got normalized=%d, map=%d instead).' % (testcase['original'], worker.name, len(testcase['map']), len(result['normalized']), len(result['map']))
for i, j in enumerate(result['map']):
if result['normalized'][i] != testcase['word_separator']:
assert testcase['map'][i] == j, 'Case "%s": Unexpected map for %s (word_separator="%s", option "%s"): value at index %d is supposed to be %d (got %d instead), unless character at that position is "%s" (got "%s" instead).' % (testcase['original'], worker.name, testcase['word_separator'], testcase['option'], i, testcase['map'][i], j, testcase['word_separator'], result['normalized'][i])
return True
def test_expose_tokenizer(self):
builder = sic.Builder()
for filename in self.tokenizer_filenames:
ret = builder.expose_tokenizer('%s/%s' % (self.assets_dir, filename))
assert type(ret) == tuple, 'Expected tuple, returned %s' % str(type(ret))
assert len(ret) == 2, 'Expected length 2, returned %s' % str(len(ret))
assert type(ret[0]) == str, 'Expected ret[0] to be str, returned %s' % str(type(ret[0]))
assert type(ret[1]) == str, 'Expected ret[1] to be str, returned %s' % str(type(ret[1]))
def test_build_normalizer(self):
builder = sic.Builder()
for filename in self.tokenizer_filenames:
ret = builder.build_normalizer('%s/%s' % (self.assets_dir, filename))
assert type(ret) == sic.Normalizer, 'Expected Normalizer, returned %s' % str(type(ret))
    def test_tokenizer_basic_ci(self):
        # Case-insensitive basic tokenizer: letter/digit runs split, all lowercased.
        testcases = [
            {
                'original': 'abc123-DEF123ghi234def-555DEF ABC',
                'expected': {
                    'normal': 'abc 123 - def 123 ghi 234 def - 555 def abc',
                    'list': '- - 123 123 234 555 abc abc def def def ghi',
                    'set': '- 123 234 555 abc def ghi'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_basic_ci.xml', 'tokenizer_basic_ci', testcases) == True, 'Something is wrong.'
    def test_tokenizer_basic_cs(self):
        # Case-sensitive basic tokenizer: runs split, original casing preserved.
        testcases = [
            {
                'original': 'abc123-DEF123ghi234def-555DEF ABC',
                'expected': {
                    'normal': 'abc 123 - DEF 123 ghi 234 def - 555 DEF ABC',
                    'list': '- - 123 123 234 555 ABC DEF DEF abc def ghi',
                    'set': '- 123 234 555 ABC DEF abc def ghi'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_basic_cs.xml', 'tokenizer_basic_cs', testcases) == True, 'Something is wrong.'
    def test_tokenizer_bypass(self):
        # Bypass tokenizer: input must pass through unchanged in all three modes.
        testcases = [
            {
                'original': 'abc123-DEF123ghi234def-555DEF ABC',
                'expected': {
                    'normal': 'abc123-DEF123ghi234def-555DEF ABC',
                    'list': 'abc123-DEF123ghi234def-555DEF ABC',
                    'set': 'abc123-DEF123ghi234def-555DEF ABC'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_bypass.xml', 'tokenizer_bypass', testcases) == True, 'Something is wrong.'
    def test_tokenizer_ci_child_of_ci_parent(self):
        # CI child config inheriting from a CI parent: replacements from both
        # levels apply and everything is lowercased.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD, ALPHAbetaGAmma qwebetaqwe',
                'expected': {
                    'normal': 'this - is replacetoparent - hey , - replacetochild , alpha beta gamma qwe beta qwe',
                    'list': ', , - - - alpha beta beta gamma hey is qwe qwe replacetochild replacetoparent this',
                    'set': ', - alpha beta gamma hey is qwe replacetochild replacetoparent this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_ci_child_of_ci_parent.xml', 'test_ci_child_of_ci_parent', testcases) == True, 'Something is wrong.'
    def test_tokenizer_ci_child_of_cs_parent(self):
        # CI child config inheriting from a CS parent: child's case-insensitivity wins.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD, ALPHAbetaGAmma',
                'expected': {
                    'normal': 'this - is replacetoparent - hey , - replacetochild , alpha beta gamma',
                    'list': ', , - - - alpha beta gamma hey is replacetochild replacetoparent this',
                    'set': ', - alpha beta gamma hey is replacetochild replacetoparent this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_ci_child_of_cs_parent.xml', 'test_ci_child_of_cs_parent', testcases) == True, 'Something is wrong.'
    def test_tokenizer_cs_child_of_ci_parent(self):
        # NOTE(review): this test references the ci_child_of_cs_parent config
        # and tokenizer name rather than cs_child_of_ci_parent — it duplicates
        # the previous test and looks like a copy-paste slip; confirm against
        # the asset 'tokenizer_cs_child_of_ci_parent.xml' before changing.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD, ALPHAbetaGAmma',
                'expected': {
                    'normal': 'this - is replacetoparent - hey , - replacetochild , alpha beta gamma',
                    'list': ', , - - - alpha beta gamma hey is replacetochild replacetoparent this',
                    'set': ', - alpha beta gamma hey is replacetochild replacetoparent this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_ci_child_of_cs_parent.xml', 'test_ci_child_of_cs_parent', testcases) == True, 'Something is wrong.'
    def test_tokenizer_cs_child_of_cs_parent(self):
        # CS child of CS parent: replacements and tokens keep their original casing.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD, ALPHAbetaGAmma',
                'expected': {
                    'normal': 'this - is replaCeToPARENT - EMPTYONE hey , - replaCeToCHILD , ALPHA beta GAmma',
                    'list': ', , - - - ALPHA EMPTYONE GAmma beta hey is replaCeToCHILD replaCeToPARENT this',
                    'set': ', - ALPHA EMPTYONE GAmma beta hey is replaCeToCHILD replaCeToPARENT this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_cs_child_of_cs_parent.xml', 'test_cs_child_of_cs_parent', testcases) == True, 'Something is wrong.'
    def test_tokenizer_grandchild_ci_of_ci_ci(self):
        # Three-level inheritance (CI grandchild of CI/CI): all levels' rules apply.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD/replaceFromGCHLD, ALPHAbetaGAmma emptythree/emptyThree',
                'expected': {
                    'normal': 'this - is replacetoparent - hey , - replacetochild / replacetogchld , alpha beta gamma /',
                    'list': ', , - - - / / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this',
                    'set': ', - / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_grandchild_ci_of_ci_ci.xml', 'test_grandchild_ci_of_ci_ci', testcases) == True, 'Something is wrong.'
    def test_tokenizer_grandchild_ci_of_ci_cs(self):
        # CI grandchild of CI parent / CS grandparent: grandchild CI behavior wins.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD/replaceFromGCHLD, ALPHAbetaGAmma emptythree/emptyThree',
                'expected': {
                    'normal': 'this - is replacetoparent - hey , - replacetochild / replacetogchld , alpha beta gamma /',
                    'list': ', , - - - / / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this',
                    'set': ', - / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_grandchild_ci_of_ci_cs.xml', 'test_grandchild_ci_of_ci_cs', testcases) == True, 'Something is wrong.'
    def test_tokenizer_grandchild_ci_of_cs_ci(self):
        # CI grandchild of CS parent / CI grandparent: grandchild CI behavior wins.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD/replaceFromGCHLD, ALPHAbetaGAmma emptythree/emptyThree',
                'expected': {
                    'normal': 'this - is replacetoparent - hey , - replacetochild / replacetogchld , alpha beta gamma /',
                    'list': ', , - - - / / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this',
                    'set': ', - / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_grandchild_ci_of_cs_ci.xml', 'test_grandchild_ci_of_cs_ci', testcases) == True, 'Something is wrong.'
    def test_tokenizer_grandchild_ci_of_cs_cs(self):
        # CI grandchild of CS parent / CS grandparent: grandchild CI behavior wins.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD/replaceFromGCHLD, ALPHAbetaGAmma emptythree/emptyThree',
                'expected': {
                    'normal': 'this - is replacetoparent - hey , - replacetochild / replacetogchld , alpha beta gamma /',
                    'list': ', , - - - / / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this',
                    'set': ', - / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_grandchild_ci_of_cs_cs.xml', 'test_grandchild_ci_of_cs_cs', testcases) == True, 'Something is wrong.'
    def test_tokenizer_grandchild_cs_of_ci_ci(self):
        # NOTE(review): expected values here are all-lowercase although the
        # grandchild is CS — confirm the asset really lowercases at this level.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD/replaceFromGCHLD, ALPHAbetaGAmma emptythree/emptyThree',
                'expected': {
                    'normal': 'this - is replacetoparent - hey , - replacetochild / replacetogchld , alpha beta gamma /',
                    'list': ', , - - - / / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this',
                    'set': ', - / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_grandchild_cs_of_ci_ci.xml', 'test_grandchild_cs_of_ci_ci', testcases) == True, 'Something is wrong.'
    def test_tokenizer_grandchild_cs_of_ci_cs(self):
        # NOTE(review): expected values here are all-lowercase although the
        # grandchild is CS — confirm the asset really lowercases at this level.
        testcases = [
            {
                'original': 'this-is replaceFromPARENT-EMPTYONE hey, emptyTwo-replaceFromCHILD/replaceFromGCHLD, ALPHAbetaGAmma emptythree/emptyThree',
                'expected': {
                    'normal': 'this - is replacetoparent - hey , - replacetochild / replacetogchld , alpha beta gamma /',
                    'list': ', , - - - / / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this',
                    'set': ', - / alpha beta gamma hey is replacetochild replacetogchld replacetoparent this'
                }
            }
        ]
        assert self.assert_normalization('tokenizer_grandchild_cs_of_ci_cs.xml', 'test_grandchild_cs_of_ci_cs', testcases) == True, 'Something is wrong.'
def test_tokenizer_grandchild_cs_of_cs_ci(self):
testcases = [
{
'original': 'this-is replaceFromPARENT-EMPTYONE | |
# -*- coding: utf-8 -*-
import inspect
import io
import logging
import os
import time
import warnings
from collections import Iterable, Iterator, defaultdict, namedtuple
from copy import copy
from functools import wraps
from typing import Any, Dict, Optional, cast
import numpy as np
import pandas as pd
import pyarrow as pa
from kartothek.core import naming
from kartothek.core.common_metadata import (
make_meta,
normalize_column_order,
read_schema_metadata,
validate_compatible,
validate_shared_columns,
)
from kartothek.core.index import ExplicitSecondaryIndex, IndexBase
from kartothek.core.index import merge_indices as merge_indices_algo
from kartothek.core.naming import get_partition_file_prefix
from kartothek.core.partition import Partition
from kartothek.core.urlencode import decode_key, quote_indices
from kartothek.core.utils import ensure_string_type, verify_metadata_version
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.docs import default_docs
from kartothek.io_components.utils import _instantiate_store, combine_metadata
from kartothek.serialization import (
DataFrameSerializer,
default_serializer,
filter_df_from_predicates,
)
LOGGER = logging.getLogger(__name__)
SINGLE_TABLE = "table"
_Literal = namedtuple("_Literal", ["column", "op", "value"])
_SplitPredicate = namedtuple("_SplitPredicate", ["key_part", "content_part"])
def _predicates_to_named(predicates):
    # Convert raw (column, op, value) tuples into _Literal namedtuples,
    # preserving the disjunction-of-conjunctions nesting; None passes through.
    if predicates is None:
        return None
    named = []
    for conjunction in predicates:
        named.append([_Literal(*literal) for literal in conjunction])
    return named
def _combine_predicates(predicates, logical_conjunction):
    # AND the given logical_conjunction onto every conjunction in predicates.
    # An empty/None conjunction leaves predicates untouched; with no
    # predicates the conjunction itself becomes the only one.
    if not logical_conjunction:
        return predicates
    if predicates is None:
        return [logical_conjunction]
    return [conjunction + list(logical_conjunction) for conjunction in predicates]
def _initialize_store_for_metapartition(method, method_args, method_kwargs):
    # Replace any store factory passed to a MetaPartition method — either as
    # the keyword "store"/"storage" or positionally — with an instantiated
    # store object, so every sub-partition call shares a single store
    # (avoiding multiple HTTP pools). Returns possibly-updated args/kwargs.
    for store_variable in ["store", "storage"]:
        if store_variable in method_kwargs:
            method_kwargs[store_variable] = _instantiate_store(
                method_kwargs[store_variable]
            )
        else:
            # look the parameter up positionally in the method signature
            method = cast(object, method)
            args = inspect.getfullargspec(method).args
            if store_variable in args:
                ix = args.index(store_variable)
                # reduce index since the argspec and method_args start counting differently due to self
                ix -= 1
                instantiated_store = _instantiate_store(method_args[ix])
                new_args = []
                for ix_method, arg in enumerate(method_args):
                    if ix_method != ix:
                        new_args.append(arg)
                    else:
                        new_args.append(instantiated_store)
                method_args = tuple(new_args)
    return method_args, method_kwargs
def _apply_to_list(method):
    """
    Decorate a MetaPartition method to act upon the internal list of metapartitions
    The methods must return a MetaPartition object!
    """

    @wraps(method)
    def _impl(self, *method_args, **method_kwargs):
        if not isinstance(self, MetaPartition):
            # bug fix: the message was previously passed as a second
            # constructor argument ("%s" was never interpolated)
            raise TypeError("Type unknown {}".format(type(self)))
        result = self.as_sentinel()
        if len(self) == 0:
            raise RuntimeError("Invalid MetaPartition. No sub-partitions to act upon.")
        # Look whether there is a `store` in the arguments and instantiate it
        # this way we avoid multiple HTTP pools
        method_args, method_kwargs = _initialize_store_for_metapartition(
            method, method_args, method_kwargs
        )
        if (len(self) == 1) and (self.label is None):
            result = method(self, *method_args, **method_kwargs)
        else:
            for mp in self:
                method_return = method(mp, *method_args, **method_kwargs)
                if not isinstance(method_return, MetaPartition):
                    raise ValueError(
                        "Method {} did not return a MetaPartition "
                        "but {}".format(method.__name__, type(method_return))
                    )
                if method_return.is_sentinel:
                    result = method_return
                else:
                    for mp in method_return:
                        result = result.add_metapartition(mp)
        if not isinstance(result, MetaPartition):
            # bug fix: the format string had no placeholder for the type and
            # referenced the loop variable instead of the final result
            raise ValueError(
                "Result for method {} is not a `MetaPartition` but {}".format(
                    method.__name__, type(result)
                )
            )
        return result

    return _impl
class MetaPartitionIterator(Iterator):
    """Iterate the sub-partitions of a (possibly nested) MetaPartition,
    yielding each as an unnested MetaPartition."""

    def __init__(self, metapartition):
        self.metapartition = metapartition
        self.position = 0

    def __iter__(self):
        return self

    def __next__(self):
        current = self.metapartition
        # a sentinel (single entry, no label) yields nothing
        if len(current) == 1 and current.label is None:
            raise StopIteration()
        if self.position >= len(current.metapartitions):
            raise StopIteration()
        mp_dict = current.metapartitions[self.position]
        # These are global attributes, i.e. the nested metapartitions do not
        # carry these and need to be added here
        mp_dict["dataset_metadata"] = current.dataset_metadata
        mp_dict["metadata_version"] = current.metadata_version
        mp_dict["table_meta"] = current.table_meta
        mp_dict["partition_keys"] = current.partition_keys
        mp_dict["logical_conjunction"] = current.logical_conjunction
        self.position += 1
        return MetaPartition.from_dict(mp_dict)

    next = __next__  # Python 2
class MetaPartition(Iterable):
"""
Wrapper for kartothek partition which includes additional information
about the parent dataset
"""
def __init__(
self,
label,
files=None,
metadata=None,
data=None,
dataset_metadata=None,
indices: Optional[Dict[Any, Any]] = None,
metadata_version=None,
table_meta=None,
partition_keys=None,
logical_conjunction=None,
):
"""
Initialize the :mod:`kartothek.io` base class MetaPartition.
The `MetaPartition` is used as a wrapper around the kartothek
`Partition` and primarily deals with dataframe manipulations,
in- and output to store.
The :class:`kartothek.io_components.metapartition` is immutable, i.e. all member
functions will return a new MetaPartition object where the new
attribute is changed
Parameters
----------
label : basestring
partition label
files : dict, optional
A dictionary with references to the files in store where the
keys represent file labels and the keys file prefixes.
metadata : dict, optional
The metadata of the partition
data : dict, optional
A dictionary including the materialized in-memory DataFrames
corresponding to the file references in `files`.
dataset_metadata : dict, optional
The metadata of the original dataset
indices : dict, optional
Kartothek index dictionary,
metadata_version : int, optional
table_meta: Dict[str, SchemaWrapper]
The dataset table schemas
partition_keys: List[str]
The dataset partition keys
logical_conjunction: List[Tuple[object, str, object]]
A logical conjunction to assign to the MetaPartition. By assigning
this, the MetaPartition will only be able to load data respecting
this conjunction.
"""
if metadata_version is None:
self.metadata_version = naming.DEFAULT_METADATA_VERSION
else:
self.metadata_version = metadata_version
verify_metadata_version(self.metadata_version)
self.table_meta = table_meta if table_meta else {}
if isinstance(data, dict) and (len(self.table_meta) == 0):
for table, df in data.items():
if df is not None:
self.table_meta[table] = make_meta(
df,
origin="{}/{}".format(table, label),
partition_keys=partition_keys,
)
indices = indices or {}
for column, index_dct in indices.items():
if isinstance(index_dct, dict):
indices[column] = ExplicitSecondaryIndex(
column=column, index_dct=index_dct
)
self.logical_conjunction = logical_conjunction
self.metapartitions = [
{
"label": label,
"data": data or {},
"files": files or {},
"indices": indices,
"logical_conjunction": logical_conjunction,
}
]
self.dataset_metadata = dataset_metadata or {}
self.partition_keys = partition_keys or []
def __repr__(self):
if len(self.metapartitions) > 1:
label = "NESTED ({})".format(len(self.metapartitions))
else:
label = self.label
return "<{_class} v{version} | {label} | tables {tables} >".format(
version=self.metadata_version,
_class=self.__class__.__name__,
label=label,
tables=sorted(set(self.table_meta.keys())),
)
    def __len__(self):
        """Number of wrapped sub-partitions; a sentinel also has length 1."""
        return len(self.metapartitions)
    def __iter__(self):
        """Iterate over unnested MetaPartition objects."""
        return MetaPartitionIterator(self)
def __getitem__(self, label):
for mp in self:
if mp.label == label:
return mp
raise KeyError("Metapartition doesn't contain partition `{}`".format(label))
    @property
    def data(self):
        """Materialized DataFrames of the single wrapped partition.

        Raises AttributeError when this MetaPartition is nested.
        """
        if len(self.metapartitions) > 1:
            raise AttributeError(
                "Accessing `data` attribute is not allowed while nested"
            )
        assert isinstance(self.metapartitions[0], dict), self.metapartitions
        return self.metapartitions[0]["data"]
    @property
    def files(self):
        """File references of the single wrapped partition.

        Raises AttributeError when this MetaPartition is nested.
        """
        if len(self.metapartitions) > 1:
            raise AttributeError(
                "Accessing `files` attribute is not allowed while nested"
            )
        return self.metapartitions[0]["files"]
    @property
    def is_sentinel(self):
        """True for the empty placeholder produced by as_sentinel() (no label)."""
        return len(self.metapartitions) == 1 and self.label is None
    @property
    def label(self):
        """Label of the single wrapped partition.

        Raises AttributeError when this MetaPartition is nested.
        """
        if len(self.metapartitions) > 1:
            raise AttributeError(
                "Accessing `label` attribute is not allowed while nested"
            )
        assert isinstance(self.metapartitions[0], dict), self.metapartitions[0]
        return self.metapartitions[0]["label"]
    @property
    def indices(self):
        """Index objects of the single wrapped partition.

        Raises AttributeError when this MetaPartition is nested.
        """
        if len(self.metapartitions) > 1:
            raise AttributeError(
                "Accessing `indices` attribute is not allowed while nested"
            )
        return self.metapartitions[0]["indices"]
@property
def tables(self):
return list(set(self.data.keys()).union(set(self.files.keys())))
    @property
    def partition(self):
        """The plain kartothek Partition (label + file references) this wraps."""
        return Partition(label=self.label, files=self.files)
    def __eq__(self, other):
        """Deep equality: metadata version, table schemas, dataset metadata and
        all (sub-)partitions — labels, file references and dataframe contents —
        must match."""
        if not isinstance(other, MetaPartition):
            return False
        if self.metadata_version != other.metadata_version:
            return False
        for table, meta in self.table_meta.items():
            if not meta.equals(other.table_meta.get(table, None)):
                return False
        if self.dataset_metadata != other.dataset_metadata:
            return False
        if len(self.metapartitions) != len(other.metapartitions):
            return False
        # In the case both MetaPartitions are nested, we need to ensure a match
        # for all sub-partitions.
        # Since the label is unique, this can be used as a distinguishing key to sort and compare
        # the nested metapartitions.
        if len(self.metapartitions) > 1:
            for mp_self, mp_other in zip(
                sorted(self.metapartitions, key=lambda x: x["label"]),
                sorted(other.metapartitions, key=lambda x: x["label"]),
            ):
                if mp_self == mp_other:
                    continue
                # If a single metapartition does not match, the whole object is considered different
                return False
            return True
        # This is unnested only
        self_keys = set(self.data.keys())
        other_keys = set(other.data.keys())
        if not (self_keys == other_keys):
            return False
        if self.label != other.label:
            return False
        if self.files != other.files:
            return False
        # compare the materialized dataframes cell-by-cell
        for label, df in self.data.items():
            if not (df.equals(other.data[label])):
                return False
        return True
    @staticmethod
    def from_partition(
        partition,
        data=None,
        dataset_metadata=None,
        indices=None,
        metadata_version=None,
        table_meta=None,
        partition_keys=None,
        logical_conjunction=None,
    ):
        """
        Transform a kartothek :class:`~kartothek.core.partition.Partition` into a
        :class:`~kartothek.io_components.metapartition.MetaPartition`.
        Parameters
        ----------
        partition : :class:`~kartothek.core.partition.Partition`
            The kartothek partition to be wrapped
        data : dict, optional
            A dictionary with materialised :class:`~pandas.DataFrame`
        dataset_metadata : dict of basestring, optional
            The metadata of the original dataset
        indices : dict
            The index dictionary of the dataset
        table_meta: Union[None, Dict[String, pyarrow.Schema]]
            Type metadata for each table, optional
        metadata_version: int, optional
        partition_keys: Union[None, List[String]]
            A list of the primary partition keys
        Returns
        -------
        :class:`~kartothek.io_components.metapartition.MetaPartition`
        """
        # delegate to __init__, carrying the partition's label and files over
        return MetaPartition(
            label=partition.label,
            files=partition.files,
            data=data,
            dataset_metadata=dataset_metadata,
            indices=indices,
            metadata_version=metadata_version,
            table_meta=table_meta,
            partition_keys=partition_keys,
            logical_conjunction=logical_conjunction,
        )
def add_metapartition(
self, metapartition, metadata_merger=None, schema_validation=True
):
"""
Adds a metapartition to the internal list structure to enable batch processing.
The top level `dataset_metadata` dictionary is combined with the existing dict and
all other attributes are stored in the `metapartitions` list
Parameters
----------
metapartition: [MetaPartition]
The MetaPartition to be added.
metadata_merger: [callable]
A callable to perform the metadata merge. By default [kartothek.io_components.utils.combine_metadata] is used
schema_validation : [bool]
If True (default), ensure that the `table_meta` of both `MetaPartition` objects are the same
"""
if self.is_sentinel:
return metapartition
table_meta = metapartition.table_meta
existing_label = [mp_["label"] | |
138.189444444),
"RJNT": (36.6483333333, 137.187222222),
"RJNW": (37.2933333333, 136.962222222),
"RJNY": (34.8125, 138.297777778),
"RJOA": (34.4352777778, 132.921944444),
"RJOB": (34.7569444444, 133.855555556),
"RJOC": (35.4136111111, 132.89),
"RJOE": (34.5333333333, 136.672222222),
"RJOF": (34.0344444444, 131.549166667),
"RJOH": (35.4922222222, 133.236388889),
"RJOI": (34.1436111111, 132.235555556),
"RJOK": (33.5444444444, 133.671388889),
"RJOM": (33.8272222222, 132.699722222),
"RJOO": (34.7852777778, 135.438055556),
"RJOR": (35.53, 134.166388889),
"RJOS": (34.1327777778, 134.606388889),
"RJOT": (34.2138888889, 134.015555556),
"RJOW": (34.6763888889, 131.790277778),
"RJOY": (34.5961111111, 135.602777778),
"RJOZ": (34.0452777778, 131.051944444),
"RJSA": (40.7344444444, 140.690833333),
"RJSC": (38.4116666667, 140.371111111),
"RJSD": (38.0611111111, 138.414166667),
"RJSF": (37.2275, 140.428055556),
"RJSH": (40.5563888889, 141.466111111),
"RJSI": (39.4308333333, 141.135833333),
"RJSK": (39.6155555556, 140.218611111),
"RJSM": (40.7030555556, 141.368333333),
"RJSN": (37.9558333333, 139.111666667),
"RJSO": (41.2327777778, 141.132222222),
"RJSR": (40.1919444444, 140.371666667),
"RJSS": (38.1394444444, 140.916666667),
"RJST": (38.4047222222, 141.219444444),
"RJSU": (38.2355555556, 140.923055556),
"RJSY": (38.8116666667, 139.786944444),
"RJTA": (35.4544444444, 139.45),
"RJTC": (35.7108333333, 139.403055556),
"RJTE": (34.9869444444, 139.829166667),
"RJTF": (35.6716666667, 139.528055556),
"RJTH": (33.115, 139.785555556),
"RJTI": (35.6361111111, 139.839444444),
"RJTJ": (35.8413888889, 139.409722222),
"RJTK": (35.3980555556, 139.909722222),
"RJTL": (35.7988888889, 140.011111111),
"RJTO": (34.7844444444, 139.361388889),
"RJTQ": (34.0719444444, 139.559722222),
"RJTR": (35.5136111111, 139.393611111),
"RJTT": (35.5522222222, 139.779444444),
"RJTU": (36.5144444444, 139.870833333),
"RJTY": (35.7483333333, 139.348333333),
"RKJB": (34.9914055556, 126.382813889),
"RKJJ": (35.1255555556, 126.809722222),
"RKJK": (35.9036111111, 126.615833333),
"RKJM": (34.7588888889, 126.379722222),
"RKJU": (35.8783333333, 127.119444444),
"RKJY": (34.8397222222, 127.615277778),
"RKNC": (37.8836111111, 127.717777778),
"RKND": (38.1475, 128.600555556),
"RKNN": (37.7533333333, 128.943888889),
"RKNW": (37.4380555556, 127.960277778),
"RKNY": (38.0611111111, 128.668888889),
"RKPC": (33.5111111111, 126.492777778),
"RKPD": (33.3969444444, 126.712777778),
"RKPE": (35.1411111111, 128.695555556),
"RKPK": (35.1794444444, 128.938055556),
"RKPP": (35.1708333333, 129.128611111),
"RKPS": (35.0883333333, 128.070277778),
"RKPU": (35.5933333333, 129.351666667),
"RKSG": (36.9605555556, 127.033333333),
"RKSI": (37.4633333333, 126.44),
"RKSM": (37.4458333333, 127.113888889),
"RKSO": (37.0905555556, 127.029444444),
"RKSS": (37.5580555556, 126.790555556),
"RKSW": (37.2391666667, 127.006944444),
"RKTE": (36.5680555556, 127.5),
"RKTH": (35.9877777778, 129.420277778),
"RKTJ": (35.8563888889, 129.211388889),
"RKTN": (35.8938888889, 128.658611111),
"RKTU": (36.7163888889, 127.498888889),
"RKTY": (36.6316666667, 128.354722222),
"ROAH": (26.1955555556, 127.645833333),
"RODE": (26.7286111111, 127.761666667),
"RODN": (26.3555555556, 127.7675),
"ROIG": (24.3444444444, 124.186944444),
"ROKJ": (26.3633333333, 126.713611111),
"ROKR": (26.1683333333, 127.293333333),
"ROMD": (25.8463888889, 131.263333333),
"ROMY": (24.7827777778, 125.295),
"RORA": (26.5927777778, 127.240277778),
"RORE": (26.7225, 127.786944444),
"RORH": (24.0583333333, 123.803888889),
"RORK": (25.9477777778, 131.321388889),
"RORS": (24.8266666667, 125.144722222),
"RORT": (24.6538888889, 124.675277778),
"RORY": (27.0438888889, 128.401388889),
"ROTM": (26.2741666667, 127.756388889),
"ROYN": (24.4669444444, 122.977777778),
"RPAF": (14.5094444444, 121.018333333),
"RPBY": (10.0555555556, 124.435833333),
"RPCA": (11.81, 124.83),
"RPCU": (10.8580555556, 121.069444444),
"RPEN": (11.2025, 119.416111111),
"RPLB": (14.7944444444, 120.271388889),
"RPLC": (15.1858333333, 120.559722222),
"RPLI": (18.1780555556, 120.531666667),
"RPLL": (14.5086111111, 121.019444444),
"RPLN": (17.0644444444, 122.427222222),
"RPLO": (10.8580555556, 121.069444444),
"RPLP": (13.1569444444, 123.746111111),
"RPLQ": (15.2389638889, 120.367230556),
"RPLR": (15.8844444444, 120.601944444),
"RPLS": (14.4913166667, 120.893886111),
"RPLT": (20.7169444444, 121.816944444),
"RPLU": (13.8575, 120.108055556),
"RPLV": (15.4380555556, 121.091111111),
"RPLX": (14.3855555556, 120.573055556),
"RPMA": (6.36776944444, 124.752522222),
"RPMB": (6.10555555556, 125.236111111),
"RPMC": (7.16472222222, 124.210277778),
"RPMD": (7.12555555556, 125.645833333),
"RPME": (8.95138888889, 125.478055556),
"RPMF": (8.19777777778, 126.324444444),
"RPMG": (8.60138888889, 123.334444444),
"RPMH": (9.25361111111, 124.706944444),
"RPMI": (8.130525, 124.214894444),
"RPMJ": (6.05361111111, 121.011111111),
"RPML": (8.41444444444, 124.611388889),
"RPMM": (7.61722222222, 124.058611111),
"RPMN": (5.04694444444, 119.742777778),
"RPMO": (8.178525, 123.841386111),
"RPMP": (7.82777777778, 123.460277778),
"RPMQ": (6.95027777778, 126.2725),
"RPMR": (6.05805555556, 125.096111111),
"RPMS": (9.75777777778, 125.480833333),
"RPMT": (8.36155277778, 124.833325),
"RPMV": (7.78613888889, 122.601138889),
"RPMW": (9.07211111111, 126.171444444),
"RPMY": (8.14055555556, 125.119166667),
"RPMZ": (6.92222222222, 122.059444444),
"RPNS": (9.85888888889, 126.013888889),
"RPPA": (17.0644444444, 122.427222222),
"RPPN": (11.05, 114.283333333),
"RPSB": (11.1622222222, 123.784444444),
"RPSI": (9.85888888889, 126.013888889),
"RPSM": (10.1870722222, 124.783411111),
"RPSN": (10.0555555556, 124.435833333),
"RPUB": (16.375, 120.618888889),
"RPUD": (14.1291666667, 122.980277778),
"RPUF": (14.9863888889, 120.4925),
"RPUG": (16.0347222222, 120.240833333),
"RPUH": (12.3613888889, 121.046388889),
"RPUI": (15.3255555556, 119.968888889),
"RPUJ": (14.9333333333, 120.2),
"RPUL": (13.955, 121.124722222),
"RPUM": (13.2080555556, 120.605277778),
"RPUN": (13.5852777778, 123.270833333),
"RPUO": (20.4516666667, 121.98),
"RPUP": (14.2927777778, 122.645555556),
"RPUQ": (17.5536111111, 120.3575),
"RPUR": (15.7303055556, 121.501583333),
"RPUS": (16.5955555556, 120.303055556),
"RPUT": (17.6380555556, 121.730555556),
"RPUV": (13.5775, 124.206111111),
"RPUW": (13.3611111111, 121.825277778),
"RPUX": (14.8906666667, 120.852722222),
"RPUY": (16.9297222222, 121.753333333),
"RPUZ": (16.6188888889, 121.252222222),
"RPVA": (11.2272222222, 125.027777778),
"RPVB": (10.6425, 122.929444444),
"RPVC": (12.0725, 124.545),
"RPVD": (9.33416666667, 123.301944444),
"RPVE": (11.9247222222, 121.955),
"RPVF": (12.5022222222, 124.635555556),
"RPVG": (11.0355555556, 125.742777778),
"RPVH": (10.3766666667, 124.761111111),
"RPVI": (10.7130555556, 122.545),
"RPVJ": (12.3694444444, 123.629166667),
"RPVK": (11.6811111111, 122.377777778),
"RPVM": (10.3075, 123.979166667),
"RPVO": (11.0558333333, 124.565555556),
"RPVP": (9.74194444444, 118.758611111),
"RPVR": (11.5975, 122.752777778),
"RPVS": (10.7661111111, 121.932222222),
"RPVT": (9.66408055556, 123.853247222),
"RPVU": (12.3108333333, 122.084444444),
"RPVV": (12.1213888889, 120.1),
"RPVW": (11.6741666667, 125.478611111),
"RPVY": (11.81, 124.83),
"RPWA": (6.36776944444, 124.752522222),
"RPWC": (7.16527777778, 124.209722222),
"RPWD": (7.12555555556, 125.645833333),
"RPWE": (8.95138888889, 125.478055556),
"RPWG": (8.60138888889, 123.334444444),
"RPWI": (8.178525, 123.841386111),
"RPWJ": (6.05361111111, 121.011111111),
"RPWL": (8.41555555556, 124.611111111),
"RPWM": (7.61666666667, 124.0575),
"RPWN": (5.04694444444, 119.742777778),
"RPWP": (7.82722222222, 123.458333333),
"RPWS": (9.7575, 125.479444444),
"RPWT": (8.36155277778, 124.833325),
"RPWW": (9.07211111111, 126.171444444),
"RPWX": (8.130525, 124.214894444),
"RPWY": (8.14055555556, 125.119166667),
"RPWZ": (8.19777777778, 126.324444444),
"RPXC": (15.2389638889, 120.367230556),
"RPXG": (13.8575, 120.108055556),
"RPXI": (20.7169444444, 121.816944444),
"RPXM": (15.4380555556, 121.091111111),
"RPXP": (16.6205555556, 120.280555556),
"RPXR": (14.3855555556, 120.573055556),
"SAAC": (-31.2969444444, -57.9963888889),
"SAAG": (-33.0058333333, -58.6130555556),
"SAAI": (-35.3477777778, -57.2938888889),
"SAAJ": (-34.5458333333, -60.9305555556),
"SAAK": (-34.1819444444, -58.2469444444),
"SAAP": (-31.7947222222, -60.4802777778),
"SAAR": (-32.9033333333, -60.7844444444),
"SAAV": (-31.7116666667, -60.8116666667),
"SABA": (-34.8222222222, -58.5358333333),
"SABE": (-34.5591666667, -58.4155555556),
"SACA": (-31.4419444444, -64.2583333333),
"SACC": (-31.0066666667, -64.5325),
"SACO": (-31.3236111111, -64.2077777778),
"SACP": (-31.3557777778, -66.5908888889),
"SACT": (-30.3452777778, -66.2936111111),
"SADD": (-34.5005555556, -58.6041666667),
"SADF": (-34.4530555556, -58.5894444444),
"SADJ": (-34.5605555556, -58.7894444444),
"SADL": (-34.9722222222, -57.8944444444),
"SADM": (-34.6761111111, -58.6425),
"SADO": (-34.5333333333, -58.7166666667),
"SADP": (-34.6097222222, -58.6125),
"SADS": (-34.7313888889, -58.5994444444),
"SAEZ": (-34.8222222222, -58.5358333333),
"SAHC": (-37.4444444444, -70.2222222222),
"SAHE": (-37.8512777778, -71.0094722222),
"SAHR": (-39.0005555556, -67.6202777778),
"SAHS": (-37.3905, -68.9042777778),
"SAHZ": (-38.9755555556, -70.1136111111),
"SAME": (-32.8316666667, -68.7927777778),
"SAMM": (-35.4838888889, -69.5825),
"SAMQ": (-32.8658333333, -68.8722222222),
"SAMR": (-34.5880555556, -68.4025),
"SANC": (-28.5955555556, -65.7516666667),
"SANE": (-27.7655555556, -64.31),
"SANH": (-27.6105555556, -64.9483333333),
"SANI": (-28.0375, -67.5802777778),
"SANL": (-29.3813888889, -66.7958333333),
"SANO": (-29.2238888889, -67.4388888889),
"SANT": (-26.8408333333, -65.1047222222),
"SANU": (-31.5713888889, -68.4180555556),
"SANW": (-29.8718888889, -61.9270277778),
"SAOC": (-33.0855555556, -64.2613888889),
"SAOD": (-31.9411111111, -65.1422222222),
"SAOL": (-34.1352777778, -63.3622222222),
"SAOM": (-32.6836111111, -62.1577777778),
"SAOR": (-33.7297222222, -65.3872222222),
"SAOU": (-33.2730555556, -66.3563888889),
"SARC": (-27.4452777778, -58.7616666667),
"SARE": (-27.4497222222, -59.0561111111),
"SARF": (-26.2125, -58.2280555556),
"SARI": (-25.7375, -54.4730555556),
"SARL": (-29.6891666667, -57.1519444444),
"SARM": (-30.2716666667, -57.64),
"SARP": (-27.3858333333, -55.9705555556),
"SARS": (-26.7563888889, -60.4930555556),
"SASA": (-24.8558333333, -65.4861111111),
"SASJ": (-24.3927777778, -65.0977777778),
"SASO": (-23.1527777778, -64.3291666667),
"SASQ": (-22.1622222222, -65.5697222222),
"SAST": (-22.6197222222, -63.7936111111),
"SATC": (-25.30475, -57.7348333333),
"SATG": (-29.1058333333, -59.2186111111),
"SATK": (-24.7211111111, -60.5486111111),
"SATM": (-29.2230555556, -58.0880555556),
"SATO": (-27.5180555556, -55.1238888889),
"SATR": (-29.21, -59.6908333333),
"SATU": (-29.7705555556, -57.9788888889),
"SAVB": (-41.9430555556, -71.5322222222),
"SAVC": (-45.785, -67.4655555556),
"SAVD": (-42.0305555556, -71.1697222222),
"SAVE": (-42.9077777778, -71.1394444444),
"SAVH": (-46.5383333333, -68.9658333333),
"SAVJ": (-41.3208333333, -69.575),
"SAVN": (-40.7513888889, -65.0344444444),
"SAVR": (-45.0134444444, -70.8128611111),
"SAVS": (-41.5911944444, -65.3397222222),
"SAVT": (-43.2102777778, -65.2702777778),
"SAVV": (-40.8691666667, -63.0002777778),
"SAVY": (-42.7588888889, -65.1025),
"SAWA": (-50.3352777778, -72.2483333333),
"SAWB": (-64.2383333333, -56.6308333333),
"SAWC": (-50.28025, -72.0531944444),
"SAWD": (-47.7352777778, -65.9038888889),
"SAWE": (-53.7775, -67.7491666667),
"SAWG": (-51.6086111111, -69.3125),
"SAWH": (-54.8430555556, -68.2955555556),
"SAWJ": (-49.3066666667, -67.8025),
"SAWM": (-45.7041388889, -70.2436388889),
"SAWP": (-46.5380555556, -70.9786111111),
"SAWR": (-48.7827777778, -70.1491666667),
"SAWS": (-44.0480555556, -70.4591666667),
"SAWT": (-51.6063888889, -72.2166666667),
"SAWU": (-50.0163888889, -68.5791666667),
"SAZA": (-36.8371666667, -59.8807222222),
"SAZB": (-38.7247222222, -62.1691666667),
"SAZC": (-37.4461111111, -61.8891666667),
"SAZD": (-36.3202777778, -57.7216666667),
"SAZF": (-36.8908333333, -60.2161111111),
"SAZG": (-35.6961111111, -63.7580555556),
"SAZH": (-38.3866666667, -60.3294444444),
"SAZI": (-36.1869444444, -61.0761111111),
"SAZL": (-36.5422222222, -56.7216666667),
"SAZM": (-37.9341666667, -57.5733333333),
"SAZN": (-38.9488888889, -68.1555555556),
"SAZO": (-38.4894444444, -58.8158333333),
"SAZP": (-35.8455555556, -61.8577777778),
"SAZR": (-36.5880555556, -64.2752777778),
"SAZS": (-41.1511111111, -71.1575),
"SAZT": (-37.2372222222, -59.2277777778),
"SAZV": (-37.2352777778, -57.0291666667),
"SAZW": (-38.9394444444, -69.2644444444),
"SAZY": (-40.0752777778, -71.1372222222),
"SBAA": (-8.34833333333, -49.3013888889),
"SBAF": (-22.875, -43.3844444444),
"SBAM": (2.07666666667, -50.8622222222),
"SBAQ": (-21.8119444444, -48.1327777778),
"SBAR": (-10.9838888889, -37.0702777778),
"SBAS": (-22.6383333333, -50.4558333333),
"SBAT": (-9.86583333333, -56.1061111111),
"SBAU": (-21.1411111111, -50.4247222222),
"SBAV": (-22.5255555556, -52.9719444444),
"SBBE": (-1.37916666667, -48.4761111111),
"SBBG": (-31.3902777778, -54.1122222222),
"SBBH": (-19.8516666667, -43.9502777778),
"SBBI": (-25.405, -49.2319444444),
"SBBP": (-22.9791666667, -46.5375),
"SBBQ": (-21.2669444444, -43.7608333333),
"SBBR": (-15.8625, -47.9125),
"SBBT": (-20.5855555556, -48.5958333333),
"SBBU": (-22.345, -49.0536111111),
"SBBV": (2.84611111111, -60.69),
"SBBW": (-15.8611111111, -52.3888888889),
"SBBZ": (-22.7661111111, -41.9655555556),
"SBCA": (-25.0002777778, -53.5005555556),
"SBCB": (-22.9208333333, -42.0713888889),
"SBCC": (-9.33388888889, -54.9652777778),
"SBCD": (-26.7897222222, -50.9394444444),
"SBCF": (-19.6336111111, -43.9686111111),
"SBCG": (-20.4686111111, -54.6725),
"SBCH": (-27.1341666667, -52.6563888889),
"SBCI": (-7.32027777778, -47.4586111111),
"SBCJ": (-6.11527777778, -50.0013888889),
"SBCM": (-28.7255555556, -49.4247222222),
"SBCO": (-29.9458333333, -51.1444444444),
"SBCP": (-21.6983333333, -41.3016666667),
"SBCR": (-19.0116666667, -57.6727777778),
"SBCT": (-25.5283333333, -49.1755555556),
"SBCV": (-17.6522222222, -39.2530555556),
"SBCX": (-29.1969444444, -51.1875),
"SBCY": (-15.6527777778, -56.1166666667),
"SBCZ": (-7.6, -72.7694444444),
"SBDN": (-22.175, -51.4244444444),
"SBEG": (-3.03861111111, -60.0497222222),
"SBEK": (-6.23305555556, -57.7766666667),
"SBES": (-22.8127777778, -42.0925),
"SBFC": (-20.5919444444, -47.3827777778),
"SBFI": (-25.5961111111, -54.4869444444),
"SBFL": (-27.6725, -48.5477777778),
"SBFN": (-3.85472222222, -32.4233333333),
"SBFT": (-20.2783333333, -49.1872222222),
"SBFU": (-20.7027777778, -46.335),
"SBFZ": (-3.77611111111, -38.5325),
"SBGL": (-22.8088888889, -43.2436111111),
"SBGM": (-10.7861111111, -65.2847222222),
"SBGO": (-16.6311111111, -49.2222222222),
"SBGP": (-21.7644444444, -48.4047222222),
"SBGR": (-23.4322222222, -46.4691666667),
"SBGS": (-25.1844444444, -50.1438888889),
"SBGW": (-22.7913888889, -45.2047222222),
"SBHT": (-3.25388888889, -52.2538888889),
"SBIC": (-3.12722222222, -58.4811111111),
"SBIH": (-4.24222222222, -56.0005555556),
"SBIL": (-14.8158333333, -39.0330555556),
"SBIP": (-19.4705555556, -42.4875),
"SBIT": (-18.4444444444, -49.2133333333),
"SBIZ": (-5.53111111111, -47.46),
"SBJC": (-1.41388888889, -48.4605555556),
"SBJD": (-23.1816666667, -46.9436111111),
"SBJF": (-21.7913888889, -43.3866666667),
"SBJP": (-7.26972222222, -35.8961111111),
"SBJR": (-22.9875, -43.37),
"SBJV": (-26.2247222222, -48.7972222222),
"SBKG": (-7.26916666667, -35.895),
"SBKP": (-23.0080555556, -47.1344444444),
"SBLB": (-7.25027777778, -64.7838888889),
"SBLJ": (-27.7819444444, -50.2813888889),
"SBLN": (-21.6638888889, -49.7302777778),
"SBLO": (-23.3336111111, -51.13),
"SBLP": (-13.2619444444, -43.4080555556),
"SBLS": (-19.6613888889, -43.8963888889),
"SBMA": (-5.36833333333, -49.1377777778),
"SBMC": (-13.5505555556, -48.2005555556),
"SBMD": (-0.889722222222, -52.6022222222),
"SBME": (-22.3458333333, -41.7638888889),
"SBMG": (-23.4397222222, -51.9069444444),
"SBMK": (-16.7066666667, -43.8188888889),
"SBML": (-22.1966666667, -49.9263888889),
"SBMN": (-3.14555555556, -59.9861111111),
"SBMO": (-9.51027777778, -35.7933333333),
"SBMQ": (0.0505555555556, -51.0719444444),
"SBMS": (-5.20166666667, -37.3641666667),
"SBMT": (-23.5088888889, -46.6375),
"SBMY": (-5.81138888889, -61.2786111111),
"SBNF": (-26.88, -48.6513888889),
"SBNM": (-28.2816666667, -54.1688888889),
"SBNT": (-5.91111111111, -35.2477777778),
"SBOI": (3.85527777778, -51.7966666667),
"SBOU": (-22.9736111111, -49.9113888889),
"SBPA": (-29.9941666667, -51.1713888889),
"SBPB": (-2.89361111111, -41.7319444444),
"SBPC": (-21.8427777778, -46.5677777778),
"SBPF": (-28.2438888889, -52.3263888889),
"SBPG": (-25.5405555556, -48.5311111111),
"SBPJ": (-10.29, -48.3577777778),
"SBPK": (-31.7183333333, -52.3275),
"SBPL": | |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# <NAME> <<EMAIL>>
import logging
import sys
import dask
from dask_jobqueue.core import Job, JobQueueCluster
from distributed.deploy import Adaptive
from distributed.scheduler import Scheduler
from bob.extension import rc
from .sge_queues import QUEUE_DEFAULT
logger = logging.getLogger(__name__)
class SGEIdiapJob(Job):
    """Launch an SGE job in the IDIAP cluster.

    This class basically encodes the CLI command that bootstraps the dask
    worker in a SGE job. Check here
    `https://distributed.dask.org/en/latest/resources.html#worker-resources`
    for more information.

    ..note: This class is temporary. It's basically a copy of SGEJob from
        dask_jobqueue. The difference is that here the dask job resources tag
        is also handled (which is not handled anywhere). This has to be
        patched in the Job class. Please follow here
        `https://github.com/dask/dask-jobqueue/issues/378` to get news about
        this patch.
    """

    submit_command = "qsub"
    cancel_command = "qdel"
    config_name = "SGEIdiapJob"

    def __init__(
        self,
        *args,
        queue=None,
        project=rc.get("sge.project"),
        resource_spec=None,
        job_extra=None,
        config_name="sge",
        **kwargs,
    ):
        # Fall back to the dask config for every option not given explicitly.
        if queue is None:
            queue = dask.config.get("jobqueue.%s.queue" % config_name)
        if project is None:
            project = dask.config.get("jobqueue.%s.project" % config_name)
        if resource_spec is None:
            resource_spec = dask.config.get(
                "jobqueue.%s.resource-spec" % config_name
            )
        if job_extra is None:
            job_extra = dask.config.get("jobqueue.%s.job-extra" % config_name)

        # Pop the dask worker resources tag (e.g. {"GPU": 1}) before calling
        # the parent constructor, which does not know about it.
        # BUGFIX: the original test used `or`, which evaluated
        # kwargs["resources"] (KeyError) whenever the key was absent.
        resources = None
        if "resources" in kwargs and kwargs["resources"]:
            resources = kwargs.pop("resources")

        super().__init__(
            *args, config_name=config_name, death_timeout=10000, **kwargs
        )

        # Amend the --resources flag of the `distributed.cli.dask_worker`
        # CLI command so the scheduler can place resource-restricted tasks
        # on this worker.
        # BUGFIX: the original guard was `if not resources:`, which ran the
        # string conversion on None (TypeError) precisely when no resources
        # were requested, and never ran it when they were.
        if resources:
            # dask-worker expects a comma-separated KEY=VALUE list; the
            # original concatenated entries without a separator.
            resources_str = ",".join(
                f"{name}={value}" for name, value in resources.items()
            )
            self._command_template += f" --resources {resources_str}"

        # Build the qsub submission header from the resolved options.
        header_lines = []
        if self.job_name is not None:
            header_lines.append("#$ -N %(job-name)s")
        if queue is not None:
            header_lines.append("#$ -q %(queue)s")
        if project is not None:
            header_lines.append("#$ -P %(project)s")
        if resource_spec is not None:
            header_lines.append("#$ -l %(resource_spec)s")
        if self.log_directory is not None:
            header_lines.append("#$ -e %(log_directory)s/")
            header_lines.append("#$ -o %(log_directory)s/")
        # Run from the current working directory and merge stderr into stdout.
        header_lines.extend(["#$ -cwd", "#$ -j y"])
        header_lines.extend(["#$ %s" % arg for arg in job_extra])
        header_template = "\n".join(header_lines)

        config = {
            "job-name": self.job_name,
            "queue": queue,
            "project": project,
            "processes": self.worker_processes,
            "resource_spec": resource_spec,
            "log_directory": self.log_directory,
        }
        self.job_header = header_template % config

        logger.debug("Job script: \n %s" % self.job_script())
def get_max_jobs(queue_dict):
    """Return the largest ``max_jobs`` value declared across all queue specs.

    Parameters
    ----------
    queue_dict: dict
        Mapping of queue name to its specification dict; specs without a
        ``max_jobs`` entry are ignored.
    """
    candidates = (
        spec["max_jobs"] for spec in queue_dict.values() if "max_jobs" in spec
    )
    return max(candidates)
def get_resource_requirements(pipeline):
    """Collect the resource requirements needed to execute a graph.

    Useful when it's necessary to get the dictionary mapping the dask delayed
    keys to specific resource restrictions.
    Check https://distributed.dask.org/en/latest/resources.html#resources-with-collections for more information

    Parameters
    ----------
    pipeline: :any:`sklearn.pipeline.Pipeline`
        A :any:`sklearn.pipeline.Pipeline` wrapper with :any:`bob.pipelines.DaskWrapper`

    Example
    -------
    >>> cluster = SGEMultipleQueuesCluster(sge_job_spec=Q_1DAY_GPU_SPEC) # doctest: +SKIP
    >>> client = Client(cluster) # doctest: +SKIP
    >>> from bob.pipelines.sge import get_resource_requirements  # doctest: +SKIP
    >>> resources = get_resource_requirements(pipeline) # doctest: +SKIP
    >>> my_delayed_task.compute(scheduler=client, resources=resources) # doctest: +SKIP
    """
    # Sentinel distinguishes "attribute absent" from any attribute value,
    # mirroring a plain hasattr() check.
    missing = object()
    requirements = {}
    for step in pipeline:
        step_tags = getattr(step, "resource_tags", missing)
        if step_tags is not missing:
            requirements.update(step_tags)
    return requirements
class SGEMultipleQueuesCluster(JobQueueCluster):
    """Launch Dask jobs in the SGE cluster allowing the request of multiple
    queues.

    Parameters
    ----------
    log_directory: str
        Default directory for the SGE logs
    protocol: str
        Scheduler communication protocol
    dashboard_address: str
        Default port for the dask dashboard,
    env_extra: str,
        Extra environment variables to send to the workers
    sge_job_spec: dict
        Dictionary containing a minimum specification for the qsub command.
        It consists of:

        queue: SGE queue
        memory: Memory requirement in GB (e.g. 4GB)
        io_big: set the io_big flag
        resource_spec: Whatever extra argument to be sent to qsub (qsub -l)
        tag: Mark this worker with an specific tag so dask scheduler can place specific tasks to it (https://distributed.dask.org/en/latest/resources.html)
        max_jobs: Maximum number of jobs in the queue
    min_jobs: int
        Lower bound for the number of jobs for `self.adapt`

    Example
    -------

    Below follows a vanilla example that will create a set of jobs on all.q:

    >>> from bob.pipelines.distributed.sge import SGEMultipleQueuesCluster  # doctest: +SKIP
    >>> from dask.distributed import Client  # doctest: +SKIP
    >>> cluster = SGEMultipleQueuesCluster()  # doctest: +SKIP
    >>> cluster.scale_up(10)  # doctest: +SKIP
    >>> client = Client(cluster)  # doctest: +SKIP

    It's possible to demand a resource specification yourself:

    >>> Q_1DAY_IO_BIG_SPEC = {
    ...     "default": {
    ...         "queue": "q_1day",
    ...         "memory": "8GB",
    ...         "io_big": True,
    ...         "resource_spec": "",
    ...         "resources": "",
    ...     }
    ... }
    >>> cluster = SGEMultipleQueuesCluster(sge_job_spec=Q_1DAY_IO_BIG_SPEC)  # doctest: +SKIP
    >>> cluster.scale_up(10)  # doctest: +SKIP
    >>> client = Client(cluster)  # doctest: +SKIP

    More than one job spec can be set:

    >>> Q_1DAY_GPU_SPEC = {
    ...     "default": {
    ...         "queue": "q_1day",
    ...         "memory": "8GB",
    ...         "io_big": True,
    ...         "resource_spec": "",
    ...         "resources": "",
    ...     },
    ...     "gpu": {
    ...         "queue": "q_gpu",
    ...         "memory": "12GB",
    ...         "io_big": False,
    ...         "resource_spec": "",
    ...         "resources": {"GPU":1},
    ...     },
    ... }
    >>> cluster = SGEMultipleQueuesCluster(sge_job_spec=Q_1DAY_GPU_SPEC)  # doctest: +SKIP
    >>> cluster.scale_up(10)  # doctest: +SKIP
    >>> cluster.scale_up(1, sge_job_spec_key="gpu")  # doctest: +SKIP
    >>> client = Client(cluster)  # doctest: +SKIP

    Adaptive job allocation can also be used via `AdaptiveIdiap` extension:

    >>> cluster = SGEMultipleQueuesCluster(sge_job_spec=Q_1DAY_GPU_SPEC)  # doctest: +SKIP
    >>> cluster.adapt(Adaptive=AdaptiveIdiap,minimum=2, maximum=10)  # doctest: +SKIP
    >>> client = Client(cluster)  # doctest: +SKIP
    """

    def __init__(
        self,
        log_directory="./logs",
        protocol="tcp://",
        dashboard_address=":8787",
        env_extra=None,
        sge_job_spec=QUEUE_DEFAULT,
        min_jobs=1,
        project=rc.get("sge.project"),
        **kwargs,
    ):
        # Defining the job launcher
        self.job_cls = SGEIdiapJob
        self.sge_job_spec = sge_job_spec
        self.protocol = protocol
        self.log_directory = log_directory
        self.project = project
        silence_logs = "error"
        interface = None
        host = None
        security = None
        # Normalize env_extra into a list so a single string is accepted too.
        if env_extra is None:
            env_extra = []
        elif not isinstance(env_extra, list):
            env_extra = [env_extra]
        # Propagate the current PYTHONPATH so the workers import the same code
        # as the submitting process.
        self.env_extra = env_extra + ["export PYTHONPATH=" + ":".join(sys.path)]
        scheduler = {
            "cls": SchedulerResourceRestriction,  # Use local scheduler for now
            "options": {
                "protocol": self.protocol,
                "interface": interface,
                "host": host,
                "dashboard_address": dashboard_address,
                "security": security,
            },
        }
        # Spec cluster parameters
        loop = None
        asynchronous = False
        name = None
        # Starting the SpecCluster constructor.
        # NOTE: JobQueueCluster.__init__ is deliberately skipped here — this
        # calls SpecCluster.__init__ directly so no worker spec is required
        # up front (it is set later by self.scale()).
        super(JobQueueCluster, self).__init__(
            scheduler=scheduler,
            worker={},
            loop=loop,
            silence_logs=silence_logs,
            asynchronous=asynchronous,
            name=name,
        )
        # Start at the maximum number of jobs advertised by the spec, then
        # let the adaptive policy shrink/grow between min_jobs and max_jobs.
        max_jobs = get_max_jobs(sge_job_spec)
        self.scale(max_jobs)
        # interval: Milliseconds between checks from the scheduler
        # wait_count: Number of consecutive times that a worker should be
        #             suggested for removal before we remove it.
        self.adapt(
            minimum=min_jobs,
            maximum=max_jobs,
            wait_count=5,
            interval=10,
            target_duration="10s",
        )

    def _get_worker_spec_options(self, job_spec):
        """Craft a dask worker_spec to be used in the qsub command."""

        def _get_key_from_spec(spec, key):
            # Missing keys resolve to "" so the string concatenations below
            # stay well-formed.
            return spec[key] if key in spec else ""

        new_resource_spec = _get_key_from_spec(job_spec, "resource_spec")
        # IO_BIG
        new_resource_spec += (
            "io_big=TRUE,"
            if "io_big" in job_spec and job_spec["io_big"]
            else ""
        )
        # SGE's mem_free expects values like "8G", so drop the trailing "B"
        # from specs written as "8GB".
        memory = _get_key_from_spec(job_spec, "memory")[:-1]
        new_resource_spec += f"mem_free={memory},"
        queue = _get_key_from_spec(job_spec, "queue")
        # Non-default queues are requested through a consumable flag
        # (e.g. `-l q_1day=TRUE`) rather than `-q`.
        if queue != "all.q":
            new_resource_spec += f"{queue}=TRUE"
        new_resource_spec = (
            None if new_resource_spec == "" else new_resource_spec
        )
        return {
            "queue": queue,
            "project": self.project,
            "memory": _get_key_from_spec(job_spec, "memory"),
            "cores": 1,
            "processes": 1,
            "log_directory": self.log_directory,
            "local_directory": self.log_directory,
            "resource_spec": new_resource_spec,
            "interface": None,
            "protocol": self.protocol,
            "security": None,
            "resources": _get_key_from_spec(job_spec, "resources"),
            "env_extra": self.env_extra,
        }

    def scale(self, n_jobs, sge_job_spec_key="default"):
        """Launch an SGE job in the Idiap SGE cluster.

        Parameters
        ----------
        n_jobs: int
            Quantity of jobs to scale
        sge_job_spec_key: str
            One of the specs `SGEMultipleQueuesCluster.sge_job_spec`
        """
        if n_jobs == 0:
            # Shutting down all workers
            return super(JobQueueCluster, self).scale(0, memory=None, cores=0)
        job_spec = self.sge_job_spec[sge_job_spec_key]
        worker_spec_options = self._get_worker_spec_options(job_spec)
        n_cores = 1
        worker_spec = {"cls": self.job_cls, "options": worker_spec_options}
        # Defining a new worker_spec with some SGE characteristics
        self.new_spec = worker_spec
        return super(JobQueueCluster, self).scale(
            n_jobs, memory=None, cores=n_cores
        )

    def scale_up(self, n_jobs, sge_job_spec_key=None):
        """Scale cluster up.

        This is supposed to be used by the scheduler while dynamically
        allocating resources
        """
        # NOTE(review): a None key is forwarded positionally, which overrides
        # scale()'s "default" and would raise KeyError on
        # self.sge_job_spec[None] when n_jobs != 0 — confirm that callers
        # always pass a valid sge_job_spec_key.
        return self.scale(n_jobs, sge_job_spec_key)

    async def scale_down(self, workers, sge_job_spec_key=None):
        """Scale cluster down.

        This is supposed to be used by the scheduler while dynamically
        allocating resources
        """
        await super().scale_down(workers)

    def adapt(self, *args, **kwargs):
        """Adapt the cluster size using the resource-aware
        :class:`AdaptiveMultipleQueue` policy."""
        super().adapt(*args, Adaptive=AdaptiveMultipleQueue, **kwargs)
class AdaptiveMultipleQueue(Adaptive):
"""Custom mechanism to adaptively allocate workers based on scheduler load.
This custom implementation extends the `Adaptive.recommendations` by looking
at the `distributed.scheduler.TaskState.resource_restrictions`.
The heuristics is:
.. note ::
If a certain task has the status `no-worker` and it has resource_restrictions, the scheduler should
request a job matching those resource restrictions
"""
async def recommendations(self, target: int) -> dict:
"""Make scale up/down recommendations based on current state and
target."""
plan = self.plan
# Get tasks with no worker associated due to
# resource restrictions
resource_restrictions = (
await self.scheduler.get_no_worker_tasks_resource_restrictions()
| |
"""
## pyart radar object
pyart.core.radar
================
A general central radial scanning (or dwelling) instrument class.
.. autosummary::
:toctree: generated/
_rays_per_sweep_data_factory
_gate_data_factory
_gate_lon_lat_data_factory
_gate_altitude_data_factory
.. autosummary::
:toctree: generated/
:template: dev_template.rst
Radar
"""
# The code for the Radar object in this file was adapted from Py-ART by <NAME>. & <NAME>.
# https://github.com/ARM-DOE/pyart
from __future__ import print_function
import numpy as np
import sys
from ..configure.pyart_config import get_metadata
from ..configure.pyart_lazydict import LazyLoadDict
from .transforms import antenna_vectors_to_cartesian, cartesian_to_geographic
class Radar(object):
"""
A class for storing antenna coordinate radar data.
The structure of the Radar class is based on the CF/Radial Data file
format. Global attributes and variables (section 4.1 and 4.3) are
represented as a dictionary in the metadata attribute. Other required and
optional variables are represented as dictionaries in a attribute with the
    same name as the variable in the CF/Radial standard. When an optional
    attribute is not present the attribute has a value of None. The data for a
given variable is stored in the dictionary under the 'data' key. Moment
field data is stored as a dictionary of dictionaries in the fields
attribute. Sub-convention variables are stored as a dictionary of
dictionaries under the meta_group attribute.
Refer to the attribute section for information on the parameters.
Attributes
----------
time : dict
Time at the center of each ray.
range : dict
Range to the center of each gate (bin).
fields : dict of dicts
Moment fields.
metadata : dict
Metadata describing the instrument and data.
scan_type : str
Type of scan, one of 'ppi', 'rhi', 'sector' or 'other'. If the scan
volume contains multiple sweep modes this should be 'other'.
latitude : dict
Latitude of the instrument.
longitude : dict
Longitude of the instrument.
altitude : dict
Altitude of the instrument, above sea level.
altitude_agl : dict or None
Altitude of the instrument above ground level. If not provided this
attribute is set to None, indicating this parameter not available.
sweep_number : dict
The number of the sweep in the volume scan, 0-based.
sweep_mode : dict
Sweep mode for each mode in the volume scan.
fixed_angle : dict
        Target angle for the sweep. Azimuth angle in RHI modes, elevation
angle in all other modes.
sweep_start_ray_index : dict
Index of the first ray in each sweep relative to the start of the
volume, 0-based.
sweep_end_ray_index : dict
Index of the last ray in each sweep relative to the start of the
volume, 0-based.
rays_per_sweep : LazyLoadDict
Number of rays in each sweep. The data key of this attribute is
        created upon first access from the data in the sweep_start_ray_index and
sweep_end_ray_index attributes. If the sweep locations needs to be
modified, do this prior to accessing this attribute or use
:py:func:`init_rays_per_sweep` to reset the attribute.
target_scan_rate : dict or None
Intended scan rate for each sweep. If not provided this attribute is
set to None, indicating this parameter is not available.
rays_are_indexed : dict or None
Indication of whether ray angles are indexed to a regular grid in
each sweep. If not provided this attribute is set to None, indicating
ray angle spacing is not determined.
ray_angle_res : dict or None
If rays_are_indexed is not None, this provides the angular resolution
of the grid. If not provided or available this attribute is set to
None.
azimuth : dict
Azimuth of antenna, relative to true North. Azimuth angles are
recommended to be expressed in the range of [0, 360], but other
representations are not forbidden.
elevation : dict
Elevation of antenna, relative to the horizontal plane. Elevation
angles are recommended to be expressed in the range of [-180, 180],
but other representations are not forbidden.
gate_x, gate_y, gate_z : LazyLoadDict
Location of each gate in a Cartesian coordinate system assuming a
standard atmosphere with a 4/3 Earth's radius model. The data keys of
        these attributes are created upon first access from the data in the
range, azimuth and elevation attributes. If these attributes are
changed use :py:func:`init_gate_x_y_z` to reset.
gate_longitude, gate_latitude : LazyLoadDict
Geographic location of each gate. The projection parameter(s) defined
in the `projection` attribute are used to perform an inverse map
projection from the Cartesian gate locations relative to the radar
location to longitudes and latitudes. If these attributes are changed
use :py:func:`init_gate_longitude_latitude` to reset the attributes.
    projection : dict or str
Projection parameters defining the map projection used to transform
from Cartesian to geographic coordinates. The default dictionary sets
the 'proj' key to 'pyart_aeqd' indicating that the native Py-ART
azimuthal equidistant projection is used. This can be modified to
specify a valid pyproj.Proj projparams dictionary or string.
The special key '_include_lon_0_lat_0' is removed when interpreting
this dictionary. If this key is present and set to True, which is
required when proj='pyart_aeqd', then the radar longitude and
latitude will be added to the dictionary as 'lon_0' and 'lat_0'.
gate_altitude : LazyLoadDict
The altitude of each radar gate as calculated from the altitude of the
radar and the Cartesian z location of each gate. If this attribute
is changed use :py:func:`init_gate_altitude` to reset the attribute.
scan_rate : dict or None
Actual antenna scan rate. If not provided this attribute is set to
None, indicating this parameter is not available.
antenna_transition : dict or None
Flag indicating if the antenna is in transition, 1 = yes, 0 = no.
If not provided this attribute is set to None, indicating this
parameter is not available.
rotation : dict or None
The rotation angle of the antenna. The angle about the aircraft
longitudinal axis for a vertically scanning radar.
tilt : dict or None
The tilt angle with respect to the plane orthogonal (Z-axis) to
aircraft longitudinal axis.
roll : dict or None
The roll angle of platform, for aircraft right wing down is positive.
drift : dict or None
Drift angle of antenna, the angle between heading and track.
heading : dict or None
Heading (compass) angle, clockwise from north.
pitch : dict or None
Pitch angle of antenna, for aircraft nose up is positive.
georefs_applied : dict or None
Indicates whether the variables have had georeference calculation
applied. Leading to Earth-centric azimuth and elevation angles.
instrument_parameters : dict of dicts or None
Instrument parameters, if not provided this attribute is set to None,
        indicating these parameters are not available. This dictionary also
includes variables in the radar_parameters CF/Radial subconvention.
radar_calibration : dict of dicts or None
Instrument calibration parameters. If not provided this attribute is
        set to None, indicating these parameters are not available.
ngates : int
Number of gates (bins) in a ray.
nrays : int
Number of rays in the volume.
nsweeps : int
        Number of sweeps in the volume.
"""
def __init__(self, time, _range, fields, metadata, scan_type,
latitude, longitude, altitude,
sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
sweep_end_ray_index,
azimuth, elevation,
altitude_agl=None,
target_scan_rate=None, rays_are_indexed=None,
ray_angle_res=None,
scan_rate=None, antenna_transition=None,
instrument_parameters=None,
radar_calibration=None,
rotation=None, tilt=None, roll=None, drift=None, heading=None,
pitch=None, georefs_applied=None,
):
if 'calendar' not in time:
time['calendar'] = 'gregorian'
self.time = time
self.range = _range
self.fields = fields
self.metadata = metadata
self.scan_type = scan_type
self.latitude = latitude
self.longitude = longitude
self.altitude = altitude
self.altitude_agl = altitude_agl # optional
self.sweep_number = sweep_number
self.sweep_mode = sweep_mode
self.fixed_angle = fixed_angle
self.sweep_start_ray_index = sweep_start_ray_index
self.sweep_end_ray_index = sweep_end_ray_index
self.target_scan_rate = target_scan_rate # optional
self.rays_are_indexed = rays_are_indexed # optional
self.ray_angle_res = ray_angle_res # optional
self.azimuth = azimuth
self.elevation = elevation
self.scan_rate = scan_rate # optional
self.antenna_transition = antenna_transition # optional
self.rotation = rotation # optional
self.tilt = tilt # optional
self.roll = roll # optional
self.drift = drift # optional
self.heading = heading # optional
self.pitch = pitch # optional
self.georefs_applied = georefs_applied # optional
self.instrument_parameters = instrument_parameters # optional
self.radar_calibration = radar_calibration # optional
self.ngates = len(_range['data'])
self.nrays = len(time['data'])
self.nsweeps = len(sweep_number['data'])
self.projection = {'proj': 'pyart_aeqd', '_include_lon_0_lat_0': True}
# initalize attributes with lazy load dictionaries
self.init_rays_per_sweep()
self.init_gate_x_y_z()
self.init_gate_longitude_latitude()
self.init_gate_altitude()
def __getstate__(self):
""" Return object's state which can be pickled. """
| |
<gh_stars>1-10
###############################################################################
# telemetry_iface_ag.py
#
# IMPORTANT NOTE: This file is auto-generated by the script: build_telemetry.py
#
# Generated on: 2018-11-16 15:33:45
# Telemetry dictionary filename: telemetry_dictionary.xml
# Telemetry dictionary version: 0.1
###############################################################################
import struct
import numpy as np
class Telemetry_Object(object):
    """Marker base class shared by all auto-generated telemetry message types."""
    pass
class Heartbeat_Telemetry(Telemetry_Object):
    """Heartbeat telemetry message.

    Wire layout (native struct format "I5I256s"): one uint32 timestamp,
    five uint32 status words and a 256-byte UTF-8 status string.
    """

    class Data(object):
        """Plain value holder for the heartbeat fields."""

        def __init__(self):
            self.timestamp = int(0)
            self.status = [int(0), int(0), int(0), int(0), int(0), ]
            self.status_msg = ""

        @staticmethod
        def _text(value):
            # String fields hold str before packing and NUL-padded bytes
            # after unpack_from(); normalize both forms for display.
            if isinstance(value, bytes):
                return value.decode(errors="replace").rstrip("\x00")
            return value

        def __repr__(self):
            # Build and RETURN the description: repr() must yield a string.
            # The generated print-and-return-"" form broke logging and
            # interactive inspection, and .decode() raised on str fields.
            return "\n".join([
                "timestamp: %s" % (self.timestamp,),
                "status: %s" % (self.status,),
                "status_msg: %s" % (self._text(self.status_msg),),
            ])

    def __init__(self):
        # Pre-compile the wire format and pre-allocate the packing buffer.
        self.telemstruct = struct.Struct("I5I256s")
        self.buff = bytearray(struct.calcsize(self.telemstruct.format))
        self.data = self.Data()

    def pack(self):
        """Serialize the current field values into wire-format bytes."""
        self.telemstruct.pack_into(self.buff, 0,
            self.data.timestamp,
            self.data.status[0], self.data.status[1], self.data.status[2],
            self.data.status[3], self.data.status[4],
            bytes(self.data.status_msg, encoding="utf-8"),
        )
        return bytes(self.buff)

    def set_values(self, timestamp, status, status_msg, ):
        """Set all fields; ``status`` may be a scalar or a list of up to 5 ints."""
        self.data.timestamp = timestamp
        self.data.status = [int(0), int(0), int(0), int(0), int(0), ]
        if not hasattr(status, "__iter__"):
            status = [status]
        if len(status) > 5:
            raise ValueError("status must have no more than 5 items in the list.")
        for i, p in enumerate(status):
            self.data.status[i] = p
        self.data.status_msg = status_msg

    def unpack_from(self, buff, offset=0):
        """Decode wire bytes into ``self.data`` and return it.

        Note: the string field is left as raw, NUL-padded ``bytes``.
        """
        ret = self.telemstruct.unpack_from(buff, offset)
        self.data.timestamp = ret[0]
        self.data.status = list(ret[1:6])
        self.data.status_msg = ret[6]
        return self.data
class Session_Telemetry(Telemetry_Object):
    """Session configuration telemetry message.

    Wire layout (native struct format "256s256sI3fffIIfff256s"): name and
    description strings, a uint32 wavelength count, three float wavelengths,
    dx/dy floats, crop_fraction/rebin_factor uint32s, three lens floats and
    a 256-byte status string.
    """

    class Data(object):
        """Plain value holder for the session fields."""

        def __init__(self):
            self.name = ""
            self.description = ""
            self.num_wavelength = int(0)
            # NOTE(generator): "Should this instead be an int for
            # selection? TODO" -- open question kept from the dictionary.
            self.wavelength = [float(0), float(0), float(0), ]
            self.dx = float(0)
            self.dy = float(0)
            self.crop_fraction = int(0)
            self.rebin_factor = int(0)
            self.lens_focal_length = float(0)
            self.lens_numerical_aperture = float(0)
            self.lens_system_magnification = float(0)
            self.status_msg = ""

        @staticmethod
        def _text(value):
            # String fields hold str before packing and NUL-padded bytes
            # after unpack_from(); normalize both forms for display.
            if isinstance(value, bytes):
                return value.decode(errors="replace").rstrip("\x00")
            return value

        def __repr__(self):
            # Build and RETURN the description: repr() must yield a string.
            # The generated print-and-return-"" form broke logging and
            # interactive inspection, and .decode() raised on str fields.
            text = self._text
            return "\n".join([
                "name: %s" % (text(self.name),),
                "description: %s" % (text(self.description),),
                "num_wavelength: %s" % (self.num_wavelength,),
                "wavelength: %s" % (self.wavelength,),
                "dx: %s" % (self.dx,),
                "dy: %s" % (self.dy,),
                "crop_fraction: %s" % (self.crop_fraction,),
                "rebin_factor: %s" % (self.rebin_factor,),
                "lens_focal_length: %s" % (self.lens_focal_length,),
                "lens_numerical_aperture: %s" % (self.lens_numerical_aperture,),
                "lens_system_magnification: %s" % (self.lens_system_magnification,),
                "status_msg: %s" % (text(self.status_msg),),
            ])

    def __init__(self):
        # Pre-compile the wire format and pre-allocate the packing buffer.
        self.telemstruct = struct.Struct("256s256sI3fffIIfff256s")
        self.buff = bytearray(struct.calcsize(self.telemstruct.format))
        self.data = self.Data()

    def pack(self):
        """Serialize the current field values into wire-format bytes."""
        self.telemstruct.pack_into(self.buff, 0,
            bytes(self.data.name, encoding="utf-8"),
            bytes(self.data.description, encoding="utf-8"),
            self.data.num_wavelength,
            self.data.wavelength[0], self.data.wavelength[1], self.data.wavelength[2],
            self.data.dx,
            self.data.dy,
            self.data.crop_fraction,
            self.data.rebin_factor,
            self.data.lens_focal_length,
            self.data.lens_numerical_aperture,
            self.data.lens_system_magnification,
            bytes(self.data.status_msg, encoding="utf-8"),
        )
        return bytes(self.buff)

    def set_values(self, name, description, num_wavelength, wavelength, dx, dy, crop_fraction, rebin_factor, lens_focal_length, lens_numerical_aperture, lens_system_magnification, status_msg, ):
        """Set all fields; ``wavelength`` may be a scalar or a list of up to 3 floats."""
        self.data.name = name
        self.data.description = description
        self.data.num_wavelength = num_wavelength
        self.data.wavelength = [float(0), float(0), float(0), ]
        if not hasattr(wavelength, "__iter__"):
            wavelength = [wavelength]
        if len(wavelength) > 3:
            raise ValueError("wavelength must have no more than 3 items in the list.")
        for i, p in enumerate(wavelength):
            self.data.wavelength[i] = p
        self.data.dx = dx
        self.data.dy = dy
        self.data.crop_fraction = crop_fraction
        self.data.rebin_factor = rebin_factor
        self.data.lens_focal_length = lens_focal_length
        self.data.lens_numerical_aperture = lens_numerical_aperture
        self.data.lens_system_magnification = lens_system_magnification
        self.data.status_msg = status_msg

    def unpack_from(self, buff, offset=0):
        """Decode wire bytes into ``self.data`` and return it.

        Note: string fields are left as raw, NUL-padded ``bytes``.
        """
        ret = self.telemstruct.unpack_from(buff, offset)
        self.data.name = ret[0]
        self.data.description = ret[1]
        self.data.num_wavelength = ret[2]
        self.data.wavelength = list(ret[3:6])
        self.data.dx = ret[6]
        self.data.dy = ret[7]
        self.data.crop_fraction = ret[8]
        self.data.rebin_factor = ret[9]
        self.data.lens_focal_length = ret[10]
        self.data.lens_numerical_aperture = ret[11]
        self.data.lens_system_magnification = ret[12]
        self.data.status_msg = ret[13]
        return self.data
class Hologram_Telemetry(Telemetry_Object):
    """Hologram acquisition telemetry message.

    Wire layout (native struct format "I3fffII?256s"): a uint32 wavelength
    count, three float wavelengths, dx/dy floats, crop_fraction/rebin_factor
    uint32s, a background-subtraction flag and a 256-byte background file
    path string.
    """

    class Data(object):
        """Plain value holder for the hologram fields."""

        def __init__(self):
            self.num_wavelength = int(0)
            self.wavelength = [float(0), float(0), float(0), ]
            self.dx = float(0)
            self.dy = float(0)
            self.crop_fraction = int(0)
            self.rebin_factor = int(0)
            self.bgd_sub = False
            self.bgd_file = ""

        @staticmethod
        def _text(value):
            # String fields hold str before packing and NUL-padded bytes
            # after unpack_from(); normalize both forms for display.
            if isinstance(value, bytes):
                return value.decode(errors="replace").rstrip("\x00")
            return value

        def __repr__(self):
            # Build and RETURN the description: repr() must yield a string.
            # The generated print-and-return-"" form broke logging and
            # interactive inspection, and .decode() raised on str fields.
            return "\n".join([
                "num_wavelength: %s" % (self.num_wavelength,),
                "wavelength: %s" % (self.wavelength,),
                "dx: %s" % (self.dx,),
                "dy: %s" % (self.dy,),
                "crop_fraction: %s" % (self.crop_fraction,),
                "rebin_factor: %s" % (self.rebin_factor,),
                "bgd_sub: %s" % (self.bgd_sub,),
                "bgd_file: %s" % (self._text(self.bgd_file),),
            ])

    def __init__(self):
        # Pre-compile the wire format and pre-allocate the packing buffer.
        self.telemstruct = struct.Struct("I3fffII?256s")
        self.buff = bytearray(struct.calcsize(self.telemstruct.format))
        self.data = self.Data()

    def pack(self):
        """Serialize the current field values into wire-format bytes."""
        self.telemstruct.pack_into(self.buff, 0,
            self.data.num_wavelength,
            self.data.wavelength[0], self.data.wavelength[1], self.data.wavelength[2],
            self.data.dx,
            self.data.dy,
            self.data.crop_fraction,
            self.data.rebin_factor,
            self.data.bgd_sub,
            bytes(self.data.bgd_file, encoding="utf-8"),
        )
        return bytes(self.buff)

    def set_values(self, num_wavelength, wavelength, dx, dy, crop_fraction, rebin_factor, bgd_sub, bgd_file, ):
        """Set all fields; ``wavelength`` may be a scalar or a list of up to 3 floats."""
        self.data.num_wavelength = num_wavelength
        self.data.wavelength = [float(0), float(0), float(0), ]
        if not hasattr(wavelength, "__iter__"):
            wavelength = [wavelength]
        if len(wavelength) > 3:
            raise ValueError("wavelength must have no more than 3 items in the list.")
        for i, p in enumerate(wavelength):
            self.data.wavelength[i] = p
        self.data.dx = dx
        self.data.dy = dy
        self.data.crop_fraction = crop_fraction
        self.data.rebin_factor = rebin_factor
        self.data.bgd_sub = bgd_sub
        self.data.bgd_file = bgd_file

    def unpack_from(self, buff, offset=0):
        """Decode wire bytes into ``self.data`` and return it.

        Note: the string field is left as raw, NUL-padded ``bytes``.
        """
        ret = self.telemstruct.unpack_from(buff, offset)
        self.data.num_wavelength = ret[0]
        self.data.wavelength = list(ret[1:4])
        self.data.dx = ret[4]
        self.data.dy = ret[5]
        self.data.crop_fraction = ret[6]
        self.data.rebin_factor = ret[7]
        self.data.bgd_sub = ret[8]
        self.data.bgd_file = ret[9]
        return self.data
class Reconstruction_Telemetry(Telemetry_Object):
    """Reconstruction configuration telemetry message.

    Wire layout (native struct format "I3f??HI3f256s?f??HHHf??IIII?????256s"):
    propagation-distance count and three floats, two flags, a uint16
    processing mode, chromatic-shift count and three floats, the reference
    hologram path/flags, phase-unwrapping and fitting settings, ROI
    offsets/sizes and a trailing group of flags plus a 256-byte status
    string.
    """

    class Data(object):
        """Plain value holder for the reconstruction fields."""

        def __init__(self):
            self.num_propagation_distance = int(0)
            self.propagation_distance = [float(0), float(0), float(0), ]
            self.compute_spectral_peak = False
            self.compute_digital_phase_mask = False
            self.processing_mode = 0
            self.num_chromatic_shift = int(0)
            self.chromatic_shift = [float(0), float(0), float(0), ]
            self.ref_holo_path = ""
            self.ref_holo_enable = False
            self.ref_holo_averaging_sec = float(0)
            self.ref_holo_averaging_enabled = False
            self.phase_unwrapping_enabled = False
            self.phase_unwrapping_algorithm = 0
            self.fitting_mode = 0
            self.fitting_method = 0
            self.fitting_order = float(0)
            self.fitting_apply = False
            self.reset_phase_mask = False
            self.roi_offset_x = int(0)
            self.roi_offset_y = int(0)
            self.roi_size_x = int(0)
            self.roi_size_y = int(0)
            # NOTE(generator): the three fields below were marked
            # questionable ("?????" / "Not needed?") in the telemetry
            # dictionary -- confirm against the dictionary before relying
            # on them.
            self.store_files = False
            self.center_image = False
            self.center_image_and_tilt = False
            self.center_max_value = False
            self.center_wide_spectrum = False
            self.status_msg = ""

        @staticmethod
        def _text(value):
            # String fields hold str before packing and NUL-padded bytes
            # after unpack_from(); normalize both forms for display.
            if isinstance(value, bytes):
                return value.decode(errors="replace").rstrip("\x00")
            return value

        def __repr__(self):
            # Build and RETURN the description: repr() must yield a string.
            # The generated print-and-return-"" form broke logging and
            # interactive inspection, and .decode() raised on str fields.
            text = self._text
            return "\n".join([
                "num_propagation_distance: %s" % (self.num_propagation_distance,),
                "propagation_distance: %s" % (self.propagation_distance,),
                "compute_spectral_peak: %s" % (self.compute_spectral_peak,),
                "compute_digital_phase_mask: %s" % (self.compute_digital_phase_mask,),
                "processing_mode: %s" % (self.processing_mode,),
                "num_chromatic_shift: %s" % (self.num_chromatic_shift,),
                "chromatic_shift: %s" % (self.chromatic_shift,),
                "ref_holo_path: %s" % (text(self.ref_holo_path),),
                "ref_holo_enable: %s" % (self.ref_holo_enable,),
                "ref_holo_averaging_sec: %s" % (self.ref_holo_averaging_sec,),
                "ref_holo_averaging_enabled: %s" % (self.ref_holo_averaging_enabled,),
                "phase_unwrapping_enabled: %s" % (self.phase_unwrapping_enabled,),
                "phase_unwrapping_algorithm: %s" % (self.phase_unwrapping_algorithm,),
                "fitting_mode: %s" % (self.fitting_mode,),
                "fitting_method: %s" % (self.fitting_method,),
                "fitting_order: %s" % (self.fitting_order,),
                "fitting_apply: %s" % (self.fitting_apply,),
                "reset_phase_mask: %s" % (self.reset_phase_mask,),
                "roi_offset_x: %s" % (self.roi_offset_x,),
                "roi_offset_y: %s" % (self.roi_offset_y,),
                "roi_size_x: %s" % (self.roi_size_x,),
                "roi_size_y: %s" % (self.roi_size_y,),
                "store_files: %s" % (self.store_files,),
                "center_image: %s" % (self.center_image,),
                "center_image_and_tilt: %s" % (self.center_image_and_tilt,),
                "center_max_value: %s" % (self.center_max_value,),
                "center_wide_spectrum: %s" % (self.center_wide_spectrum,),
                "status_msg: %s" % (text(self.status_msg),),
            ])

    def __init__(self):
        # Pre-compile the wire format and pre-allocate the packing buffer.
        self.telemstruct = struct.Struct("I3f??HI3f256s?f??HHHf??IIII?????256s")
        self.buff = bytearray(struct.calcsize(self.telemstruct.format))
        self.data = self.Data()

    def pack(self):
        """Serialize the current field values into wire-format bytes."""
        self.telemstruct.pack_into(self.buff, 0,
            self.data.num_propagation_distance,
            self.data.propagation_distance[0], self.data.propagation_distance[1], self.data.propagation_distance[2],
            self.data.compute_spectral_peak,
            self.data.compute_digital_phase_mask,
            self.data.processing_mode,
            self.data.num_chromatic_shift,
            self.data.chromatic_shift[0], self.data.chromatic_shift[1], self.data.chromatic_shift[2],
            bytes(self.data.ref_holo_path, encoding="utf-8"),
            self.data.ref_holo_enable,
            self.data.ref_holo_averaging_sec,
            self.data.ref_holo_averaging_enabled,
            self.data.phase_unwrapping_enabled,
            self.data.phase_unwrapping_algorithm,
            self.data.fitting_mode,
            self.data.fitting_method,
            self.data.fitting_order,
            self.data.fitting_apply,
            self.data.reset_phase_mask,
            self.data.roi_offset_x,
            self.data.roi_offset_y,
            self.data.roi_size_x,
            self.data.roi_size_y,
            self.data.store_files,
            self.data.center_image,
            self.data.center_image_and_tilt,
            self.data.center_max_value,
            self.data.center_wide_spectrum,
            bytes(self.data.status_msg, encoding="utf-8"),
        )
        return bytes(self.buff)

    def set_values(self, num_propagation_distance, propagation_distance, compute_spectral_peak, compute_digital_phase_mask, processing_mode, num_chromatic_shift, chromatic_shift, ref_holo_path, ref_holo_enable, ref_holo_averaging_sec, ref_holo_averaging_enabled, phase_unwrapping_enabled, phase_unwrapping_algorithm, fitting_mode, fitting_method, fitting_order, fitting_apply, reset_phase_mask, roi_offset_x, roi_offset_y, roi_size_x, roi_size_y, store_files, center_image, center_image_and_tilt, center_max_value, center_wide_spectrum, status_msg, ):
        """Set all fields; the two distance/shift lists accept a scalar or up to 3 floats."""
        self.data.num_propagation_distance = num_propagation_distance
        self.data.propagation_distance = [float(0), float(0), float(0), ]
        if not hasattr(propagation_distance, "__iter__"):
            propagation_distance = [propagation_distance]
        if len(propagation_distance) > 3:
            raise ValueError("propagation_distance must have no more than 3 items in the list.")
        for i, p in enumerate(propagation_distance):
            self.data.propagation_distance[i] = p
        self.data.compute_spectral_peak = compute_spectral_peak
        self.data.compute_digital_phase_mask = compute_digital_phase_mask
        self.data.processing_mode = processing_mode
        self.data.num_chromatic_shift = num_chromatic_shift
        self.data.chromatic_shift = [float(0), float(0), float(0), ]
        if not hasattr(chromatic_shift, "__iter__"):
            chromatic_shift = [chromatic_shift]
        if len(chromatic_shift) > 3:
            raise ValueError("chromatic_shift must have no more than 3 items in the list.")
        for i, p in enumerate(chromatic_shift):
            self.data.chromatic_shift[i] = p
        self.data.ref_holo_path = ref_holo_path
        self.data.ref_holo_enable = ref_holo_enable
        self.data.ref_holo_averaging_sec = ref_holo_averaging_sec
        self.data.ref_holo_averaging_enabled = ref_holo_averaging_enabled
        self.data.phase_unwrapping_enabled = phase_unwrapping_enabled
        self.data.phase_unwrapping_algorithm = phase_unwrapping_algorithm
        self.data.fitting_mode = fitting_mode
        self.data.fitting_method = fitting_method
        self.data.fitting_order = fitting_order
        self.data.fitting_apply = fitting_apply
        self.data.reset_phase_mask = reset_phase_mask
        self.data.roi_offset_x = roi_offset_x
        self.data.roi_offset_y = roi_offset_y
        self.data.roi_size_x = roi_size_x
        self.data.roi_size_y = roi_size_y
        self.data.store_files = store_files
        self.data.center_image = center_image
        self.data.center_image_and_tilt = center_image_and_tilt
        self.data.center_max_value = center_max_value
        self.data.center_wide_spectrum = center_wide_spectrum
        self.data.status_msg = status_msg

    def unpack_from(self, buff, offset=0):
        """Decode wire bytes into ``self.data`` and return it.

        Note: string fields are left as raw, NUL-padded ``bytes``.
        """
        ret = self.telemstruct.unpack_from(buff, offset)
        self.data.num_propagation_distance = ret[0]
        self.data.propagation_distance = list(ret[1:4])
        self.data.compute_spectral_peak = ret[4]
        self.data.compute_digital_phase_mask = ret[5]
        self.data.processing_mode = ret[6]
        self.data.num_chromatic_shift = ret[7]
        self.data.chromatic_shift = list(ret[8:11])
        self.data.ref_holo_path = ret[11]
        self.data.ref_holo_enable = ret[12]
        self.data.ref_holo_averaging_sec = ret[13]
        self.data.ref_holo_averaging_enabled = ret[14]
        self.data.phase_unwrapping_enabled = ret[15]
        self.data.phase_unwrapping_algorithm = ret[16]
        self.data.fitting_mode = ret[17]
        self.data.fitting_method = ret[18]
        self.data.fitting_order = ret[19]
        self.data.fitting_apply = ret[20]
        self.data.reset_phase_mask = ret[21]
        self.data.roi_offset_x = ret[22]
        self.data.roi_offset_y = ret[23]
        self.data.roi_size_x = ret[24]
        self.data.roi_size_y = ret[25]
        self.data.store_files = ret[26]
        self.data.center_image = ret[27]
        self.data.center_image_and_tilt = ret[28]
        self.data.center_max_value = ret[29]
        self.data.center_wide_spectrum = ret[30]
        self.data.status_msg = ret[31]
        return self.data
class Framesource_Telemetry(Telemetry_Object):
    """Frame-source status telemetry message.

    Wire layout (native struct format "H10s256s256s256s"): a uint16 state,
    a 10-byte mode string and three 256-byte strings (file path, current
    file, status message).
    """

    class Data(object):
        """Plain value holder for the frame-source fields."""

        def __init__(self):
            self.state = 0
            self.mode = ""
            self.file_path = ""
            self.current_file = ""
            self.status_msg = ""

        @staticmethod
        def _text(value):
            # String fields hold str before packing and NUL-padded bytes
            # after unpack_from(); normalize both forms for display.
            if isinstance(value, bytes):
                return value.decode(errors="replace").rstrip("\x00")
            return value

        def __repr__(self):
            # Build and RETURN the description: repr() must yield a string.
            # The generated print-and-return-"" form broke logging and
            # interactive inspection, and .decode() raised on str fields.
            text = self._text
            return "\n".join([
                "state: %s" % (self.state,),
                "mode: %s" % (text(self.mode),),
                "file_path: %s" % (text(self.file_path),),
                "current_file: %s" % (text(self.current_file),),
                "status_msg: %s" % (text(self.status_msg),),
            ])

    def __init__(self):
        # Pre-compile the wire format and pre-allocate the packing buffer.
        self.telemstruct = struct.Struct("H10s256s256s256s")
        self.buff = bytearray(struct.calcsize(self.telemstruct.format))
        self.data = self.Data()

    def pack(self):
        """Serialize the current field values into wire-format bytes."""
        self.telemstruct.pack_into(self.buff, 0,
            self.data.state,
            bytes(self.data.mode, encoding="utf-8"),
            bytes(self.data.file_path, encoding="utf-8"),
            bytes(self.data.current_file, encoding="utf-8"),
            bytes(self.data.status_msg, encoding="utf-8"),
        )
        return bytes(self.buff)

    def set_values(self, state, mode, file_path, current_file, status_msg, ):
        """Set all fields from the given values."""
        self.data.state = state
        self.data.mode = mode
        self.data.file_path = file_path
        self.data.current_file = current_file
        self.data.status_msg = status_msg

    def unpack_from(self, buff, offset=0):
        """Decode wire bytes into ``self.data`` and return it.

        Note: string fields are left as raw, NUL-padded ``bytes``.
        """
        ret = self.telemstruct.unpack_from(buff, offset)
        self.data.state = ret[0]
        self.data.mode = ret[1]
        self.data.file_path = ret[2]
        self.data.current_file = ret[3]
        self.data.status_msg = ret[4]
        return self.data
class Datalogger_Telemetry(Telemetry_Object):
class Data(object):
def __init__(self):
self.enabled = False
self.rootpath = ""
self.status_msg = ""
def __repr__(self):
print("enabled: ", self.enabled)
print("rootpath: %s"%(self.rootpath.decode()))
print("status_msg: %s"%(self.status_msg.decode()))
return ""
def __init__(self):
self.telemstruct = struct.Struct("?256s256s")
self.buff = bytearray(struct.calcsize(self.telemstruct.format))
self.data = self.Data()
def pack(self):
self.telemstruct.pack_into(self.buff, 0,
self.data.enabled,
bytes(self.data.rootpath,encoding="utf-8"),
bytes(self.data.status_msg,encoding="utf-8"),
)
return bytes(self.buff)
def set_values(self, enabled, rootpath, status_msg, ):
self.data.enabled = enabled
self.data.rootpath | |
ribonucleoprotein protein IMP3'),
'P32900' : ntuniprot(RecName_Full='Protein SKG6'),
'P32901' : ntuniprot(RecName_Full='Peptide transporter PTR2'),
'P32902' : ntuniprot(RecName_Full='37S ribosomal protein MRP4, mitochondrial'),
'P32903' : ntuniprot(RecName_Full='Plasma membrane ATPase proteolipid 1'),
'P32904' : ntuniprot(RecName_Full='54S ribosomal protein L6, mitochondrial'),
'P32905' : ntuniprot(RecName_Full='40S ribosomal protein S0-A {ECO:0000255|HAMAP-Rule:MF_03015, ECO:0000303|PubMed:9559554}'),
'P32906' : ntuniprot(RecName_Full='Endoplasmic reticulum mannosyl-oligosaccharide 1,2-alpha-mannosidase'),
'P32907' : ntuniprot(RecName_Full='Ammonia transport outward protein 2'),
'P32908' : ntuniprot(RecName_Full='Structural maintenance of chromosomes protein 1'),
'P32909' : ntuniprot(RecName_Full='Protein SMY2'),
'P32910' : ntuniprot(RecName_Full='DNA-directed RNA polymerase III subunit RPC6'),
'P32911' : ntuniprot(RecName_Full='Eukaryotic translation initiation factor eIF-1'),
'P32912' : ntuniprot(RecName_Full='Vacuolar morphogenesis protein 7'),
'P32913' : ntuniprot(RecName_Full='Vacuolar protein sorting-associated protein 17'),
'P32914' : ntuniprot(RecName_Full='Transcription elongation factor SPT4'),
'P32915' : ntuniprot(RecName_Full='Protein transport protein SEC61'),
'P32916' : ntuniprot(RecName_Full='Signal recognition particle receptor subunit alpha homolog'),
'P32917' : ntuniprot(RecName_Full='Protein STE5'),
'P32939' : ntuniprot(RecName_Full='GTP-binding protein YPT7'),
'P32943' : ntuniprot(RecName_Full='S-phase entry cyclin-6'),
'P32944' : ntuniprot(RecName_Full='Mitosis inhibitor protein kinase SWE1'),
'P32945' : ntuniprot(RecName_Full='Serine/threonine-protein phosphatase PPQ'),
'P33122' : ntuniprot(RecName_Full='Serine-rich protein TYE7'),
'P33199' : ntuniprot(RecName_Full='Uncharacterized protein YGL015C'),
'P33200' : ntuniprot(RecName_Full='Transcription factor PDR3'),
'P33201' : ntuniprot(RecName_Full='Ribosome assembly factor MRT4 {ECO:0000303|PubMed:19346338}'),
'P33202' : ntuniprot(RecName_Full='Ubiquitin fusion degradation protein 4'),
'P33203' : ntuniprot(RecName_Full='Pre-mRNA-processing protein PRP40'),
'P33204' : ntuniprot(RecName_Full='Actin-related protein 2/3 complex subunit 4'),
'P33296' : ntuniprot(RecName_Full='Ubiquitin-conjugating enzyme E2 6'),
'P33297' : ntuniprot(RecName_Full='26S proteasome regulatory subunit 6A'),
'P33298' : ntuniprot(RecName_Full='26S proteasome regulatory subunit 6B homolog'),
'P33299' : ntuniprot(RecName_Full='26S proteasome regulatory subunit 7 homolog'),
'P33300' : ntuniprot(RecName_Full='Mannosyl phosphorylinositol ceramide synthase SUR1'),
'P33301' : ntuniprot(RecName_Full='DNA repair protein XRS2'),
'P33302' : ntuniprot(RecName_Full='Pleiotropic ABC efflux transporter of multiple drugs'),
'P33303' : ntuniprot(RecName_Full='Succinate/fumarate mitochondrial transporter'),
'P33304' : ntuniprot(RecName_Full='Protein AFR1'),
'P33306' : ntuniprot(RecName_Full='Protein BCK2'),
'P33307' : ntuniprot(RecName_Full='Importin alpha re-exporter'),
'P33308' : ntuniprot(RecName_Full='Mediator of RNA polymerase II transcription subunit 9'),
'P33309' : ntuniprot(RecName_Full='Protein DOM34'),
'P33310' : ntuniprot(RecName_Full='ATP-dependent permease MDL1, mitochondrial'),
'P33311' : ntuniprot(RecName_Full='ATP-dependent permease MDL2, mitochondrial'),
'P33312' : ntuniprot(RecName_Full="2,5-diamino-6-ribosylamino-4(3H)-pyrimidinone 5'-phosphate reductase"),
'P33313' : ntuniprot(RecName_Full='Hsp70/Hsp90 co-chaperone CNS1'),
'P33314' : ntuniprot(RecName_Full='Inhibitory regulator protein BUD2/CLA2'),
'P33315' : ntuniprot(RecName_Full='Transketolase 2'),
'P33317' : ntuniprot(RecName_Full="Deoxyuridine 5'-triphosphate nucleotidohydrolase"),
'P33322' : ntuniprot(RecName_Full='H/ACA ribonucleoprotein complex subunit CBF5'),
'P33323' : ntuniprot(RecName_Full='Meiotic recombination protein REC104'),
'P33324' : ntuniprot(RecName_Full='CRAL-TRIO domain-containing protein YKL091C'),
'P33327' : ntuniprot(RecName_Full='NAD-specific glutamate dehydrogenase'),
'P33328' : ntuniprot(RecName_Full='Synaptobrevin homolog 2'),
'P33329' : ntuniprot(RecName_Full='Serine/threonine-protein phosphatase PP-Z2'),
'P33330' : ntuniprot(RecName_Full='Phosphoserine aminotransferase'),
'P33331' : ntuniprot(RecName_Full='Nuclear transport factor 2'),
'P33332' : ntuniprot(RecName_Full='Exocyst complex component SEC3'),
'P33333' : ntuniprot(RecName_Full='Probable 1-acyl-sn-glycerol-3-phosphate acyltransferase'),
'P33334' : ntuniprot(RecName_Full='Pre-mRNA-splicing factor 8'),
'P33335' : ntuniprot(RecName_Full='Protein SGE1'),
'P33336' : ntuniprot(RecName_Full='Beta-glucan synthesis-associated protein SKN1'),
'P33338' : ntuniprot(RecName_Full='Protein SLA2'),
'P33339' : ntuniprot(RecName_Full='Transcription factor tau 131 kDa subunit'),
'P33399' : ntuniprot(RecName_Full='La protein homolog'),
'P33400' : ntuniprot(RecName_Full='pH-response transcription factor pacC/RIM101'),
'P33401' : ntuniprot(RecName_Full='Phosphoglucomutase 1 {ECO:0000303|PubMed:5784209}'),
'P33411' : ntuniprot(RecName_Full='Pre-mRNA-splicing factor 18'),
'P33412' : ntuniprot(RecName_Full='Ethanolamine-phosphate cytidylyltransferase'),
'P33413' : ntuniprot(RecName_Full='Urea active transporter'),
'P33416' : ntuniprot(RecName_Full='Heat shock protein 78, mitochondrial'),
'P33417' : ntuniprot(RecName_Full='Intrastrand cross-link recognition protein'),
'P33418' : ntuniprot(RecName_Full='Exportin-T'),
'P33419' : ntuniprot(RecName_Full='Spindle pole component 29'),
'P33420' : ntuniprot(RecName_Full='Protein NIP100'),
'P33421' : ntuniprot(RecName_Full='Succinate dehydrogenase [ubiquinone] cytochrome b subunit, mitochondrial'),
'P33441' : ntuniprot(RecName_Full='THO complex subunit MFT1'),
'P33442' : ntuniprot(RecName_Full='40S ribosomal protein S1-A {ECO:0000255|HAMAP-Rule:MF_03122, ECO:0000303|PubMed:9559554}'),
'P33448' : ntuniprot(RecName_Full='Mitochondrial import receptor subunit TOM6'),
'P33550' : ntuniprot(RecName_Full='Probable mannosyltransferase KTR2'),
'P33734' : ntuniprot(RecName_Full='Imidazole glycerol phosphate synthase hisHF'),
'P33748' : ntuniprot(RecName_Full='Zinc finger protein MSN2'),
'P33749' : ntuniprot(RecName_Full='Zinc finger protein MSN4'),
'P33750' : ntuniprot(RecName_Full='Protein SOF1'),
'P33751' : ntuniprot(RecName_Full='Flavin prenyltransferase PAD1, mitochondrial {ECO:0000255|HAMAP-Rule:MF_03197, ECO:0000305}'),
'P33753' : ntuniprot(RecName_Full='tRNA (uracil(54)-C(5))-methyltransferase'),
'P33754' : ntuniprot(RecName_Full='Translocation protein SEC66'),
'P33755' : ntuniprot(RecName_Full='Nuclear protein localization protein 4'),
'P33757' : ntuniprot(RecName_Full='Sporulation protein 23'),
'P33759' : ntuniprot(RecName_Full='37S ribosomal protein S5, mitochondrial'),
'P33760' : ntuniprot(RecName_Full='Peroxisomal ATPase PEX6'),
'P33767' : ntuniprot(RecName_Full='Dolichyl-diphosphooligosaccharide--protein glycosyltransferase subunit WBP1'),
'P33775' : ntuniprot(RecName_Full='Dolichyl-phosphate-mannose--protein mannosyltransferase 1 {ECO:0000305}'),
'P33890' : ntuniprot(RecName_Full='Cold shock-induced protein TIR2'),
'P33891' : ntuniprot(RecName_Full='Protein transport protein TIP20'),
'P33892' : ntuniprot(RecName_Full='eIF-2-alpha kinase activator GCN1 {ECO:0000305}'),
'P33893' : ntuniprot(RecName_Full='Glutamyl-tRNA(Gln) amidotransferase subunit B, mitochondrial {ECO:0000255|HAMAP-Rule:MF_03147}'),
'P33894' : ntuniprot(RecName_Full='Dipeptidyl aminopeptidase A'),
'P33895' : ntuniprot(RecName_Full='Kinetochore protein NUF2'),
'P34072' : ntuniprot(RecName_Full='Negative regulator of RAS-cAMP pathway'),
'P34077' : ntuniprot(RecName_Full='Nucleoporin NIC96'),
'P34078' : ntuniprot(RecName_Full='Protein LTV1'),
'P34087' : ntuniprot(RecName_Full='DNA-directed RNA polymerase II subunit RPB7'),
'P34110' : ntuniprot(RecName_Full='Vacuolar protein sorting-associated protein 35'),
'P34111' : ntuniprot(RecName_Full='Transcription factor tau 138 kDa subunit'),
'P34160' : ntuniprot(RecName_Full='Nuclear cap-binding protein complex subunit 1'),
'P34161' : ntuniprot(RecName_Full='Homeobox protein YOX1'),
'P34162' : ntuniprot(RecName_Full='Mediator of RNA polymerase II transcription subunit 20'),
'P34163' : ntuniprot(RecName_Full='Sterol esterase TGL1'),
'P34164' : ntuniprot(RecName_Full='SNF1 protein kinase subunit beta-2'),
'P34165' : ntuniprot(RecName_Full='Mating hormone A-factor 1'),
'P34166' : ntuniprot(RecName_Full='Mating hormone A-factor 2'),
'P34167' : ntuniprot(RecName_Full='Eukaryotic translation initiation factor 4B'),
'P34216' : ntuniprot(RecName_Full='EH domain-containing and endocytosis protein 1'),
'P34217' : ntuniprot(RecName_Full='RNA-binding protein PIN4'),
'P34218' : ntuniprot(RecName_Full='Histone acetyltransferase SAS3'),
'P34219' : ntuniprot(RecName_Full='Transcriptional regulatory protein TOD6'),
'P34220' : ntuniprot(RecName_Full='Deoxyribonuclease Tat-D'),
'P34221' : ntuniprot(RecName_Full='Protein phosphatase 2C homolog 3'),
'P34222' : ntuniprot(RecName_Full='Peptidyl-tRNA hydrolase 2'),
'P34223' : ntuniprot(RecName_Full='UBX domain-containing protein 1'),
'P34224' : ntuniprot(RecName_Full='Uncharacterized protein YBL059W'),
'P34225' : ntuniprot(RecName_Full='Guanine-nucleotide exchange factor YEL1'),
'P34226' : ntuniprot(RecName_Full='Protein SKT5'),
'P34227' : ntuniprot(RecName_Full='Peroxiredoxin PRX1, mitochondrial {ECO:0000305}'),
'P34228' : ntuniprot(RecName_Full='Putative transcription factor SEF1'),
'P34230' : ntuniprot(RecName_Full='Peroxisomal long-chain fatty acid import protein 1'),
'P34231' : ntuniprot(RecName_Full='Uncharacterized protein YKL187C'),
'P34232' : ntuniprot(RecName_Full='mRNA transport regulator MTR2'),
'P34233' : ntuniprot(RecName_Full='Transcriptional regulatory protein ASH1'),
'P34234' : ntuniprot(RecName_Full='Protein LOT5'),
'P34237' : ntuniprot(RecName_Full='Protein CASP'),
'P34239' : ntuniprot(RecName_Full='Protein LST4'),
'P34240' : ntuniprot(RecName_Full='Zinc-regulated transporter 3'),
'P34241' : ntuniprot(RecName_Full='Nucleolar pre-ribosomal-associated protein 1'),
'P34243' : ntuniprot(RecName_Full='DNA polymerase alpha-associated DNA helicase A'),
'P34244' : ntuniprot(RecName_Full='Probable serine/threonine-protein kinase HSL1'),
'P34246' : ntuniprot(RecName_Full='Maintenance of telomere capping protein 2'),
'P34247' : ntuniprot(RecName_Full='U3 small nucleolar RNA-associated protein 11'),
'P34248' : ntuniprot(RecName_Full='Probable intramembrane protease YKL100C'),
'P34250' : ntuniprot(RecName_Full='Eisosome protein SEG2'),
'P34251' : ntuniprot(RecName_Full='Uncharacterized oxidoreductase YKL107W'),
'P34252' : ntuniprot(RecName_Full='DNA replication regulator SLD2'),
'P34253' : ntuniprot(RecName_Full='Protein KTI12'),
'P34730' : ntuniprot(RecName_Full='Protein BMH2'),
'P34756' : ntuniprot(RecName_Full='1-phosphatidylinositol 3-phosphate 5-kinase FAB1'),
'P34758' : ntuniprot(RecName_Full='Protein SCD5'),
'P34760' : ntuniprot(RecName_Full='Peroxiredoxin TSA1 {ECO:0000305}'),
'P34761' : ntuniprot(RecName_Full='Protein WHI3'),
'P34909' : ntuniprot(RecName_Full='General negative regulator of transcription subunit 4'),
'P35056' : ntuniprot(RecName_Full='Peroxisomal targeting signal receptor'),
'P35127' : ntuniprot(RecName_Full='Ubiquitin carboxyl-terminal hydrolase YUH1'),
'P35169' : ntuniprot(RecName_Full='Serine/threonine-protein kinase TOR1'),
'P35172' : ntuniprot(RecName_Full='Probable trehalase'),
'P35176' : ntuniprot(RecName_Full='Peptidyl-prolyl cis-trans isomerase D'),
'P35177' : ntuniprot(RecName_Full='Transcriptional activator SPT7'),
'P35178' : ntuniprot(RecName_Full='Ribosomal RNA-processing protein 1'),
'P35179' : ntuniprot(RecName_Full='Protein transport protein SSS1'),
'P35180' : ntuniprot(RecName_Full='Mitochondrial import receptor subunit TOM20'),
'P35181' : ntuniprot(RecName_Full='AP-1 complex subunit sigma-1'),
'P35182' : ntuniprot(RecName_Full='Protein phosphatase 2C homolog 1'),
'P35183' : ntuniprot(RecName_Full='Protein AST1'),
'P35184' : ntuniprot(RecName_Full='Ribosome assembly protein SQT1'),
'P35187' : ntuniprot(RecName_Full='ATP-dependent helicase SGS1 {ECO:0000303|PubMed:7969174}'),
'P35189' : ntuniprot(RecName_Full='Transcription initiation factor TFIID subunit 14'),
'P35190' : ntuniprot(RecName_Full='PHO85 cyclin CLG1'),
'P35191' : ntuniprot(RecName_Full='DnaJ homolog 1, mitochondrial'),
'P35192' : ntuniprot(RecName_Full='Metal-binding activator 1'),
'P35193' : ntuniprot(RecName_Full='Autophagy-related protein 19'),
'P35194' : ntuniprot(RecName_Full='U3 small nucleolar RNA-associated protein 20'),
'P35195' : ntuniprot(RecName_Full='UPF0045 protein ECM15'),
'P35196' : ntuniprot(RecName_Full='Dehydrodolichyl diphosphate synthase complex subunit RER2 {ECO:0000305}'),
'P35197' : ntuniprot(RecName_Full='ADP-ribosylation factor GTPase-activating protein GCS1'),
'P35198' : ntuniprot(RecName_Full='Protein MTH1'),
'P35200' : ntuniprot(RecName_Full='Protein UPS2, mitochondrial'),
'P35201' : ntuniprot(RecName_Full='Inner kinetochore subunit MIF2 {ECO:0000305}'),
'P35202' : ntuniprot(RecName_Full='Thiamine pyrophosphokinase'),
'P35203' : ntuniprot(RecName_Full='Centromere DNA-binding protein complex CBF3 subunit C'),
'P35206' : ntuniprot(RecName_Full='Mannosyl phosphorylinositol ceramide synthase regulatory protein CSG2'),
'P35207' : ntuniprot(RecName_Full='Antiviral helicase SKI2'),
'P35208' : ntuniprot(RecName_Full='Protein SPT10'),
'P35209' : ntuniprot(RecName_Full='Protein SPT21'),
'P35210' : ntuniprot(RecName_Full='Protein SPT23'),
'P35497' : ntuniprot(RecName_Full='Sorbitol dehydrogenase 1'),
'P35688' : ntuniprot(RecName_Full='Rho-GTPase-activating protein LRG1'),
'P35691' : ntuniprot(RecName_Full='Translationally-controlled tumor protein homolog'),
'P35718' : ntuniprot(RecName_Full='DNA-directed RNA polymerase III subunit RPC8'),
'P35719' : ntuniprot(RecName_Full='Uncharacterized protein MRP8'),
'P35723' : ntuniprot(RecName_Full='Endoplasmic reticulum transmembrane protein 1'),
'P35724' : ntuniprot(RecName_Full='Manganese resistance protein MNR2'),
'P35725' : ntuniprot(RecName_Full='Uncharacterized protein YKL063C'),
'P35727' : ntuniprot(RecName_Full='Biogenesis of lysosome-related organelles complex 1 subunit BLI1'),
'P35728' : ntuniprot(RecName_Full='Protein MPE1'),
'P35729' : ntuniprot(RecName_Full='Nucleoporin NUP120'),
'P35731' : ntuniprot(RecName_Full='3-oxoacyl-[acyl-carrier-protein] reductase'),
'P35732' : ntuniprot(RecName_Full='RNA polymerase II degradation factor 1'),
'P35734' : ntuniprot(RecName_Full='DASH complex subunit ASK1'),
'P35735' : ntuniprot(RecName_Full='Protein SFK1'),
'P35736' : ntuniprot(RecName_Full='Uncharacterized protein YKL050C'),
'P35817' : ntuniprot(RecName_Full='Bromodomain-containing factor 1'),
'P35842' : ntuniprot(RecName_Full='Acid phosphatase PHO11'),
'P35843' : ntuniprot(RecName_Full='Protein HES1'),
'P35844' : ntuniprot(RecName_Full='Oxysterol-binding protein homolog 4'),
'P35845' : ntuniprot(RecName_Full='Oxysterol-binding protein homolog 1'),
'P35994' : ntuniprot(RecName_Full='Seripauperin-16'),
'P35995' : ntuniprot(RecName_Full='Uncharacterized transcriptional regulatory protein YKL222C'),
'P35996' : ntuniprot(RecName_Full='54S ribosomal protein L38, mitochondrial'),
'P35997' : ntuniprot(RecName_Full='40S ribosomal protein S27-A {ECO:0000303|PubMed:9559554}'),
'P35999' : ntuniprot(RecName_Full='Mitochondrial intermediate peptidase'),
'P36000' : ntuniprot(RecName_Full='AP-1 complex subunit beta-1'),
'P36001' : ntuniprot(RecName_Full='Probable folylpolyglutamate synthase'),
'P36002' : ntuniprot(RecName_Full='Serine/threonine-protein kinase PTK1/STK1'),
'P36003' : ntuniprot(RecName_Full='Nitrogen network kinase 1'),
'P36004' : ntuniprot(RecName_Full='Probable serine/threonine-protein kinase KKQ8'),
'P36005' : ntuniprot(RecName_Full='Serine/threonine-protein kinase KDX1'),
'P36006' : ntuniprot(RecName_Full='Myosin-3'),
'P36007' : ntuniprot(RecName_Full='L-threo-3-hydroxyaspartate ammonia-lyase {ECO:0000305|PubMed:12951240}'),
'P36008' : ntuniprot(RecName_Full='Elongation factor 1-gamma 2'),
'P36009' : | |
# Repository: krishp058/hpy
from .support import HPyTest
from .test_hpytype import PointTemplate
class TestSlots(HPyTest):
    """Exercise HPy type slots on extension types built from PointTemplate:
    tp_new/tp_init/tp_destroy, the nb_* number-protocol slots, and the
    buffer protocol (bf_getbuffer / bf_releasebuffer).

    Each test compiles a small C module (source given to ``make_module``)
    and then drives the resulting type from Python.
    """

    # C-level boilerplate (@DEFINE_PointObject, @DEFINE_Point_new, ...) is
    # expanded from this template by make_module.
    ExtensionTemplate = PointTemplate

    def test_tp_init(self):
        """HPy_tp_init: __init__ parses its arguments and fills the C struct."""
        mod = self.make_module("""
            @DEFINE_PointObject
            @DEFINE_Point_xy
            HPyDef_SLOT(Point_new, HPyType_GenericNew, HPy_tp_new)
            HPyDef_SLOT(Point_init, Point_init_impl, HPy_tp_init)
            static int Point_init_impl(HPyContext *ctx, HPy self, HPy *args,
                                       HPy_ssize_t nargs, HPy kw)
            {
                long x, y;
                if (!HPyArg_Parse(ctx, NULL, args, nargs, "ll", &x, &y))
                    return -1;
                PointObject *p = PointObject_AsStruct(ctx, self);
                p->x = x;
                p->y = y;
                return 0;
            }
            @EXPORT_POINT_TYPE(&Point_new, &Point_init, &Point_x, &Point_y)
            @INIT
        """)
        p = mod.Point(1, 2)
        assert p.x == 1
        assert p.y == 2

    def test_tp_destroy(self):
        """HPy_tp_destroy: the destroy slot runs when an instance is collected."""
        import gc
        mod = self.make_module("""
            @DEFINE_PointObject
            @DEFINE_Point_new
            static long destroyed_x;
            HPyDef_SLOT(Point_destroy, Point_destroy_impl, HPy_tp_destroy)
            static void Point_destroy_impl(void *obj)
            {
                PointObject *point = (PointObject *)obj;
                destroyed_x += point->x;
            }
            HPyDef_METH(get_destroyed_x, "get_destroyed_x", get_destroyed_x_impl, HPyFunc_NOARGS)
            static HPy get_destroyed_x_impl(HPyContext *ctx, HPy self)
            {
                return HPyLong_FromLong(ctx, destroyed_x);
            }
            @EXPORT_POINT_TYPE(&Point_new, &Point_destroy)
            @EXPORT(get_destroyed_x)
            @INIT
        """)
        point = mod.Point(7, 3)
        assert mod.get_destroyed_x() == 0
        del point
        gc.collect()
        assert mod.get_destroyed_x() == 7
        # the destroy slot must run exactly once: a second collection adds nothing
        gc.collect()
        assert mod.get_destroyed_x() == 7

    def test_nb_ops_binary(self):
        """HPy_nb_* binary slots: each receives (self, other) and here returns
        a tuple tagging which slot fired."""
        import operator
        mod = self.make_module(r"""
            @DEFINE_PointObject
            #define MYSLOT(NAME) \
                HPyDef_SLOT(p_##NAME, NAME##_impl, HPy_nb_##NAME); \
                static HPy NAME##_impl(HPyContext *ctx, HPy self, HPy other) \
                { \
                    HPy s = HPyUnicode_FromString(ctx, #NAME); \
                    HPy res = HPyTuple_Pack(ctx, 3, self, s, other); \
                    HPy_Close(ctx, s); \
                    return res; \
                }
            MYSLOT(add)
            MYSLOT(and)
            MYSLOT(divmod)
            MYSLOT(floor_divide)
            MYSLOT(lshift)
            MYSLOT(multiply)
            MYSLOT(or)
            MYSLOT(remainder)
            MYSLOT(rshift)
            MYSLOT(subtract)
            MYSLOT(true_divide)
            MYSLOT(xor)
            MYSLOT(matrix_multiply)
            @EXPORT_POINT_TYPE(&p_add, &p_and, &p_divmod, &p_floor_divide, &p_lshift, &p_multiply, &p_or, &p_remainder, &p_rshift, &p_subtract, &p_true_divide, &p_xor, &p_matrix_multiply)
            @INIT
        """)
        p = mod.Point()
        assert p + 42 == (p, "add", 42)
        assert p & 42 == (p, "and", 42)
        assert divmod(p, 42) == (p, "divmod", 42)
        assert p // 42 == (p, "floor_divide", 42)
        assert p << 42 == (p, "lshift", 42)
        assert p * 42 == (p, "multiply", 42)
        assert p | 42 == (p, "or", 42)
        assert p % 42 == (p, "remainder", 42)
        assert p >> 42 == (p, "rshift", 42)
        assert p - 42 == (p, "subtract", 42)
        assert p / 42 == (p, "true_divide", 42)
        assert p ^ 42 == (p, "xor", 42)
        # we can't use '@' because we want to be importable on py27
        assert operator.matmul(p, 42) == (p, "matrix_multiply", 42)

    def test_nb_ops_inplace(self):
        """HPy_nb_inplace_* slots: augmented assignment dispatches to them."""
        import operator
        mod = self.make_module(r"""
            @DEFINE_PointObject
            #define MYSLOT(NAME) \
                HPyDef_SLOT(p_##NAME, NAME##_impl, HPy_nb_##NAME); \
                static HPy NAME##_impl(HPyContext *ctx, HPy self, HPy other) \
                { \
                    HPy s = HPyUnicode_FromString(ctx, #NAME); \
                    HPy res = HPyTuple_Pack(ctx, 3, self, s, other); \
                    HPy_Close(ctx, s); \
                    return res; \
                }
            MYSLOT(inplace_add)
            MYSLOT(inplace_and)
            MYSLOT(inplace_floor_divide)
            MYSLOT(inplace_lshift)
            MYSLOT(inplace_multiply)
            MYSLOT(inplace_or)
            MYSLOT(inplace_remainder)
            MYSLOT(inplace_rshift)
            MYSLOT(inplace_subtract)
            MYSLOT(inplace_true_divide)
            MYSLOT(inplace_xor)
            MYSLOT(inplace_matrix_multiply)
            @EXPORT_POINT_TYPE(&p_inplace_add, &p_inplace_and, &p_inplace_floor_divide, &p_inplace_lshift, &p_inplace_multiply, &p_inplace_or, &p_inplace_remainder, &p_inplace_rshift, &p_inplace_subtract, &p_inplace_true_divide, &p_inplace_xor, &p_inplace_matrix_multiply)
            @INIT
        """)
        p = mod.Point()
        tmp = p; tmp += 42; assert tmp == (p, "inplace_add", 42)
        tmp = p; tmp &= 42; assert tmp == (p, "inplace_and", 42)
        tmp = p; tmp //= 42; assert tmp == (p, "inplace_floor_divide", 42)
        tmp = p; tmp <<= 42; assert tmp == (p, "inplace_lshift", 42)
        tmp = p; tmp *= 42; assert tmp == (p, "inplace_multiply", 42)
        tmp = p; tmp |= 42; assert tmp == (p, "inplace_or", 42)
        tmp = p; tmp %= 42; assert tmp == (p, "inplace_remainder", 42)
        tmp = p; tmp >>= 42; assert tmp == (p, "inplace_rshift", 42)
        tmp = p; tmp -= 42; assert tmp == (p, "inplace_subtract", 42)
        tmp = p; tmp /= 42; assert tmp == (p, "inplace_true_divide", 42)
        tmp = p; tmp ^= 42; assert tmp == (p, "inplace_xor", 42)
        #
        # we can't use '@=' because we want to be importable on py27
        tmp = p
        tmp = operator.imatmul(p, 42)
        assert tmp == (p, "inplace_matrix_multiply", 42)

    def test_nb_ops_unary(self):
        """HPy_nb_negative/positive/absolute/invert: unary slots get only self."""
        mod = self.make_module(r"""
            @DEFINE_PointObject
            #define MYSLOT(NAME) \
                HPyDef_SLOT(p_##NAME, NAME##_impl, HPy_nb_##NAME); \
                static HPy NAME##_impl(HPyContext *ctx, HPy self) \
                { \
                    HPy s = HPyUnicode_FromString(ctx, #NAME); \
                    HPy res = HPyTuple_Pack(ctx, 2, s, self); \
                    HPy_Close(ctx, s); \
                    return res; \
                }
            MYSLOT(negative)
            MYSLOT(positive)
            MYSLOT(absolute)
            MYSLOT(invert)
            @EXPORT_POINT_TYPE(&p_negative, &p_positive, &p_absolute, &p_invert)
            @INIT
        """)
        p = mod.Point()
        assert +p == ('positive', p)
        assert -p == ('negative', p)
        assert abs(p) == ('absolute', p)
        assert ~p == ('invert', p)

    def test_nb_ops_type_conversion(self):
        """HPy_nb_int/float/index/bool: conversion slots drive int(), float(),
        operator.index() and bool()."""
        import operator
        mod = self.make_module(r"""
            @DEFINE_PointObject
            @DEFINE_Point_new
            HPyDef_SLOT(p_int, p_int_impl, HPy_nb_int);
            static HPy p_int_impl(HPyContext *ctx, HPy self)
            {
                return HPyLong_FromLong(ctx, 42);
            }
            HPyDef_SLOT(p_float, p_float_impl, HPy_nb_float);
            static HPy p_float_impl(HPyContext *ctx, HPy self)
            {
                return HPyFloat_FromDouble(ctx, 123.4);
            }
            HPyDef_SLOT(p_index, p_index_impl, HPy_nb_index);
            static HPy p_index_impl(HPyContext *ctx, HPy self)
            {
                return HPyLong_FromLong(ctx, -456);
            }
            HPyDef_SLOT(p_bool, p_bool_impl, HPy_nb_bool);
            static int p_bool_impl(HPyContext *ctx, HPy self)
            {
                PointObject *point = PointObject_AsStruct(ctx, self);
                return (point->x != 0);
            }
            @EXPORT_POINT_TYPE(&Point_new, &p_int, &p_float, &p_index, &p_bool)
            @INIT
        """)
        p = mod.Point(0, 0)
        assert int(p) == 42
        assert float(p) == 123.4
        assert operator.index(p) == -456
        #
        # nb_bool reads point->x, so truthiness follows the x coordinate
        assert bool(mod.Point(0, 0)) is False
        assert bool(mod.Point(1, 0)) is True

    def test_nb_ops_power(self):
        """HPy_nb_power / HPy_nb_inplace_power: ternary slots; the third
        argument is None for the two-argument ** form."""
        mod = self.make_module(r"""
            @DEFINE_PointObject
            HPyDef_SLOT(p_power, p_power_impl, HPy_nb_power);
            static HPy p_power_impl(HPyContext *ctx, HPy self, HPy x, HPy y)
            {
                HPy s = HPyUnicode_FromString(ctx, "power");
                HPy res = HPyTuple_Pack(ctx, 4, self, s, x, y);
                HPy_Close(ctx, s);
                return res;
            }
            HPyDef_SLOT(p_inplace_power, p_inplace_power_impl, HPy_nb_inplace_power);
            static HPy p_inplace_power_impl(HPyContext *ctx, HPy self, HPy x, HPy y)
            {
                HPy s = HPyUnicode_FromString(ctx, "inplace_power");
                HPy res = HPyTuple_Pack(ctx, 4, self, s, x, y);
                HPy_Close(ctx, s);
                return res;
            }
            @EXPORT_POINT_TYPE(&p_power, &p_inplace_power)
            @INIT
        """)
        p = mod.Point()
        assert p**42 == (p, 'power', 42, None)
        assert pow(p, 42, 123) == (p, 'power', 42, 123)
        tmp = p
        tmp **= 42
        assert tmp == (p, 'inplace_power', 42, None)

    def test_buffer(self):
        """HPy_bf_getbuffer / HPy_bf_releasebuffer: a fake 1-D byte array that
        allows a single simultaneous export and counts them in the struct."""
        import pytest
        import sys
        mod = self.make_module("""
            @TYPE_STRUCT_BEGIN(FakeArrayObject)
                int exports;
            @TYPE_STRUCT_END
            static char static_mem[12] = {0,1,2,3,4,5,6,7,8,9,10,11};
            static HPy_ssize_t _shape[1] = {12};
            static HPy_ssize_t _strides[1] = {1};
            HPyDef_SLOT(FakeArray_getbuffer, _getbuffer_impl, HPy_bf_getbuffer)
            static int _getbuffer_impl(HPyContext *ctx, HPy self, HPy_buffer* buf, int flags) {
                FakeArrayObject *arr = FakeArrayObject_AsStruct(ctx, self);
                if (arr->exports > 0) {
                    buf->obj = HPy_NULL;
                    HPyErr_SetString(ctx, ctx->h_BufferError,
                                     "only one buffer allowed");
                    return -1;
                }
                arr->exports++;
                buf->buf = static_mem;
                buf->len = 12;
                buf->itemsize = 1;
                buf->readonly = 1;
                buf->ndim = 1;
                buf->format = "B";
                buf->shape = _shape;
                buf->strides = _strides;
                buf->suboffsets = NULL;
                buf->internal = NULL;
                buf->obj = HPy_Dup(ctx, self);
                return 0;
            }
            HPyDef_SLOT(FakeArray_releasebuffer, _relbuffer_impl, HPy_bf_releasebuffer)
            static void _relbuffer_impl(HPyContext *ctx, HPy h_obj, HPy_buffer* buf) {
                FakeArrayObject *arr = FakeArrayObject_AsStruct(ctx, h_obj);
                arr->exports--;
            }
            static HPyDef *FakeArray_defines[] = {
                &FakeArray_getbuffer,
                &FakeArray_releasebuffer,
                NULL
            };
            static HPyType_Spec FakeArray_Spec = {
                .name = "mytest.FakeArray",
                .basicsize = sizeof(FakeArrayObject),
                .defines = FakeArray_defines,
                .legacy = FakeArrayObject_IS_LEGACY,
            };
            @EXPORT_TYPE("FakeArray", FakeArray_Spec)
            @INIT
        """)
        arr = mod.FakeArray()
        if self.supports_refcounts():
            init_refcount = sys.getrefcount(arr)
        with memoryview(arr) as mv:
            # a second export while one is live must raise BufferError
            with pytest.raises(BufferError):
                mv2 = memoryview(arr)
            if self.supports_refcounts():
                # the exported buffer holds a reference to the exporter
                assert sys.getrefcount(arr) == init_refcount + 1
            for i in range(12):
                assert mv[i] == i
        if self.supports_refcounts():
            assert sys.getrefcount(arr) == init_refcount
        # after release the export counter is back to 0, so this succeeds
        mv2 = memoryview(arr) # doesn't raise
class TestSqSlots(HPyTest):
ExtensionTemplate = PointTemplate
    def test_sq_item_and_sq_length(self):
        """HPy_sq_item / HPy_sq_length: len() calls sq_length, and negative
        indexes are normalized by the interpreter (len + idx) before sq_item
        is invoked, so p[-1] sees index 1233."""
        mod = self.make_module("""
            @DEFINE_PointObject
            HPyDef_SLOT(Point_getitem, Point_getitem_impl, HPy_sq_item);
            static HPy Point_getitem_impl(HPyContext *ctx, HPy self, HPy_ssize_t idx)
            {
                return HPyLong_FromLong(ctx, (long)idx*2);
            }
            HPyDef_SLOT(Point_length, Point_length_impl, HPy_sq_length);
            static HPy_ssize_t Point_length_impl(HPyContext *ctx, HPy self)
            {
                return 1234;
            }
            @EXPORT_POINT_TYPE(&Point_getitem, &Point_length)
            @INIT
        """)
        p = mod.Point()
        assert len(p) == 1234
        assert p[4] == 8
        assert p[21] == 42
        # -1 is normalized to 1234 - 1 == 1233 before reaching sq_item
        assert p[-1] == 1233 * 2
def test_sq_ass_item(self):
import pytest
mod = self.make_module("""
@DEFINE_PointObject
@DEFINE_Point_new
@DEFINE_Point_xy
HPyDef_SLOT(Point_len, Point_len_impl, HPy_sq_length);
static HPy_ssize_t Point_len_impl(HPyContext *ctx, HPy self)
{
return 2;
}
HPyDef_SLOT(Point_setitem, Point_setitem_impl, HPy_sq_ass_item);
static int Point_setitem_impl(HPyContext *ctx, HPy self, HPy_ssize_t idx,
HPy h_value)
{
long value;
if (HPy_IsNull(h_value))
value = -123; // this is the del p[] case
else {
value = HPyLong_AsLong(ctx, h_value);
if (HPyErr_Occurred(ctx))
return -1;
}
PointObject *point = PointObject_AsStruct(ctx, self);
if (idx == 0)
point->x = value;
else if (idx == 1)
point->y = value;
else {
HPyErr_SetString(ctx, ctx->h_IndexError, "invalid index");
return -1;
}
return 0;
}
@EXPORT_POINT_TYPE(&Point_new, &Point_x, &Point_y, &Point_len, &Point_setitem)
@INIT
""")
p = mod.Point(1, 2)
# check __setitem__
p[0] = 100
assert p.x == 100
p[1] = 200
assert p.y == 200
with pytest.raises(IndexError):
p[2] = 300
# check __delitem__
del p[0]
assert p.x == -123
del p[1]
assert p.y == -123
# check negative indexes
p[-2] = 400
| |
#!/usr/bin/env python
u"""
esa_cryosat_ftp.py
Written by <NAME> (03/2020)
This program syncs Cryosat Elevation products
From the ESA Cryosat ftp dissemination server:
https://earth.esa.int/web/guest/-/how-to-access-cryosat-data-6842
https://earth.esa.int/web/guest/-/products-overview-6975
INPUTS:
CryoSat-2 product to sync with ESA servers
SIR_LRM_L2: CryoSat-2 Low-Resolution Mode
SIR_SAR_L2: CryoSat-2 SAR Mode
SIR_SIN_L2: CryoSat-2 SARin Mode
CALLING SEQUENCE:
python esa_cryosat_ftp.py --baseline=C --user=<username> SIR_SIN_L2
where <username> is your ESA data dissemination server username
COMMAND LINE OPTIONS:
--help: list the command line options
-Y X, --year=X: years to sync separated by commas
-B X, --baseline=X: CryoSat-2 baseline to sync
--user: username for CryoSat-2 FTP servers
--directory: working data directory (default: current working directory)
--bbox=X: Bounding box (lonmin,latmin,lonmax,latmax)
--polygon=X: Georeferenced file containing a set of polygons
-M X, --mode=X: Local permissions mode of the directories and files synced
--log: output log of files downloaded
--list: print files to be transferred, but do not execute transfer
--clobber: Overwrite existing data in transfer
PYTHON DEPENDENCIES:
lxml: Pythonic XML and HTML processing library using libxml2/libxslt
https://lxml.de/
https://github.com/lxml/lxml
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
gdal: Pythonic interface to the Geospatial Data Abstraction Library (GDAL)
https://pypi.python.org/pypi/GDAL/
fiona: Python wrapper for vector data access functions from the OGR library
https://fiona.readthedocs.io/en/latest/manual.html
geopandas: Python tools for geographic data
http://geopandas.readthedocs.io/
shapely: PostGIS-ish operations outside a database context for Python
http://toblerity.org/shapely/index.html
pyproj: Python interface to PROJ library
https://pypi.org/project/pyproj/
future: Compatibility layer between Python 2 and Python 3
(http://python-future.org/)
UPDATE HISTORY:
Updated 03/2020: add spatial subsetting to reduce files to sync
increase ftplib timeout to prevent connection drops
Updated 02/2020: convert from hard to soft tabulation
Updated 08/2019: include baseline in regular expression patterns
Updated 06/2018: using python3 compatible octal and input
Updated 05/2018 for public release.
Updated 05/2017: exception if ESA Cryosat-2 credentials weren't entered
using os.makedirs to recursively create directories
using getpass to enter server password securely (remove --password)
Updated 04/2017: minor changes to check_connection function to use ftplib
updated regular expression for months to include year of interest
Updated 02/2017: switching username and password to login command
Written 11/2016
"""
from __future__ import print_function
import sys
import re
import os
import io
import getopt
import getpass
import builtins
import lxml.etree
import calendar, time
import shapely.geometry
import ftplib, posixpath
from cryosat_toolkit.read_shapefile import read_shapefile
from cryosat_toolkit.read_kml_file import read_kml_file
from cryosat_toolkit.read_geojson_file import read_geojson_file
#-- PURPOSE: check internet connection
def check_connection(USER, PASSWORD):
    """Verify connectivity and credentials for the ESA CryoSat-2 ftp server.

    Arguments:
        USER: ESA data dissemination server username
        PASSWORD: ESA data dissemination server password
    Returns:
        True when the server is reachable and the login succeeds
    Raises:
        RuntimeError: when the host is unreachable or the credentials fail
    """
    try:
        #-- connect, authenticate, and issue a no-op to prove the session works
        ftp = ftplib.FTP('science-pds.cryosat.esa.int')
        ftp.login(USER, PASSWORD)
        ftp.voidcmd("NOOP")
    except IOError:
        #-- socket-level failure: host unreachable or connection refused
        raise RuntimeError('Check internet connection')
    except ftplib.error_perm:
        #-- server rejected the USER/PASSWORD pair
        raise RuntimeError('Check login credentials')
    return True
#-- PURPOSE: compile regular expression operator to find CryoSat-2 files
def compile_regex_pattern(PRODUCT, BASELINE, START=r'\d+T?\d+', STOP=r'\d+T?\d+',
    SUFFIX=r'(DBL|HDR|nc)'):
    """Compile a regular expression operator to find CryoSat-2 files

    Fixes over the previous revision:
    - regex fragments are raw strings, avoiding invalid '\\d' escape
      sequences (SyntaxWarning on modern Python); the default values are
      byte-identical to before, so callers are unaffected
    - the extension separator is now an escaped literal dot (r'\\.') instead
      of '.', which previously matched any character before the suffix

    Arguments:
        PRODUCT: CryoSat-2 product identifier (e.g. 'SIR_SIN_L2')
        BASELINE: CryoSat-2 baseline letter(s) to match (None matches any)
    Keyword arguments:
        START: regex for the validity start date/time field
        STOP: regex for the validity stop date/time field
        SUFFIX: regex for the allowed file extensions
    Returns:
        compiled pattern whose 8 groups are
        (mission, file class, product, start, stop, baseline, version, suffix)
    """
    #-- CryoSat file class
    #-- OFFL (Off Line Processing/Systematic)
    #-- NRT_ (Near Real Time)
    #-- RPRO (ReProcessing)
    #-- TEST (Testing)
    #-- LTA_ (Long Term Archive)
    regex_class = r'OFFL|NRT_|RPRO|TEST|LTA_'
    #-- CryoSat mission products: map from command-line product name to the
    #-- file product identifier embedded in the filename
    #-- SIR_*_1B: L1B products; SIR_*_2 / SIR_*I2: L2 and in-depth L2 products
    regex_products = {
        'SIR_LRM_L1': 'SIR_LRM_1B',   #-- L1B Low Resolution Mode
        'SIR_FDM_L1': 'SIR_FDM_1B',   #-- L1B Fast Delivery Marine Mode
        'SIR_SIN_L1': 'SIR_SIN_1B',   #-- L1B SAR Interferometric
        'SIR_SID_L1': 'SIR_SID_1B',   #-- L1B SIN Degraded
        'SIR_SAR_L1': 'SIR_SAR_1B',   #-- L1B SAR
        'SIR_LRM_L2': 'SIR_LRM_2_',   #-- L2 Low Resolution Mode
        'SIR_FDM_L2': 'SIR_FDM_2_',   #-- L2 Fast Delivery Marine Mode
        'SIR_SIN_L2': 'SIR_SIN_2_',   #-- L2 SAR Interferometric
        'SIR_SID_L2': 'SIR_SID_2_',   #-- L2 SIN Degraded
        'SIR_SAR_L2': 'SIR_SAR_2_',   #-- L2 SAR
        'SIR_GDR_L2': 'SIR_GDR_2_',   #-- L2 Consolidated
        'SIR_LRM_L2I': 'SIR_LRMI2_',  #-- In-depth L2 from LRM
        'SIR_SIN_L2I': 'SIR_SINI2_',  #-- In-depth L2 from SIN
        'SIR_SID_L2I': 'SIR_SIDI2_',  #-- In-depth L2 from SIN Degraded
        'SIR_SAR_L2I': 'SIR_SARI2_',  #-- In-depth L2 from SAR
    }
    #-- Cryosat baseline Identifier (any baseline if not specified)
    regex_baseline = '({0})'.format(BASELINE) if BASELINE else r'(.*?)'
    #-- CRYOSAT LEVEL-2 PRODUCTS NAMING RULES
    #-- Mission Identifier
    #-- File Class
    #-- File Product
    #-- Validity Start Date and Time
    #-- Validity Stop Date and Time
    #-- Baseline Identifier
    #-- Version Number
    regex_pattern = r'({0})_({1})_({2})_({3})_({4})_{5}(\d+)\.{6}$'.format('CS',
        regex_class,regex_products[PRODUCT],START,STOP,regex_baseline,SUFFIX)
    return re.compile(regex_pattern, re.VERBOSE)
#-- PURPOSE: sync local Cryosat-2 files with ESA server
def esa_cryosat_ftp(PRODUCT, YEARS, BASELINE=None, DIRECTORY=None,
USER='', PASSWORD='', BBOX=None, POLYGON=None, LOG=False, LIST=False,
MODE=None, CLOBBER=False):
#-- connect and login to ESA ftp server
f = ftplib.FTP('science-pds.cryosat.esa.int', timeout=3600)
f.login(USER, PASSWORD)
#-- compile xml parser for lxml
XMLparser = lxml.etree.XMLParser()
#-- create log file with list of synchronized files (or print to terminal)
if LOG:
#-- check if log directory exists and recursively create if not
os.makedirs(DIRECTORY,MODE) if not os.path.exists(DIRECTORY) else None
#-- format: ESA_CS_SIR_SIN_L2_sync_2002-04-01.log
today = time.strftime('%Y-%m-%d',time.localtime())
LOGFILE = 'ESA_CS_{0}_sync_{1}.log'.format(PRODUCT,today)
fid1 = open(os.path.join(DIRECTORY,LOGFILE),'w')
print('ESA CryoSat-2 Sync Log ({0})'.format(today), file=fid1)
print('PRODUCT={0}'.format(PRODUCT), file=fid1)
else:
#-- standard output (terminal output)
fid1 = sys.stdout
#-- compile regular expression operator for years to sync
regex_years = '|'.join('{0:d}'.format(y) for y in YEARS)
R1 = re.compile('({0})'.format(regex_years), re.VERBOSE)
#-- initial regular expression pattern for months of the year
regex_months = '(' + '|'.join('{0:02d}'.format(m) for m in range(1,13)) + ')'
#-- compile the regular expression operator to find CryoSat-2 files
#-- spatially subset data using bounding box or polygon file
if BBOX:
#-- if using a bounding box to spatially subset data
#-- only find header files to extract latitude and longitude coordinates
R3 = compile_regex_pattern(PRODUCT, BASELINE, SUFFIX='(HDR)')
#-- min_lon,min_lat,max_lon,max_lat
lon = [BBOX[0],BBOX[2],BBOX[2],BBOX[0],BBOX[0]]
lat = [BBOX[1],BBOX[1],BBOX[3],BBOX[3],BBOX[1]]
#-- create shapely polygon
poly_obj = shapely.geometry.Polygon(list(zip(lon, lat)))
#-- Valid Polygon cannot have overlapping exterior or interior rings
if (not poly_obj.is_valid):
poly_obj = poly_obj.buffer(0)
elif POLYGON:
#-- if using a polygon file to spatially subset data
#-- only find header files to extract latitude and longitude coordinates
R3 = compile_regex_pattern(PRODUCT, BASELINE, SUFFIX='(HDR)')
#-- read shapefile, kml/kmz file or GeoJSON file
fileBasename,fileExtension = os.path.splitext(POLYGON)
#-- extract file name and subsetter indices lists
match_object = re.match('(.*?)(\[(.*?)\])?$',POLYGON)
FILE = os.path.expanduser(match_object.group(1))
#-- read specific variables of interest
v = match_object.group(3).split(',') if match_object.group(2) else None
#-- get MultiPolygon object from input spatial file
if fileExtension in ('.shp','.zip'):
#-- if reading a shapefile or a zipped directory with a shapefile
ZIP = (fileExtension == '.zip')
m = read_shapefile(os.path.expanduser(FILE), VARIABLES=v, ZIP=ZIP)
elif fileExtension in ('.kml','.kmz'):
#-- if reading a keyhole markup language (can be compressed)
KMZ = (fileExtension == '.kmz')
m = read_kml_file(os.path.expanduser(FILE), VARIABLES=v, KMZ=KMZ)
elif fileExtension in ('.json','.geojson'):
#-- if reading a GeoJSON file
m = read_geojson_file(os.path.expanduser(FILE), VARIABLES=v)
else:
raise IOError('Unlisted polygon type ({0})'.format(fileExtension))
#-- calculate the convex hull of the MultiPolygon object for subsetting
poly_obj = m.convex_hull
#-- Valid Polygon cannot have overlapping exterior or interior rings
if (not poly_obj.is_valid):
poly_obj = poly_obj.buffer(0)
else:
R3 = compile_regex_pattern(PRODUCT, BASELINE)
#-- find remote yearly directories for PRODUCT within YEARS
YRS = [R1.findall(Y).pop() for Y in f.nlst(PRODUCT) if R1.search(Y)]
for Y in YRS:
#-- compile regular expression operator for months in year to sync
R2 = re.compile(posixpath.join(Y,regex_months), re.VERBOSE)
#-- find remote monthly directories for PRODUCT within year
MNS = [R2.findall(M).pop() for M in f.nlst(posixpath.join(PRODUCT,Y))
if R2.search(M)]
for M in MNS:
#-- remote and local directory for data product of year and month
remote_dir = posixpath.join(PRODUCT,Y,M)
local_dir = os.path.join(DIRECTORY,PRODUCT,Y,M)
#-- check if local directory exists and recursively create if not
os.makedirs(local_dir,MODE) if not os.path.exists(local_dir) else None
#-- get filenames from remote directory
valid_lines = [fi for fi in f.nlst(remote_dir) if R3.search(fi)]
#-- if spatially subsetting
if BBOX or POLYGON:
for line in sorted(valid_lines):
#-- extract filename from regex object
f1 = R3.search(line).group(0)
remote_file = posixpath.join(remote_dir,f1)
local_file = os.path.join(local_dir,f1)
#-- extract information from filename
MI,CLASS,PRD,START,STOP,BSLN,VERS,SFX = R3.findall(f1).pop()
#-- read XML header file and check if intersecting
if parse_xml_file(f, remote_file, poly_obj, XMLparser):
#-- compile regular expression operator for times
R4 = compile_regex_pattern(PRODUCT, BASELINE,
START=START, STOP=STOP)
subset=[f2 for f2 in f.nlst(remote_dir) if R4.search(f2)]
for subset_line in subset:
#-- extract filename from regex object
f2 = R4.search(subset_line).group(0)
remote_file = posixpath.join(remote_dir,f2)
local_file = os.path.join(local_dir,f2)
ftp_mirror_file(fid1,f,remote_file,local_file,
LIST,CLOBBER,MODE)
else:
for line in sorted(valid_lines):
#-- extract filename from regex object
fi = R3.search(line).group(0)
remote_file = posixpath.join(remote_dir,fi)
local_file = os.path.join(local_dir,fi)
ftp_mirror_file(fid1,f,remote_file,local_file,
LIST,CLOBBER,MODE)
#-- close the ftp connection
f.quit()
#-- close log file | |
= \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.Filter'
class CompositeFilter(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting.datastore.v4.CompositeFilter.

  Combines a repeated list of sub-filters with a logical operator (only AND
  is defined).  Wire format: operator = required varint (tag 1), filter =
  repeated length-delimited message (tag 2).  Machine-generated code: the
  numeric tag constants and statement order encode the wire format.
  """

  # Operator values
  AND = 1

  _Operator_NAMES = {
    1: "AND",
  }

  # Map a numeric Operator value to its symbolic name ("" if unknown).
  def Operator_Name(cls, x): return cls._Operator_NAMES.get(x, "")
  Operator_Name = classmethod(Operator_Name)

  # presence flag and scalar default for the required 'operator' field
  has_operator_ = 0
  operator_ = 0

  def __init__(self, contents=None):
    # the repeated 'filter' field starts out empty; optionally parse the
    # serialized bytes in *contents*
    self.filter_ = []
    if contents is not None: self.MergeFromString(contents)

  # accessors for the required 'operator' enum field (tag 1)
  def operator(self): return self.operator_

  def set_operator(self, x):
    self.has_operator_ = 1
    self.operator_ = x

  def clear_operator(self):
    # clearing also restores the default value (0)
    if self.has_operator_:
      self.has_operator_ = 0
      self.operator_ = 0

  def has_operator(self): return self.has_operator_

  # accessors for the repeated 'filter' message field (tag 2)
  def filter_size(self): return len(self.filter_)
  def filter_list(self): return self.filter_

  def filter(self, i):
    return self.filter_[i]

  def mutable_filter(self, i):
    return self.filter_[i]

  def add_filter(self):
    # append a fresh mutable Filter element and return it for population
    x = Filter()
    self.filter_.append(x)
    return x

  def clear_filter(self):
    self.filter_ = []

  def MergeFrom(self, x):
    """Merge every set field of *x* (another CompositeFilter) into self."""
    assert x is not self
    if (x.has_operator()): self.set_operator(x.operator())
    for i in range(x.filter_size()): self.add_filter().CopyFrom(x.filter(i))

  def Equals(self, x):
    """Field-by-field equality; returns 1/0 (generated-code convention)."""
    if x is self: return 1
    if self.has_operator_ != x.has_operator_: return 0
    if self.has_operator_ and self.operator_ != x.operator_: return 0
    if len(self.filter_) != len(x.filter_): return 0
    for e1, e2 in zip(self.filter_, x.filter_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff 'operator' is set and every sub-filter is initialized;
    append human-readable problems to *debug_strs* when provided."""
    initialized = 1
    if (not self.has_operator_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: operator not set.')
    for p in self.filter_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    """Serialized size in bytes, assuming required fields are set."""
    n = 0
    n += self.lengthVarInt64(self.operator_)
    n += 1 * len(self.filter_)  # one tag byte per repeated element
    for i in range(len(self.filter_)): n += self.lengthString(self.filter_[i].ByteSize())
    return n + 1  # +1 for the 'operator' tag byte

  def ByteSizePartial(self):
    """Like ByteSize, but tolerates unset required fields."""
    n = 0
    if (self.has_operator_):
      n += 1  # tag byte
      n += self.lengthVarInt64(self.operator_)
    n += 1 * len(self.filter_)
    for i in range(len(self.filter_)): n += self.lengthString(self.filter_[i].ByteSizePartial())
    return n

  def Clear(self):
    self.clear_operator()
    self.clear_filter()

  def OutputUnchecked(self, out):
    """Serialize to encoder *out* without checking required fields."""
    out.putVarInt32(8)  # tag 1, wire type varint
    out.putVarInt32(self.operator_)
    for i in range(len(self.filter_)):
      out.putVarInt32(18)  # tag 2, wire type length-delimited
      out.putVarInt32(self.filter_[i].ByteSize())
      self.filter_[i].OutputUnchecked(out)

  def OutputPartial(self, out):
    """Serialize to encoder *out*, skipping unset required fields."""
    if (self.has_operator_):
      out.putVarInt32(8)
      out.putVarInt32(self.operator_)
    for i in range(len(self.filter_)):
      out.putVarInt32(18)
      out.putVarInt32(self.filter_[i].ByteSizePartial())
      self.filter_[i].OutputPartial(out)

  def TryMerge(self, d):
    """Parse fields from decoder *d*, merging them into self."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_operator(d.getVarInt32())
        continue
      if tt == 18:
        # length-delimited sub-message: bound a sub-decoder to its bytes
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_filter().TryMerge(tmp)
        continue
      # tag 0 is special: it's used to indicate an error.
      # so if we see it we raise an exception.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Debug text rendering of the message."""
    res=""
    if self.has_operator_: res+=prefix+("operator: %s\n" % self.DebugFormatInt32(self.operator_))
    cnt=0
    for e in self.filter_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("filter%s <\n" % elm)
      res+=e.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # helper called below at class-definition time (not an instance method)
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # field tag numbers
  koperator = 1
  kfilter = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "operator",
    2: "filter",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  # stylesheet for XML output
  _STYLE = \
   """"""
  _STYLE_CONTENT_TYPE = \
   """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.CompositeFilter'
class PropertyFilter(ProtocolBuffer.ProtocolMessage):
# Operator values
LESS_THAN = 1
LESS_THAN_OR_EQUAL = 2
GREATER_THAN = 3
GREATER_THAN_OR_EQUAL = 4
EQUAL = 5
HAS_ANCESTOR = 11
_Operator_NAMES = {
1: "LESS_THAN",
2: "LESS_THAN_OR_EQUAL",
3: "GREATER_THAN",
4: "GREATER_THAN_OR_EQUAL",
5: "EQUAL",
11: "HAS_ANCESTOR",
}
def Operator_Name(cls, x): return cls._Operator_NAMES.get(x, "")
Operator_Name = classmethod(Operator_Name)
has_property_ = 0
has_operator_ = 0
operator_ = 0
has_value_ = 0
  def __init__(self, contents=None):
    """Create an empty filter; optionally parse serialized *contents*."""
    # sub-message fields are eagerly allocated; presence is tracked by the
    # has_*_ flags, not by None-ness
    self.property_ = PropertyReference()
    self.value_ = Value()
    if contents is not None: self.MergeFromString(contents)
def property(self): return self.property_
def mutable_property(self): self.has_property_ = 1; return self.property_
def clear_property(self):self.has_property_ = 0; self.property_.Clear()
def has_property(self): return self.has_property_
def operator(self): return self.operator_
def set_operator(self, x):
self.has_operator_ = 1
self.operator_ = x
def clear_operator(self):
if self.has_operator_:
self.has_operator_ = 0
self.operator_ = 0
def has_operator(self): return self.has_operator_
def value(self): return self.value_
def mutable_value(self): self.has_value_ = 1; return self.value_
def clear_value(self):self.has_value_ = 0; self.value_.Clear()
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_property()): self.mutable_property().MergeFrom(x.property())
if (x.has_operator()): self.set_operator(x.operator())
if (x.has_value()): self.mutable_value().MergeFrom(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_property_ != x.has_property_: return 0
if self.has_property_ and self.property_ != x.property_: return 0
if self.has_operator_ != x.has_operator_: return 0
if self.has_operator_ and self.operator_ != x.operator_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_property_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: property not set.')
elif not self.property_.IsInitialized(debug_strs): initialized = 0
if (not self.has_operator_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: operator not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
elif not self.value_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.property_.ByteSize())
n += self.lengthVarInt64(self.operator_)
n += self.lengthString(self.value_.ByteSize())
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_property_):
n += 1
n += self.lengthString(self.property_.ByteSizePartial())
if (self.has_operator_):
n += 1
n += self.lengthVarInt64(self.operator_)
if (self.has_value_):
n += 1
n += self.lengthString(self.value_.ByteSizePartial())
return n
def Clear(self):
self.clear_property()
self.clear_operator()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.property_.ByteSize())
self.property_.OutputUnchecked(out)
out.putVarInt32(16)
out.putVarInt32(self.operator_)
out.putVarInt32(26)
out.putVarInt32(self.value_.ByteSize())
self.value_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_property_):
out.putVarInt32(10)
out.putVarInt32(self.property_.ByteSizePartial())
self.property_.OutputPartial(out)
if (self.has_operator_):
out.putVarInt32(16)
out.putVarInt32(self.operator_)
if (self.has_value_):
out.putVarInt32(26)
out.putVarInt32(self.value_.ByteSizePartial())
self.value_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_property().TryMerge(tmp)
continue
if tt == 16:
self.set_operator(d.getVarInt32())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_value().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError()
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_property_:
res+=prefix+"property <\n"
res+=self.property_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_operator_: res+=prefix+("operator: %s\n" % self.DebugFormatInt32(self.operator_))
if self.has_value_:
res+=prefix+"value <\n"
res+=self.value_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
kproperty = 1
koperator = 2
kvalue = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "property",
2: "operator",
3: "value",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_PROTO_DESCRIPTOR_NAME = 'apphosting.datastore.v4.PropertyFilter'
class GqlQuery(ProtocolBuffer.ProtocolMessage):
has_query_string_ = 0
query_string_ = ""
has_allow_literal_ = 0
allow_literal_ = 0
def __init__(self, contents=None):
self.name_arg_ = []
self.number_arg_ = []
if contents is not None: self.MergeFromString(contents)
def query_string(self): return self.query_string_
def set_query_string(self, x):
self.has_query_string_ = 1
self.query_string_ = x
def clear_query_string(self):
if self.has_query_string_:
self.has_query_string_ = 0
self.query_string_ = ""
def has_query_string(self): return self.has_query_string_
def allow_literal(self): return self.allow_literal_
def set_allow_literal(self, x):
self.has_allow_literal_ = 1
self.allow_literal_ = x
def clear_allow_literal(self):
if self.has_allow_literal_:
self.has_allow_literal_ = 0
self.allow_literal_ = 0
def has_allow_literal(self): return self.has_allow_literal_
def name_arg_size(self): return len(self.name_arg_)
def name_arg_list(self): return self.name_arg_
def name_arg(self, i):
return self.name_arg_[i]
def mutable_name_arg(self, i):
return self.name_arg_[i]
def add_name_arg(self):
x = GqlQueryArg()
self.name_arg_.append(x)
return x
def clear_name_arg(self):
self.name_arg_ = []
def number_arg_size(self): return len(self.number_arg_)
def number_arg_list(self): return self.number_arg_
def number_arg(self, i):
return self.number_arg_[i]
def mutable_number_arg(self, i):
return self.number_arg_[i]
def add_number_arg(self):
x = GqlQueryArg()
self.number_arg_.append(x)
return x
def clear_number_arg(self):
self.number_arg_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_query_string()): self.set_query_string(x.query_string())
if (x.has_allow_literal()): self.set_allow_literal(x.allow_literal())
for i in range(x.name_arg_size()): self.add_name_arg().CopyFrom(x.name_arg(i))
for i in range(x.number_arg_size()): self.add_number_arg().CopyFrom(x.number_arg(i))
def Equals(self, x):
if x is self: return 1
if self.has_query_string_ != x.has_query_string_: return 0
if self.has_query_string_ and self.query_string_ != x.query_string_: return 0
if self.has_allow_literal_ != x.has_allow_literal_: return 0
if self.has_allow_literal_ and self.allow_literal_ != x.allow_literal_: return 0
if len(self.name_arg_) != len(x.name_arg_): return 0
for e1, e2 in zip(self.name_arg_, x.name_arg_):
if e1 != e2: return 0
if len(self.number_arg_) != len(x.number_arg_): return 0
for e1, e2 in zip(self.number_arg_, x.number_arg_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_query_string_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: query_string not set.')
for p in self.name_arg_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.number_arg_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.query_string_))
if (self.has_allow_literal_): n += 2
n += 1 * len(self.name_arg_)
for i in range(len(self.name_arg_)): n += self.lengthString(self.name_arg_[i].ByteSize())
n += 1 * len(self.number_arg_)
for i | |
so I put my elbows into
it and made a good job. She’s kissed to pieces.”
“Dick!”
“Well, now! It’ll teach you to go careful how you start a man on them
tricks. Lynnette’s a worthy child, but I’d never have thought of
kissing her. Yet it wasn’t so bad. Rather subtle.” He licked his lips
tentatively.
“Dicky! Vulgar, vulgar boy!”
“You know, I believe she did like it,” confided Dick.
Then very soon, in the middle of the sunshiny, warm morning he went.
In the hall, where they had raced and played games long ago, she told
him good-by, doing a difficult best to give him cheer and courage to
remember, not heart-break. Something helped her unexpectedly, reaction,
maybe, of a chord overstrained; likely the good Lord ordered it; His
hand reaches into queer brain-twists. She said small, silly things that
made the boy laugh, till at last the towering figure was upon her and
she was crushed into khaki, with his expert rifleman’s badge digging
into her forehead. She was glad of the hurt. The small defenses had
gone down and she knew that only high Heaven could get her through the
next five seconds with a proper record as a brave man’s mother. In five
seconds he turned and fled, and with a leap was through the door. Gone!
She tossed out her arms as if shot, and fled after him. Already he was
across the lawn, by the tulip-bed, and suddenly he wheeled at the patch
of color and his visored cap was off, and he was kissing his hand with
the deep glow in his eyes she had seen often lately. It was as if the
soul of him came close to the windows and looked out at her. His blond
hair in the sunlight was almost as yellow as on that other day long ago
when--What was this? Up from the clover in the ditch, filling all the
air with fluttering gold, stormed again a flight of yellow butterflies,
the Cloudless Sulphur on their spring migration. The boy as he stood
looking back at her shouted young laughter and the winged things
glittered about him, and with that two lighted on his head.
“Good luck! It’s for good luck, mother,” he called.
She watched, smiling determinedly, dwelling on details, the uniform,
the folds of brown wool puttees, the bronze shine on his shoes, the
gold spots of light flickering about his head. He wheeled, stumbling
a bit, and then the light feet sprang away; there was no Dick there
now, only a glimmering, moving cloud of yellow--meaningless. The
tulip-bed--sunshine--butterflies--silence. The world was empty. She
clutched at her chest as if this sudden, sick, dropping away of life
were physical. His triumphant last word came back to her, “It’s for
good luck, mother”; then other words followed, words which she had
spoken years ago.
“And for immortality.”
Immortality! She beat her hands against the wall. Not Dick--not her
boy--her one thing. Not immortality for him, yet. Not for years and
years--fifty--sixty. He had a right to long, sweet mortal life before
that terrible immortality. She wanted him mortal, close, the flesh and
blood which she knew. It was not to be borne, this sending him away
to--Oh, God! The thousands on thousands of strong young things like
Dick who had already passed to that horrible, unknown immortality. The
word meant to her then only death, only a frantic terror; the subtle,
underlying, enormous hope of it missed her in the black hour.
A letter came next day from camp, and the next, and every day for a
week, and she pulled herself together and went about her busy hours
minute by minute cheerfully, as one must. She disregarded the fact that
inside of her an odd mental-moral-spiritual-physical arrangement which
is called a heart lay quite defenseless, and that shortly a dagger
was going to be struck into it. So when the dagger came, folded in a
yellow Western Union envelope, it was exactly as bad as if there had
been no preparation at all. Dick had sailed. She spun about and caught
at a table. And then went on quietly with the five hundred little
cheese-cloth “sponges” which she had promised to have at the Red Cross
rooms to-morrow. Ghastly little things. So the boy went, one of two
million to go, but yet, as most of the others were, the only one. And
two weeks later, it might be, came another telegram; a queerly worded
thing from the war office:
“The ship on which I sailed has arrived safely in port.”
What ship? What port? After what adventures? But the great fact
remained; he was, at least, overseas, beyond the first great peril. She
flung herself into war work and wrote every day a letter with its vague
military address ending in A. E. F. And got back many letters full of
enthusiasm, of adventure, of old friends and new, of dear French people
who had been good to him--but everybody was good to this boy. Of hard
training, too, and a word of praise from high quarters once or twice,
passed on secretly, proudly to the one person to whom a fellow could
repeat such things. It was a life crowded with happiness and hardship
and comradeship and worth-while work. And then, soon, with danger.
Through all sordidness and horror it was a life vitalized by enormous
incentive, a life whose memory few of those who lived it would give up
for everything else that any career might offer. The power of these
gay, commonplace, consecrated boys’ lives reached across oceans and
swung nations into consecration. Dick’s mother moved gladly in the huge
orbit, for war work meant to her Dick. The days went. He was in action
at times now, and wrote that his life was a charmed one, and that he
walked safe through dangers; wrote also the pitiful bit of statistics
which boys all told to their mothers, about the small percentage of
killed and wounded; wrote as well the heroic sweet thoughts which came
from depths of young souls which had never before known these depths.
“If I’m killed, darling child, honey, after all it’s not much
different. It wouldn’t be really long before we’d be playing together
again. And I’ve had the joy and the usefulness of fifty years of living
in these last months. What more could you ask? The best thing to do
with a life is to give it away--you taught me that--and this certainly
is the best way to give it, for our America. And don’t worry about my
suffering if I’m wounded; there’s not much to that. Things hurt and you
stand it--that happens in every life--and we wiggle and get through.
It hurt like the dickens when I had pneumonia, don’t you remember? So,
behold the straight dope of the wise man Dick, and follow thereby.
Nothing can happen that’s unbearable; keep it in your mind, precious.
Live on the surface--don’t go feeling any more than you can help.”
Thousands of others found the sense of that sentence a way out of
impossibility, as this woman did. She slept nights and worked days and
wrote letters and rejoiced in getting them, and shunned like poison
thoughts that thronged below the threshold, thoughts she dared not
meet. Weeks wore on, months; the Germans were being pushed back; with a
shivering joy she heard people say that the war could not last long; he
might--he might come home safe. She knew as that shaft of golden hope
winged across her brain, from the reeling rapture of it she knew how
little hope she had ever had. But she whispered Dick’s wise sentence
once in a while, “Nothing can happen that’s unbearable,” and she held
her head high for Dick. Then the one thing which had never entered her
mind happened. Dick was reported among the missing.
Missing.
Let any mother of a boy consider what that means. Anything. Everything.
“Nothing can happen that’s unbearable,” said Dick. But this was. A
woman can’t stay sane and face that word “missing”--can she? This woman
gasped that question of herself. Yet she must stay sane, for Dick might
come back. Oh, he might even come back safe and sound. They did come
through prison camps--sometimes--and get back to health. Prison camps.
She fell to remembering about nights when she | |
from sqlobject import dbconnection
from sqlobject import classregistry
from sqlobject import events
from sqlobject import sqlbuilder
from sqlobject.col import StringCol, ForeignKey
from sqlobject.main import sqlmeta, SQLObject, SelectResults, \
makeProperties, unmakeProperties, getterName, setterName
import iteration
def tablesUsedSet(obj, db):
    """Collect the set of table names that *obj* references for backend *db*.

    Objects that know their own tables expose a ``tablesUsedSet`` method,
    which is delegated to; containers are walked recursively; any other
    value contributes no tables.
    """
    if hasattr(obj, "tablesUsedSet"):
        return obj.tablesUsedSet(db)
    if isinstance(obj, (tuple, list, set, frozenset)):
        tables = set()
        for item in obj:
            tables |= tablesUsedSet(item, db)
        return tables
    return set()
class InheritableSelectResults(SelectResults):
    """SelectResults for inheritable classes: adds the parent-table join
    clauses needed so a query over a child class can filter and order by
    columns inherited from its parent classes."""
    IterationClass = iteration.InheritableIteration
    def __init__(self, sourceClass, clause, clauseTables=None,
            inheritedTables=None, **ops):
        # None or the literal string 'all' means an unrestricted select.
        if clause is None or isinstance(clause, str) and clause == 'all':
            clause = sqlbuilder.SQLTrueClause
        dbName = (ops.get('connection',None) or sourceClass._connection).dbName
        # Tables referenced by the WHERE clause, plus the source table itself.
        tablesSet = tablesUsedSet(clause, dbName)
        tablesSet.add(str(sourceClass.sqlmeta.table))
        orderBy = ops.get('orderBy')
        if inheritedTables:
            for tableName in inheritedTables:
                tablesSet.add(str(tableName))
        # ORDER BY expressions can also pull in tables (unless raw SQL text).
        if orderBy and not isinstance(orderBy, basestring):
            tablesSet.update(tablesUsedSet(orderBy, dbName))
        #DSM: if this class has a parent, we need to link it
        #DSM: and be sure the parent is in the table list.
        #DSM: The following code is before clauseTables
        #DSM: because if the user uses clauseTables
        #DSM: (and normal string SELECT), he must know what he wants
        #DSM: and will do himself the relationship between classes.
        if not isinstance(clause, str):
            # Map each referenced class to the shallowest ancestor whose
            # columns the query actually needs.
            tableRegistry = {}
            allClasses = classregistry.registry(
                sourceClass.sqlmeta.registry).allClasses()
            for registryClass in allClasses:
                if str(registryClass.sqlmeta.table) in tablesSet:
                    #DSM: By default, no parents are needed for the clauses
                    tableRegistry[registryClass] = registryClass
            tableRegistryCopy = tableRegistry.copy()
            for childClass in tableRegistryCopy:
                if childClass not in tableRegistry:
                    continue
                currentClass = childClass
                while currentClass:
                    if currentClass in tableRegistryCopy:
                        if currentClass in tableRegistry:
                            #DSM: Remove this class as it is a parent one
                            #DSM: of a needed children
                            del tableRegistry[currentClass]
                        #DSM: Must keep the last parent needed
                        #DSM: (to limit the number of join needed)
                        tableRegistry[childClass] = currentClass
                    currentClass = currentClass.sqlmeta.parentClass
            #DSM: Table registry contains only the last children
            #DSM: or standalone classes
            # Build child.id == parent.id join conditions up each chain.
            parentClause = []
            for (currentClass, minParentClass) in tableRegistry.items():
                while (currentClass != minParentClass) \
                        and currentClass.sqlmeta.parentClass:
                    parentClass = currentClass.sqlmeta.parentClass
                    parentClause.append(currentClass.q.id == parentClass.q.id)
                    currentClass = parentClass
                    tablesSet.add(str(currentClass.sqlmeta.table))
            # NOTE(review): relies on the builtin `reduce` -- this module
            # targets Python 2 (see also `basestring` above).
            clause = reduce(sqlbuilder.AND, parentClause, clause)
        super(InheritableSelectResults, self).__init__(sourceClass,
            clause, clauseTables, **ops)
    def accumulateMany(self, *attributes, **kw):
        # Aggregate helper: re-issues itself with the inherited tables
        # joined in, then delegates to the parent implementation.
        if kw.get("skipInherited"):
            return super(InheritableSelectResults, self).accumulateMany(*attributes)
        tables = []
        for func_name, attribute in attributes:
            if not isinstance(attribute, basestring):
                tables.append(attribute.tableName)
        clone = self.__class__(self.sourceClass, self.clause,
            self.clauseTables, inheritedTables=tables, **self.ops)
        return clone.accumulateMany(skipInherited=True, *attributes)
class InheritableSQLMeta(sqlmeta):
    """sqlmeta variant that propagates column and join definitions through
    an InheritableSQLObject class hierarchy: parent columns/joins become
    delegating properties on children, and dynamically added definitions
    are pushed down to already-registered child classes."""
    @classmethod
    def addColumn(sqlmeta, columnDef, changeSchema=False, connection=None, childUpdate=False):
        soClass = sqlmeta.soClass
        #DSM: Try to add parent properties to the current class
        #DSM: Only do this once if possible at object creation and once for
        #DSM: each new dynamic column to refresh the current class
        if sqlmeta.parentClass:
            for col in sqlmeta.parentClass.sqlmeta.columnList:
                cname = col.name
                if cname == 'childName': continue
                if cname.endswith("ID"): cname = cname[:-2]
                # Getter simply forwards to the parent row object.
                setattr(soClass, getterName(cname), eval(
                    'lambda self: self._parent.%s' % cname))
                if not col.immutable:
                    def make_setfunc(cname):
                        # Factory binds cname so each generated setter
                        # closes over its own column name.
                        def setfunc(self, val):
                            if not self.sqlmeta._creating and not getattr(self.sqlmeta, "row_update_sig_suppress", False):
                                self.sqlmeta.send(events.RowUpdateSignal, self, {cname : val})
                            # NOTE(review): setattr returns None, so
                            # `result` is always None and unused.
                            result = setattr(self._parent, cname, val)
                        return setfunc
                    setfunc = make_setfunc(cname)
                    setattr(soClass, setterName(cname), setfunc)
            if childUpdate:
                # Refresh-only call from a parent: rebuild properties, done.
                makeProperties(soClass)
                return
        if columnDef:
            super(InheritableSQLMeta, sqlmeta).addColumn(columnDef, changeSchema, connection)
        #DSM: Update each child class if needed and existing (only for new
        #DSM: dynamic column as no child classes exists at object creation)
        if columnDef and hasattr(soClass, "q"):
            q = getattr(soClass.q, columnDef.name, None)
        else:
            q = None
        for c in sqlmeta.childClasses.values():
            c.sqlmeta.addColumn(columnDef, connection=connection, childUpdate=True)
            if q: setattr(c.q, columnDef.name, q)
    @classmethod
    def delColumn(sqlmeta, column, changeSchema=False, connection=None, childUpdate=False):
        if childUpdate:
            # Refresh-only call from a parent: drop the delegating
            # property and its sqlbuilder field, then stop.
            soClass = sqlmeta.soClass
            unmakeProperties(soClass)
            makeProperties(soClass)
            if isinstance(column, str):
                name = column
            else:
                name = column.name
            delattr(soClass, name)
            delattr(soClass.q, name)
            return
        super(InheritableSQLMeta, sqlmeta).delColumn(column, changeSchema, connection)
        #DSM: Update each child class if needed
        #DSM: and delete properties for this column
        for c in sqlmeta.childClasses.values():
            c.sqlmeta.delColumn(column, changeSchema=changeSchema,
                connection=connection, childUpdate=True)
    @classmethod
    def addJoin(sqlmeta, joinDef, childUpdate=False):
        soClass = sqlmeta.soClass
        #DSM: Try to add parent properties to the current class
        #DSM: Only do this once if possible at object creation and once for
        #DSM: each new dynamic join to refresh the current class
        if sqlmeta.parentClass:
            for join in sqlmeta.parentClass.sqlmeta.joins:
                jname = join.joinMethodName
                jarn = join.addRemoveName
                # Delegate the join accessor (and add/remove helpers when
                # present) to the parent row object.
                setattr(soClass, getterName(jname),
                    eval('lambda self: self._parent.%s' % jname))
                if hasattr(join, 'remove'):
                    setattr(soClass, 'remove' + jarn,
                        eval('lambda self,o: self._parent.remove%s(o)' % jarn))
                if hasattr(join, 'add'):
                    setattr(soClass, 'add' + jarn,
                        eval('lambda self,o: self._parent.add%s(o)' % jarn))
            if childUpdate:
                makeProperties(soClass)
                return
        if joinDef:
            super(InheritableSQLMeta, sqlmeta).addJoin(joinDef)
        #DSM: Update each child class if needed and existing (only for new
        #DSM: dynamic join as no child classes exists at object creation)
        for c in sqlmeta.childClasses.values():
            c.sqlmeta.addJoin(joinDef, childUpdate=True)
    @classmethod
    def delJoin(sqlmeta, joinDef, childUpdate=False):
        if childUpdate:
            # Refresh-only call from a parent: rebuild properties, done.
            soClass = sqlmeta.soClass
            unmakeProperties(soClass)
            makeProperties(soClass)
            return
        super(InheritableSQLMeta, sqlmeta).delJoin(joinDef)
        #DSM: Update each child class if needed
        #DSM: and delete properties for this join
        for c in sqlmeta.childClasses.values():
            c.sqlmeta.delJoin(joinDef, childUpdate=True)
    @classmethod
    def getAllColumns(sqlmeta):
        # Walk up the parent chain, collecting every inherited column.
        columns = sqlmeta.columns.copy()
        sm = sqlmeta
        while sm.parentClass:
            columns.update(sm.parentClass.sqlmeta.columns)
            sm = sm.parentClass.sqlmeta
        return columns
    @classmethod
    def getColumns(sqlmeta):
        # getAllColumns minus the internal child-class discriminator.
        columns = sqlmeta.getAllColumns()
        if 'childName' in columns:
            del columns['childName']
        return columns
class InheritableSQLObject(SQLObject):
sqlmeta = InheritableSQLMeta
_inheritable = True
SelectResultsClass = InheritableSelectResults
def set(self, **kw):
if self._parent:
SQLObject.set(self, _suppress_set_sig=True, **kw)
else:
SQLObject.set(self, **kw)
def __classinit__(cls, new_attrs):
SQLObject.__classinit__(cls, new_attrs)
# if we are a child class, add sqlbuilder fields from parents
currentClass = cls.sqlmeta.parentClass
while currentClass:
for column in currentClass.sqlmeta.columnDefinitions.values():
if column.name == 'childName':
continue
if isinstance(column, ForeignKey):
continue
setattr(cls.q, column.name,
getattr(currentClass.q, column.name))
currentClass = currentClass.sqlmeta.parentClass
@classmethod
def _SO_setupSqlmeta(cls, new_attrs, is_base):
# Note: cannot use super(InheritableSQLObject, cls)._SO_setupSqlmeta -
# InheritableSQLObject is not defined when it's __classinit__
# is run. Cannot use SQLObject._SO_setupSqlmeta, either:
# the method would be bound to wrong class.
if cls.__name__ == "InheritableSQLObject":
call_super = super(cls, cls)
else:
# InheritableSQLObject must be in globals yet
call_super = super(InheritableSQLObject, cls)
call_super._SO_setupSqlmeta(new_attrs, is_base)
sqlmeta = cls.sqlmeta
sqlmeta.childClasses = {}
# locate parent class and register this class in it's children
sqlmeta.parentClass = None
for superclass in cls.__bases__:
if getattr(superclass, '_inheritable', False) \
and (superclass.__name__ != 'InheritableSQLObject'):
if sqlmeta.parentClass:
# already have a parent class;
# cannot inherit from more than one
raise NotImplementedError(
"Multiple inheritance is not implemented")
sqlmeta.parentClass = superclass
superclass.sqlmeta.childClasses[cls.__name__] = cls
if sqlmeta.parentClass:
# remove inherited column definitions
cls.sqlmeta.columns = {}
cls.sqlmeta.columnList = []
cls.sqlmeta.columnDefinitions = {}
# default inheritance child name
if not sqlmeta.childName:
sqlmeta.childName = cls.__name__
@classmethod
def get(cls, id, connection=None, selectResults=None, childResults=None, childUpdate=False):
val = super(InheritableSQLObject, cls).get(id, connection, selectResults)
#DSM: If we are updating a child, we should never return a child...
if childUpdate: return val
#DSM: If this class has a child, return the child
if 'childName' in cls.sqlmeta.columns:
childName = val.childName
if childName is not None:
childClass = cls.sqlmeta.childClasses[childName]
# If the class has no columns (which sometimes makes sense
# and may be true for non-inheritable (leaf) classes only),
# shunt the query to avoid almost meaningless SQL
# like "SELECT NULL FROM child WHERE id=1".
# This is based on assumption that child object exists
# if parent object exists. (If it doesn't your database
# is broken and that is a job for database maintenance.)
if not (childResults or childClass.sqlmeta.columns):
childResults = (None,)
return childClass.get(id, connection=connection,
selectResults=childResults)
#DSM: Now, we know we are alone or the last child in a family...
#DSM: It's time to find our parents
inst = val
while inst.sqlmeta.parentClass and not inst._parent:
inst._parent = inst.sqlmeta.parentClass.get(id,
connection=connection, childUpdate=True)
inst = inst._parent
#DSM: We can now return ourself
return val
@classmethod
def _notifyFinishClassCreation(cls):
sqlmeta = cls.sqlmeta
# verify names of added columns
if sqlmeta.parentClass:
# FIXME: this does not check for grandparent column overrides
parentCols = sqlmeta.parentClass.sqlmeta.columns.keys()
for column in sqlmeta.columnList:
if column.name == 'childName':
raise AttributeError(
"The column name 'childName' is reserved")
if column.name in parentCols:
raise AttributeError("The column '%s' is"
" already defined in an inheritable parent"
% column.name)
# if this class is inheritable, add column for children distinction
if cls._inheritable and (cls.__name__ != 'InheritableSQLObject'):
sqlmeta.addColumn(StringCol(name='childName',
# limit string length to get VARCHAR and not CLOB
length=255, default=None))
if not sqlmeta.columnList:
# There are no columns - call addColumn to propagate columns
# from parent classes to children
sqlmeta.addColumn(None)
if not sqlmeta.joins:
# There are no joins - call addJoin to propagate joins
# from parent classes to children
sqlmeta.addJoin(None)
def _create(self, id, **kw):
#DSM: If we were called by | |
# tests/test_comparisons.py
"""
Tests for comparisons of the Markov model and the simulation
"""
import numpy as np
import pytest
from hypothesis import given, settings
from hypothesis.strategies import floats, integers
from ambulance_game.comparisons import (
get_heatmaps,
get_mean_blocking_time_from_simulation_state_probabilities,
get_mean_waiting_time_from_simulation_state_probabilities,
get_proportion_within_target_from_simulation_state_probabilities,
plot_output_comparisons,
)
# Rounding precision used when comparing floating-point results below.
NUMBER_OF_DIGITS_TO_ROUND = 8
def test_get_heatmaps_example_1():
    """
    Test to ensure that the probabilities generated by the simulation and the
    Markov model are as expected.
    """
    # Regression fixture: state-probability heatmaps from the simulation,
    # the Markov model, and their elementwise difference.
    sim_calculated_probs, markov_calculated_probs, diff_calculated_probs = get_heatmaps(
        lambda_2=2,
        lambda_1=1,
        mu=2,
        num_of_servers=2,
        threshold=3,
        system_capacity=5,
        buffer_capacity=5,
        seed_num=0,
        runtime=100,
        num_of_trials=10,
        linear_positioning=False,
        algebraic_function=np.linalg.solve,
    )
    # np.nan entries mark states that do not exist in the model;
    # equal_nan=True below treats matching NaNs as equal.
    sim_expected_probs, markov_expected_probs, diff_expected_probs = (
        np.array(
            [
                [0.15657134, 0.23662749, 0.16391817, 0.13420543, 0.02070944, 0.0036757],
                [np.nan, np.nan, np.nan, 0.08165133, 0.02249408, 0.00498913],
                [np.nan, np.nan, np.nan, 0.05124684, 0.01655216, 0.00379816],
                [np.nan, np.nan, np.nan, 0.03741792, 0.01048049, 0.00129502],
                [np.nan, np.nan, np.nan, 0.02189239, 0.00640466, 0.00116072],
                [np.nan, np.nan, np.nan, 0.01507139, 0.00871438, 0.00112376],
            ]
        ),
        np.array(
            [
                [
                    0.15459909,
                    0.23189863,
                    0.17392397,
                    0.13044298,
                    0.02059626,
                    0.00343271,
                ],
                [np.nan, np.nan, np.nan, 0.07723598, 0.01942191, 0.00438122],
                [np.nan, np.nan, np.nan, 0.05051955, 0.01503237, 0.0039658],
                [np.nan, np.nan, np.nan, 0.03475886, 0.01107021, 0.00316697],
                [np.nan, np.nan, np.nan, 0.02449802, 0.0080307, 0.00239411],
                [np.nan, np.nan, np.nan, 0.01746141, 0.00957775, 0.00359149],
            ]
        ),
        np.array(
            [
                [
                    0.00197225,
                    0.00472886,
                    -0.0100058,
                    0.00376245,
                    0.00011318,
                    0.00024299,
                ],
                [np.nan, np.nan, np.nan, 0.00441536, 0.00307217, 0.0006079],
                [np.nan, np.nan, np.nan, 0.00072728, 0.00151979, -0.00016765],
                [np.nan, np.nan, np.nan, 0.00265906, -0.00058972, -0.00187194],
                [np.nan, np.nan, np.nan, -0.00260564, -0.00162603, -0.00123339],
                [np.nan, np.nan, np.nan, -0.00239002, -0.00086337, -0.00246773],
            ]
        ),
    )
    assert np.allclose(sim_calculated_probs, sim_expected_probs, equal_nan=True)
    assert np.allclose(markov_calculated_probs, markov_expected_probs, equal_nan=True)
    assert np.allclose(diff_calculated_probs, diff_expected_probs, equal_nan=True)
def test_get_heatmaps_example_2():
    """
    Test to ensure that the probabilities generated by the simulation and the
    Markov model are as expected.
    """
    # Second regression fixture: smaller system with linear positioning of
    # the heatmap states enabled.
    sim_calculated_probs, markov_calculated_probs, diff_calculated_probs = get_heatmaps(
        lambda_2=1.5,
        lambda_1=1.5,
        mu=4,
        num_of_servers=1,
        threshold=2,
        system_capacity=6,
        buffer_capacity=1,
        seed_num=2,
        runtime=150,
        num_of_trials=5,
        linear_positioning=True,
        algebraic_function=np.linalg.solve,
    )
    # np.nan entries mark states that do not exist in the model;
    # equal_nan=True below treats matching NaNs as equal.
    sim_expected_probs, markov_expected_probs, diff_expected_probs = (
        np.array(
            [
                [
                    0.31415055,
                    0.22936987,
                    0.17661768,
                    0.04897618,
                    0.01226239,
                    0.00191243,
                    0.00063125,
                ],
                [
                    np.nan,
                    np.nan,
                    0.09676506,
                    0.06857442,
                    0.0296508,
                    0.01747934,
                    0.00361002,
                ],
            ]
        ),
        np.array(
            [
                [
                    0.3236358,
                    0.24272685,
                    0.18204514,
                    0.04553079,
                    0.01141196,
                    0.00289688,
                    0.00079006,
                ],
                [
                    np.nan,
                    np.nan,
                    0.09100306,
                    0.05686228,
                    0.02698544,
                    0.01150214,
                    0.00460958,
                ],
            ]
        ),
        np.array(
            [
                [
                    -0.00948526,
                    -0.01335698,
                    -0.00542746,
                    0.00344539,
                    0.00085043,
                    -0.00098445,
                    -0.00015881,
                ],
                [
                    np.nan,
                    np.nan,
                    0.005762,
                    0.01171214,
                    0.00266535,
                    0.0059772,
                    -0.00099956,
                ],
            ]
        ),
    )
    assert np.allclose(sim_calculated_probs, sim_expected_probs, equal_nan=True)
    assert np.allclose(markov_calculated_probs, markov_expected_probs, equal_nan=True)
    assert np.allclose(diff_calculated_probs, diff_expected_probs, equal_nan=True)
def test_get_mean_waiting_time_from_simulation_state_probabilities():
    """
    Test for the mean waiting time using the Markov formula and the simulation
    state probabilities
    """
    parameters = dict(
        lambda_2=0.2,
        lambda_1=0.2,
        mu=0.2,
        num_of_servers=3,
        threshold=4,
        system_capacity=10,
        buffer_capacity=10,
        class_type=0,
        seed_num=0,
        runtime=2000,
        num_of_trials=1,
    )
    waiting_time = get_mean_waiting_time_from_simulation_state_probabilities(
        **parameters
    )
    assert round(waiting_time, NUMBER_OF_DIGITS_TO_ROUND) == round(
        1.3988142785295379, NUMBER_OF_DIGITS_TO_ROUND
    )
def test_get_mean_blocking_time_from_simulation_state_probabilities():
    """
    Test for the mean blocking time using the Markov formula and the simulation
    state probabilities
    """
    parameters = dict(
        lambda_2=5,
        lambda_1=6,
        mu=2,
        num_of_servers=7,
        threshold=5,
        system_capacity=15,
        buffer_capacity=7,
        seed_num=0,
        num_of_trials=1,
        runtime=1000,
    )
    blocking_time = get_mean_blocking_time_from_simulation_state_probabilities(
        **parameters
    )
    assert round(blocking_time, NUMBER_OF_DIGITS_TO_ROUND) == round(
        0.6247616245889802, NUMBER_OF_DIGITS_TO_ROUND
    )
def test_get_proportion_within_target_from_simulation_state_probabilities():
    """
    The proportion of class-0 customers served within the target waiting
    time (Markov formula + simulation state probabilities) matches the
    pinned regression value.
    """
    expected = 0.9605868280871762
    observed = get_proportion_within_target_from_simulation_state_probabilities(
        lambda_1=1,
        lambda_2=1,
        mu=1,
        num_of_servers=3,
        threshold=7,
        system_capacity=10,
        buffer_capacity=5,
        target=4,
        class_type=0,
        seed_num=0,
        num_of_trials=2,
        runtime=100,
    )
    assert round(observed, NUMBER_OF_DIGITS_TO_ROUND) == round(
        expected, NUMBER_OF_DIGITS_TO_ROUND
    )
def test_plot_output_comparisons_waiting_class_1():
    """
    Test that the values to be plotted by the function for the mean waiting time
    of class 1 individuals are the expected when using:
        - Markov formula and simulation state probabilities
        - Markov formula and Markov state probabilities
        - Simulation

    The expected arrays are regression fixtures pinned to seed_num=0.
    """
    (
        range_space,
        simulation_times_using_markov_formula,
        markov_times,
        simulation_times,
    ) = plot_output_comparisons(
        lambda_1=3,
        lambda_2=4,
        mu=1,
        num_of_servers=3,
        threshold=6,
        system_capacity=15,
        buffer_capacity=7,
        seed_num=0,
        num_of_trials=1,
        runtime=100,
        measure_to_compare="waiting",
        class_type=0,
        plot_over="mu",  # mu is swept from its base value up to max_parameter_value
        max_parameter_value=5,
        accuracy=5,
    )
    expected_range_space = [1, 2, 3, 4, 5]
    expected_sim_times_using_formula = [
        2.377120739790196,
        0.7785480327193071,
        0.21825612502962743,
        0.0633853178321979,
        0.02219807426322811,
    ]
    expected_markov_times = [
        2.666380625245361,
        0.7505484517766888,
        0.201787897652177,
        0.06072282228882266,
        0.024434222615639434,
    ]
    # One inner list per swept parameter value (num_of_trials=1 each).
    expected_sim_times = [
        [2.100498503091243],
        [0.8060558886538617],
        [0.24673859227916475],
        [0.06673599211050996],
        [0.026042424326131127],
    ]
    assert np.all(range_space == expected_range_space)
    assert np.allclose(
        simulation_times_using_markov_formula, expected_sim_times_using_formula
    )
    assert np.allclose(markov_times, expected_markov_times)
    assert np.allclose(simulation_times, expected_sim_times)
def test_plot_output_comparisons_waiting_class_2():
    """
    Test that the values to be plotted by the function for the mean waiting time
    of class 2 individuals are the expected when using:
        - Markov formula and simulation state probabilities
        - Markov formula and Markov state probabilities
        - Simulation

    The expected arrays are regression fixtures pinned to seed_num=0.
    """
    (
        range_space,
        simulation_times_using_markov_formula,
        markov_times,
        simulation_times,
    ) = plot_output_comparisons(
        lambda_1=3,
        lambda_2=4,
        mu=1,
        num_of_servers=3,
        threshold=6,
        system_capacity=10,
        buffer_capacity=7,
        seed_num=0,
        num_of_trials=1,
        runtime=100,
        measure_to_compare="waiting",
        class_type=1,
        plot_over="system_capacity",  # swept in steps of 2 up to 18
        max_parameter_value=18,
        accuracy=5,
    )
    expected_range_space = [
        10,
        12,
        14,
        16,
        18,
    ]
    expected_sim_times_using_formula = [
        0.9518119232230957,
        0.9314674163209273,
        0.8815151220881429,
        0.9520317760341209,
        0.9522967196743792,
    ]
    expected_markov_times = [
        0.9996062485853283,
        0.9996071004169865,
        0.9996071216135696,
        0.9996071221161823,
        0.9996071221275438,
    ]
    # One inner list per swept parameter value (num_of_trials=1 each).
    expected_sim_times = [
        [0.8587675978623437],
        [0.9410302653948986],
        [0.6712503805879015],
        [0.7596612894701423],
        [0.7466921877207321],
    ]
    assert np.all(range_space == expected_range_space)
    assert np.allclose(
        simulation_times_using_markov_formula, expected_sim_times_using_formula
    )
    assert np.allclose(markov_times, expected_markov_times)
    assert np.allclose(simulation_times, expected_sim_times)
def test_plot_output_comparisons_waiting_both_classes():
    """
    Test that the values to be plotted by the function for the mean waiting time
    of all individuals are the expected when using:
        - Markov formula and simulation state probabilities
        - Markov formula and Markov state probabilities
        - Simulation

    The expected arrays are regression fixtures pinned to seed_num=0.
    """
    (
        range_space,
        simulation_times_using_markov_formula,
        markov_times,
        simulation_times,
    ) = plot_output_comparisons(
        lambda_1=3,
        lambda_2=4,
        mu=1,
        num_of_servers=3,
        threshold=5,
        system_capacity=10,
        buffer_capacity=7,
        seed_num=0,
        num_of_trials=1,
        runtime=100,
        measure_to_compare="waiting",
        class_type=None,  # None = aggregate over both classes
        plot_over="threshold",
        max_parameter_value=9,
        accuracy=5,
    )
    expected_range_space = [
        5,
        6,
        7,
        8,
        9,
    ]
    expected_sim_times_using_formula = [
        1.4383683274990688,
        1.6172139699602939,
        1.7871674638990411,
        1.902900393648282,
        2.0799187425189745,
    ]
    expected_markov_times = [
        1.4997317350805834,
        1.6663508613218276,
        1.8329697824825426,
        1.999548467136932,
        2.165791830248812,
    ]
    # One inner list per swept threshold (num_of_trials=1 each).
    expected_sim_times = [
        [1.4595100304540891],
        [1.5414680277219233],
        [1.8463653589649593],
        [1.9638358136060718],
        [2.1872623359765617],
    ]
    assert np.all(range_space == expected_range_space)
    assert np.allclose(
        simulation_times_using_markov_formula, expected_sim_times_using_formula
    )
    assert np.allclose(markov_times, expected_markov_times)
    assert np.allclose(simulation_times, expected_sim_times)
def test_plot_output_comparisons_blocking_class_1():
    """
    Requesting blocking times for class 1 individuals is invalid (only
    class 2 individuals can be blocked), so the call must raise.
    """
    invalid_kwargs = dict(
        lambda_1=1,
        lambda_2=1,
        mu=1,
        num_of_servers=3,
        threshold=5,
        system_capacity=10,
        buffer_capacity=7,
        seed_num=0,
        num_of_trials=1,
        runtime=100,
        measure_to_compare="blocking",
        class_type=0,
        plot_over="lambda_1",
        max_parameter_value=3,
        accuracy=5,
    )
    with pytest.raises(Exception):
        plot_output_comparisons(**invalid_kwargs)
def test_plot_output_comparisons_blocking_class_2():
    """
    Test that the values to be plotted by the function for the mean blocking time
    of class 2 individuals are the expected when using:
        - Markov formula and simulation state probabilities
        - Markov formula and Markov state probabilities
        - Simulation

    The expected arrays are regression fixtures pinned to seed_num=0.
    """
    (
        range_space,
        simulation_times_using_markov_formula,
        markov_times,
        simulation_times,
    ) = plot_output_comparisons(
        lambda_1=1,
        lambda_2=1,
        mu=1,
        num_of_servers=3,
        threshold=5,
        system_capacity=10,
        buffer_capacity=7,
        seed_num=0,
        num_of_trials=1,
        runtime=100,
        measure_to_compare="blocking",
        class_type=1,
        plot_over="lambda_2",  # swept in steps of 0.5 up to 3
        max_parameter_value=3,
        accuracy=None,
    )
    expected_range_space = [
        1,
        1.5,
        2,
        2.5,
        3,
    ]
    expected_sim_times_using_formula = [
        0.09939633736936365,
        0.3428086786668058,
        1.258688113496702,
        1.550748270791677,
        2.4490455912594884,
    ]
    expected_markov_times = [
        0.25749828422874693,
        0.7336269690016299,
        1.4059020459868858,
        2.0166211860863115,
        2.446138025813656,
    ]
    # One inner list per swept lambda_2 value (num_of_trials=1 each).
    expected_sim_times = [
        [0.05675700649642476],
        [0.2035750550633296],
        [1.0204972927807057],
        [1.4297836865197424],
        [2.276273474404749],
    ]
    assert np.all(range_space == expected_range_space)
    assert np.allclose(
        simulation_times_using_markov_formula, expected_sim_times_using_formula
    )
    assert np.allclose(markov_times, expected_markov_times)
    assert np.allclose(simulation_times, expected_sim_times)
def test_plot_output_comparisons_blocking_both_classes():
    """
    Test that the values to be plotted by the function for the mean blocking time
    of all individuals are the expected when using:
        - Markov formula and simulation state probabilities
        - Markov formula and Markov state probabilities
        - Simulation

    The expected arrays are regression fixtures pinned to seed_num=0.
    """
    (
        range_space,
        simulation_times_using_markov_formula,
        markov_times,
        simulation_times,
    ) = plot_output_comparisons(
        lambda_1=1,
        lambda_2=1,
        mu=1,
        num_of_servers=1,
        threshold=5,
        system_capacity=10,
        buffer_capacity=7,
        seed_num=0,
        num_of_trials=1,
        runtime=100,
        measure_to_compare="blocking",
        class_type=None,  # None = aggregate over both classes
        plot_over="num_of_servers",
        max_parameter_value=5,
        accuracy=None,
    )
    expected_range_space = [
        1,
        2,
        3,
        4,
        5,
    ]
    expected_sim_times_using_formula = [
        30.454703888754974,
        0.8000539978455747,
        0.09939633736936365,
        0.08297030340373893,
        0.06341488800287158,
    ]
    expected_markov_times = [
        40.065612220723104,
        2.820781651110878,
        0.25749828422874693,
        0.05700263606859959,
        0.024799827726554754,
    ]
    # One inner list per swept server count (num_of_trials=1 each).
    expected_sim_times = [
        [10.427934396602263],
        [0.25420006034794723],
        [0.05675700649642476],
        [0.08092456927729426],
        [0.08979883878110877],
    ]
    assert np.all(range_space == expected_range_space)
    assert np.allclose(
        simulation_times_using_markov_formula, expected_sim_times_using_formula
    )
    assert np.allclose(markov_times, expected_markov_times)
    assert np.allclose(simulation_times, expected_sim_times)
@given(
    lambda_1=floats(min_value=1, max_value=3),
    lambda_2=floats(min_value=1, max_value=3),
    mu=floats(min_value=1, max_value=3),
    num_of_servers=integers(min_value=2, max_value=5),
    threshold=integers(min_value=2, max_value=10),
    system_capacity=integers(min_value=10, max_value=20),
    buffer_capacity=integers(min_value=2, max_value=10),
)
@settings(max_examples=5, deadline=None)  # keep the property test cheap; runs are slow
def test_plot_output_comparisons_blocking_property(
    lambda_1, lambda_2, mu, num_of_servers, threshold, system_capacity, buffer_capacity
):
    """
    Test that the values to be plotted by the function for the mean blocking time
    of either CLASS 2 INDIVIDUALS or ALL INDIVIDUALS are the same for all methods
    used:
        - Markov formula and simulation state probabilities
        - Markov formula and Markov state probabilities
        - Simulation

    These values are expected to be the same because class 1 individuals do not
    have any blocking time, and thus the overall blocking time is calculated just
    from class 2 individuals.
    """
    # Run 1: blocking times restricted to class 2 individuals.
    (
        range_space_1,
        simulation_times_using_markov_formula_1,
        markov_times_1,
        simulation_times_1,
    ) = plot_output_comparisons(
        lambda_1=lambda_1,
        lambda_2=lambda_2,
        mu=mu,
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
        seed_num=0,
        num_of_trials=1,
        runtime=100,
        measure_to_compare="blocking",
        class_type=1,
        plot_over="buffer_capacity",
        max_parameter_value=5,
        accuracy=None,
    )
    # Run 2: identical parameters but aggregated over both classes.
    (
        range_space_2,
        simulation_times_using_markov_formula_2,
        markov_times_2,
        simulation_times_2,
    ) = plot_output_comparisons(
        lambda_1=lambda_1,
        lambda_2=lambda_2,
        mu=mu,
        num_of_servers=num_of_servers,
        threshold=threshold,
        system_capacity=system_capacity,
        buffer_capacity=buffer_capacity,
        seed_num=0,
        num_of_trials=1,
        runtime=100,
        measure_to_compare="blocking",
        class_type=None,
        plot_over="buffer_capacity",
        max_parameter_value=5,
        accuracy=None,
    )
    # Exact equality (not allclose): both runs must compute identical values.
    assert np.all(range_space_1 == range_space_2)
    assert np.all(
        simulation_times_using_markov_formula_1
        == simulation_times_using_markov_formula_2
    )
    assert np.all(markov_times_1 == markov_times_2)
    assert np.all(simulation_times_1 == simulation_times_2)
def test_plot_of_proportion_within_target_class_1():
"""
Test the values to be plotted by the function for the mean proportion of
individuals for class 1 are as | |
import asyncio
import configparser
import logging
import os
import re
import secrets
import time
import traceback
from datetime import datetime, timedelta
from decimal import ROUND_UP, Decimal
import pymongo
import qrcode
import qrcode.image.svg
from pyrogram import Client, filters
from pyrogram.errors import BadRequest
from pyrogram.session import Session
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
# BTC class for BTC coin, the same for others, just replace the name
# for litecoin just import LTC
from bitcart import BCH, BSTY, BTC, GZRO, LTC, APIManager
from bitcart.utils import bitcoins
# Suppress pyrogram's one-time notice message on session start.
Session.notice_displayed = True

# Load bot configuration from config.ini; the [app] section is mandatory.
main_config = configparser.ConfigParser()
main_config.read("config.ini")
try:
    config = main_config["app"]
except KeyError:
    raise ValueError("No [app] section found, exiting...")

# constants
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
# GIF shown for each /bet outcome.
BET_LUCK_IMAGES = {
    "up": "https://i.imgur.com/AcItxdr.gif",
    "down": "https://i.imgur.com/wJYyCSw.gif",
    "same": "https://i.imgur.com/VbC8kNM.gif",
    "nobalance": "https://i.imgur.com/UY8I7ow.gif",
}

# loading variables
TOKEN = config.get("token")  # Telegram bot token
XPUB = config.get("xpub")  # wallet extended public/private key shared by all coins
if not TOKEN:
    raise ValueError("No token provided. Provide it using token variable in [app] section")
if not XPUB:
    raise ValueError("Provide your x/y/z pub/prv in xpub setting in [app] section")

app = Client("tg", bot_token=TOKEN)
mongo = pymongo.MongoClient()
mongo = mongo["atomic_tip_db"]  # rebind to the database handle; the raw client is not needed

# bitcart: initialize btc instance
btc = BTC(xpub=XPUB)
# the same here
bch = BCH(xpub=XPUB)
ltc = LTC(xpub=XPUB)
gzro = GZRO(xpub=XPUB)
bsty = BSTY(xpub=XPUB)
# same api, so we can do this
instances = {"btc": btc, "bch": bch, "ltc": ltc, "gzro": gzro, "bsty": bsty}
manager = APIManager({currency.upper(): [coin.xpub] for currency, coin in instances.items()})  # bitcart: create APIManager

# 100 satoshis expressed in BTC (1e-6 BTC).
satoshis_hundred = 0.000001

# Callback-query filters keyed on the callback_data prefix of each keyboard.
deposit_select_filter = filters.create(lambda _, __, query: bool(re.match(r"^deposit_", query.data)))
deposit_filter = filters.create(lambda _, __, query: bool(re.match(r"^pay_", query.data)))
bet_filter = filters.create(lambda _, __, query: bool(re.match(r"^bet_", query.data)))
paylink_filter = filters.create(lambda _, __, query: bool(re.match(r"^pl_", query.data)))
paylink_pay_filter = filters.create(lambda _, __, query: bool(re.match(r"^plp_", query.data)))
pagination_filter = filters.create(lambda _, __, query: bool(re.match(r"^page_", query.data)))
class Paginator:
    """Fixed-size pagination over a sliceable data source (list or cursor)."""

    items_per_page = 10

    def __init__(self, data):
        # Any sliceable collection; has_next_page additionally expects .count().
        self.data = data

    def get_page(self, page):
        """Return the items belonging to 1-based page number *page*."""
        upper = page * self.items_per_page
        lower = upper - self.items_per_page
        return self.data[lower:upper]

    def get_starting_count(self, page):
        """Return the 1-based ordinal of the first item on *page*."""
        return (page - 1) * self.items_per_page + 1

    def has_next_page(self, page):
        """True when at least one item exists beyond *page*."""
        return self.data.count() >= page * self.items_per_page + 1

    def has_prev_page(self, page):
        """True for every page after the first."""
        return page > 1
def get_user_data(user_id):
    """Fetch the mongo document for *user_id*, creating a fresh zero-balance
    record on first contact."""
    record = mongo.users.find_one({"user_id": user_id})
    if not record:
        record = {
            "user_id": user_id,
            "balance": 0,
            "created_time": datetime.now().strftime(DATE_FORMAT),
        }
        mongo.users.insert_one(record)
    return record
def round_usd(d):
    """Round a Decimal USD amount up (away from zero) to whole cents."""
    cents = Decimal("0.01")
    return d.quantize(cents, rounding=ROUND_UP)
def change_balance(user_id, amount, tx_type, tx_hash=None, address=None):
    """Credit (positive) or debit (negative) *amount* satoshis to a user and
    append an audit record to the transaction log."""
    mongo.users.update_one({"user_id": user_id}, {"$inc": {"balance": amount}})
    tx_record = {
        "user_id": user_id,
        "amount": amount,
        "type": tx_type,
        "tx_hash": tx_hash,
        "address": address,
        "date": datetime.now().strftime(DATE_FORMAT),
    }
    mongo.txes.insert_one(tx_record)
def deposit_keyboard():
    """Inline keyboard offering the four fixed top-up amounts (satoshis)."""
    rows = []
    for amount in (100, 1000, 10000, 100000):
        # "1 000 Satoshi" style labels: thousands separated by spaces.
        label = "{:,} Satoshi".format(amount).replace(",", " ")
        rows.append([InlineKeyboardButton(label, callback_data=f"deposit_{amount}")])
    return InlineKeyboardMarkup(rows)
def bet_menu_keyboard():
    """Inline keyboard for picking a /bet direction."""
    options = (
        ("Go up!", "bet_up"),
        ("Go down!", "bet_down"),
        ("Will stay same", "bet_same"),
    )
    rows = [[InlineKeyboardButton(text, callback_data=data)] for text, data in options]
    return InlineKeyboardMarkup(rows)
def payment_method_kb(amount):
    """Inline keyboard for choosing which coin to pay *amount* satoshis with."""
    layout = (
        (("Bitcoin (BTC)", "btc"), ("Bitcoin Cash (BCH)", "bch"), ("Litecoin (LTC)", "ltc")),
        (("Gravity (GZRO)", "gzro"), ("GlobalBoost (BSTY)", "bsty")),
    )
    keyboard = [
        [InlineKeyboardButton(text, callback_data=f"pay_{coin}_{amount}") for text, coin in row]
        for row in layout
    ]
    return InlineKeyboardMarkup(keyboard)
def paylink_kb(currency, amount):
    """Inline keyboard for choosing the paylink flavour (bot deep-link or raw
    payment request)."""
    bot_button = InlineKeyboardButton("Bot link", callback_data=f"pl_bot_{currency}_{amount}")
    request_button = InlineKeyboardButton(
        "Payment request(for non-bot users)",
        callback_data=f"pl_pr_{currency}_{amount}",
    )
    return InlineKeyboardMarkup([[bot_button], [request_button]])
@app.on_message(filters.command("help"))
def help_handler(client, message):
    """Reply with the command reference and a live USD price quote."""
    # bitcart: get usd price
    # NOTE(review): satoshis_hundred is 1e-6 BTC (i.e. 100 satoshis), but the
    # message text below quotes the "Price of 1000 satoshis" — one of the two
    # looks wrong; confirm intended amount.
    usd_price = round(btc.rate() * Decimal(satoshis_hundred), 2)  # we use Decimals for accuracy
    message.reply(
        f"""
<b>In development, now working commands are tip!xxx, /start, /help, /deposit, /balance, /send, /history, /send2telegram,
/paylink, /claim, /bet and /top</b>
<b>Send tip in a group chat:</b>
reply any user message in group including <b>tip!xxx</b> - where xxx is amount you wish to send.
<b>Wallet commands:</b>
/deposit for top-ups
/send to withdraw
/balance to check your balance
/history show transaction history
<b>LApps:</b>
/send2phone +118767854 1000 <i>send satoshi to number by sat2.io</i>
/send2telegram @username 1000 <i> send satoshis to known telegram user</i>
/paylink 10000 <i>request payment link for sharing</i>
/bet [currency] 1000 <i>[up|down|same] [minute|hour|day|month] Bet on currencies prices</i>
Betting rewards amounts are based on the time you bet for:
1 % profit if betting for one minute
13 % profit if betting for one hour
19 % profit if betting for one day
23 % profit if betting for one month
/sendsms +118767854 hello, lightning! <i>send text message to number via lnsms.world</i>
<b>Misc:</b>
/top show user rank
<b>What is 'satoshi'?</b>
<a href=\"https://en.wikipedia.org/wiki/Satoshi_Nakamoto\">Satoshi</a> is a creator of Bitcoin
and <a href=\"https://en.bitcoin.it/wiki/Satoshi_(unit)\">currently the smallest unit of the bitcoin currency</a>.
Price of 1000 satoshis now is about ${usd_price} (USD)
<b>Have a problem or suggestion?</b>
<a href=\"https://t.me/joinchat/B9nfbhWuDDPTPUcagWAm1g\">Contact bot community</a>"
""",
        quote=False,
    )
def get_user_repr(user_id):
    """Return a human-readable "FirstName(username)" label for a Telegram id."""
    tg_user = app.get_users(user_id)
    return f"{tg_user.first_name}({tg_user.username})"
def paylink_pay_kb(deposit_id, amount):
    """Yes/No confirmation keyboard for paying a paylink."""
    choices = (("Yes", "y"), ("No", "n"))
    rows = [
        [InlineKeyboardButton(text, callback_data=f"plp_{flag}_{deposit_id}_{amount}")]
        for text, flag in choices
    ]
    return InlineKeyboardMarkup(rows)
@app.on_message(filters.command("start"))
def start(client, message):
    """Handle /start: either begin a paylink payment (deep-link payload
    "<recipient_id>=<amount>") or send the welcome message."""
    # quote=False with reply is just a shorter version of
    # app.send_message(chat_id, message)
    user_id = message.from_user.id
    user = get_user_data(user_id)
    texts = message.text.split()
    # paylink handling
    send_welcome = True
    try:
        if len(texts) == 2:
            # Deep-link payload format: "<recipient_id>=<amount_sat>".
            deposit_id, amount = texts[1].split("=")
            deposit_id = int(deposit_id)
            amount = int(amount)
            if amount <= 0 or user["balance"] < amount:
                message.reply("Not enough balance to pay the paylink.")
            else:
                # Paying your own paylink is silently ignored (falls through
                # to the welcome message).
                if deposit_id != user_id:
                    message.reply(
                        f"Paying paylink with {amount} satoshis to {get_user_repr(deposit_id)}. Proceed?",
                        reply_markup=paylink_pay_kb(deposit_id, amount),
                    )
                    send_welcome = False
    except ValueError:
        # Malformed payload — treat as a plain /start.
        send_welcome = True
    if send_welcome:
        message.reply(
            "Welcome to the BitcartCC Atomic TipBot! /help for list of commands",
            quote=False,
        )
@app.on_callback_query(paylink_pay_filter)
def pay_paylink(client, message):
    """
    Confirm/deny handler for paying a paylink.

    Callback data format: "plp_<y|n>_<recipient_id>_<amount_sat>".
    On "y", *amount* satoshis are moved from the payer to the paylink owner
    and both sides are notified; on anything else the paylink is cancelled.
    """
    user_id = message.from_user.id
    _, answer, deposit_id, amount = message.data.split("_")
    deposit_id = int(deposit_id)
    amount = int(amount)
    if answer == "y":
        change_balance(deposit_id, amount, "paylink")
        change_balance(user_id, -amount, "paylink")
        message.edit_message_text(f"Successfully paid the paylink with {amount} satoshis.")
        # Fix: user-facing typo "successfuly" -> "successfully".
        app.send_message(deposit_id, f"Your paylink was successfully paid by {get_user_repr(user_id)}")
    else:
        message.edit_message_text("Paylink canceled.")
@app.on_message(filters.command("balance"))
def balance(client, message):
    """Reply with the sender's current satoshi balance."""
    satoshis = get_user_data(message.from_user.id)["balance"]
    message.reply(f"Your balance is {satoshis} satoshis")
@app.on_message(filters.command("deposit") & filters.private)
def deposit(client, message):
    """Show the deposit amount picker (private chats only)."""
    markup = deposit_keyboard()
    message.reply(
        "Choose amount you want to deposit:",
        reply_markup=markup,
        quote=False,
    )
# callback query
def send_qr(text, chat_id, client, caption=None):
    """
    Render *text* as a QR-code PNG, send it to *chat_id* as a photo and
    remove the temporary file afterwards.

    Bug fix: the QR code previously encoded the hard-coded string "hi"
    instead of the payment URI passed in *text*, so every deposit QR code
    was unusable.
    """
    file_name = f"files/{secrets.token_urlsafe(32)}.png"
    with open(file_name, "wb") as f:
        qrcode.make(text).save(f)
    client.send_photo(chat_id, file_name, caption=caption)
    os.remove(file_name)
@app.on_callback_query(deposit_select_filter)
def deposit_select_query(client, call):
    """After an amount is chosen, ask which coin to pay with."""
    chosen_amount = int(call.data[len("deposit_"):])
    call.edit_message_text("Select payment method:", reply_markup=payment_method_kb(chosen_amount))
def convert_amounts(currency, amount):
    """Convert a BTC-denominated *amount* into *currency* units; also return
    the coin's human-friendly name."""
    coin = instances[currency.lower()]
    converted = amount / coin.rate("BTC")
    return converted, coin.friendly_name
def generate_invoice(user_id, currency, amount, amount_sat, description=""):
    """Create a bitcart payment request for *amount* (BTC units) in *currency*,
    persist it to mongo and return (invoice, converted_amount, friendly_name).

    *amount_sat* is the original satoshi amount, stored so the deposit can be
    credited in satoshis once the invoice is paid."""
    amount, friendly_name = convert_amounts(currency, amount)
    # bitcart: create invoice
    invoice = instances[currency].add_request(amount, description, expire=20160)  # 14 days
    amount_field = instances[currency].amount_field  # bitcart: each coin object provides amount_field
    invoice[amount_field] = str(invoice[amount_field])  # convert to str for mongodb
    invoice.update({"user_id": user_id, "currency": currency, "original_amount": amount_sat})
    mongo.invoices.insert_one(invoice)
    return invoice, amount, friendly_name
@app.on_callback_query(deposit_filter)
def deposit_query(client, call):
    """Final deposit step: create an invoice for the chosen coin/amount and
    send the payment QR code. Callback data: "pay_<currency>_<amount_sat>"."""
    call.edit_message_text("Okay, almost done! Now generating invoice...")
    _, currency, amount = call.data.split("_")
    amount_sat = int(amount)
    amount_btc = bitcoins(amount_sat)  # bitcart: convert satoshis to bitcoins
    user_id = call.from_user.id
    # Note: `amount` is rebound here to the coin-denominated amount.
    invoice, amount, _ = generate_invoice(user_id, currency, amount_btc, amount_sat, f"{secret_id(user_id)} top-up")
    send_qr(
        invoice["URI"],
        user_id,
        client,
        caption=f"Your invoice for {amount_sat} Satoshi ({amount:0.8f} {currency.upper()}):\n{invoice['address']}",
    )
@app.on_message(filters.private & filters.command("paylink"))
def paylink(client, message):
    """Handle /paylink <currency> <amount_sat>: offer link-type choices."""
    try:
        # Exactly two arguments are required; anything else raises ValueError.
        _cmd, coin, raw_amount = message.command
        sats = int(raw_amount)
    except ValueError:
        return message.reply("Invalid amount. Command format to request 1000 satoshi in BTC: /paylink btc 1000")
    message.reply(
        "Which link would you like to get?",
        reply_markup=paylink_kb(coin, sats),
        quote=False,
    )
@app.on_callback_query(paylink_filter)
def paylink_query(client, message):
    """Produce the requested paylink flavour.
    Callback data: "pl_<bot|pr>_<currency>_<amount_sat>"."""
    user_id = message.from_user.id
    _, link_type, currency, amount_sat = message.data.split("_")
    amount_sat = int(amount_sat)
    amount_btc = bitcoins(amount_sat)
    amount, currency_name = convert_amounts(currency, amount_btc)
    # NOTE(review): if link_type is neither "pr" nor "bot", invoice_link is
    # never bound and the f-string below raises NameError — confirm the
    # callback data can only carry these two values.
    if link_type == "pr":
        # Raw payment request usable outside Telegram.
        invoice, _, _ = generate_invoice(user_id, currency, amount_btc, amount_sat, f"{secret_id(user_id)} paylink")
        invoice_link = invoice["URI"]
    elif link_type == "bot":
        # Deep link opening the bot with a "<user_id>=<amount>" payload.
        bot_username = app.get_me().username
        invoice_link = f"https://t.me/{bot_username}?start={user_id}={amount_sat}"
    try:
        message.edit_message_text(f"Invoice for {amount_sat} Satoshi [{amount:.8f} {currency.upper()}]\n\nMessage to forward:")
        time.sleep(1)
        app.send_message(
            chat_id=user_id,
            text=f"Send me {currency_name.lower()} using this link: {invoice_link}",
        )
    except BadRequest:
        # e.g. message no longer editable — best-effort, ignore.
        pass
# Register event handler for all coins in a manager
@manager.on("new_payment")
async def payment_handler(instance, event, address, status, status_str):  # async to make pyrogram sending work
    """Credit the depositor's balance when the invoice for *address* is paid."""
    # Most recent stored invoice for this address ($natural descending).
    inv = mongo.invoices.find({"address": address}).limit(1).sort([("$natural", -1)])[0]  # to get latest result
    if inv and inv["status_str"] != "Paid":
        # bitcart: get invoice info, not necessary here
        # btc.get_request(address)
        if status_str == "Paid":
            user = mongo.users.find_one({"user_id": inv["user_id"]})
            amount = inv["original_amount"]  # satoshis, stored at invoice creation
            new_balance = user["balance"] + amount
            # NOTE(review): the guard above reads inv["status_str"], but this
            # update writes the "status" field — if the stored status_str stays
            # unpaid, a repeated "new_payment" event could credit the deposit
            # twice. Confirm against the bitcart request schema.
            mongo.invoices.update_one({"address": address}, {"$set": {"status": "Paid"}})
            change_balance(inv["user_id"], amount, "deposit", address=address)
            await app.send_message(
                user["user_id"],
                f"{amount} Satoshis added to your balance. Your balance: {new_balance}",
            )  # we await here as function is async
def secret_id(user_id):
    """Mask a numeric Telegram id as "abc-xyz" (first three and last three digits)."""
    digits = str(user_id)
    return "-".join((digits[:3], digits[-3:]))
@app.on_message(filters.command("top"))
def top(client, message):
    """Reply with the ten richest users (ids masked) plus the sender's balance."""
    userlist = mongo.users.find().sort("balance", pymongo.DESCENDING).limit(10)
    balance = get_user_data(message.from_user.id)["balance"]
    msg = "Top 10 users:\n\n"
    place = 1
    for user in userlist:
        # Zero-balance users are skipped and do not consume a rank.
        if user["balance"] > 0:
            user_id = secret_id(user["user_id"])
            msg_one = f"{place}. {user_id}: {user['balance']}"
            # Top three entries are bolded.
            if place <= 3:
                msg_one = f"<b>{msg_one}</b>"
            msg_one += "\n"
            msg += msg_one
            place += 1
    user_id = secret_id(message.from_user.id)
    msg += f"Your ({user_id}) balance: {balance}"
    message.reply(msg, quote=False)
@app.on_message(filters.private & filters.command("send"))
def send(client, message):
    """Reply with usage instructions for withdrawing funds."""
    message.reply(
        """
Send me currency, address and amount(in satoshis) to send to, separated via space, like so:
btc 181AUpDVRQ3JVcb9wYLzKz2C8Rdb5mDeH7 500
""",
        quote=False,
    )
@app.on_message(filters.reply & filters.regex(r"[Tt]ip!([0-9]+)"))
def tip(client, message):
reply_id = message.reply_to_message.from_user.id
user_id = message.from_user.id
if reply_id == user_id:
return
try:
amount = int(message.matches[0].group(1))
except ValueError:
return
sender = get_user_data(user_id)
get_user_data(reply_id)
receiver_name = message.reply_to_message.from_user.first_name
receiver_username = message.reply_to_message.from_user.username or "-"
if amount <= 0 or sender["balance"] | |
will be used.
@return: A L{Deferred} that fires when the lines have been delivered
and the output checked.
"""
dummy = protocolInstance if protocolInstance else DummyPOP3()
client = LineSendingProtocol(lines)
d = loopback.loopbackAsync(dummy, client)
return d.addCallback(self._cbRunTest, client, dummy, expectedOutput)
def _cbRunTest(self, ignored, client, dummy, expectedOutput):
self.assertEqual(b'\r\n'.join(expectedOutput),
b'\r\n'.join(client.response))
dummy.connectionLost(failure.Failure(
Exception("Test harness disconnect")))
return ignored
    def test_buffer(self):
        """
        Test a lot of different POP3 commands in an extremely pipelined
        scenario.

        This test may cover legitimate behavior, but the intent and
        granularity are not very good. It would likely be an improvement to
        split it into a number of smaller, more focused tests.
        """
        return self.runTest(
            [b"APOP moshez dummy",
             b"LIST",
             b"UIDL",
             b"RETR 1",
             b"RETR 2",
             b"DELE 1",
             b"RETR 1",
             b"QUIT"],
            # Expected transcript: greeting, APOP ack, LIST listing, UIDL
            # listing, RETR 1 body, failed RETR 2, DELE ack, RETR of the
            # deleted message failing, and the QUIT ack.
            [b'+OK <moshez>',
             b'+OK Authentication succeeded',
             b'+OK 1',
             b'1 44',
             b'.',
             b'+OK ',
             b'1 0',
             b'.',
             b'+OK 44',
             b'From: moshe',
             b'To: moshe',
             b'',
             b'How are you, friend?',
             b'.',
             b'-ERR Bad message number argument',
             b'+OK ',
             b'-ERR message deleted',
             b'+OK '])
def test_noop(self):
"""
Test the no-op command.
"""
return self.runTest(
[b'APOP spiv dummy',
b'NOOP',
b'QUIT'],
[b'+OK <moshez>',
b'+OK Authentication succeeded',
b'+OK ',
b'+OK '])
    def test_badUTF8CharactersInCommand(self):
        """
        Sending a command with invalid UTF-8 characters
        will raise a L{pop3.POP3Error}.
        """
        # \x81 is not a valid UTF-8 start byte, so decoding the command fails.
        error = b'not authenticated yet: cannot do \x81PASS'
        d = self.runTest(
            [b'\x81PASS',
             b'QUIT'],
            [b'+OK <moshez>',
             b"-ERR bad protocol or server: POP3Error: " +
             error,
             b'+OK '])
        # The error is logged server-side; flush it so trial's log observer
        # does not fail the test, and assert exactly one occurrence.
        errors = self.flushLoggedErrors(pop3.POP3Error)
        self.assertEqual(len(errors), 1)
        return d
    def test_authListing(self):
        """
        L{pop3.POP3} responds to an I{AUTH} command with a list of supported
        authentication types based on its factory's C{challengers}.
        """
        p = DummyPOP3()
        p.factory = internet.protocol.Factory()
        # Mixed-case keys on purpose: the server advertises them upper-cased.
        p.factory.challengers = {b'Auth1': None, b'secondAuth': None,
                                 b'authLast': None}
        client = LineSendingProtocol([
            b"AUTH",
            b"QUIT",
        ])
        d = loopback.loopbackAsync(p, client)
        return d.addCallback(self._cbTestAuthListing, client)
def _cbTestAuthListing(self, ignored, client):
self.assertTrue(client.response[1].startswith(b'+OK'))
self.assertEqual(sorted(client.response[2:5]),
[b"AUTH1", b"AUTHLAST", b"SECONDAUTH"])
self.assertEqual(client.response[5], b".")
def run_PASS(self, real_user, real_password,
tried_user=None, tried_password=None,
after_auth_input=[], after_auth_output=[]):
"""
Test a login with PASS.
If L{real_user} matches L{tried_user} and L{real_password} matches
L{tried_password}, a successful login will be expected.
Otherwise an unsuccessful login will be expected.
@type real_user: L{bytes}
@param real_user: The user to test.
@type real_password: L{<PASSWORD>}
@param real_password: <PASSWORD>.
@type tried_user: L{bytes} or L{None}
@param tried_user: The user to call USER with.
If None, real_user will be used.
@type tried_password: L{bytes} or L{None}
@param tried_password: The password to call PASS with.
If None, real_password will be used.
@type after_auth_input: L{list} of l{bytes}
@param after_auth_input: Extra protocol input after authentication.
@type after_auth_output: L{list} of l{bytes}
@param after_auth_output: Extra protocol output after authentication.
"""
if not tried_user:
tried_user = real_user
if not tried_password:
tried_password = <PASSWORD>
response = [b'+OK <moshez>',
b'+OK USER accepted, send PASS',
b'-ERR Authentication failed']
if real_user == tried_user and real_password == tried_password:
response = [b'+OK <moshez>',
b'+OK USER accepted, send PASS',
b'+OK Authentication succeeded']
fullInput = [b' '.join([b'USER', tried_user]),
b' '.join([b'PASS', tried_password])]
fullInput += after_auth_input + [b'QUIT']
response += after_auth_output + [b'+OK ']
return self.runTest(
fullInput,
response,
protocolInstance=DummyPOP3Auth(real_user, real_password))
    def run_PASS_before_USER(self, password):
        """
        Test protocol violation produced by calling PASS before USER.

        @type password: L{bytes}
        @param password: The password to call PASS with.
        """
        return self.runTest(
            [b' '.join([b'PASS', password]),
             b'QUIT'],
            [b'+OK <moshez>',
             b'-ERR USER required before PASS',
             b'+OK '])
def test_illegal_PASS_before_USER(self):
"""
Test PASS before USER with a wrong password.
"""
return self.run_PASS_before_USER(b'fooz')
def test_empty_PASS_before_USER(self):
"""
Test PASS before USER with an empty password.
"""
return self.run_PASS_before_USER(b'')
def test_one_space_PASS_before_USER(self):
"""
Test PASS before USER with an password that is a space.
"""
return self.run_PASS_before_USER(b' ')
def test_space_PASS_before_USER(self):
"""
Test PASS before USER with a password containing a space.
"""
return self.run_PASS_before_USER(b'fooz barz')
def test_multiple_spaces_PASS_before_USER(self):
"""
Test PASS before USER with a password containing multiple spaces.
"""
return self.run_PASS_before_USER(b'fooz barz asdf')
def test_other_whitespace_PASS_before_USER(self):
"""
Test PASS before USER with a password containing tabs and spaces.
"""
return self.run_PASS_before_USER(b'fooz barz\tcrazy@! \t ')
def test_good_PASS(self):
"""
Test PASS with a good password.
"""
return self.run_PASS(b'testuser', b'fooz')
def test_space_PASS(self):
"""
Test PASS with a password containing a space.
"""
return self.run_PASS(b'testuser', b'fooz barz')
def test_multiple_spaces_PASS(self):
"""
Test PASS with a password containing a space.
"""
return self.run_PASS(b'testuser', b'fooz barz asdf')
def test_other_whitespace_PASS(self):
"""
Test PASS with a password containing tabs and spaces.
"""
return self.run_PASS(b'testuser', b'fooz barz\tcrazy@! \t ')
def test_pass_wrong_user(self):
"""
Test PASS with a wrong user.
"""
return self.run_PASS(b'testuser', b'fooz',
tried_user=b'wronguser')
def test_wrong_PASS(self):
"""
Test PASS with a wrong password.
"""
return self.run_PASS(b'testuser', b'fooz',
tried_password=b'<PASSWORD>')
def test_wrong_space_PASS(self):
"""
Test PASS with a password containing a space.
"""
return self.run_PASS(b'testuser', b'fooz barz',
tried_password=b'<PASSWORD> ')
def test_wrong_multiple_spaces_PASS(self):
"""
Test PASS with a password containing a space.
"""
return self.run_PASS(b'testuser', b'fooz barz asdf',
tried_password=b'<PASSWORD> ')
def test_wrong_other_whitespace_PASS(self):
"""
Test PASS with a password containing tabs and spaces.
"""
return self.run_PASS(b'testuser', b'fooz barz\tcrazy@! \t ')
    def test_wrong_command(self):
        """
        After logging in, test a dummy command that is not defined.
        """
        extra_input = [b'DUMMY COMMAND']
        extra_output = [b' '.join([b'-ERR bad protocol or server: POP3Error:',
                                   b'Unknown protocol command: DUMMY'])]
        # The unknown command logs a POP3Error; flush it after the run so the
        # test does not fail on a dirty error log.
        return self.run_PASS(b'testuser', b'testpassword',
                             after_auth_input=extra_input,
                             after_auth_output=extra_output,
                             ).addCallback(self.flushLoggedErrors,
                                           pop3.POP3Error)
@implementer(pop3.IServerFactory)
class TestServerFactory:
    """
    A L{pop3.IServerFactory} implementation, for use by the test suite, with
    some behavior controlled by the values of (settable) public attributes and
    other behavior based on values hard-coded both here and in some test
    methods.
    """
    # Supported SASL challengers, in a fixed advertisement order.
    challengers = OrderedDict([(b"SCHEME_1", None), (b"SCHEME_2", None)])
    # Whether expiration / login delay are advertised per-user.
    pue = True
    puld = True

    def cap_IMPLEMENTATION(self):
        """
        Return the hard-coded value.

        @return: L{pop3.IServerFactory}
        """
        return "Test Implementation String"

    def cap_EXPIRE(self):
        """
        Return the hard-coded value.

        @return: L{pop3.IServerFactory}
        """
        return 60

    def cap_LOGIN_DELAY(self):
        """
        Return the hard-coded value.

        @return: L{pop3.IServerFactory}
        """
        return 120

    def perUserExpiration(self):
        """
        Return the hard-coded value.

        @return: L{pop3.IServerFactory}
        """
        return self.pue

    def perUserLoginDelay(self):
        """
        Return the hard-coded value.

        @return: L{pop3.IServerFactory}
        """
        return self.puld
class TestMailbox:
    """
    An incomplete L{IMailbox} implementation whose per-user values are
    hard-coded and known by tests in this module.  Useful for testing the
    server's per-user capability advertisement.
    """

    # Per-user values that should override the factory-wide ones.
    loginDelay = 100
    messageExpiration = 25
def contained(testcase, s, *caps):
    """
    Assert that capability C{s} appears in every one of the given
    capability collections.

    @param testcase: A L{unittest.TestCase} used to make assertions.
    @param s: The capability to look for.
    @type s: L{bytes}
    @param caps: The capability collections to search.
    @type caps: L{tuple} of iterable
    """
    for capability_set in caps:
        testcase.assertIn(s, capability_set)
class CapabilityTests(unittest.TestCase):
    """
    Tests for L{pop3.POP3}'s per-user capability handling.
    """
    def setUp(self):
        """
        Create a POP3 server with some capabilities.
        """
        # First pass: capture the capabilities advertised before any
        # mailbox is attached (factory/global values only).
        s = BytesIO()
        p = pop3.POP3()
        p.factory = TestServerFactory()
        p.transport = internet.protocol.FileWrapper(s)
        p.connectionMade()
        p.do_CAPA()
        self.caps = p.listCapabilities()
        self.pcaps = s.getvalue().splitlines()
        # Second pass: attach a mailbox carrying per-user values and capture
        # the (possibly different) per-user capability advertisement.
        s = BytesIO()
        p.mbox = TestMailbox()
        p.transport = internet.protocol.FileWrapper(s)
        p.do_CAPA()
        self.lpcaps = s.getvalue().splitlines()
        p.connectionLost(failure.Failure(Exception("Test harness disconnect")))

    def test_UIDL(self):
        """
        The server can advertise the I{UIDL} capability.
        """
        contained(self, b"UIDL", self.caps, self.pcaps, self.lpcaps)

    def test_TOP(self):
        """
        The server can advertise the I{TOP} capability.
        """
        contained(self, b"TOP", self.caps, self.pcaps, self.lpcaps)

    def test_USER(self):
        """
        The server can advertise the I{USER} capability.
        """
        contained(self, b"USER", self.caps, self.pcaps, self.lpcaps)

    def test_EXPIRE(self):
        """
        The server can advertise its per-user expiration as well as a global
        expiration.
        """
        contained(self, b"EXPIRE 60 USER", self.caps, self.pcaps)
        # The mailbox's per-user value (25) replaces the global one.
        contained(self, b"EXPIRE 25", self.lpcaps)

    def test_IMPLEMENTATION(self):
        """
        The server can advertise its implementation string.
        """
        contained(
            self,
            b"IMPLEMENTATION Test Implementation String",
            self.caps, self.pcaps, self.lpcaps
        )

    def test_SASL(self):
        """
        The server can advertise the SASL schemes it supports.
        """
        contained(
            self,
            b"SASL SCHEME_1 SCHEME_2",
            self.caps, self.pcaps, self.lpcaps
        )

    def test_LOGIN_DELAY(self):
        """
        The server can advertise a per-user login delay as well as a global
        login delay.
        """
        contained(self, b"LOGIN-DELAY 120 USER", self.caps, self.pcaps)
        # The mailbox's per-user value (100) replaces the global one.
        self.assertIn(b"LOGIN-DELAY 100", self.lpcaps)
class GlobalCapabilitiesTests(unittest.TestCase):
    """
    Tests for L{pop3.POP3}'s global capability handling.
    """
    def setUp(self):
        """
        Create a POP3 server with some capabilities.
        """
        # Disable per-user values on the factory so only global values can
        # be advertised, then capture the pre-mailbox capability listing.
        s = BytesIO()
        p = pop3.POP3()
        p.factory = TestServerFactory()
        p.factory.pue = p.factory.puld = False
        p.transport = internet.protocol.FileWrapper(s)
        p.connectionMade()
        p.do_CAPA()
        self.caps = p.listCapabilities()
        self.pcaps = s.getvalue().splitlines()
        # Second pass: attach a mailbox; since per-user support is off the
        # advertisement should not change.
        s = BytesIO()
        p.mbox = TestMailbox()
        p.transport = internet.protocol.FileWrapper(s)
        p.do_CAPA()
        self.lpcaps = s.getvalue().splitlines()
        p.connectionLost(failure.Failure(Exception("Test harness disconnect")))
def test_EXPIRE(self):
    """
    I{EXPIRE} is in the server's advertised capabilities.
    """
    # With per-user expiration disabled, the bare global value must appear
    # in all three capability listings.
    contained(self, b"EXPIRE 60", self.caps, self.pcaps, self.lpcaps)
def test_LOGIN_DELAY(self):
"""
I{LOGIN-DELAY} | |
<reponame>SchrodingersGat/PyGen
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from rapidfuzz import fuzz
from . import debug
class PidgenElement():
    """
    Base level PidgenElement class.
    Provides low-level functionality inherited by all higher classes
    """
    # Keys accepted on every element, in addition to any REQUIRED_KEYS /
    # ALLOWED_KEYS declared by subclasses.
    BASIC_KEYS = [
        "name",
        "title",
        "comment",
    ]
    # Options for specifying a "true" value
    _TRUE = ["y", "yes", "1", "true", "on"]
    # Options for specifying a "false" value
    _FALSE = ["n", "no", "0", "false", "off"]
def __repr__(self):
    """Human-readable identity: '<file>:<line> - <tag>:<name>'."""
    location = "{}:{}".format(self.path, self.lineNumber)
    return "{} - <{}>:{}".format(location, self.tag, self.name)
def __init__(self, parent, **kwargs):
    """
    Initialize the element with some basic information
    args:
        parent - Parent object for this object. e.g. directory -> file -> packet -> struct -> data
    kwargs:
        xml - Raw xml data associated with this object
        path - Filepath of this object
    """
    # A subclass may have assigned self.xml before delegating here; do not
    # clobber it in that case.
    if not hasattr(self, 'xml'):
        self.xml = kwargs.get("xml", None)
    self.children = []
    self.parent = parent
    # Register with the parent so the tree can be traversed in both directions.
    if self.parent is not None:
        self.parent.addChild(self)
    # Store a copy of the kwargs
    self.kwargs = kwargs
    # Validate before parsing so errors reference the raw definition.
    self.validateKeys()
    self.validateChildren()
    self._parse()
def _parse(self):
    """Parse the underlying XML element, if one is attached."""
    if self.xml is None:
        return
    debug.debug("Parsing", str(self))
    self.parse()
def parse(self):
    """Hook for subclasses; the base implementation is a no-op."""
    return
def checkPath(self, path):
    """
    Check if the given path has already been parsed by the protocol.
    Args:
        path - Path of the file or directory to check
    Returns:
        True when the path exists and was newly registered; a falsy value
        when it was already parsed or does not exist.
    """
    abspath = os.path.abspath(path)
    if abspath in self.protocol.files:
        # Already parsed once - warn but do not register it again.
        debug.warning("{path} - Path '{f}' has already been parsed".format(
            path=self.path,
            f=path))
    elif os.path.exists(abspath):
        # Record the path globally so it is not parsed a second time.
        self.protocol.files.append(abspath)
        return True
    else:
        debug.error("{path} - Path '{f}' is invalid".format(
            path=self.path,
            f=abspath))
    return False
def findItemByName(self, item_type, item_name, global_search=True, ignore_case=True):
    """
    Lookup an object using the provided name.
    Args:
        item_type - Can be either a class type or a string which matches a class
        item_name - Name of the item to look for (case-insensitive)
    kwargs:
        global_search - If True, search the entire protocol. Otherwise, search local object. (Default = True)
        ignore_case - If true, use case-insenstive matching (default=True)
    Return:
        Matching item, if one (and only one) match was found.
        - If no matches are found, issue a warning and return None
        - If multiple matches are found, issue an error and return None
    """
    if global_search:
        # Search the entire protocol
        context = self.protocol
    else:
        # Search just the current object
        context = self
    # Grab list of structs
    childs = context.getChildren(item_type, traverse_children=global_search)
    # List of exact matches
    exact_matches = []
    # Track the closest fuzzy match so a suggestion can be offered when no
    # exact match exists.
    best_score = 0
    best_match = None
    if ignore_case:
        item_name = item_name.lower()
    for child in childs:
        child_name = child.name
        if ignore_case:
            child_name = child_name.lower()
        if child_name == item_name:
            exact_matches.append(child)
            best_score = 100
            best_match = child
        else:
            # NOTE: fuzzy scoring always lowercases, regardless of the
            # ignore_case flag.
            score = fuzz.partial_ratio(child.name.lower(), item_name.lower())
            if score > best_score:
                best_score = score
                best_match = child
    if len(exact_matches) == 1:
        return exact_matches[0]
    elif len(exact_matches) > 1:
        debug.error("Multiple matches found for '{t}' : '{n}'".format(t=item_type, n=item_name))
    else:
        debug.warning("No matches found for '{t}' : '{n}'".format(t=item_type, n=item_name))
        # Only suggest reasonably similar alternatives (score > 65).
        if best_match is not None and best_score > 65:
            debug.warning("Instead of '{n}', did you mean '{s}'?".format(n=item_name, s=best_match.name))
    return None
def getChildren(self, pattern, traverse_children=False):
    """
    Return children of this item conforming to the provided pattern.

    Pattern can be:
    a) A class type
    b) A "string" representation of a class type (avoids circular imports)
    c) A list of either of the above
    """
    # Normalize to a list so the matching loop is uniform.
    if type(pattern) not in [list, tuple]:
        pattern = [pattern]
    matches = []
    for child in self.children:
        for candidate in pattern:
            if type(candidate) is str:
                if candidate.lower() in str(child.__class__).lower():
                    matches.append(child)
                    break
            elif isinstance(child, candidate):
                matches.append(child)
                break
        if traverse_children:
            matches += child.getChildren(pattern, True)
    return matches
@property
def protocol(self):
    """
    Return the top-level "Protocol" (directory parser) object by walking
    up the parent chain until an object with no parent is found.
    """
    if not hasattr(self, 'parent') or self.parent is None:
        return self
    # Recurse so deeply nested elements (directory -> file -> packet ->
    # struct -> data) still reach the top of the tree; the previous
    # implementation returned the immediate parent instead of the root.
    return self.parent.protocol
@property
def lineNumber(self):
    """Line number of the XML element defining this object (0 if unknown)."""
    # getattr's default also covers self.xml being None.
    return getattr(self.xml, "_start_line_number", 0)
@property
def tag(self):
    """Base XML tag associated with this element ('' when no XML attached)."""
    return '' if self.xml is None else self.xml.tag
def keys(self):
    """
    Return the list of attribute keys present on this element's XML tag
    (empty when no XML is attached).
    """
    if self.xml is None:
        return []
    return list(self.xml.keys())
def isSet(self, key, default=False):
    """
    Test if the given key's value is "set" in a boolean sense: the key is
    present and its value looks like a truthy binary value.
    """
    value = self.get(key, default)
    return self.parseBool(value)
def get(self, key, ret=None, ignore_case=True):
    """
    Return the value associated with the given key in the XML data.

    Args:
        key - Name of the key (or a list of keys checked in order)
    kwargs:
        ret - Value to return if the key is not found
        ignore_case - If true, key lookup is not case sensitive (default = True)
    """
    if getattr(self, 'xml', None) is None:
        return ret
    # Normalize to a list so single keys and key lists share one code path.
    wanted_keys = key if type(key) in [list, tuple] else [key]
    for wanted in wanted_keys:
        target = wanted.lower() if ignore_case else wanted
        for actual in self.keys():
            candidate = actual.lower() if ignore_case else actual
            if target == candidate:
                return self.xml.get(actual, ret)
    # No matching key found
    return ret
@property
def name(self):
    """The 'name' attribute for this object (None when absent)."""
    return self.get("name")
@property
def title(self):
    """
    Optional descriptive 'title' text for this object; falls back to the
    'name' field when no title is present.
    """
    return self.get("title", ret=self.name)
@property
def path(self):
    """Filepath for this object, inherited from the parent when unset."""
    local_path = self.kwargs.get('path', None)
    if local_path is not None:
        return local_path
    if self.parent is not None:
        return self.parent.path
    return None
@property
def directory(self):
    """
    Return the directory containing this object's file.
    """
    return os.path.dirname(self.path)
@property
def comment(self):
    """The 'comment' attribute for this object (None when absent)."""
    return self.get('comment')
@property
def ancestors(self):
    """Flattened list of ancestors, nearest parent first."""
    chain = []
    node = self.parent
    while node is not None:
        chain.append(node)
        node = node.parent
    return chain
def getDescendants(self, descendants=None):
    """
    Return a flattened list of all descendants of this object (recursive)

    Args:
        descendants - Optional accumulator list passed down to recursive
                      calls; a fresh list is created when omitted.
    """
    # A literal [] default is evaluated once and shared across *all* calls
    # (classic mutable-default pitfall), so repeated calls kept appending
    # to the same list; create the accumulator per call instead.
    if descendants is None:
        descendants = []
    for child in self.children:
        # Add the child to the list
        descendants.append(child)
        # Child then adds its own descendants to the list
        child.getDescendants(descendants)
    return descendants
def addChild(self, child):
    """Register the given child under this element, ignoring duplicates."""
    if child in self.children:
        return
    self.children.append(child)
def getSetting(self, key):
    """
    Look up a settings value, deferring to the parent when not set locally
    (top-down settings hierarchy).  Returns None when no level defines it.
    """
    if key in self.kwargs:
        return self.kwargs[key]
    if self.parent is not None:
        return self.parent.getSetting(key)
    return None
def setSettings(self, key, value):
    """
    Set the value of a local settings parameter.
    This value is available to this object and also any children,
    unless those children override the value.
    """
    self.kwargs[key] = value
@property
def required_keys(self):
    """Keys that must be present on this element (REQUIRED_KEYS, if declared)."""
    return set(getattr(self, "REQUIRED_KEYS", []))
@property
def allowed_keys(self):
    """
    All keys permitted on this element: the universal BASIC_KEYS, any
    subclass ALLOWED_KEYS, plus the required keys (which are implicitly
    allowed).
    """
    permitted = set(self.BASIC_KEYS)
    permitted.update(getattr(self, "ALLOWED_KEYS", []))
    permitted.update(self.required_keys)
    return permitted
@property
def required_children(self):
    """Child element types that must appear under this element."""
    return set(getattr(self, "REQUIRED_CHILDREN", []))
@property
def allowed_children(self):
    """Child element types permitted under this element."""
    return set(getattr(self, "ALLOWED_CHILDREN", []))
def validateKeys(self):
"""
Ensure that the tags provided under | |
<filename>periplasmic_proteome.py
from numpy.polynomial.polynomial import polyfit
from glob import glob
import pandas as pd
import numpy as np
from Bio import SeqIO
import math
import os
import cobrame
from cobrame.core.processdata import PostTranslationData
from cobrame.core.component import ProcessedProtein, Metabolite
from cobrame.core.reaction import PostTranslationReaction, MEReaction
from cobrame.util import mu, building
# Reference tables used throughout this module.
peptide_rog_df = pd.read_csv('data/peptide_radius_of_gyration.csv')
periplasm_fold_rate_df = pd.read_csv('data/periplasm_protein_fold_rate.csv', index_col=0)
charged_aa_pKa_df = pd.read_csv('data/charged_aa_side_chain_pKa.csv')
# Root data directory and the per-protein structure subdirectory under it.
DATA_DIR = 'data/'; all_proteins_dir = 'proteins/'
def get_protein_net_charge_table():
    """
    Build a table of folded-protein net charge versus pH.

    Scans every gene directory under data/proteins/ containing an MCCE
    'sum_crg.out' output file and collects its Net_Charge row.

    Returns:
        pandas.DataFrame indexed by pH (0 to 14 in 0.5 steps), one column
        per gene id.
    """
    proteins_w_charge_calc = []
    for cur_gene_path in glob(DATA_DIR + all_proteins_dir + '*'):  # periplasm_genes
        if os.path.exists(cur_gene_path + '/sum_crg.out'):
            proteins_w_charge_calc.append(cur_gene_path.split('/')[-1])
    # Define the dataframe with pH as the index and gene id as the column.
    protein_net_charge_df = pd.DataFrame(index=np.arange(0, 14.5, 0.5),
                                         columns=proteins_w_charge_calc)
    # Read the charge output file for each gene and fill in the table.
    for cur_gene in proteins_w_charge_calc:
        # 'with' guarantees the handle is closed even if parsing fails.
        with open(DATA_DIR + all_proteins_dir + cur_gene + '/sum_crg.out') as cur_file:
            for line in cur_file:
                if 'Net_Charge' not in line:
                    continue
                # list(...) is required on Python 3, where map() returns a
                # lazy iterator that cannot be assigned as a column.
                protein_net_charge_df[cur_gene] = list(map(float, line.split()[1:]))
    return protein_net_charge_df
# Module-level cache of per-protein net charge vs pH, built once at import.
protein_net_charge_df = get_protein_net_charge_table()
def calc_peptide_Rg(residue_num):
    '''
    Estimate the radius of gyration of an unfolded peptide from its residue
    count, using a linear fit of measured Rg against N**0.588.
    Data sources are listed in the source column of the spreadsheet:
    http://biostat.jhsph.edu/~iruczins/presentations/ruczinski.05.03.rutgers.pdf
    The scaling exponent 0.588 comes from Flory, Principles of Polymer
    Chemistry (1953).
    '''
    scaling_exponent = 0.588
    x_values = [cur ** scaling_exponent for cur in peptide_rog_df.Corrected_residue.values]
    y_values = peptide_rog_df.Rg_u.values
    fit_coef = polyfit(x_values, y_values, 1)
    # Evaluate the first-order polynomial at residue_num**0.588.
    basis = [residue_num ** (scaling_exponent * p) for p in range(2)]
    return np.dot(fit_coef, basis)
def get_AA_seq_from_pdb(pdb_file_path):
    """
    Read a PDB file and return the protein's amino acid sequence in
    one-letter code, adding one letter per residue number encountered.
    """
    three_to_one = {'ALA':'A','ARG':'R','ASN':'N','ASP':'D','CYS':'C','GLU':'E','GLN':'Q','GLY':'G','HIS':'H',\
    'ILE':'I','LEU':'L','LYS':'K','MET':'M','PHE':'F','PRO':'P','SER':'S','THR':'T','TRP':'W','TYR':'Y','VAL':'V'}
    sequence = ''
    last_residue_id = '0'
    with open(pdb_file_path) as handle:
        for line in handle:
            fields = line.split()
            if not fields or fields[0] != 'ATOM':
                continue
            # A new residue number marks the next residue in the chain.
            if fields[5] != last_residue_id:
                sequence += three_to_one[fields[3]]
                last_residue_id = fields[5]
    return sequence
def calc_tot_folding_energy(T, aa_num):
    """
    Estimate the total folding free energy of a protein, assumed at pH 7.

    Args:
        T - temperature in Kelvin
        aa_num - number of amino acid residues
    Returns:
        G_folded - G_unfolded, in kJ/mol
    """
    # Empirical residue-count-dependent thermodynamic parameters (kJ/mol).
    enthalpy = 4.0 * aa_num + 143.0
    entropy = (13.27 * aa_num + 448.0) / 1000
    heat_capacity = 0.049 * aa_num + 0.85
    T_h = 373.5
    T_s = 385
    dG = (enthalpy
          + heat_capacity * (T - T_h)
          - T * entropy
          - T * heat_capacity * np.log(T / T_s))
    return -dG
def calc_tot_folding_energy_from_b_num(T, b_num):
    """Total folding energy (kJ/mol) for the gene b_num, using its stored PDB length."""
    sequence = get_AA_seq_from_pdb(get_pdb_file_path(b_num))
    return calc_tot_folding_energy(T, len(sequence))
def calc_protein_Rg(pdb_file_path):
    '''
    Calculates the Radius of Gyration (Rg) of a protein given its .pdb
    structure file. Returns the Rg in Angstrom.
    This function is adapted from https://github.com/sarisabban/Rg/blob/master/Rg.py
    '''
    # Atomic masses for the elements considered (C, O, N, S).
    atomic_mass = {'C': 12.0107, 'O': 15.9994, 'N': 14.0067, 'S': 32.065}
    coord = list()
    mass = list()
    with open(pdb_file_path, 'r') as structure:
        for line in structure:
            toks = line.split()
            try:
                x, y, z = float(toks[6]), float(toks[7]), float(toks[8])
            except (IndexError, ValueError):
                # Not a coordinate record (header/footer line) - skip it.
                # The previous bare 'except: pass' also hid real errors.
                continue
            element = toks[-1]
            if element not in atomic_mass:
                # Previously an unrecognized element appended a coordinate
                # with no matching mass, silently misaligning the coord/mass
                # pairing for every subsequent atom; skip the atom entirely.
                continue
            coord.append([x, y, z])
            mass.append(atomic_mass[element])
    # Mass-weighted coordinates.
    xm = [(m * i, m * j, m * k) for (i, j, k), m in zip(coord, mass)]
    tmass = sum(mass)
    # <r^2> about the origin, then subtract the squared center of mass.
    rr = sum(mi * i + mj * j + mk * k for (i, j, k), (mi, mj, mk) in zip(coord, xm))
    mm = sum((sum(i) / tmass) ** 2 for i in zip(*xm))
    return math.sqrt(rr / tmass - mm)
def get_aa_charge(aa_one_letter, pH):
    """
    Charge of a single amino acid side chain at the given pH, from the
    Henderson-Hasselbalch relation with per-residue pKa values.
    Returns 0 for residues that are not ionizable.
    """
    if aa_one_letter not in charged_aa_pKa_df.one_letter.values:
        return 0
    row = charged_aa_pKa_df[charged_aa_pKa_df.one_letter == aa_one_letter]
    pKa = row.pKa.values[0]
    side_chain_type = row.type.values[0]
    ratio = np.power(10, pH - pKa)
    if side_chain_type == 'acid':
        return -ratio / (1 + ratio)
    elif side_chain_type == 'base':
        return 1 / (1 + ratio)
def get_peptide_charge(aa_one_letter_seq, pH):
    """
    Net charge of an unfolded peptide at the given pH, summing the
    Henderson-Hasselbalch charge of every ionizable residue.
    """
    acid_charge = lambda pKa: np.power(10, pH - pKa)/(1 + np.power(10, pH - pKa))
    base_charge = lambda pKa: np.power(10, pKa - pH)/(1 + np.power(10, pKa - pH))
    total_charge = 0
    for residue in aa_one_letter_seq:
        # Non-ionizable residues contribute nothing.
        if residue not in charged_aa_pKa_df.one_letter.values:
            continue
        row = charged_aa_pKa_df[charged_aa_pKa_df.one_letter == residue]
        pKa = row.pKa.values[0]
        side_chain_type = row.type.values[0]
        if side_chain_type == 'acid':
            total_charge -= acid_charge(pKa)
        elif side_chain_type == 'base':
            total_charge += base_charge(pKa)
    return total_charge
def get_protein_charge(bnum, pH_to_predict, degree=15):
    """
    Interpolate a folded protein's net charge at arbitrary pH values by
    fitting a polynomial to its precomputed pH profile.  Overfitting is
    acceptable here - the goal is only a smooth interpolating curve.
    """
    if type(pH_to_predict) != list:
        pH_to_predict = [pH_to_predict]
    fit_coef = polyfit(protein_net_charge_df.index,
                       protein_net_charge_df[bnum].values,
                       degree)
    powers = [np.power(pH_to_predict, p) for p in range(degree + 1)]
    predicted = np.dot(fit_coef, powers)
    # Scalar input -> scalar output; list input -> array output.
    return predicted[0] if len(predicted) == 1 else predicted
def get_pdb_file_path(b_num):
    """Path of the stored PDB structure for the given b-number gene."""
    return DATA_DIR + all_proteins_dir + b_num + '/prot.pdb'
def calc_e_folding_energy(b_num, pH, T=310.15, IS=0.25):
    """
    Electrostatic contribution to the folding energy for protein b_num at
    the given pH, temperature T (Kelvin) and ionic strength IS (mol/L),
    comparing a Born-type self-energy of the folded protein against the
    unfolded peptide.  Returns kJ/mol.
    """
    diele_water = 80.4; R = 8.314
    # NOTE(review): the original comments assert these are in meters -
    # confirm the 1.39e-4 prefactor and unit conversions.
    l_b = 1.39/10**4/diele_water/R/T #in meter
    k = np.sqrt(2*(IS*1000)*l_b) #convert IS from mol/L to mol/m3
    cur_bnum_pdb_path = get_pdb_file_path(b_num)
    cur_peptide_seq = get_AA_seq_from_pdb(cur_bnum_pdb_path)
    # Use the precomputed table when the pH is on its 0.5-step grid,
    # otherwise interpolate with the polynomial fit.
    protein_charge = protein_net_charge_df.at[pH, b_num] if pH in protein_net_charge_df.index else get_protein_charge(b_num, pH)
    protein_Rg = calc_protein_Rg(cur_bnum_pdb_path) / 10.0**10 #get angstrom, need to convert to meter
    peptide_charge = get_peptide_charge(cur_peptide_seq, pH)
    peptide_Rg = calc_peptide_Rg(len(cur_peptide_seq)) / 10.0**10 #get angstrom, need to convert to meter
    # Debye-Hueckel screened Born energies: folded minus unfolded.
    dG_e = R*T*(np.power(protein_charge, 2) * l_b / (2 * protein_Rg * (1 + k * protein_Rg)) - \
    np.power(peptide_charge, 2) * l_b / (2 * peptide_Rg * (1 + k * peptide_Rg)))
    return dG_e/1000 #unit in kJ/mol
def calc_tot_folding_energy_at_pH(b_num, pH, T = 310.15):
    """
    Total folding energy (kJ/mol) at an arbitrary pH: take the pH-7
    estimate and swap its electrostatic term for the one at the target pH.
    """
    total_at_pH7 = calc_tot_folding_energy_from_b_num(T, b_num)
    electrostatic_pH7 = calc_e_folding_energy(b_num, 7.0, T)
    electrostatic_target = calc_e_folding_energy(b_num, float(pH), T)
    return total_at_pH7 - electrostatic_pH7 + electrostatic_target
def get_fold_rate(seq, secstruct):
    """
    Submit sequence and structural class to the FOLD-RATE calculator
    (http://www.iitm.ac.in/bioinfo/fold-rate/) to calculate the kinetic
    folding rate.

    This function is adapted from the ssbio package, https://github.com/SBRG/ssbio/blob/master/ssbio/protein/sequence/properties/kinetic_folding_rate.py

    Args:
        seq: Amino acid sequence in string format
        secstruct (str): Structural class: ``all-alpha``, ``all-beta``, ``mixed``, or ``unknown``
    Returns:
        str: Kinetic folding rate k_f (per second), as scraped from the reply
    """
    # The module never imported urllib, and urllib.urlencode / urllib.urlopen
    # only ever existed on Python 2 - so this raised NameError at runtime.
    # Import the Python 3 equivalents locally.
    from urllib.parse import urlencode
    from urllib.request import urlopen

    url = 'http://www.iitm.ac.in/bioinfo/cgi-bin/fold-rate/foldrateCalculator.pl'
    values = {'sequence': seq, 'eqn': secstruct}
    data = urlencode(values).encode('ASCII')
    response = urlopen(url, data)
    result = str(response.read())
    # Scrape the rate out of the returned HTML.
    ind = result.find('The folding rate,')
    result2 = result[ind:ind + 70]
    ind1 = result2.find('=')
    ind2 = result2.find('/sec')
    rate = result2[ind1 + 2:ind2]
    return rate
def merge_list_of_list(list_of_list):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flattened = []
    for sublist in list_of_list:
        flattened.extend(sublist)
    return flattened
def get_chaperone_translocation_rxns(complex_name, me_model):
    """Unique translocation reactions involving any metabolite matching complex_name."""
    complex_mets = me_model.metabolites.query(complex_name)
    all_rxns = merge_list_of_list([met.reactions for met in complex_mets])
    return list(set([rxn for rxn in all_rxns if 'translocation_' in rxn.id]))
def add_periplasm_protein_folded(me_model):
    """
    Add folded-periplasm-protein species to the ME-model and rewire the
    affected reactions.

    Tat-translocated proteins cross the membrane already folded, so every
    reaction involving them is switched to the folded species and a
    cytoplasmic folding subreaction is added.  Sec-translocated proteins
    cross unfolded, so only their complex-formation reactions use the
    folded species and their cytoplasmic folding subreactions are removed.
    """
    # Partition periplasm proteins by translocation pathway (Tat vs Sec).
    unique_Tat_complex_translocation_rxns = get_chaperone_translocation_rxns('Tat', me_model)
    unique_Sec_complex_translocation_rxns = get_chaperone_translocation_rxns('Sec', me_model)
    periplasm_proteins_in_Tat_pathway = list(set([cur_protein.id for cur_protein in merge_list_of_list([rxn.products for rxn in unique_Tat_complex_translocation_rxns]) if 'Periplasm' in cur_protein.id]))
    periplasm_proteins_in_Sec_pathway = list(set([cur_protein.id for cur_protein in merge_list_of_list([rxn.products for rxn in unique_Sec_complex_translocation_rxns]) if 'Periplasm' in cur_protein.id]))
    bnum_in_Tat_pathway = [cur_protein.split('_')[1] for cur_protein in periplasm_proteins_in_Tat_pathway]
    bnum_in_Sec_pathway = [cur_protein.split('_')[1] for cur_protein in periplasm_proteins_in_Sec_pathway]
    # Add folded periplasm protein objects into the model.
    for cur_protein in me_model.metabolites.query('Periplasm'):
        cur_folded_pid = cur_protein.id + '_folded'
        folded_protein = ProcessedProtein(cur_folded_pid, cur_protein.id)
        me_model.add_metabolites([folded_protein])
    # The proteins translocated by Tat are already folded, so every reaction
    # they participate in is rewritten to use the folded species.
    for cur_protein_id in periplasm_proteins_in_Tat_pathway:
        cur_protein_folded = me_model.metabolites.get_by_id(cur_protein_id + '_folded')
        cur_protein_rxns = me_model.metabolites.get_by_id(cur_protein_id).reactions
        for cur_rxn in cur_protein_rxns:
            # pop() removes the unfolded species and returns its coefficient.
            cur_protein_coeff = cur_rxn.pop(cur_protein_id)
            cur_rxn.add_metabolites({cur_protein_folded: cur_protein_coeff})
    # Adjust the folding events occurring in the cytoplasm for Tat and Sec
    # assisted pathway proteins.
    for data in me_model.translation_data:
        # Add folding reactions to proteins translocated by Tat.
        if data.id in bnum_in_Tat_pathway:
            data.subreactions['GroEL_dependent_folding'] = 1
        # Remove folding reactions from proteins translocated by Sec.
        if data.id in bnum_in_Sec_pathway:
            for subreaction in list(data.subreactions.keys()):
                if 'folding' in subreaction:
                    data.subreactions.pop(subreaction)
    # For proteins in the Sec pathway only the complex-formation reactions
    # use the folded species; translocation still uses the unfolded protein.
    for cur_protein_id in periplasm_proteins_in_Sec_pathway:
        cur_protein_folded = me_model.metabolites.get_by_id(cur_protein_id + '_folded')
        cur_protein_rxns = me_model.metabolites.get_by_id(cur_protein_id).reactions
        for cur_rxn in cur_protein_rxns:
            if 'formation' in cur_rxn.id:
                cur_protein_coeff = cur_rxn.pop(cur_protein_id)
                cur_rxn.add_metabolites({cur_protein_folded: cur_protein_coeff})
def add_periplasm_protein_folding_reaction(me_model, pH, T = 310.15):
#this function allows repeatedly modifying the folding reaction given different pH
#https://github.com/SBRG/ssbio/tree/master/ssbio/protein/sequence/properties
R = 8.314
for cur_protein in me_model.metabolites.query('Periplasm'):
if 'folded' in cur_protein.id or 'b3509' in cur_protein.id: continue #just handle unfolded protein
cur_folded_pid = cur_protein.id + '_folded'
try:
folded_protein = me_model.metabolites.get_by_id(cur_folded_pid)
except KeyError:
folded_protein = ProcessedProtein(cur_folded_pid, cur_protein.id)
me_model.add_metabolites([folded_protein])
folding_id = 'folding_' + cur_protein.id + '_folding_spontaneous'
try:
folding_rxn = me_model.reactions.query(folding_id)[0]
except IndexError:
folding_rxn = PostTranslationReaction(folding_id)
me_model.add_reaction(folding_rxn)
folding_rxn.clear_metabolites() #first remove all previous metabolites
cur_bnum = cur_protein.id.split('_')[1]
if cur_bnum | |
== 1:
out = Node("qualified_name", meta=[None, args[0].value]) # no namespace
else:
out = Node("qualified_name", meta=[args[0].value, args[1].value])
#print("Converter.qualified_name: returning: %s" % (out.pretty(),))
return out
def dataset(self, args):
    """Build a 'dataset' node; the optional second child is a meta filter."""
    assert len(args) in (1, 2)
    meta_filter = args[1] if len(args) == 2 else None
    return Node("dataset", [args[0], meta_filter])
def filter(self, args):
    """Build a 'filter' node: meta holds (filter name, params); children are the queries."""
    assert len(args) == 3
    filter_name = args[0].value
    params = args[1]
    queries = args[2].C
    return Node("filter", queries, meta=(filter_name, params))
def filter_params(self, args):
    """Filter parameters pass through unchanged."""
    return args
def cmp_op(self, args):
    """Comparison: node type is the operator; children are [attribute name, value]."""
    lhs, op_token, rhs = args
    return Node(op_token.value, [lhs.value, rhs])
def in_op(self, args):
    """Containment test: children are [attribute name, value]."""
    value, name_token = args
    return Node("in", [name_token.value, value])
def meta_and(self, args):
    """AND of metadata expressions; nested meta_and children are flattened."""
    flattened = []
    for node in args:
        if node.T == "meta_and":
            flattened.extend(node.C)
        else:
            flattened.append(node)
    return Node("meta_and", flattened)
def meta_or(self, args):
    """OR of metadata expressions; nested meta_or children are flattened."""
    flattened = []
    for node in args:
        if node.T == "meta_or":
            flattened.extend(node.C)
        else:
            flattened.append(node)
    return Node("meta_or", flattened)
def _apply_not(self, node):
    """
    Push a logical NOT down into the expression tree, returning the
    equivalent negation-free form.
    """
    if node.T == "meta_and":
        # De Morgan: not (a and b) == (not a) or (not b)
        return Node("meta_or", [self._apply_not(c) for c in node.C])
    elif node.T == "meta_or":
        # De Morgan: not (a or b) == (not a) and (not b)
        return Node("meta_and", [self._apply_not(c) for c in node.C])
    elif node.T == "meta_not":
        # Double negation cancels out.
        return node.C[0]
    elif node.T in CMP_OPS:
        # Replace each comparison with its logical complement.
        new_op = {
            "~~": "!~~",
            "!~~": "~~",
            "~~*": "!~~*",
            "!~~*": "~~*",
            ">": "<=",
            "<": ">=",
            ">=": "<",
            "<=": ">",
            "=": "!=",
            "==": "!=",
            "!=": "=="
        }[node.T]
        return Node(new_op, node.C)
def meta_not(self, children):
    """NOT of a single metadata expression, rewritten via _apply_not."""
    assert len(children) == 1
    return self._apply_not(children[0])
class _Assembler(Ascender):
    """
    Bottom-up pass that inlines named queries: each named_query node is
    replaced by the parse tree of the stored query it refers to.
    """
    def __init__(self, db, default_namespace):
        # db: connection used to load stored queries;
        # default_namespace: used when a query name has no namespace prefix.
        Ascender.__init__(self)
        self.DB = db
        self.DefaultNamespace = default_namespace

    def walk(self, inp):
        out = Ascender.walk(self, inp)
        return out

    def named_query(self, children, query_name):
        """Load, parse and parameterize the referenced stored query."""
        namespace, name = query_name
        namespace = namespace or self.DefaultNamespace
        tree = Query.from_db(self.DB, namespace, name).parse()
        # Propagate the namespace into the inlined tree's parameters.
        tree = _ParamsApplier().walk(tree, {"namespace":namespace})
        return tree
class _ProvenancePusher(Descender):
    """
    Top-down pass that distributes provenance operators over unions:
    parents_of(union(a, b)) -> union(parents_of(a), parents_of(b)), and
    likewise for children_of.
    """
    # NOTE(review): children_of is decorated with @pass_node but parents_of
    # is not - confirm against Descender's dispatch whether this asymmetry
    # is intentional.
    def parents_of(self, node, _):
        children = node.C
        assert len(children) == 1
        child = children[0]
        if isinstance(child, Node) and child.T == "union":
            return Node("union", [self.walk(Node("parents_of", [cc])) for cc in child.C])

    @pass_node
    def children_of(self, node, _):
        children = node.C
        assert len(children) == 1
        child = children[0]
        if isinstance(child, Node) and child.T == "union":
            return Node("union", [self.walk(Node("children_of", [cc])) for cc in child.C])
class _LimitPusher(Descender):
    """
    Top-down pass that pushes 'limit' nodes toward the data sources so
    limits can be applied as early as possible.
    """

    def limit(self, node, limit):
        """Fold nested limits together (keeping the smallest) and descend."""
        assert len(node.C) == 1
        limit = node.M if limit is None else min(limit, node.M)
        return self.walk(node.C[0], limit)

    def union(self, node, limit):
        """Push the limit into every branch, then re-apply it to the union."""
        return Node("limit",
                    [Node("union",
                          [self.walk(c, limit) for c in node.C]
                          )
                     ], meta=limit)

    def data_source(self, node, limit):
        """Record the limit directly on the data source's metadata."""
        node.M.addLimit(limit)
        return node

    def _default(self, node, limit):
        """Wrap any other node in a limit; children are walked without one."""
        # Removed a stray uncommented debug print here - all other tracing
        # in this module is commented out.
        if limit is not None:
            new_node = Node(node.T, node.C, node.M)
            self.visit_children(new_node, None)
            return Node("limit", [new_node], meta=limit)
        else:
            return self.visit_children(node, None)
class _MetaExpPusher(Descender):
    """
    Top-down pass that pushes metadata filter expressions toward the data
    sources, combining nested filters along the way.
    """
    def meta_filter(self, node, meta_exp):
        """Merge this node's expression with any inherited one and descend."""
        node_q, node_exp = node.C
        if meta_exp is None:
            meta_exp = node_exp
        elif node_exp is None:
            meta_exp = meta_exp        # duh
        else:
            # Both present: conjoin them, wrapped for later DNF conversion.
            meta_exp = Node("meta_or", [Node("meta_and", [meta_exp, node_exp])])
        out = self.walk(node_q, meta_exp)
        return out

    def join(self, node, meta_exp):
        # A filter over a join applies to every joined branch.
        return Node("join", [self.walk(c, meta_exp) for c in node.C])

    def union(self, node, meta_exp):
        # A filter over a union applies to every branch.
        return Node("union", [self.walk(c, meta_exp) for c in node.C])

    def minus(self, node, meta_exp):
        # Only the left side is filtered; the subtrahend stays unfiltered.
        assert len(node.C) == 2
        left, right = node.C
        return Node("minus", [self.walk(left, meta_exp), self.walk(right, None)])

    def data_source(self, node, meta_exp):
        # Attach the accumulated expression to the data source's WHERE set.
        assert isinstance(node.M, DataSourceMeta)
        if meta_exp is not None: node.M.addWhere(meta_exp)
        return node
# Dead branch kept for reference: an older Visitor-based implementation of
# the DNF conversion.  Only the Descender-based version (else branch) is live.
if False:
    class _DNFConverter(Visitor):
        # find all DataSource nodes and apply DNF converter to their Wheres
        def DataSource(self, node, context):
            exp = node.Wheres
            if exp is not None:
                assert isinstance(exp, Node)
                exp = _MetaRegularizer().walk(exp)
                node.WheresDNF = _MetaRegularizer._make_DNF_lists(exp)
            # Returning False stops further descent below this node.
            return False
else:
    class _DNFConverter(Descender):
        # find all DataSource nodes and apply DNF converter to their Wheres
        def data_source(self, node, _):
            exp = node.M.Wheres
            if exp is not None:
                assert isinstance(exp, Node)
                # Normalize the expression, then convert to DNF list form.
                exp = _MetaRegularizer().walk(exp)
                node.M.WheresDNF = _MetaRegularizer._make_DNF_lists(exp)
class _SQLGenerator(Ascender):
    """
    Bottom-up pass converting data_source nodes into SQL nodes whose meta
    carries the rendered SQL text.
    """
    @pass_node
    def data_source(self, node):
        """Replace a data_source node with a SQL node holding its SQL."""
        # Removed an unused local ('keep_meta = True') that had no effect.
        return Node("SQL", meta=node.M.sql())
class _Evaluator(Ascender):
    """
    Bottom-up pass that evaluates an assembled query tree into DBFileSet
    results.
    """
    def __init__(self, db, filters, with_meta, limit):
        # db: database connection; filters: name -> callable user filters;
        # with_meta: whether to fetch file metadata; limit: global row limit.
        Ascender.__init__(self)
        self.Filters = filters
        self.DB = db
        self.WithMeta = with_meta
        self.Limit = limit
def parents_of(self, args, meta):
    """Evaluate parents_of: expand the single child fileset to its parents."""
    assert len(args) == 1
    fileset = args[0]
    return fileset.parents(with_metadata=True)
def children_of(self, args, meta):
    """Evaluate children_of: expand the single child fileset to its children."""
    assert len(args) == 1
    fileset = args[0]
    return fileset.children(with_metadata=True)
def limit(self, args, meta):
    """Apply a row limit to the child fileset, if one was attached."""
    assert len(args) == 1
    fileset = args[0]
    return fileset.limit(meta) if meta is not None else fileset
@pass_node
def data_source(self, node):
    """
    Leaf evaluation: materialize a data_source node into a DBFileSet,
    honoring the evaluator's metadata flag and global limit.
    """
    assert isinstance(node.M, DataSourceMeta)
    return DBFileSet.from_data_source(self.DB, node.M, self.WithMeta, self.Limit)
def source_spec_list(self, args, meta):
    """A list of source specs evaluates to the union of their filesets."""
    return DBFileSet.union(self.DB, args)
def data_source_rec(self, args, meta):
    """Transparent wrapper: pass the single child fileset through."""
    assert len(args) == 1
    return args[0]
def union(self, args, meta):
    """Union of the child filesets."""
    return DBFileSet.union(self.DB, args)
def join(self, args, meta):
    """Join (intersection) of the child filesets."""
    return DBFileSet.join(self.DB, args)
def minus(self, expressions, meta):
    """Set difference of exactly two filesets: left minus right."""
    assert len(expressions) == 2
    return expressions[0] - expressions[1]
def filter(self, args, meta):
    """Run a named user filter (with its params) over the input filesets."""
    filter_name, params = meta
    filter_function = self.Filters[filter_name]
    return DBFileSet(self.DB, filter_function(args, params))
def meta_filter(self, args, meta):
    """Keep only files whose metadata satisfies the filter expression."""
    assert len(args) == 2
    files, meta_exp = args
    if meta_exp is None:
        return files
    matching = (f for f in files if self.evaluate_meta_expression(f, meta_exp))
    return DBFileSet(self.DB, matching)
def _eval_meta_bool(self, f, bool_op, parts):
assert len(parts) > 0
p0 = parts[0]
rest = parts[1:]
ok = self.evaluate_meta_expression(f, p0)
if bool_op in ("and", "meta_and"):
if len(rest) and ok:
ok = self._eval_meta_bool(f, bool_op, rest)
return ok
elif bool_op in ("or", "meta_or"):
if len(rest) and not ok:
ok = self._eval_meta_bool(f, bool_op, rest)
return ok
elif bool_op == "not":
assert len(rest) == 0
return not ok
else:
raise ValueError("Unrecognized boolean operation '%s'" % (op,))
BOOL_OPS = ("and", "or", "not")
def evaluate_meta_expression(self, f, meta_expression):
#print("evaluate_meta_expression: meta_expression:", meta_expression.pretty())
op, args = meta_expression.T, meta_expression.C
if op in ("meta_and", "meta_or") and len(args) == 1:
return self.evaluate_meta_expression(f, args[0])
if op == "meta_and": op = "and"
if op == "meta_or": op = "or"
if op in self.BOOL_OPS:
return self._eval_meta_bool(f, op, args)
else:
#
name, value = args
attr_value = f.get_attribute(name, None)
if op == "<": return attr_value < value
elif op == ">": return attr_value > value
elif op == "<=": return attr_value <= value
elif op == ">=": return attr_value >= value
elif op in ("==",'='):
#print("evaluate_meta_expression:", repr(attr_value), repr(value))
return attr_value == value
elif op == "!=": return attr_value != value
elif op == "in": return value in attr_value # exception, e.g. 123 in event_list
else:
raise ValueError("Invalid comparison operator '%s' in %s" % (op, meta_expression))
def meta_exp_to_sql(self, meta_expression):
op, args = meta_expression.T, meta_expression.C
if op in self.BOOL_OPS:
bool_op = op
exps = args
else:
bool_op = "and"
if op in self.BOOL_OPS:
if op in ('or','and'):
sql_op = op
return (' ' + sql_op + ' ').join([
'(' + self.meta_exp_to_sql(part) + ')' for part in args])
elif op == 'not':
return ' not (' + self.meta_exp_to_sql(args[1]) + ')'
else:
raise ValueError("Unrecognized boolean operation '%s'" % (op,))
else:
name, value = args
if op in ('<', '>', '<=', '>=', '==', '=', '!='):
sql_op = '=' if op == '==' else op
if isinstance(value, bool): colname = "bool_value"
elif isinstance(value, int): colname = "int_value"
elif isinstance(value, float): colname = "float_value"
elif isinstance(value, str): colname = "string_value"
else:
raise ValueError("Unrecognized value type %s for attribute %s" % (type(value), name))
return "attr.name='%s' and attr.%s %s '%s'" % (name, colname, sql_op, value)
elif op == 'in':
value, _, name = meta_expression
if isinstance(value, bool): colname = "bool_array"
elif isinstance(value, int): colname = "int_array"
elif isinstance(value, float): colname = "float_array"
elif isinstance(value, str): colname = "string_array"
else:
raise ValueError("Unrecognized value type %s for attribute %s" % (type(value), name))
return "attr.name='%s' and '%s' in attr.%s" % (name, value, colname)
else:
raise ValueError("Invalid comparison operator '%s' in %s" % (op, meta_expression))
class Query(object):
_Parser = Lark(MQL_Grammar, start="query")
    def __init__(self, source, default_namespace=None):
        """
        source            -- MQL query source text
        default_namespace -- namespace assumed when the query does not name one
        """
        self.Source = source
        self.DefaultNamespace = default_namespace
        # Stages of the parse/optimize/assemble pipeline, filled in lazily.
        self.Parsed = self.Optimized = self.Assembled = None
def remove_comments(self, text):
out = []
for l | |
heading_angle, 2)
# heading_angle = 0.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = -math.pi/2
# node_index = cal_next_node(node_poses, node_index, heading_angle, 4)
# print(node_index)
# print(node_poses)
# with open(filename, 'w') as f:
# pickle.dump(node_poses, f)
# ##### script to generate 100-cross #####
# filename = '100-cross'
# swarm_size = 100
# node_poses = np.zeros((swarm_size, 2))
# node_index = 0
# heading_angle = 0.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 5)
# heading_angle = math.pi/2
# node_index = cal_next_node(node_poses, node_index, heading_angle, 17)
# heading_angle = 0.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 8)
# heading_angle = math.pi/2
# node_index = cal_next_node(node_poses, node_index, heading_angle, 5)
# heading_angle = -math.pi
# node_index = cal_next_node(node_poses, node_index, heading_angle, 8)
# heading_angle = math.pi/2
# node_index = cal_next_node(node_poses, node_index, heading_angle, 7)
# heading_angle = -math.pi
# node_index = cal_next_node(node_poses, node_index, heading_angle, 5)
# heading_angle = -math.pi/2
# node_index = cal_next_node(node_poses, node_index, heading_angle, 7)
# heading_angle = -math.pi
# node_index = cal_next_node(node_poses, node_index, heading_angle, 8)
# heading_angle = -math.pi/2
# node_index = cal_next_node(node_poses, node_index, heading_angle, 5)
# heading_angle = 0.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 8)
# heading_angle = -math.pi/2
# node_index = cal_next_node(node_poses, node_index, heading_angle, 16)
# print(node_index)
# print(node_poses)
# with open(filename, 'w') as f:
# pickle.dump(node_poses, f)
# ##### script to generate 30-hand #####
# filename = '30-hand'
# swarm_size = 30
# node_poses = np.zeros((swarm_size, 2))
# node_index = 0
# heading_angle = 0.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (20.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (20.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (55.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# # small finger
# heading_angle = reset_radian(heading_angle - (15.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (85.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# # middle finger(no ring finger)
# heading_angle = reset_radian(heading_angle - (147.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (85.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# # index finger
# heading_angle = reset_radian(heading_angle - (147.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (85.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# # thumb
# heading_angle = reset_radian(heading_angle - (125.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = reset_radian(heading_angle + (85.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (85.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# print(node_index)
# print(node_poses)
# with open(filename, 'w') as f:
# pickle.dump(node_poses, f)
# ##### script to generate 100-hand #####
# filename = '100-hand'
# swarm_size = 100
# node_poses = np.zeros((swarm_size, 2))
# node_index = 0
# heading_angle = 0.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 6)
# heading_angle = reset_radian(heading_angle + (45.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = reset_radian(heading_angle + (35.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 5)
# # small finger
# heading_angle = reset_radian(heading_angle - (15.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 6)
# heading_angle = reset_radian(heading_angle + (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (44.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 4)
# heading_angle = reset_radian(heading_angle - (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# # ring finger
# heading_angle = reset_radian(heading_angle - (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 7)
# heading_angle = reset_radian(heading_angle + (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (44.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 7)
# heading_angle = reset_radian(heading_angle - (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# # middle finger
# heading_angle = reset_radian(heading_angle - (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 8)
# heading_angle = reset_radian(heading_angle + (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (44.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 8)
# heading_angle = reset_radian(heading_angle - (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# # index finger
# heading_angle = reset_radian(heading_angle - (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 7)
# heading_angle = reset_radian(heading_angle + (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (44.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 8)
# heading_angle = reset_radian(heading_angle - (10.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = reset_radian(heading_angle - (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# # thumb
# heading_angle = reset_radian(heading_angle - (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 5)
# heading_angle = reset_radian(heading_angle + (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (80.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (40.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 9)
# heading_angle = reset_radian(heading_angle + (20.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# print(node_index)
# print(node_poses)
# with open(filename, 'w') as f:
# pickle.dump(node_poses, f)
# ##### script to generate 30-wrench #####
# filename = '30-wrench'
# swarm_size = 30
# node_poses = np.zeros((swarm_size, 2))
# node_index = 0
# heading_angle = 0.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle - (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = reset_radian(heading_angle + (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle - (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# heading_angle = reset_radian(heading_angle - (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle - (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# # half the wrench is finished, mirror the next half
# axis_vect = node_poses[node_index] - node_poses[0]
# axis_vect = axis_vect / np.linalg.norm(axis_vect)
# for i in range(1,15):
# old_vect = node_poses[i] - node_poses[0]
# node_poses[-i] = 2*np.dot(axis_vect, old_vect)*axis_vect - old_vect
# print(node_index)
# print(node_poses)
# with open(filename, 'w') as f:
# pickle.dump(node_poses, f)
# ##### script to generate 100-wrench #####
# filename = '100-wrench'
# swarm_size = 100
# node_poses = np.zeros((swarm_size, 2))
# node_index = 0
# heading_angle = 0.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# heading_angle = reset_radian(heading_angle - (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 4)
# heading_angle = reset_radian(heading_angle + (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# heading_angle = reset_radian(heading_angle - (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 20)
# heading_angle = reset_radian(heading_angle - (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# heading_angle = reset_radian(heading_angle + (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 4)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = reset_radian(heading_angle + (90.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# heading_angle = reset_radian(heading_angle - (50.0*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# # half the wrench is finished, mirror the next half
# axis_vect = node_poses[node_index] - node_poses[0]
# axis_vect = axis_vect / np.linalg.norm(axis_vect)
# for i in range(1,50):
# old_vect = node_poses[i] - node_poses[0]
# node_poses[-i] = 2*np.dot(axis_vect, old_vect)*axis_vect - old_vect
# print(node_index)
# print(node_poses)
# with open(filename, 'w') as f:
# pickle.dump(node_poses, f)
# ##### script to generate 30-goblet #####
# filename = '30-goblet'
# swarm_size = 30
# node_poses = np.zeros((swarm_size, 2))
# node_index = 0
# arc_angle = 10.8 # default 11.25 deg
# heading_angle = 0.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 2)
# heading_angle = reset_radian(heading_angle + (135*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (30*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = math.pi/2
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# heading_angle = (arc_angle*math.pi)/180.0
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (2*arc_angle*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (2*arc_angle*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = reset_radian(heading_angle + (2*arc_angle*math.pi)/180.0)
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = math.pi/2
# node_index = cal_next_node(node_poses, node_index, heading_angle, 1)
# heading_angle = -math.pi
# node_index = cal_next_node(node_poses, node_index, heading_angle, 3)
# # half the wrench is finished, mirror the next half
# axis_vect = node_poses[node_index] - node_poses[0]
# axis_vect = axis_vect / np.linalg.norm(axis_vect)
# for i in range(1,15):
# old_vect = node_poses[i] - node_poses[0]
# node_poses[-i] = 2*np.dot(axis_vect, old_vect)*axis_vect - old_vect
# print(node_index)
# print(node_poses)
# with open(filename, 'w') as f:
# pickle.dump(node_poses, f)
# ##### script to generate 100-goblet #####
# filename | |
<reponame>bopopescu/sage<filename>src/sage/groups/affine_gps/group_element.py
"""
Elements of Affine Groups
The class in this module is used to represent the elements of
:func:`~sage.groups.affine_gps.affine_group.AffineGroup` and its
subgroups.
EXAMPLES::
sage: F = AffineGroup(3, QQ)
sage: F([1,2,3,4,5,6,7,8,0], [10,11,12])
[1 2 3] [10]
x |-> [4 5 6] x + [11]
[7 8 0] [12]
sage: G = AffineGroup(2, ZZ)
sage: g = G([[1,1],[0,1]], [1,0])
sage: h = G([[1,2],[0,1]], [0,1])
sage: g*h
[1 3] [2]
x |-> [0 1] x + [1]
sage: h*g
[1 3] [1]
x |-> [0 1] x + [1]
sage: g*h != h*g
True
AUTHORS:
- <NAME>
"""
#*****************************************************************************
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.element import is_Matrix
from sage.misc.cachefunc import cached_method
from sage.structure.element import MultiplicativeGroupElement
from sage.structure.richcmp import richcmp, richcmp_not_equal
class AffineGroupElement(MultiplicativeGroupElement):
r"""
An affine group element.
INPUT:
- ``A`` -- an invertible matrix, or something defining a
matrix if ``convert==True``.
- ``b``-- a vector, or something defining a vector if
``convert==True`` (default: ``0``, defining the zero
vector).
- ``parent`` -- the parent affine group.
- ``convert`` - bool (default: ``True``). Whether to convert
``A`` into the correct matrix space and ``b`` into the
correct vector space.
- ``check`` - bool (default: ``True``). Whether to do some
checks or just accept the input as valid.
As a special case, ``A`` can be a matrix obtained from
:meth:`matrix`, that is, one row and one column larger. In
that case, the group element defining that matrix is
reconstructed.
OUTPUT:
The affine group element `x \mapsto Ax + b`
EXAMPLES::
sage: G = AffineGroup(2, GF(3))
sage: g = G.random_element()
sage: type(g)
<class 'sage.groups.affine_gps.affine_group.AffineGroup_with_category.element_class'>
sage: G(g.matrix()) == g
True
sage: G(2)
[2 0] [0]
x |-> [0 2] x + [0]
Conversion from a matrix and a matrix group element::
sage: M = Matrix(4, 4, [0, 0, -1, 1, 0, -1, 0, 1, -1, 0, 0, 1, 0, 0, 0, 1])
sage: A = AffineGroup(3, ZZ)
sage: A(M)
[ 0 0 -1] [1]
x |-> [ 0 -1 0] x + [1]
[-1 0 0] [1]
sage: G = MatrixGroup([M])
sage: A(G.0)
[ 0 0 -1] [1]
x |-> [ 0 -1 0] x + [1]
[-1 0 0] [1]
"""
    def __init__(self, parent, A, b=0, convert=True, check=True):
        r"""
        Create element of an affine group.

        TESTS::

            sage: G = AffineGroup(4, GF(5))
            sage: g = G.random_element()
            sage: TestSuite(g).run()
        """
        # Accept matrix-group elements and similar wrappers by extracting
        # their underlying matrix.
        try:
            A = A.matrix()
        except AttributeError:
            pass
        # Special case: A is the (d+1)x(d+1) augmented matrix produced by
        # :meth:`matrix`; split it back into linear part and translation.
        if is_Matrix(A) and A.nrows() == A.ncols() == parent.degree()+1:
            g = A
            d = parent.degree()
            A = g.submatrix(0, 0, d, d)
            b = [ g[i,d] for i in range(d) ]
            convert = True  # the extracted pieces still need coercion below
        if convert:
            A = parent.matrix_space()(A)
            b = parent.vector_space()(b)
        if check:
            # Note: the coercion framework expects that we raise TypeError for invalid input
            if not is_Matrix(A):
                raise TypeError('A must be a matrix')
            if not (A.parent() is parent.matrix_space()):
                raise TypeError('A must be an element of '+str(parent.matrix_space()))
            if not (b.parent() is parent.vector_space()):
                raise TypeError('b must be an element of '+str(parent.vector_space()))
            # Group-specific validation (delegated to the parent group).
            parent._element_constructor_check(A, b)
        super(AffineGroupElement, self).__init__(parent)
        self._A = A
        self._b = b
    def A(self):
        """
        Return the general linear part of an affine group element.

        OUTPUT:

        The matrix `A` of the affine group element `Ax + b`.

        EXAMPLES::

            sage: G = AffineGroup(3, QQ)
            sage: g = G([1,2,3,4,5,6,7,8,0], [10,11,12])
            sage: g.A()
            [1 2 3]
            [4 5 6]
            [7 8 0]
        """
        return self._A
    def b(self):
        """
        Return the translation part of an affine group element.

        OUTPUT:

        The vector `b` of the affine group element `Ax + b`.

        EXAMPLES::

            sage: G = AffineGroup(3, QQ)
            sage: g = G([1,2,3,4,5,6,7,8,0], [10,11,12])
            sage: g.b()
            (10, 11, 12)
        """
        return self._b
@cached_method
def matrix(self):
"""
Return the standard matrix representation of ``self``.
.. SEEALSO::
- :meth:`AffineGroup.linear_space()`
EXAMPLES::
sage: G = AffineGroup(3, GF(7))
sage: g = G([1,2,3,4,5,6,7,8,0], [10,11,12])
sage: g
[1 2 3] [3]
x |-> [4 5 6] x + [4]
[0 1 0] [5]
sage: g.matrix()
[1 2 3|3]
[4 5 6|4]
[0 1 0|5]
[-----+-]
[0 0 0|1]
sage: parent(g.matrix())
Full MatrixSpace of 4 by 4 dense matrices over Finite Field of size 7
sage: g.matrix() == matrix(g)
True
Composition of affine group elements equals multiplication of
the matrices::
sage: g1 = G.random_element()
sage: g2 = G.random_element()
sage: g1.matrix() * g2.matrix() == (g1*g2).matrix()
True
"""
A = self._A
b = self._b
parent = self.parent()
d = parent.degree()
from sage.matrix.constructor import matrix, zero_matrix, block_matrix
zero = zero_matrix(parent.base_ring(), 1, d)
one = matrix(parent.base_ring(), [[1]])
m = block_matrix(2,2, [A, b.column(), zero, one])
m.set_immutable()
return m
_matrix_ = matrix
    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: G = AffineGroup(2, QQ)
            sage: g = G([[1, 1], [0, 1]], [3,4])
            sage: g
            [1 1] [3]
            x |-> [0 1] x + [4]
        """
        A = str(self._A)
        b = str(self._b.column())
        deg = self.parent().degree()
        indices = range(deg)
        s = []
        # Print matrix and vector side by side, with the "x |-> ... x + ..."
        # glue inserted on the middle row only.
        for Ai, bi, i in zip(A.splitlines(), b.splitlines(), indices):
            if i == deg//2:
                s.append('x |-> '+Ai+' x + '+bi)
            else:
                s.append(' '+Ai+' '+bi)
        return '\n'.join(s)
def _latex_(self):
r"""
Return a LaTeX representation of ``self``.
EXAMPLES::
sage: G = AffineGroup(2, QQ)
sage: g = G([[1, 1], [0, 1]], [3,4])
sage: latex(g)
\vec{x}\mapsto \left(\begin{array}{rr}
1 & 1 \\
0 & 1
\end{array}\right)\vec{x} + \left(\begin{array}{r}
3 \\
4
\end{array}\right)
sage: g._latex_()
'\\vec{x}\\mapsto \\left(\\begin{array}{rr}\n1 & 1 \\\\\n0 &
1\n\\end{array}\\right)\\vec{x} + \\left(\\begin{array}{r}\n3
\\\\\n4\n\\end{array}\\right)'
"""
return r'\vec{x}\mapsto '+self.A()._latex_()+r'\vec{x} + '+self.b().column()._latex_()
def _mul_(self, other):
"""
Return the composition of ``self`` and ``other``.
INPUT:
- ``other`` -- another element of the same affine group.
OUTPUT:
The product of the affine group elements ``self`` and
``other`` defined by the composition of the two affine
transformations.
EXAMPLES::
sage: G = AffineGroup(2, GF(3))
sage: g = G([1,1, 0,1], [0,1])
sage: h = G([1,1, 0,1], [1,2])
sage: g*h
[1 2] [0]
x |-> [0 1] x + [0]
sage: g.matrix() * h.matrix() == (g*h).matrix()
True
"""
parent = self.parent()
A = self._A * other._A
b = self._b + self._A * other._b
return parent.element_class(parent, A, b, check=False)
def __call__(self, v):
"""
Apply the affine transformation to ``v``.
INPUT:
- ``v`` -- a multivariate polynomial, a vector, or anything
that can be converted into a vector.
OUTPUT:
The image of ``v`` under the affine group element.
EXAMPLES::
sage: G = AffineGroup(2, QQ)
sage: g = G([0,1,-1,0],[2,3]); g
[ 0 1] [2]
x |-> [-1 0] x + [3]
sage: v = vector([4,5])
sage: g(v)
(7, -1)
sage: R.<x,y> = QQ[]
sage: g(x), g(y)
(y + 2, -x + 3)
sage: p = x^2 + 2*x*y + y + 1
sage: g(p)
-2*x*y + y^2 - 5*x + 10*y + 20
The action on polynomials is such that it intertwines with
evaluation. That is::
sage: p(*g(v)) == g(p)(*v)
True
Test that the univariate polynomial ring is covered::
sage: H = AffineGroup(1, QQ)
sage: h = H([2],[3]); h
x |-> [2] x + [3]
sage: R.<z> = QQ[]
sage: h(z+1)
3*z + 2
"""
from sage.rings.polynomial.polynomial_element import is_Polynomial
from sage.rings.polynomial.multi_polynomial import is_MPolynomial
parent = self.parent()
if is_Polynomial(v) and parent.degree() == 1:
ring = v.parent()
return ring([self._A[0,0], self._b[0]])
if is_MPolynomial(v) and parent.degree() == v.parent().ngens():
ring = v.parent()
from sage.modules.all import vector
image_coords = self._A * vector(ring, ring.gens()) + self._b
return v(*image_coords)
v = parent.vector_space()(v)
return self._A*v + self._b
    def _act_on_(self, x, self_on_left):
        """
        Define the multiplicative action of the affine group elements.

        EXAMPLES::

            sage: G = AffineGroup(2, GF(3))
            sage: g = G([1,2,3,4], [5,6])
            sage: g
            [1 2] [2]
            x |-> [0 1] x + [0]
            sage: v = vector(GF(3), [1,-1]); v
            (1, 2)
            sage: g*v
            (1, 2)
            sage: g*v == g.A() * v + g.b()
            True
        """
        # Only acts from the left (g*v); implicitly returns None otherwise —
        # presumably the coercion framework then looks for another action
        # (NOTE(review): confirm against the parent/coercion machinery).
        if self_on_left:
            return self(x)
def inverse(self):
"""
Return the inverse group element.
OUTPUT:
Another affine group element.
EXAMPLES::
sage: G = AffineGroup(2, GF(3))
sage: g | |
import pygame
from pygame.locals import *
import math
from Variables_pour_importer import Objet, Types, Bouton
from Variables_pour_importer import horloge, ouvrir_terrain, terrain_vierge, enregistrer_terrain, arrondir, IA_missile_besoin_tourner, get_sign
fps = 40  # target frames per second for the game clock
class Fenetre:
    """Top-level application state: which screen/loop is active."""

    def __init__(self):
        # All flags start in "window open, playing, not in map editor" state.
        self.open_window = True
        self.maping = False
        self.jouer = True
class Terrain():
    """A playing field, loaded from a numbered terrain file.

    (This docstring was previously a stray string statement placed after
    ``__init__``; moved to the conventional position.)
    """

    def __init__(self, position_vert, position_rouge, position_bleu, fond_ecran):
        # Hard-coded terrain number; the interactive prompt is disabled.
        self.numero = "5" #input("Quel terrain ?")
        # NOTE(review): the position_* constructor arguments are ignored and
        # reset to "" here — presumably ouvrir_terrain() fills them in from
        # the terrain file; confirm before relying on the arguments.
        self.position_vert = self.position_rouge = self.position_bleu = ""
        self.fond_ecran = fond_ecran
        self.contenu = []
        # Populates this object from the terrain file on disk.
        ouvrir_terrain(self)
class Personnage(pygame.sprite.Sprite):
    def __init__(self,position,type,sprinter,fighter,tank,image_attak,image_explosion,image_piege,numero_manette,faiblesse,faible):
        """Create a player character of the given ``type`` ("sprinter",
        "fighter", anything else = tank) at pixel ``position``.

        The sprite sheets (``sprinter``/``fighter``/``tank``), projectile
        image strip, explosion image and trap image are passed in preloaded.
        The sprite registers itself in the global ``groupe_pers`` group.

        NOTE(review): ``numero_manette`` is not used anywhere in this
        constructor; the joystick appears to be attached elsewhere
        (``self.joystick`` is read in ``move``) — confirm.
        """
        pygame.sprite.Sprite.__init__(self)
        # Per-class stats: sprite sheet, hitbox size, speed, health, special.
        if type == "sprinter":
            self.image = sprinter
            self.rect = pygame.Rect(position[0],position[1],40,52)
            self.vitesse = 5
            self.vie = self.full_vie = 45
            self.attaque_speciale = "missile"
        elif type == "fighter":
            self.image = fighter
            self.rect = pygame.Rect(position[0],position[1],60,48)
            self.vitesse = 2.5
            self.vie = self.full_vie = 60
            self.attaque_speciale = "bombe"
        else:
            self.image = tank
            self.rect = pygame.Rect(position[0],position[1],56,60)
            self.vitesse = 2
            self.vie = self.full_vie = 100
            self.attaque_speciale = "glace"
        # Float position (the Rect only holds ints) and physics state.
        self.left, self.top = position[0], position[1]
        self.faible = faible
        self.faiblesse = faiblesse
        self.dy = self.dx = self.saut = self.temps_air = 0
        self.state = "STANDING"
        self.regarde = -1
        self.tomber = 0
        self.y_max = 0
        self.val_haut = 1
        self.acceleration = self.ralentissement = 1
        self.empoisonne = 0
        # Animation state: current frame index and frame counter.
        self.index_img = self.count = 0
        # Slice the sheet into {direction: [4 animation frames]}; row 0 is
        # direction -1, row 1 is direction +1.
        self.image = dict([(direction,[self.image.subsurface(x,y,self.rect.width,self.rect.height)for x in range(0,self.rect.width*4,self.rect.width)]) for direction,y in zip((-1,1),range(0,self.rect.height*2,self.rect.height))])
        self.direction = -1
        # Projectile animation frames: four 20x20 tiles from the strip.
        self.image_attak = [image_attak.subsurface(index,0,20,20)for index in range(0,80,20)]
        self.image_explosion = image_explosion
        self.image_piege = image_piege
        # Per-weapon "firing" flags and cooldown counters (in frames).
        self.tirer_attak = self.temps_rechargement_attak = 0
        self.tirer_missile = self.temps_rechargement_missile = 0
        self.tirer_bombe = self.temps_rechargement_bombe = 0
        self.tirer_glace = self.temps_rechargement_glace = self.degel = 0
        self.tirer_piege = self.temps_rechargement_piege = 0
        groupe_pers.add(self)
def move(self):
#Effets blocs
for bloc in groupe_bloc_effet:
if self.rect.colliderect(bloc) and bloc.numero == "axel":
self.acceleration = 2
break
else:
self.acceleration = 1
if self.rect.colliderect(bloc) and bloc.numero == "jump":
if self.rect.left < bloc.rect.left:
diff = self.rect.right - bloc.rect.left
else:
diff = bloc.rect.right - self.rect.left
if diff > 7:
self.val_haut = 3
self.saut = 160000/(27*150**2)
self.state = "JUMPING"
self.temps_air = 0
if self.tomber:
self.tomber = 0
break
#Rencontre piege
for piege in groupe_piege:
if self.rect.colliderect(piege.rect) and piege.vie > 0 and piege.lanceur != self:
piege.vie = 0
if piege.image == piege_vert:
self.empoisonne = 800
elif piege.image == piege_rouge:
self.vie -= 5
elif piege.image == piege_bleu:
self.ralentissement = 3.999
if self.ralentissement > 1:
self.ralentissement -= 0.003
if not self.ralentissement > 3:
self.ralentissement = 1
if self.empoisonne:
if self.empoisonne%100 == 0:
self.vie -= 1
self.empoisonne -= 1
#Deplacement vertical
if self.state == "STANDING":
if self.joystick.get_axis(3) < -0.5:
if self.tomber:
self.temps_air = self.tomber = 0
self.saut = 160000/(27*100**2)
elif self.saut == 0 and self.state == "JUMPING":
if self.temps_air == 0:
self.temps_air = 7.5
self.saut = 160000/(27*85**2)
self.tomber = 1
if self.saut:
self.jump()
self.top += self.dy
self.rect.top = self.top
self.collision(0,self.dy)
#Deplacement horizontal + Animation
self.dx = round(self.joystick.get_axis(2)) * self.vitesse * self.acceleration / int(self.ralentissement)
self.left += self.dx
self.rect.left = self.left
if self.dx:
self.regarde = round(self.joystick.get_axis(2))
else:
self.index_img = 0
self.count = 0
temp = self.rect.copy()
self.collision(self.dx,0)
if self.rect.contains(temp):
if round(self.joystick.get_axis(2)):
self.direction = round(self.joystick.get_axis(2))
self.count += 0.5 / (self.ralentissement *2)
if not self.count < 4:
self.count = 0
self.index_img = int(self.count)
else: self.index_img = 0
else: self.index_img = 0
#Lancement attaque
self.tirer_attak = 1 if math.sqrt(self.joystick.get_axis(0)**2 + self.joystick.get_axis(1)**2) > 0.9 else 0
if self.temps_rechargement_attak == 0 and self.tirer_attak:
Attak(self.rect.centerx,self.rect.centery,round(self.joystick.get_axis(0)*100),round(self.joystick.get_axis(1)*100),self.image_attak,self,self.image_explosion,self.faible)
self.temps_rechargement_attak = 30
elif self.temps_rechargement_attak != 0:
self.temps_rechargement_attak -= 1
#Lancement missile
if self.attaque_speciale == "missile":
self.tirer_missile = 1 if self.joystick.get_button(4) or self.joystick.get_button(5) else 0
if self.temps_rechargement_missile == 0 and self.tirer_missile:
Missile(self.rect.centerx,self.rect.centery,self.regarde,self)
self.temps_rechargement_missile = 150
elif self.temps_rechargement_missile != 0:
self.temps_rechargement_missile -= 1
#Lancement bombe
if self.attaque_speciale == "bombe":
self.tirer_bombe = 1 if self.joystick.get_button(4) or self.joystick.get_button(5) else 0
if self.temps_rechargement_bombe == 0 and self.tirer_bombe:
Bombe(self.rect.centerx,self.rect.centery,self, -1 if self.joystick.get_button(4) else 1)
self.temps_rechargement_bombe = 300
elif self.temps_rechargement_bombe != 0:
self.temps_rechargement_bombe -= 1
#Lancement rayon glace
if self.attaque_speciale == "glace":
self.tirer_glace = 1 if self.joystick.get_button(4) or self.joystick.get_button(5) else 0
if self.temps_rechargement_glace == 0 and self.tirer_glace:
Glace(self.rect.centerx, self.rect.centery, self)
self.temps_rechargement_glace = 100
elif self.temps_rechargement_glace != 0:
self.temps_rechargement_glace -= 1
#Lancement piege
self.tirer_piege = 1 if self.joystick.get_button(6) or self.joystick.get_button(7) else 0
if self.temps_rechargement_piege == 0 and self.tirer_piege:
Piege(self.rect.centerx, self.rect.centery,self.image_piege,self)
self.temps_rechargement_piege = 100
elif self.temps_rechargement_piege != 0:
self.temps_rechargement_piege -= 1
#Detection etat
self.statement()
if self.state == "STANDING":
if self.dy > 0:
print(self.temps_air)
self.dy = self.temps_air = self.saut = self.tomber = self.y_max = 0
self.val_haut = 1
def collision(self,dx,dy):
for bloc in groupe_bloc:
if self.rect.colliderect(bloc.rect):
if dx and self.state == "STANDING":
ecart = self.rect.bottom - bloc.rect.top
if ecart < 16:
self.rect.bottom -= ecart
self.top = self.rect.top
if self.test_collision():
self.rect.bottom += ecart
self.top = self.rect.top
self.bloquage(dx,dy,bloc)
else:
self.bloquage(dx,dy,bloc)
else:
self.bloquage(dx,dy,bloc)
def test_collision(self):
for bloc in groupe_bloc:
if self.rect.colliderect(bloc.rect):
return True ; break
    def bloquage(self,dx,dy,mur):
        """Clamp the sprite against wall `mur` after a collision.

        dx/dy are the attempted horizontal/vertical displacements; only
        their signs are used to know which side of the wall was hit.
        """
        # Horizontal hit: flush the sprite against the facing wall edge.
        if dx > 0:
            self.rect.right = mur.rect.left
            self.left = self.rect.left
        elif dx < 0:
            self.rect.left = mur.rect.right
            self.left = self.rect.left
        if dy > 0:
            # Falling onto a "slim" (trampoline) block while jumping:
            # rebound with a jump strength derived from the fall height.
            if mur.numero == "slim" and self.state == "JUMPING":
                y_chute = self.rect.bottom - self.y_max
                # Empirical rebound curve -- TODO confirm the 160000/27 constants.
                self.saut = 160000/(27*(0.5*y_chute)**2)
                self.temps_air = 0
                self.val_haut = 1
                self.rect.bottom += 1
                self.top = self.rect.top
                self.tomber = 0
                self.y_max = 0
            # Land on top of the wall.
            self.rect.bottom = mur.rect.top
            self.top = self.rect.top
        elif dy < 0:
            # Head bump: stop just under the wall and fast-forward the jump
            # timer into its falling phase.
            self.rect.top = mur.rect.bottom
            self.top = self.rect.top
            # NOTE(review): divides by sqrt(3*self.saut) -- assumes saut != 0
            # whenever the sprite is moving upward; confirm against callers.
            self.temps_air = 11/math.sqrt(3*self.saut)
def jump(self):
self.dy = (self.saut*self.temps_air**3/-5 + 20*self.temps_air)*self.val_haut
self.temps_air += 0.125
self.dy -= (self.saut*self.temps_air**3/-5 + 20*self.temps_air)*self.val_haut
#Creation variable pour calculer la hauteur du saut
if self.dy > 0 and not self.y_max:
self.y_max = self.rect.bottom
print(self.temps_air)
def statement(self):
collide = False
temp = self.rect.copy()
temp.top += 1
for bloc in groupe_bloc:
if temp.colliderect(bloc.rect) and not(self.state == "JUMPING" and bloc.numero == "slim" and self.saut < 50):
self.state = "STANDING"
collide = True
break # pas besoin de regarder les autres murs
if not collide:
self.state = "JUMPING"
"""
Classe qui definit les personnages
"""
#_______________________________________________________________________________
#Function that takes a screenshot
def screenshot():
    """Save the playable area of the screen to screenshot.png."""
    width = (len(terrain.contenu[0]) - 3) * 30
    height = (len(terrain.contenu) - 3) * 30
    zone = ecran.subsurface(pygame.Rect(0, 0, width, height))
    pygame.image.save(zone, "screenshot.png")
#Function used for rendering in the Maping (map editor) window
def affichage(carre):
    """Redraw the map-editor window.

    Draws the background, every sprite group, the selection square `carre`,
    then the effect blocks (only their top-left 30x30 tile), and flips the
    display.
    """
    ecran.blit(terrain.fond_ecran, (0,0))
    for sprite_group in (groupe_bloc, groupe_bouton_bloc, groupe_bouton_maping):
        sprite_group.draw(ecran)
    ecran.blit(bloc_select, carre.topleft)
    for effet in groupe_bloc_effet:
        ecran.blit(effet.image.subsurface(0, 0, 30, 30), (effet.rect.left, effet.rect.top))
    pygame.display.flip()
#Function that adds a block to the map
def ajout(groupe_bloc,groupe_bloc_effet,case,coord_x,coord_y):
    """Add the block(s) matching `case` at (coord_x, coord_y).

    Solid tiles go into `groupe_bloc` (stairs are built from two pieces);
    effect tiles ("jump", "axel") go into `groupe_bloc_effet`. Both groups
    are returned, as before.
    """
    # (x offset, y offset, width, height, image) for each solid tile kind
    solid_specs = {
        "bloc": [(0, 0, 30, 30, bloc_image)],
        "grnd": [(0, 0, 60, 60, ground)],
        "escR": [(0, 0, 15, 15, stair_right), (0, 15, 30, 15, slab_down)],
        "escL": [(15, 0, 15, 15, stair_left), (0, 15, 30, 15, slab_down)],
        "dalU": [(0, 0, 30, 15, slab_up)],
        "dalD": [(0, 15, 30, 15, slab_down)],
        "slim": [(0, 0, 30, 30, trampo)],
    }
    effect_specs = {
        "jump": jumper_image,
        "axel": accelerator_image,
    }
    if case in solid_specs:
        for off_x, off_y, w, h, image in solid_specs[case]:
            groupe_bloc.add(Objet(coord_x + off_x, coord_y + off_y, w, h, image, case))
    elif case in effect_specs:
        groupe_bloc_effet.add(Objet(coord_x, coord_y, 30, 30, effect_specs[case], case))
    return groupe_bloc,groupe_bloc_effet
#Function that saves the changes made to the map
def sauvegarder():
    """Save the edited map: a PNG preview plus the serialized grid."""
    largeur = (len(terrain.contenu[0]) - 2) * 30
    hauteur = (len(terrain.contenu) - 4) * 30
    apercu = ecran.subsurface(pygame.Rect(0, 0, largeur, hauteur))
    pygame.image.save(apercu, "terrains/terrain"+terrain.numero+".png")
    # Rebuild an empty grid, then stamp every placed block into it
    # (grid coordinates are offset by 1 from pixel/30 coordinates).
    terrain.contenu = terrain_vierge(terrain.contenu)
    for sprite_group in (groupe_bloc, groupe_bloc_effet):
        for bloc in sprite_group:
            ligne = int(bloc.rect.top/30) + 1
            colonne = int(bloc.rect.left/30) + 1
            terrain.contenu[ligne][colonne] = bloc.numero
    # Persist the updated grid to disk.
    enregistrer_terrain(terrain)
#Function that clears all objects except the blocks and buttons.
def vide():
    """Remove every dynamic sprite (characters, projectiles, traps).

    Fix: the original removed sprites from each group while iterating it;
    pygame's Group.empty() clears the whole group in one safe call.
    """
    for sprite_group in (groupe_pers, groupe_attak, groupe_missile, groupe_bombe, groupe_piege):
        sprite_group.empty()
#Function that initializes the characters according to the number of joysticks
def start():
    """Reset the arena and (re)build the platform geometry.

    Clears all dynamic sprites via vide(), spawns one green probe character,
    and fills two module-level globals:
      - groupe_lignes: hard-coded horizontal platform segments for this map,
        as ((x1, y), (x2, y)) pixel pairs
      - groupe_chute: pairs [ligne, ligne2] where stepping off one end of
        `ligne` leads onto `ligne2` 15 px lower -- presumably used for
        AI/pathing; TODO confirm against callers
    """
    vide()
    robot = Personnage((0,0), type.vert, vert_sprinter, vert_fighter, vert_tank, attak_vert, explosion_vert, piege_vert, None, "rouge", "bleu")
    global groupe_lignes, groupe_chute
    groupe_lignes = [ ((101, 34), (184, 34)) , ((185, 49), (199, 49)) , ((821, 49), (940, 49)) , ((200, 64), (289, 64)) , ((386, 124), (529, 124)) , ((551, 124), (754, 124)) , ((371, 139), (385, 139)) , ((755, 139), (769, 139)) , ((356, 154), (370, 154)) , ((341, 169), (355, 169)) , ((326, 184), (340, 184)) , ((311, 199), (325, 199)) , ((296, 214), (310, 214)) , ((161, 229), (295, 229)) , ((551, 304), (724, 304)) , ((725, 319), (739, 319)) , ((740, 334), (754, 334)) , ((755, 349), (769, 349)) , ((26, 394), (79, 394)) , ((176, 394), (244, 394)) , ((266, 394), (334, 394)) , ((161, 409), (175, 409)) , ((245, 409), (265, 409)) , ((335, 409), (349, 409)) , ((80, 424), (160, 424)) , ((350, 424), (940, 424)) ]
    # generated with the help of part 1 of the "fonctions a copier" module
    groupe_chute = []
    # Probe one pixel beyond a segment endpoint on each side (i = 1 / -1).
    for i in 1, -1:
        for ligne in groupe_lignes:
            robot.rect.center = ligne[0]
            robot.rect.centerx -= i
            collide = False
            for bloc in groupe_bloc:
                if robot.rect.colliderect(bloc):
                    collide = True
            if collide == False:
                # The probe position is free: look for a segment 15 px lower
                # whose end is horizontally adjacent on that side.
                for ligne2 in groupe_lignes:
                    if ligne[0][0] - ligne2[1][0] == i:
                        if ligne[0][1] - ligne2[0][1] == -15:
                            groupe_chute.append([ligne, ligne2])
                    elif ligne[1][0] - ligne2[0][0] == i:
                        if ligne[0][1] - ligne2[0][1] == -15:
                            groupe_chute.append([ligne, ligne2])
| |
# get abc posterior
theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
abc_run(sim.lower()), 'theta.t%i.dat' % nabc[sim.lower()]))
theta_median = np.median(theta_T, axis=0)
theta_meds.append(theta_median)
# get sims posterior
_, _sim_sed, _ = _sim_observables(sim.lower(), theta_median)
sim_seds.append(_sim_sed)
fig = plt.figure(figsize=(11,8))
# SF or Q
for isfq, _sfq in enumerate(['star-forming', 'quiescent']):
# low or high mass
for im, _m in enumerate(['low mass', 'high mass']):
sub = fig.add_subplot(2,2, 2 * im + isfq + 1)
for i, sim in enumerate(['TNG', 'EAGLE']):
# get abc posterior
theta_median = theta_meds[i]
_sim_sed = sim_seds[i]
mstar = _sim_sed['logmstar']
sfr = _sim_sed['logsfr.inst']
ssfr = sfr - mstar
assert sfr.min() != -999
# M* and SFR
if _m == 'low mass':
mlim = (mstar < 11)
elif _m == 'high mass':
mlim = (mstar >= 11)
if _sfq == 'star-forming':
ssfrlim = (ssfr > -11)
#sfrlim = (sfr > 0.5)
elif _sfq == 'quiescent':
ssfrlim = (ssfr < -11)
#sfrlim = (sfr < -0.5)
# subpopulation sample cut
subpop = mlim & ssfrlim
# get attenuation curve
_A_lambda = dem_attenuate(
theta_median,
wave,
np.ones(len(wave)),
mstar[subpop],
sfr[subpop])#, nebular=False)
A_lambda = -2.5 * np.log10(_A_lambda)
Al_1m, Al_med, Al_1p = np.quantile(A_lambda, [0.16, 0.5, 0.84], axis=0)
sub.fill_between(wave, Al_1m, Al_1p, color=clrs[sim.lower()],
alpha=0.25, linewidth=0, label=sim)
sub.plot(wave, Al_med, c=clrs[sim.lower()])
sub.set_xlim(1.2e3, 1e4)
if im == 0: sub.set_xticklabels([])
sub.set_ylim(0., 8.)
if isfq == 1: sub.set_yticklabels([])
if im == 0 and isfq == 0:
sub.set_title(r'Star-forming ($\log {\rm SSFR} > -11$)', fontsize=20)
if im == 0 and isfq == 1:
sub.set_title(r'Quiescent ($\log {\rm SSFR} < -11$)', fontsize=20)
sub.legend(loc='upper right', handletextpad=0.2, fontsize=20)
sub.text(1.01, 0.5, r'$\log M_*/M_\odot < 11$',
transform=sub.transAxes, ha='left', va='center',
rotation=270, fontsize=20)
if im == 1 and isfq == 1:
sub.text(1.01, 0.5, r'$\log M_*/M_\odot > 11$',
transform=sub.transAxes, ha='left', va='center',
rotation=270, fontsize=20)
bkgd = fig.add_subplot(111, frameon=False)
bkgd.set_xlabel(r'Wavelength [$\AA$]', labelpad=5, fontsize=20)
bkgd.set_ylabel(r'$A(\lambda)$', labelpad=10, fontsize=25)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
fig.subplots_adjust(wspace=0.1, hspace=0.1)
ffig = os.path.join(fig_dir, 'abc_attenuation_unormalized.png')
fig.savefig(ffig, bbox_inches='tight')
fig.savefig(fig_tex(ffig, pdf=True), bbox_inches='tight')
plt.close()
return None
def ABC_tnorm_Observables():
    ''' figure presenting the ABC posterior observables for tnorm models

    Compares the color--magnitude distributions (M_r vs G-R and M_r vs
    FUV-NUV) of SDSS centrals against SIMBA and TNG forward models with the
    median ABC posterior of the tnorm_noll_msfr DEM applied, then plots the
    1D marginals (luminosity function and color histograms). Figures are
    written to `fig_dir` as PNG and paper-ready PDF.

    Fixes: the TNG color histograms applied a bright-end cut to the data but
    not to the `weights` array, which raises a length-mismatch error in
    `Axes.hist` whenever any galaxy fails the cut; also removed unused bin
    widths (dr/dgr/dfn) and a dead commented-out loop header.
    '''
    #########################################################################
    # read in SDSS measurements
    #########################################################################
    r_edges, gr_edges, fn_edges, _ = dustInfer.sumstat_obs(name='sdss',
            statistic='2d', return_bins=True)
    # plotting ranges for [-M_r, G-R, FUV-NUV]
    ranges = [(r_edges[0], r_edges[-1]), (-0.05, 1.5), (-1., 4.)]

    fsdss = os.path.join(dat_dir, 'obs', 'tinker_SDSS_centrals_M9.7.valueadd.hdf5')
    sdss = h5py.File(fsdss, 'r')
    # completeness cut on r-band absolute magnitude
    mr_complete = (sdss['mr_tinker'][...] < -20.)

    # observables: magnitudes are sign-flipped so brighter = larger x
    # (axes are relabeled -20..-23 below)
    x_obs = [-1.*sdss['mr_tinker'][...][mr_complete],
            sdss['mg_tinker'][...][mr_complete] - sdss['mr_tinker'][...][mr_complete],
            sdss['ABSMAG'][...][:,0][mr_complete] - sdss['ABSMAG'][...][:,1][mr_complete]]
    # SDSS has no SFR=0 subsample to flag
    sfr0_obs = np.zeros(len(x_obs[0])).astype(bool)
    #########################################################################
    # read in simulations without dust attenuation
    #########################################################################
    # median ABC posteriors of the tnorm_noll_msfr DEM for each simulation
    theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
        'simba.tnorm_noll_msfr.L2.3d', 'theta.t6.dat'))
    theta_simba = np.median(theta_T, axis=0)
    theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
        'tng.tnorm_noll_msfr.L2.3d', 'theta.t5.dat'))
    theta_tng = np.median(theta_T, axis=0)

    x_simba, sfr0_simba = _sim_observables('simba', theta_simba,
            model='tnorm', zero_sfr_sample=True)
    x_tng, sfr0_tng = _sim_observables('tng', theta_tng,
            model='tnorm', zero_sfr_sample=True)
    #########################################################################
    # plotting
    #########################################################################
    xs = [x_obs, x_simba, x_tng]
    names = ['SDSS', 'SIMBA (w/ DEM)', 'TNG (w/ DEM)']
    clrs = ['k', 'C1', 'C0']
    sfr0s = [sfr0_obs, sfr0_simba, sfr0_tng]

    fig = plt.figure(figsize=(5*len(xs),10))
    for i, _x, _sfr0, name, clr in zip(range(len(xs)), xs, sfr0s, names, clrs):
        # R vs (G - R)
        sub = fig.add_subplot(2,len(xs),i+1)
        DFM.hist2d(_x[0], _x[1], levels=[0.68, 0.95],
                range=[ranges[0], ranges[1]], bins=20, color=clrs[i],
                plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
        #sub.scatter(_x[0][_sfr0], _x[1][_sfr0], c='k', s=1)
        sub.text(0.95, 0.95, name, ha='right', va='top', transform=sub.transAxes, fontsize=25)
        sub.set_xlim(20., 23)
        sub.set_xticks([20., 21., 22., 23])
        sub.set_xticklabels([])
        if i == 0:
            sub.set_ylabel(r'$G-R$', fontsize=20)
        else:
            sub.set_yticklabels([])
        sub.set_ylim(ranges[1])
        sub.set_yticks([0., 0.5, 1.])

        # R vs FUV-NUV
        sub = fig.add_subplot(2,len(xs),i+len(xs)+1)
        DFM.hist2d(_x[0], _x[2], levels=[0.68, 0.95],
                range=[ranges[0], ranges[2]], bins=20, color=clrs[i],
                plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
        #sub.scatter(_x[0][_sfr0], _x[2][_sfr0], c='k', s=1)
        sub.set_xlim(20., 23)
        sub.set_xticks([20., 21., 22., 23])
        # tick labels show the true (negative) magnitudes
        sub.set_xticklabels([-20, -21, -22, -23])
        if i == 0:
            sub.set_ylabel(r'$FUV - NUV$', fontsize=20)
        else:
            sub.set_yticklabels([])
        sub.set_ylim(ranges[2])

    bkgd = fig.add_subplot(111, frameon=False)
    bkgd.set_xlabel(r'$M_r$ luminosity', labelpad=10, fontsize=25)
    bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
    fig.subplots_adjust(wspace=0.1, hspace=0.1)
    ffig = os.path.join(fig_dir, 'abc_tnorm_observables.png')
    fig.savefig(ffig, bbox_inches='tight')
    fig.savefig(fig_tex(ffig, pdf=True), bbox_inches='tight')
    plt.close()

    # ---------------------------------------------------------------------
    # 1D marginals: luminosity function + color histograms
    # ---------------------------------------------------------------------
    obs_lims = [(20, 22.5), (0.2, 1.5), (-0.5, 4)]
    obs_lbls = [r'$M_r$ luminosity', '$G - R$', '$FUV - NUV$']
    yobs_lbls = [r'central luminosity function, $\Phi^{\rm cen}_{M_r}$', '$p(G - R)$', '$p(FUV - NUV)$']

    fig = plt.figure(figsize=(15,4))
    for i in range(3):
        sub = fig.add_subplot(1,3,i+1)
        if i == 0:
            # luminosity function: comoving number density per magnitude bin
            mr_bin = np.linspace(20, 23, 7)
            dmr = mr_bin[1:] - mr_bin[:-1]

            Ngal_simba, _ = np.histogram(x_simba[i], bins=mr_bin)
            Ngal_tng, _ = np.histogram(x_tng[i], bins=mr_bin)

            phi_simba   = Ngal_simba.astype(float) / vol_simba / dmr
            phi_tng     = Ngal_tng.astype(float) / vol_tng / dmr

            sub.plot(0.5*(mr_bin[1:] + mr_bin[:-1]), phi_simba, c='C1')
            sub.plot(0.5*(mr_bin[1:] + mr_bin[:-1]), phi_tng, c='C0')

            fsdss = os.path.join(dat_dir, 'obs',
                    'tinker_SDSS_centrals_M9.7.phi_Mr.dat')
            mr_low, mr_high, phi_sdss, err_phi_sdss = np.loadtxt(fsdss, unpack=True)
            sub.errorbar(-0.5*(mr_low + mr_high), phi_sdss, yerr=err_phi_sdss,
                    fmt='.k', label='SDSS Centrals')
            sub.set_yscale('log')
            sub.set_ylim(5e-5, 8e-3)
        else:
            _ = sub.hist(x_simba[i],
                    weights=np.repeat(1./vol_simba, len(x_simba[i])),
                    range=ranges[i], bins=20, color='C1', linewidth=2, histtype='step')
            # bright-end cut on TNG; the weight array must match the
            # *filtered* sample length (the original repeated the weight
            # len(x_tng[i]) times, which raises in Axes.hist whenever any
            # galaxy fails the M_r cut)
            bright_tng = (x_tng[0] > 20)
            _ = sub.hist(x_tng[i][bright_tng],
                    weights=np.repeat(1./vol_tng, np.sum(bright_tng)),
                    range=ranges[i], bins=20, color='C0', linewidth=2, histtype='step')
            _ = sub.hist(x_obs[i],
                    weights=np.repeat(1./vol_sdss, len(x_obs[i])),
                    range=ranges[i], bins=20, color='k',
                    linestyle='--', linewidth=2, histtype='step')
        sub.set_xlabel(obs_lbls[i], fontsize=20)
        sub.set_xlim(obs_lims[i])
        sub.set_ylabel(yobs_lbls[i], fontsize=20)
    fig.subplots_adjust(wspace=0.6)
    ffig = os.path.join(fig_dir, 'abc_tnorm_observables.1d.png')
    fig.savefig(ffig, bbox_inches='tight')
    fig.savefig(fig_tex(ffig, pdf=True), bbox_inches='tight')
    plt.close()
    return None
def ABC_Lir():
    ''' compare L_IR predicted by the ABC posterior dust attenuation

    For each simulation, attenuate the SEDs with the median ABC posterior
    DEM parameters and estimate the IR dust luminosity via the energy
    balance assumption of da Cunha+(2008): L_IR = L(no dust) - L(dust).
    Plots log L_IR against M_r and saves the figure as PDF.

    Fixes: removed unused local `lsun`; iterate with enumerate(zip(...))
    instead of a hard-coded range(3).
    '''
    cinA = 2.9979e18 # speed of light [A/s]

    #########################################################################
    # read in simulations without dust attenuation
    #########################################################################
    def get_seds(sim, theta):
        # read SEDs, replace SFR=0 sentinels (-999) with the minimum SFR,
        # apply the logM* > 9.4 cut, and return wavelength, unattenuated and
        # attenuated SEDs, plus the r-band absolute magnitude
        sed = dustInfer._read_sed(sim)
        zerosfr = (sed['logsfr.inst'] == -999)
        logsfr_min = sed['logsfr.inst'][~zerosfr].min() # minimum SFR
        sed['logsfr.inst'][zerosfr] = logsfr_min

        cuts = (sed['logmstar'] > 9.4)

        sed_nodust = sed['sed_noneb'][cuts,:]
        sed_dust = dustFM.Attenuate(
                theta,
                sed['wave'],
                sed['sed_noneb'][cuts,:],
                sed['sed_onlyneb'][cuts,:],
                sed['logmstar'][cuts],
                sed['logsfr.inst'][cuts],
                dem=dem)
        R_mag = measureObs.AbsMag_sed(sed['wave'], sed_dust, band='r_sdss')
        return sed['wave'], sed_nodust, sed_dust, R_mag

    L_irs, M_rs = [], []
    for sim in sims:
        # median ABC posterior for this simulation
        theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
            abc_run(sim.lower()), 'theta.t%i.dat' % nabc[sim.lower()]))
        theta_med = np.median(theta_T, axis=0)

        wave, sed_nodust, sed_dust, M_r = get_seds(sim.lower(), theta_med)

        # bolometric luminosities with and without attenuation [Lsun]
        L_nodust = measureObs.tsum(wave, sed_nodust * cinA / (wave**2))
        L_dust = measureObs.tsum(wave, sed_dust * cinA / (wave**2))

        # L_ir based on energy balance assumption of da Cunha+(2008)
        L_ir = L_nodust - L_dust
        # drop galaxies with non-positive L_IR (log10 -> NaN)
        notnan = ~np.isnan(np.log10(L_ir))
        L_irs.append(L_ir[notnan])
        M_rs.append(-1.*M_r[notnan])
    #########################################################################
    # plotting
    #########################################################################
    fig = plt.figure(figsize=(6,6))
    sub = fig.add_subplot(111)
    # enumerate over the actual simulation list instead of range(3) so the
    # loop stays consistent with however many entries `sims` holds
    for i, (_M_r, _L_ir, sim) in enumerate(zip(M_rs, L_irs, sims)):
        # M_r vs log L_IR contours; the first simulation uses DFM directly,
        # the rest use the transparent-contour workaround
        if i == 0:
            DFM.hist2d(_M_r, np.log10(_L_ir), range=[(20, 23), (5, 12)],
                    levels=[0.68, 0.95], bins=30, smooth=True, color=clrs[sim.lower()],
                    plot_datapoints=False, fill_contours=True, plot_density=False,
                    contour_kwargs={'linewidths': 0}, ax=sub)
        else:
            _hist2d_hack(_M_r, np.log10(_L_ir), range=[(20, 23), (5, 12)],
                    levels=[0.68, 0.95], bins=30, color=clrs[sim.lower()],
                    alpha=0.33, smooth=True, sub=sub)

    # empty fill_between handles exist only to build the legend entries
    _plt0 = sub.fill_between([], [], [], color=clrs['simba'], alpha=0.25, edgecolor='none')
    _plt1 = sub.fill_between([], [], [], color=clrs['tng'], alpha=0.25, edgecolor='none')
    _plt2 = sub.fill_between([], [], [], color=clrs['eagle'], alpha=0.25, edgecolor='none')
    sub.legend([_plt0, _plt1, _plt2], ['SIMBA', 'TNG', 'EAGLE'], loc='lower right',
            handletextpad=0.2, fontsize=20)
    sub.set_xlim(20., 23)
    sub.set_xticks([20., 21., 22., 23])
    # tick labels show the true (negative) magnitudes
    sub.set_xticklabels([-20, -21, -22, -23])
    sub.set_ylabel(r'IR dust emission $\log(~L_{\rm IR}$ [$L_\odot$] )', fontsize=25)
    sub.set_xlabel(r'$M_r$ luminosity', fontsize=25)
    sub.set_ylim(4.5, 12)

    ffig = os.path.join(fig_dir, 'abc_Lir.png')
    # NOTE(review): PNG output is disabled here (unlike sibling figures);
    # only the paper PDF is written -- confirm this is intentional.
    #fig.savefig(ffig, bbox_inches='tight')
    fig.savefig(fig_tex(ffig, pdf=True), bbox_inches='tight')
    plt.close()
    return None
def _ABC_Lir_subpop():
''' compare L_IR predicted by the ABC posterior dust attenuation
'''
cinA = 2.9979e18 # A/s
lsun = 3.839e33 # erg/s
#########################################################################
# read in simulations without dust attenuation
#########################################################################
def get_seds(sim, theta):
sed = dustInfer._read_sed(sim)
zerosfr = (sed['logsfr.inst'] == -999)
logsfr_min = sed['logsfr.inst'][~zerosfr].min() # minimum SFR
sed['logsfr.inst'][zerosfr] = logsfr_min
cuts = (sed['logmstar'] > 9.4)
sed_nodust = sed['sed_noneb'][cuts,:]
sed_dust = dustFM.Attenuate(
theta,
sed['wave'],
sed['sed_noneb'][cuts,:],
sed['sed_onlyneb'][cuts,:],
sed['logmstar'][cuts],
sed['logsfr.inst'][cuts],
dem=dem)
R_mag = measureObs.AbsMag_sed(sed['wave'], sed_dust, band='r_sdss')
return sed['wave'], |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.