# NOTE: extraction artifact (dataset table header) removed; file content begins below.
row = tdata.pop(0)
if not row:
continue
row = row.split(",")
# neglect if error string
if len(row) < 2:
continue
s1, s2 = row[0].split(':', 1)
if not s1.isdigit():
m = re.search(ns_hist_pattern, s1)
if m:
ns = m.group(1)
hist_name = m.group(2)
else:
ns = None
hist_name = s1
if ns_set and (not ns or ns not in ns_set):
hist_name = None
continue
columns = row[1:]
start_time = s2
start_time = util.remove_suffix(start_time, "-GMT")
columns.insert(0, 'Time Span')
continue
if not hist_name or not start_time:
continue
try:
end_time = row.pop(0)
end_time = util.remove_suffix(end_time, "-GMT")
row = [float(r) for r in row]
row.insert(0, "%s->%s" % (start_time, end_time))
if hist_name not in data:
data[hist_name] = {}
if ns:
ns_key = "namespace"
if ns_key not in data[hist_name]:
data[hist_name][ns_key] = {}
if ns not in data[hist_name][ns_key]:
data[hist_name][ns_key][ns] = {}
data[hist_name][ns_key][ns]["columns"] = columns
data[hist_name][ns_key][ns]["values"] = []
data[hist_name][ns_key][ns][
"values"].append(copy.deepcopy(row))
if total_key not in data[hist_name]:
data[hist_name][total_key] = {}
data[hist_name][total_key]["columns"] = columns
data[hist_name][total_key]["values"] = []
data[hist_name][total_key]["values"] = self._update_total_latency(
data[hist_name][total_key]["values"], row)
start_time = end_time
except Exception:
pass
return data
@return_exceptions
def info_dcs(self):
    """
    Get a list of datacenters for this node. asinfo -v "dcs" -p 3004

    Returns:
    list -- list of dcs
    """
    # Servers with xdr-in-asd answer on the service port; otherwise use
    # the dedicated XDR port.
    source = self.info if self.is_feature_present('xdr') else self.xdr_info
    return util.info_to_list(source("dcs"))
@return_exceptions
def info_dc_statistics(self, dc):
    """
    Get statistics for a datacenter.

    Returns:
    dict -- {stat_name : stat_value, ...}
    """
    request = "dc/%s" % dc
    if self.is_feature_present('xdr'):
        raw = self.info(request)
    else:
        raw = self.xdr_info(request)
    return util.info_to_dict(raw)
@return_exceptions
def info_all_dc_statistics(self):
    """Collect statistics for every known datacenter on this node.

    Returns:
    dict -- {dc_name: {stat_name: stat_value, ...}, ...}
    """
    dcs = self.info_dcs()
    if isinstance(dcs, Exception):
        return {}
    stats = {}
    for dc in dcs:
        dc_stats = self.info_dc_statistics(dc)
        # Normalize failures so callers always receive a dict per DC.
        if not dc_stats or isinstance(dc_stats, Exception):
            dc_stats = {}
        stats[dc] = dc_stats
    return stats
@return_exceptions
def info_udf_list(self):
    """
    Get config for a udf.

    Returns:
    dict -- {file_name1:{key_name : key_value, ...}, file_name2:{key_name : key_value, ...}}
    """
    raw = self.info('udf-list')
    # An empty response means no UDFs are registered on this node.
    if not raw:
        return {}
    return util.info_to_dict_multi_level(raw, "filename", delimiter2=',')
@return_exceptions
def info_dc_get_config(self):
    """
    Get config for a datacenter.

    Returns:
    dict -- {dc_name1:{config_name : config_value, ...}, dc_name2:{config_name : config_value, ...}}
    """
    if self.is_feature_present('xdr'):
        configs = self.info("get-dc-config")
        # Some server versions require the trailing colon form; retry.
        if not configs or isinstance(configs, Exception):
            configs = self.info("get-dc-config:")
    else:
        configs = self.xdr_info("get-dc-config")
    if not configs or isinstance(configs, Exception):
        return {}
    return util.info_to_dict_multi_level(
        configs, ["dc-name", "DC_Name"],
        ignore_field_without_key_value_delimiter=False)
@return_exceptions
def info_XDR_get_config(self):
    """Return XDR configuration, merging XDR-port data into service-port data
    on old servers."""
    xdr_configs = self.info_get_config(stanza='xdr')
    # For new aerospike versions (>=3.8) with xdr-in-asd, config from the
    # service port is sufficient.
    if self.is_feature_present('xdr'):
        return xdr_configs
    # Old aerospike server versions (<3.8) keep config on the XDR port.
    raw = self.xdr_info('get-config')
    if not raw or isinstance(raw, Exception):
        return xdr_configs
    parsed = util.info_to_dict(raw)
    if not parsed or isinstance(parsed, Exception):
        return xdr_configs
    if xdr_configs and not isinstance(xdr_configs, Exception):
        xdr_configs.update(parsed)
        return xdr_configs
    return parsed
@return_exceptions
def info_histogram(self, histogram, raw_output=False):
    """
    Get a histogram dump (e.g. ttl, objsz) for every namespace on this node.

    Arguments:
    histogram -- histogram name to fetch.

    Keyword Arguments:
    raw_output -- if True, store the unparsed server response per namespace.

    Returns:
    dict -- {namespace: raw_string} when raw_output, otherwise
            {namespace: {'histogram': name, 'width': bucket_width,
                         'data': [bucket_count, ...]}}
    """
    namespaces = self.info_namespaces()
    data = {}
    for namespace in namespaces:
        try:
            datum = self.info("hist-dump:ns=%s;hist=%s" %
                              (namespace, histogram))
            if raw_output:
                data[namespace] = datum
            else:
                datum = datum.split(',')
                datum.pop(0)  # don't care about ns, hist_name, or length
                width = int(datum.pop(0))
                datum[-1] = datum[-1].split(';')[0]
                # Materialize to a list: on Python 3, map() returns a lazy
                # iterator that would be exhausted after a single read.
                datum = list(map(int, datum))
                data[namespace] = {'histogram': histogram, 'width': width, 'data': datum}
        except Exception:
            # Histogram may not exist for this namespace; skip it.
            pass
    return data
@return_exceptions
def info_sindex(self):
    """Return the secondary indexes defined on this node.

    Returns:
    list -- one {field: value} dict per sindex.
    """
    # The server response ends with a trailing status entry; drop it.
    entries = util.info_to_list(self.info("sindex"))[:-1]
    return [util.info_to_dict(entry, ':') for entry in entries]
@return_exceptions
def info_sindex_statistics(self, namespace, indexname):
    """
    Get statistics for a sindex.

    Returns:
    dict -- {stat_name : stat_value, ...}
    """
    request = "sindex/%s/%s" % (namespace, indexname)
    return util.info_to_dict(self.info(request))
@return_exceptions
def info_XDR_build_version(self):
    """
    Get Build Version for XDR

    Returns:
    string -- build version
    """
    # xdr-in-asd (server >= 3.8) answers on the service port; otherwise
    # ask the standalone XDR process on its own port.
    if not self.is_feature_present('xdr'):
        return self.xdr_info('build')
    return self.info('build')
def _set_default_system_credentials(self, default_user=None, default_pwd=None, default_ssh_key=None,
default_ssh_port=None, credential_file=None):
if default_user:
self.sys_default_user_id = default_user
if default_pwd:
self.sys_default_pwd = <PASSWORD>
if default_ssh_key:
self.sys_default_ssh_key = default_ssh_key
self.sys_credential_file = None
if credential_file:
self.sys_credential_file = credential_file
if default_ssh_port:
try:
self.sys_default_ssh_port = int(default_ssh_port)
except Exception:
pass
def _set_system_credentials_from_file(self):
if not self.sys_credential_file:
return False
result = False
f = None
try:
try:
f = open(self.sys_credential_file, 'r')
except IOError as e:
self.logger.warning("Ignoring credential file. Can not open credential file. \n%s." %(str(e)))
return result
for line in f.readlines():
if not line or not line.strip():
continue
try:
line = line.strip().replace('\n', ' ').strip().split(",")
if len(line) < 2:
continue
ip = None
port = None
ip_port = line[0].strip()
if not ip_port:
continue
if "]" in ip_port:
# IPv6
try:
ip_port = ip_port[1:].split("]")
ip = ip_port[0].strip()
if len(ip_port) > 1:
# Removing ':' from port
port = int(ip_port[1].strip()[1:])
except Exception:
pass
else:
# IPv4
try:
ip_port = ip_port.split(":")
ip = ip_port[0]
if len(ip_port) > 1:
port = int(ip_port[1].strip())
except Exception:
pass
if ip and self._is_any_my_ip([ip]):
self.sys_user_id = line[1].strip()
try:
self.sys_pwd = line[2].strip()
self.sys_ssh_key = line[3].strip()
except Exception:
pass
self.sys_ssh_port = port
result = True
break
except Exception:
pass
except Exception as e:
self.logger.warning("Ignoring credential file.\n%s." %(str(e)))
finally:
if f:
f.close()
return result
def _clear_sys_credentials(self):
self.sys_ssh_port = None
self.sys_user_id = None
self.sys_pwd = None
self.sys_ssh_key = None
def _set_system_credentials(self):
self._clear_sys_credentials()
set = self._set_system_credentials_from_file()
if set:
return
self.sys_user_id = self.sys_default_user_id
self.sys_pwd = <PASSWORD>.sys_default_pwd
self.sys_ssh_key = self.sys_default_ssh_key
self.sys_ssh_port = self.sys_default_ssh_port
@return_exceptions
def info_system_statistics(self, default_user=None, default_pwd=None, default_ssh_key=None,
                           default_ssh_port=None, credential_file=None, commands=None, collect_remote_data=False):
    """
    Get statistics for a system.

    Keyword Arguments:
    commands -- optional list of sys_cmds keys to run; None/empty means all.
    collect_remote_data -- when True (and not localhost), gather stats over SSH.

    Returns:
    dict -- {stat_name : stat_value, ...}
    """
    # `commands=None` replaces a mutable-default `[]`; behavior for callers
    # is unchanged (both falsy values select the full command list).
    if commands:
        cmd_list = copy.deepcopy(commands)
    else:
        cmd_list = [_key for _key, cmds in self.sys_cmds]
    if self.localhost:
        return self._get_localhost_system_statistics(cmd_list)
    if collect_remote_data:
        self._set_default_system_credentials(default_user, default_pwd, default_ssh_key,
                                             default_ssh_port, credential_file)
        return self._get_remote_host_system_statistics(cmd_list)
    return {}
@return_exceptions
def _get_localhost_system_statistics(self, commands):
    """Run the requested system commands locally and parse their output.

    Arguments:
    commands -- keys from self.sys_cmds to execute.

    Returns:
    dict -- parsed statistics keyed by command name.
    """
    sys_stats = {}
    for key, candidates in self.sys_cmds:
        if key not in commands:
            continue
        # Try each candidate command until one produces output.
        for cmd in candidates:
            out, err = util.shell_command([cmd])
            if not err and out:
                parse_system_live_command(key, out, sys_stats)
                break
    return sys_stats
@return_exceptions
def _login_remote_system(self, ip, user, pwd, ssh_key=None, port=None):
    """Open an SSH session via pxssh (new pexpect API) and return it."""
    session = pxssh.pxssh()
    # Allow only a single password prompt so a bad password fails fast.
    session.SSH_OPTS = "-o 'NumberOfPasswordPrompts=1'"
    session.force_password = True
    session.login(ip, user, pwd, ssh_key=ssh_key, port=port)
    return session
@return_exceptions
def _spawn_remote_system(self, ip, user, pwd, ssh_key=None, port=None):
    """Open an SSH session using the old pexpect API (manual ssh spawn).

    Walks ssh's interactive prompts (unknown host key, password/passphrase,
    terminal type), then rewrites the remote shell prompt to a fixed
    '[PEXPECT]$ ' marker so later expect() calls can match it reliably.

    Returns:
    pexpect.spawn session on success, None on any login failure.
    """
    # Prompt patterns ssh may emit during login (all case-insensitive).
    terminal_prompt_msg = '(?i)terminal type'
    ssh_newkey_msg = '(?i)are you sure you want to continue connecting'
    connection_closed_msg = "(?i)connection closed by remote host"
    permission_denied_msg = "(?i)permission denied"
    pwd_passphrase_msg = "(?i)(?:password)|(?:passphrase for key)"
    terminal_type = 'vt100'
    # Allow exactly one password prompt so a wrong password fails fast.
    ssh_options = "-o 'NumberOfPasswordPrompts=1' "
    if port:
        ssh_options += " -p %s"%(str(port))
    if ssh_key is not None:
        # NOTE(review): os.path.isfile's boolean result is ignored, so a
        # missing key only raises if the call itself errors -- confirm
        # whether `if not os.path.isfile(...)` was intended here.
        try:
            os.path.isfile(ssh_key)
        except Exception:
            raise Exception('private ssh key %s does not exist'%(str(ssh_key)))
        ssh_options += ' -i %s' % (ssh_key)
    s = pexpect.spawn('ssh %s -l %s %s'%(ssh_options, str(user), str(ip)))
    # First expect has 8 outcomes; indexes 6 (connection closed) and
    # 7 (EOF) only exist in this first pattern list.
    i = s.expect([ssh_newkey_msg, self.remote_system_command_prompt, pwd_passphrase_msg, permission_denied_msg, terminal_prompt_msg, pexpect.TIMEOUT, connection_closed_msg, pexpect.EOF], timeout=10)
    if i == 0:
        # In this case SSH does not have the public key cached.
        s.sendline("yes")
        i = s.expect([ssh_newkey_msg, self.remote_system_command_prompt, pwd_passphrase_msg, permission_denied_msg, terminal_prompt_msg, pexpect.TIMEOUT])
    if i == 2:
        # password or passphrase
        if pwd is None:
            raise Exception("Wrong SSH Password None.")
        s.sendline(pwd)
        i = s.expect([ssh_newkey_msg, self.remote_system_command_prompt, pwd_passphrase_msg, permission_denied_msg, terminal_prompt_msg, pexpect.TIMEOUT])
    if i == 4:
        # ssh asked for a terminal type; answer it and keep going.
        s.sendline(terminal_type)
        i = s.expect([ssh_newkey_msg, self.remote_system_command_prompt, pwd_passphrase_msg, permission_denied_msg, terminal_prompt_msg, pexpect.TIMEOUT])
    if i == 7:
        # EOF from the very first expect: the connection never came up.
        s.close()
        return None
    # At this point i reflects the last expect that ran.
    if i == 0:
        # Host-key question a second time is not expected.
        s.close()
        return None
    elif i == 1:
        # Shell prompt reached: login succeeded.
        pass
    elif i == 2:
        # password prompt again means input password is wrong
        s.close()
        return None
    elif i == 3:
        # permission denied means input password is wrong
        s.close()
        return None
    elif i == 4:
        # Terminal-type question a second time is not expected.
        s.close()
        return None
    elif i == 5:
        # timeout
        # Two possibilities
        # 1. couldn't login
        # 2. couldn't match shell prompt
        # safe option is to pass
        pass
    elif i == 6:
        # connection closed by remote host
        s.close()
        return None
    else:
        # unexpected
        s.close()
        return None
    # Normalize the remote prompt so subsequent commands are easy to match.
    self.remote_system_command_prompt = "\[PEXPECT\][\$\#] "
    s.sendline("unset PROMPT_COMMAND")
    # sh style
    s.sendline("PS1='[PEXPECT]\$ '")
    i = s.expect([pexpect.TIMEOUT, self.remote_system_command_prompt], timeout=10)
    if i == 0:
        # csh-style.
        s.sendline("set prompt='[PEXPECT]\$ '")
        i = s.expect([pexpect.TIMEOUT, self.remote_system_command_prompt], timeout=10)
        if i == 0:
            # Could not set a recognizable prompt in either shell family.
            return None
    return s
@return_exceptions
def _create_ssh_connection(self, ip, user, pwd, ssh_key=None, port=None):
    """Dispatch to the SSH helper matching the installed pexpect flavor."""
    if user is None and pwd is None and ssh_key is None:
        raise Exception("Insufficient credentials to connect.")
    handlers = {
        NEW_MODULE: self._login_remote_system,
        OLD_MODULE: self._spawn_remote_system,
    }
    handler = handlers.get(PEXPECT_VERSION)
    if handler is None:
        # No usable pexpect module available.
        return None
    return handler(ip, user, pwd, ssh_key, port)
@return_exceptions
def _execute_remote_system_command(self, conn, cmd):
if not conn or not cmd or PEXPECT_VERSION == NO_MODULE:
return None
conn.sendline(cmd)
if PEXPECT_VERSION == NEW_MODULE:
# NOTE: extraction artifact (chunk separator) removed.
import os
import re
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded on change (useful during development)
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Disable client-side caching on every response."""
    no_cache_headers = {
        "Cache-Control": "no-cache, no-store, must-revalidate",
        "Expires": 0,
        "Pragma": "no-cache",
    }
    for header, value in no_cache_headers.items():
        response.headers[header] = value
    return response
# Custom filter: render numbers as US dollars in templates
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
# app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
# Make sure API key is set
# NOTE(review): presumably consumed by helpers.lookup() -- confirm
if not os.environ.get("API_KEY"):
    raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
    """Show portfolio of stocks.

    Builds per-symbol price/share/name/total lookups from the user's trade
    history and renders them together with the user's cash balance.
    """
    # Distinct symbols this user has ever traded.
    symbols = db.execute("SELECT DISTINCT symbol FROM history WHERE user_id = ?", session["user_id"])
    shares = {}   # symbol -> net shares held
    prices = {}   # symbol -> current share price
    names = {}    # symbol -> company name
    totals = {}   # symbol -> current value of the holding
    totalCash = 0.0
    for row in symbols:
        symbol = row["symbol"]
        quote = lookup(symbol)
        # lookup() can fail (bad symbol / API error); skip rather than crash.
        if quote is None:
            continue
        prices[symbol] = quote["price"]
        # Net shares: SUM over all buys (+) and sells (-) for this symbol.
        quantity = db.execute("SELECT SUM(shares) FROM history WHERE symbol = ? AND user_id = ?",
                              quote["symbol"], session["user_id"])  # [{'SUM(shares)': 4.0}]
        shares[symbol] = int(quantity[0]["SUM(shares)"])
        totals[symbol] = float(prices[symbol] * shares[symbol])
        names[symbol] = quote["name"]
        totalCash += totals[symbol]  # tfoot in index.html sums the total
    cash = db.execute("SELECT cash FROM users WHERE id = ?", session["user_id"])
    totalCash += cash[0]["cash"]
    return render_template("index.html", prices=prices, shares=shares, names=names, symbols=symbols, cash=cash, totals=totals, totalCash=totalCash)
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
    """Sell shares of stock."""
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        # Ensure symbol was submitted
        if not request.form.get("symbol"):
            return apology("must provide symbol")
        # Ensure shares was submitted
        if not request.form.get("shares"):
            return apology("must provide shares")
        shares = request.form.get("shares")

        def is_integer(n):
            # True when the submitted string represents a whole number.
            try:
                return float(n).is_integer()
            except ValueError:
                return False

        # Ensure shares submitted is a positive integer
        if not is_integer(shares):
            return apology("a share must be a positive integer")
        # int(float(...)) also accepts inputs like "3.0", which is_integer()
        # approved above -- int("3.0") alone would raise ValueError.
        shares = int(float(shares))
        # Ensure shares is bigger than 0
        if shares <= 0:
            return apology("a share must be a positive integer")
        # Net shares held for this symbol. The SUM() query always returns one
        # row, so the list itself is truthy even when the user never traded
        # this symbol -- in that case the aggregate value is None. Check the
        # value, not just the list, to avoid int(None) crashing.
        quantity = db.execute("SELECT SUM(shares) FROM history WHERE symbol = ? AND user_id = ?",
                              request.form.get("symbol"), session["user_id"])
        if not quantity or quantity[0]["SUM(shares)"] is None:
            return apology("you don't have shares from this company")
        # Ensure user has more than he's selling
        quantity = int(quantity[0]["SUM(shares)"])
        if shares > quantity:
            return apology("You dont have that many shares from this company")
        # Look up the stock's current price; guard against a failed lookup.
        quote = lookup(request.form.get("symbol"))
        if not quote:
            return apology("invalid symbol")
        name = quote["name"]      # string
        price = quote["price"]    # float
        symbol = quote["symbol"]  # string
        # Sale proceeds as a float
        sp = float(price * shares)
        # Credit the proceeds to the user's cash balance
        cash = db.execute("SELECT cash FROM users WHERE id = ?", session["user_id"])
        cash = float(cash[0]["cash"]) + sp
        db.execute("UPDATE users SET cash = ? WHERE id = ?", cash, session["user_id"])
        # SQL DATE: TEXT as ISO8601 strings ("YYYY-MM-DD HH:MM:SS.SSS").
        date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        # Store sells as negative share counts so history distinguishes
        # sells from buys.
        shares = -shares
        db.execute("INSERT INTO history (user_id, name, symbol, shares, price, date) VALUES(?, ?, ?, ?, ?, ?)",
                   session["user_id"], name, symbol, shares, price, date)
        # Redirect user to home page
        return redirect("/")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        # Distinct symbols for the select form in sell.html
        symbols = db.execute("SELECT DISTINCT symbol FROM history WHERE user_id = ?", session["user_id"])
        shares = {}
        # Net holdings per symbol so the template can hide fully-sold stocks.
        for row in symbols:
            symbol = row["symbol"]
            quantity = db.execute("SELECT SUM(shares) FROM history WHERE symbol = ? AND user_id = ?",
                                  symbol, session["user_id"])
            total = quantity[0]["SUM(shares)"]
            shares[symbol] = int(total) if total is not None else 0
        return render_template("sell.html", symbols=symbols, shares=shares)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock"""
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure symbol was submitted
if not request.form.get("symbol"):
return apology("must provide symbol")
quote = lookup(request.form.get("symbol"))
# Ensure symbol submitted is valid
if not quote:
return apology("invalid symbol")
# Ensure shares was submitted
if not request.form.get("shares"):
return apology("must provide shares")
# Ensure shares submitted is valid
shares = request.form.get("shares")
# Defines a function to check if the string (shares) typed by the user is a float or integer
def is_integer(n):
try:
float(n)
except ValueError:
return False
else:
return float(n).is_integer()
# Ensure shares submitted is a positive integer
if not is_integer(shares):
return apology("a share must be a positive integer")
# After validating above, gets number of shares from the user as an integer
shares = int(shares)
# Ensure shares is bigger than 0
if shares <= 0:
return apology("a share must be a positive integer")
# Look up the stock’s current price
name = quote["name"] # string
price = quote["price"] # float
symbol = quote["symbol"] # string
# Calculates the purchase price as a float
pp = float(price * shares)
# Select how much cash the user currently has
cash = db.execute("SELECT cash FROM users WHERE id = ?", session["user_id"])
cash = float(cash[0]["cash"])
# Ensure user has the money to buy those shares at that purchase price
if pp > cash:
return apology("cannot afford the number of shares at the current price")
# Updates cash variable subtracting pp (purchase price)
cash = cash - pp
# Updates the amount of cash this user has after the purchase in the database finance.db
db.execute("UPDATE users SET cash = ? WHERE id = ?", cash, session["user_id"])
# SQL DATE: TEXT as ISO8601 strings ("YYYY-MM-DD HH:MM:SS.SSS").
# Gets the date and time of the purchase using datetime() imported from datetime library.
date = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # Formats as desired: seconds being integer not float.
# Updates history TABLE from finance.db with name (companyname), price of 1 share, symbol, shares purchased (+), datetime and user_id.
db.execute("INSERT INTO history (user_id, name, symbol, shares, price, date) VALUES(?, ?, ?, ?, ?, ?)",
session["user_id"], name, symbol, shares, | |
# Repository: alexmplastow/RNACircos
#!/usr/bin/env python
import argparse
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.legend_handler import HandlerLine2D
from matplotlib.patches import Circle
import matplotlib.image as img
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.colors as clr
import numpy as np
import math
from PIL import Image
import webcolors as wc
import functions
import circle_graph
import linear_graph
import graph_phylogeny
def main():
parser=argparse.ArgumentParser(description='CLI for graphing of dotbracket notation of RNA secondary structure')
parser.add_argument('-i', '--input', help='input file for the dot-bracket structure' , type=str)
parser.add_argument('-i2', '--secondary_input', help="secondary input file, also signals the superimposition of one linear/circular representation onto another",type=str)
parser.add_argument('-l', "--linear", help="Produces the linear representation of RNA secondary structure", action="store_true")
parser.add_argument('-c', "--circular", help="Produces the circular representation of RNA secondary structure", action="store_true")
parser.add_argument('-c1', "--color_one", help='selected color for the plot, the default is lightblue', type=str)
parser.add_argument('-c2', "--color_two", help='selected color for the superimposed plot, the default is lightgreen',type=str)
parser.add_argument('-c3', "--color_three",help="When graphs are superimposed, the overlapping areas should be set to a seperate color, something which contrasts well is recommended",type=str)
parser.add_argument('-c4', "--color_four", help="overlap color of unaligned regions if -a2 is chosen", type=str)
parser.add_argument('-a', "--align", help="Align the nucleotides before checking for structural similarities (recommended)", action="store_true")
parser.add_argument( '-sa','--second_alignment_path', help="align and permit the overlaping regions to be some fourth color of your choice", action="store_true")
parser.add_argument('-o', '--overlay_alpha', help='transparency value between 0 and 1 for the overlying plot in superimposed graphs', type=str)
parser.add_argument('-u', '--underlay_alpha', help='transparency value between 0 and 1 for the underlying plot in superimposed graphs', type=str)
parser.add_argument('-m', '--match_radii', help='by default, circular representations of secondary structure will adapt to polymer length, including this argument will cause the circular graphs to adopt uniform radii', action="store_true")
parser.add_argument('-st','--structures', help='input files for the dot-bracket structure' ,nargs='+')
parser.add_argument('-MSA', '--MSA_file', help='input MSA alignment output from CLUSTALW' , type=str)
parser.add_argument('-ct', '--cutoff', help='number of homologous sequences to be ignored (start from 0, the default, and work your way up (1,2,3....) if in doubt')
parser.add_argument('-cn', '--colored_nucleotides', help='colored nucleotide alignment for MSA', action="store_true")
parser.add_argument('-nc', '--nucleotide_colors', help='specific colors for nucleotides given the \"colored_nucleotides\" command, the colors are ordered as A,T,G, and C, default colors are lime-green, orange-red, blue-violet, and cyan', nargs='+')
parser.add_argument('-pm', '--p_matrix_input', help='required input *dp.ps containing the ViennaRNA generated probability matrix. Triggers gradient generation' , type=str)
parser.add_argument('-lc', '--low_prob_color', nargs='+', help='add the low rgb values for the custom gradient', type=str)
parser.add_argument('-hc', '--high_prob_color', nargs='+', help='add the high rgb values for the custom gradient', type=str)
parser.add_argument('-g', '--gradient_legend', help="adds a legend to gradient graphs to show which color corresponds to a low probability and which color coresponds to a high probability", action="store_true")
parser.add_argument('-n', '--nucleotides', help='adds nucleotides to the visualization', action="store_true")
parser.add_argument('-d', '--dpi', help='enter the dpi needed for supderimposed graphs, there is no "one size fits all", raise or lower this value as needed, start with 96 if in doubt', type=int)
args=parser.parse_args()
if args.nucleotide_colors:
nucleotide_colors=args.nucleotide_colors
else:
nucleotide_colors=['limegreen','orangered','blueviolet','cyan']
if args.colored_nucleotides:
colored_nucleotides=True
else:
colored_nucleotides=False
if not args.color_four:
color_4='purple'
else:
color_4=args.color_four
if not args.structures:
brackets=functions.bracket_reader(args.input)
else:
brackets='brackets'
brackets_length_one=len(brackets)
if not args.overlay_alpha:
overlay_alpha=1
else:
overlay_alpha=float(args.overlay_alpha)
if not args.underlay_alpha:
underlay_alpha=1
else:
underlay_alpha=float(args.underlay_alpha)
if not args.color_one:
color_1='lightblue'
else:
color_1=args.color_one
if not args.color_two:
color_2='lightgreen'
else:
color_2=args.color_two
if args.p_matrix_input:
fh=functions.prob_matrix(args.p_matrix_input)
gradient_0=True
else:
fh=0
gradient_0=False
if args.p_matrix_input:
sqrt_prob=functions.sqrt_finder(functions.bond_finder(brackets), fh)
else:
sqrt_prob=0
if args.low_prob_color and args.high_prob_color:
custom_gradient=True
try:
rgb_start_global=tuple(args.high_prob_color)
rgb_end_global=tuple(args.low_prob_color)
color_spectrum = functions.gen_gradient(('1920', '1080'), args.low_prob_color, args.high_prob_color)
except:
s_v=list(wc.name_to_rgb(args.high_prob_color[0]))
rgb_start_global=(s_v[0], s_v[1], s_v[2])
s_v = list(wc.name_to_rgb(args.low_prob_color[0]))
rgb_end_global = (s_v[0], s_v[1], s_v[2])
color_spectrum = functions.gen_gradient(('1920', '1080'), list(rgb_start_global), list(rgb_end_global))
else:
custom_gradient=False
rgb_start_global=0
rgb_end_global=0
color_spectrum=0
if not custom_gradient:
rgb_start=('255','0','0')
rgb_end=('255','255','0')
color_spectrum=functions.gen_gradient(('1920','1080'),rgb_start,rgb_end)
if args.low_prob_color and args.high_prob_color:
gradient_color_graph=True
else:
gradient_color_graph=False
args=parser.parse_args()
if args.secondary_input:
mutation_imposition=True
else:
mutation_imposition=False
if args.dpi:
my_dpi=args.dpi
else:
my_dpi=150
if args.color_three:
color_3=args.color_three
else:
color_3='pink'
if not args.structures:
list_nucleotides=functions.nucleotide_list(args.input)
if args.cutoff:
n=int(args.cutoff)
else:
n=0
if args.secondary_input and args.linear and not args.align and not args.second_alignment_path:
max_bond=functions.height_finder(brackets,functions.bracket_reader(args.secondary_input))
name_1=functions.name(args.input)
linear_graph.linear_graph(linear=True, brackets=brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_1, list_nucleotides=list_nucleotides,
nucleotides=args.nucleotides, alpha_0=1)
bond_zero = mpatches.Patch(color=color_1, label=name_1)
brackets=functions.bracket_reader(args.secondary_input)
brackets_length_two=len(brackets)
name_2=functions.name(args.secondary_input)
bonds_one = mpatches.Patch(color=color_2, label=name_2)
max_brackets=max(brackets_length_one, brackets_length_two)
plt.xlim(0,max_brackets)
plt.ylim(0, 0.6*max_bond)
plt.yticks([])
overlap_patch=mpatches.Patch(color=color_3, label='OVERLAP')
plt.legend(handles=[bond_zero, bonds_one, overlap_patch], loc='upper right', fontsize='xx-small')
plt.savefig('plot_0000000000000000_1.png', dpi=my_dpi)
plt.clf()
linear_graph.linear_graph(linear=True, brackets=brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_2, list_nucleotides=list_nucleotides,
nucleotides=args.nucleotides, alpha_0=1)
plt.xlim(0,max_brackets)
plt.ylim(0, 0.6*max_bond)
plt.legend(handles=[bond_zero, bonds_one, overlap_patch], loc='upper right', fontsize='xx-small')
plt.yticks([])
plt.savefig('plot_0000000000000000_2.png', dpi=my_dpi)
image=functions.clean_imposition(list(wc.name_to_rgb(color_1)),
list(wc.name_to_rgb(color_2)),
list(wc.name_to_rgb(color_3)),
'plot_0000000000000000_1.png',
'plot_0000000000000000_2.png')
os.remove('plot_0000000000000000_1.png')
os.remove('plot_0000000000000000_2.png')
image.show()
save_question=input('Would you like to save the image? (Y/n)')
if save_question == 'Y':
name_3=(str(name_1)).strip() + '&' + (str(name_2)).strip() + '_linear.png'
image.save(name_3)
elif args.linear and args.second_alignment_path:
name_1 = functions.name(args.input)
match_brackets, first_idiosyncratic_brackets, second_idiosyncratic_brackets, first_alignment, second_alignment \
= functions.align_redefine(
functions.nucleotide_string(args.input), functions.nucleotide_string(args.secondary_input),
functions.bracket_reader(args.input), functions.bracket_reader(args.secondary_input))
list_nucleotides = functions.aligned_nucleotide_list(first_alignment, second_alignment)
max_bond = max(functions.height_finder(first_idiosyncratic_brackets, match_brackets),
functions.height_finder(second_idiosyncratic_brackets, match_brackets))
linear_graph.linear_graph(linear=True, brackets=first_idiosyncratic_brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_1, list_nucleotides=list_nucleotides,
nucleotides=first_alignment, alpha_0=1)
linear_graph.linear_graph(linear=True, brackets=match_brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_3, list_nucleotides=list_nucleotides,
nucleotides=args.nucleotides, alpha_0=1)
bond_zero = mpatches.Patch(color=color_1, label=name_1)
brackets = functions.bracket_reader(args.secondary_input)
brackets_length_two = len(brackets)
name_2 = functions.name(args.secondary_input)
bonds_one = mpatches.Patch(color=color_2, label=name_2)
max_brackets = max(brackets_length_one, brackets_length_two)
plt.xlim(0, max_brackets)
plt.ylim(0, 0.6*max_bond)
plt.yticks([])
alignment_patch = mpatches.Patch(color=color_3, label='ALIGNMENT')
overlap_patch=mpatches.Patch(color=color_4, label='OVERLAP')
plt.legend(handles=[bond_zero, bonds_one, alignment_patch, overlap_patch], loc='upper right', fontsize='xx-small')
plt.savefig('plot_0000000000000000_1.png', dpi=my_dpi)
plt.clf()
linear_graph.linear_graph(linear=True, brackets=second_idiosyncratic_brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_2, list_nucleotides=list_nucleotides,
nucleotides=second_alignment, alpha_0=1)
linear_graph.linear_graph(linear=True, brackets=match_brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_3, list_nucleotides=list_nucleotides,
nucleotides=args.nucleotides, alpha_0=1)
plt.xlim(0, max_brackets)
plt.ylim(0, 0.6*max_bond)
plt.legend(handles=[bond_zero, bonds_one, alignment_patch, overlap_patch], loc='upper right', fontsize='xx-small')
plt.yticks([])
plt.savefig('plot_0000000000000000_2.png', dpi=my_dpi)
image = functions.clean_imposition(list(wc.name_to_rgb(color_1)),
list(wc.name_to_rgb(color_2)),
list(wc.name_to_rgb(color_4)),
'plot_0000000000000000_1.png',
'plot_0000000000000000_2.png')
os.remove('plot_0000000000000000_1.png')
os.remove('plot_0000000000000000_2.png')
image.show()
save_question = input('Would you like to save the image? (Y/n)')
if save_question == 'Y':
name_3 = (str(name_1)).strip() + '&' + (str(name_2)).strip() + '_linear.png'
image.save(name_3)
elif args.secondary_input and args.linear and args.align:
match_brackets, first_idiosyncratic_brackets, second_idiosyncratic_brackets, first_alignment, second_alignment\
=functions.align_redefine(
functions.nucleotide_string(args.input),functions.nucleotide_string(args.secondary_input),
functions.bracket_reader(args.input),functions.bracket_reader(args.secondary_input))
list_nucleotides=functions.aligned_nucleotide_list(first_alignment, second_alignment)
linear_graph.linear_graph(linear=True, brackets=first_idiosyncratic_brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_1, list_nucleotides=list_nucleotides,
nucleotides=False, alpha_0=underlay_alpha)
linear_graph.linear_graph(linear=True, brackets=second_idiosyncratic_brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_2, list_nucleotides=list_nucleotides,
nucleotides=False, alpha_0=overlay_alpha)
linear_graph.linear_graph(linear=True, brackets=match_brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_3, list_nucleotides=list_nucleotides,
nucleotides=args.nucleotides, alpha_0=1)
name_1 = functions.name(args.input)
bond_zero = mpatches.Patch(color=color_1, label=name_1)
name_2 = functions.name(args.secondary_input)
bonds_one = mpatches.Patch(color=color_2, label=name_2)
overlap_patch = mpatches.Patch(color=color_3, label='OVERLAP')
plt.legend(handles=[bond_zero, bonds_one, overlap_patch], loc='upper right', fontsize='small')
plt.yticks([])
if not args.nucleotides:
abcissa_x=list(range(0,len(match_brackets)))
abcissa_y=[0 for i in range(0,len(match_brackets))]
plt.plot(abcissa_x,abcissa_y, 'black')
plt.show()
else:
if args.linear and not args.MSA_file and not args.structures:
linear_graph.linear_graph(linear=True, brackets=brackets, custom_gradient=custom_gradient,
gradient_0=gradient_0, gradient_color_graph=gradient_color_graph,
mutation_imposition=mutation_imposition, fh=fh,
rgb_start_global=rgb_start_global, rgb_end_global=rgb_end_global,
input_color=color_1, list_nucleotides=list_nucleotides,
nucleotides=args.nucleotides, alpha_0=1)
plt.title(functions.name(args.input))
plt.yticks([])
if gradient_0 and args.gradient_legend:
attempted_colormap=[color_spectrum[-1], color_spectrum[round(len(color_spectrum)/2)],color_spectrum[0]]
my_cmap=LinearSegmentedColormap.from_list('my_cmap', attempted_colormap)
plt.scatter([-1,-2,-3,-4,-5,-6,-7,-8,-9,-10, -11],[1,1,1,1,1,1,1,1,1,1,1], c=[0,0.1, 0.2, 0.3 , 0.4, 0.5, 0.6, 0.7 , 0.8, 0.9, 1.0], cmap=my_cmap)
plt.colorbar(label='bond probability (0.0 - 1.0)')
plt.xlim(0, len(brackets))
plt.ylim(0, len(brackets) / 2)
plt.show()
elif args.linear and args.MSA_file and args.structures:
serial_brackets = [[functions.name(i), functions.bracket_reader(i)] for i in args.structures]
serial_names=[i[0] for i in serial_brackets]
serial_names=', '.join(serial_names)
serial_alignment = functions.MSA_alignment_extraction(args.MSA_file)
bond_list_backbone, bond_list_frequency, indices, low_value = graph_phylogeny.alignment_bracket_processing(serial_brackets,
serial_alignment , n)
abcissa = np.linspace(0, max(indices), max(indices))
functions.bond_grapher_2(abcissa=abcissa, linear=True, bonds=bond_list_backbone, color='purple',
gradient_0=False,
gradient_color_graph=False,
color_spectrum=color_spectrum, mutation_imposition=False, alpha_0=1, mosaic=True,
bond_ubiquity=bond_list_frequency)
plt.yticks([])
if args.nucleotides or args.colored_nucleotides:
graph_phylogeny.linear_nucleotide_plot(serial_alignment,colored_nucleotides,nucleotide_color_list=nucleotide_colors)
if args.gradient_legend:
attempted_colormap = [color_spectrum[-1], color_spectrum[round(len(color_spectrum) / 2)], color_spectrum[0]]
my_cmap = LinearSegmentedColormap.from_list('my_cmap', attempted_colormap)
if n ==len(serial_alignment)-1:
color_list = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
else:
color_list = list(np.linspace(low_value, 1.0, 11))
plt.scatter([-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
c=color_list, cmap=my_cmap)
plt.colorbar(label='bond conservation probability ('+ str(round(low_value,3)) + ' - 1.0)')
plt.xlim(0,len(serial_alignment[0][1]))
plt.plot(list(range(0,len(serial_alignment[0][1]))),[0 for i in range(0,len(serial_alignment[0][1]))],
color='black', zorder=0)
plt.title(serial_names)
plt.show()
if args.secondary_input and args.circular:
if args.match_radii and not args.second_alignment_path:
brackets_1=brackets
brackets_2 = functions.bracket_reader(args.secondary_input)
name_1 = functions.name(args.input)
bond_zero = mpatches.Patch(color=color_1, label=name_1)
name_2 = functions.name(args.secondary_input)
bond_one = mpatches.Patch(color=color_2, label=name_2)
if len(functions.bracket_reader(args.input)) < len(functions.bracket_reader(args.secondary_input)):
brackets_1, brackets_2 = brackets_2, brackets_1
name_1, name_2= name_2, name_1
bond_zero, bond_one = bond_one, bond_zero
len_index_input=circle_graph.circle_graph(brackets_1,sqrt_prob, color_0=color_1, gradient_0=gradient_0,
gradient_color_graph=gradient_color_graph, color_spectrum=color_spectrum,
mutation_imposition=True, first=True, len_input=0, list_nucleotides=list_nucleotides,
nucleotides=args.nucleotides, alpha=1)
plt.xlim(-1.45*(len_index_input/(2*math.pi)),1.45*(len_index_input)/(2*math.pi))
plt.ylim(-1.45*(len_index_input / (2 * math.pi)), 1.45*(len_index_input) / (2 * math.pi))
overlap_patch = mpatches.Patch(color=color_3, label='OVERLAP')
plt.legend(handles=[bond_zero, bond_one, overlap_patch], loc='upper right', fontsize='xx-small')
plt.savefig('plot_0000000000000000_1.png', dpi=my_dpi)
plt.clf()
circle_graph.circle_graph(brackets_2,sqrt_prob, color_0=color_2, gradient_0=gradient_0,
gradient_color_graph=gradient_color_graph, color_spectrum=color_spectrum,
mutation_imposition=True, first=False, len_input=len_index_input,
list_nucleotides=list_nucleotides, nucleotides=args.nucleotides, alpha=1)
plt.xlim(-1.45*(len_index_input / (2 * math.pi)), 1.45*(len_index_input) / (2 * math.pi))
plt.ylim(-1.45*(len_index_input / (2 * math.pi)), 1.45*(len_index_input) / (2 * math.pi))
overlap_patch = mpatches.Patch(color=color_3, label='OVERLAP')
plt.legend(handles=[bond_zero, bond_one, overlap_patch], loc='upper right', fontsize='xx-small')
plt.savefig('plot_0000000000000000_2.png', dpi=my_dpi)
image = functions.clean_imposition(list(wc.name_to_rgb(color_1)),
list(wc.name_to_rgb(color_2)),
list(wc.name_to_rgb(color_3)),
'plot_0000000000000000_1.png',
'plot_0000000000000000_2.png')
os.remove('plot_0000000000000000_1.png')
os.remove('plot_0000000000000000_2.png')
image.show()
save_question = input('Would you like to save the image? (Y/n)')
if save_question == 'Y':
name_3 = (str(name_1)).strip() + '&' + (str(name_2)).strip() + '_circular.png'
image.save(name_3)
| |
range(collection_num):
suffix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(5))
collection_names.append(collection_name + "_" + suffix)
# #####
ni_per = collection["ni_per"]
build_index = collection["build_index"]
# TODO: debug
for c_name in collection_names:
milvus_instance = MilvusClient(collection_name=c_name, host=self.host, port=self.port)
if milvus_instance.exists_collection(collection_name=c_name):
milvus_instance.drop(name=c_name)
time.sleep(10)
milvus_instance.create_collection(c_name, dimension, index_file_size, metric_type)
index_info = {
"build_index": build_index
}
if build_index is True:
index_type = collection["index_type"]
index_param = collection["index_param"]
index_info.update({
"index_type": index_type,
"index_param": index_param
})
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
res = self.do_insert(milvus_instance, c_name, data_type, dimension, collection_size, ni_per)
logger.info(res)
if "flush" in collection and collection["flush"] == "no":
logger.debug("No manual flush")
else:
milvus_instance.flush()
logger.debug("Table row counts: %d" % milvus_instance.count(name=c_name))
if build_index is True:
logger.debug("Start build index for last file")
milvus_instance.create_index(index_type, index_param)
logger.debug(milvus_instance.describe_index())
code_str = """
import random
import string
from locust import User, task, between
from locust_task import MilvusTask
from client import MilvusClient
host = '%s'
port = %s
dim = %s
connection_type = '%s'
collection_names = %s
m = MilvusClient(host=host, port=port)
def get_collection_name():
return random.choice(collection_names)
def get_client(collection_name):
if connection_type == 'single':
return MilvusTask(m=m)
elif connection_type == 'multi':
return MilvusTask(connection_type='multi', host=host, port=port, collection_name=collection_name)
class QueryTask(User):
wait_time = between(0.001, 0.002)
@task()
def %s(self):
top_k = %s
X = [[random.random() for i in range(dim)] for i in range(%s)]
search_param = %s
collection_name = get_collection_name()
client = get_client(collection_name)
client.query(X, top_k, search_param, collection_name=collection_name)
""" % (self.host, self.port, dimension, connection_type, collection_names, def_name, task_params["top_k"], task_params["nq"], task_params["search_param"])
with open(task_file_script, 'w+') as fd:
fd.write(code_str)
locust_cmd = "locust -f %s --headless --csv=%s -u %d -r %d -t %s" % (
task_file_script,
task_file,
clients_num,
hatch_rate,
during_time)
logger.info(locust_cmd)
try:
res = os.system(locust_cmd)
except Exception as e:
logger.error(str(e))
return
# . retrieve and collect test statistics
locust_stats = None
with open(task_file_csv, newline='') as fd:
dr = csv.DictReader(fd)
for row in dr:
if row["Name"] != "Aggregated":
continue
locust_stats = row
logger.info(locust_stats)
# clean up temp files
search_params = {
"top_k": task_params["top_k"],
"nq": task_params["nq"],
"nprobe": task_params["search_param"]["nprobe"]
}
run_params = {
"connection_num": connection_num,
"clients_num": clients_num,
"hatch_rate": hatch_rate,
"during_time": during_time
}
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_params, run_params)
metric.metrics = {
"type": run_type,
"value": {
"during_time": during_time,
"request_count": int(locust_stats["Request Count"]),
"failure_count": int(locust_stats["Failure Count"]),
"qps": locust_stats["Requests/s"],
"min_response_time": int(locust_stats["Min Response Time"]),
"max_response_time": int(locust_stats["Max Response Time"]),
"median_response_time": int(locust_stats["Median Response Time"]),
"avg_response_time": int(locust_stats["Average Response Time"])
}
}
report(metric)
elif run_type == "search_ids_stability":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
during_time = collection["during_time"]
ids_length = collection["ids_length"]
ids = collection["ids"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
g_top_k = int(collection["top_ks"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
# g_id = int(ids.split("-")[1])
# l_id = int(ids.split("-")[0])
g_id_length = int(ids_length.split("-")[1])
l_id_length = int(ids_length.split("-")[0])
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug(start_mem_usage)
start_time = time.time()
while time.time() < start_time + during_time * 60:
search_param = {}
top_k = random.randint(l_top_k, g_top_k)
ids_num = random.randint(l_id_length, g_id_length)
ids_param = [random.randint(l_id_length, g_id_length) for _ in range(ids_num)]
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
logger.debug("Query top-k: %d, ids_num: %d, param: %s" % (top_k, ids_num, json.dumps(search_param)))
result = milvus_instance.query_ids(top_k, ids_param, search_param=search_param)
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, {})
metric.metrics = {
"type": "search_ids_stability",
"value": {
"during_time": during_time,
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage
}
}
report(metric)
# for sift/deep datasets
# TODO: enable
elif run_type == "accuracy":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
# mapping to search param list
search_params = self.generate_combinations(search_params)
top_ks = collection["top_ks"]
nqs = collection["nqs"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
milvus_instance.preload_collection()
true_ids_all = self.get_groundtruth_ids(collection_size)
for search_param in search_params:
for top_k in top_ks:
for nq in nqs:
# total = 0
search_param_group = {
"nq": nq,
"topk": top_k,
"search_param": search_param
}
logger.info("Query params: %s" % json.dumps(search_param_group))
result_ids, _ = self.do_query_ids(milvus_instance, collection_name, top_k, nq, search_param=search_param)
acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids)
logger.info("Query accuracy: %s" % acc_value)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_param_group)
metric.metrics = {
"type": "accuracy",
"value": {
"acc": acc_value
}
}
report(metric)
elif run_type == "ann_accuracy":
hdf5_source_file = collection["source_file"]
collection_name = collection["collection_name"]
index_file_sizes = collection["index_file_sizes"]
index_types = collection["index_types"]
index_params = collection["index_params"]
top_ks = collection["top_ks"]
nqs = collection["nqs"]
search_params = collection["search_params"]
# mapping to search param list
search_params = self.generate_combinations(search_params)
# mapping to index param list
index_params = self.generate_combinations(index_params)
data_type, dimension, metric_type = parser.parse_ann_collection_name(collection_name)
dataset = utils.get_dataset(hdf5_source_file)
true_ids = np.array(dataset["neighbors"])
for index_file_size in index_file_sizes:
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"index_file_size": index_file_size,
"dataset_name": collection_name
}
if milvus_instance.exists_collection(collection_name):
logger.info("Re-create collection: %s" % collection_name)
milvus_instance.drop()
time.sleep(DELETE_INTERVAL_TIME)
milvus_instance.create_collection(collection_name, dimension, index_file_size, metric_type)
logger.info(milvus_instance.describe())
insert_vectors = self.normalize(metric_type, np.array(dataset["train"]))
# Insert batch once
# milvus_instance.insert(insert_vectors)
loops = len(insert_vectors) // INSERT_INTERVAL + 1
for i in range(loops):
start = i*INSERT_INTERVAL
end = min((i+1)*INSERT_INTERVAL, len(insert_vectors))
tmp_vectors = insert_vectors[start:end]
if start < end:
if not isinstance(tmp_vectors, list):
milvus_instance.insert(tmp_vectors.tolist(), ids=[i for i in range(start, end)])
else:
milvus_instance.insert(tmp_vectors, ids=[i for i in range(start, end)])
milvus_instance.flush()
logger.info("Table: %s, row count: %s" % (collection_name, milvus_instance.count()))
if milvus_instance.count() != len(insert_vectors):
logger.error("Table row count is not equal to insert vectors")
return
for index_type in index_types:
for index_param in index_params:
logger.debug("Building index with param: %s" % json.dumps(index_param))
milvus_instance.create_index(index_type, index_param=index_param)
logger.info(milvus_instance.describe_index())
logger.info("Start preload collection: %s" % collection_name)
milvus_instance.preload_collection()
index_info = {
"index_type": index_type,
"index_param": index_param
}
logger.debug(index_info)
for search_param in search_params:
for nq in nqs:
query_vectors = self.normalize(metric_type, np.array(dataset["test"][:nq]))
for top_k in top_ks:
search_param_group = {
"nq": len(query_vectors),
"topk": top_k,
"search_param": search_param
}
logger.debug(search_param_group)
if not isinstance(query_vectors, list):
result = milvus_instance.query(query_vectors.tolist(), top_k, search_param=search_param)
else:
result = milvus_instance.query(query_vectors, top_k, search_param=search_param)
if len(result):
logger.debug(len(result))
logger.debug(len(result[0]))
result_ids = result.id_array
acc_value = self.get_recall_value(true_ids[:nq, :top_k].tolist(), result_ids)
logger.info("Query ann_accuracy: %s" % acc_value)
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, search_param_group)
metric.metrics = {
"type": "ann_accuracy",
"value": {
"acc": acc_value
}
}
report(metric)
elif run_type == "search_stability":
(data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name)
search_params = collection["search_params"]
during_time = collection["during_time"]
collection_info = {
"dimension": dimension,
"metric_type": metric_type,
"dataset_name": collection_name
}
if not milvus_instance.exists_collection():
logger.error("Table name: %s not existed" % collection_name)
return
logger.info(milvus_instance.count())
index_info = milvus_instance.describe_index()
logger.info(index_info)
g_top_k = int(collection["top_ks"].split("-")[1])
g_nq = int(collection["nqs"].split("-")[1])
l_top_k = int(collection["top_ks"].split("-")[0])
l_nq = int(collection["nqs"].split("-")[0])
milvus_instance.preload_collection()
start_mem_usage = milvus_instance.get_mem_info()["memory_used"]
logger.debug(start_mem_usage)
start_row_count = milvus_instance.count()
logger.debug(milvus_instance.describe_index())
logger.info(start_row_count)
start_time = time.time()
while time.time() < start_time + during_time * 60:
search_param = {}
top_k = random.randint(l_top_k, g_top_k)
nq = random.randint(l_nq, g_nq)
for k, v in search_params.items():
search_param[k] = random.randint(int(v.split("-")[0]), int(v.split("-")[1]))
query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)]
logger.debug("Query nq: %d, top-k: %d, param: %s" % (nq, top_k, json.dumps(search_param)))
result = milvus_instance.query(query_vectors, top_k, search_param=search_param)
end_mem_usage = milvus_instance.get_mem_info()["memory_used"]
metric = self.report_wrapper(milvus_instance, self.env_value, self.hostname, collection_info, index_info, {})
metric.metrics = {
"type": "search_stability",
"value": {
"during_time": during_time,
"start_mem_usage": start_mem_usage,
"end_mem_usage": end_mem_usage,
"diff_mem": end_mem_usage - start_mem_usage
}
}
report(metric)
elif run_type == "loop_stability":
# init data
milvus_instance.clean_db()
pull_interval = collection["pull_interval"]
collection_num = collection["collection_num"]
concurrent = collection["concurrent"] if "concurrent" in collection else False
concurrent_num = collection_num
dimension = collection["dimension"] if "dimension" in collection else 128
insert_xb = collection["insert_xb"] if "insert_xb" in collection else 100000
index_types = collection["index_types"] if "index_types" in collection else ['ivf_sq8']
index_param = {"nlist": 2048}
collection_names = []
milvus_instances_map = {}
insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(insert_xb)]
for i in range(collection_num):
name = utils.get_unique_name(prefix="collection_")
collection_names.append(name)
metric_type = random.choice(["l2", "ip"])
index_file_size = random.randint(10, 20)
milvus_instance.create_collection(name, dimension, index_file_size, metric_type)
milvus_instance = MilvusClient(collection_name=name, host=self.host)
index_type = random.choice(index_types)
milvus_instance.create_index(index_type, index_param=index_param)
logger.info(milvus_instance.describe_index())
insert_vectors = utils.normalize(metric_type, insert_vectors)
milvus_instance.insert(insert_vectors)
milvus_instance.flush()
milvus_instances_map.update({name: milvus_instance})
logger.info(milvus_instance.describe_index())
logger.info(milvus_instance.describe())
# loop time unit: min -> s
pull_interval_seconds = pull_interval * 60
tasks = ["insert_rand", "delete_rand", "query_rand", "flush", "compact"]
i = 1
while True:
logger.info("Loop time: %d" % i)
start_time = time.time()
while time.time() - start_time < pull_interval_seconds:
if concurrent:
mp = []
for _ in range(concurrent_num):
tmp_collection_name = random.choice(collection_names)
task_name = random.choice(tasks)
mp.append((tmp_collection_name, task_name))
with futures.ThreadPoolExecutor(max_workers=concurrent_num) as executor:
future_results = {executor.submit(getattr(milvus_instances_map[mp[j][0]], mp[j][1])): j for j in range(concurrent_num)}
for future in futures.as_completed(future_results):
future.result()
else:
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#########################################
#
# function: run phenix.refine in parallel and more
# author: <NAME>
# created: 2014/7/8
# updated: 2015/2/17
# updated: 2016/12/10
# updated: 2017/1/5
# updated: 2017/1/25
# updated: 2017/12/14
#
#########################################
import sys, os, signal, termios, fcntl, shutil, glob
import random, ast, re, time, datetime, getopt, yaml
import subprocess as subproc
import multiprocessing
debug = False
if not debug:
def excepthandler(exception_type, exception, traceback):
pass
sys.excepthook = excepthandler
def seed():
'''
generate random seed
'''
l = random.randint(4, 8)
s = "".join(random.sample([chr(i) for i in xrange(48, 58)], l)).lstrip('0')
return s
def prep_ref(prm, n, jobs):
'''
prepare def file for refinement
'''
with open(prm) as fd:
eff = fd.readlines()
if not eff:
return
flag = False
for (i, line) in enumerate(eff):
# edit pdb file path
if line.find(' pdb {') != -1:
ln = eff[i + 1].split(' = ')
pdb = os.path.abspath(ast.literal_eval(ln[1]))
if os.path.exists(pdb):
eff[i + 1] = '%s = \"%s\"\n' % (ln[0], pdb)
else:
raise IOError
continue
# edit mtz file name
if line.find(' xray_data {') != -1:
flag = True
ln = eff[i + 1].split(' = ')
mtz = os.path.abspath(ast.literal_eval(ln[1]))
if os.path.exists(mtz):
eff[i + 1] = '%s = \"%s\"\n' % (ln[0], mtz)
else:
raise IOError
continue
# edit cross-validation data file name
if flag and (line.find(' r_free_flags {') != -1):
ln = eff[i + 1].split(' = ')
mtz = os.path.abspath(ast.literal_eval(ln[1]))
if os.path.exists(mtz):
eff[i + 1] = '%s = \"%s\"\n' % (ln[0], mtz)
else:
raise IOError
continue
# set random seed
if line.find(' random_seed') != -1:
# When n = 0, the random seed is kept unchanged by default (MAKE_ALL_SEEDS = 0).
# When MAKE_ALL_SEEDS = 1, all random seeds will be changed/generated.
if (n == 0) and (os.environ.get('MAKE_ALL_SEEDS', '0') == '0'):
s = eff[i].split(' = ')[1].strip()
else:
s = seed()
eff[i] = ' random_seed = %s\n' % (s)
continue
# set nproc to 1
if line.find(' nproc') != -1:
# number of CPUs
ncpu = multiprocessing.cpu_count()
# nproc for every job to use, set to 1 or unset
nproc = int(os.environ.get('NPROC', 0))
# automatically use maximal number of CPUs for all jobs
if (not nproc) and (os.environ.get('USE_MAXCPU', '1') == '1'):
nproc = ncpu // jobs or 1
eff[i] = ' nproc = %i\n' % (nproc)
break
with open('.temp.def', 'w') as fd:
fd.write(''.join(eff))
return s
def worker(cwd, prm):
'''
worker for running a job
'''
os.chdir(cwd)
# for redirecting output to /dev/null
null = open('/dev/null', 'a')
# --overwrite & --unused_ok to improve robustness &
# compatibility between versions
cmd = ['phenix.refine', prm, '--overwrite', '--unused_ok']
proc = subproc.Popen(cmd, cwd=cwd, stdout=null)
try:
while proc.poll() is None:
time.sleep(5)
except (KeyboardInterrupt, IOError) as e:
return
def dry_run(prm):
'''
check if the param file is OK
'''
null = open('/dev/null', 'a')
cmd = ['phenix.refine', prm, '-n', '--overwrite', '--unused_ok']
proc = subproc.Popen(cmd, stdout=null)
return proc.wait()
def progress(elapse=0):
rotor = ['―', '\\', '|', '/', '―', '\\', '|', '/', '―', '\\', '|', '/']
sys.stdout.write('\n')
while True:
for i in xrange(12):
elapse += 1
mins = elapse // 60
hours = mins // 60
mins = mins % 60
secs = elapse % 60
sys.stdout.write("\rTime elapses: %i:%02i:%02i %s " \
% (hours, mins, secs, rotor[i]))
sys.stdout.flush()
time.sleep(1)
def statistic(prm):
'''
sort the refinement result, save the best,
and modify def for next run
'''
rvals = []
pfx = prm[:-4]
cwd = os.getcwd()
logs = glob.glob('%s/ref-*/%s.log' % (cwd, pfx))
# write refinement log
outf = open(pfx + '.out', 'a')
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
outf.write('# ' + now + '\n')
outf.write('Refinement results:\n')
print '\n\nRefinement results:\n'
for log in logs:
with open(log) as fd:
# read the last line
ln = fd.readlines()[-1]
try:
# extract Rfree factor
rval = re.search('R-free = (.*)$', ln).group(1)
# key = ref-?
key = os.path.split(os.path.dirname(log))[-1]
rvals.append((key, float(rval)))
outf.write(key + ": " + ln)
print '/'.join(log.split('/')[-2:]) + ':\n' + ln
except:
pass
# the lowest Rfree factor is the best
best = sorted(rvals, key=lambda x: x[1], reverse=False)[0][0]
src = '%s/%s/' % (cwd, best)
pth = '%s/%s_' % (cwd, pfx)
# make target dir for the best result
i = 1
while True:
dest = pth + str(i)
if not os.path.exists(dest):
os.mkdir(dest)
break
else:
i += 1
outf.write("Best result saved in `%s' \n" % (os.path.basename(dest)))
outf.write('\n')
outf.close()
# copy files of the best result to target dir
for ext in ['.mtz', '.pdb', '.log']:
shutil.copy(src + pfx + ext, dest)
for f in glob.glob(src + "*.def"):
shutil.copy(f, dest)
# modify the def file for next run
defs = glob.glob(dest + '/*.def')
if len(defs) == 2:
defs.sort()
next_def = defs[-1]
with open(next_def, 'r+') as fd:
# replace only 1 time
cont = fd.read().replace(best.join(['/'] * 2),
os.path.split(dest)[-1].join(['/'] * 2),
1).replace('.pdb', '-coot-0.pdb', 1)
fd.seek(0)
fd.write(cont)
return True
def conf_parser(argv):
'''
parse config data
'''
prm = ''
jobs = 4
status = 0
# parse conf.yaml file first
conf_pth = os.path.expanduser('~/.paref.yaml')
try:
# load config into environment variables
if os.path.exists(conf_pth):
with open(conf_pth) as f:
conf = yaml.load(f)
for key, value in conf.iteritems():
os.environ[key] = str(value)
except:
print "\x1b[31m[ERROR]\x1b[m wrong in parsing `~/.paref.yaml'!"
status = -2
# then parse argument variables
try:
optlist, args = getopt.getopt(argv, 'j:ah',
['jobs=', 'make-all-seeds', 'help'])
for (key, value) in optlist:
if key in ('-j', '--jobs'):
if value.isdigit():
# number of jobs
jobs = int(value)
if jobs < 1:
print "\x1b[31m[ERROR]\x1b[m number of jobs must > 0!"
status = 1
else:
print "\x1b[31m[ERROR]\x1b[m wrong number of jobs given!"
status = 2
if key in ('-a', '--make-all-seeds'):
os.environ['MAKE_ALL_SEEDS'] = '1'
if key in ('-h', '--help'):
return None, None, -1
if args:
prm = args[0]
if not (prm[-3:].lower() in ['def', 'eff'] and os.path.exists(prm)):
print "\x1b[31m[ERROR]\x1b[m wrong/no parameter file given!"
status = 1
except getopt.GetoptError:
status = -3
return prm, jobs, status
def notify(jobname):
'''
Display notification
'''
if os.uname()[0] != 'Darwin':
return
notifier = '/Applications/terminal-notifier.app/Contents/MacOS/terminal-notifier'
icon = '/Applications/terminal-notifier.app/Contents/Resources/phenix.icns'
if os.path.exists(notifier):
cmd = '''%s -title "PHENIX parallel refinement" \
-message "Refinement jobs (%s) finished" \
-sound "Glass" -contentImage "%s" &''' \
%(notifier, jobname, icon)
else:
cmd = '''osascript -e "display notification \
\\"Refinement jobs (%s) finished\\" \
with title \\"PHENIX parallel refinement\\"" &''' \
%(jobname)
os.system(cmd)
return
def usage():
print "###################################################\n" \
" A toolkit for running phenix.refine in parallel\n" \
"###################################################\n" \
"Usage: paref.py [options] param.{def,eff}\n\n" \
"Options:\n" \
" --jobs= | -j : number of jobs, default is 4.\n" \
" --make-all-seeds | -a : make all seeds, default is to keep the first one.\n" \
" --help | -h : show this help information.\n" \
" more options can be configured in `~/.paref.yaml'.\n"
def info():
print "Type `paref.py -h' for help information."
def main():
'''
start parallel processes and count time
'''
# disable traceback upon keyboard interrupt
signal.signal(signal.SIGINT, lambda x, y: sys.exit(1))
prm, jobs, status = conf_parser(sys.argv[1:])
if status != 0:
return status
if os.environ.get('CHECK_PRM_FILE', '1') == '1':
print "Checking the parameter file...",
sys.stdout.flush()
if dry_run(prm) != 0:
print "\n\x1b[31m[ERROR]\x1b[m There's something wrong with the parameter file!"
return 0
print "OK\n"
print "Starting %i jobs of phenix.refine in parallel...\n" % (jobs)
isOK = False
procs = []
pwd = os.getcwd()
for n in xrange(jobs):
# make dir and copy files
cwd = "%s/ref-%i" % (pwd, n)
if not os.path.exists(cwd):
os.mkdir(cwd)
# prepare def file
s = prep_ref(prm, n, jobs)
shutil.copy('.temp.def', cwd + '/' + prm)
# start a process for refinement
print " ref-%i: seed = %s" % (n, s)
proc = multiprocessing.Process(target=worker, args=(cwd, prm))
proc.start()
procs.append(proc)
time.sleep(0.1)
# start a process for monitoring progress of running
prog = multiprocessing.Process(target=progress)
prog.start()
# disabling keyboard input
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldflags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
# check the whole progress
try:
while True:
for proc in procs:
proc.join(1) # wait for 1 second
if not proc.is_alive():
procs.remove(proc)
if not procs:
try:
prog.terminate()
prog.join()
except:
pass
isOK = True
break
except KeyboardInterrupt:
pass
finally:
# restore terminal properties
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
fcntl.fcntl(fd, fcntl.F_SETFL, | |
<filename>blender/arm/write_data.py<gh_stars>0
import glob
import json
import os
import shutil
import stat
import html
from typing import List
import bpy
import arm.assets as assets
import arm.make_state as state
import arm.utils
# Armory script hot-reload support: on reload, re-resolve the submodules this
# module captured at import time; on first import, register it for reloading.
if arm.is_reload(__name__):
    import arm
    assets = arm.reload_module(assets)
    state = arm.reload_module(state)
    arm.utils = arm.reload_module(arm.utils)
else:
    arm.enable_reload(__name__)
def on_same_drive(path1: str, path2: str) -> bool:
    """Return True when both paths share the same drive component.

    On POSIX systems ``os.path.splitdrive`` always yields an empty drive,
    so this is trivially True there; it only discriminates on Windows.
    """
    return os.path.splitdrive(path1)[0] == os.path.splitdrive(path2)[0]
def add_armory_library(sdk_path: str, name: str, rel_path=False) -> str:
    """Return a khafile ``project.addLibrary(...)`` line for SDK library `name`.

    With `rel_path` the SDK location is rewritten relative to the project
    root. Backslashes and doubled slashes are normalized in the final line.
    """
    if rel_path:
        sdk_path = '../' + os.path.relpath(sdk_path, arm.utils.get_fp()).replace('\\', '/')
    line = 'project.addLibrary("{0}/{1}");\n'.format(sdk_path, name)
    return line.replace('\\', '/').replace('//', '/')
def add_assets(path: str, quality=1.0, use_data_dir=False, rel_path=False) -> str:
    """Return a khafile ``project.addAssets(...)`` line for a single asset.

    `quality` below 1.0 is forwarded as a khamake quality option and
    `use_data_dir` redirects the asset into the published data/ folder.
    """
    # When minification is off, .arm files are exported as .json instead.
    if not bpy.data.worlds['Arm'].arm_minimize and path.endswith('.arm'):
        path = path[:-4] + '.json'
    if rel_path:
        path = os.path.relpath(path, arm.utils.get_fp()).replace('\\', '/')
    notinlist = not path.endswith('.ttf') # TODO
    parts = ['project.addAssets("', path, '", { notinlist: ', str(notinlist).lower(), ' ']
    if quality < 1.0:
        parts.append(', quality: ' + str(quality))
    if use_data_dir:
        # "{name}" is a khamake placeholder, not a Python format field.
        parts.append(', destination: "data/{name}"')
    parts.append('});\n')
    return ''.join(parts)
def add_shaders(path: str, rel_path=False) -> str:
    """Return a khafile ``project.addShaders(...)`` line for `path`."""
    if rel_path:
        path = os.path.relpath(path, arm.utils.get_fp())
    normalized = path.replace('\\', '/').replace('//', '/')
    return 'project.addShaders("{0}");\n'.format(normalized)
def remove_readonly(func, path, excinfo):
    """shutil.rmtree ``onerror`` handler: clear the read-only bit and retry.

    `func` is the failed os function, `path` its argument; `excinfo` is the
    exception info tuple (unused — we unconditionally retry after chmod).
    """
    os.chmod(path, stat.S_IWRITE)
    func(path)
def write_khafilejs(is_play, export_physics: bool, export_navigation: bool, export_ui: bool, is_publish: bool,
                    import_traits: List[str]) -> None:
    """Generate the project's khafile.js build descriptor.

    Registers sources, bundled assets, shaders, engine and third-party
    libraries, khamake defines/parameters, and per-target options based on
    the Armory world settings and the current build target, then writes the
    result to ``khafile.js`` in the current working directory.

    Args:
        is_play: accepted for the caller's interface; not used in this body.
        export_physics: include the selected physics backend and its runtime.
        export_navigation: include the recast navigation library.
        export_ui: include the zui library and default font.
        is_publish: emit release-oriented defines/parameters.
        import_traits: trait class paths to force-compile and keep.
    """
    wrd = bpy.data.worlds['Arm']
    sdk_path = arm.utils.get_sdk_path()
    rel_path = arm.utils.get_relative_paths() # Convert absolute paths to relative
    project_path = arm.utils.get_fp()
    build_dir = arm.utils.build_dir()
    # Whether to use relative paths for paths inside the SDK
    do_relpath_sdk = rel_path and on_same_drive(sdk_path, project_path)
    with open('khafile.js', 'w', encoding="utf-8") as khafile:
        khafile.write(
            """// Auto-generated
let project = new Project('""" + arm.utils.safesrc(wrd.arm_project_name + '-' + wrd.arm_project_version) + """');
project.addSources('Sources');
""")
        # Auto-add assets located in Bundled directory
        if os.path.exists('Bundled'):
            for file in glob.glob("Bundled/**", recursive=True):
                if os.path.isfile(file):
                    assets.add(file)
        # Auto-add shape key textures if exists
        if os.path.exists('MorphTargets'):
            for file in glob.glob("MorphTargets/**", recursive=True):
                if os.path.isfile(file):
                    assets.add(file)
        # Add project shaders
        if os.path.exists('Shaders'):
            # Copy to enable includes
            shader_path = os.path.join(build_dir, 'compiled', 'Shaders', 'Project')
            if os.path.exists(shader_path):
                shutil.rmtree(shader_path, onerror=remove_readonly)
            shutil.copytree('Shaders', shader_path)
            khafile.write("project.addShaders('" + build_dir + "/compiled/Shaders/Project/**');\n")
            # for file in glob.glob("Shaders/**", recursive=True):
            #     if os.path.isfile(file):
            #         assets.add_shader(file)
        # Add engine sources if the project does not use its own armory/iron versions
        if not os.path.exists(os.path.join('Libraries', 'armory')):
            khafile.write(add_armory_library(sdk_path, 'armory', rel_path=do_relpath_sdk))
        if not os.path.exists(os.path.join('Libraries', 'iron')):
            khafile.write(add_armory_library(sdk_path, 'iron', rel_path=do_relpath_sdk))
        # Project libraries
        if os.path.exists('Libraries'):
            libs = os.listdir('Libraries')
            for lib in libs:
                if os.path.isdir('Libraries/' + lib):
                    khafile.write('project.addLibrary("{0}");\n'.format(lib.replace('//', '/')))
        # Subprojects, merge this with libraries
        if os.path.exists('Subprojects'):
            libs = os.listdir('Subprojects')
            for lib in libs:
                if os.path.isdir('Subprojects/' + lib):
                    khafile.write('await project.addProject("Subprojects/{0}");\n'.format(lib))
        if state.target.startswith('krom'):
            assets.add_khafile_def('js-es=6')
        # Physics backend selection plus its JS/wasm runtime assets
        if export_physics:
            assets.add_khafile_def('arm_physics')
            if wrd.arm_physics_engine == 'Bullet':
                assets.add_khafile_def('arm_bullet')
                if not os.path.exists('Libraries/haxebullet'):
                    khafile.write(add_armory_library(sdk_path + '/lib/', 'haxebullet', rel_path=do_relpath_sdk))
                if state.target.startswith('krom'):
                    ammojs_path = sdk_path + '/lib/haxebullet/ammo/ammo.wasm.js'
                    ammojs_path = ammojs_path.replace('\\', '/').replace('//', '/')
                    khafile.write(add_assets(ammojs_path, rel_path=do_relpath_sdk))
                    ammojs_wasm_path = sdk_path + '/lib/haxebullet/ammo/ammo.wasm.wasm'
                    ammojs_wasm_path = ammojs_wasm_path.replace('\\', '/').replace('//', '/')
                    khafile.write(add_assets(ammojs_wasm_path, rel_path=do_relpath_sdk))
                elif state.target == 'html5' or state.target == 'node':
                    ammojs_path = sdk_path + '/lib/haxebullet/ammo/ammo.js'
                    ammojs_path = ammojs_path.replace('\\', '/').replace('//', '/')
                    khafile.write(add_assets(ammojs_path, rel_path=do_relpath_sdk))
            elif wrd.arm_physics_engine == 'Oimo':
                assets.add_khafile_def('arm_oimo')
                if not os.path.exists('Libraries/oimo'):
                    khafile.write(add_armory_library(sdk_path + '/lib/', 'oimo', rel_path=do_relpath_sdk))
        # Recast navigation library and its JS runtime
        if export_navigation:
            assets.add_khafile_def('arm_navigation')
            if not os.path.exists('Libraries/haxerecast'):
                khafile.write(add_armory_library(sdk_path + '/lib/', 'haxerecast', rel_path=do_relpath_sdk))
            if state.target.startswith('krom') or state.target == 'html5':
                recastjs_path = sdk_path + '/lib/haxerecast/js/recast/recast.js'
                recastjs_path = recastjs_path.replace('\\', '/').replace('//', '/')
                khafile.write(add_assets(recastjs_path, rel_path=do_relpath_sdk))
        # Release vs. development defines
        if is_publish:
            assets.add_khafile_def('arm_published')
            if wrd.arm_dce:
                khafile.write("project.addParameter('-dce full');\n")
            if wrd.arm_no_traces:
                khafile.write("project.addParameter('--no-traces');\n")
            if wrd.arm_asset_compression:
                assets.add_khafile_def('arm_compress')
        else:
            assets.add_khafile_def(f'arm_assert_level={wrd.arm_assert_level}')
            if wrd.arm_assert_quit:
                assets.add_khafile_def('arm_assert_quit')
        # khafile.write("""project.addParameter("--macro include('armory.trait')");\n""")
        # khafile.write("""project.addParameter("--macro include('armory.trait.internal')");\n""")
        # if export_physics:
        #     khafile.write("""project.addParameter("--macro include('armory.trait.physics')");\n""")
        #     if wrd.arm_physics_engine == 'Bullet':
        #         khafile.write("""project.addParameter("--macro include('armory.trait.physics.bullet')");\n""")
        #     else:
        #         khafile.write("""project.addParameter("--macro include('armory.trait.physics.oimo')");\n""")
        # if export_navigation:
        #     khafile.write("""project.addParameter("--macro include('armory.trait.navigation')");\n""")
        if not wrd.arm_compiler_inline:
            khafile.write("project.addParameter('--no-inline');\n")
        use_live_patch = arm.utils.is_livepatch_enabled()
        if wrd.arm_debug_console or use_live_patch:
            import_traits.append('armory.trait.internal.Bridge')
            if use_live_patch:
                assets.add_khafile_def('arm_patch')
                # Include all logic node classes so that they can later
                # get instantiated
                khafile.write("""project.addParameter("--macro include('armory.logicnode')");\n""")
        # Force-compile and keep the requested trait classes
        import_traits = list(set(import_traits))
        for i in range(0, len(import_traits)):
            khafile.write("project.addParameter('" + import_traits[i] + "');\n")
            khafile.write("""project.addParameter("--macro keep('""" + import_traits[i] + """')");\n""")
        noembed = wrd.arm_cache_build and not is_publish and state.target == 'krom'
        if noembed:
            # Load shaders manually
            assets.add_khafile_def('arm_noembed')
        noembed = False # TODO: always embed shaders for now, check compatibility with haxe compile server
        shaders_path = build_dir + '/compiled/Shaders/*.glsl'
        if rel_path:
            shaders_path = os.path.relpath(shaders_path, project_path).replace('\\', '/')
        khafile.write('project.addShaders("' + shaders_path + '", { noembed: ' + str(noembed).lower() + '});\n')
        if arm.utils.get_gapi() == 'direct3d11':
            # noprocessing flag - gets renamed to .d3d11
            shaders_path = build_dir + '/compiled/Hlsl/*.glsl'
            if rel_path:
                shaders_path = os.path.relpath(shaders_path, project_path).replace('\\', '/')
            khafile.write('project.addShaders("' + shaders_path + '", { noprocessing: true, noembed: ' + str(noembed).lower() + ' });\n')
        # Move assets for published game to /data folder
        use_data_dir = is_publish and (state.target == 'krom-windows' or state.target == 'krom-linux' or state.target == 'windows-hl' or state.target == 'linux-hl')
        if use_data_dir:
            assets.add_khafile_def('arm_data_dir')
        ext = 'arm' if wrd.arm_minimize else 'json'
        assets_path = build_dir + '/compiled/Assets/**'
        assets_path_sh = build_dir + '/compiled/Shaders/*.' + ext
        if rel_path:
            assets_path = os.path.relpath(assets_path, project_path).replace('\\', '/')
            assets_path_sh = os.path.relpath(assets_path_sh, project_path).replace('\\', '/')
        dest = ''
        if use_data_dir:
            dest += ', destination: "data/{name}"'
        khafile.write('project.addAssets("' + assets_path + '", { notinlist: true ' + dest + '});\n')
        khafile.write('project.addAssets("' + assets_path_sh + '", { notinlist: true ' + dest + '});\n')
        # Deduplicated shader data references collected during export
        shader_data_references = sorted(list(set(assets.shader_datas)))
        for ref in shader_data_references:
            ref = ref.replace('\\', '/').replace('//', '/')
            if '/compiled/' in ref: # Asset already included
                continue
            do_relpath_shaders = rel_path and on_same_drive(ref, project_path)
            khafile.write(add_assets(ref, use_data_dir=use_data_dir, rel_path=do_relpath_shaders))
        # Deduplicated plain asset references, with per-type quality settings
        asset_references = sorted(list(set(assets.assets)))
        for ref in asset_references:
            ref = ref.replace('\\', '/').replace('//', '/')
            if '/compiled/' in ref: # Asset already included
                continue
            quality = 1.0
            s = ref.lower()
            if s.endswith('.wav'):
                quality = wrd.arm_sound_quality
            elif s.endswith('.png') or s.endswith('.jpg'):
                quality = wrd.arm_texture_quality
            do_relpath_assets = rel_path and on_same_drive(ref, project_path)
            khafile.write(add_assets(ref, quality=quality, use_data_dir=use_data_dir, rel_path=do_relpath_assets))
        if wrd.arm_sound_quality < 1.0 or state.target == 'html5':
            assets.add_khafile_def('arm_soundcompress')
        if wrd.arm_audio == 'Disabled':
            assets.add_khafile_def('kha_no_ogg')
        else:
            assets.add_khafile_def('arm_audio')
        if wrd.arm_texture_quality < 1.0:
            assets.add_khafile_def('arm_texcompress')
        if wrd.arm_debug_console:
            assets.add_khafile_def('arm_debug')
            khafile.write(add_shaders(sdk_path + "/armory/Shaders/debug_draw/**", rel_path=do_relpath_sdk))
        if not is_publish and state.target == 'html5':
            khafile.write("project.addParameter('--debug');\n")
        if arm.utils.get_pref_or_default('haxe_times', False):
            khafile.write("project.addParameter('--times');\n")
        if export_ui:
            if not os.path.exists('Libraries/zui'):
                khafile.write(add_armory_library(sdk_path, 'lib/zui', rel_path=do_relpath_sdk))
            p = sdk_path + '/armory/Assets/font_default.ttf'
            p = p.replace('//', '/')
            khafile.write(add_assets(p.replace('\\', '/'), use_data_dir=use_data_dir, rel_path=do_relpath_sdk))
            assets.add_khafile_def('arm_ui')
        if not wrd.arm_minimize:
            assets.add_khafile_def('arm_json')
        if wrd.arm_deinterleaved_buffers:
            assets.add_khafile_def('arm_deinterleaved')
        if wrd.arm_batch_meshes:
            assets.add_khafile_def('arm_batch')
        if wrd.arm_stream_scene:
            assets.add_khafile_def('arm_stream')
        # Render-path driven feature defines
        rpdat = arm.utils.get_rp()
        if rpdat.arm_skin != 'Off':
            assets.add_khafile_def('arm_skin')
        if rpdat.arm_morph_target != 'Off':
            assets.add_khafile_def('arm_morph_target')
        if rpdat.arm_particles != 'Off':
            assets.add_khafile_def('arm_particles')
        if rpdat.rp_draw_order == 'Shader':
            assets.add_khafile_def('arm_draworder_shader')
        if arm.utils.get_viewport_controls() == 'azerty':
            assets.add_khafile_def('arm_azerty')
        if os.path.exists(project_path + '/Bundled/config.arm'):
            assets.add_khafile_def('arm_config')
        if is_publish and wrd.arm_loadscreen:
            assets.add_khafile_def('arm_loadscreen')
        if wrd.arm_winresize or state.target == 'html5':
            assets.add_khafile_def('arm_resizable')
        # if bpy.data.scenes[0].unit_settings.system_rotation == 'DEGREES':
        #     assets.add_khafile_def('arm_degrees')
        # Allow libraries to recognize Armory
        assets.add_khafile_def('armory')
        # Flush all collected defines and parameters into the khafile
        for d in assets.khafile_defs:
            khafile.write("project.addDefine('" + d + "');\n")
        for p in assets.khafile_params:
            khafile.write("project.addParameter('" + p + "');\n")
        # Per-target options (package ids, SDK versions, permissions, ABIs)
        if state.target.startswith('android'):
            bundle = 'org.armory3d.' + wrd.arm_project_package if wrd.arm_project_bundle == '' else wrd.arm_project_bundle
            khafile.write("project.targetOptions.android_native.package = '{0}';\n".format(arm.utils.safestr(bundle)))
            if wrd.arm_winorient != 'Multi':
                khafile.write("project.targetOptions.android_native.screenOrientation = '{0}';\n".format(wrd.arm_winorient.lower()))
            # Android SDK Versions
            khafile.write("project.targetOptions.android_native.compileSdkVersion = '{0}';\n".format(wrd.arm_project_android_sdk_compile))
            khafile.write("project.targetOptions.android_native.minSdkVersion = '{0}';\n".format(wrd.arm_project_android_sdk_min))
            khafile.write("project.targetOptions.android_native.targetSdkVersion = '{0}';\n".format(wrd.arm_project_android_sdk_target))
            # Permissions
            if len(wrd.arm_exporter_android_permission_list) > 0:
                perms = ''
                for item in wrd.arm_exporter_android_permission_list:
                    perm = "'android.permission."+ item.arm_android_permissions + "'"
                    # Checking In
                    if perms.find(perm) == -1:
                        if len(perms) > 0:
                            perms = perms + ', ' + perm
                        else:
                            perms = perm
                if len(perms) > 0:
                    khafile.write("project.targetOptions.android_native.permissions = [{0}];\n".format(perms))
            # Android ABI Filters
            if len(wrd.arm_exporter_android_abi_list) > 0:
                abis = ''
                for item in wrd.arm_exporter_android_abi_list:
                    abi = "'"+ item.arm_android_abi + "'"
                    # Checking In
                    if abis.find(abi) == -1:
                        if len(abis) > 0:
                            abis = abis + ', ' + abi
                        else:
                            abis = abi
                if len(abis) > 0:
                    khafile.write("project.targetOptions.android_native.abiFilters = [{0}];\n".format(abis))
        elif state.target.startswith('ios'):
            bundle = 'org.armory3d.' + wrd.arm_project_package if wrd.arm_project_bundle == '' else wrd.arm_project_bundle
            khafile.write("project.targetOptions.ios.bundle = '{0}';\n".format(arm.utils.safestr(bundle)))
        if wrd.arm_project_icon != '':
            shutil.copy(bpy.path.abspath(wrd.arm_project_icon), project_path + '/icon.png')
        # User-provided extra khafile text block, appended verbatim
        if wrd.arm_khafile is not None:
            khafile.write(wrd.arm_khafile.as_string())
        khafile.write("\n\nresolve(project);\n")
def get_winmode(arm_winmode):
    """Map the window-mode enum string to Kha's numeric code.

    Returns 0 for 'Window'; any other value is treated as fullscreen (1).
    """
    return 0 if arm_winmode == 'Window' else 1
def write_config(resx, resy):
    """Write Bundled/config.arm with window and render-path runtime settings."""
    wrd = bpy.data.worlds['Arm']
    bundled_dir = os.path.join(arm.utils.get_fp(), 'Bundled')
    if not os.path.exists(bundled_dir):
        os.makedirs(bundled_dir)
    rpdat = arm.utils.get_rp()
    # Shadow map sizes are only meaningful when shadows are enabled.
    if rpdat.rp_shadows:
        shadowmap_cube = int(rpdat.rp_shadowmap_cube)
        shadowmap_cascade = arm.utils.get_cascade_size(rpdat)
    else:
        shadowmap_cube = 0
        shadowmap_cascade = 0
    config = {
        'window_mode': get_winmode(wrd.arm_winmode),
        'window_w': int(resx),
        'window_h': int(resy),
        'window_resizable': wrd.arm_winresize,
        'window_maximizable': wrd.arm_winresize and wrd.arm_winmaximize,
        'window_minimizable': wrd.arm_winminimize,
        'window_vsync': wrd.arm_vsync,
        'window_msaa': int(rpdat.arm_samples_per_pixel),
        'window_scale': 1.0,
        'rp_supersample': float(rpdat.rp_supersampling),
        'rp_shadowmap_cube': shadowmap_cube,
        'rp_shadowmap_cascade': shadowmap_cascade,
        'rp_ssgi': rpdat.rp_ssgi != 'Off',
        'rp_ssr': rpdat.rp_ssr != 'Off',
        'rp_bloom': rpdat.rp_bloom != 'Off',
        'rp_motionblur': rpdat.rp_motionblur != 'Off',
        'rp_gi': rpdat.rp_voxelao,
        'rp_dynres': rpdat.rp_dynres
    }
    with open(os.path.join(bundled_dir, 'config.arm'), 'w') as out:
        out.write(json.dumps(config, sort_keys=True, indent=4))
def | |
b'h si d') # like b'd is h' but reverted
self.assertEqual(bytes(c2), b'h si d')
self.assertEqual(c2.size, (6,))
self.assertEqual(c2.stride, (-2,))
    def test_convert_memoryview(self):
        """memoryview over a 1D view references the view itself, keeps one
        extra reference to the backing bytes, and is read-only."""
        a = b'World is hell!'
        a_refcount = sys.getrefcount(a)
        b = containers.StridedArrayView1D(a)
        b_refcount = sys.getrefcount(b)
        # Constructing the view grabbed exactly one reference to a
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        c = memoryview(b)
        self.assertEqual(c.ndim, 1)
        self.assertEqual(len(c), len(a))
        self.assertEqual(bytes(c), a)
        # Unlike slicing, StridedArrayView's buffer protocol returns a
        # reference to itself and not the underlying buffer -- it needs to be
        # kept around because the Py_buffer refers to its internals for size.
        # Also returning a reference to the underlying buffer would mean the
        # underlying buffer's releasebuffer function gets called instead of
        # ours which is *not* wanted.
        self.assertIs(c.obj, b)
        self.assertEqual(sys.getrefcount(b), b_refcount + 1)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        with self.assertRaisesRegex(TypeError, "cannot modify read-only memory"):
            c[-1] = ord('?')
def test_convert_mutable_memoryview(self):
a = bytearray(b'World is hell!')
b = memoryview(containers.MutableStridedArrayView1D(a))
b[-1] = ord('?')
self.assertEqual(a, b'World is hell?')
class StridedArrayView2D(unittest.TestCase):
    """Tests for containers.(Mutable)StridedArrayView2D: construction from
    buffers, slicing (including negative strides), transpose/flip/broadcast
    ops, buffer-protocol conversion and reference counting."""
    def test_init(self):
        """Default-constructed views are empty, unowned, two-dimensional."""
        a = containers.StridedArrayView2D()
        b = containers.MutableStridedArrayView2D()
        self.assertIs(a.owner, None)
        self.assertIs(b.owner, None)
        self.assertEqual(len(a), 0)
        self.assertEqual(len(b), 0)
        self.assertEqual(bytes(a), b'')
        self.assertEqual(bytes(b), b'')
        self.assertEqual(a.size, (0, 0))
        self.assertEqual(b.size, (0, 0))
        self.assertEqual(a.stride, (0, 0))
        self.assertEqual(b.stride, (0, 0))
        self.assertEqual(a.dimensions, 2)
        self.assertEqual(b.dimensions, 2)
    def test_init_buffer(self):
        """A 3x8 memoryview wraps with matching size/stride and holds a reference."""
        a = (b'01234567'
             b'456789ab'
             b'89abcdef')
        a_refcount = sys.getrefcount(a)
        v = memoryview(a).cast('b', shape=[3, 8])
        b = containers.StridedArrayView2D(v)
        self.assertEqual(len(b), 3)
        self.assertEqual(bytes(b), b'01234567'
                                   b'456789ab'
                                   b'89abcdef')
        self.assertEqual(b.size, (3, 8))
        self.assertEqual(b.stride, (8, 1))
        self.assertIsInstance(b[1], containers.StridedArrayView1D)
        self.assertEqual(bytes(b[1]), b'456789ab')
        self.assertEqual(b[1, 2], '6')
        self.assertEqual(b[1][2], '6')
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        # Not mutable
        with self.assertRaisesRegex(TypeError, "object does not support item assignment"):
            b[1, 2] = '!'
        # b should keep a reference to a, so deleting the local reference
        # shouldn't affect it
        del a
        self.assertTrue(sys.getrefcount(b.owner), a_refcount)
        self.assertEqual(b[1][2], '6')
        # Now, if we delete b, a should not be referenced by anything anymore
        a = b.owner
        del b
        self.assertTrue(sys.getrefcount(a), a_refcount)
    def test_init_buffer_mutable(self):
        """Element writes through the mutable view reach the backing bytearray."""
        a = bytearray(b'01234567'
                      b'456789ab'
                      b'89abcdef')
        b = containers.MutableStridedArrayView2D(memoryview(a).cast('b', shape=[3, 8]))
        b[0, 7] = '!'
        b[1, 7] = '!'
        b[2, 7] = '!'
        self.assertEqual(b[0][7], '!')
        self.assertEqual(bytes(b), b'0123456!'
                                   b'456789a!'
                                   b'89abcde!')
    def test_init_buffer_unexpected_dimensions(self):
        """Wrapping a 1D buffer in a 2D view raises BufferError."""
        a = b'123456'
        with self.assertRaisesRegex(BufferError, "expected 2 dimensions but got 1"):
            b = containers.StridedArrayView2D(a)
    def test_init_buffer_stride(self):
        """A strided (row-skipping) memoryview is preserved by the view."""
        a = memoryview(b'01234567'
                       b'456789ab'
                       b'89abcdef').cast('b', shape=[3, 8])[::2]
        self.assertEqual(bytes(a), b'0123456789abcdef')
        b = containers.StridedArrayView2D(a)
        self.assertEqual(len(b), 2)
        self.assertEqual(bytes(b), b'0123456789abcdef')
        self.assertEqual(b.size, (2, 8))
        self.assertEqual(b.stride, (16, 1))
        self.assertEqual(bytes(b[1]), b'89abcdef')
        self.assertEqual(b[1][3], 'b')
    def test_init_buffer_mutable_from_immutable(self):
        """A mutable view over read-only memory raises BufferError."""
        a = memoryview(b'01234567'
                       b'456789ab'
                       b'89abcdef').cast('b', shape=[3, 8])
        with self.assertRaisesRegex(BufferError, "underlying buffer is not writable"):
            b = containers.MutableStridedArrayView2D(a)
    def test_slice(self):
        """Slicing returns a new 2D view referencing the original buffer."""
        a = memoryview(b'01234567'
                       b'456789ab'
                       b'89abcdef').cast('b', shape=[3, 8])
        a_refcount = sys.getrefcount(a)
        b = containers.StridedArrayView2D(a)
        b_refcount = sys.getrefcount(b)
        # memoryview's buffer protocol returns itself, not the underlying
        # bytes, as it manages the Py_buffer instance. So this is expected.
        self.assertIs(b.owner, a)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        # When slicing, b's refcount should not change but a's refcount should
        # increase
        c = b[0:-1]
        self.assertIsInstance(c, containers.StridedArrayView2D)
        self.assertEqual(c.size, (2, 8))
        self.assertEqual(c.stride, (8, 1))
        self.assertEqual(bytes(c), b'01234567456789ab')
        self.assertEqual(sys.getrefcount(b), b_refcount)
        self.assertEqual(sys.getrefcount(a), a_refcount + 2)
        # Deleting a slice should reduce a's refcount again, keep b's unchanged
        del c
        self.assertEqual(sys.getrefcount(b), b_refcount)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
    def test_slice_multidimensional(self):
        """Tuple slicing restricts both dimensions at once."""
        a = memoryview(b'01234567'
                       b'456789ab'
                       b'89abcdef').cast('b', shape=[3, 8])
        a_refcount = sys.getrefcount(a)
        b = containers.StridedArrayView2D(a)
        b_refcount = sys.getrefcount(b)
        # memoryview's buffer protocol returns itself, not the underlying
        # bytes, as it manages the Py_buffer instance. So this is expected.
        self.assertIs(b.owner, a)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        # When slicing, b's refcount should not change but a's refcount should
        # increase
        c = b[1:3,4:7]
        self.assertIsInstance(c, containers.StridedArrayView2D)
        self.assertEqual(c.size, (2, 3))
        self.assertEqual(c.stride, (8, 1))
        self.assertEqual(bytes(c[0]), b'89a')
        self.assertEqual(bytes(c[1]), b'cde')
        self.assertEqual(sys.getrefcount(b), b_refcount)
        self.assertEqual(sys.getrefcount(a), a_refcount + 2)
        # Deleting a slice should reduce a's refcount again, keep b's unchanged
        del c
        self.assertEqual(sys.getrefcount(b), b_refcount)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
    def test_slice_multidimensional_empty(self):
        """An empty slice has no owner and adds no reference."""
        a = memoryview(b'01234567'
                       b'456789ab'
                       b'89abcdef').cast('b', shape=[3, 8])
        a_refcount = sys.getrefcount(a)
        b = containers.StridedArrayView2D(a)[1:1,2:2]
        self.assertEqual(b.size, (0, 0))
        # Empty view, original data not referenced at all
        self.assertIs(b.owner, None)
        self.assertEqual(sys.getrefcount(a), a_refcount)
    def test_slice_invalid(self):
        """A zero slice step is rejected."""
        with self.assertRaisesRegex(ValueError, "slice step cannot be zero"):
            containers.StridedArrayView1D()[-5:3:0]
    def test_slice_stride(self):
        """Stepped slicing matches memoryview semantics."""
        a = (b'01234567'
             b'456789ab'
             b'89abcdef')
        v = memoryview(a).cast('b', shape=[3, 8])
        b = containers.StridedArrayView2D(v)
        # Check consistency with slices on memoryview
        c1 = v[0:3:2]
        c2 = b[0:3:2]
        self.assertEqual(len(c1), 2)
        self.assertEqual(len(c2), 2)
        self.assertIsInstance(c2, containers.StridedArrayView2D)
        self.assertEqual(bytes(c1), b'0123456789abcdef')
        self.assertEqual(bytes(c2), b'0123456789abcdef')
        self.assertEqual(c2.size, (2, 8))
        self.assertEqual(c2.stride, (16, 1))
        self.assertEqual(bytes(c2[1]), b'89abcdef')
    def test_slice_stride_negative(self):
        """Negative-step slicing in the first dimension matches memoryview."""
        a = (b'01234567'
             b'456789ab'
             b'89abcdef')
        v = memoryview(a).cast('b', shape=[3, 8])
        b = containers.StridedArrayView2D(v)
        # Check consistency with slices on memoryview
        self.assertEqual(v.shape, (3, 8))
        self.assertEqual(b.size, (3, 8))
        self.assertEqual(v.strides, (8, 1))
        self.assertEqual(b.stride, (8, 1))
        c1 = v[-1:None:-2] # like [0:3:2] above, but reverted
        c2 = b[-1:None:-2]
        self.assertEqual(len(c1), 2)
        self.assertEqual(len(c2), 2)
        self.assertEqual(bytes(c1), b'89abcdef01234567') # like above but reverted
        self.assertEqual(bytes(c2), b'89abcdef01234567')
        self.assertEqual(c1.shape, (2, 8))
        self.assertEqual(c2.size, (2, 8))
        self.assertEqual(c1.strides, (-16, 1))
        self.assertEqual(c2.stride, (-16, 1))
    def test_slice_stride_negative_multidimensional(self):
        """Multi-dimensional negative-step slicing works where memoryview gives up."""
        a = (b'01234567'
             b'456789ab'
             b'89abcdef')
        v = memoryview(a).cast('b', shape=[3, 8])
        b = containers.StridedArrayView2D(v)
        # Check consistency with slices on memoryview
        self.assertEqual(v.shape, (3, 8))
        self.assertEqual(b.size, (3, 8))
        self.assertEqual(v.strides, (8, 1))
        self.assertEqual(b.stride, (8, 1))
        with self.assertRaises(NotImplementedError):
            c1 = v[-1:None:-2, -2:2:-3] # HAH!
        c2 = b[-1:None:-2, -2:2:-3]
        self.assertEqual(len(c2), 2)
        self.assertEqual(bytes(c2), b'eb63')
        self.assertEqual(c2.size, (2, 2))
        self.assertEqual(c2.stride, (-16, -3))
    def test_ops(self):
        """transposed(), flipped() and broadcasted() produce the expected layouts."""
        a = (b'01234567'
             b'456789ab'
             b'89abcdef')
        v = memoryview(a).cast('b', shape=[3, 8])
        b = containers.StridedArrayView2D(v).transposed(0, 1).flipped(0)
        self.assertEqual(b.size, (8, 3))
        self.assertEqual(b.stride, (-1, 8))
        self.assertEqual(bytes(b), b'7bf6ae59d48c37b26a159048')
        c = containers.StridedArrayView2D(v).transposed(1, 0).flipped(1)
        self.assertEqual(c.size, (8, 3))
        self.assertEqual(c.stride, (1, -8))
        self.assertEqual(bytes(c), b'840951a62b73c84d95ea6fb7')
        d = containers.StridedArrayView2D(v).transposed(0, 1)[3:4].broadcasted(0, 5)
        self.assertEqual(d.size, (5, 3))
        self.assertEqual(d.stride, (0, 8))
        self.assertEqual(bytes(d), b'37b37b37b37b37b')
        d = containers.StridedArrayView2D(v)[:, 3:4].broadcasted(1, 2)
        self.assertEqual(d.size, (3, 2))
        self.assertEqual(d.stride, (8, 0))
        self.assertEqual(bytes(d), b'3377bb')
    def test_convert_memoryview(self):
        """memoryview over a 2D view reports shape/strides and references the view."""
        a = memoryview(b'01234567'
                       b'456789ab'
                       b'89abcdef').cast('b', shape=[3, 8])
        a_refcount = sys.getrefcount(a)
        b = containers.StridedArrayView2D(a)
        b_refcount = sys.getrefcount(b)
        # memoryview's buffer protocol returns itself, not the underlying
        # bytes, as it manages the Py_buffer instance. So this is expected.
        self.assertIs(b.owner, a)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        c = memoryview(b)
        self.assertEqual(c.ndim, 2)
        self.assertEqual(c.shape, (3, 8))
        self.assertEqual(c.strides, (8, 1))
        self.assertIs(c.obj, b)
        self.assertEqual(sys.getrefcount(a), a_refcount + 1)
        self.assertEqual(sys.getrefcount(b), b_refcount + 1)
class StridedArrayView3D(unittest.TestCase):
    """Tests for containers.(Mutable)StridedArrayView3D: buffer construction,
    mutation, and transpose/flip/broadcast operations."""
    def test_init_buffer(self):
        """A 2x3x8 memoryview wraps with matching size, stride and indexing."""
        a = (b'01234567'
             b'456789ab'
             b'89abcdef'
             b'cdef0123'
             b'01234567'
             b'456789ab')
        b = containers.StridedArrayView3D(memoryview(a).cast('b', shape=[2, 3, 8]))
        self.assertEqual(len(b), 2)
        self.assertEqual(bytes(b), b'01234567456789ab89abcdefcdef012301234567456789ab')
        self.assertEqual(b.size, (2, 3, 8))
        self.assertEqual(b.stride, (24, 8, 1))
        self.assertEqual(b[1, 2, 3], '7')
        self.assertEqual(b[1][2][3], '7')
    def test_init_buffer_mutable(self):
        """Element writes through the mutable 3D view reach the bytearray."""
        a = bytearray(b'01234567'
                      b'456789ab'
                      b'89abcdef'
                      b'cdef0123'
                      b'01234567'
                      b'456789ab')
        b = containers.MutableStridedArrayView3D(memoryview(a).cast('b', shape=[2, 3, 8]))
        b[0, 0, 7] = '!'
        b[0, 1, 7] = '!'
        b[0, 2, 7] = '!'
        b[1, 0, 7] = '!'
        b[1, 1, 7] = '!'
        b[1, 2, 7] = '!'
        self.assertEqual(b[1][1][7], '!')
        self.assertEqual(bytes(b), b'0123456!'
                                   b'456789a!'
                                   b'89abcde!'
                                   b'cdef012!'
                                   b'0123456!'
                                   b'456789a!')
    def test_ops(self):
        """transposed(), flipped() and broadcasted() produce the expected layouts."""
        a = (b'01234567'
             b'456789ab'
             b'89abcdef'
             b'cdef0123'
             b'01234567'
             b'456789ab')
        v = memoryview(a).cast('b', shape=[2, 3, 8])
        b = containers.StridedArrayView3D(v).transposed(0, 1).flipped(0)
        self.assertEqual(b.size, (3, 2, 8))
        self.assertEqual(b.stride, (-8, 24, 1))
        self.assertEqual(bytes(b), b'89abcdef456789ab456789ab0123456701234567cdef0123')
        c = containers.StridedArrayView3D(v).transposed(2, 0).flipped(1)
        self.assertEqual(c.size, (8, 3, 2))
        self.assertEqual(c.stride, (1, -8, 24))
        self.assertEqual(bytes(c), b'84400c95511da6622eb7733fc88440d99551eaa662fbb773')
        d = containers.StridedArrayView3D(v).transposed(1, 2)[0:1, 3:5, :].broadcasted(0, 5)
        self.assertEqual(d.size, (5, 2, 3))
        self.assertEqual(d.stride, (0, 1, 8))
        self.assertEqual(bytes(d), b'37b48c37b48c37b48c37b48c37b48c')
        e = containers.StridedArrayView3D(v)[:, 1:2, 3:4].flipped(2).broadcasted(1, 2)
        self.assertEqual(e.size, (2, 2, 1))
        self.assertEqual(e.stride, (24, 0, -1))
        self.assertEqual(bytes(e), b'7733')
        f = containers.StridedArrayView3D(v)[:, :, 0:1].broadcasted(2, 5)
        self.assertEqual(f.size, (2, 3, 5))
        self.assertEqual(f.stride, (24, 8, 0))
        self.assertEqual(bytes(f), b'000004444488888ccccc0000044444')
# This is just a dumb copy of the above with one dimension inserted at the
# second place.
class StridedArrayView4D(unittest.TestCase):
    def test_init_buffer(self):
        """A 2x1x3x8 memoryview wraps with matching size, stride and indexing."""
        a = (b'01234567'
             b'456789ab'
             b'89abcdef'
             b'cdef0123'
             b'01234567'
             b'456789ab')
        b = containers.StridedArrayView4D(memoryview(a).cast('b', shape=[2, 1, 3, 8]))
        self.assertEqual(len(b), 2)
        self.assertEqual(bytes(b), b'01234567456789ab89abcdefcdef012301234567456789ab')
        self.assertEqual(b.size, (2, 1, 3, 8))
        self.assertEqual(b.stride, (24, 24, 8, 1))
        self.assertEqual(b[1, 0, 2, 3], '7')
        self.assertEqual(b[1][0][2][3], '7')
    def test_init_buffer_mutable(self):
        """Element writes through the mutable 4D view reach the bytearray."""
        a = bytearray(b'01234567'
                      b'456789ab'
                      b'89abcdef'
                      b'cdef0123'
                      b'01234567'
                      b'456789ab')
        b = containers.MutableStridedArrayView4D(memoryview(a).cast('b', shape=[2, 1, 3, 8]))
        b[0, 0, 0, 7] = '!'
        b[0, 0, 1, 7] = '!'
        b[0, 0, 2, 7] = '!'
        b[1, 0, 0, 7] = '!'
        b[1, 0, 1, 7] = '!'
        b[1, 0, 2, 7] = '!'
        self.assertEqual(b[1][0][1][7], '!')
        self.assertEqual(bytes(b), b'0123456!'
                                   b'456789a!'
                                   b'89abcde!'
                                   b'cdef012!'
                                   b'0123456!'
                                   b'456789a!')
def test_ops(self):
a = (b'01234567'
b'456789ab'
b'89abcdef'
b'cdef0123'
b'01234567'
b'456789ab')
v = memoryview(a).cast('b', shape=[2, 1, 3, 8])
b = containers.StridedArrayView4D(v).transposed(0, 2).flipped(0)
self.assertEqual(b.size, (3, 1, 2, 8))
self.assertEqual(b.stride, (-8, 24, 24, 1))
self.assertEqual(bytes(b), b'89abcdef456789ab456789ab0123456701234567cdef0123')
c = containers.StridedArrayView4D(v).transposed(3, 0).flipped(2)
self.assertEqual(c.size, (8, 1, 3, 2))
self.assertEqual(c.stride, (1, 24, -8, 24))
self.assertEqual(bytes(c), b'84400c95511da6622eb7733fc88440d99551eaa662fbb773')
d = containers.StridedArrayView4D(v).transposed(2, 3)[0:1, :, 3:5, :].broadcasted(0, 5)
self.assertEqual(d.size, (5, 1, 2, 3))
self.assertEqual(d.stride, | |
<reponame>uw-it-aca/spotseeker_server
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot
import simplejson as json
from decimal import *
from django.test.utils import override_settings
from mock import patch
from spotseeker_server import models
@override_settings(SPOTSEEKER_AUTH_MODULE="spotseeker_server.auth.all_ok")
class SpotSearchDistanceTest(TestCase):
def test_invalid_latitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": "bad_data",
"center_longitude": -40,
"distance": 10,
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with bad latitude"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_invalid_longitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": "30",
"center_longitude": "bad_data",
"distance": "10",
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with bad longitude"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_invalid_height(self):
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": "30",
"center_longitude": -40,
"height_from_sea_level": "bad_data",
"distance": "10",
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with bad height"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_invalid_distance(self):
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": "30",
"center_longitude": "-40",
"distance": "bad_data",
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with bad distance"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_large_longitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{"center_latitude": 30, "center_longitude": 190, "distance": 10},
)
self.assertEquals(
response.status_code,
200,
"Accepts a query with too large longitude",
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_large_latitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{"center_latitude": 100, "center_longitude": -40, "distance": 10},
)
self.assertEquals(
response.status_code,
200,
"Accepts a query with too large latitude",
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_large_negative_latitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{"center_latitude": -100, "center_longitude": -40, "distance": 10},
)
self.assertEquals(
response.status_code,
200,
"Accepts a query with too negative latitude",
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_large_negative_longitude(self):
c = Client()
response = c.get(
"/api/v1/spot",
{"center_latitude": 40, "center_longitude": -190, "distance": 10},
)
self.assertEquals(
response.status_code,
200,
"Accepts a query with too negative longitude",
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_no_params(self):
c = Client()
response = c.get("/api/v1/spot", {})
self.assertEquals(
response.status_code, 200, "Accepts a query with no params"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
def test_distances(self):
# Spots are in the atlantic to make them less likely to collide
# with actual spots
center_lat = 30.000000
center_long = -40.000000
# Inner spots are 10 meters away from the center
# Mid spots are 50 meters away from the center
# Outer spots are 100 meters away from the center
# Far out spots are 120 meters away, at the north
# Creating these from the outside in, so things that sort by
# primary key will give bad results for things that should be
# sorted by distance
for i in range(0, 100):
far_out = Spot.objects.create(
name="Far Out %s" % i,
latitude=Decimal("30.0010779783"),
longitude=Decimal("-40.0"),
)
far_out.save()
outer_top = Spot.objects.create(
name="Outer Top",
latitude=Decimal("30.0008983153"),
longitude=Decimal("-40.0"),
)
outer_top.save()
outer_bottom = Spot.objects.create(
name="Outer Bottom",
latitude=Decimal("29.9991016847"),
longitude=Decimal("-40.0"),
)
outer_bottom.save()
outer_left = Spot.objects.create(
name="Outer Left",
latitude=Decimal("30.0"),
longitude=Decimal("-40.0010372851"),
)
outer_left.save()
outer_right = Spot.objects.create(
name="Outer Right",
latitude=Decimal("30.0"),
longitude=Decimal("-39.9989627149"),
)
outer_right.save()
mid_top = Spot.objects.create(
name="Mid Top",
latitude=Decimal(" 30.0004491576"),
longitude=Decimal("-40.0"),
)
mid_top.save()
mid_bottom = Spot.objects.create(
name="Mid Bottom",
latitude=Decimal("29.9995508424"),
longitude=Decimal("-40.0"),
)
mid_bottom.save()
mid_left = Spot.objects.create(
name="Mid Left",
latitude=Decimal("30.0"),
longitude=Decimal("-40.0005186426"),
)
mid_left.save()
mid_right = Spot.objects.create(
name="Mid Right",
latitude=Decimal("30.0"),
longitude=Decimal("-39.9994813574"),
)
mid_right.save()
inner_top = Spot.objects.create(
name="Inner Top",
latitude=Decimal("30.0000898315"),
longitude=Decimal("-40.0"),
)
inner_top.save()
inner_bottom = Spot.objects.create(
name="Inner Bottom",
latitude=Decimal("29.9999101685"),
longitude=Decimal("-40.0"),
)
inner_bottom.save()
inner_left = Spot.objects.create(
name="Inner Left",
latitude=Decimal("30.0"),
longitude=Decimal("-40.0001037285"),
)
inner_left.save()
inner_right = Spot.objects.create(
name="Inner Right",
latitude=Decimal("30.0"),
longitude=Decimal("-39.9998962715"),
)
inner_right.save()
# Testing to make sure too small of a radius returns nothing
c = Client()
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 1,
},
)
self.assertEquals(
response.status_code, 200, "Accepts a query with no matches"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
self.assertEquals(
response.content.decode(), "[]", "Should return no matches"
)
# Testing the inner ring
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 12,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 4, "Returns 4 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]], 1, "Spot matches a unique inner spot"
)
spot_ids[spot["id"]] = 2
# Testing the mid ring
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 60,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 8, "Returns 8 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner or mid spot",
)
spot_ids[spot["id"]] = 2
# Testing the outer ring
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 110,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 12, "Returns 12 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
outer_left.pk: 1,
outer_right.pk: 1,
outer_top.pk: 1,
outer_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner, mid or outer spot",
)
spot_ids[spot["id"]] = 2
# testing a limit - should get the inner 4, and any 2 of the mid
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 60,
"limit": 6,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 6, "Returns 6 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner, mid or outer spot",
)
spot_ids[spot["id"]] = 2
self.assertEquals(
spot_ids[inner_left.pk], 2, "Inner left was selected"
)
self.assertEquals(
spot_ids[inner_right.pk], 2, "Inner right was selected"
)
self.assertEquals(spot_ids[inner_top.pk], 2, "Inner top was selected")
self.assertEquals(
spot_ids[inner_bottom.pk], 2, "Inner bottom was selected"
)
# Testing limits - should get all of the inner and mid, but
# no outer spots
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 101,
"limit": 8,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 8, "Returns 8 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner or mid spot",
)
spot_ids[spot["id"]] = 2
# Testing limits - should get all inner and mid spots, and
# 2 outer spots
response = c.get(
"/api/v1/spot",
{
"center_latitude": center_lat,
"center_longitude": center_long,
"distance": 101,
"limit": 10,
},
)
self.assertEquals(
response.status_code, 200, "Accepts the distance query"
)
self.assertEquals(
response["Content-Type"], "application/json", "Has the json header"
)
spots = json.loads(response.content)
self.assertEquals(len(spots), 10, "Returns 10 spots")
spot_ids = {
inner_left.pk: 1,
inner_right.pk: 1,
inner_top.pk: 1,
inner_bottom.pk: 1,
mid_left.pk: 1,
mid_right.pk: 1,
mid_top.pk: 1,
mid_bottom.pk: 1,
outer_left.pk: 1,
outer_right.pk: 1,
outer_top.pk: 1,
outer_bottom.pk: 1,
}
for spot in spots:
self.assertEquals(
spot_ids[spot["id"]],
1,
"Spot matches a unique inner, mid or outer spot",
)
spot_ids[spot["id"]] = 2
self.assertEquals(
spot_ids[inner_left.pk], 2, "Inner left was selected"
)
self.assertEquals(
spot_ids[inner_right.pk], 2, "Inner right was selected"
)
self.assertEquals(spot_ids[inner_top.pk], 2, "Inner | |
:type testcolor: TestColor color
:param testcolor: Optional TestColor ascii color scheme
'''
alines = ""
blines = ""
for x in xrange(0,b):
blines=blines+"\n"
for x in xrange(0,a):
alines=alines+"\n"
line = "-------------------------------------------------------------------------"
out = blines+line+"\n"+msg+"\n"+line+alines
self.debug(out, traceback=traceback, color=testcolor,linebyline=False)
def startmsg(self,msg=""):
self.status(msg, traceback=3,testcolor=TestColor.get_canned_color('whiteonblue'))
def endtestunit(self,msg=""):
msg = "- UNIT ENDED - " + msg
self.status(msg, traceback=2,a=1, testcolor=TestColor.get_canned_color('whiteongreen'))
def errormsg(self,msg=""):
msg = "- ERROR - " + msg
self.status(msg, traceback=2,a=1,testcolor=TestColor.get_canned_color('failred'))
def endfailure(self,msg="" ):
msg = "- FAILED - " + msg
self.status(msg, traceback=2,a=1,testcolor=TestColor.get_canned_color('failred'))
def resultdefault(self,msg,printout=True,color='blueongrey'):
if printout:
self.debug(msg,traceback=2,color=TestColor.get_canned_color('blueongrey'),linebyline=False)
msg = self.format_line_for_color(msg, color)
return msg
def resultfail(self,msg,printout=True, color='redongrey'):
if printout:
self.debug(msg,traceback=2, color=TestColor.get_canned_color('redongrey'),linebyline=False)
msg = self.format_line_for_color(msg, color)
return msg
def resulterr(self,msg,printout=True,color='failred'):
if printout:
self.debug(msg,traceback=2, color=TestColor.get_canned_color(color),linebyline=False)
msg = self.format_line_for_color(msg, color)
return msg
def format_line_for_color(self,msg,color):
if not self.use_color:
return msg
end=""
if msg.endswith('\n'):
msg = msg.rstrip()
end="\n"
msg = TestColor.get_canned_color(color)+str(msg)+TestColor.reset+end
return msg
def get_pretty_args(self,testunit):
'''
Description: Returns a string buf containing formated arg:value for printing later
:type: testunit: Eutestcase.eutestertestunit object
:param: testunit: A testunit object for which the namespace args will be used
:rtype: string
:returns: formated string containing args and their values.
'''
buf = "\nEnd on Failure:" +str(testunit.eof)
buf += "\nPassing ARGS:"
if not testunit.args and not testunit.kwargs:
buf += '\"\"\n'
else:
buf += "\n---------------------\n"
varnames = self.get_meth_arg_names(testunit.method)
if testunit.args:
for count,arg in enumerate(testunit.args):
buf += str(varnames[count+1])+" : "+str(arg)+"\n"
if testunit.kwargs:
for key in testunit.kwargs:
buf += str(key)+" : "+str(testunit.kwargs[key])+"\n"
buf += "---------------------\n"
return buf
    def run_test_case_list(self, list, eof=False, clean_on_exit=True, printresults=True):
        '''
        Description: wrapper to execute a list of EutesterTestUnit objects.
        :type list: list
        :param list: list of EutesterTestUnit objects to be run
        :type eof: boolean
        :param eof: Flag to indicate whether run_test_case_list should exit on any failures. If this is set to False it will exit only when
                    a given EutesterTestUnit fails and has its eof flag set to True.
        :type clean_on_exit: boolean
        :param clean_on_exit: Flag to indicate if clean_method should be run at end of test list execution.
        :type printresults: boolean
        :param printresults: Flag to indicate whether or not to print a summary of results upon run_test_case_list completion.
        :rtype: integer
        :returns: integer exit code to represent pass/fail of the list executed (0 = no failures, 1 = at least one failure).
        '''
        self.testlist = list
        start = time.time()
        tests_ran=0
        test_count = len(list)
        # Per-test wall clock timing is recorded to a uniquely named temp file.
        t = Timer("/tmp/eutester_" + str(uuid.uuid4()).replace("-", ""))
        try:
            for test in list:
                tests_ran += 1
                self.print_test_unit_startmsg(test)
                try:
                    id = t.start()
                    # A unit runs "end on failure" if either the list-level or
                    # the unit-level eof flag is set.
                    test.run(eof=eof or test.eof)
                    t.end(id, str(test.name))
                except Exception, e:
                    self.debug('Testcase:'+ str(test.name)+' error:'+str(e))
                    if eof or (not eof and test.eof):
                        # Abort the whole list on this failure; finally below
                        # still produces the summary and cleanup.
                        self.endfailure(str(test.name))
                        raise e
                    else:
                        # Record the failure but continue with remaining units.
                        self.endfailure(str(test.name))
                else:
                    self.endtestunit(str(test.name))
                self.debug(self.print_test_list_short_stats(list))
        finally:
            # Always summarize, clean up, and compute the exit code -- even
            # when a unit aborted the loop above.
            elapsed = int(time.time()-start)
            msgout = "RUN TEST CASE LIST DONE:\n"
            msgout += "Ran "+str(tests_ran)+"/"+str(test_count)+" tests in "+str(elapsed)+" seconds\n"
            t.finish()
            if printresults:
                try:
                    self.debug("Printing pre-cleanup results:")
                    msgout += self.print_test_list_results(list=list,printout=False)
                    self.status(msgout)
                except:pass
            try:
                if clean_on_exit:
                    # The cleanup method is appended as its own testunit so it
                    # appears in the final results listing.
                    cleanunit = self.create_testunit_from_method(self.clean_method)
                    list.append(cleanunit)
                    try:
                        self.print_test_unit_startmsg(cleanunit)
                        cleanunit.run()
                    except Exception, e:
                        out = StringIO.StringIO()
                        traceback.print_exception(*sys.exc_info(),file=out)
                        out.seek(0)
                        self.debug("Failure in cleanup: " + str(e) + "\n" + out.read())
                    if printresults:
                        msgout = self.print_test_list_results(list=list,printout=False)
                        self.status(msgout)
            except:
                pass
            self.testlist = copy.copy(list)
            # Tally per-result counts for the console one-liner and exit code.
            passed = 0
            failed = 0
            not_run = 0
            for test in list:
                if test.result == EutesterTestResult.passed:
                    passed += 1
                if test.result == EutesterTestResult.failed:
                    failed += 1
                if test.result == EutesterTestResult.not_run:
                    not_run += 1
            total = passed + failed + not_run
            print "passed:"+str(passed)+" failed:" + str(failed) + " not_run:" + str(not_run) + " total:"+str(total)
            if failed:
                return(1)
            else:
                return(0)
def print_test_unit_startmsg(self,test):
startbuf = ''
if self.args.html_anchors:
link = '<a name="' + str(test.anchor_id) + '"></a>\n'
startbuf += '<div id="myDiv" name="myDiv" title="Example Div Element" style="color: #0900C4; font: Helvetica 12pt;border: 1px solid black;">'
startbuf += str(link)
startbuf += "STARTING TESTUNIT: " + test.name
argbuf = self.get_pretty_args(test)
startbuf += str(test.description)+str(argbuf)
startbuf += 'Running list method: "'+str(self.print_testunit_method_arg_values(test))+'"'
if self.args.html_anchors:
startbuf += '\n </div>'
self.startmsg(startbuf)
def has_arg(self,arg):
'''
Description: If arg is present in local testcase args namespace, will return True, else False
:type arg: string
:param arg: string name of arg to check for.
:rtype: boolean
:returns: True if arg is present, false if not
'''
arg = str(arg)
if hasattr(self,'args'):
if self.args and (arg in self.args):
return True
return False
def get_arg(self,arg):
'''
Description: Fetchs the value of an arg within the local testcase args namespace. If the arg
does not exist, None will be returned.
:type arg: string
:param arg: string name of arg to get.
:rtype: value
:returns: Value of arguement given, or None if not found
'''
if self.has_arg(arg):
return getattr(self.args,str(arg))
return None
def add_arg(self,arg,value):
'''
Description: Adds an arg 'arg' within the local testcase args namespace and assigns it 'value'.
If arg exists already in testcase.args, then an exception will be raised.
:type arg: string
:param arg: string name of arg to set.
:type value: value
:param value: value to set arg to
'''
if self.has_arg(arg):
raise Exception("Arg"+str(arg)+'already exists in args')
else:
self.args.__setattr__(arg,value)
def set_arg(self,arg, value):
'''
Description: Sets an arg 'arg' within the local testcase args namespace to 'value'.
If arg does not exist in testcase.args, then it will be created.
:type arg: string
:param arg: string name of arg to set.
:type value: value
:param value: value to set arg to
'''
if self.has_arg(arg):
new = argparse.Namespace()
for val in self.args._get_kwargs():
if arg != val[0]:
new.__setattr__(val[0],val[1])
new.__setattr__(arg,value)
self.args = new
else:
self.args.__setattr__(arg,value)
def clean_method(self):
raise Exception("Clean_method was not implemented. Was run_list using clean_on_exit?")
    def print_test_list_results(self,list=None, printout=True, printmethod=None):
        '''
        Description: Prints a formatted list of results for a list of EutesterTestUnits.
        :type list: list
        :param list: list of EutesterTestUnits; defaults to self.testlist when None
        :type printout: boolean
        :param printout: boolean to flag whether to print using printmethod or self.debug,
                         or to return a string buffer representing the results output
        :type printmethod: method
        :param printmethod: method to use for printing test result output. Default is self.debug
        :rtype: string or None
        :returns: the formatted results buffer when printout is False, else None
        '''
        buf = "\nTESTUNIT LIST SUMMARY FOR " + str(self.name) + "\n"
        if list is None:
            list=self.testlist
        if not list:
            raise Exception("print_test_list_results, error: No Test list provided")
        if printmethod is None:
            printmethod = lambda msg: self.debug(msg,linebyline=False)
        printmethod("Test list results for testcase:"+str(self.name))
        for testunit in list:
            # Separator line before each testunit's summary block.
            buf += self.resultdefault("\n"+ self.getline(80)+"\n", printout=False)
            #Ascii mark up errors using pmethod() so errors are in bold/red, etc...
            pmethod = self.resultfail if not testunit.result == EutesterTestResult.passed else self.resultdefault
            test_summary_line = str(" ").ljust(20) + str("| RESULT: " + str(testunit.result)).ljust(20) + "\n" +\
                                str(" ").ljust(20) + "| TEST NAME: " + str(testunit.name) + "\n" + \
                                str(" ").ljust(20) + str("| TIME : " + str(testunit.time_to_run))
            buf += pmethod(str(test_summary_line),printout=False)
            buf += pmethod("\n" + str(" ").ljust(20) + "| ARGS: "
                           + str(self.print_testunit_method_arg_values(testunit)), printout=False)
            #Print additional line showing error in the failed case...
            if testunit.result == EutesterTestResult.failed:
                # Only the first three lines of the error are shown.
                err_sum = "\n".join(str(testunit.error).splitlines()[0:3])
                test_error_line = 'ERROR:('+str(testunit.name)+'): '\
                                  + str(err_sum) \
                                  + '\n'
                buf += "\n"+str(self.resulterr(test_error_line, printout=False))
            if testunit.result == EutesterTestResult.not_run:
                err_sum = "\n".join(str(testunit.error).splitlines()[0:3])
                test_error_line = 'NOT_RUN:('+str(testunit.name)+'): ' \
                                  + str(err_sum) \
                                  + '\n'
                buf += "\n"+str(self.resulterr(test_error_line, printout=False))
            buf += self.resultdefault("\n"+ self.getline(80)+"\n", printout=False)
        buf += str(self.print_test_list_short_stats(list))
        buf += "\n"
        if printout:
            printmethod(buf)
        else:
            return buf
    def print_test_list_short_stats(self,list,printmethod=None):
        '''
        Description: Build (and optionally print) a one-table summary of result
        counts and total elapsed time for a list of EutesterTestUnits.
        :type list: list
        :param list: list of EutesterTestUnit objects to summarize
        :type printmethod: method
        :param printmethod: optional method used to print the summary
        :rtype: string
        :returns: the formatted summary table
        '''
        results={}
        mainbuf = "RESULTS SUMMARY FOR '"+str(self.name)+"':\n"
        fieldsbuf = ""
        resultsbuf= ""
        total = 0
        elapsed = 0
        #initialize a dict containing all the possible defined test results
        fields = dir(EutesterTestResult)
        # NOTE(review): slicing off the first two dir() entries assumes only
        # two dunder/internal names sort ahead of the result fields -- fragile;
        # confirm against EutesterTestResult's definition.
        for fieldname in fields[2:len(fields)]:
            results[fieldname]=0
        #increment values in results dict based upon result of each testunit in list
        for testunit in list:
            total += 1
            elapsed += testunit.time_to_run
            results[testunit.result] += 1
        # Build the header row and the values row in lockstep so columns align.
        fieldsbuf += str('| TOTAL').ljust(10)
        resultsbuf += str('| ' + str(total)).ljust(10)
        for field in results:
            fieldsbuf += str('| ' + field.upper()).ljust(10)
            resultsbuf += str('| ' + str(results[field])).ljust(10)
        fieldsbuf += str('| TIME_ELAPSED').ljust(10)
        resultsbuf += str('| '+str(elapsed)).ljust(10)
        mainbuf += "\n"+self.getline(len(fieldsbuf))+"\n"
        mainbuf += fieldsbuf
        mainbuf += "\n"+self.getline(len(fieldsbuf))+"\n"
        mainbuf += resultsbuf
        mainbuf += "\n"+self.getline(len(fieldsbuf))+"\n"
        if printmethod:
            printmethod(mainbuf)
        return mainbuf
@classmethod
| |
Murder at the Vicarage by <NAME>(genre-Crime)',
'The Secret Adversary by <NAME>(genre-Crime)',
'The Woman in White by <NAME>(genre-Crime)',
'The Moonstone by <NAME>(genre-Crime)',
'A Study in Scarlet by <NAME>(genre-Crime)',
'The Hound of the Baskervilles by <NAME>(genre-Crime)',
'The Sign of Four by <NAME>(genre-Crime)',
'The Manchurian Candidate by <NAME>(genre-Crime)',
'The Secret Agent by <NAME>(genre-Crime)',
'Under Western Eyes by <NAME>(genre-Crime)',
'Postmortem by <NAME>(genre-Crime)',
'The Andromeda Strain by <NAME>(genre-Crime)',
'Jurassic Park by <NAME>(genre-Crime)',
'Poetic Justice by <NAME>(genre-Crime)',
'The Ipcress File by <NAME>(genre-Crime)',
'Last Seen Wearing by <NAME>(genre-Crime)',
'The Remorseful Day by <NAME>(genre-Crime)',
'Ratking by <NAME>(genre-Crime)',
'Dead Lagoon by <NAME>(genre-Crime)',
'Dirty Tricks by <NAME>(genre-Crime)',
'A Rich Full Death by <NAME>(genre-Crime)',
'Vendetta by <NAME>(genre-Crime)',
'Crime and Punishment by <NAME>(genre-Crime)',
'An American Tragedy by <NAME>(genre-Crime)',
'My Cousin Rachel by <NAME>(genre-Crime)',
'The Count of Monte Cristo by <NAME>(genre-Crime)',
'The Pledge by <NAME>(genre-Crime)',
'The Crime of <NAME> by <NAME>(genre-Crime)',
'The Name of the Rose by <NAME>(genre-Crime)',
'American Psycho by <NAME>(genre-Crime)',
'LA Confidential by <NAME>(genre-Crime)',
'The Big Nowhere by <NAME>(genre-Crime)',
'A Quiet Belief in Angels by R<NAME>(genre-Crime)',
'Sanctuary by <NAME>(genre-Crime)',
'Casino Royale by <NAME>(genre-Crime)',
'Goldfinger by <NAME>(genre-Crime)',
'You Only Live Twice by <NAME>(genre-Crime)',
'The Day of the Jackal by <NAME>(genre-Crime)',
'Brighton Rock by <NAME>(genre-Crime)',
'A Gun for Sale by <NAME>(genre-Crime)',
'The Ministry of Fear by <NAME>(genre-Crime)',
'The Third Man by <NAME>(genre-Crime)',
'A Time to Kill by <NAME>(genre-Crime)',
'The King of Torts by <NAME>(genre-Crime)',
'Hangover Square by <NAME>(genre-Crime)',
'The Glass Key by <NAME>(genre-Crime)',
'The Maltese Falcon by <NAME>(genre-Crime)',
'Red Harvest by <NAME>(genre-Crime)',
'The Thin Man by <NAME>(genre-Crime)',
'Fatherland by <NAME>(genre-Crime)',
'Black Sunday by <NAME>(genre-Crime)',
'Red Dragon by <NAME>(genre-Crime)',
'Tourist Season by <NAME>(genre-Crime)',
'The Friends of Eddie Coyle by <NAME>(genre-Crime)',
'Strangers on a Train by <NAME>(genre-Crime)',
'The Talented Mr Ripley by <NAME>(genre-Crime)',
'Bones and Silence by <NAME>(genre-Crime)',
'A Rage in Harlem by <NAME>(genre-Crime)',
"Miss Smilla’s Feeling for Snow by <NAME>(genre-Crime)",
'Rogue Male by <NAME>(genre-Crime)',
'Malice Aforethought by <NAME>(genre-Crime)',
'Silence of the Grave by <NAME>(genre-Crime)',
"Death at the President’s Lodging by <NAME>(genre-Crime)",
'Cover Her Face by <NAME>(genre-Crime)',
'A Taste for Death by <NAME>(genre-Crime)',
'Friday the Rabbi Slept Late by <NAME>(genre-Crime)',
'Misery by <NAME>(genre-Crime)',
'<NAME> by <NAME>(genre-Crime)',
'Kim by <NAME>(genre-Crime)',
'The Constant Gardener by <NAME>(genre-Crime)',
'Tinker, Tailor, Soldier, Spy by <NAME>(genre-Crime)',
'The Spy Who Came in from the Cold by <NAME>(genre-Crime)',
'To Kill a Mockingbird by <NAME>(genre-Crime)',
'52 Pick-up by El<NAME>(genre-Crime)',
'Get Shorty by <NAME>(genre-Crime)',
'Motherless Brooklyn by <NAME>(genre-Crime)',
'The Bourne Identity by <NAME>(genre-Crime)',
'Cop Hater by <NAME>(genre-Crime)',
'No Country for Old Men by <NAME>(genre-Crime)',
'Enduring Love by <NAME>(genre-Crime)',
'Sidetracked by <NAME>(genre-Crime)',
'Devil in a Blue Dress by <NAME>(genre-Crime)',
'The Great Impersonation by <NAME>(genre-Crime)',
'The Strange Borders of Palace Crescent by <NAME>enheim(genre-Crime)',
'My Name is Red by <NAME>(genre-Crime)',
'Toxic Shock by <NAME>(genre-Crime)',
'Blacklist by <NAME>(genre-Crime)',
'Nineteen Seventy Four by David Peace(genre-Crime)',
'Nineteen Seventy Seven by David Peace(genre-Crime)',
'The Big Blowdown by <NAME>(genre-Crime)',
'Hard Revolution by <NAME>(genre-Crime)',
'Lush Life by <NAME>(genre-Crime)',
'The Godfather by <NAME>(genre-Crime)',
'V by <NAME>(genre-Crime)',
'The Crying of Lot 49 by <NAME>(genre-Crime)',
'Black and Blue by <NAME>(genre-Crime)',
'The Hanging Gardens by <NAME>(genre-Crime)',
'Exit Music by <NAME>(genre-Crime)',
'Judgment in Stone by <NAME>(genre-Crime)',
'Live Flesh by R<NAME>ll(genre-Crime)',
'Dissolution by C<NAME>(genre-Crime)',
'Whose Body? by <NAME>(genre-Crime)',
'Murder Must Advertise by <NAME>(genre-Crime)',
'The Madman of Bergerac by <NAME>(genre-Crime)',
'The Blue Room by <NAME>(genre-Crime)',
'The Laughing Policeman by <NAME> and <NAME>(genre-Crime)',
'Gorky Park by <NAME>(genre-Crime)',
'Of Mice and Men by <NAME>(genre-Crime)',
'The League of Frightened Men by <NAME>(genre-Crime)',
'Perfume by <NAME>(genre-Crime)',
'The Secret History by <NAME>(genre-Crime)',
'The Daughter of Time by <NAME>(genre-Crime)',
'The Getaway by <NAME>(genre-Crime)',
"Pudd’nhead Wilson by <NAME>(genre-Crime)",
'A Dark-Adapted Eye by <NAME>(genre-Crime)',
'A Fatal inversion by <NAME>(genre-Crime)',
"King Solomon’s Carpet by <NAME>(genre-Crime)",
'The Four Just Men by <NAME>(genre-Crime)',
'Fingersmith by <NAME> (genre-Crime)',
'Native Son by <NAME>(genre-Crime)',
'Therese Raquin by <NAME>(genre-Crime)',
'The Face of Another by <NAME>(genre-Family and self)',
'Little Women by <NAME>(genre-Family and self)',
'Behind the Scenes at the Museum by <NAME>(genre-Family and self)',
"Cat’s Eye by <NAME>(genre-Family and self)",
'Epileptic by <NAME>(genre-Family and self)',
'Room Temperature by <NAME>(genre-Family and self)',
'Eugenie Grandet by <NAME>(genre-Family and self)',
'Le Pere Goriot by <NAME>(genre-Family and self)',
'The Crow Road by <NAME>(genre-Family and self)',
'The L Shaped Room by <NAME>(genre-Family and self)',
'Fun Home by <NAME>(genre-Family and self)',
'Malone Dies by <NAME>(genre-Family and self)',
'A Legacy by <NAME>(genre-Family and self)',
'Herzog by <NAME>(genre-Family and self)',
"Humboldt’s Gift by <NAME>(genre-Family and self)",
"The Old Wives’ Tale by <NAME>(genre-Family and self)",
'G by <NAME>(genre-Family and self)',
'Extinction by <NAME>(genre-Family and self)',
'Two Serious Ladies by <NAME>(genre-Family and self)',
'Any Human Heart by <NAME>(genre-Family and self)',
'The Death of Virgil by <NAME>(genre-Family and self)',
'Evelina by <NAME>(genre-Family and self)',
'The Way of All Flesh by <NAME>(genre-Family and self)',
'The Sound of my Voice by <NAME>(genre-Family and self)',
'The Outsider by <NAME>(genre-Family and self)',
'Wise Children by <NAME>(genre-Family and self)',
"The Professor’s House by <NAME>(genre-Family and self)",
'The Wapshot Chronicle by <NAME>(genre-Family and self)',
'The Awakening by <NAME>(genre-Family and self)',
'Les Enfants Terrible by <NAME>(genre-Family and self)',
'The Vagabond by <NAME>(genre-Family and self)',
'Manservant and Maidservant by <NAME>(genre-Family and self)',
'Being Dead by <NAME>(genre-Family and self)',
'Quarantine by <NAME>(genre-Family and self)',
'The Mandarins by <NAME>(genre-Family and self)',
'Roxana by <NAME>(genre-Family and self)',
'Great Expectations by <NAME>(genre-Family and self)',
'The Brothers Karamazov by <NAME>(genre-Family and self)',
'My New York Diary by <NAME>(genre-Family and self)',
'The Millstone by <NAME>(genre-Family and self)',
'My Family and Other Animals by <NAME>(genre-Family and self)',
'Silence by <NAME>(genre-Family and self)',
'The Gathering by <NAME>(genre-Family and self)',
'Middlesex by <NAME>(genre-Family and self)',
'As I Lay Dying by <NAME>(genre-Family and self)',
'The Sound and the Fury by <NAME>(genre-Family and self)',
'The Sportswriter by <NAME>(genre-Family and self)',
'Howards End by <NAME>(genre-Family and self)',
'Spies by <NAME>(genre-Family and self)',
'Hideous Kinky by <NAME>(genre-Family and self)',
'The Man of Property by <NAME>(genre-Family and self)',
'<NAME> by <NAME>(genre-Family and self)',
'The Immoralist by <NAME>(genre-Family and self)',
'The Vatican Cellars by <NAME>(genre-Family and self)',
'The Vicar of Wakefield by <NAME>(genre-Family and self)',
'The Power and the Glory by <NAME>(genre-Family and self)',
'Hunger by <NAME>(genre-Family and self)',
'The Shrimp and the Anemone by <NAME>(genre-Family and self)',
'The Old Man and the Sea by <NAME>(genre-Family and self)',
'Steppenwolf by <NAME>(genre-Family and self)',
'Narziss and Goldmund by <NAME>(genre-Family and self)',
'The Three Paradoxes by <NAME>(genre-Family and self)',
'<NAME>’s Schooldays by <NAME>(genre-Family and self)',
'A Prayer for Owen Meany by <NAME>(genre-Family and self)',
'The Ambassadors by <NAME>(genre-Family and self)',
'Washington Square by <NAME>(genre-Family and self)',
'The Tortoise and the Hare by <NAME>(genre-Family and self)',
'The Unfortunates by <NAME>(genre-Family and self)',
'A Portrait of the Artist as a Young Man by <NAME>(genre-Family and self)',
'Ulysses by <NAME>(genre-Family and self)',
'Good Behaviour by <NAME>(genre-Family and self)',
'Memet my Hawk by <NAME>(genre-Family and self)',
'One Flew Over the Cuckoo’s Nest by <NAME>(genre-Family and self)',
'The Buddha of Suburbia by <NAME>(genre-Family and self)',
'Sons and Lovers by <NAME>rence(genre-Family and self)',
'Cider with Rosie by <NAME>(genre-Family and self)',
'Invitation to the Waltz by <NAME>(genre-Family and self)',
'The Golden Notebook by <NAME>(genre-Family and self)',
'How Green was My Valley by <NAME>(genre-Family and self)',
'<NAME> by <NAME>(genre-Family and self)',
'Under the Volcano by <NAME>(genre-Family and self)',
'The Member of the Wedding by <NAME>(genre-Family and self)',
'Palace Walk by <NAME>(genre-Family and self)',
'The Assistant by <NAME>(genre-Family and self)',
'Buddenbrooks by <NAME>(genre-Family and self)',
'The Chateau by <NAME>(genre-Family and self)',
'The Rector’s Daughter by FM Mayor (genre-Family and self)',
'The Ordeal of Richard Feverek by <NAME>(genre-Family and self)',
'Family Matters by <NAME>(genre-Family and self)',
'Sour Sweet by <NAME>(genre-Family and self)',
'The Lonely Passion of Judith Hearne by <NAME>(genre-Family and self)',
'The Bluest Eye by <NAME>(genre-Family and self)',
'Song of Solomon by <NAME>(genre-Family and self)',
'Who Do You Think You Are? by <NAME>(genre-Family and self)',
'The Black Prince by <NAME>(genre-Family and self)',
'The Man Without Qualities by <NAME> (genre-Family and self)',
'A House for Mr Biswas by <NAME>(genre-Family and self)',
'At-Swim-Two-Birds by | |
# coverage/sqldata.py (from the coveragepy project)
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Sqlite coverage data."""
# TODO: get sys_info for data class, so we can see sqlite version etc
# TODO: get rid of skip_unless_data_storage_is
# TODO: get rid of "JSON message" and "SQL message" in the tests
# TODO: factor out dataop debugging to a wrapper class?
# TODO: make sure all dataop debugging is in place somehow
# TODO: should writes be batched?
# TODO: run_info
import collections
import glob
import itertools
import os
import sqlite3
import sys
from coverage.backward import get_thread_id, iitems
from coverage.data import filename_suffix
from coverage.debug import NoDebugging, SimpleReprMixin
from coverage.files import PathAliases
from coverage.misc import CoverageException, file_be_gone
SCHEMA_VERSION = 2
SCHEMA = """
-- One row, to record the version of the schema store in this db.
CREATE TABLE coverage_schema (
version integer
-- Schema versions:
-- 1: Released in 5.0a2
-- 2: Added contexts in 5.0a3. This is schema 2.
);
-- One row, to record some metadata about the data
CREATE TABLE meta (
has_lines boolean, -- Is this data recording lines?
has_arcs boolean, -- .. or branches?
sys_argv text -- The coverage command line that recorded the data.
);
-- A row per file measured.
CREATE TABLE file (
id integer primary key,
path text,
unique(path)
);
-- A row per context measured.
CREATE TABLE context (
id integer primary key,
context text,
unique(context)
);
-- If recording lines, a row per context per line executed.
CREATE TABLE line (
file_id integer, -- foreign key to `file`.
context_id integer, -- foreign key to `context`.
lineno integer, -- the line number.
unique(file_id, context_id, lineno)
);
-- If recording branches, a row per context per from/to line transition executed.
CREATE TABLE arc (
file_id integer, -- foreign key to `file`.
context_id integer, -- foreign key to `context`.
fromno integer, -- line number jumped from.
tono integer, -- line number jumped to.
unique(file_id, context_id, fromno, tono)
);
-- A row per file indicating the tracer used for that file.
CREATE TABLE tracer (
file_id integer primary key,
tracer text
);
"""
class CoverageSqliteData(SimpleReprMixin):
def __init__(self, basename=None, suffix=None, warn=None, debug=None):
    """Create a SQLite-backed coverage data store.

    `basename` is the base data-file name (defaults to ".coverage");
    `suffix` can add a per-process/per-machine suffix via _choose_filename.
    `warn` is a warning callback; `debug` is a debug-output helper
    (NoDebugging is used when not given).
    """
    self._basename = os.path.abspath(basename or ".coverage")
    self._suffix = suffix
    self._warn = warn
    self._debug = debug or NoDebugging()
    self._choose_filename()
    # In-memory cache of file path -> file id rows from the `file` table.
    self._file_map = {}
    # One Sqlite connection per thread, keyed by thread id.
    self._dbs = {}
    self._pid = os.getpid()
    # Are we in sync with the data file?
    self._have_used = False
    # Whether this data records lines or arcs; at most one becomes True.
    self._has_lines = False
    self._has_arcs = False
    # Current measurement context string and its row id in `context`.
    self._current_context = None
    self._current_context_id = None
    self._query_contexts = None
    self._query_context_ids = None
def _choose_filename(self):
    """Set self._filename from the base name plus an optional suffix."""
    chosen = self._basename
    extra = filename_suffix(self._suffix)
    if extra:
        chosen = chosen + "." + extra
    self._filename = chosen
def _reset(self):
    """Close any open connections and forget all cached state."""
    for connection in self._dbs.values():
        connection.close()
    self._dbs = {}
    self._file_map = {}
    self._have_used = False
    self._current_context_id = None
def _create_db(self):
    """Create the data file, writing the schema and the metadata rows."""
    if self._debug.should('dataio'):
        self._debug.write("Creating data file {!r}".format(self._filename))
    self._dbs[get_thread_id()] = Sqlite(self._filename, self._debug)
    with self._dbs[get_thread_id()] as db:
        # Naive split on ";" works because SCHEMA contains no semicolons
        # inside string literals.
        for stmt in SCHEMA.split(';'):
            stmt = stmt.strip()
            if stmt:
                db.execute(stmt)
        db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
        db.execute(
            "insert into meta (has_lines, has_arcs, sys_argv) values (?, ?, ?)",
            (self._has_lines, self._has_arcs, str(getattr(sys, 'argv', None)))
        )
def _open_db(self):
    """Open an existing data file, validate its schema, and load cached state.

    Raises CoverageException when the file isn't a coverage data file or
    when its schema version doesn't match SCHEMA_VERSION.
    """
    if self._debug.should('dataio'):
        self._debug.write("Opening data file {!r}".format(self._filename))
    self._dbs[get_thread_id()] = Sqlite(self._filename, self._debug)
    with self._dbs[get_thread_id()] as db:
        try:
            schema_version, = db.execute("select version from coverage_schema").fetchone()
        except Exception as exc:
            # Any failure (missing table, not a SQLite file, ...) means this
            # isn't one of our data files.
            raise CoverageException(
                "Data file {!r} doesn't seem to be a coverage data file: {}".format(
                    self._filename, exc
                )
            )
        else:
            if schema_version != SCHEMA_VERSION:
                raise CoverageException(
                    "Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
                        self._filename, schema_version, SCHEMA_VERSION
                    )
                )
        for row in db.execute("select has_lines, has_arcs from meta"):
            self._has_lines, self._has_arcs = row
        # Prime the in-memory path -> id cache.
        for path, id in db.execute("select path, id from file"):
            self._file_map[path] = id
def _connect(self):
    """Return this thread's database connection, opening or creating the
    data file on first use."""
    tid = get_thread_id()
    if tid not in self._dbs:
        if os.path.exists(self._filename):
            self._open_db()
        else:
            self._create_db()
    return self._dbs[tid]
def __nonzero__(self):
    """True if the data file exists and records at least one measured file."""
    no_connection = get_thread_id() not in self._dbs
    if no_connection and not os.path.exists(self._filename):
        return False
    try:
        with self._connect() as con:
            cursor = con.execute("select * from file limit 1")
            return bool(list(cursor))
    except CoverageException:
        return False
__bool__ = __nonzero__
def dump(self):  # pragma: debugging
    """Write a dump of the database to the debug output."""
    # Only useful when a real debug helper was provided.
    if self._debug:
        with self._connect() as con:
            self._debug.write(con.dump())
def _file_id(self, filename, add=False):
    """Get the file id for `filename`.

    If filename is not in the database yet, add it if `add` is True.
    If `add` is not True, return None.
    """
    if filename not in self._file_map and add:
        with self._connect() as con:
            cur = con.execute("insert or replace into file (path) values (?)", (filename,))
            self._file_map[filename] = cur.lastrowid
    return self._file_map.get(filename)
def _context_id(self, context):
    """Return the row id of `context` in the context table, or None if absent."""
    assert context is not None
    self._start_using()
    with self._connect() as con:
        found = con.execute("select id from context where context = ?", (context,)).fetchone()
        return None if found is None else found[0]
def set_context(self, context):
    """Set the current context for future `add_lines` etc.

    The context's database id is resolved lazily (see _set_context_id),
    so this only records the string and clears the cached id.
    """
    if self._debug.should('dataop'):
        self._debug.write("Setting context: %r" % (context,))
    self._current_context = context
    self._current_context_id = None
def _set_context_id(self):
    """Use the _current_context to set _current_context_id."""
    # An unset context is stored as the empty string.
    context = self._current_context or ""
    context_id = self._context_id(context)
    if context_id is not None:
        self._current_context_id = context_id
    else:
        # First time this context is seen: insert it and keep the new row id.
        with self._connect() as con:
            cur = con.execute("insert into context (context) values (?)", (context,))
            self._current_context_id = cur.lastrowid
def base_filename(self):
    """The base filename for storing data, without any suffix applied."""
    return self._basename
def data_filename(self):
    """Where is the data stored?  The actual file name, suffix included."""
    return self._filename
def add_lines(self, line_data):
    """Add measured line data.

    `line_data` is a dictionary mapping file names to dictionaries::

        { filename: { lineno: None, ... }, ...}

    """
    if self._debug.should('dataop'):
        self._debug.write("Adding lines: %d files, %d lines total" % (
            len(line_data), sum(len(lines) for lines in line_data.values())
        ))
    self._start_using()
    # Recording lines is incompatible with recorded arcs; this raises if mixed.
    self._choose_lines_or_arcs(lines=True)
    self._set_context_id()
    with self._connect() as con:
        for filename, linenos in iitems(line_data):
            file_id = self._file_id(filename, add=True)
            data = [(file_id, self._current_context_id, lineno) for lineno in linenos]
            # "or ignore" makes re-recording an already-seen line a no-op.
            con.executemany(
                "insert or ignore into line (file_id, context_id, lineno) values (?, ?, ?)",
                data,
            )
def add_arcs(self, arc_data):
    """Add measured arc data.

    `arc_data` is a dictionary mapping file names to dictionaries::

        { filename: { (l1,l2): None, ... }, ...}

    """
    if self._debug.should('dataop'):
        self._debug.write("Adding arcs: %d files, %d arcs total" % (
            len(arc_data), sum(len(arcs) for arcs in arc_data.values())
        ))
    self._start_using()
    # Recording arcs is incompatible with recorded lines; this raises if mixed.
    self._choose_lines_or_arcs(arcs=True)
    self._set_context_id()
    with self._connect() as con:
        for filename, arcs in iitems(arc_data):
            file_id = self._file_id(filename, add=True)
            data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs]
            # "or ignore" makes re-recording an already-seen arc a no-op.
            con.executemany(
                "insert or ignore into arc "
                "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
                data,
            )
def _choose_lines_or_arcs(self, lines=False, arcs=False):
    """Commit this data store to recording lines or arcs; mixing is an error."""
    if lines and self._has_arcs:
        raise CoverageException("Can't add lines to existing arc data")
    if arcs and self._has_lines:
        raise CoverageException("Can't add arcs to existing line data")
    if not self._has_arcs and not self._has_lines:
        self._has_lines = lines
        self._has_arcs = arcs
        # Persist the choice so readers of the file agree with us.
        # NOTE(review): indentation was lost in the incoming text; this update
        # is assumed to belong inside the first-choice branch above -- confirm.
        with self._connect() as con:
            con.execute("update meta set has_lines = ?, has_arcs = ?", (lines, arcs))
def add_file_tracers(self, file_tracers):
    """Add per-file plugin information.

    `file_tracers` is { filename: plugin_name, ... }

    Raises CoverageException for a file that hasn't been measured, or when
    a file already has a different tracer recorded.
    """
    self._start_using()
    with self._connect() as con:
        for filename, plugin_name in iitems(file_tracers):
            file_id = self._file_id(filename)
            if file_id is None:
                raise CoverageException(
                    "Can't add file tracer data for unmeasured file '%s'" % (filename,)
                )
            existing_plugin = self.file_tracer(filename)
            if existing_plugin:
                if existing_plugin != plugin_name:
                    raise CoverageException(
                        "Conflicting file tracer name for '%s': %r vs %r" % (
                            filename, existing_plugin, plugin_name,
                        )
                    )
            elif plugin_name:
                # Only record a row when there is a non-empty tracer name;
                # a matching existing row is left untouched.
                con.execute(
                    "insert into tracer (file_id, tracer) values (?, ?)",
                    (file_id, plugin_name)
                )
def touch_file(self, filename, plugin_name=""):
"""Ensure that `filename` appears in the data, empty if needed.
`plugin_name` is the name of the plugin resposible for this file. It is used
to associate the right filereporter, etc.
"""
self._start_using()
if self._debug.should('dataop'):
self._debug.write("Touching %r" % (filename,))
if not self._has_arcs and not self._has_lines:
raise CoverageException("Can't touch files in an empty CoverageSqliteData")
self._file_id(filename, add=True)
if plugin_name:
# Set the tracer for this file
self.add_file_tracers({filename: plugin_name})
def update(self, other_data, aliases=None):
"""Update this data with data from several other `CoverageData` instances.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
"""
if self._has_lines and other_data._has_arcs:
raise CoverageException("Can't combine arc data with line data")
if self._has_arcs and other_data._has_lines:
raise CoverageException("Can't combine line data with arc data")
aliases = aliases or PathAliases()
# Force the database we're writing to to exist before we start nesting
# contexts.
self._start_using()
# Collector for all arcs, lines and tracers
other_data.read()
with other_data._connect() as conn:
# Get files data.
cur = conn.execute('select path from file')
files = {path: aliases.map(path) | |
return js
class ExplanationOfBenefitAddItem(backboneelement.BackboneElement):
    """ Insurer added line items.

    The first-tier service adjudications for payor added product or service
    lines.
    """

    resource_type = "ExplanationOfBenefitAddItem"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.adjudication = None
        """ Added items adjudication.
        List of `ExplanationOfBenefitItemAdjudication` items (represented as `dict` in JSON). """

        self.bodySite = None
        """ Anatomical location.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.detail = None
        """ Insurer added line items.
        List of `ExplanationOfBenefitAddItemDetail` items (represented as `dict` in JSON). """

        self.detailSequence = None
        """ Detail sequence number.
        List of `int` items. """

        self.factor = None
        """ Price scaling factor.
        Type `float`. """

        self.itemSequence = None
        """ Item sequence number.
        List of `int` items. """

        self.locationAddress = None
        """ Place of service or where product was supplied.
        Type `Address` (represented as `dict` in JSON). """

        self.locationCodeableConcept = None
        """ Place of service or where product was supplied.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.locationReference = None
        """ Place of service or where product was supplied.
        Type `FHIRReference` (represented as `dict` in JSON). """

        self.modifier = None
        """ Service/Product billing modifiers.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.net = None
        """ Total item cost.
        Type `Money` (represented as `dict` in JSON). """

        self.noteNumber = None
        """ Applicable note numbers.
        List of `int` items. """

        self.productOrService = None
        """ Billing, service, product, or drug code.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.programCode = None
        """ Program the product or service is provided under.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.provider = None
        """ Authorized providers.
        List of `FHIRReference` items (represented as `dict` in JSON). """

        self.quantity = None
        """ Count of products or services.
        Type `Quantity` (represented as `dict` in JSON). """

        self.servicedDate = None
        """ Date or dates of service or product delivery.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.servicedPeriod = None
        """ Date or dates of service or product delivery.
        Type `Period` (represented as `dict` in JSON). """

        self.subDetailSequence = None
        """ Subdetail sequence number.
        List of `int` items. """

        self.subSite = None
        """ Anatomical sub-location.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.unitPrice = None
        """ Fee, charge or cost per item.
        Type `Money` (represented as `dict` in JSON). """

        super(ExplanationOfBenefitAddItem, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ExplanationOfBenefitAddItem, self).elementProperties()
        # Each tuple appears to be (attr name, JSON name, type, is_list,
        # one-of-many group, required) per fhirclient conventions -- confirm
        # against FHIRAbstractBase.elementProperties.
        js.extend([
            ("adjudication", "adjudication", ExplanationOfBenefitItemAdjudication, True, None, False),
            ("bodySite", "bodySite", codeableconcept.CodeableConcept, False, None, False),
            ("detail", "detail", ExplanationOfBenefitAddItemDetail, True, None, False),
            ("detailSequence", "detailSequence", int, True, None, False),
            ("factor", "factor", float, False, None, False),
            ("itemSequence", "itemSequence", int, True, None, False),
            ("locationAddress", "locationAddress", address.Address, False, "location", False),
            ("locationCodeableConcept", "locationCodeableConcept", codeableconcept.CodeableConcept, False, "location", False),
            ("locationReference", "locationReference", fhirreference.FHIRReference, False, "location", False),
            ("modifier", "modifier", codeableconcept.CodeableConcept, True, None, False),
            ("net", "net", money.Money, False, None, False),
            ("noteNumber", "noteNumber", int, True, None, False),
            ("productOrService", "productOrService", codeableconcept.CodeableConcept, False, None, True),
            ("programCode", "programCode", codeableconcept.CodeableConcept, True, None, False),
            ("provider", "provider", fhirreference.FHIRReference, True, None, False),
            ("quantity", "quantity", quantity.Quantity, False, None, False),
            ("servicedDate", "servicedDate", fhirdate.FHIRDate, False, "serviced", False),
            ("servicedPeriod", "servicedPeriod", period.Period, False, "serviced", False),
            ("subDetailSequence", "subDetailSequence", int, True, None, False),
            ("subSite", "subSite", codeableconcept.CodeableConcept, True, None, False),
            ("unitPrice", "unitPrice", money.Money, False, None, False),
        ])
        return js
class ExplanationOfBenefitAddItemDetail(backboneelement.BackboneElement):
    """ Insurer added line items.

    The second-tier service adjudications for payor added services.
    """

    resource_type = "ExplanationOfBenefitAddItemDetail"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.adjudication = None
        """ Added items adjudication.
        List of `ExplanationOfBenefitItemAdjudication` items (represented as `dict` in JSON). """

        self.factor = None
        """ Price scaling factor.
        Type `float`. """

        self.modifier = None
        """ Service/Product billing modifiers.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.net = None
        """ Total item cost.
        Type `Money` (represented as `dict` in JSON). """

        self.noteNumber = None
        """ Applicable note numbers.
        List of `int` items. """

        self.productOrService = None
        """ Billing, service, product, or drug code.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.quantity = None
        """ Count of products or services.
        Type `Quantity` (represented as `dict` in JSON). """

        self.subDetail = None
        """ Insurer added line items.
        List of `ExplanationOfBenefitAddItemDetailSubDetail` items (represented as `dict` in JSON). """

        self.unitPrice = None
        """ Fee, charge or cost per item.
        Type `Money` (represented as `dict` in JSON). """

        super(ExplanationOfBenefitAddItemDetail, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ExplanationOfBenefitAddItemDetail, self).elementProperties()
        # Tuple layout appears to follow fhirclient conventions (attr name,
        # JSON name, type, is_list, one-of-many group, required) -- confirm.
        js.extend([
            ("adjudication", "adjudication", ExplanationOfBenefitItemAdjudication, True, None, False),
            ("factor", "factor", float, False, None, False),
            ("modifier", "modifier", codeableconcept.CodeableConcept, True, None, False),
            ("net", "net", money.Money, False, None, False),
            ("noteNumber", "noteNumber", int, True, None, False),
            ("productOrService", "productOrService", codeableconcept.CodeableConcept, False, None, True),
            ("quantity", "quantity", quantity.Quantity, False, None, False),
            ("subDetail", "subDetail", ExplanationOfBenefitAddItemDetailSubDetail, True, None, False),
            ("unitPrice", "unitPrice", money.Money, False, None, False),
        ])
        return js
class ExplanationOfBenefitAddItemDetailSubDetail(backboneelement.BackboneElement):
    """ Insurer added line items.

    The third-tier service adjudications for payor added services.
    """

    resource_type = "ExplanationOfBenefitAddItemDetailSubDetail"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.adjudication = None
        """ Added items adjudication.
        List of `ExplanationOfBenefitItemAdjudication` items (represented as `dict` in JSON). """

        self.factor = None
        """ Price scaling factor.
        Type `float`. """

        self.modifier = None
        """ Service/Product billing modifiers.
        List of `CodeableConcept` items (represented as `dict` in JSON). """

        self.net = None
        """ Total item cost.
        Type `Money` (represented as `dict` in JSON). """

        self.noteNumber = None
        """ Applicable note numbers.
        List of `int` items. """

        self.productOrService = None
        """ Billing, service, product, or drug code.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        self.quantity = None
        """ Count of products or services.
        Type `Quantity` (represented as `dict` in JSON). """

        self.unitPrice = None
        """ Fee, charge or cost per item.
        Type `Money` (represented as `dict` in JSON). """

        super(ExplanationOfBenefitAddItemDetailSubDetail, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        js = super(ExplanationOfBenefitAddItemDetailSubDetail, self).elementProperties()
        # Tuple layout appears to follow fhirclient conventions (attr name,
        # JSON name, type, is_list, one-of-many group, required) -- confirm.
        js.extend([
            ("adjudication", "adjudication", ExplanationOfBenefitItemAdjudication, True, None, False),
            ("factor", "factor", float, False, None, False),
            ("modifier", "modifier", codeableconcept.CodeableConcept, True, None, False),
            ("net", "net", money.Money, False, None, False),
            ("noteNumber", "noteNumber", int, True, None, False),
            ("productOrService", "productOrService", codeableconcept.CodeableConcept, False, None, True),
            ("quantity", "quantity", quantity.Quantity, False, None, False),
            ("unitPrice", "unitPrice", money.Money, False, None, False),
        ])
        return js
class ExplanationOfBenefitBenefitBalance(backboneelement.BackboneElement):
""" Balance by Benefit Category.
"""
resource_type = "ExplanationOfBenefitBenefitBalance"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.category = None
""" Benefit classification.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.description = None
""" Description of the benefit or services covered.
Type `str`. """
self.excluded = None
""" Excluded from the plan.
Type `bool`. """
self.financial = None
""" Benefit Summary.
List of `ExplanationOfBenefitBenefitBalanceFinancial` items (represented as `dict` in JSON). """
self.name = None
""" Short name for the benefit.
Type `str`. """
self.network = None
""" In or out of network.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.term = None
""" Annual or lifetime.
Type `CodeableConcept` (represented as `dict` in JSON). | |
<filename>sparv/modules/hist/hist.py
import sparv.modules.saldo.saldo as saldo
import sparv.util as util
import sparv.diapivot as diapivot
import re
import itertools
import os
# The minimun precision difference for two annotations to be considered equal
PRECISION_DIFF = 0.01
def annotate_variants(word, out, spellmodel, delimiter="|", affix="|", model=None):
    """Use a lexicon model and a spelling model to annotate words with their spelling variants.

    - word is existing annotations for wordforms
    - out is a string containing the resulting annotation file
    - spellmodel is the spelling model
    - model is the lexicon model
    - delimiter is the delimiter character to put between ambiguous results
    - affix is an optional character to put before and after results
    """
    # model -> {word : [(variant, dist)]}
    def parsevariant(modelfile):
        """Parse the spelling-variant model file into a dict."""
        d = {}

        def addword(res, word, info):
            # Entries are "variant,dist" pairs separated by "^^".
            for part in info.strip().split("^^"):
                if part:
                    xs = part.split(",")
                    res.setdefault(word, []).append((xs[0], float(xs[1])))

        with open(modelfile, encoding="utf8") as f:
            for line in f:
                wd, info = line.split(":::")
                addword(d, wd, info)
        return d

    # BUG FIX: the guard was inverted ("if model is None"), which constructed
    # SaldoLexicon(None) exactly when no model was supplied and never loaded
    # the lexicon when one was.  Only load it when a model path is given (it
    # is only needed by the commented-out lemgram lookup below).
    if model is not None:
        lexicon = saldo.SaldoLexicon(model)
    variations = parsevariant(spellmodel)

    def findvariants(tokid, theword):
        # Variants of the lowercased word, excluding the word itself.
        variants = [x_d for x_d in variations.get(theword.lower(), []) if x_d[0] != theword]
        # return set(_concat([get_single_annotation(lexicon, v, "lemgram") for v, d in variants]))
        return set([v for v, d in variants])

    annotate_standard(out, word, findvariants, split=False)
def extract_pos(out, lemgrams, extralemgrams="", delimiter="|", affix="|"):
    """Annotate each lemgram with pos-tags, extracted from the lemgram strings themselves.

    - out is the resulting annotation file
    - lemgrams is the existing annotations for lemgram
    - extralemgrams is an optional extra annotation from which more pos-tags can be extracted
    - delimiter is the delimiter character to put between ambiguous results
    - affix is an optional character to put before and after results
    """
    def oktag(tag):
        # Reject regex misses and non-POS markers ("e", "sxc", "mxc").
        return tag is not None and tag.group(1) not in ["e", "sxc", "mxc"]

    def mkpos(tokid, thelems):
        # Pull the code between ".." and the following "." out of each lemgram.
        pos = [re.search(r"\.\.(.*?)\.", lem) for lem in thelems]
        # The function lag18002pos has been moved into the corpus (SVN)!
        return set(sum([util.tagsets.lag18002pos(p.group(1)) for p in pos if oktag(p)], []))

    annotate_standard(out, lemgrams, mkpos, extralemgrams)
def annotate_fallback(out, word, msd, lemgram, models, key="lemgram", lexicons=None):
    """Annotate the words that do not already have a lemgram, according to model(s).

    - out is the resulting annotation file
    - word, msd are the existing word and MSD annotations
    - lemgram is the existing annotations for lemgram
    - models is a whitespace-separated list of crosslink model paths
    - key is the lexicon entry key to look up
    - lexicons are pre-loaded lexicons; when given, `models` is ignored
    """
    # catalaunch stuff: the catapult passes in already-loaded lexicons.
    if lexicons is None:
        models = models.split()
        lexicons = [saldo.SaldoLexicon(lex) for lex in models]
    WORD = util.read_annotation(word)
    MSD = util.read_annotation(msd)

    def annotate_empties(tokid, lemgrams):
        # Only tokens whose existing lemgram set is empty get a fallback lookup.
        fallbacks = []
        if not lemgrams:
            word = WORD[tokid]
            msdtag = MSD[tokid]
            fallbacks.extend(get_single_annotation(lexicons, word, key, msdtag))
        return fallbacks

    annotate_standard(out, lemgram, annotate_empties)
def annotate_diachron(out, lemgram, model, extralemgrams="", delimiter="|", affix="|"):
    """Annotate each lemgram with its corresponding saldo_id, according to model (diapivot.pickle).

    - out is the resulting annotation file
    - lemgram is the existing annotations for lemgram
    - model is the diapivot model
    - delimiter is the delimiter character to put between ambiguous results
    - affix is an optional character to put before and after results
    """
    lexicon = diapivot.PivotLexicon(model)

    def diachronlink(tokid, thelems):
        # BUG FIX: copy the list instead of aliasing it.  The original
        # "all_lemgrams = thelems" followed by "all_lemgrams += [s_i]"
        # appended to thelems itself, mutating the caller's list while it
        # was being iterated (so appended ids were looked up again too).
        all_lemgrams = list(thelems)
        for lemgram in thelems:
            s_i = lexicon.get_exactMatch(lemgram)
            if s_i:
                all_lemgrams += [s_i]
        return all_lemgrams

    annotate_standard(out, lemgram, diachronlink, extralemgrams)
def mergemany(out, annotations, separator="|"):
    """Concatenate values from two or more annotations, with an optional separator.

    Remove superfluous separators.
    """
    if isinstance(annotations, str):
        annotations = annotations.split()

    # Collect the non-empty values per key across all input annotations.
    collected = {}
    for annotation_file in annotations:
        for key, value in list(util.read_annotation(annotation_file).items()):
            if value:
                collected.setdefault(key, []).append(value)

    OUT = {}
    for key, values in list(collected.items()):
        OUT[key] = separator + separator.join(values) + separator if values else separator
    util.write_annotation(out, OUT)
def merge(out, left, right, separator=""):
    """Concatenate values from two annotations, with an optional separator.

    Remove superfluous separators.
    """
    right_ann = util.read_annotation(right)
    OUT = {}
    for key, left_val in util.read_annotation_iteritems(left):
        parts = [v for v in (left_val, right_ann[key]) if v != separator]
        OUT[key] = separator.join(parts) if parts else separator
    util.write_annotation(out, OUT)
def posset(out, pos, separator="|"):
    """Annotate with POS sets."""
    # Wrap each POS value in a one-element list so annotate_standard adds
    # the set affixes around it.
    annotate_standard(out, pos, lambda tokid, thepos: [thepos], split=False)
def annotate_standard(out, input_annotation, annotator, extra_input="", delimiter="|", affix="|", split=True):
    """Apply the 'annotator' function to the annotations in 'input_annotation' and write the new output to 'out'.

    The annotator function should have type :: token_id -> oldannotations -> newannotations
    No support for multiword expressions
    - out is the output file
    - input_annotation is the given input annotation
    - annotator is the function which is to be applied to the input annotation
    - extra_input is an extra input annotation
    - delimiter is the delimiter character to put between ambiguous results
    - affix is an optional character to put before and after results
    - split defines if the input annotation is a set, with elements separated by delimiter
      if so, pass a list to the annotator. Else, pass one single element
    """
    def merge(d1, d2):
        # Combine two annotation dicts; values for shared keys are joined
        # with the delimiter.
        result = dict(d1)
        for k, v in list(d2.items()):
            if k in result:
                result[k] = result[k] + delimiter + v
            else:
                result[k] = v
        return result

    LEMS = util.read_annotation(input_annotation)
    if extra_input:
        LEMS = merge(LEMS, util.read_annotation(extra_input))

    # Start from a clean output file.
    clear_annotation(out)

    OUT = {}
    for tokid in LEMS:
        thelems = LEMS[tokid]
        if split:
            thelems = [x for x in thelems.split(delimiter) if x != ""]

        # Annotator results are deduplicated via set() before serialization.
        output_annotation = set(annotator(tokid, thelems))
        OUT[tokid] = affix + delimiter.join(list(output_annotation)) + affix if output_annotation else affix

    util.write_annotation(out, OUT)
def annotate_full(word, sentence, reference, out, annotations, models, msd="",
delimiter="|", affix="|", precision=":%.3f", precision_filter=None, min_precision=0.0,
skip_multiword=False, lexicons=None):
# TODO almost the same as normal saldo.annotate, but doesn't use msd or saldo-specific stuff
"""Use a lmf-lexicon model to annotate (pos-tagged) words.
- word, msd are existing annotations for wordforms and part-of-speech
- sentence is an existing annotation for sentences and their children (words)
- reference is an existing annotation for word references, to be used when
annotating multi-word units
- out is a string containing a whitespace separated list of the resulting annotation files
- annotations is a string containing a whitespace separate list of annotations to be written.
Currently: gf (= baseform), lem (=lemgram)
Number of annotations and their order must correspond to the list in the 'out' argument.
- model is the Saldo model
- delimiter is the delimiter character to put between ambiguous results
- affix is an optional character to put before and after results
- precision is a format string for how to print the precision for each annotation
(use empty string for no precision)
- precision_filter is an optional filter, currently there are the following values:
max: only use the annotations that are most probable
first: only use the most probable annotation (or one of the most probable if more than one)
- min_precision: only use annotations with a probability score higher than this
- skip_multiword can be set to True to disable multi word annotations
- lexicon: this argument cannot be set from the command line,
but is used in the catapult. This argument must be last.
"""
# allow use of multiple lexicons
if not lexicons:
models = [(os.path.basename(m).rstrip(".pickle"), m) for m in models.split()]
lexicons = [(name, saldo.SaldoLexicon(lex)) for name, lex in models]
max_gaps = 0 # Maximum number of gaps in multi-word units.
# Set to 0 since many (most?) multi-word in the old lexicons are unseparable (half öre etc)
annotations = annotations.split()
out = out.split()
assert len(out) == len(annotations), "Number of target files and annotations must be the same"
if isinstance(skip_multiword, str):
skip_multiword = (skip_multiword.lower() == "true")
if skip_multiword:
util.log.info("Skipping multi word annotations")
min_precision = float(min_precision)
WORD = util.read_annotation(word)
REF = util.read_annotation(reference)
if msd:
MSD = util.read_annotation(msd)
for out_file in out:
clear_annotation(out_file)
sentences = [sent.split() for _, sent in util.read_annotation_iteritems(sentence)]
OUT = {}
for sent in sentences:
incomplete_multis = [] # [{annotation, words, [ref], is_particle, lastwordWasGap, numberofgaps}]
complete_multis = [] # ([ref], annotation)
sentence_tokens = {}
for tokid in sent:
thewords = [w for w in WORD[tokid].split("|") if w]
ref = REF[tokid]
if msd:
msdtag = MSD[tokid]
else:
msdtag = ""
annotation_info = {}
sentence_tokens[ref] = {"tokid": tokid, "word": thewords, "msd": msdtag, "annotations": annotation_info}
for theword | |
<reponame>home-suite-home/Home-Suite-Home
#!/usr/bin/env python
import sys
import json
import time
import dash
import urllib
import dash_daq as daq
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State, MATCH
from Email_Component import Email_Component
from AnalyticsComponent import LineGraph
from HTTP_Component.Sensors import Sensor
from UI_Utils import *
from subprocess import check_output
from Server_Component.Database import Database
from settings import Settings
from conversions import Units
SECONDS_PER_REFRESH = 30
NU = dash.no_update
settings = Settings()
def getCardDivs(isField=False, isEdit=False):
    """Build the dashboard card divs, one per configured sensor, plus the
    trailing edit/fields/new-sensor cards.

    isField/isEdit control which of the trailing cards is visible.
    """
    divList = []
    print("-- getCardDivs: --")
    for sensor in db.getConfigData():
        if(sensor != None):
            sensorName = sensor['name']
            sensorType = sensor['type']
            curUnits = Units(sensorType, sensor['units'])
            # Most recent reading, converted to a display string.
            cardData = curUnits.convert_to_string(db.getMostRecentSensorData(sensorName, sensorType))
            print("{}-{}: {}".format(sensorType, sensorName, cardData))
            # Dict ids ({'type': ..., 'index': "type`name"}) enable Dash
            # pattern-matching callbacks to serve every card.
            divList.append(
                html.Div(className='card',
                    id={'type': 'div-card', 'index': '{}`{}'.format(sensorType, sensorName)},
                    children=[
                        html.H4(sensor['name']),
                        html.Div('Type: ' + sensor['type']),
                        html.H2(
                            cardData,
                            id={'type': 'sensor-data', 'index': '{}`{}'.format(sensorType, sensorName)},
                        ),
                        html.Button('Edit Card', id={
                            'type': 'edit-card-button',
                            'index': '{}`{}'.format(sensorType, sensorName)
                            }
                        ),
                        html.H4(),
                        dcc.Link(
                            html.Button('View Graph', id={
                                'type': 'graph-card-button',
                                'index': '{}`{}'.format(sensorType, sensorName)}
                            ),
                            href='/analytics/{}'.format(urllib.parse.urlencode(
                                {'name': sensorName, 'type': sensorType}))
                        ),
                        html.Br(),
                        html.Br(),
                        html.Br(),
                        # Hidden loading container used by the graph callback.
                        dcc.Loading(
                            id={
                                'type': 'graph-loading-container',
                                'index': '{}`{}'.format(sensorType, sensorName)
                            },
                            children=html.Div(
                                id={
                                    'type': 'graph-loading',
                                    'index': '{}`{}'.format(sensorType, sensorName)
                                }
                            ),
                            style={'display':'none'}
                        )
                    ]
                )
            )
    tempEditCard, tempFieldsCard, tempNewCard = getFieldsAndNewAndEditCards(isField=isField, isEdit=isEdit)
    divList.append(tempEditCard)
    divList.append(tempFieldsCard)
    divList.append(tempNewCard)
    return divList
def getTypesDropdownList():
    """Build the sensor-type dropdown options from the configured types,
    ending with a catch-all "New Type of Sensor" entry."""
    options = [{'label': t, 'value': t} for t in db.getFields('type')]
    options.append({'label': 'New Type of Sensor', 'value': 'other-type'})
    return options
def getUsersDropdownLists(getOptions=False, getValue=False):
    """Build dropdown data for the registered alert-recipient emails.

    Args:
        getOptions: include the dcc.Dropdown options list
            ([{'label': email, 'value': email}, ...]).
        getValue: include the plain list of email strings.
    Returns:
        (options, values) when both flags are set, just the requested list
        when only one flag is set, or None when neither is requested
        (matches the original implicit-None behavior).
    """
    # Fix: the original queried the database up to twice per call and fell
    # through to an implicit None with no flags; fetch once and branch.
    if not (getOptions or getValue):
        return None
    optionsList = [curUser['email'] for curUser in db.getAllUsers()]
    optionsDictList = [{'label': email, 'value': email} for email in optionsList]
    if getOptions and getValue:
        return optionsDictList, optionsList
    if getOptions:
        return optionsDictList
    return optionsList
def getFieldsAndNewAndEditCards(isField=False, isEdit=False):
    """Return the (edit card, new-sensor form card, add-button card) triple.

    The new-sensor form and the 'Add New Sensor' button are mutually exclusive:
    isField shows the form and hides the button. isEdit shows the edit card.
    """
    displayFields = 'inline-block' if isField else 'none'
    displayNew = 'none' if isField else 'inline-block'
    displayEdit = 'inline-block' if isEdit else 'none'
    temp_edits_card = html.Div(
        className='card',
        id='edits-card',
        children=edit_card_fields,
        style={'display': displayEdit},
    )
    temp_fields_card = html.Div(
        className='card',
        id='fields-card',
        children=new_card_fields,
        style={'display': displayFields},
    )
    temp_new_sensor_card = html.Div(
        className='card',
        id='new-card',
        children=[
            html.H4(''),
            html.Button('Add New Sensor', id='new-card-button'),
        ],
        style={'display': displayNew},
    )
    return temp_edits_card, temp_fields_card, temp_new_sensor_card
def populateEditCard(valuesDict, curId):
    """Build the edit-card children pre-filled with an existing sensor's settings.

    Args:
        valuesDict: mapping of edit-field component id ('edit_sensor-name',
            'edit_ip-address', ..., 'edit_alert') to its current value.
        curId: pattern-matching id dict of the card being edited; its 'index'
            ('type`name') is stashed in the hidden edit_name-passer div so the
            save/delete callbacks know which sensor to act on.
    Returns:
        list of Dash components to place inside the edits card.
    """
    edit_card_list = [
        html.Button('Discard', id='edit_discard-button'),
        html.Div(
            [
                dcc.Input(
                    id='edit_sensor-name',
                    className='field_element',
                    autoFocus=True,
                    debounce=True,
                    placeholder='Sensor Name',
                    value=valuesDict['edit_sensor-name'],
                ),
                dcc.Input(
                    id='edit_ip-address',
                    className='field_element',
                    debounce=True,
                    placeholder='IP Address',
                    value=valuesDict['edit_ip-address'],
                ),
                dcc.Input(
                    id='edit_port-number',
                    className='field_element',
                    debounce=True,
                    placeholder='Port Number (Optional)',
                    value=valuesDict['edit_port-number'],
                ),
                dcc.Input(
                    id='edit_url-plug',
                    className='field_element',
                    debounce=True,
                    placeholder='URL Plug',
                    value=valuesDict['edit_url-plug'],
                ),
                dcc.Input(
                    id='edit_units',
                    className='field_element',
                    debounce=True,
                    placeholder='Units (Optional)',
                    value=valuesDict['edit_units'],
                ),
                daq.BooleanSwitch(
                    id='edit_alert',
                    className='field_element',
                    on=valuesDict['edit_alert'],
                    color='#9ad6aa',
                    label='Alerts:',
                    labelPosition='top',
                ),
                dcc.Input(
                    id='edit_minimum-bound',
                    className='field_element',
                    debounce=True,
                    placeholder='Minimum bound',
                    value=valuesDict['edit_minimum-bound'],
                ),
                dcc.Input(
                    id='edit_maximum-bound',
                    className='field_element',
                    debounce=True,
                    placeholder='Maximum bound',
                    value=valuesDict['edit_maximum-bound'],
                ),
                html.Br(),
                html.Button('Save', id='edit_save-card-button'),
                html.Br(),
                html.Br(),
                # Hidden carrier for the 'type`name' index of the sensor being edited.
                html.Div(curId['index'], id='edit_name-passer', style={'display':'none'}),
                html.Button('DELETE', id='edit_delete-button'),
                html.H4('Invalid Selection',
                    style={'color': 'red','display': 'none' },
                    id='edit_invalid-selection'
                ),
            ],
            style={'display': 'inline-block'}
        ),
    ]
    return edit_card_list
def getAnalyticsPage(sensorType, sensorName):
    """Build the analytics page layout for one sensor.

    Args:
        sensorType: sensor type string (used in the graph's pattern id).
        sensorName: sensor name string.
    Returns:
        list with a single centered Div containing the sensor's line graph.
    """
    analyticsPage = [
        html.Div(
            children=[
                html.H1(children="Analytics"),
                dcc.Link('Homepage', href='/'),
                html.Br(),
                html.Div(
                    id='graph_holder',
                    children=[
                        dcc.Graph(
                            id={'type':'graph', 'index':'{}-{}'.format(sensorType, sensorName)},
                            figure=LineGraph.with_buttons(sensorType, sensorName),
                            style={'display':'block'},
                        )
                    ],
                )
            ],
            style={'display':'block', 'textAlign':'center'}
        )
    ]
    return analyticsPage
# Shared database handle used by every layout builder and callback below.
db = Database()
# Form fields for creating a brand-new sensor (children of fields_card).
new_card_fields = [
    html.Button('Discard', id='field_discard-button'),
    html.H4('New Sensor'),
    html.Div(
        [
            dcc.Input(
                id='field_sensor-name',
                className='field_element',
                autoFocus=True,
                debounce=True,
                placeholder='Sensor Name',
                value='',
            ),
            dcc.Dropdown(
                id='field_types-dropdown',
                className='field_element',
                options=getTypesDropdownList(),
                placeholder='Sensor Type',
            ),
            # Revealed only when 'New Type of Sensor' is picked in the dropdown.
            dcc.Input(
                id='field_new-type',
                className='field_element',
                debounce=True,
                placeholder='Name of New Type',
                style={'display':'none'},
                value='',
            ),
            dcc.Input(
                id='field_ip-address',
                className='field_element',
                debounce=True,
                placeholder='IP Address',
                value='',
            ),
            dcc.Input(
                id='field_port-number',
                className='field_element',
                debounce=True,
                placeholder='Port Number (Optional)',
                value='',
            ),
            dcc.Input(
                id='field_url-plug',
                className='field_element',
                debounce=True,
                placeholder='URL Plug',
                value='',
            ),
            dcc.Input(
                id='field_units',
                className='field_element',
                debounce=True,
                placeholder='Units (Optional)',
                value='',
            ),
            daq.BooleanSwitch(
                id='field_alert',
                className='field_element',
                on=False,
                color='#9ad6aa',
                label='Alerts:',
                labelPosition='top',
            ),
            dcc.Input(
                id='field_minimum-bound',
                className='field_element',
                debounce=True,
                placeholder='Minimum bound',
                value='',
            ),
            dcc.Input(
                id='field_maximum-bound',
                className='field_element',
                debounce=True,
                placeholder='Maximum bound',
                value='',
            ),
            html.Br(),
            html.Button('Create', id='field_create-card-button'),
            html.H4('Invalid Selection',
                style={'color': 'red','display': 'none' },
                id='invalid-selection'
            ),
        ],
        style={'display': 'inline-block'}
    ),
]
# Default (unpopulated) edit-card fields; populateEditCard() builds the
# pre-filled variant of this same structure.
edit_card_fields = [
    html.Button('Discard', id='edit_discard-button'),
    html.Div(
        [
            dcc.Input(
                id='edit_sensor-name',
                className='edit_element',
                autoFocus=True,
                debounce=True,
                placeholder='Sensor Name',
            ),
            dcc.Input(
                id='edit_ip-address',
                className='edit_element',
                debounce=True,
                placeholder='IP Address',
            ),
            dcc.Input(
                id='edit_port-number',
                className='edit_element',
                debounce=True,
                placeholder='Port Number (Optional)',
            ),
            dcc.Input(
                id='edit_url-plug',
                className='edit_element',
                debounce=True,
                placeholder='URL Plug',
            ),
            dcc.Input(
                id='edit_units',
                className='edit_element',
                debounce=True,
                placeholder='Units (Optional)',
            ),
            dcc.Input(
                id='edit_minimum-bound',
                className='edit_element',
                debounce=True,
                placeholder='Minimum bound',
            ),
            dcc.Input(
                id='edit_maximum-bound',
                className='edit_element',
                debounce=True,
                placeholder='Maximum bound',
            ),
            daq.BooleanSwitch(
                id='edit_alert',
                className='edit_element',
                on=False,
                color='#9ad6aa',
                label='Alerts:',
                labelPosition='top',
            ),
            html.Br(),
            html.Button('save', id='edit_save-card-button'),
            html.Br(),
            # Hidden carrier for the id of the sensor under edit.
            html.Div('default', id='edit_name-passer', style={'display':'none'}),
            html.Button('DELETE', id='edit_delete-button'),
            html.H4('Invalid Selection',
                style={'color': 'red','display': 'none' },
                id='edit_invalid-selection'
            ),
        ],
        style={'display': 'inline-block'}
    ),
]
# Static utility-card instances for the home page; visibility is toggled via
# their 'display' style by set_cards_container / getFieldsAndNewAndEditCards.
new_sensor_card = html.Div(className='card',
    id='new-card',
    children=[
        html.H4(''),
        html.Button('Add New Sensor', id='new-card-button',),
    ]
)
fields_card = html.Div(className='card',
    id='fields-card',
    children=new_card_fields,
    style={'display': 'none'},
)
edits_card = html.Div(className='card',
    id='edits-card',
    children=edit_card_fields,
    style={'display': 'none'},
)
colors = {"background": "343434"}
# suppress_callback_exceptions is required because callbacks reference
# components that only exist after routing swaps the page content in.
app = dash.Dash(__name__, title='Home-Suite-Home', suppress_callback_exceptions=True)
# Root layout shell: URL router, periodic refresh timer, and the page container
# that display_page() fills per route.
mainPage = [
    dcc.Location(id='url', refresh=False),
    dcc.Interval(
        id='interval-component',
        interval=SECONDS_PER_REFRESH*1000,  # in ms
        n_intervals=0,
    ),
    html.Div(id='page-content'),
]
# Home page: title, hidden 'messenger' divs used to signal the
# cards-container callback, and the sensor-card grid.
mainDivChildren = [
    html.Div(
        id="title",
        children=[
            html.H1(children="Home Sensor Suite"),
            dcc.Link('Settings', href='/settings'),
        ],
        style={"textAlign": "center"},
    ),
    html.Div(id='createCardMessenger',style={'display':'none'}),
    html.Div(id='editCardMessenger', style={'display':'none'}),
    html.Div(id='deleteCardMessenger', style={'display':'none'}),
    html.Div(id="cards-container",
        style={
            'width': '100%',
            'height': '100%',
            'display': 'grid',
            'align-content': 'start',
            'grid-template-columns': 'repeat(auto-fill, 230px)',
        },
        children=[
            edits_card,
            fields_card,
            new_sensor_card
        ]
    ),
]
# Initial options/selection for the alert-recipient dropdown.
dropdownOptions, dropdownValue = getUsersDropdownLists(getOptions=True, getValue=True)
# Settings page: Pi email credentials, alert-recipient management, and
# miscellaneous alert/polling knobs, laid out in three equal columns.
settingsPage = [
    html.Div(children=[
        html.H1(children="Settings"),
        dcc.Link('Homepage', href='/'),
        html.Div(
            id="major_container1",
            style={
                'width': '100%',
                'height': '100%',
                'display':'grid',
                'grid-template-columns': '33.33% 33.33% 33.33%',
            },
            children=[
                html.Div(id='dump', style={'display':'none'}),
                html.Div(
                    id="retrieve-email",
                    className="settings_column",
                    children=[
                        html.H3(children='RaspberryPi Email'),
                        dcc.Input(
                            id="pi-email",
                            placeholder="Enter a New Email for the RaspberryPi",
                            className="settings_input",
                            type="email",
                            value="",
                            debounce=True,
                        ),
                        dcc.Input(
                            id='pi-password',
                            placeholder="Enter Password for Pi's Email",
                            type="password",
                            className="settings_input",
                            value="",
                            debounce=True,
                        ),
                        html.Button("Submit", id="pi-button",),
                    ],
                ),
                html.Div(
                    id='user-emails',
                    className="settings_column",
                    children=[
                        html.H3('User Emails'),
                        dcc.Input(
                            id="user-email-input",
                            placeholder="Enter a User's Email",
                            type='email',
                            className='settings_input',
                            value='',
                            debounce=True,
                            style={
                                'border-width': 'thin',
                                'width': '100%',
                                'height': '40px',
                            }
                        ),
                        html.Button(
                            children='Add',
                            id='new-user-button',
                        ),
                        html.Br(),
                        html.Br(),
                        html.Div('Enable Alerts for Emails:'),
                        dcc.Dropdown(
                            id='users-dropdown',
                            options=dropdownOptions,
                            value=dropdownValue,
                            className='settings_input',
                            multi=True,
                            clearable=False,
                            style={'display':'inline-block', 'height':'auto', 'width':'100%'}
                        ),
                        html.Br(),
                    ]
                ),
                html.Div(
                    id='other-settings',
                    className="settings_column",
                    children=[
                        html.H3('Other Settings'),
                        html.Div('Disable ALL Email Notifications:'),
                        daq.BooleanSwitch(
                            id='global-switch',
                            on=settings.get_bool_setting('alerts', 'silence_alerts'),
                            color='#9ad6aa',
                            labelPosition='top',
                        ),
                        html.Br(),
                        html.Div('Cooldown Between Email Notifications in Minutes:'),
                        dcc.Input(
                            id='email-rate-limit',
                            type='number',
                            value=settings.get_int_setting('alerts', 'rate_limit'),
                        ),
                        html.Br(),
                        html.Br(),
                        html.Div("Sensor's Polling Rate in Seconds:"),
                        dcc.Input(
                            id='poll-rate',
                            type='number',
                            value=settings.get_int_setting('sensors', 'poll_rate'),
                        )
                    ]
                )
            ],
        ),
    ],
    style={'textAlign':'center'},
    )
]
# Generic analytics layout (the per-sensor version is built by getAnalyticsPage).
analyticsPage = [
    html.H1(children="Analytics", id='analytics-title'),
    dcc.Link('Homepage', href='/'),
    html.Div(
        id='graph_holder',
        children=[
            dcc.Graph(
                id='graph',
                style={'display':'inline-block'}
            )
        ],
        style={'display':'inline-block', 'textAlign':'center'}
    )
]
# Shown for unknown routes or unknown sensors.
errorPage = [
    html.H1("ERROR")
]
# Top-level layout: the router/interval shell; page content is swapped by route.
app.layout = html.Div(
    style={"backgroundColor": colors["background"]},
    children=mainPage
)
@app.callback(
    Output('page-content', 'children'),
    Input('url', 'pathname'),
)
def display_page(pathname):
    """Route the browser URL to the matching page layout.

    '/' -> home grid, '/settings' -> settings page,
    '/analytics/<urlencoded name/type>' -> per-sensor analytics page.
    Anything else (including a malformed query string) -> errorPage.
    """
    # Dash can fire this callback with pathname=None before the Location
    # component initializes; the original crashed on .split() in that case.
    if pathname is None:
        return errorPage
    if pathname == '/':
        return mainDivChildren
    if pathname == '/settings':
        return settingsPage
    pathname_split = pathname.split('/')
    if len(pathname_split) == 3:
        pair_dict = urllib.parse.parse_qs(pathname_split[-1])
        try:
            sensorName = pair_dict['name'][0]
            sensorType = pair_dict['type'][0]
        except (KeyError, IndexError):
            # Query string missing name/type: the original raised KeyError here.
            return errorPage
        if db.getSensorConfig(sensorName, sensorType):
            return getAnalyticsPage(sensorType, sensorName)
    return errorPage
# Email Entry Callback
@app.callback(
    [
        Output("pi-email", "value"),
        Output('pi-password', 'value')
    ],
    Input("pi-button", "n_clicks_timestamp"),
    [
        State("pi-email", "value"),
        State('pi-password', 'value'),
    ],
)
def handle_email(button_timestamp, email, password):
    """Save the Pi's outgoing-mail credentials and clear both inputs.

    Returns dash.no_update on the initial callback fire (before the Submit
    button has ever been pressed, when its timestamp is still None).
    """
    if button_timestamp is None:
        return dash.no_update
    db.saveCredentials(email, password)
    return ['', '']
@app.callback(
    [
        Output('users-dropdown', 'options'),
        Output('users-dropdown', 'value'),
        Output('user-email-input', 'value'),
    ],
    [
        Input('new-user-button', 'n_clicks'),
        Input('users-dropdown', 'value'),
    ],
    [
        State('user-email-input', 'value'),
    ]
)
def handle_users(add_button, dropdown_value, email):
    """Manage the alert-recipient email list.

    Removing an entry from the multi-dropdown deletes that user; clicking
    'Add' saves the email typed in the input (and clears it). In every case
    the dropdown options/value are refreshed from the database.
    """
    ctx = dash.callback_context
    trigger = ctx.triggered[0]['prop_id'].split('.')[0] if ctx.triggered else ''
    if trigger == 'users-dropdown':
        # Any email stored in the DB but absent from the current selection
        # was just removed by the user.  (The original reused the 'email'
        # parameter as the loop variable, shadowing the input-box value.)
        stored = set(getUsersDropdownLists(getValue=True))
        for removed in stored.difference(dropdown_value):
            print('deleting email: ', removed)
            db.deleteUser(removed)
        dbOptions, dbValue = getUsersDropdownLists(getOptions=True, getValue=True)
        return [dbOptions, dbValue, NU]
    if trigger == 'new-user-button' and add_button is not None:
        db.saveUser(None, email)
        dbOptions, dbValue = getUsersDropdownLists(getOptions=True, getValue=True)
        return [dbOptions, dbValue, '']
    # Initial fire or unrecognised trigger: just sync from the database.
    dbOptions, dbValue = getUsersDropdownLists(getOptions=True, getValue=True)
    return [dbOptions, dbValue, NU]
@app.callback(
    [
        Output('global-switch', 'on'),
        Output('email-rate-limit', 'value'),
        Output('poll-rate', 'value'),
    ],
    [
        Input('global-switch', 'on'),
        Input('email-rate-limit', 'value'),
        Input('poll-rate', 'value'),
    ],
)
def other_settings(switch, rate_limit, polling):
    """Persist whichever 'Other Settings' control fired and echo it back.

    On the initial (no-trigger) fire, loads all three values from the
    settings store instead.  Untouched outputs return dash.no_update.
    """
    ctx = dash.callback_context
    trigger = ctx.triggered[0]['prop_id'].split('.')[0] if ctx.triggered else ''
    if trigger == 'global-switch':
        settings.set_setting('alerts', 'silence_alerts', str(switch))
        return [switch, NU, NU]
    if trigger == 'email-rate-limit':
        settings.set_setting('alerts', 'rate_limit', str(rate_limit))
        return [NU, rate_limit, NU]
    if trigger == 'poll-rate':
        settings.set_setting('sensors', 'poll_rate', str(polling))
        return [NU, NU, polling]
    # No recognised trigger (e.g. page load): report the stored values.
    # (The original also had an unreachable trailing 'return NU'.)
    return [
        settings.get_bool_setting('alerts', 'silence_alerts'),
        settings.get_setting('alerts', 'rate_limit'),
        settings.get_setting('sensors', 'poll_rate'),
    ]
# Editor of cards-container.
# Anything that wants to change cards-container has to send a 'messenger' to this callback.
@app.callback(
    Output('cards-container','children'),
    [
        Input('new-card-button', 'n_clicks'),
        Input('createCardMessenger', 'children'),
        Input('editCardMessenger', 'children'),
        Input('deleteCardMessenger', 'children'),
        Input('field_discard-button', 'n_clicks'),
        Input('edit_discard-button', 'n_clicks'),
    ]
)
def set_cards_container(sensor_button, createCardMessenger, editCardMessenger,
                        deleteCardMessenger, field_discard_button, edit_discard_button):
    """Rebuild the card grid whenever any messenger or button fires.

    'new-card-button' re-renders with the new-sensor form open; an
    edit-discard fire with n_clicks still None (button never pressed) is
    ignored; every other trigger — create/edit/delete messengers,
    field-discard, or the initial load — re-renders the plain grid.
    """
    ctx = dash.callback_context
    trigger = ctx.triggered[0]['prop_id'].split('.')[0] if ctx.triggered else ''
    if trigger == 'new-card-button':
        return getCardDivs(isField=True)
    if trigger == 'edit_discard-button':
        return getCardDivs() if edit_discard_button is not None else NU
    # The original spelled out five identical branches all returning
    # getCardDivs(); collapsed here without changing behavior.
    return getCardDivs()
@app.callback(
[
Output('createCardMessenger', 'children'),
Output('invalid-selection', 'style'),
Output('field_new-type', 'style'),
Output('field_types-dropdown', 'options'),
],
[
Input('field_create-card-button', 'n_clicks'),
Input('field_types-dropdown', 'value'),
],
[
State('field_sensor-name', 'value'),
State('field_new-type', 'value'),
State('field_units', 'value'),
State('field_ip-address', 'value'),
State('field_port-number', 'value'),
State('field_url-plug', 'value'),
State('field_minimum-bound', 'value'),
State('field_maximum-bound', 'value'),
State('field_alert', 'on'),
]
)
def create_new_card(create_button, sensor_type,
sensor_name, new_type, units, ip_address, port, url_plug, min_bound, max_bound, alert,):
if(port == None | |
# <gh_stars>1-10  (scraper metadata residue; commented out — not valid Python)
import numpy as np
import math
import shap
from .base_plotting import PlotStructure
from ..common.utils import combine_like_features
import matplotlib.pyplot as plt
from .dependence import dependence_plot
from matplotlib.lines import Line2D
import matplotlib
import re
from shap.plots import colors
def format_value(s, format_str):
    """Format a number for display: apply *format_str* (unless *s* is already
    a string), strip trailing zeros after a decimal point, and render a
    leading '-' as a typographic unicode minus.
    """
    if not issubclass(type(s), str):
        s = format_str % s
    # Strip trailing zeros only when there IS a decimal point ("3.000" -> "3").
    # The original regex r'\.?0+$' also mangled integer strings ("100" -> "1").
    if '.' in s:
        s = s.rstrip('0').rstrip('.')
    # startswith is safe on an empty string; the original s[0] raised IndexError.
    if s.startswith('-'):
        s = u"\u2212" + s[1:]
    return s
def waterfall(data,key,model_name, features, ax=None, fig=None, display_feature_names={}, display_units={},
        label_fontsize=8, **kwargs):
    """ Plots an explanation of a single prediction as a waterfall plot.

    Adapted from shap.plots.waterfall.  The contribution of a feature represents the
    impact of the evidence provided by that feature on the model's output.  The plot
    shows how the contributions of each feature move the model output from the prior
    expectation (the bias term) to the final prediction.  Features are sorted by the
    magnitude of their contributions, with the smallest-magnitude features grouped
    together at the bottom when their count exceeds ``max_display``.

    Parameters
    ----------
    data : multi-indexed frame of contribution results, selected via
        ``data.loc[key].loc[model_name, ...]`` with per-feature
        '<var>_contrib' / '<var>_val' columns, a 'Bias_contrib' column and a
        'feature_names' entry in ``data.attrs`` — assumed from usage; TODO
        confirm against the producer of this frame.
    key, model_name : outer/inner index labels selecting the single example.
    features : list of feature (column base) names to plot.  NOTE: re-bound
        below to the selected feature *values*.
    ax, fig : matplotlib Axes / Figure — both required (used for sizing/arrows).
    display_feature_names : dict mapping raw feature names to display names.
    display_units : dict mapping feature names to unit strings for tick labels.
    label_fontsize : font size for bar and tick labels.
    **kwargs : ``max_display`` — maximum number of features to plot
        (no default here; callers such as plot_contributions supply one).

    Returns
    -------
    (fx, base_values) : final model output and the bias/expected value.
    """
    # NOTE(review): mutable default args above are shared across calls — safe
    # here only because they are read (.get) and never mutated.
    max_display = kwargs.get('max_display')  # NOTE(review): None would break min() below; callers must supply it
    all_features = data.attrs['feature_names']  # currently unused in this function
    feature_names = np.array([display_feature_names.get(f,f) for f in features])
    units = np.array([display_units.get(f,"") for f in features])
    vars_c = [f'{var}_contrib' for var in features if 'Bias' not in var]
    vars_val = [f'{var}_val' for var in features if 'Bias' not in var]
    values = data.loc[key].loc[model_name, vars_c]
    # 'features' is re-bound here from names to the selected feature VALUES.
    features = data.loc[key].loc[model_name, vars_val]
    base_values = data.loc[key].loc[model_name, 'Bias_contrib']
    lower_bounds = getattr(values, "lower_bounds", None)
    upper_bounds = getattr(values, "upper_bounds", None)
    # init variables we use for tracking the plot locations
    num_features = min(max_display, len(values))
    row_height = 0.5
    rng = range(num_features - 1, -1, -1)
    order = np.argsort(-np.abs(values))
    pos_lefts = []
    pos_inds = []
    pos_widths = []
    pos_low = []
    pos_high = []
    neg_lefts = []
    neg_inds = []
    neg_widths = []
    neg_low = []
    neg_high = []
    # Start at the final prediction and walk backwards toward the bias.
    loc = base_values + values.sum()
    yticklabels = ["" for i in range(num_features + 1)]
    # see how many individual (vs. grouped at the end) features we are plotting
    if num_features == len(values):
        num_individual = num_features
    else:
        num_individual = num_features - 1
    # compute the locations of the individual features and plot the dashed connecting lines
    for i in range(num_individual):
        sval = values[order[i]]
        loc -= sval
        if sval >= 0:
            pos_inds.append(rng[i])
            pos_widths.append(sval)
            if lower_bounds is not None:
                pos_low.append(lower_bounds[order[i]])
                pos_high.append(upper_bounds[order[i]])
            pos_lefts.append(loc)
        else:
            neg_inds.append(rng[i])
            neg_widths.append(sval)
            if lower_bounds is not None:
                neg_low.append(lower_bounds[order[i]])
                neg_high.append(upper_bounds[order[i]])
            neg_lefts.append(loc)
        if num_individual != num_features or i + 4 < num_individual:
            ax.plot([loc, loc], [rng[i] -1 - 0.4, rng[i] + 0.4], color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
        if features is None:
            yticklabels[rng[i]] = feature_names[order[i]]
        else:
            # choose a precision appropriate to the magnitude of the feature value
            if abs(features[order[i]]) < 1 :
                fmt = "%0.03f"
            elif abs(features[order[i]]) > 10:
                fmt = "%0.f"
            else:
                fmt = "%0.02f"
            yticklabels[rng[i]] = format_value(features[order[i]], fmt) + " " + units[order[i]] + " = " + \
                feature_names[order[i]]
    # add a last grouped feature to represent the impact of all the features we didn't show
    if num_features < len(values):
        yticklabels[0] = "%d other features" % (len(values) - num_features + 1)
        remaining_impact = base_values - loc
        if remaining_impact < 0:
            pos_inds.append(0)
            pos_widths.append(-remaining_impact)
            pos_lefts.append(loc + remaining_impact)
            c = colors.red_rgb
        else:
            neg_inds.append(0)
            neg_widths.append(-remaining_impact)
            neg_lefts.append(loc + remaining_impact)
            c = colors.blue_rgb
    points = pos_lefts + list(np.array(pos_lefts) + np.array(pos_widths)) + neg_lefts + list(np.array(neg_lefts) + np.array(neg_widths))
    dataw = np.max(points) - np.min(points)
    # draw invisible bars just for sizing the axes
    label_padding = np.array([0.1*dataw if w < 1 else 0 for w in pos_widths])
    ax.barh(pos_inds, np.array(pos_widths) + label_padding + 0.02*dataw,
        left=np.array(pos_lefts) - 0.01*dataw, color=colors.red_rgb, alpha=0)
    label_padding = np.array([-0.1*dataw if -w < 1 else 0 for w in neg_widths])
    ax.barh(neg_inds, np.array(neg_widths) + label_padding - 0.02*dataw,
        left=np.array(neg_lefts) + 0.01*dataw, color=colors.blue_rgb, alpha=0)
    # define variable we need for plotting the arrows
    head_length = 0.08
    bar_width = 0.8
    xlen = ax.get_xlim()[1] - ax.get_xlim()[0]
    xticks = ax.get_xticks()
    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width, height = bbox.width, bbox.height
    bbox_to_xscale = xlen/width
    hl_scaled = bbox_to_xscale * head_length
    renderer = fig.canvas.get_renderer()
    # draw the positive arrows
    for i in range(len(pos_inds)):
        dist = pos_widths[i]
        arrow_obj = ax.arrow(
            pos_lefts[i], pos_inds[i], max(dist-hl_scaled, 0.000001), 0,
            head_length=min(dist, hl_scaled),
            color=colors.red_rgb, width=bar_width,
            head_width=bar_width
        )
        if pos_low is not None and i < len(pos_low):
            ax.errorbar(
                pos_lefts[i] + pos_widths[i], pos_inds[i],
                xerr=np.array([[pos_widths[i] - pos_low[i]], [pos_high[i] - pos_widths[i]]]),
                ecolor=colors.light_red_rgb
            )
        txt_obj = ax.text(
            pos_lefts[i] + 0.5*dist, pos_inds[i], format_value(pos_widths[i], '%+0.02f'),
            horizontalalignment='center', verticalalignment='center', color="white",
            fontsize=12
        )
        text_bbox = txt_obj.get_window_extent(renderer=renderer)
        arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)
        # if the text overflows the arrow then draw it after the arrow
        if text_bbox.width > arrow_bbox.width:
            txt_obj.remove()
            txt_obj = ax.text(
                pos_lefts[i] + (5/72)*bbox_to_xscale + dist, pos_inds[i], format_value(pos_widths[i], '%+0.02f'),
                horizontalalignment='left', verticalalignment='center', color=colors.red_rgb,
                fontsize=label_fontsize
            )
    # draw the negative arrows
    for i in range(len(neg_inds)):
        dist = neg_widths[i]
        arrow_obj = ax.arrow(
            neg_lefts[i], neg_inds[i], -max(-dist-hl_scaled, 0.000001), 0,
            head_length=min(-dist, hl_scaled),
            color=colors.blue_rgb, width=bar_width,
            head_width=bar_width
        )
        if neg_low is not None and i < len(neg_low):
            ax.errorbar(
                neg_lefts[i] + neg_widths[i], neg_inds[i],
                xerr=np.array([[neg_widths[i] - neg_low[i]], [neg_high[i] - neg_widths[i]]]),
                ecolor=colors.light_blue_rgb
            )
        txt_obj = ax.text(
            neg_lefts[i] + 0.5*dist, neg_inds[i], format_value(neg_widths[i], '%+0.02f'),
            horizontalalignment='center', verticalalignment='center', color="white",
            fontsize=label_fontsize
        )
        text_bbox = txt_obj.get_window_extent(renderer=renderer)
        arrow_bbox = arrow_obj.get_window_extent(renderer=renderer)
        # if the text overflows the arrow then draw it after the arrow
        if text_bbox.width > arrow_bbox.width:
            txt_obj.remove()
            txt_obj = ax.text(
                neg_lefts[i] - (5/72)*bbox_to_xscale + dist, neg_inds[i], format_value(neg_widths[i], '%+0.02f'),
                horizontalalignment='right', verticalalignment='center', color=colors.blue_rgb,
                fontsize=label_fontsize
            )
    # draw the y-ticks twice, once in gray and then again with just the feature names in black
    # The 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks
    ax.set_yticks(list(range(num_features)) + list(np.arange(num_features)+1e-8) ,)
    ax.set_yticklabels(yticklabels[:-1] + [l.split('=')[-1] for l in yticklabels[:-1]], fontsize=label_fontsize)
    # put horizontal lines for each feature row
    for i in range(num_features):
        ax.axhline(i, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
    # mark the prior expected value and the model prediction
    ax.axvline(base_values, 0, 1/num_features, color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
    fx = base_values + values.sum()
    ax.axvline(fx, 0, 1, color="#bbbbbb", linestyle="--", linewidth=0.5, zorder=-1)
    # clean up the main axis
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('none')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_visible(False)
    #ax.tick_params(labelsize=13)
    #pl.xlabel("\nModel output", fontsize=12)
    # draw the E[f(X)] tick mark
    xmin,xmax = ax.get_xlim()
    ax2=ax.twiny()
    ax2.set_xlim(xmin,xmax)
    ax2.set_xticks([base_values, base_values+1e-8]) # The 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks
    ax2.set_xticklabels(["\n$E[f(X)]$","\n$ = "+format_value(base_values, "%0.03f")+"$"],
        fontsize=label_fontsize, ha="left")
    ax2.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax2.spines['left'].set_visible(False)
    # draw the f(x) tick mark
    ax3=ax2.twiny()
    ax3.set_xlim(xmin,xmax)
    ax3.set_xticks([base_values + values.sum(), base_values + values.sum() + 1e-8])
    # The 1e-8 is so matplotlib 3.3 doesn't try and collapse the ticks
    ax3.set_xticklabels(["$f(x)$","$ = "+format_value(fx, "%0.03f")+"$"],
        fontsize=label_fontsize, ha="left")
    tick_labels = ax3.xaxis.get_majorticklabels()
    tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-10/72., 0, fig.dpi_scale_trans))
    tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(12/72., 0, fig.dpi_scale_trans))
    tick_labels[1].set_color("#999999")
    ax3.spines['right'].set_visible(False)
    ax3.spines['top'].set_visible(False)
    ax3.spines['left'].set_visible(False)
    # adjust the position of the E[f(X)] = x.xx label
    tick_labels = ax2.xaxis.get_majorticklabels()
    tick_labels[0].set_transform(tick_labels[0].get_transform() + matplotlib.transforms.ScaledTranslation(-20/72., 0, fig.dpi_scale_trans))
    tick_labels[1].set_transform(tick_labels[1].get_transform() + matplotlib.transforms.ScaledTranslation(22/72., -1/72., fig.dpi_scale_trans))
    tick_labels[1].set_color("#999999")
    # color the y tick labels that have the feature values as gray
    # (these fall behind the black ones with just the feature name)
    tick_labels = ax.yaxis.get_majorticklabels()
    for i in range(num_features):
        tick_labels[i].set_color("#999999")
    return fx, base_values
class PlotFeatureContributions(PlotStructure):
"""
PlotFeatureContributions handles plotting contribution-based plotting for
single examples or some subset. This class also handles plotting SHAP-style
plots, which include summary and dependence plots.
"""
    def __init__(self, BASE_FONT_SIZE=12):
        # Delegate font sizing/layout configuration to the PlotStructure base class.
        super().__init__(BASE_FONT_SIZE=BASE_FONT_SIZE)
def plot_contributions(
self,
data,
estimator_names,
features,
to_only_varname=None,
display_feature_names={},
display_units={},
**kwargs,
):
"""
Plot the results of feature contributions
Args:
---------------
result : pandas.Dataframe
a single row/example from the
result dataframe from tree_interpreter_simple
"""
kwargs["max_display"] = kwargs.get('max_display', 10)
estimator_output = kwargs.get('estimator_output', None)
only_one_model = True if len(estimator_names) == 1 else False
outer_indexs = list(set([f[0] for f in data.index.values]))
if estimator_output=='probability' and | |
)
root_1 = self._adaptor.nil()
root_1 = self._adaptor.becomeRoot(
self._adaptor.createFromType(DIR_, "DIR_")
, root_1)
self._adaptor.addChild(root_1,
stream_ID.nextNode()
)
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:203:23: ( objset_decl )*
while stream_objset_decl.hasNext():
self._adaptor.addChild(root_1, stream_objset_decl.nextTree())
stream_objset_decl.reset();
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:203:36: ( declarations )*
while stream_declarations.hasNext():
self._adaptor.addChild(root_1, stream_declarations.nextTree())
stream_declarations.reset();
self._adaptor.addChild(root_0, root_1)
retval.tree = root_0
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "dir_block"
    # ANTLR-generated return scope for rule 'network_block'; .tree holds the AST.
    class network_block_return(ParserRuleReturnScope):
        def __init__(self):
            super().__init__()
            self.tree = None
    # $ANTLR start "network_block"
    # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:206:5: network_block : NETWORK OCBRACE ( network_element )* CCBRACE SEMICOLON -> ^( NETWORK_ ( network_element )* ) ;
    def network_block(self, ):
        # Machine-generated by ANTLR from ProtoCC.g — do not edit by hand.
        # Parses 'Network { <elements> };' and rewrites it into a ^(NETWORK_ ...) AST.
        retval = self.network_block_return()
        retval.start = self.input.LT(1)
        root_0 = None
        NETWORK90 = None
        OCBRACE91 = None
        CCBRACE93 = None
        SEMICOLON94 = None
        network_element92 = None
        NETWORK90_tree = None
        OCBRACE91_tree = None
        CCBRACE93_tree = None
        SEMICOLON94_tree = None
        stream_NETWORK = RewriteRuleTokenStream(self._adaptor, "token NETWORK")
        stream_OCBRACE = RewriteRuleTokenStream(self._adaptor, "token OCBRACE")
        stream_SEMICOLON = RewriteRuleTokenStream(self._adaptor, "token SEMICOLON")
        stream_CCBRACE = RewriteRuleTokenStream(self._adaptor, "token CCBRACE")
        stream_network_element = RewriteRuleSubtreeStream(self._adaptor, "rule network_element")
        try:
            try:
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:206:19: ( NETWORK OCBRACE ( network_element )* CCBRACE SEMICOLON -> ^( NETWORK_ ( network_element )* ) )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:206:21: NETWORK OCBRACE ( network_element )* CCBRACE SEMICOLON
                pass
                NETWORK90 = self.match(self.input, NETWORK, self.FOLLOW_NETWORK_in_network_block1554)
                stream_NETWORK.add(NETWORK90)
                OCBRACE91 = self.match(self.input, OCBRACE, self.FOLLOW_OCBRACE_in_network_block1556)
                stream_OCBRACE.add(OCBRACE91)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:206:37: ( network_element )*
                while True: #loop16
                    alt16 = 2
                    LA16_0 = self.input.LA(1)
                    # Token types 94/95 are the 'Ordered'/'Unordered' keywords.
                    if (LA16_0 in {94, 95}) :
                        alt16 = 1
                    if alt16 == 1:
                        # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:206:37: network_element
                        pass
                        self._state.following.append(self.FOLLOW_network_element_in_network_block1558)
                        network_element92 = self.network_element()
                        self._state.following.pop()
                        stream_network_element.add(network_element92.tree)
                    else:
                        break #loop16
                CCBRACE93 = self.match(self.input, CCBRACE, self.FOLLOW_CCBRACE_in_network_block1561)
                stream_CCBRACE.add(CCBRACE93)
                SEMICOLON94 = self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_network_block1563)
                stream_SEMICOLON.add(SEMICOLON94)
                # AST Rewrite
                # elements: network_element
                # token labels:
                # rule labels: retval
                # token list labels:
                # rule list labels:
                # wildcard labels:
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
                root_0 = self._adaptor.nil()
                # 206:72: -> ^( NETWORK_ ( network_element )* )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:206:75: ^( NETWORK_ ( network_element )* )
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(NETWORK_, "NETWORK_")
                    , root_1)
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:206:86: ( network_element )*
                while stream_network_element.hasNext():
                    self._adaptor.addChild(root_1, stream_network_element.nextTree())
                stream_network_element.reset();
                self._adaptor.addChild(root_0, root_1)
                retval.tree = root_0
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException as re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    # $ANTLR end "network_block"
    # ANTLR-generated return scope for rule 'element_type'; .tree holds the AST.
    class element_type_return(ParserRuleReturnScope):
        def __init__(self):
            super().__init__()
            self.tree = None
    # $ANTLR start "element_type"
    # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:207:9: element_type : ( 'Ordered' | 'Unordered' );
    def element_type(self, ):
        # Machine-generated by ANTLR from ProtoCC.g — do not edit by hand.
        # Matches either the 'Ordered' or 'Unordered' keyword (token types 94/95).
        retval = self.element_type_return()
        retval.start = self.input.LT(1)
        root_0 = None
        set95 = None
        set95_tree = None
        try:
            try:
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:207:22: ( 'Ordered' | 'Unordered' )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:
                pass
                root_0 = self._adaptor.nil()
                set95 = self.input.LT(1)
                if self.input.LA(1) in {94, 95}:
                    self.input.consume()
                    self._adaptor.addChild(root_0, self._adaptor.createWithPayload(set95))
                    self._state.errorRecovery = False
                else:
                    mse = MismatchedSetException(None, self.input)
                    raise mse
                retval.stop = self.input.LT(-1)
                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
            except RecognitionException as re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
        finally:
            pass
        return retval
    # $ANTLR end "element_type"
class network_element_return(ParserRuleReturnScope):
    """Return scope for the ``network_element`` parser rule (ANTLR-generated)."""
    def __init__(self):
        super().__init__()
        # AST built for the rule; filled in by the parser after a successful match
        self.tree = None
# $ANTLR start "network_element"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:208:9: network_element : element_type ID SEMICOLON -> ^( ELEMENT_ element_type ID ) ;
def network_element(self, ):
    """Parse ``network_element : element_type ID SEMICOLON -> ^( ELEMENT_ element_type ID )``.

    ANTLR-generated from ProtoCC.g (rule at 208:9).  Matches an element
    type, an identifier and a terminating semicolon, then rewrites the
    match into an ``ELEMENT_``-rooted subtree containing the element type
    and the identifier (the semicolon is dropped by the rewrite).
    """
    retval = self.network_element_return()
    retval.start = self.input.LT(1)
    root_0 = None
    ID97 = None
    SEMICOLON98 = None
    element_type96 = None
    ID97_tree = None  # generator-emitted; unused because of the AST rewrite
    SEMICOLON98_tree = None  # generator-emitted; unused because of the AST rewrite
    # rewrite streams collect matched tokens/subtrees for the `->` template
    stream_SEMICOLON = RewriteRuleTokenStream(self._adaptor, "token SEMICOLON")
    stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
    stream_element_type = RewriteRuleSubtreeStream(self._adaptor, "rule element_type")
    try:
        try:
            # ProtoCC.g:208:27: element_type ID SEMICOLON
            pass
            self._state.following.append(self.FOLLOW_element_type_in_network_element1606)
            element_type96 = self.element_type()
            self._state.following.pop()
            stream_element_type.add(element_type96.tree)
            ID97 = self.match(self.input, ID, self.FOLLOW_ID_in_network_element1608)
            stream_ID.add(ID97)
            SEMICOLON98 = self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_network_element1610)
            stream_SEMICOLON.add(SEMICOLON98)
            # AST Rewrite: 208:53: -> ^( ELEMENT_ element_type ID )
            retval.tree = root_0
            if retval is not None:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
            else:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
            root_0 = self._adaptor.nil()
            # build ^( ELEMENT_ element_type ID )
            root_1 = self._adaptor.nil()
            root_1 = self._adaptor.becomeRoot(
                self._adaptor.createFromType(ELEMENT_, "ELEMENT_")
                , root_1)
            self._adaptor.addChild(root_1, stream_element_type.nextTree())
            self._adaptor.addChild(root_1,
                stream_ID.nextNode()
            )
            self._adaptor.addChild(root_0, root_1)
            retval.tree = root_0
            retval.stop = self.input.LT(-1)
            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException as re:
            # standard ANTLR recovery: report, resynchronize, emit an error node
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
# $ANTLR end "network_element"
class network_send_return(ParserRuleReturnScope):
    """Return scope for the ``network_send`` parser rule (ANTLR-generated)."""
    def __init__(self):
        super().__init__()
        # AST built for the rule; filled in by the parser after a successful match
        self.tree = None
# $ANTLR start "network_send"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:209:5: network_send : ID DOT send_function OBRACE ID CBRACE SEMICOLON -> ^( SEND_ ID ID ) ;
def network_send(self, ):
    """Parse ``network_send : ID DOT send_function OBRACE ID CBRACE SEMICOLON -> ^( SEND_ ID ID )``.

    ANTLR-generated from ProtoCC.g (rule at 209:5).  Matches a call of the
    form ``node.send(msg);`` and rewrites it into a ``SEND_``-rooted
    subtree containing the two identifiers; the dot, braces, semicolon and
    the ``send_function`` subtree are dropped by the rewrite.
    """
    retval = self.network_send_return()
    retval.start = self.input.LT(1)
    root_0 = None
    ID99 = None
    DOT100 = None
    OBRACE102 = None
    ID103 = None
    CBRACE104 = None
    SEMICOLON105 = None
    send_function101 = None
    ID99_tree = None  # generator-emitted; unused because of the AST rewrite
    DOT100_tree = None
    OBRACE102_tree = None
    ID103_tree = None
    CBRACE104_tree = None
    SEMICOLON105_tree = None
    # rewrite streams collect matched tokens/subtrees for the `->` template
    stream_OBRACE = RewriteRuleTokenStream(self._adaptor, "token OBRACE")
    stream_SEMICOLON = RewriteRuleTokenStream(self._adaptor, "token SEMICOLON")
    stream_DOT = RewriteRuleTokenStream(self._adaptor, "token DOT")
    stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
    stream_CBRACE = RewriteRuleTokenStream(self._adaptor, "token CBRACE")
    stream_send_function = RewriteRuleSubtreeStream(self._adaptor, "rule send_function")
    try:
        try:
            # ProtoCC.g:209:20: ID DOT send_function OBRACE ID CBRACE SEMICOLON
            pass
            ID99 = self.match(self.input, ID, self.FOLLOW_ID_in_network_send1631)
            stream_ID.add(ID99)
            DOT100 = self.match(self.input, DOT, self.FOLLOW_DOT_in_network_send1633)
            stream_DOT.add(DOT100)
            self._state.following.append(self.FOLLOW_send_function_in_network_send1635)
            send_function101 = self.send_function()
            self._state.following.pop()
            stream_send_function.add(send_function101.tree)
            OBRACE102 = self.match(self.input, OBRACE, self.FOLLOW_OBRACE_in_network_send1637)
            stream_OBRACE.add(OBRACE102)
            ID103 = self.match(self.input, ID, self.FOLLOW_ID_in_network_send1639)
            stream_ID.add(ID103)
            CBRACE104 = self.match(self.input, CBRACE, self.FOLLOW_CBRACE_in_network_send1641)
            stream_CBRACE.add(CBRACE104)
            SEMICOLON105 = self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_network_send1643)
            stream_SEMICOLON.add(SEMICOLON105)
            # AST Rewrite: 209:68: -> ^( SEND_ ID ID )
            retval.tree = root_0
            if retval is not None:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
            else:
                stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
            root_0 = self._adaptor.nil()
            # build ^( SEND_ ID ID ); the two nextNode() calls consume the
            # matched IDs from stream_ID in match order (target then message)
            root_1 = self._adaptor.nil()
            root_1 = self._adaptor.becomeRoot(
                self._adaptor.createFromType(SEND_, "SEND_")
                , root_1)
            self._adaptor.addChild(root_1,
                stream_ID.nextNode()
            )
            self._adaptor.addChild(root_1,
                stream_ID.nextNode()
            )
            self._adaptor.addChild(root_0, root_1)
            retval.tree = root_0
            retval.stop = self.input.LT(-1)
            retval.tree = self._adaptor.rulePostProcessing(root_0)
            self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
        except RecognitionException as re:
            # standard ANTLR recovery: report, resynchronize, emit an error node
            self.reportError(re)
            self.recover(self.input, re)
            retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
    finally:
        pass
    return retval
# $ANTLR end "network_send"
class network_mcast_return(ParserRuleReturnScope):
    """Return scope for the ``network_mcast`` parser rule (ANTLR-generated)."""
    def __init__(self):
        super().__init__()
        # AST built for the rule; filled in by the parser after a successful match
        self.tree = None
# $ANTLR start "network_mcast"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:210:5: network_mcast : ID DOT mcast_function OBRACE ID COMMA ID CBRACE SEMICOLON -> ^( MCAST_ ID ID ID ) ;
def network_mcast(self, ):
retval = self.network_mcast_return()
retval.start = self.input.LT(1)
root_0 = None
ID106 = None
DOT107 = None
OBRACE109 = None
ID110 = None
COMMA111 = None
ID112 = None
CBRACE113 = None
SEMICOLON114 = None
mcast_function108 = None
ID106_tree = None
DOT107_tree = None
OBRACE109_tree = None
ID110_tree = None
COMMA111_tree = None
ID112_tree = None
CBRACE113_tree = None
SEMICOLON114_tree = None
stream_COMMA = RewriteRuleTokenStream(self._adaptor, "token COMMA")
stream_OBRACE = RewriteRuleTokenStream(self._adaptor, "token OBRACE")
stream_SEMICOLON = RewriteRuleTokenStream(self._adaptor, "token SEMICOLON")
stream_DOT = RewriteRuleTokenStream(self._adaptor, "token DOT")
stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
stream_CBRACE = RewriteRuleTokenStream(self._adaptor, "token CBRACE")
stream_mcast_function = RewriteRuleSubtreeStream(self._adaptor, "rule mcast_function")
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:210:18: ( ID DOT mcast_function OBRACE ID COMMA ID CBRACE SEMICOLON -> ^( MCAST_ ID ID ID ) )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:210:20: ID DOT mcast_function OBRACE ID COMMA ID CBRACE SEMICOLON
pass
ID106 = self.match(self.input, ID, self.FOLLOW_ID_in_network_mcast1663)
stream_ID.add(ID106)
DOT107 = self.match(self.input, DOT, self.FOLLOW_DOT_in_network_mcast1665)
stream_DOT.add(DOT107)
self._state.following.append(self.FOLLOW_mcast_function_in_network_mcast1667)
mcast_function108 = self.mcast_function()
self._state.following.pop()
stream_mcast_function.add(mcast_function108.tree)
OBRACE109 = self.match(self.input, OBRACE, self.FOLLOW_OBRACE_in_network_mcast1669)
stream_OBRACE.add(OBRACE109)
ID110 = self.match(self.input, ID, self.FOLLOW_ID_in_network_mcast1671)
stream_ID.add(ID110)
COMMA111 = self.match(self.input, COMMA, self.FOLLOW_COMMA_in_network_mcast1673)
stream_COMMA.add(COMMA111)
ID112 = self.match(self.input, ID, self.FOLLOW_ID_in_network_mcast1675)
stream_ID.add(ID112)
CBRACE113 = self.match(self.input, CBRACE, self.FOLLOW_CBRACE_in_network_mcast1677)
stream_CBRACE.add(CBRACE113)
SEMICOLON114 = self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_network_mcast1679)
stream_SEMICOLON.add(SEMICOLON114)
# AST Rewrite
# elements: ID, ID, ID
# token labels:
# rule labels: retval
# token | |
# Copyright 2021 United States Government as represented by the Administrator of the National Aeronautics and Space
# Administration. No copyright is claimed in the United States under Title 17, U.S. Code. All Other Rights Reserved.
r"""
This module provides utilities for visually inspecting star identification and attitude estimation results.
In general, the only functions a user will directly interface with from this module are the :func:`show_id_results`
which shows the results of performing star identification and attitude estimation, :func:`residual_histograms` which
shows histograms of the residuals, and :func:`plot_residuals_vs_magnitude` which generates a scatter plot of residuals
as a function of star magnitude. The other contents of this module are used for manual outlier inspection, which is
typically done by using the :meth:`~.StellarOpNav.review_outliers` method.
"""
from typing import Optional
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.figure import Figure
from giant._typing import PATH
from giant.stellar_opnav.stellar_class import StellarOpNav
def show_id_results(sopnav: StellarOpNav, pdf_name: Optional[PATH] = None,
                    flattened_image: bool = False, log_scale: bool = False):
    """
    This function generates a figure for each turned on image in ``sopnav`` showing the star identification results, as
    well as a figure showing the residuals between the predicted catalogue star locations and the image star
    locations for all images combined.

    For each individual figure, the matched catalogue projected star locations are shown with one marker, the matched
    image points of interest are shown with another marker, the unmatched catalogue projected star locations in the
    field of view are shown with another marker, and the unmatched image points of interest are shown with another
    marker.  In addition, on the combined figure, arrows are drawn indicating the residuals for the matched star pairs.

    You must have called :meth:`~.StellarOpNav.id_stars` at least once before calling this function.

    If the ``pdf_name`` param is not ``None``, the figures will be saved to a pdf file of the same name instead of
    being shown interactively.

    If ``flattened_image`` is set to True, then the individual figures will show the flattened image that is used for
    initial detection of possible stars in the image.  Typically this should be left to ``False`` unless you are having
    issues finding stars and want to inspect the image to see what is going on.

    If ``log_scale`` is set to ``True`` then the individual images are shown using a logarithmic scale to help make
    stars stand out more from the background.  This is generally a fairly useful feature and is used frequently.

    .. warning::
        This function generates 1 figure for every image in your :class:`.StellarOpNav` instance, which can really slow
        down your computer if you have a lot of images loaded and turned on.  To avoid this problem, try turning some
        of the images off using the :attr:`~.Camera.image_mask` attribute before using this function, or save to
        pdf instead.

    :param sopnav: The :class:`.StellarOpNav` instance containing the star identification results
    :param pdf_name: The file name for saving the figures to a pdf.  If ``None`` the figures are shown interactively
    :param flattened_image: A boolean flag specifying whether to show the flattened image instead of the raw image
    :param log_scale: A boolean flag specifying whether to use a logarithmic scale for the image intensity values.
    """
    # the combined figure accumulates residual arrows from every image
    all_resids_fig = plt.figure()
    ax = all_resids_fig.add_subplot(111)
    big_resids = []
    if pdf_name:
        pdf = PdfPages(pdf_name)
    else:
        pdf = None
    # iterate only over the turned-on images; ind is the camera index
    for ind, image in sopnav.camera:
        cat_locations = sopnav.matched_catalogue_image_points[ind]
        if flattened_image:
            image, _ = sopnav.image_processing.flatten_image_and_get_noise_level(image)
            # make the min of the image 100 so the display (and the log below) stays positive
            image -= image.min() - 100
        fig = plt.figure()
        axt = plt.gca()
        if log_scale:
            # shift to strictly positive values before taking the log
            image = image.astype(np.float32) - image.min() + 100
            axt.imshow(np.log(image), cmap='gray', interpolation='none')
        else:
            axt.imshow(image, cmap='gray', interpolation='none')
        if (cat_locations is not None) and cat_locations.size:
            # residuals are catalogue minus extracted, in pixels
            resids = cat_locations - sopnav.matched_extracted_image_points[ind]
            # Plot Matched Pairs
            axt.scatter(*sopnav.matched_extracted_image_points[ind], color='none', edgecolors='yellow',
                        linewidths=1.5, label='Matched Centroids')
            axt.scatter(*cat_locations, marker='x', color='red', linewidths=1.5, label='Matched Catalog')
            # number each matched pair so it can be referenced during outlier review
            c = np.arange(sopnav.matched_extracted_image_points[ind].shape[1])
            for x, y, label in zip(*cat_locations, c.astype(np.str_)):
                axt.text(x, y, label, color="white", fontsize=8)
            big_resids.append(resids)
            # draw this image's residual arrows on the combined figure
            ax.quiver(*sopnav.matched_extracted_image_points[ind], *resids, angles='xy', scale_units='xy',
                      color='black', width=0.0005, headwidth=20, headlength=20)
        # keep only unmatched catalogue predictions that fall inside the detector
        in_fov = ((sopnav.unmatched_catalogue_image_points[ind] >= 0) &
                  (sopnav.unmatched_catalogue_image_points[ind] <= [[sopnav.model.n_cols],
                                                                    [sopnav.model.n_rows]])).all(axis=0)
        # Plot Unmatched Pairs
        axt.scatter(*sopnav.unmatched_extracted_image_points[ind], marker='d', color='none', edgecolors='green',
                    linewidths=0.5, label='Unmatched Centroids')
        axt.scatter(*(sopnav.unmatched_catalogue_image_points[ind][:, in_fov]), marker='d', color='none',
                    edgecolors='cyan',
                    linewidths=0.5, label='Unmatched Catalog')
        axt.legend().set_draggable(True)
        # NOTE(review): when flattened_image or log_scale is used, ``image`` has been
        # reassigned above - assumes the result still carries ``observation_date``; confirm
        plt.title('Observation Date: {}'.format(image.observation_date))
        if pdf_name:
            # close each per-image figure as we go to bound memory use
            pdf.savefig(fig)
            fig.clear()
            plt.close(fig)
    if big_resids:
        # stack the per-image residuals into a single 2xN array for the statistics
        big_resids = np.concatenate(big_resids, axis=1)
        ax.set_xlim([0, sopnav.model.n_cols])
        ax.set_ylim([0, sopnav.model.n_rows])
        # image convention: row 0 is at the top
        ax.invert_yaxis()
        ax.set_xlabel('columns, pix')
        ax.set_ylabel('rows, pix')
        ax.set_title('Residuals All Images\n '
                     'std = ({0:5.3f}, {1:5.3f}) '
                     'mean = ({2:5.3f}, {3:5.3f}) pixels'.format(*np.std(big_resids, axis=1),
                                                                 *np.mean(big_resids, axis=1)))
        print(*np.std(big_resids, axis=1))
        print(*np.mean(big_resids, axis=1))
    if pdf_name:
        pdf.savefig(all_resids_fig)
        pdf.close()
        # NOTE(review): assumes pdf_name supports ``+`` (i.e. is a str, not a Path) - confirm
        all_resids_fig.savefig(pdf_name + '_combined_resids.pdf')
        all_resids_fig.clear()
        plt.close(all_resids_fig)
    else:
        plt.show()
def residual_histograms(sopnav: StellarOpNav, individual_images: bool = False, pdf_name: Optional[PATH] = None):
    """
    This function generates histograms of the matched star residuals for a given stellar opnav object.

    Typically, 3 histograms are created in a single figure.  The first shows the histogram of all residuals (both
    column and row) for all images.  The second shows the histogram of just the column residuals for all images.  The
    third shows the histogram of just the row residuals for all images.  Optionally, if you specify
    ``individual_images`` to be ``True`` then this function will generate an additional figure for each image turned
    on in ``sopnav`` showing the three histograms described previously for just that image.  If using this option it
    is recommended to save the plots to a PDF instead of showing them interactively because many figures would be
    opened, possibly causing your computer to slow down.

    You must have called :meth:`~.StellarOpNav.id_stars` at least once before calling this function.

    If the ``pdf_name`` param is provided, the figures will be saved to a pdf file of the same name, and will not be
    displayed interactively.

    :param sopnav: The stellar opnav object to plot the histograms for
    :param individual_images: A flag specifying whether to generate per-image histograms in addition to the combined
                              histogram spanning all images
    :param pdf_name: Used as the file name for saving the figures to a pdf
    """
    if pdf_name:
        pdf = PdfPages(pdf_name)
    else:
        pdf = None
    column_residuals = []
    row_residuals = []
    all_residuals = []
    # store per-image residuals keyed by camera index.  A plain list indexed by
    # ``ind`` breaks when earlier images are turned off, because ``ind`` is the
    # camera index of the turned-on image, not its position in the list.
    image_residuals = {}
    for ind, _ in sopnav.camera:
        residuals = sopnav.matched_star_residuals(ind)
        if individual_images:
            image_residuals[ind] = residuals
        if (residuals is not None) and residuals.size:
            # row 0 of the residual array is columns, row 1 is rows
            column_residuals.extend(residuals[0])
            row_residuals.extend(residuals[1])
            all_residuals.extend(residuals[0])
            all_residuals.extend(residuals[1])
    # combined histogram across every turned-on image
    fig = plt.figure()
    plt.hist(all_residuals, bins='auto', histtype='bar', ec='white', label='all')
    plt.hist(column_residuals, bins='auto', histtype='bar', ec='white', alpha=0.5, label='column')
    plt.hist(row_residuals, bins='auto', histtype='bar', ec='white', alpha=0.5, label='row')
    plt.title('Residual Histogram All Images\n'
              'STD (Total (column, row)) = {:.3g} ({:.3g}, {:.3g})'.format(np.std(all_residuals),
                                                                           np.std(column_residuals),
                                                                           np.std(row_residuals)))
    plt.xlabel('Residuals, pix')
    plt.ylabel('Count')
    # old matplotlib exposes draggable(); newer versions use set_draggable()
    try:
        plt.legend().draggable()
    except AttributeError:
        plt.legend().set_draggable(True)
    if pdf_name:
        pdf.savefig(fig)
        plt.close(fig)
    if individual_images:
        for ind, image in sopnav.camera:
            residuals = image_residuals.get(ind)
            # explicit None/size checks: truth-testing a multi-element ndarray
            # raises "The truth value of an array ... is ambiguous"
            if (residuals is not None) and residuals.size:
                fig = plt.figure()
                plt.hist(residuals.ravel(), bins='auto', histtype='bar', ec='white', label='all')
                plt.hist(residuals[0], bins='auto', histtype='bar', ec='white', alpha=0.5, label='column')
                plt.hist(residuals[1], bins='auto', histtype='bar', ec='white', alpha=0.5, label='row')
                plt.title(
                    'Residual Histogram Images {}\n'
                    'STD (Total (column, row)) = {:.3g} ({:.3g}, {:.3g})'.format(image.observation_date.isoformat(),
                                                                                 np.std(residuals.ravel()),
                                                                                 np.std(residuals[0]),
                                                                                 np.std(residuals[1])))
                plt.xlabel('Residuals, pix')
                plt.ylabel('Count')
                try:
                    plt.legend().draggable()
                except AttributeError:
                    plt.legend().set_draggable(True)
                if pdf_name:
                    pdf.savefig(fig)
                    plt.close(fig)
    if pdf_name:
        pdf.close()
    else:
        plt.show()
def plot_residuals_vs_magnitude(sopnav: StellarOpNav, individual_images: bool = False, pdf_name: Optional[PATH] = None):
"""
This function generates a scatter plot of x and y residuals versus star magnitudes from the matched catalogue
stars for a given stellar opnav object.
Generally, this function will generate a single scatter plot showing the residuals vs magnitude across all images,
however, if you specify ``individual_images`` as ``True``, then in addition to the summary plot, a single plot will
be made showing the residuals vs magnitude for each image.
You must have called :meth:`~.StellarOpNav.id_stars` at least once before calling this function.
If the ``pdf_name`` param is provided, the figures will be saved to a pdf file of the same name, and will not be
displayed interactively.
:param sopnav: The stellar opnav object to plot the scatters for
:param individual_images: A flag specifying whether to generate individual plots for each image in addition to the
plot spanning all images
:param pdf_name: Used as the file name for saving the figures to a pdf
"""
if pdf_name:
pdf = PdfPages(pdf_name)
else:
pdf = None
column_residuals = []
row_residuals = []
all_residuals = []
image_residuals = []
star_magnitudes = []
for ind, _ in sopnav.camera:
residuals = sopnav.matched_star_residuals(ind)
if individual_images:
image_residuals.append(residuals)
if (residuals | |
<gh_stars>1-10
import os
import time
import shutil
import pickle
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import torch.nn.utils as utils
from tqdm import tqdm
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tensorboard_logger import configure, log_value
from model import RecurrentAttention
from utils import AverageMeter
class Trainer:
"""A Recurrent Attention Model trainer.
All hyperparameters are provided by the user in the
config file.
"""
def __init__(self, config, data_loader):
    """
    Construct a new Trainer instance.

    Args:
        config: object containing command line arguments.
        data_loader: A data iterator.  When ``config.is_train`` it is a
            ``(train_loader, valid_loader)`` pair, otherwise the test loader.
    """
    self.config = config
    # run on the GPU when requested and one is available, else fall back to CPU
    if config.use_gpu and torch.cuda.is_available():
        self.device = torch.device("cuda")
    else:
        self.device = torch.device("cpu")
    # glimpse network params
    self.patch_size = config.patch_size
    self.glimpse_scale = config.glimpse_scale
    self.num_patches = config.num_patches
    self.loc_hidden = config.loc_hidden
    self.glimpse_hidden = config.glimpse_hidden
    # core network params
    self.num_glimpses = config.num_glimpses
    self.hidden_size = config.hidden_size
    # reinforce params
    self.std = config.std
    self.M = config.M
    # data params
    if config.is_train:
        self.train_loader = data_loader[0]
        self.valid_loader = data_loader[1]
        self.num_train = len(self.train_loader)
        self.num_valid = len(self.valid_loader)
    else:
        self.test_loader = data_loader
        self.num_test = len(self.test_loader)
    # size of the action (retrieval embedding) output and input channel count
    self.act_dimension = 64
    self.num_channels = 1
    # training params
    self.epochs = config.epochs
    self.start_epoch = 0
    self.momentum = config.momentum
    self.lr = config.init_lr
    # misc params
    self.best = config.best
    self.ckpt_dir = config.ckpt_dir
    self.logs_dir = config.logs_dir
    self.best_valid_reward = 0.0
    # number of consecutive epochs without validation improvement (early stop)
    self.counter = 0
    self.lr_patience = config.lr_patience
    self.train_patience = config.train_patience
    self.use_tensorboard = config.use_tensorboard
    self.resume = config.resume
    self.print_freq = config.print_freq
    self.plot_freq = config.plot_freq
    # name encodes the main architecture hyperparameters, e.g. ram_6_8x8_1
    self.model_name = "ram_{}_{}x{}_{}".format(
        config.num_glimpses,
        config.patch_size,
        config.patch_size,
        config.glimpse_scale,
    )
    self.plot_dir = "./plots/" + self.model_name + "/"
    if not os.path.exists(self.plot_dir):
        os.makedirs(self.plot_dir)
    # configure tensorboard logging
    if self.use_tensorboard:
        tensorboard_dir = self.logs_dir + self.model_name
        print("[*] Saving tensorboard logs to {}".format(tensorboard_dir))
        if not os.path.exists(tensorboard_dir):
            os.makedirs(tensorboard_dir)
        configure(tensorboard_dir)
    # build RAM model
    self.model = RecurrentAttention(
        self.patch_size,
        self.num_patches,
        self.glimpse_scale,
        self.num_channels,
        self.loc_hidden,
        self.glimpse_hidden,
        self.std,
        self.hidden_size,
        self.act_dimension,
    )
    self.model.to(self.device)
    # initialize optimizer and scheduler; scheduler maximizes validation reward
    self.optimizer = torch.optim.Adam(
        self.model.parameters(), lr=self.config.init_lr
    )
    self.scheduler = ReduceLROnPlateau(
        self.optimizer, "max", factor= 0.1, patience=self.lr_patience
    )
    # precomputed image/sketch feature galleries and their names
    # NOTE(review): loaded from the current working directory - confirm these
    # pickle files are present wherever the trainer is launched
    with open("Train.pickle", "rb") as f:
        self.Image_Array_Train, self.Sketch_Array_Train, self.Image_Name_Train, self.Sketch_Name_Train = pickle.load(f)
    with open("Test.pickle", "rb") as f:
        self.Image_Array_Test, self.Sketch_Array_Test, self.Image_Name_Test, self.Sketch_Name_Test = pickle.load(f)
    print("pretrained load completed!")
    # the first 100 test sketches double as the validation split
    self.Sketch_Array_Valid = self.Sketch_Array_Test[:100]
    self.Sketch_Name_Valid = self.Sketch_Name_Test[:100]
def reset(self):
h_t = torch.zeros(
self.batch_size,
self.hidden_size,
dtype=torch.float,
device=self.device,
requires_grad=True,
)
l_t = torch.FloatTensor(self.batch_size, 2).uniform_(-1, 1).to(self.device)
l_t.requires_grad = True
return h_t, l_t
def train(self):
    """Train the model on the training set.

    A checkpoint of the model is saved after each epoch and, if the
    validation reward improves, a separate best-model checkpoint is created
    for use on the test set.  Training stops early after
    ``self.train_patience`` consecutive epochs without improvement.  After
    the loop, the validation reward history is pickled to ``Paint.pickle``
    and six summary curves are written out as ``.eps`` files.
    """
    # load the most recent checkpoint when resuming
    if self.resume:
        self.load_checkpoint(best=False)
    print(
        "\n[*] Train on {} samples, validate on {} samples".format(
            self.num_train, self.num_valid
        )
    )
    # per-sketch bookkeeping shared with train_one_epoch
    dict_list = []
    # history buffers for the summary plots written after training
    epoches = []
    t_loss = []
    t_reward = []
    v_reward = []
    v_acc = []
    v_acc10 = []
    v_rp = []
    counter1 = 0
    for epoch in range(self.start_epoch, self.epochs):
        print(
            "\nEpoch: {}/{} - LR: {:.6f}".format(
                epoch + 1, self.epochs, self.optimizer.param_groups[0]["lr"]
            )
        )
        # train for 1 epoch
        train_loss, train_reward, train_loss_action, train_loss_reinforce = self.train_one_epoch(epoch, dict_list, counter1)
        # evaluate on validation set
        valid_reward, valid_acc, valid_acc10, rp = self.validate(epoch)
        # reduce lr if the validation reward plateaus (scheduler is in "max" mode)
        self.scheduler.step(valid_reward)
        is_best = valid_reward > self.best_valid_reward
        msg1 = "train loss: {:.3f} - train reward: {:.3f} - train action_loss: {:.3f} - train reinforce_loss: {:.3f}"
        msg2 = "- val reward: {:.3f} - val acc: {:.3f} - val err: {:.3f}"
        if is_best:
            self.counter = 0
            msg2 += " [*]"
        msg = msg1 + msg2
        print(
            msg.format(
                train_loss, train_reward, train_loss_action, train_loss_reinforce, valid_reward, valid_acc, 1 - valid_acc
            )
        )
        # record this epoch's metrics for the summary plots
        epoches.append(epoch)
        t_loss.append(train_loss)
        t_reward.append(train_reward)
        v_reward.append(valid_reward)
        v_acc.append(valid_acc)
        v_acc10.append(valid_acc10)
        v_rp.append(rp)
        counter1 += 1
        # check for improvement / early stopping
        if not is_best:
            self.counter += 1
        if self.counter > self.train_patience:
            print("[!] No improvement in a while, stopping training.")
            break
        self.best_valid_reward = max(valid_reward, self.best_valid_reward)
        self.save_checkpoint(
            {
                "epoch": epoch + 1,
                "model_state": self.model.state_dict(),
                "optim_state": self.optimizer.state_dict(),
                "best_valid_acc": self.best_valid_reward,
            },
            is_best,
        )
    # persist the validation reward history for later plotting
    with open("Paint.pickle", "wb") as f:
        pickle.dump(v_reward, f)
    # training-loss curve
    plt.plot(epoches, t_loss, color='blue', label='train_loss')
    plt.ylabel('training loss')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.title('Training Loss Plot')
    plt.savefig('train_loss.eps')
    plt.close()
    # training-reward curve
    plt.plot(epoches, t_reward, color='blue', label='train_reward')
    plt.ylabel('training reward')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.title('Training Reward Plot')
    plt.savefig('train_reward.eps')
    plt.close()
    # validation-reward curve
    plt.plot(epoches, v_reward, color='blue', label='valid_reward')
    plt.ylabel('valid reward')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.title('Valid Reward Plot')
    plt.savefig('valid_reward.eps')
    plt.close()
    # validation top-5 accuracy curve
    plt.plot(epoches, v_acc, color='blue', label='valid_acc')
    plt.ylabel('valid accuracy')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.title('Valid top5@Accuracy Plot')
    plt.savefig('valid_accuracy.eps')
    plt.close()
    # validation top-10 accuracy curve
    plt.plot(epoches, v_acc10, color='blue', label='valid_acc')
    plt.ylabel('valid accuracy')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.title('Valid top10@Accuracy Plot')
    plt.savefig('valid_accuracy10.eps')
    plt.close()
    # validation ranking-percentile curve
    plt.plot(epoches, v_rp, color='blue', label='valid_rp')
    plt.ylabel('valid ranking percentile')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.title('Valid Rank Percentile Plot')
    plt.savefig('valid_rp.eps')
    plt.close()
def get_reward(self, action, sketch_name):
sketch_query_name = '_'.join(sketch_name.split('/')[-1].split('_')[:-1])
position_query = self.Image_Name_Train.index(sketch_query_name)
target_distance = F.pairwise_distance(action,
self.Image_Array_Train[position_query].unsqueeze(0))
distance = F.pairwise_distance(action, self.Image_Array_Train)
rank = distance.le(target_distance).sum()
if rank.item() == 0:
reward = 1. / (rank.item() + 1)
else:
reward = 1. / rank.item()
return reward, rank.item(), self.Image_Array_Train[position_query].unsqueeze(0).to(self.device).detach()
def train_one_epoch(self, epoch, dict_list, counter):
"""
Train the model for 1 epoch of the training set.
An epoch corresponds to one full pass through the entire
training set in successive mini-batches.
This is used by train() and should not be called manually.
"""
self.model.train()
batch_time = AverageMeter()
reward = AverageMeter()
losses_action = AverageMeter()
losses_reinforce = AverageMeter()
# losses_baseline = AverageMeter()
losses = AverageMeter()
# accs = AverageMeter()
tic = time.time()
# imgs = []
# locs = []
with tqdm(total=self.num_train) as pbar:
for i, sampled_batch in enumerate(self.train_loader):
self.optimizer.zero_grad()
plot = False
if (epoch % self.plot_freq == 0) and (i == 0):
plot = True
imgs = []
locs_list = []
loss_buffer = []
for j, sampled_sketch in enumerate(sampled_batch['sketch_img']):
if (epoch == 0 or counter==0) and i==0:
dict = {}
dict_list.append(dict)
# x, y = x.to(self.device), y.to(self.device)
x = sampled_sketch.to(self.device)
# initialize location vector and hidden state
self.batch_size = x.shape[0]
h_t, l_t = self.reset()
# h_t = torch.tensor(self.Sketch_Array_Train[i][-1], dtype=torch.float, device=self.device,requires_grad=True).unsqueeze(0)
# save images
if j==8 or j == 16:
imgs.append(x[0:9])
# imgs = []
# imgs.append(x[0:9])
# extract the glimpses
locs = []
log_pi = []
baselines = []
entropys = []
# actions = []
np.set_printoptions(threshold=np.inf)
for t in range(self.num_glimpses - 1):
# forward pass through model
h_t, l_t, b_t, p, entropy = self.model(x, l_t, h_t, epoch, t, False)
# actions.append(action)
print("l_t_{}/{}/{}".format(epoch, i, t), l_t)
print("h_t_{}/{}/{}".format(epoch, i, t), h_t[0].detach().cpu().numpy())
# store
locs.append(l_t[0:9])
baselines.append(b_t)
entropys.append(entropy)
log_pi.append(p)
# last iteration
h_t, l_t, b_t, action, p, entropy = self.model(x, l_t, h_t, epoch, t, False, last=True)
# actions.append(action)
log_pi.append(p)
print("l_t_{}/{}/{}".format(epoch,i,t), l_t)
print("h_t_{}/{}/{}".format(epoch,i,t), h_t[0].detach().cpu().numpy())
baselines.append(b_t)
entropys.append(entropy)
locs.append(l_t[0:9])
# locs_list.append(locs)
# loc.append(l_t)
# loc = torch.cat(loc)
# locs.append(loc)
if j==8 or j==16:
locs_list.append(locs)
# convert list to tensors and reshape
# baselines = torch.stack(baselines).transpose(1, 0)
log_pi = torch.stack(log_pi).transpose(1, 0)
# entropys = torch.stack(entropys).transpose(1, 0)
# compute losses for differentiable modules
sketch_name_list = sampled_batch['sketch_path']
one_hot = []
# R_list = []
# Reward_back_list = []
# for k1, action in enumerate(actions):
R = []
Reward_back = []
# RL_loss = 0
for k, sketch_name in enumerate(sketch_name_list):
# assert sketch_name == self.Sketch_Name_Train[i * 32 + k]
action_single = action[k].unsqueeze(0)
Reward, rank, target_img = self.get_reward(action_single, sketch_name)
# if rank > 10:
# Reward1 = 0.
# else:
# Reward1 = Reward
# if F.mse_loss(action_single, target_img) > 0.1:
# Reward1 = 0.
# RL_loss = RL_loss - Reward1*a_p[k]
# flag = False
# if sketch_name in dict_list[j]:
# if rank <= dict_list[j][sketch_name]:
# flag = True
# dict_list[j][sketch_name] = rank
# else:
# dict_list[j][sketch_name] = rank
# if k1 == self.num_glimpses - | |
<reponame>CoderPat/structured-neural-summarization
import argparse
from opengnn.models import GraphToSequence, SequencedGraphToSequence
from opengnn.encoders import GGNNEncoder, SequencedGraphEncoder
from opengnn.decoders.sequence import RNNDecoder, HybridPointerDecoder
from opengnn.inputters import TokenEmbedder, CopyingTokenEmbedder
from opengnn.inputters import GraphEmbedder
from opengnn.inputters import SequencedGraphInputter
from opengnn.utils import CoverageBahdanauAttention, read_jsonl_gz_file
import tensorflow as tf
import os
import json
from tensorflow.contrib.seq2seq import BahdanauAttention
from tensorflow.python.util import function_utils
from tensorflow.python import debug as tf_debug
from rouge import Rouge
# Silence TensorFlow's Python-side and C++-side logging noise.
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Default dataset locations (CNN/DailyMail split) and vocabulary files.
# All source/target files are gzipped JSON-lines; vocab files are plain text.
DEFAULT_TRAIN_SOURCE_FILE = 'data/naturallanguage/cnn_dailymail/split/train/inputs.jsonl.gz'
DEFAULT_TRAIN_TARGET_FILE = 'data/naturallanguage/cnn_dailymail/split/train/summaries.jsonl.gz'
DEFAULT_VALID_SOURCE_FILE = 'data/naturallanguage/cnn_dailymail/split/valid/inputs.jsonl.gz'
DEFAULT_VALID_TARGET_FILE = 'data/naturallanguage/cnn_dailymail/split/valid/summaries.jsonl.gz'
DEFAULT_NODE_VOCAB_FILE = 'data/naturallanguage/cnn_dailymail/node.vocab'
DEFAULT_EDGE_VOCAB_FILE = 'data/naturallanguage/cnn_dailymail/edge.vocab'
DEFAULT_TARGET_VOCAB_FILE = 'data/naturallanguage/cnn_dailymail/output.vocab'
# Default model name; also used as the default checkpoint directory.
DEFAULT_MODEL_NAME = 'cnndailymail_summarizer'
def main():
    """Parse CLI arguments, build the graph-to-sequence summarization model,
    train it with periodic validation, and optionally run inference afterwards.
    """
    parser = argparse.ArgumentParser()

    # optimization arguments
    # NOTE(review): several help strings below were copy-pasted as "Number of
    # epochs to train the model" in the original; corrected to describe the
    # actual flag. Flag names, types and defaults are unchanged except
    # --lr_decay_steps, which is a step count and is now parsed as int.
    parser.add_argument('--optimizer', default='adam', type=str,
                        help="Optimizer to use (e.g. adam, sgd, adagrad, momentum)")
    parser.add_argument('--train_steps', default=300000, type=int,
                        help="Number of steps to optimize")
    parser.add_argument('--learning_rate', default=0.001, type=float,
                        help="The learning rate for the optimizer")
    parser.add_argument('--lr_decay_rate', default=0.0, type=float,
                        help="Learning rate decay rate")
    parser.add_argument('--lr_decay_steps', default=10000, type=int,
                        help="Number of steps between learning rate decay application")
    parser.add_argument('--adagrad_initial_accumulator', default=0.1, type=float,
                        help="Initial accumulator value for the adagrad optimizer")
    parser.add_argument('--momentum_value', default=0.95, type=float,
                        help="Momentum value for the momentum optimizer")
    parser.add_argument('--batch_size', default=16, type=int,
                        help="Number of samples per training batch")
    parser.add_argument('--sample_buffer_size', default=10000, type=int,
                        help="The number of samples in the buffer shuffled before training")
    parser.add_argument('--bucket_width', default=5, type=int,
                        help="Range of allowed lengths in a batch. Optimizes RNN loops")
    parser.add_argument('--clip_gradients', default=5., type=float,
                        help="Maximum norm of the gradients")
    parser.add_argument('--validation_interval', default=20000, type=int,
                        help="The number of training steps between each validation run")
    parser.add_argument('--validation_metric', default='rouge', type=str,
                        help="The metric to compare models between validations")
    parser.add_argument('--patience', default=5, type=int,
                        help="Number of worse validations needed to trigger early stop")
    parser.add_argument('--logging_window', default=200, type=int,
                        help="Number of steps taken when logging")

    # model options arguments
    parser.add_argument('--source_embeddings_size', default=128, type=int,
                        help="Size of the input tokens embeddings")
    parser.add_argument('--target_embeddings_size', default=128, type=int,
                        help="Size of the target token embeddings")
    parser.add_argument('--embeddings_dropout', default=0.2, type=float,
                        help="Dropout applied to the node embeddings during training")
    parser.add_argument('--node_features_size', default=256, type=int,
                        help="Size of the node features hidden state")
    parser.add_argument('--node_features_dropout', default=0.2, type=float,
                        help="Dropout applied to the node features during training")
    parser.add_argument('--ggnn_num_layers', default=4, type=int,
                        help="Number of GGNN layers with distinct weights")
    parser.add_argument('--ggnn_timesteps_per_layer', default=1, type=int,
                        help="Number of GGNN propagations per layer")
    parser.add_argument('--rnn_num_layers', default=1, type=int,
                        help="Number of layers in the input and output rnns")
    parser.add_argument('--rnn_hidden_size', default=256, type=int,
                        help="Size of the input and output rnns hidden state")
    parser.add_argument('--rnn_hidden_dropout', default=0.3, type=float,
                        help="Dropout applied to the rnn hidden state during training")
    parser.add_argument('--attend_all_nodes', default=False, action='store_true',
                        help="If enabled, attention and copying will consider all nodes "
                             "rather than only the ones in the primary sequence")
    parser.add_argument('--only_graph_encoder', default=False, action='store_true',
                        help="If enabled, the model will ignore the sequence encoder, "
                             "using only the graph structure")
    parser.add_argument('--ignore_graph_encoder', default=False, action='store_true',
                        help="If enabled, the model ignore the graph encoder, using only "
                             "the primary sequence encoder")
    parser.add_argument('--copy_attention', default=False, action='store_true',
                        help="If enabled, use a copy/pointer mechanism in the decoder")
    parser.add_argument('--coverage_layer', default=False, action='store_true',
                        help="If enabled, use a coverage-tracking attention layer")
    parser.add_argument('--coverage_loss', default=0., type=float,
                        help="Weight (lambda) of the coverage loss term")
    parser.add_argument('--max_iterations', default=120, type=int,
                        help="The maximum number of decoding iterations at inference time")
    parser.add_argument('--beam_width', default=10, type=int,
                        help="The number of beam to search while decoding")
    parser.add_argument('--length_penalty', default=1.0, type=float,
                        help="The length penalty applied to beam search scores")
    parser.add_argument('--case_sensitive', default=False, action='store_true',
                        help="If enabled, node labels are case sentitive")

    # arguments for loading data
    parser.add_argument('--train_source_file', default=DEFAULT_TRAIN_SOURCE_FILE, type=str,
                        help="Path to the jsonl.gz file containing the train input graphs")
    parser.add_argument('--train_target_file', default=DEFAULT_TRAIN_TARGET_FILE, type=str,
                        help="Path to the jsonl.gz file containing the train target summaries")
    parser.add_argument('--valid_source_file', default=DEFAULT_VALID_SOURCE_FILE, type=str,
                        help="Path to the jsonl.gz file containing the valid input graphs")
    parser.add_argument('--valid_target_file', default=DEFAULT_VALID_TARGET_FILE, type=str,
                        help="Path to the jsonl.gz file containing the valid target summaries")
    parser.add_argument('--infer_source_file', default=None,
                        help="Path to the jsonl.gz file in which we wish to do inference "
                             "after training is complete")
    parser.add_argument('--infer_predictions_file', default=None,
                        help="Path to the file to save the results from inference")
    parser.add_argument('--node_vocab_file', default=DEFAULT_NODE_VOCAB_FILE, type=str,
                        help="Path to the node label vocabulary file")
    parser.add_argument('--edge_vocab_file', default=DEFAULT_EDGE_VOCAB_FILE, type=str,
                        help="Path to the edge type vocabulary file")
    parser.add_argument('--target_vocab_file', default=DEFAULT_TARGET_VOCAB_FILE, type=str,
                        help="Path to the target token vocabulary file")
    parser.add_argument('--truncated_source_size', default=500, type=int,
                        help="Max size for source sequences in the input graphs after truncation")
    parser.add_argument('--truncated_target_size', default=100, type=int,
                        help="Max size for target sequences after truncation")
    parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME, type=str,
                        help="Model name")

    # arguments for persistence
    parser.add_argument('--checkpoint_interval', default=5000, type=int,
                        help="The number of steps between model checkpoints")
    parser.add_argument('--checkpoint_dir', default=None, type=str,
                        help="Directory to where to save the checkpoints")

    # arguments for debugging
    parser.add_argument('--debug_mode', default=False, action='store_true',
                        help="If true, it will enable the tensorflow debugger")
    args = parser.parse_args()

    model = build_model(args)

    if args.checkpoint_dir is None:
        args.checkpoint_dir = args.model_name
    # Creating the "valid" subdirectory also creates the checkpoint dir itself;
    # exist_ok makes this idempotent across restarted runs.
    os.makedirs(os.path.join(args.checkpoint_dir, "valid"), exist_ok=True)

    train_and_eval(model, args)

    if args.infer_source_file is not None:
        infer(model, args)
def train_and_eval(model, args):
    """Train `model` with periodic validation, checkpointing, and early stopping.

    Builds a TF1 graph with two branches sharing variables (train and eval),
    runs the training loop in a single session, logs windowed losses, saves
    "current" checkpoints on a fixed schedule, and keeps a "best" checkpoint
    according to the validation ROUGE score.

    NOTE: graph construction order matters here (train scope is created first,
    eval scope reuses its variables), so statements must not be reordered.
    """
    optimizer = build_optimizer(args)
    metadata = build_metadata(args)
    config = build_config(args)
    params = build_params(args)

    # Input pipelines: training shuffles and buckets by length; eval does not.
    train_input_fn = model.input_fn(
        mode=tf.estimator.ModeKeys.TRAIN,
        batch_size=args.batch_size,
        metadata=metadata,
        features_file=args.train_source_file,
        labels_file=args.train_target_file,
        features_bucket_width=args.bucket_width,
        sample_buffer_size=args.sample_buffer_size)
    valid_input_fn = model.input_fn(
        mode=tf.estimator.ModeKeys.EVAL,
        batch_size=args.batch_size,
        metadata=metadata,
        features_file=args.valid_source_file,
        labels_file=args.valid_target_file,)
    # Reference summaries used to compute ROUGE during validation.
    valid_targets = read_jsonl_gz_file(args.valid_target_file)

    train_iterator = get_iterator_from_input_fn(train_input_fn)
    valid_iterator = get_iterator_from_input_fn(valid_input_fn)

    session_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        gpu_options=tf.GPUOptions(
            allow_growth=False))

    with tf.Session(config=session_config) as session:
        if args.debug_mode:
            # Wrap the session in the interactive CLI debugger.
            session = tf_debug.LocalCLIDebugWrapperSession(
                session, dump_root="~/Downloads/tf-debug")

        # build train graph, loss and optimization ops
        features, labels = train_iterator.get_next()
        with tf.variable_scope(args.model_name):
            outputs, _ = model(
                features, labels, tf.estimator.ModeKeys.TRAIN, params, config)
            train_loss, train_tb_loss = model.compute_loss(
                features, labels, outputs, params, tf.estimator.ModeKeys.TRAIN)
        train_op = optimizer(train_loss)

        # build eval graph, loss and prediction ops (reuses train variables)
        features, labels = valid_iterator.get_next()
        with tf.variable_scope(args.model_name, reuse=True):
            outputs, predictions = model(
                features, labels, tf.estimator.ModeKeys.EVAL, params, config)
            _, valid_tb_loss = model.compute_loss(
                features, labels, outputs, params, tf.estimator.ModeKeys.EVAL)

        global_step = tf.train.get_global_step()
        best_metric = 0        # best validation ROUGE seen so far
        worse_epochs = 0       # consecutive validations without improvement
        saver = tf.train.Saver(max_to_keep=100)
        # Summary writers: presumably a project-local TensorBoard wrapper
        # defined elsewhere in this file — confirm before refactoring.
        train_summary = Summary(args.checkpoint_dir)
        valid_summary = Summary(os.path.join(args.checkpoint_dir, "valid"))

        # TODO: Initialize tables some other way
        session.run([
            train_iterator.initializer,
            tf.tables_initializer()])

        # check if we are restarting a run: restore the latest checkpoint if
        # one exists, otherwise initialize variables from scratch.
        latest_checkpoint = tf.train.latest_checkpoint(args.checkpoint_dir)
        if latest_checkpoint is not None:
            saver.restore(session, latest_checkpoint)
        else:
            session.run(tf.global_variables_initializer())

        initial_step = session.run(global_step)
        window_loss = 0
        window_steps = 0
        for train_step in range(initial_step+1, args.train_steps+1):
            step_loss, _ = session.run([train_tb_loss, train_op])
            window_loss += step_loss
            window_steps += 1

            # check if in logging schedule
            if train_step % args.logging_window == 0:
                train_summary.scalar("loss", window_loss / window_steps, train_step)
                print("step %d, train loss: %0.2f" %
                      (train_step, window_loss / window_steps))
                window_loss = 0
                window_steps = 0

            # and checkpointing schedule
            if train_step % args.checkpoint_interval == 0:
                print("saving current model...")
                saver.save(session, os.path.join(args.checkpoint_dir, "current.ckpt"), global_step)

            # after training, do evaluation if on schedule
            if train_step % args.validation_interval == 0:
                valid_loss, valid_rouge = evaluate(
                    session,
                    model,
                    valid_iterator,
                    valid_tb_loss,
                    predictions,
                    valid_targets)
                print("eval loss: %0.2f, eval rouge: %0.2f" % (valid_loss, valid_rouge))
                valid_summary.scalar("loss", valid_loss, train_step)
                valid_summary.scalar("rouge", valid_rouge, train_step)
                if args.validation_metric == "rouge":
                    # check for new best model
                    if valid_rouge > best_metric:
                        best_metric = valid_rouge
                        worse_epochs = 0
                        print("saving best model...")
                        saver.save(session, os.path.join(args.checkpoint_dir, "best.ckpt"))
                    else:
                        worse_epochs += 1
                    # and stop training if triggered patience
                    if worse_epochs >= args.patience:
                        print("early stopping triggered...")
                        break
                else:
                    raise ValueError("%s not supported as validation metric" %
                                     args.validation_metric)
def evaluate(session,
             model,
             iterator,
             loss,
             predictions,
             targets):
    """Run one full pass over the validation dataset.

    Args:
        session: an active tf.Session with the eval graph built.
        model: the model object (used for `process_prediction`).
        iterator: the validation dataset iterator (re-initialized here).
        loss: the scalar validation loss tensor to fetch per batch.
        predictions: the predictions fetch (dict with a "tokens" entry).
        targets: reference summaries to score ROUGE against.

    Returns:
        (mean_loss, rouge): average per-batch loss and the corpus ROUGE score.
    """
    total_loss = 0
    num_batches = 0
    all_predictions = []
    session.run([iterator.initializer, tf.tables_initializer()])
    while True:
        try:
            batch_loss, batch_predictions = session.run([loss, predictions])
        except tf.errors.OutOfRangeError:
            # Dataset exhausted: one full validation epoch done.
            break
        # extend() instead of repeated list concatenation (was quadratic).
        all_predictions.extend(
            model.process_prediction({"tokens": prediction})
            for prediction in batch_predictions["tokens"])
        total_loss += batch_loss
        num_batches += 1
    # Guard against an empty validation set; original divided by zero.
    # Local result no longer shadows the `loss` tensor parameter.
    mean_loss = total_loss / num_batches if num_batches else 0.0
    rouge = compute_rouge(all_predictions, targets)
    return mean_loss, rouge
def get_iterator_from_input_fn(input_fn):
    """Build the dataset from `input_fn` and return an initializable iterator.

    Pinned to CPU so the input pipeline ops are not placed on an accelerator.
    """
    with tf.device('/cpu:0'):
        dataset = input_fn()
        return dataset.make_initializable_iterator()
def build_model(args):
    """Construct the summarization model from the parsed CLI arguments.

    Chooses between a copy/pointer decoder and a plain RNN decoder depending
    on --copy_attention, and between a pure graph encoder and a combined
    sequence+graph encoder depending on --only_graph_encoder.
    """
    # Coverage-aware attention penalizes re-attending the same positions.
    if args.coverage_layer:
        attention_layer = CoverageBahdanauAttention
    else:
        attention_layer = BahdanauAttention

    if args.copy_attention:
        # Copying variants share the target vocabulary so out-of-vocabulary
        # source tokens can be pointed at during decoding.
        node_embedder = CopyingTokenEmbedder(
            vocabulary_file_key="node_vocabulary",
            output_vocabulary_file_key="target_vocabulary",
            embedding_size=args.source_embeddings_size,
            dropout_rate=args.embeddings_dropout,
            lowercase=not args.case_sensitive)
        target_inputter = CopyingTokenEmbedder(
            vocabulary_file_key="target_vocabulary",
            input_tokens_fn=lambda data: data['labels'],
            embedding_size=args.target_embeddings_size,
            dropout_rate=args.embeddings_dropout,
            truncated_sentence_size=args.truncated_target_size)
        decoder = HybridPointerDecoder(
            num_units=args.rnn_hidden_size,
            num_layers=args.rnn_num_layers,
            output_dropout_rate=args.rnn_hidden_dropout,
            attention_mechanism_fn=attention_layer,
            coverage_loss_lambda=args.coverage_loss,
            copy_state=True)
    else:
        node_embedder = TokenEmbedder(
            vocabulary_file_key="node_vocabulary",
            embedding_size=args.source_embeddings_size,
            dropout_rate=args.embeddings_dropout,
            lowercase=not args.case_sensitive)
        target_inputter = TokenEmbedder(
            vocabulary_file_key="target_vocabulary",
            embedding_size=args.target_embeddings_size,
            dropout_rate=args.embeddings_dropout,
            truncated_sentence_size=args.truncated_target_size)
        decoder = RNNDecoder(
            num_units=args.rnn_hidden_size,
            num_layers=args.rnn_num_layers,
            output_dropout_rate=args.rnn_hidden_dropout,
            attention_mechanism_fn=attention_layer,
            coverage_loss_lambda=args.coverage_loss,
            copy_state=True)

    if args.only_graph_encoder:
        # Graph-only model: GGNN over the input graph, no sequence encoder.
        model = GraphToSequence(
            source_inputter=GraphEmbedder(
                edge_vocabulary_file_key="edge_vocabulary",
                node_embedder=node_embedder),
            target_inputter=target_inputter,
            encoder=GGNNEncoder(
                num_timesteps=[args.ggnn_timesteps_per_layer
                               for _ in range(args.ggnn_num_layers)],
                node_feature_size=args.node_features_size,
                gru_dropout_rate=args.node_features_dropout),
            decoder=decoder,
            name=args.model_name)
    else:
        # Combined model: GGNN plus a bidirectional RNN over the primary
        # token sequence (the graph part can be disabled with a flag).
        model = SequencedGraphToSequence(
            source_inputter=SequencedGraphInputter(
                graph_inputter=GraphEmbedder(
                    edge_vocabulary_file_key="edge_vocabulary",
                    node_embedder=node_embedder),
                truncated_sequence_size=args.truncated_source_size),
            target_inputter=target_inputter,
            encoder=SequencedGraphEncoder(
                base_graph_encoder=GGNNEncoder(
                    num_timesteps=[args.ggnn_timesteps_per_layer
                                   for _ in range(args.ggnn_num_layers)],
                    node_feature_size=args.node_features_size,
                    gru_dropout_rate=args.node_features_dropout),
                gnn_input_size=args.node_features_size,
                encoder_type='bidirectional_rnn',
                num_units=args.rnn_hidden_size,
                num_layers=args.rnn_num_layers,
                dropout_rate=args.rnn_hidden_dropout,
                ignore_graph_encoder=args.ignore_graph_encoder,),
            decoder=decoder,
            only_attend_primary=not args.attend_all_nodes,
            name=args.model_name)
    return model
def infer(model, args):
metadata = build_metadata(args)
config = build_config(args)
params = build_params(args)
input_fn = model.input_fn(
mode=tf.estimator.ModeKeys.PREDICT,
batch_size=args.batch_size,
metadata=metadata,
features_file=args.infer_source_file)
session_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
gpu_options=tf.GPUOptions(
allow_growth=False))
iterator = get_iterator_from_input_fn(input_fn)
with tf.Session(config=session_config) as session:
saver = tf.train.Saver(max_to_keep=100)
saver.restore(session, os.path.join(args.checkpoint_dir, "best.ckpt"))
# build eval graph, loss and prediction ops
features = iterator.get_next()
with tf.variable_scope(args.model_name, reuse=True):
_, predictions = model(
features, None, tf.estimator.ModeKeys.PREDICT, params, config)
session.run([iterator.initializer, tf.tables_initializer()])
steps = 0
infer_predictions = []
while | |
seeking medical "
"advice about treatment/care because of this service. Rely on info at your "
"own risk.",
"1. Next",
]
)
app.messages = []
msg = Message(
content="invalid",
to_addr="27820001002",
from_addr="27820001001",
transport_name="whatsapp",
transport_type=Message.TRANSPORT_TYPE.HTTP_API,
)
[reply] = await app.process_message(msg)
assert len(reply.content) < 160
assert reply.content == "\n".join(
[
"You confirm that you shouldn't disregard/delay seeking medical "
"advice about treatment/care because of this service. Rely on info at your "
"own risk.",
"1. Next",
]
)
@pytest.mark.asyncio
async def test_state_terms_returning_user():
    """A returning user restarting the flow is taken straight to the privacy policy."""
    user = User(
        addr="27820001003",
        state=StateData(name="state_welcome"),
        session_id=1,
        answers={"returning_user": "yes"},
    )
    application = Application(user)
    inbound = Message(
        content="start",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert user.state.name == "state_privacy_policy"
@pytest.mark.asyncio
async def test_state_privacy_policy(rapidpro_mock):
    """A returning user reaching the privacy-policy state sees the POPIA notice,
    and a RapidPro flow start is triggered (which SMSes the policy to the user).
    """
    u = User(
        addr="27820001003",
        state=StateData(name="state_welcome"),
        session_id=1,
        answers={"returning_user": "yes"},
    )
    app = Application(u)
    msg = Message(
        content="start",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    [reply] = await app.process_message(msg)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert u.state.name == "state_privacy_policy"
    assert reply.content == "\n".join(
        [
            "Your personal information is protected under POPIA and in "
            "accordance with the provisions of the HealthCheck Privacy "
            "Notice sent to you by SMS.",
            "1. Accept",
        ]
    )
    # Exactly one flow-start request must have hit the mock RapidPro server,
    # targeting this user's phone-number URN.
    assert [r.path for r in rapidpro_mock.app.requests] == ["/api/v2/flow_starts.json"]
    assert [r.json for r in rapidpro_mock.app.requests] == [
        {"flow": "flow-uuid", "urns": ["tel:27820001003"]}
    ]
@pytest.mark.asyncio
async def test_state_privacy_policy_confirmed_contact():
    """After accepting the policy, a confirmed contact skips ahead to the fever question."""
    user = User(
        addr="27820001003",
        state=StateData(name="state_privacy_policy"),
        session_id=1,
        answers={"state_privacy_policy_accepted": "yes", "confirmed_contact": "yes"},
    )
    application = Application(user)
    inbound = Message(
        content="start",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert user.state.name == "state_fever"
@pytest.mark.asyncio
async def test_state_privacy_policy_non_confirmed_contact():
    """After accepting the policy, a non-confirmed contact is asked for their age first."""
    user = User(
        addr="27820001003",
        state=StateData(name="state_privacy_policy"),
        session_id=1,
        answers={"state_privacy_policy_accepted": "yes", "confirmed_contact": "no"},
    )
    application = Application(user)
    inbound = Message(
        content="start",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert user.state.name == "state_age"
@pytest.mark.asyncio
async def test_state_end():
    """Declining the terms ends the session with a self-isolation reminder."""
    user = User(addr="27820001003", state=StateData(name="state_terms"), session_id=1)
    application = Application(user)
    inbound = Message(
        content="no",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    expected = (
        "You can return to this service at any time. Remember, if you think you "
        "have COVID-19 STAY HOME, avoid contact with other people and self-isolate."
    )
    assert reply.content == expected
    assert user.state.name == "state_start"
@pytest.mark.asyncio
async def test_state_end_confirmed_contact():
    """Declining the terms as a confirmed contact ends with a self-quarantine reminder."""
    user = User(
        addr="27820001003",
        state=StateData(name="state_terms"),
        session_id=1,
        answers={"confirmed_contact": "yes"},
    )
    application = Application(user)
    inbound = Message(
        content="no",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    expected = (
        "You can return to this service at any time. Remember, if you think you "
        "have COVID-19 STAY HOME, avoid contact with other people and self-quarantine."
    )
    assert reply.content == expected
    assert user.state.name == "state_start"
@pytest.mark.asyncio
async def test_state_province():
    """After answering the age question, the user is asked to pick a province;
    an invalid reply re-displays the same menu.
    """
    u = User(addr="27820001003", state=StateData(name="state_age"), session_id=1)
    app = Application(u)
    msg = Message(
        content="1",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    [reply] = await app.process_message(msg)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert u.state.name == "state_province"
    assert reply.content == "\n".join(
        [
            "Select your province",
            "",
            "Reply:",
            "1. EASTERN CAPE",
            "2. FREE STATE",
            "3. GAUTENG",
            "4. KWAZULU NATAL",
            "5. LIMPOPO",
            "6. MPUMALANGA",
            "7. NORTH WEST",
            "8. NORTHERN CAPE",
            "9. WESTERN CAPE",
        ]
    )
    # An unrecognised answer keeps the user in the same state and repeats
    # the identical menu.
    app.messages = []
    msg = Message(
        content="invalid",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    [reply] = await app.process_message(msg)
    assert len(reply.content) < 160
    assert u.state.name == "state_province"
    assert reply.content == "\n".join(
        [
            "Select your province",
            "",
            "Reply:",
            "1. EASTERN CAPE",
            "2. FREE STATE",
            "3. GAUTENG",
            "4. KWAZULU NATAL",
            "5. LIMPOPO",
            "6. MPUMALANGA",
            "7. NORTH WEST",
            "8. NORTHERN CAPE",
            "9. WESTERN CAPE",
        ]
    )
@pytest.mark.asyncio
async def test_state_city():
    """After the province is chosen, the user is asked to type their city;
    whitespace-only input re-asks the question.
    """
    u = User(
        addr="27820001003",
        state=StateData(name="state_age"),
        session_id=1,
        answers={"state_province": "ZA-WC"},
    )
    app = Application(u)
    msg = Message(
        content="2",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    [reply] = await app.process_message(msg)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert u.state.name == "state_city"
    assert reply.content == (
        "Please TYPE the name of your Suburb, Township, Town or " "Village (or nearest)"
    )
    # A blank answer is rejected: same state, same prompt.
    app.messages = []
    msg = Message(
        content=" ",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    [reply] = await app.process_message(msg)
    assert len(reply.content) < 160
    assert u.state.name == "state_city"
    assert reply.content == (
        "Please TYPE the name of your Suburb, Township, Town or " "Village (or nearest)"
    )
@pytest.mark.asyncio
async def test_state_city_skip():
    """If province, city and location are already answered, accepting the policy
    jumps straight to the fever question."""
    user = User(
        addr="27820001003",
        state=StateData(name="state_privacy_policy"),
        session_id=1,
        answers={
            "state_province": "ZA-WC",
            "state_city": "Cape Town",
            "city_location": "+1+1/",
            "state_age": "18-35",
        },
    )
    application = Application(user)
    inbound = Message(
        content="accept",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert user.state.name == "state_fever"
@pytest.mark.asyncio
async def test_state_city_skip_minor():
    """A minor (<18) is never asked for a city: accepting the policy goes to fever."""
    user = User(
        addr="27820001003",
        state=StateData(name="state_privacy_policy"),
        session_id=1,
        answers={"state_province": "ZA-WC", "state_age": "<18"},
    )
    application = Application(user)
    inbound = Message(
        content="accept",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert user.state.name == "state_fever"
@pytest.mark.asyncio
async def test_state_city_skip_confirmed_contact():
    """A confirmed contact with location already captured skips city entry and
    is asked the tracing question after giving an age."""
    user = User(
        addr="27820001003",
        state=StateData(name="state_age_years"),
        session_id=1,
        answers={
            "state_province": "ZA-WC",
            "state_city": "Cape Town",
            "city_location": "+1+1/",
            "confirmed_contact": "yes",
        },
    )
    application = Application(user)
    inbound = Message(
        content="19",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert user.state.name == "state_tracing"
@pytest.mark.asyncio
async def test_state_confirm_city(google_api_mock):
    """Typing a city triggers a Places autocomplete lookup and asks the user to
    confirm the top match; answering "no" returns to the city question.
    """
    u = User(
        addr="27820001003",
        state=StateData(name="state_city"),
        session_id=1,
        answers={"google_session_token": "<PASSWORD>"},
    )
    app = Application(u)
    msg = Message(
        content="cape town",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    [reply] = await app.process_message(msg)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert u.state.name == "state_confirm_city"
    assert reply.content == "\n".join(
        [
            "Please confirm the address below based on info you shared:",
            "Cape Town, South Africa",
            "",
            "Reply",
            "1. Yes",
            "2. No",
        ]
    )
    # Only the autocomplete endpoint should have been called so far; the
    # place id and formatted address are stored on the user's answers.
    assert [r.path for r in google_api_mock.app.requests] == [
        "/maps/api/place/autocomplete/json"
    ]
    assert u.answers["place_id"] == "ChIJD7fiBh9u5kcRYJSMaMOCCwQ"
    assert u.answers["state_city"] == "Cape Town, South Africa"
    # Rejecting the suggested address sends the user back to re-type the city.
    app.messages = []
    msg = Message(
        content="no",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    [reply] = await app.process_message(msg)
    assert len(reply.content) < 160
    assert u.state.name == "state_city"
@pytest.mark.asyncio
async def test_state_city_no_results():
    """If the Places autocomplete returns no results, the city question is re-asked."""
    google_api_mock.app.status = "NO_RESULT"
    user = User(
        addr="27820001003",
        state=StateData(name="state_city"),
        session_id=1,
        answers={"google_session_token": "<PASSWORD>"},
    )
    application = Application(user)
    inbound = Message(
        content="cape town",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert user.state.name == "state_city"
@pytest.mark.asyncio
async def test_state_city_error(google_api_mock):
    """If the Places API keeps failing (3 errors, beyond retries), the user gets
    the generic error message."""
    google_api_mock.app.api_errormax = 3
    user = User(
        addr="27820001003",
        state=StateData(name="state_city"),
        session_id=1,
        answers={"google_session_token": "<PASSWORD>"},
    )
    application = Application(user)
    inbound = Message(
        content="cape town",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    expected = (
        "Sorry, something went wrong. We have been notified. Please try again later"
    )
    assert reply.content == expected
@pytest.mark.asyncio
async def test_state_city_temporary_error(google_api_mock):
    """A single transient Places API error is retried transparently and the
    flow still reaches the confirm-city state."""
    google_api_mock.app.api_errormax = 1
    user = User(
        addr="27820001003",
        state=StateData(name="state_city"),
        session_id=1,
        answers={"google_session_token": "<PASSWORD>"},
    )
    application = Application(user)
    inbound = Message(
        content="cape town",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    await application.process_message(inbound)
    assert user.state.name == "state_confirm_city"
@pytest.mark.asyncio
async def test_state_place_details_lookup(google_api_mock):
    """Confirming the suggested city triggers a Place Details lookup, stores the
    ISO-6709 location string, and (non-contact) proceeds to the fever question.
    """
    u = User(
        addr="27820001003",
        state=StateData(name="state_confirm_city"),
        session_id=1,
        answers={
            "state_city": "Cape Town",
            "google_session_token": "<PASSWORD>",
            "place_id": "ChIJD7fiBh9u5kcRYJSMaMOCCwQ",
            "confirmed_contact": "no",
        },
    )
    app = Application(u)
    msg = Message(
        content="yes",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    [reply] = await app.process_message(msg)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert u.state.name == "state_fever"
    # Only the details endpoint is hit (autocomplete already happened earlier).
    assert [r.path for r in google_api_mock.app.requests] == [
        "/maps/api/place/details/json"
    ]
    assert u.answers["city_location"] == "-03.866651+051.195827/"
@pytest.mark.asyncio
async def test_state_place_details_lookup_invalid_response(google_api_mock):
    """An error status from the Place Details API sends the user back to re-type
    their city."""
    google_api_mock.app.status = "ERROR"
    user = User(
        addr="27820001003",
        state=StateData(name="state_confirm_city"),
        session_id=1,
        answers={
            "state_city": "Cape Town",
            "google_session_token": "123",
            "place_id": "ChIJD7fiBh9u5kcRYJSMaMOCCwQ",
            "confirmed_contact": "no",
        },
    )
    application = Application(user)
    inbound = Message(
        content="yes",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    await application.process_message(inbound)
    assert user.state.name == "state_city"
@pytest.mark.asyncio
async def test_state_place_details_lookup_error(google_api_mock):
    """If the Place Details API keeps failing (3 errors, beyond retries), the
    user gets the generic error message."""
    google_api_mock.app.api_errormax = 3
    user = User(
        addr="27820001003",
        state=StateData(name="state_confirm_city"),
        session_id=1,
        answers={
            "state_city": "Cape Town",
            "google_session_token": "123",
            "place_id": "ChIJD7fiBh9u5kcRYJSMaMOCCwQ",
            "confirmed_contact": "no",
        },
    )
    application = Application(user)
    inbound = Message(
        content="yes",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    (reply,) = await application.process_message(inbound)
    expected = (
        "Sorry, something went wrong. We have been notified. Please try again later"
    )
    assert reply.content == expected
@pytest.mark.asyncio
async def test_state_place_details_lookup_temporary_error(google_api_mock):
    """A single transient Place Details error is retried and the flow still
    reaches the fever question."""
    google_api_mock.app.api_errormax = 1
    user = User(
        addr="27820001003",
        state=StateData(name="state_confirm_city"),
        session_id=1,
        answers={
            "state_city": "Cape Town",
            "google_session_token": "123",
            "place_id": "ChIJD7fiBh9u5kcRYJSMaMOCCwQ",
            "confirmed_contact": "no",
        },
    )
    application = Application(user)
    inbound = Message(
        content="yes",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    await application.process_message(inbound)
    assert user.state.name == "state_fever"
@pytest.mark.asyncio
async def test_state_place_details_lookup_confirmed_contact(google_api_mock):
    """Same details lookup as the non-contact case, but a confirmed contact is
    routed to the tracing question instead of the fever question.
    """
    u = User(
        addr="27820001003",
        state=StateData(name="state_confirm_city"),
        session_id=1,
        answers={
            "state_city": "Cape Town",
            "google_session_token": "123",
            "place_id": "ChIJD7fiBh9u5kcRYJSMaMOCCwQ",
            "confirmed_contact": "yes",
        },
    )
    app = Application(u)
    msg = Message(
        content="yes",
        to_addr="27820001002",
        from_addr="27820001003",
        transport_name="whatsapp",
        transport_type=Message.TRANSPORT_TYPE.HTTP_API,
    )
    [reply] = await app.process_message(msg)
    # Replies must fit in a single 160-character message.
    assert len(reply.content) < 160
    assert u.state.name == "state_tracing"
    assert [r.path for r in google_api_mock.app.requests] == [
        "/maps/api/place/details/json"
    ]
    # Location is stored in ISO-6709 format.
    assert u.answers["city_location"] == "-03.866651+051.195827/"
@pytest.mark.asyncio
async def test_state_age():
u = User(addr="27820001003", state=StateData(name="state_age"), session_id=1)
app = Application(u)
msg = Message(
content="invalid",
to_addr="27820001002",
from_addr="27820001003",
transport_name="whatsapp",
transport_type=Message.TRANSPORT_TYPE.HTTP_API,
)
[reply] = await app.process_message(msg)
assert len(reply.content) < 160
assert u.state.name == "state_age"
assert reply.content == "\n".join(
[
"Please use numbers from list.",
"",
"How old are you?",
| |
from config import BAD_ACTOR_NOTIFICATION_URL
from datetime import datetime
from decimal import Decimal
from pytz import timezone
from wtforms import validators
import pytest
from batch import amount_to_charge
from npsp import RDO, Contact, Opportunity, SalesforceConnection, Account
from util import clean, construct_slack_message
from forms import format_amount, validate_amount
from charges import generate_stripe_description
from bad_actor import (
BadActor,
BadActorJudgmentType,
BadActorResponse,
BadActorResponseItem,
)
class SalesforceConnectionSubClass(SalesforceConnection):
    """Test double for SalesforceConnection: no auth, fixed instance URL."""

    def __init__(self):
        # Deliberately skip the parent __init__ so no network/auth happens.
        pass

    @property
    def instance_url(self):
        # Fixed fake URL; tests expect Slack links like "<quux/...|Salesforce>".
        return "quux"
# Shared fake Salesforce connection for all tests in this module.
sf = SalesforceConnectionSubClass()

# Canonical payload for the bad-actor screening API used by the tests below.
# (The email value is a redacted placeholder from the original fixture.)
bad_actor_request = {
    "email": "<EMAIL>",
    "ip": "127.0.0.1",
    "country_code": "US",
    "zipcode": "78701",
    "given_name": "nick",
    "family_name": "cage",
    "referer": "https://foo.com/foobar?foobar=baz",
    "captcha_token": "captcha_token",
    "reason": "because I care",
    "amount": 1.34,
}
def test_bad_actor_init():
    """BadActor.quarantine tracks the overall judgment of the API response."""
    suspect_response = BadActorResponse(
        overall_judgment=BadActorJudgmentType.suspect, items=[]
    )
    good_response = BadActorResponse(
        overall_judgment=BadActorJudgmentType.good, items=[]
    )

    actor = BadActor(bad_actor_request=bad_actor_request)

    # No API response yet: not quarantined
    assert actor.quarantine is False

    # A suspect judgment flips quarantine on; a good one flips it back off
    actor.bad_actor_api_response = suspect_response
    assert actor.quarantine is True
    actor.bad_actor_api_response = good_response
    assert actor.quarantine is False
def test_bad_actor_slackify_items():
    """_slackify_items maps each judgment to the matching emoji prefix."""
    items = [
        BadActorResponseItem(
            label="foo", value="bar", judgment=BadActorJudgmentType.suspect
        ),
        BadActorResponseItem(
            label="foo", value="bar", judgment=BadActorJudgmentType.good
        ),
    ]

    assert BadActor._slackify_items(items) == [
        {"type": "mrkdwn", "text": ":eyes: *foo*: bar"},
        {"type": "mrkdwn", "text": ":white_check_mark: *foo*: bar"},
    ]
def test_slackify_all():
    """_slackify_all builds the full Slack block-kit message.

    The message has three sections: a Salesforce link (built from the
    connection's instance_url and the transaction id), the per-item
    judgments, and approve/reject action buttons whose value encodes
    "<transaction_type>:<transaction_id>".
    """
    bad_actor_item1 = BadActorResponseItem(
        label="foo", value="bar", judgment=BadActorJudgmentType.suspect
    )
    bad_actor_item2 = BadActorResponseItem(
        label="foo", value="bar", judgment=BadActorJudgmentType.good
    )
    bad_actor_response = BadActorResponse(
        overall_judgment=BadActorJudgmentType.suspect,
        items=[bad_actor_item1, bad_actor_item2],
    )
    opportunity = Opportunity(sf_connection=sf)
    opportunity.id = "baz"
    bad_actor = BadActor(bad_actor_request=None)
    bad_actor.bad_actor_api_response = bad_actor_response
    bad_actor.transaction = opportunity
    bad_actor.transaction_type = "Opportunity"
    expected = [
        {
            # "quux" comes from SalesforceConnectionSubClass.instance_url
            "fields": [{"text": "<quux/baz|Salesforce>", "type": "mrkdwn"}],
            "type": "section",
        },
        {
            "fields": [
                {"text": ":eyes: *foo*: bar", "type": "mrkdwn"},
                {"text": ":white_check_mark: *foo*: bar", "type": "mrkdwn"},
            ],
            "type": "section",
        },
        {
            "block_id": "choices",
            "elements": [
                {
                    "action_id": "approve",
                    "style": "primary",
                    "text": {"emoji": True, "text": "Approve", "type": "plain_text"},
                    "type": "button",
                    "value": "Opportunity:baz",
                },
                {
                    "action_id": "reject",
                    "style": "danger",
                    "text": {"emoji": True, "text": "Reject", "type": "plain_text"},
                    "type": "button",
                    "value": "Opportunity:baz",
                },
            ],
            "type": "actions",
        },
    ]
    actual = bad_actor._slackify_all()
    assert actual == expected
def test_generate_stripe_description():
    """generate_stripe_description: description wins, then type, then a fallback.

    Scenarios: blank description falls back to the opportunity type; a
    leading "The " is stripped; a non-blank description overrides the type;
    an unknown type with a blank description yields the generic fallback.
    """
    scenarios = [
        # (type or None to leave unset, description, expected)
        ("Recurring Donation", "", "Texas Tribune Sustaining Membership"),
        (None, "The Cuddly Kitty", "Cuddly Kitty"),
        ("Recurring Donation", "Cats in Hats Are Cute!", "Cats in Hats Are Cute!"),
        (None, "", None),  # placeholder replaced below
    ]
    # Last scenario needs an explicit bogus type
    scenarios[3] = ("Something Bogus", "", "Texas Tribune")

    for opp_type, description, expected in scenarios:
        opp = Opportunity(sf_connection=sf)
        if opp_type is not None:
            opp.type = opp_type
        opp.description = description
        assert generate_stripe_description(opp) == expected
def test_net_amount_none():
    """A missing net amount reads back as the normalized string "0.00"."""
    opportunity = Opportunity(sf_connection=sf)
    opportunity.net_amount = None
    assert opportunity.net_amount == "0.00"
def test__clean():
    """clean() coerces string-typed form values into native Python types.

    Capitalized "None"/"True"/"False" become None/True/False; numeric
    strings become int/float; native values and other strings (including
    lowercase "none") pass through unchanged.
    """
    form = {
        "a": "None",
        "b": "True",
        "c": "False",
        "d": "None",
        "e": "none",
        "f": None,
        "g": True,
        "h": False,
        "i": 9,
        "j": 8.1,
        "k": "3.2",
        "l": "4",
        "m": "string",
    }
    expected = {
        "a": None,
        "b": True,
        "c": False,
        "d": None,
        "e": "none",
        "f": None,
        "g": True,
        "h": False,
        "i": 9,
        "j": 8.1,
        "k": 3.2,
        "l": 4,
        "m": "string",
    }
    actual = clean(form)
    assert expected == actual
    # NOTE(review): indexing a missing key without a KeyError implies clean()
    # returns a defaultdict-like mapping — confirm against util.clean.
    assert actual["bogus"] is None
def test__format_amount():
    """Opportunity.amount normalizes any assigned value to a 2-decimal string."""
    opp = Opportunity(sf_connection=sf)

    cases = [
        ("1500.123", "1500.12"),  # rounds down
        ("1500", "1500.00"),      # pads missing cents
        ("1500.00", "1500.00"),   # already normalized
        ("1500.126", "1500.13"),  # rounds up
    ]
    for raw, expected in cases:
        opp.amount = raw
        assert opp.amount == expected
class Response(object):
    """Bare stand-in for an HTTP response; tests assign status_code directly."""
    pass
def test_check_response():
    """check_response raises on any non-200 status unless it was declared expected."""
    response = Response()

    # 204 raises by default but passes when given as the expected status
    response.status_code = 204
    with pytest.raises(Exception):
        sf.check_response(response)
    assert sf.check_response(response, expected_status=204)

    # Server and client errors always raise
    for status in (500, 404):
        response.status_code = status
        with pytest.raises(Exception):
            sf.check_response(response)

    # A plain 200 succeeds and returns True
    response.status_code = 200
    assert sf.check_response(response) is True
# Salesforce close/established dates are computed in the Tribune's local zone;
# the _format tests below compare against this same "today" value.
zone = timezone("US/Central")
today = datetime.now(tz=zone).strftime("%Y-%m-%d")
def test__campaign_id_validation():
    """Campaign IDs must be 15 or 18 alphanumeric characters."""
    opp = Opportunity(sf_connection=sf)

    valid_ids = (
        "111AAA222bbb333",     # 15 chars, all alphanumeric
        "111AAA222bbb333ccc",  # 18 chars, all alphanumeric
    )
    invalid_ids = (
        "1!1A;-+22bbb333",     # 15 chars with punctuation
        "111AAA222bbb333#c;",  # 18 chars with punctuation
        "AAADDD",              # wrong length
    )

    for campaign_id in valid_ids:
        opp.campaign_id = campaign_id
        assert not opp.has_invalid_campaign_id_format()

    for campaign_id in invalid_ids:
        opp.campaign_id = campaign_id
        assert opp.has_invalid_campaign_id_format()
def test__format_slack():
    """construct_slack_message renders donor, amount, period and campaign.

    Covers four combinations: account+RDO, contact+RDO, contact+opportunity
    with a campaign, and contact+opportunity without a campaign (trailing
    space expected when the campaign segment is empty).
    """
    opportunity = Opportunity(sf_connection=sf)
    opportunity.account_id = "0011700000BpR8PAAV"
    opportunity.amount = 9
    opportunity.encouraged_by = "Because I love the Trib!"
    opportunity.name = "<NAME> (<EMAIL>)"
    opportunity.stripe_id = "cus_78MqJSBejMN9gn"
    opportunity.agreed_to_pay_fees = True
    opportunity.referral_id = "1234"
    opportunity.lead_source = "Stripe"
    opportunity.description = "The Texas Tribune Membership"
    opportunity.stripe_customer = "cus_78MqJSBejMN9gn"
    opportunity.campaign_id = "111111111111111"
    opportunity.campaign_name = "Test Campaign Name"

    # Same opportunity but with no campaign set
    no_campaign = Opportunity(sf_connection=sf)
    no_campaign.account_id = "0011700000BpR8PAAV"
    no_campaign.amount = 9
    no_campaign.encouraged_by = "Because I love the Trib!"
    no_campaign.name = "<NAME> (<EMAIL>)"
    no_campaign.stripe_id = "cus_78MqJSBejMN9gn"
    no_campaign.agreed_to_pay_fees = True
    no_campaign.referral_id = "1234"
    no_campaign.lead_source = "Stripe"
    no_campaign.description = "The Texas Tribune Membership"
    no_campaign.stripe_customer = "cus_78MqJSBejMN9gn"

    rdo = RDO(sf_connection=sf)
    rdo.referral_id = "1234"
    rdo.encouraged_by = "Because I love the Trib!"
    rdo.lead_source = "Stripe"
    rdo.contact_id = "0031700000BHQzBAAX"
    rdo.installment_period = "yearly"
    rdo.stripe_customer = "cus_78MqJSBejMN9gn"
    rdo.amount = 100
    rdo.name = "foo"
    rdo.installments = 3
    rdo.open_ended_status = None
    rdo.description = "Texas Tribune Circle Membership"
    rdo.agreed_to_pay_fees = True
    rdo.type = "Giving Circle"
    rdo.campaign_id = "000000000000000"
    rdo.campaign_name = "Recurring Test Campaign Name"

    contact = Contact(sf_connection=sf)
    contact.email = "<EMAIL>"
    contact.first_name = "D"
    contact.last_name = "C"
    contact.lead_source = "Stripe"
    contact.work_email = "<EMAIL>"

    account = Account(sf_connection=sf)
    account.name = "Acme Inc."
    account.website = "http://acme.com"
    account.shipping_street = "Street"
    account.shipping_city = "Austin"
    account.shipping_postalcode = "78701"
    account.shipping_state = "TX"
    account.record_type_name = "Household"

    # Account name takes precedence when provided
    actual = construct_slack_message(
        account=account, rdo=rdo, opportunity=None, contact=None
    )
    expected = "Acme Inc. pledged $100 [yearly] (Because I love the Trib!) (Recurring Test Campaign Name)"
    assert actual == expected

    # Falls back to the contact's name
    actual = construct_slack_message(
        account=None, rdo=rdo, opportunity=None, contact=contact
    )
    expected = "D C pledged $100 [yearly] (Because I love the Trib!) (Recurring Test Campaign Name)"
    assert actual == expected

    # One-time opportunity with a campaign
    actual = construct_slack_message(
        account=None, rdo=None, opportunity=opportunity, contact=contact
    )
    expected = (
        "D C pledged $9 [one-time] (Because I love the Trib!) (Test Campaign Name)"
    )
    assert actual == expected

    # Without a campaign the message ends with a trailing space
    actual = construct_slack_message(
        account=None, rdo=None, opportunity=no_campaign, contact=contact
    )
    expected = "D C pledged $9 [one-time] (Because I love the Trib!) "
    assert actual == expected
def test__format_opportunity():
    """Opportunity._format builds the full Salesforce payload for a one-time gift.

    Checks that amounts are normalized to two-decimal strings, the close
    date is today (US/Central), and unset fields serialize as None.
    """
    opportunity = Opportunity(sf_connection=sf)
    # Must match the "AccountId" asserted in the expected payload below; the
    # previous "<KEY>" placeholder made the assertion unsatisfiable.
    opportunity.account_id = "0011700000BpR8PAAV"
    opportunity.amount = 9
    opportunity.net_amount = 8
    opportunity.encouraged_by = "Because I love the Trib!"
    opportunity.name = "<NAME> (<EMAIL>)"
    opportunity.stripe_id = "cus_78MqJSBejMN9gn"
    opportunity.agreed_to_pay_fees = True
    opportunity.referral_id = "1234"
    opportunity.lead_source = "Stripe"
    opportunity.description = "The Texas Tribune Membership"
    opportunity.stripe_customer = "cus_78MqJSBejMN9gn"
    response = opportunity._format()
    expected = {
        "AccountId": "0011700000BpR8PAAV",
        "CampaignId": None,
        "Amount": "9.00",
        "CloseDate": today,
        "Encouraged_to_contribute_by__c": "Because I love the Trib!",
        "LeadSource": "Stripe",
        "Name": "<NAME> (<EMAIL>)",
        "RecordType": {"Name": "Membership"},
        "StageName": "Pledged",
        "Stripe_Customer_ID__c": "cus_78MqJSBejMN9gn",
        "Referral_ID__c": "1234",
        "Description": "The Texas Tribune Membership",
        "Stripe_Agreed_to_pay_fees__c": True,
        "Type": "Single",
        "Stripe_Card__c": None,
        "Stripe_Transaction_ID__c": None,
        "npsp__Closed_Lost_Reason__c": None,
        "Stripe_Card_Brand__c": None,
        "Stripe_Card_Expiration__c": None,
        "Stripe_Card_Last_4__c": None,
        "Amazon_Order_Id__c": None,
        "Net_Amount__c": "8.00",
        "Donor_Selected_Amount__c": 0,
        "Quarantined__c": False,
    }
    assert response == expected
def test__format_circle_donation():
    """RDO._format builds the recurring-donation payload for a Giving Circle.

    The serialized amount is amount * installments, and the quarantined
    flag is passed through.
    """
    rdo = RDO(sf_connection=sf)
    rdo.referral_id = "1234"
    rdo.encouraged_by = "Because I love the Trib!"
    rdo.lead_source = "Stripe"
    rdo.contact_id = "0031700000BHQzBAAX"
    rdo.installment_period = "yearly"
    rdo.stripe_customer = "cus_78MqJSBejMN9gn"
    rdo.amount = 100
    rdo.name = "foo"
    rdo.installments = 3
    rdo.open_ended_status = None
    rdo.description = "Texas Tribune Circle Membership"
    rdo.agreed_to_pay_fees = True
    rdo.type = "Giving Circle"
    rdo.quarantined = True
    response = rdo._format()
    expected_response = {
        "Referral_ID__c": "1234",
        "Encouraged_to_contribute_by__c": "Because I love the Trib!",
        "npe03__Date_Established__c": today,
        "Lead_Source__c": "Stripe",
        "npe03__Contact__c": "0031700000BHQzBAAX",
        "npe03__Installment_Period__c": "yearly",
        "Stripe_Customer_ID__c": "cus_78MqJSBejMN9gn",
        "Billing_Email__c": None,
        "Blast_Subscription_Email__c": None,
        "npe03__Organization__c": None,
        "npe03__Amount__c": "300.0",  # 3 * 100
        "Name": "foo",
        "npe03__Installments__c": 3,
        "npe03__Open_Ended_Status__c": None,
        "Stripe_Description__c": "Texas Tribune Circle Membership",
        "Stripe_Agreed_to_pay_fees__c": True,
        "Type__c": "Giving Circle",
        "npe03__Recurring_Donation_Campaign__c": None,
        "Stripe_Card_Brand__c": None,
        "Stripe_Card_Expiration__c": None,
        "Stripe_Card_Last_4__c": None,
        "Quarantined__c": True,
    }
    assert response == expected_response
def test__format_cent_circle_donation():
    """RDO._format multiplies a fractional amount by installments exactly.

    1501.01 * 3 must serialize as "4503.03" (no float drift), and the
    quarantined flag defaults to False.
    """
    rdo = RDO(sf_connection=sf)
    rdo.referral_id = "1234"
    rdo.encouraged_by = "Because I love the Trib!"
    rdo.lead_source = "Stripe"
    rdo.contact_id = "0031700000BHQzBAAX"
    rdo.installment_period = "yearly"
    rdo.stripe_customer = "cus_78MqJSBejMN9gn"
    rdo.amount = 1501.01
    rdo.name = "foo"
    rdo.installments = 3
    rdo.open_ended_status = None
    rdo.description = "Texas Tribune Circle Membership"
    rdo.agreed_to_pay_fees = True
    rdo.type = "Giving Circle"
    response = rdo._format()
    expected_response = {
        "Referral_ID__c": "1234",
        "Encouraged_to_contribute_by__c": "Because I love the Trib!",
        "npe03__Date_Established__c": today,
        "Lead_Source__c": "Stripe",
        "npe03__Organization__c": None,
        "npe03__Contact__c": "0031700000BHQzBAAX",
        "npe03__Installment_Period__c": "yearly",
        "Stripe_Customer_ID__c": "cus_78MqJSBejMN9gn",
        "npe03__Amount__c": "4503.03",  # 3 * 1501.01
        "Name": "foo",
        "npe03__Installments__c": 3,
        "npe03__Open_Ended_Status__c": None,
        "Stripe_Description__c": "Texas Tribune Circle Membership",
        "Stripe_Agreed_to_pay_fees__c": True,
        "Type__c": "Giving Circle",
        "npe03__Recurring_Donation_Campaign__c": None,
        "Billing_Email__c": None,
        "Blast_Subscription_Email__c": None,
        "Stripe_Card_Brand__c": None,
        "Stripe_Card_Expiration__c": None,
        "Stripe_Card_Last_4__c": None,
        "Quarantined__c": False,
    }
    # Removed `response["Name"] = "foo"`: overwriting the field right before
    # the comparison masked the Name produced by _format() from verification
    # (the sibling circle-donation test asserts Name without masking).
    assert response == expected_response
def test__format_recurring_donation():
rdo = RDO(sf_connection=sf)
rdo.referral_id = "1234"
rdo.encouraged_by = "Because I | |
> 0):
for key_indicator in compute_other_key_indicators:
for _id, _type in zip(key_indicator.target.score_ids, key_indicator.target.scores):
_kpiKI = {
"kpiId": _id,
"type": _type,
"kpiFamily": key_indicator.target.indicator_family,
"scoreType": _type,
"kpiType": key_indicator.target.indicator_type,
"output": key_indicator.target.variable_name,
"kpiName": key_indicator.target.name,
"omodality": key_indicator.target.modality
}
if (key_indicator.target.indicator_type == self._DISCRETE_MODALITY or key_indicator.target.indicator_type == self._DISCRETE):
if _type == self._PURITY:
if key_indicator.purity_min is not None:
_kpiKI['minValue'] = key_indicator.purity_min
if key_indicator.purity_max is not None:
_kpiKI['maxValue'] = key_indicator.purity_max
elif _type == self._COVERAGE:
if key_indicator.coverage_min is not None:
_kpiKI['minValue'] = key_indicator.coverage_min
if key_indicator.coverage_max is not None:
_kpiKI['maxValue'] = key_indicator.coverage_max
elif _type == self._LIFT:
if key_indicator.lift_min is not None:
_kpiKI['minValue'] = key_indicator.lift_min
if key_indicator.lift_max is not None:
_kpiKI['maxValue'] = key_indicator.lift_max
elif _type == self._ZSCORE:
if key_indicator.zscore_min is not None:
_kpiKI['minValue'] = key_indicator.zscore_min
if key_indicator.zscore_max is not None:
_kpiKI['maxValue'] = key_indicator.zscore_max
else:
if _type == self._AVERAGE_VALUE:
if key_indicator.average_value_min is not None:
_kpiKI['minValue'] = key_indicator.average_value_min
if key_indicator.average_value_max is not None:
_kpiKI['maxValue'] = key_indicator.average_value_max
elif _type == self._STANDARD_DEVIATION:
if key_indicator.standard_deviation_min is not None:
_kpiKI['minValue'] = key_indicator.standard_deviation_min
if key_indicator.standard_deviation_max is not None:
_kpiKI['maxValue'] = key_indicator.standard_deviation_max
elif _type == self._SHIFT:
if key_indicator.shift_min is not None:
_kpiKI['minValue'] = key_indicator.shift_min
if key_indicator.shift_max is not None:
_kpiKI['maxValue'] = key_indicator.shift_max
if 'kpis' not in data['task']['params']:
data['task']['params']['kpis'] = []
data['task']['params']['kpis'].append(_kpiKI)
if (locally_increase_complexity):
data['task']['params']['maxComplexity'] = max_complexity
data['task']['params']['nbMinimizations'] = nb_minimizations
data['task']['params']['coverageIncrement'] = coverage_increment
msg += "\n\t- Max complexity: {} \n\t- Number of Minimizations: {} \n\t- Minimization \
Coverage Increment: {}".format(max_complexity, nb_minimizations, coverage_increment)
if (validate_stability):
data['task']['params']['percentageSplit'] = split_ratio
data['task']['params']['nbModels'] = nb_iterations
data['task']['params']['purityTolerance'] = purity_tolerance
msg += "\n\t- Percentage split: {} \n\t- Number of Iterations: {} \n\t- Purity Tolerance: {}".format(split_ratio, nb_iterations, purity_tolerance)
print(msg)
_ruleset = self.__api.Task.createtask(project_ID=self.__project_id, json=data)
self.__api.handle_work_states(self.__project_id, work_type='learning', work_id=_ruleset.get('_id'))
return self.get(name)
@Helper.try_catch
def filter(self):
"""
filter()
Get all the rulesets of the project.
Returns:
list(Ruleset): all the rulesets
"""
from HyperAPI.hyper_api.dataset import DatasetFactory
factory = DatasetFactory(self.__api, self.__project_id)
ruleset_project = self.__api.Rules.getlearnings(project_ID=self.__project_id)
return [Ruleset(self, self.__api, factory.get_by_id(ruleset.get('datasetId')), ruleset) for ruleset in ruleset_project]
    @Helper.try_catch
    def minimize(self, ruleset, minimization_name, score_to_minimize='Purity', increment_threshold=0.01):
        """
        minimize(ruleset, minimization_name, score_to_minimize='Purity', increment_threshold=0.01)

        Perform a minimization on a given ruleset.

        Args:
            ruleset (Ruleset): Ruleset to minimize
            minimization_name (str): Name of the new ruleset
            score_to_minimize (str): Score to apply the minimization, default is 'Purity'
            increment_threshold (float): Percentage increment of target samples that a new rule must bring to be added to the minimized ruleset, default is 0.01

        Return:
            Ruleset: Minimized ruleset

        Raises:
            ApiException: if the ruleset's target type is continuous
        """
        # Copy so that enriching the KPI dicts below does not mutate the source ruleset
        kpisList = ruleset.kpis.copy()
        json = {
            "type": "minimization",
            "datasetId": ruleset.dataset_id,
            "projectId": ruleset.project_id,
            "params": {
                # The rules to minimize are selected by their tag (= ruleset name)
                "query": "tagsfilter={}".format(urllib.parse.quote(ruleset.name)),
                "taglist": [ruleset.name],
                "incrementThreshold": increment_threshold,
                "tag": minimization_name
            }
        }
        # Resolve the human-readable score name to its KPI id; when they differ,
        # the resolved id must be sent explicitly
        _kpiId = decode_kpiname_to_id(ruleset.kpis, score_to_minimize)
        if _kpiId != score_to_minimize:
            json['params']['kpiId'] = _kpiId
        # Enrich each KPI with its correlation record (matched on '_id')
        _kpis_corr = self.__api.Kpi.getkpicorrelation(project_ID=ruleset.project_id)
        for _kpi in kpisList:
            _kpi_corr = next((_kpi_corr for _kpi_corr in _kpis_corr if _kpi_corr.get('_id') == _kpi.get('kpiId')), {})
            _kpi.update(_kpi_corr)
        # Minimization is only defined for discrete targets
        if kpisList[0].get('kpiType') in [RulesetFactory._CONTINUOUS, RulesetFactory._CONTINUOUS_RATIO]:
            raise ApiException(
                f'Unsupported target type in ruleset minimization: {kpisList[0].get("kpiType")}',
                f'Supported types: {RulesetFactory._DISCRETE_MODALITY}'
            )
        json['params']['kpisList'] = kpisList
        _ruleset = self.__api.Task.createtask(project_ID=ruleset.project_id, json=json)
        # Block until the server-side minimization task completes
        self.__api.handle_work_states(ruleset.project_id, work_type='minimization', work_id=_ruleset.get('_id'))
        return self.get(minimization_name)
def get(self, name):
"""
get(name)
Get a ruleset by name
Args:
name (str): Name of the ruleset
Returns:
Ruleset: Retrieved ruleset
"""
try:
return [ruleset for ruleset in self.filter() if ruleset.name == name][0]
except IndexError:
return []
@Helper.try_catch
def get_by_id(self, id):
"""
get_by_id(id)
Get the ruleset matching the given ID or None if there is no match
Args:
id (str): ID of the ruleset
Returns:
Ruleset or None: retrieved ruleset
"""
rulesets = [ruleset for ruleset in self.filter() if ruleset.id == id]
if rulesets:
return rulesets[0]
return None
    def get_or_create(self, dataset, name, target=None, purity_min=None, coverage_min=None, lift_min=None, zscore_min=None, average_value_min=None,
                      standard_deviation_max=None, shift_min=None, rule_complexity=2, quantiles=10,
                      enable_custom_discretizations=True, min_marginal_contribution=None, compute_other_key_indicators=None,
                      locally_increase_complexity=False, max_complexity=3, nb_minimizations=1, coverage_increment=0.01,
                      validate_stability=False, split_ratio=0.7, nb_iterations=1, purity_tolerance=0.1):
        """
        get_or_create(dataset, name, target=None, purity_min=None, coverage_min=None, lift_min=None, zscore_min=None, average_value_min=None, standard_deviation_max=None, shift_min=None, rule_complexity=2, quantiles=10, enable_custom_discretizations=True, min_marginal_contribution=None, compute_other_key_indicators=None, locally_increase_complexity=False, max_complexity=3, nb_minimizations=1, coverage_increment=0.01, validate_stability=False, split_ratio=0.7, nb_iterations=1, purity_tolerance=0.1)

        Get or create a ruleset; if the ruleset already exists only the name is used
        for the lookup and all other parameters are ignored.

        Args:
            dataset (Dataset): Dataset used to generate the ruleset
            name (str): Name of the new ruleset
            target (Target): Target to generate the ruleset
            purity_min (float): Minimum purity of rules, default is the entire dataset purity (discrete target only)
            coverage_min (int): Minimum coverage of the target population for each rule, default is 10 (discrete target only)
            lift_min (float): Minimum lift, default is 1 (discrete target only)
            zscore_min (float): Minimum Z-score, default is None (discrete target only)
            average_value_min (float): Minimum average value, default is average value of the target on the whole dataset (continuous target only)
            standard_deviation_max (float) : Maximum standard deviation, default is None (continuous target only)
            shift_min (float): Minimum shift, default is None (continuous target only)
            rule_complexity (int): Maximum number of variables in rules, default is 2
            quantiles (int): Number of intervals the continuous variables are quantized in, default is 10
            enable_custom_discretizations (boolean): use custom discretizations, eventually use "quantiles" parameter for remaining variables, default is True
            min_marginal_contribution (float): a new rule R', created by adding a new constraint to an existing rule R (and thus increasing its complexity),
                is added to the ruleset if and only if it increases the original purity of R by the minimum marginal contribution or more. Default is 0.1
            compute_other_key_indicators (list of KeyIndicatorOption): Compute other Key Indicators.
            locally_increase_complexity (bool): Enable the locally increase complexity when set as true. Default is False
            max_complexity (int): Maximum numbers of features per rule. Default is 3
            nb_minimizations (int): Iterate the minimization process. Default is 1
            coverage_increment (float): Percentage increment of target samples that a new rule must bring to be added to the minimization ruleset.
                Default is 0.01
            validate_stability (bool): Enable to split your dataset, add iteration and set a purity tolerance when set as true. Default is False
            split_ratio (float): The percentage for the split (Between 0 and 1). Default is 0.7
            nb_iterations (int): Number of iterations wanted. Default is 1
            purity_tolerance (float): Purity tolerance allowed (Between 0 and 1). Default is 0.1

        Returns:
            Ruleset: Retrieved or created ruleset
        """
        # Lookup matches on both name and dataset; only fall through to create
        # when no existing ruleset matches
        for ruleset in dataset.rulesets:
            if (ruleset.name == name) and (ruleset.dataset_id == dataset.dataset_id):
                return ruleset
        # Arguments are forwarded positionally — keep their order in sync with create()
        return self.create(dataset, name, target, purity_min, coverage_min, lift_min, zscore_min, average_value_min, standard_deviation_max, shift_min,
                           rule_complexity, quantiles, enable_custom_discretizations,
                           min_marginal_contribution, compute_other_key_indicators, locally_increase_complexity, max_complexity,
                           nb_minimizations, coverage_increment, validate_stability, split_ratio, nb_iterations, purity_tolerance)
class Ruleset(Base):
"""
Ruleset()
"""
    def __init__(self, factory, api, dataset, json_return):
        """Wrap the raw learning JSON returned by the API.

        Args:
            factory (RulesetFactory): factory that created this instance
            api: low-level API client
            dataset (Dataset): dataset the ruleset was generated from
            json_return (dict): raw JSON description of the learning
        """
        self.__api = api
        self.__factory = factory
        self.__json_returned = json_return
        self.__dataset = dataset
        self._is_deleted = False
        # Learnings whose status is "error" expose most properties as None
        self._is_in_error = self.__json_returned.get('status', '').lower() == "error"
def __repr__(self):
return """\n{} : {} <{}>\n""".format(
self.__class__.__name__,
self.name,
self.id
) + ("\t<! This ruleset has been deleted>\n" if self._is_deleted else "") + \
("\t<! This ruleset is in error>\n" if self._is_in_error else "") + \
"""\t- Dataset : {}\n\t- Rules count : {}\n\t- Created on : {}\n""".format(
self.dataset_name,
self.rules_count,
self.created.strftime('%Y-%m-%d %H:%M:%S UTC'))
    # Property part
    @property
    def _json(self):
        # Raw learning JSON as returned by the API (internal use)
        return self.__json_returned
    @property
    def dataset_name(self):
        """str: Name of the dataset the ruleset was generated from."""
        return self.__json_returned.get('datasetName')
@property
def name(self):
"""
str: Ruleset name
"""
if self.__json_returned.get('tag') is not None:
if type(self.__json_returned.get('tag')) == str:
return self.__json_returned.get('tag')
else:
return self.__json_returned.get('tag').get('tagName', '')
else:
return ''
    @property
    def kpis(self):
        """
        list(dict): Kpis of the ruleset, or None when the learning errored
        """
        if self._is_in_error:
            return None
        # NOTE(review): assumes 'tag' is a dict here — would raise
        # AttributeError for legacy string tags; confirm upstream guarantees.
        return self.__json_returned.get('tag').get('kpis')
    @property
    def rules_count(self):
        """
        int: Number of rules in the ruleset (None if absent from the payload)
        """
        return self.__json_returned.get('rulesCount', None)
    @property
    def dataset_id(self):
        """
        str: Dataset ID
        """
        return self.__json_returned.get('datasetId')
    @property
    def project_id(self):
        """
        str: Project ID
        """
        return self.__json_returned.get('projectId')
@property
def created(self):
"""
datetime: Created date
"""
createdAt = self.__json_returned.get('lastChangeAt', self.__json_returned.get('createdAt'))
if createdAt.find('.') > 0:
return self.str2date(createdAt, '%Y-%m-%dT%H:%M:%S.%fZ')
return self.str2date(createdAt, '%Y-%m-%dT%H:%M:%S')
    @property
    def id(self):
        """
        str: Ruleset ID
        """
        return self.__json_returned.get('_id')
    # Method part
    @Helper.try_catch
    def _get_params(self):
        # Not implemented yet; note the guard returns None (not NotImplemented)
        # once the ruleset has been deleted.
        if not self._is_deleted:
            return NotImplemented
    @Helper.try_catch
    def _export(self):
        # Not implemented yet; returns None once the ruleset has been deleted.
        if not self._is_deleted:
            return NotImplemented
    @Helper.try_catch
    def delete(self):
        """
        delete()

        Delete the ruleset

        Returns:
            Ruleset: self, flagged as deleted once the server confirms removal
        """
        if not self._is_deleted:
            json = {
                '_id': self.id,
                # Legacy rulesets store the tag as a plain string; only for those
                # is the real status forwarded — otherwise 'done' is sent.
                'status': self.__json_returned.get('status', 'done').lower() if
                type(self.__json_returned.get('tag')) == str else 'done',
                'tagName': self.name
            }
            self.__api.Rules.removealearning(project_ID=self.project_id, dataset_ID=self.dataset_id, json=json)
            # Only mark the local object deleted once the server no longer lists it
            if RulesetFactory(self.__api, self.project_id).get_by_id(self.id) is None:
                self._is_deleted = True
        return self
@Helper.try_catch
def minimize(self, minimization_name, score_to_minimize='Purity', increment_threshold=0.01):
"""
minimize(minimization_name, score_to_minimize='Purity', increment_threshold=0.01)
Function to apply a minimization on a ruleset
Args:
minimization_name (str): Name of the new ruleset
score_to_minimize (str): Score to apply the minimization, default is purity
increment_threshold | |
# fiftyone/core/plots/matplotlib.py
"""
Matplotlib plots.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import itertools
import logging
import warnings
import numpy as np
import matplotlib as mpl
from matplotlib.widgets import Button, LassoSelector
from matplotlib.path import Path
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition
from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import
import sklearn.linear_model as skl
import sklearn.metrics.pairwise as skp
import sklearn.metrics as skm
import eta.core.utils as etau
import fiftyone.core.context as foc
import fiftyone.core.expressions as foe
import fiftyone.core.fields as fof
import fiftyone.core.labels as fol
import fiftyone.core.media as fom
import fiftyone.core.utils as fou
from .base import InteractivePlot
from .utils import load_button_icon
logger = logging.getLogger(__name__)
def plot_confusion_matrix(
    confusion_matrix,
    labels,
    show_values=True,
    show_colorbar=True,
    cmap="viridis",
    xticks_rotation=45.0,
    values_format=None,
    ax=None,
    figsize=None,
):
    """Plots a confusion matrix.

    Args:
        confusion_matrix: a ``num_true x num_preds`` confusion matrix
        labels: a ``max(num_true, num_preds)`` array of class labels
        show_values (True): whether to show counts in the confusion matrix
            cells
        show_colorbar (True): whether to show a colorbar
        cmap ("viridis"): a colormap recognized by ``matplotlib``
        xticks_rotation (45.0): a rotation for the x-tick labels. Can be
            numeric degrees, "vertical", "horizontal", or None
        values_format (None): an optional format string like ``".2g"`` or
            ``"d"`` to use to format the cell counts
        ax (None): an optional matplotlib axis to plot in
        figsize (None): an optional ``(width, height)`` for the figure, in
            inches

    Returns:
        a matplotlib figure
    """
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure

    confusion_matrix = np.asarray(confusion_matrix)
    nrows = confusion_matrix.shape[0]
    ncols = confusion_matrix.shape[1]

    im = ax.imshow(confusion_matrix, interpolation="nearest", cmap=cmap)

    if show_values:
        # Print text with appropriate color depending on background:
        # dark cells get the light end of the colormap and vice versa
        cmap_min = im.cmap(0)
        cmap_max = im.cmap(256)
        thresh = (confusion_matrix.max() + confusion_matrix.min()) / 2.0
        for i, j in itertools.product(range(nrows), range(ncols)):
            color = cmap_max if confusion_matrix[i, j] < thresh else cmap_min
            if values_format is None:
                text_cm = format(confusion_matrix[i, j], ".2g")
                if confusion_matrix.dtype.kind != "f":
                    # For integer matrices, prefer "d" when it renders shorter
                    text_d = format(confusion_matrix[i, j], "d")
                    if len(text_d) < len(text_cm):
                        text_cm = text_d
            else:
                text_cm = format(confusion_matrix[i, j], values_format)

            ax.text(j, i, text_cm, ha="center", va="center", color=color)

    ax.set(
        xticks=np.arange(ncols),
        yticks=np.arange(nrows),
        xticklabels=labels[:ncols],
        yticklabels=labels[:nrows],
        xlabel="Predicted label",
        ylabel="True label",
    )

    ax.set_ylim((nrows - 0.5, -0.5))  # flip axis

    if xticks_rotation is not None:
        plt.setp(ax.get_xticklabels(), rotation=xticks_rotation)

    if show_colorbar:
        # Attach the colorbar to a dedicated axis so it scales with the plot
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.1)
        fig.colorbar(im, cax=cax)

    if figsize is not None:
        fig.set_size_inches(*figsize)

    plt.tight_layout()

    return fig
def plot_regressions(
    ytrue,
    ypred,
    samples=None,
    ids=None,
    labels=None,
    sizes=None,
    classes=None,
    gt_field=None,
    pred_field=None,
    best_fit_label=None,
    marker_size=None,
    cmap=None,
    ax=None,
    figsize=None,
    style="seaborn-ticks",
    **kwargs,
):
    """Plots the given regression results.

    Args:
        ytrue: an array of ground truth values
        ypred: an array of predicted values
        samples (None): the :class:`fiftyone.core.collections.SampleCollection`
            whose data is being visualized
        ids (None): an array of sample or frame IDs corresponding to the
            regressions. If not provided but ``samples`` are provided, the
            appropriate IDs will be extracted from the samples
        labels (None): data to use to color the points. Can be any of the
            following:

            -   the name of a sample field or ``embedded.field.name`` of
                ``samples`` from which to extract numeric or string values
            -   a :class:`fiftyone.core.expressions.ViewExpression` defining
                numeric or string values to compute from ``samples`` via
                :meth:`fiftyone.core.collections.SampleCollection.values`
            -   a list or array-like of numeric or string values
            -   a list of lists of numeric or string values, if ``link_field``
                refers to frames
        sizes (None): data to use to scale the sizes of the points. Can be any
            of the following:

            -   the name of a sample field or ``embedded.field.name`` of
                ``samples`` from which to extract numeric values
            -   a :class:`fiftyone.core.expressions.ViewExpression` defining
                numeric values to compute from ``samples`` via
                :meth:`fiftyone.core.collections.SampleCollection.values`
            -   a list or array-like of numeric values
            -   a list of lists of numeric or string values, if ``link_field``
                refers to frames
        classes (None): an optional list of classes whose points to plot.
            Only applicable when ``labels`` contains strings
        gt_field (None): the name of the ground truth field
        pred_field (None): the name of the predictions field
        best_fit_label (None): a custom legend label for the best fit line
        marker_size (None): the marker size to use. If ``sizes`` are provided,
            this value is used as a reference to scale the sizes of all points
        cmap (None): a colormap recognized by ``matplotlib``
        ax (None): an optional matplotlib axis to plot in
        figsize (None): an optional ``(width, height)`` for the figure, in
            inches
        style ("seaborn-ticks"): a style to use for the plot
        **kwargs: optional keyword arguments for matplotlib's ``scatter()``

    Returns:
        a matplotlib figure
    """
    if ax is None:
        _, ax = plt.subplots()

    # Points are (ground truth, prediction) pairs; parsing may subsample
    # them (e.g. to the requested classes), so realign ``ids`` via ``inds``
    points = np.stack([ytrue, ypred], axis=-1)

    points, labels, sizes, _, inds, _ = _parse_scatter_inputs(
        points, labels, sizes, classes
    )

    if ids is not None and inds is not None:
        ids = np.asarray(ids)[inds]

    ytrue = points[:, 0]
    ypred = points[:, 1]

    if best_fit_label is None:
        # Default legend label reports the coefficient of determination
        r2_score = skm.r2_score(ytrue, ypred, sample_weight=None)
        best_fit_label = "r^2: %0.3f" % r2_score

    # Best-fit line through (ytrue, ypred), evaluated at the data extremes
    model = skl.LinearRegression()
    model.fit(ytrue[:, np.newaxis], ypred)
    xline = np.array([ytrue.min(), ytrue.max()])
    yline = model.predict(xline[:, np.newaxis])

    xlabel = gt_field if gt_field is not None else "Ground truth"
    ylabel = pred_field if pred_field is not None else "Predictions"

    with plt.style.context(style):
        ax.plot(xline, yline, color="k", label=best_fit_label)
        ax.set(xlabel=xlabel, ylabel=ylabel)
        ax.legend()
        ax.axis("equal")

    # Frame-level ground truth fields link the scatter points to frames
    if (
        samples is not None
        and gt_field is not None
        and samples._is_frame_field(gt_field)
    ):
        link_field = "frames"
    else:
        link_field = None

    return scatterplot(
        points,
        samples=samples,
        ids=ids,
        link_field=link_field,
        labels=labels,
        sizes=sizes,
        marker_size=marker_size,
        cmap=cmap,
        ax=ax,
        ax_equal=True,
        figsize=figsize,
        style=style,
        **kwargs,
    )
def plot_pr_curve(
    precision,
    recall,
    label=None,
    ax=None,
    figsize=None,
    style="seaborn-ticks",
    **kwargs,
):
    """Plots a precision-recall (PR) curve.

    Args:
        precision: an array of precision values
        recall: an array of recall values
        label (None): a label for the curve
        ax (None): an optional matplotlib axis to plot in
        figsize (None): an optional ``(width, height)`` for the figure, in
            inches
        style ("seaborn-ticks"): a style to use for the plot
        **kwargs: optional keyword arguments for matplotlib's ``plot()``

    Returns:
        a matplotlib figure
    """
    with plt.style.context(style):
        pr_display = skm.PrecisionRecallDisplay(
            precision=precision, recall=recall
        )
        pr_display.plot(ax=ax, label=label, **kwargs)
        fig = pr_display.figure_
        if figsize is not None:
            fig.set_size_inches(*figsize)

    return fig
def plot_pr_curves(
    precisions,
    recall,
    classes,
    ax=None,
    figsize=None,
    style="seaborn-ticks",
    **kwargs,
):
    """Plots a set of per-class precision-recall (PR) curves.

    Args:
        precisions: a ``num_classes x num_recalls`` array of per-class
            precision values
        recall: an array of recall values
        classes: the list of classes
        ax (None): an optional matplotlib axis to plot in
        figsize (None): an optional ``(width, height)`` for the figure, in
            inches
        style ("seaborn-ticks"): a style to use for the plot
        **kwargs: optional keyword arguments for matplotlib's ``plot()``

    Returns:
        a matplotlib figure
    """
    # Render the curves in descending order of average precision (AP)
    class_aps = np.mean(precisions, axis=1)
    order = np.argsort(-class_aps)  # negated values -> descending sort

    with plt.style.context(style):
        for i in order:
            curve_label = "AP = %.2f, class = %s" % (class_aps[i], classes[i])
            display = skm.PrecisionRecallDisplay(
                precision=precisions[i], recall=recall
            )
            display.plot(ax=ax, label=curve_label, **kwargs)
            # Reuse the axis so every subsequent curve lands on the same plot
            ax = display.ax_

    if ax is None:
        ax = plt.gca()

    if figsize is not None:
        ax.figure.set_size_inches(*figsize)

    return ax.figure
def plot_roc_curve(
    fpr,
    tpr,
    roc_auc=None,
    ax=None,
    figsize=None,
    style="seaborn-ticks",
    **kwargs,
):
    """Plots a receiver operating characteristic (ROC) curve.

    Args:
        fpr: an array of false positive rates
        tpr: an array of true positive rates
        roc_auc (None): the area under the ROC curve
        ax (None): an optional matplotlib axis to plot in
        figsize (None): an optional ``(width, height)`` for the figure, in
            inches
        style ("seaborn-ticks"): a style to use for the plot
        **kwargs: optional keyword arguments for matplotlib's ``plot()``

    Returns:
        a matplotlib figure
    """
    with plt.style.context(style):
        roc_display = skm.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc)
        roc_display.plot(ax=ax, **kwargs)
        fig = roc_display.figure_
        if figsize is not None:
            fig.set_size_inches(*figsize)

    return fig
def scatterplot(
points,
samples=None,
ids=None,
link_field=None,
labels=None,
sizes=None,
classes=None,
marker_size=None,
cmap=None,
ax=None,
ax_equal=False,
figsize=None,
style="seaborn-ticks",
buttons=None,
**kwargs,
):
"""Generates an interactive scatterplot of the given points.
You can attach plots generated by this method to an App session via its
:attr:`fiftyone.core.session.Session.plots` attribute, which will
automatically sync the session's view with the currently selected points in
the plot. To enable this functionality, you must pass ``samples`` to this
method.
This method supports 2D or 3D visualizations, but interactive point
selection is only available in 2D.
You can use the ``labels`` parameters to define a coloring for the points,
and you can use the ``sizes`` parameter to scale the sizes of the points.
Args:
points: a ``num_points x num_dims`` array of points
samples (None): the :class:`fiftyone.core.collections.SampleCollection`
whose | |
import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
import scipy.integrate
from sys import float_info
import warnings
class Baseliner:
    """
    A class for interactive baselining of spectroscopic data.

    The class works by being fed a spectrum and a matplotlib axis on which
    it should be plotted. The spectrum is then plotted to the given axis,
    and a number of interactive options are made available to the user.

    Left-clicking with the mouse for the first time starts defining a window
    from the x-axis location of the click. A second click finishes the
    window between the locations of the first and second click.
    A third click will finish selecting windows, and perform the baselining.
    Alternately, right-clicking will cancel the last left-clicking action,
    allowing misplaced windows to be adjusted.

    Two keys are also accepted:
    Pressing "q" will cause the baselining process to be canceled,
    effectively skipping the baselining of this spectrum.
    Pressing "a" will allow an additional window to be defined, assuming
    one has been defined so far (by left-clicking twice to define its
    boundaries).

    Attributes
    ----------
    windows : `list`
        A list of all the set windows, each a ``[low, high]`` pair.
    """

    def __init__(self, ax, spec):
        """
        Baseliner(ax, spec)

        Initialise the `Baseliner` class by giving it the target axis and
        spectrum.

        Parameters
        ----------
        ax : `matplotlib.axis`
            The matplotlib axis on which the interaction will happen.
        spec : `omnifit.spectrum.BaseSpectrum`
            The spectrum which will be plotted as the visual reference on
            the given axis.
        """
        self.__ax = ax
        self.__spec = spec
        self.__x = spec.x.value
        self.__y = spec.y.value
        # No window limits are defined yet.
        self.__limlo = None
        self.__limhi = None
        # Cache the data extents; they are reused for drawing limit markers.
        self.__minx = np.min(self.__x)
        self.__maxx = np.max(self.__x)
        self.__miny = np.min(self.__y)
        self.__maxy = np.max(self.__y)
        self.__ax.set_xlim(self.__minx, self.__maxx)
        self.__ax.set_ylim(self.__miny, self.__maxy)
        self.__specplot, = self.__ax.plot(self.__x, self.__y, 'k-',
                                          drawstyle='steps-mid')
        # Hook up the mouse and keyboard event handlers.
        self.__buttonListener = self.__ax.figure.canvas.mpl_connect(
            'button_press_event', self.__mouse_press)
        self.__keyListener = self.__ax.figure.canvas.mpl_connect(
            'key_press_event', self.__key_press)
        self.windows = []

    def __key_press(self, event):
        # "q" aborts baselining of this spectrum entirely.
        if event.key == 'q':
            self.__skip()
        # "a" commits the currently defined window (both limits must be set)
        # and clears the limits so another window can be defined.
        if event.key == 'a' and self.__limlo is not None and self.__limhi is not None:
            self.__addwindow(self.__limlo, self.__limhi)
            self.__ax.plot([self.__limlo, self.__limlo],
                           [self.__miny, self.__maxy], 'g-')
            self.__ax.plot([self.__limhi, self.__limhi],
                           [self.__miny, self.__maxy], 'g-')
            self.__remlim()
            self.__remlim()
            print('Window added. Ready to receive another one.')

    def __mouse_press(self, event):
        # Left click sets a window limit; right click removes the most
        # recently set limit. Middle click is deliberately ignored.
        if event.button == 1:
            self.__setlim(event.xdata)
        elif event.button == 3:
            self.__remlim()

    def __skip(self):
        # Abort: closing the figure ends the interactive session.
        plt.close()

    def __setlim(self, i_x):
        # First click defines the lower limit, the second one the upper
        # limit; a third click (with both limits set) finalises.
        if self.__limlo is None:
            self.__limlo = i_x
            self.__limloplot, = self.__ax.plot([i_x, i_x],
                                               [self.__miny, self.__maxy],
                                               'b-')
            self.__ax.figure.canvas.draw()
        elif self.__limhi is None:
            self.__limhi = i_x
            self.__limhiplot, = self.__ax.plot([i_x, i_x],
                                               [self.__miny, self.__maxy],
                                               'b-')
            self.__ax.figure.canvas.draw()
            print('Ready for finalising. Press once more to do so, or press a to add another window.')
        else:
            self.__finalise()

    def __remlim(self):
        # Undo the most recently set limit (upper first), collapsing its
        # marker line so it disappears from view.
        if self.__limhi is not None:
            self.__limhi = None
            self.__limhiplot.set_ydata([self.__miny, self.__miny])
            self.__ax.figure.canvas.draw()
        elif self.__limlo is not None:
            self.__limlo = None
            self.__limloplot.set_ydata([self.__miny, self.__miny])
            self.__ax.figure.canvas.draw()
        else:
            print('No limits to cancel.')

    def __addwindow(self, limlo, limhi):
        # Store the window with its limits in ascending order.
        if limhi < limlo:
            limlo, limhi = limhi, limlo
        self.windows.append([limlo, limhi])

    def __finalise(self):
        # Commit the last window, detach the event handlers, and close the
        # figure to hand control back to the caller.
        self.__addwindow(self.__limlo, self.__limhi)
        self.__ax.figure.canvas.mpl_disconnect(self.__buttonListener)
        self.__ax.figure.canvas.mpl_disconnect(self.__keyListener)
        plt.close(self.__ax.figure)
#---------------------
#New units definitions
#---------------------
#the units themselves
# Transmittance: the fraction of incident radiation passing through the sample.
unit_t = u.def_unit('transmittance units',doc='Transmittance of radiation')
unit_transmittance = unit_t  # long-form alias
# Absorbance: base-10 logarithmic attenuation, A = -log10(transmittance).
unit_abs = u.def_unit('absorbance units',doc='Absorbance of radiation')
unit_absorbance = unit_abs  # long-form alias
# Optical depth: natural-log attenuation; per the equivalencies below, A = tau / ln(10).
unit_od = u.def_unit('optical depth units',doc='Optical depth of radiation')
unit_opticaldepth = unit_od  # long-form alias
#the equivalencies between the units
# Each entry is (unit_a, unit_b, a->b converter, b->a converter), the format
# expected by astropy's unit-equivalency machinery.
equivalencies_absorption = [
    (unit_t,unit_abs,lambda x:-np.log10(x),lambda x:10**-x),
    (unit_od,unit_abs,lambda x:x/np.log(10),lambda x:x*np.log(10)),
    (unit_od,unit_t,lambda x:10**(-x/np.log(10)),lambda x:-np.log10(x)*np.log(10))
    ]
#------------------------------------------------------
#Functions related to light scattering and transmission
#------------------------------------------------------
def cde_correct(freq,m):
"""
cde_correct(freq,m)
Generate a CDE-corrected spectrum from a complex refractive index
spectrum.
Parameters
----------
freq : `numpy.ndarray`
The frequency data of the input spectrum, in reciprocal
wavenumbers (cm^-1).
m : `numpy.ndarray`
The complex refractive index spectrum.
Returns
-------
A list containing the following numpy arrays, in given order:
* The spectrum of the absorption cross section of the simulated grain.
* The spectrum of the absorption cross section of the simulated grain,
normalized by the volume distribution of the grain. This parameter
is the equivalent of optical depth in most cases.
* The spectrum of the scattering cross section of the simulated grain,
normalized by the volume distribution of the grain.
* The spectrum of the total cross section of the simulated grain.
"""
wl=1.e4/freq
m2=m**2.0
im_part=((m2/(m2-1.0))*np.log(m2)).imag
cabs_vol=(4.0*np.pi/wl)*im_part
cabs=freq*(2.0*m.imag/(m.imag-1))*np.log10(m.imag)
cscat_vol=(freq**3.0/(6.0*np.pi))*cabs
ctot=cabs+cscat_vol
return cabs,cabs_vol,cscat_vol,ctot
def complex_transmission_reflection(in_m0, in_m1, in_m2):
    """
    complex_transmission_reflection(in_m0, in_m1, in_m2)

    Calculate the complex transmission and reflection coefficients between
    media 0, 1, and 2 given their complex refractive indices.

    In the Kramers-Kronig implementation (in which this is most likely used
    in the context of Omnifit) media 0, 1, and 2 correspond
    respectively to the vacuum, ice, and substrate.

    Parameters
    ----------
    in_m0 : `complex` or `numpy.ndarray`
        The complex refractive index of medium 0.
    in_m1 : `complex` or `numpy.ndarray`
        The complex refractive index of medium 1.
    in_m2 : `complex` or `numpy.ndarray`
        The complex refractive index of medium 2.

    Returns
    -------
    A tuple containing the following elements:

    * The complex transmission coefficient between media 0 and 1
    * The complex transmission coefficient between media 0 and 2
    * The complex transmission coefficient between media 1 and 2
    * The complex reflection coefficient between media 0 and 1
    * The complex reflection coefficient between media 0 and 2
    * The complex reflection coefficient between media 1 and 2
    """
    def _transmission(ma, mb):
        # Transmission uses only the real part of the first index.
        return (2. * ma.real) / (ma + mb)

    def _reflection(ma, mb):
        return (ma - mb) / (ma + mb)

    pairs = ((in_m0, in_m1), (in_m0, in_m2), (in_m1, in_m2))
    transmissions = tuple(_transmission(a, b) for a, b in pairs)
    reflections = tuple(_reflection(a, b) for a, b in pairs)
    return transmissions + reflections
def kramers_kronig(freq,transmittance,m_substrate,d_ice,m0,freq_m0,m_guess=1.0+0.0j,tol=0.001,maxiter=100,ignore_fraction=0.1,force_kkint_unity=False,precalc=False):
"""
kramers_kronig(freq,transmittance,m_substrate,d_ice,m0,freq_m0,
m_guess=1.0+0.0j,tol=0.001,maxiter=100,ignore_fraction=0.1,
force_kkint_unity=False,precalc=False)
Kramers-Kronig relation.
This is an implementation of the Kramers-Kronig relation calculation
presented in Hudgins et al 1993 (1993ApJS...86..713H), with an improved
integration method adapted from Trotta et al 1996
(The Cosmic Dust Connection, 1996 169-184)
Parameters
----------
wn : `astropy.units.Quantity` or `numpy.ndarray`
The frequency data of the input spectrum. If no units are given, this
is assumed to be in reciprocal wavenumbers (cm^-1).
transmittance : `astropy.units.Quantity` or `numpy.ndarray`
The transmittance data of the input spectrum. This can be given in
units other than transmittance, as long as they can be converted to
transmittance by making use of the `utils.equivalencies_absorption`
equivalency information. If no units are given, transmittance is
assumed.
m_substrate : `complex`
The complex refractive index of the substrate on which the ice being
studied was grown.
d_ice : `astropy.units.Quantity` or `float`
The thickness of the ice which is being studied. If no units are given,
centimeters are assumed.
m0 : `complex`
The complex refractive index of the ice at the reference frequency
defined by `freq_m0` (see below).
freq_m0 : `astropy.units.Quantity` or `float`
The frequency at which the reference complex refractive index `m0`
(see above) is defined. Best results are usually achieved if this
frequency is high compared to the frequency range being probed by
the spectrum.
If this is not defined as `astropy.units.Quantity` in spectroscopic
units, it is assumed to be in reciprocal wavenumbers (cm^-1).
m_guess : `complex` or `numpy.ndarray`
The starting guess of the complex refractive index of the ice. This
can either be a single number (in which case it is assumed to be this
number throughout the entire spectrum) or an array
tol : `float`
The square-sum of the residual between the original transmittance and
the transmittance modeled with the iterated complex refractive index
of the ice must be below this value for the iteration to converge. In
other words, the smaller this number is, the better the final result
will be at the expense of extra iterations.
maxiter : `int`
The maximum number of iterations allowed. If this number is reached,
the iteration is considered to not have converged, and an exception is
raised.
ignore_fraction : `float` between 0 and 0.5
The edges of the spectrum are blanked out (and replaced with the
non-blanked value closest to the edge) during iteration to avoid edge
effects arising from the usage of a non-infinite integration range.
This parameter controls how large of a fraction of the edges is blanked
out.
force_kkint_unity : `bool`
The results of the Kramers-Kronig integration are responsible for
determining the real part of the complex refractive index i.e. the
one which represents refraction. Normally this number should not drop
below unity, and unexpected behaviour can arise if it does.
Usually this means that there is something wrong with the input
parameters, but sometimes forcing the result to always be greater or
equal to unity can help. It should be noted, however, that the
accuracy of the results of an integration forced in this way are
suspect at best.
precalc : `bool`
The Kramers-Kronig iteration can be a very computationally intensive
operation. In some situations it may result in a faster iteration to
pre-calculate the large denominator which is part of the
Kramers-Kronig integration instead of computing new values of it in a
| |
5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Over 0%", "Payment 2 Under 1%", "Payment 3 Under 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Over 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Over 2%", "Payment 4 Under 3%", "Payment 5 Under 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Over 5%"]
},{
"name": "Payment 6 Less Than 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment 5 Over 4%", "Payment 6 Less Than 5%"]
},{
"name": "Payment 6 Over 5%",
"taxonomy": ["Payment 1 Under 0%", "Payment 2 Over 1%", "Payment 3 Under 2%", "Payment 4 Over 3%", "Payment | |
# Source: Technologicat/unpythonic — unpythonic/funutil.py
# -*- coding: utf-8 -*-
"""Function call and return value related utilities."""
__all__ = ["call", "callwith",
"Values", "valuify"]
from functools import wraps
from .lazyutil import passthrough_lazy_args, islazy, maybe_force_args, force
from .regutil import register_decorator
from .symbol import sym
# HACK: break dependency loop llist -> fun -> funutil -> collections -> llist
# Importing .collections at module load time would close that cycle, so the
# import is deferred: `frozendict` starts out as a placeholder symbol and is
# replaced with the real class once the package has finished initializing.
_init_done = False  # becomes True once _init_module() has run
frozendict = sym("frozendict") # placeholder; doesn't matter what the value is, will be overwritten later
def _init_module(): # called by unpythonic.__init__ when otherwise done
    """Late-bind `frozendict` from `.collections` and mark this module ready."""
    global frozendict, _init_done
    from .collections import frozendict
    _init_done = True
# Only the single-argument form (just f) of the "call" decorator is supported
# by unpythonic.syntax.util.sort_lambda_decorators.
#
# This is as it should be; if given any arguments beside f, the call doesn't
# conform to the decorator API, but is a normal function call. See "callwith"
# if you need to pass arguments and then call f from a decorator position.
@register_decorator(priority=80)
@passthrough_lazy_args
def call(f, *args, **kwargs):
    """Call the function f.

    **As a decorator**: run the decorated function immediately (with no
    arguments), then replace the definition by its return value. This makes
    lispy not-quite-functions, where a ``def`` merely delimits a block of
    code that runs right away (think call-with-something in Lisps, but
    without the something). To pass arguments while using ``call`` as a
    decorator, see ``callwith``.

    **Called normally**: ``call(f, *a, **kw)`` is the same as ``f(*a, **kw)``.

    *Why ever use call() normally?*

      - Readability and aesthetics in cases like ``makef(dostuffwith(args))()``,
        where ``makef`` is a function factory whose result we want to call
        immediately. ``call(makef(dostuffwith(args)))`` relocates the odd one
        out away from the mass of parentheses at the end. (A real FP example
        would likely have more levels of nesting.)

      - Notational uniformity with ``curry(f, *args, **kwargs)`` for cases
        without currying. See ``unpythonic.fun.curry``.

      - For fans of S-expressions. Write Python almost like Lisp!

    Name inspired by "call-with-something"; since here we're calling without
    any specific thing, it's just "call".

    Examples::

        @call
        def result():  # this block of code runs immediately
            return "hello"
        print(result)  # "hello"

        # if the return value is of no interest:
        @call
        def _():
            ...  # code with cheeky side effects goes here

        @call
        def x():
            a = 2  # many temporaries that help readability...
            b = 3  # ...of this calculation, but would just pollute locals...
            c = 5  # ...after the block exits
            return a * b * c

        @call
        def _():
            for x in range(10):
                for y in range(10):
                    if x * y == 42:
                        return  # "multi-break" out of both loops!
                    ...

    Note that in the multi-break case, ``x`` and ``y`` are no longer in scope
    outside the block, since the block is a function.
    """
    # Not a plain f(*args, **kwargs): routing through maybe_force_args/force
    # supports unpythonic.syntax.lazify.
    target = force(f)
    return maybe_force_args(target, *args, **kwargs)
@register_decorator(priority=80)
@passthrough_lazy_args
def callwith(*args, **kwargs):
    """Freeze arguments, choose function later.

    **Used as decorator**, this is like ``@call``, but with arguments::

        @callwith(3)
        def result(x):
            return x**2
        assert result == 9

    **Called normally**, this is a mandatory two-step application. The first
    step stores the given arguments and returns a function ``f(callable)``;
    the second step calls ``f`` with a callable, which is then invoked with
    the stored arguments::

        def myadd(a, b):
            return a + b
        def mymul(a, b):
            return a * b
        apply23 = callwith(2, 3)
        assert apply23(myadd) == 5
        assert apply23(mymul) == 6

    In other words, ``callwith`` is similar to ``functools.partial``, but
    without specializing to any particular function; the function to be
    called is given later, in the second step. Hence ``callwith(2, 3)(myadd)``
    means "make a function that passes in two positional arguments, with
    values ``2`` and ``3``; then call this function for the callable ``myadd``".

    But beware: ``callwith(2, 3, myadd)`` instead freezes *three* positional
    arguments - not what the above example wants. Curry obviously does not
    help; it will happily pass in all arguments in one go. To specialize some
    arguments now and some later, use ``partial``::

        from functools import partial
        p1 = partial(callwith, 2)
        p2 = partial(p1, 3)
        p3 = partial(p2, 4)
        apply234 = p3()  # actually call callwith, get the function
        def add3(a, b, c):
            return a + b + c
        def mul3(a, b, c):
            return a * b * c
        assert apply234(add3) == 9
        assert apply234(mul3) == 24

    If the code above feels weird, it should: arguments are gathered first,
    and the function they will be passed to is chosen in the last step.
    A pythonic alternative is plain argument unpacking::

        a = [2, 3]
        assert myadd(*a) == 5
        assert mymul(*a) == 6

    Another use case of ``callwith`` is ``map``, when we want to vary the
    function instead of the data::

        m = map(callwith(3), [lambda x: 2*x, lambda x: x**2, lambda x: x**(1/2)])
        assert tuple(m) == (6, 9, 3**(1/2))

    The pythonic alternative here is the comprehension notation::

        m = (f(3) for f in [lambda x: 2*x, lambda x: x**2, lambda x: x**(1/2)])
        assert tuple(m) == (6, 9, 3**(1/2))

    Inspiration:

        *Function application with $* in
        http://learnyouahaskell.com/higher-order-functions
    """
    def apply_frozen_args_to(f):
        # maybe_force_args/force (instead of a plain call) support
        # unpythonic.syntax.lazify.
        return maybe_force_args(force(f), *args, **kwargs)
    return apply_frozen_args_to
class Values:
"""Structured multiple-return-values.
That is, return multiple values positionally and by name. This completes
the symmetry between passing function arguments and returning values
from a function: Python itself allows passing arguments by name, but has
no concept of returning values by name. This class adds that concept.
Having a `Values` type separate from `tuple` also helps with semantic
accuracy. In `unpythonic` 0.15.0 and later, a `tuple` return value now
means just that - one value that is a `tuple`. It is different from a
`Values` that contains several positional return values (that are meant
to be treated separately e.g. by a function composition utility).
**When to use**:
Most of the time, returning a tuple to denote multiple-return-values
and unpacking it is just fine, and that is exactly what `unpythonic`
does internally in many places.
But the distinction is critically important in function composition,
so that positional return values can be automatically mapped into
positional arguments to the next function in the chain, and named
return values into named arguments.
Accordingly, various parts of `unpythonic` that deal with function
composition use the `Values` abstraction; particularly `curry`, and
the `compose` and `pipe` families, and the `with continuations` macro.
**Behavior**:
`Values` is a duck-type with some features of both sequences and mappings,
but not the full `collections.abc` API of either.
Each operation that obviously and without ambiguity makes sense only
for the positional or named part, accesses that part.
The only exception is `__getitem__` (subscripting), which makes sense
for both parts, unambiguously, because the key types differ. If the index
expression is an `int` or a `slice`, it is an index/slice for the
positional part. If it is an `str`, it is a key for the named part.
If you need to explicitly access either part (and its full API),
use the `rets` and `kwrets` attributes. The names are in analogy
with `args` and `kwargs`.
`rets` is a `tuple`, and `kwrets` is an `unpythonic.collections.frozendict`.
`Values` objects can be compared for equality. Two `Values` objects
are equal if both their `rets` and `kwrets` (respectively) are.
Examples::
def f():
return Values(1, 2, 3)
result = f()
assert isinstance(result, Values)
assert result.rets == (1, 2, 3)
assert not result.kwrets
assert result[0] == 1
assert result[:-1] == (1, 2)
a, b, | |
a final feature map size of %s" %
(feature_map_size, ))
interp_block1 = interp_block(res, 1, feature_map_size, input_shape)
interp_block2 = interp_block(res, 2, feature_map_size, input_shape)
interp_block3 = interp_block(res, 3, feature_map_size, input_shape)
interp_block6 = interp_block(res, 6, feature_map_size, input_shape)
# concat all these layers. resulted
# shape=(1,feature_map_size_x,feature_map_size_y,4096)
res = Concatenate()([res,interp_block6,
interp_block3,
interp_block2,
interp_block1])
return res
def build_pyramid_pooling_mult_module(res, org, input_shape):
    """Build a Pyramid Pooling Module that fuses two inputs by multiplication.

    Each pyramid level is computed for both inputs (``res`` via
    ``interp_block``, ``org`` via ``interp_block_t``), the per-level outputs
    are multiplied elementwise, and all fused levels are concatenated
    together with the elementwise product of the two raw inputs.
    """
    # ---PSPNet concat layers with Interpolation
    # The pyramid operates on a feature map 1/8th of the input resolution.
    feature_map_size = tuple(int(ceil(input_dim / 8.0))
                             for input_dim in input_shape)
    print("PSP module will interpolate to a final feature map size of %s" % (feature_map_size,))

    bin_sizes = (1, 2, 3, 6)
    res_levels = {s: interp_block(res, s, feature_map_size, input_shape)
                  for s in bin_sizes}
    org_levels = {s: interp_block_t(org, s, feature_map_size, input_shape)
                  for s in bin_sizes}
    fused = {s: Multiply()([res_levels[s], org_levels[s]])
             for s in bin_sizes}

    base = Multiply()([res, org])
    return Concatenate()([base, fused[6], fused[3], fused[2], fused[1]])
def build_pyramid_pooling_aver_module(res, org, input_shape):
    """Build a Pyramid Pooling Module that averages two feature streams.

    Both `res` and `org` are pooled/interpolated at pyramid levels 1, 2, 3
    and 6; the matching branches are averaged element-wise and concatenated
    with the raw `org` stream along the channel axis.
    """
    # Backbone output is 1/8 of the input resolution.
    feature_map_size = tuple(int(ceil(input_dim / 8.0))
                             for input_dim in input_shape)
    print("PSP module will interpolate to a final feature map size of %s" % (feature_map_size, ))
    levels = (1, 2, 3, 6)
    # Keep layer-creation order identical to the unrolled version:
    # all `res` branches first, then all `org` branches, then the averages.
    res_branches = [interp_block(res, lvl, feature_map_size, input_shape)
                    for lvl in levels]
    org_branches = [interp_block_t(org, lvl, feature_map_size, input_shape)
                    for lvl in levels]
    averaged = [Average()([rb, ob])
                for rb, ob in zip(res_branches, org_branches)]
    # Concatenate coarsest-to-finest after the raw `org` stream.
    return Concatenate()([org, averaged[3], averaged[2], averaged[1], averaged[0]])
def build_pspnet(nb_classes, resnet_layers, input_shape, activation='softmax'):
    """Build and compile a PSPNet on top of a ResNet backbone.

    Args:
        nb_classes: number of output classes.
        resnet_layers: depth of the ResNet backbone (e.g. 50).
        input_shape: (height, width) of the RGB input images.
        activation: final activation applied to the per-pixel logits.

    Returns:
        A compiled keras Model mapping (H, W, 3) inputs to class scores.
    """
    print("Building a PSPNet based on ResNet %i expecting inputs of shape %s predicting %i classes" % (resnet_layers, input_shape, nb_classes))
    inp = Input((input_shape[0], input_shape[1], 3))
    res = ResNet(inp, layers=resnet_layers)
    print(res.shape)
    psp = build_pyramid_pooling_module(res, input_shape)
    print(psp.shape)
    x = Conv2D(512, (3, 3), strides=(1, 1), padding="same", name="conv5_4", use_bias=False)(psp)
    x = BN(name="conv5_4_bn")(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)
    x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="conv6")(x)
    # Upsample the logits back to the input resolution.
    x = Interp([input_shape[0], input_shape[1]])(x)
    # BUG FIX: the `activation` parameter was silently ignored and
    # 'softmax' was hard-coded here.
    x = Activation(activation)(x)
    model = Model(inputs=inp, outputs=x)
    model.summary()
    # NOTE(review): `learning_rate` is a module-level global - confirm it is
    # defined before this function is called.
    sgd = SGD(lr=learning_rate, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
#1: get weight, 2
def identity_block(X, f, filters, stage, block):
    """ResNet identity block: a 1x1 / fxf / 1x1 conv stack with a
    pass-through skip connection added before the final ReLU."""
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters
    # The un-transformed input is added back at the end.
    shortcut = X
    # 1x1 bottleneck reduction.
    out = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a')(X)
    out = BatchNormalization(axis=3, name=bn_name_base + '2a')(out)
    out = Activation('relu')(out)
    # fxf spatial convolution.
    out = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b')(out)
    out = BatchNormalization(axis=3, name=bn_name_base + '2b')(out)
    out = Activation('relu')(out)
    # 1x1 bottleneck expansion (no ReLU before the add).
    out = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c')(out)
    out = BatchNormalization(axis=3, name=bn_name_base + '2c')(out)
    # Residual add, then the final activation.
    out = Add()([out, shortcut])
    return Activation('relu')(out)
def convolutional_block(X, f, filters, stage, block, s = 2):
    """ResNet convolutional block: a strided 1x1 / fxf / 1x1 conv stack plus
    a strided 1x1 projection shortcut so shapes match at the residual add."""
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    F1, F2, F3 = filters
    shortcut_input = X
    # Main path: strided 1x1 reduction.
    out = Conv2D(F1, (1, 1), strides=(s, s), padding='valid', name=conv_name_base + '2a')(X)
    out = BatchNormalization(axis=3, name=bn_name_base + '2a')(out)
    out = Activation('relu')(out)
    # Main path: fxf spatial convolution.
    out = Conv2D(F2, (f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b')(out)
    out = BatchNormalization(axis=3, name=bn_name_base + '2b')(out)
    out = Activation('relu')(out)
    # Main path: 1x1 expansion (no ReLU before the add).
    out = Conv2D(F3, (1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c')(out)
    out = BatchNormalization(axis=3, name=bn_name_base + '2c')(out)
    # Shortcut path: strided projection to the output shape.
    shortcut = Conv2D(F3, (1, 1), strides=(s, s), padding='valid', name=conv_name_base + '1')(shortcut_input)
    shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(shortcut)
    # Residual add, then the final activation.
    out = Add()([out, shortcut])
    return Activation('relu')(out)
# GRADED FUNCTION: ResNet50
def RResNet50(input_shape = (64, 64, 3), classes=200):
    """Headless ResNet-50: stem + stages 2-5 + average pool + flatten.

    NOTE: `classes` is accepted for API compatibility but is not used -
    no classification head is attached.
    """
    X_input = Input(input_shape)
    # Stage 1: padded 7x7 stem conv + max pool.
    x = ZeroPadding2D((3, 3))(X_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=3, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    # Stages 2-5: one (possibly strided) convolutional block followed by
    # identity blocks, per the standard 3-4-6-3 ResNet-50 layout.
    stage_specs = [
        (2, [64, 64, 256], 1, 'bc'),
        (3, [128, 128, 512], 2, 'bcd'),
        (4, [256, 256, 1024], 2, 'bcdef'),
        (5, [512, 512, 2048], 2, 'bc'),
    ]
    for stage, stage_filters, stride, id_block_ids in stage_specs:
        x = convolutional_block(x, f=3, filters=stage_filters, stage=stage, block='a', s=stride)
        for block_id in id_block_ids:
            x = identity_block(x, 3, stage_filters, stage=stage, block=block_id)
    # Pool and flatten to a feature vector.
    x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    x = Flatten()(x)
    return Model(inputs=X_input, outputs=x, name='ResNet50')
from keras.applications.resnet50 import ResNet50
def create_resnet50(input_img):
    """Frozen ImageNet-pretrained ResNet50 features flattened to a vector.

    Every layer except the input is made non-trainable.
    """
    backbone = ResNet50(weights='imagenet', include_top=False,
                        input_tensor=input_img)
    for frozen_layer in backbone.layers[1:]:
        frozen_layer.trainable = False
    return Reshape((-1,))(backbone.outputs[0])
def true_ResNet50(classes):
    """Frozen ResNet50 cut at layer 'activation_49' with a BatchNormalization
    on top.

    NOTE: `classes` is accepted for API compatibility but not used.
    """
    base_model = RResNet50(input_shape=(224, 224, 3), classes=200)
    base_model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    for layer in base_model.layers:
        layer.trainable = False
    normalized = BatchNormalization()(base_model.get_layer('activation_49').output)
    return Model(inputs=base_model.input, outputs=normalized, name='true-ResNet50')
def fake_ResNet50_base(index, input_shape=(224, 224, 3), classes=200):
    """Frozen ResNet50 copy whose layer names carry an `index` suffix.

    Cuts the backbone at the index-dependent final activation layer and adds
    a BatchNormalization on top. NOTE: `input_shape`/`classes` are accepted
    for API compatibility; the backbone is always built at (224, 224, 3).
    """
    base_model = RResNet50(input_shape=(224, 224, 3), classes=200)
    base_model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    suffix = "_" + str(index)
    for layer in base_model.layers:
        layer.trainable = False
        layer.name = layer.name + suffix
    base_model.summary()
    # Keras numbers activation layers globally, so the name of the last
    # activation depends on how many copies were built before this one.
    activation_number = (index + 2) * 49
    res_layer = 'activation_' + str(activation_number) + suffix
    print(res_layer)
    normalized = BatchNormalization()(base_model.get_layer(res_layer).output)
    return Model(inputs=base_model.input, outputs=normalized)
def fake_ResNet50_base_new(index, input_shape=(224, 224, 3), classes=200):
    """Variant of fake_ResNet50_base with a different activation-numbering
    offset (accounts for 6 extra auto-named layers per prior copy).

    NOTE: `input_shape`/`classes` are accepted for API compatibility; the
    backbone is always built at (224, 224, 3).
    """
    base_model = RResNet50(input_shape=(224, 224, 3), classes=200)
    base_model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
    suffix = "_" + str(index)
    for layer in base_model.layers:
        layer.trainable = False
        layer.name = layer.name + suffix
    base_model.summary()
    # Global activation counter: 49 activations per copy plus 6 extra
    # auto-named layers per previously built copy.
    activation_number = (index + 2) * 49 + index * 6
    res_layer = 'activation_' + str(activation_number) + suffix
    print(res_layer)
    normalized = BatchNormalization()(base_model.get_layer(res_layer).output)
    return Model(inputs=base_model.input, outputs=normalized)
def text_cnnmodel(classes=200):
    """Three-tower 1-D text CNN over token-id sequences of length 64.

    Each tower (kernel sizes 3, 4, 5) is conv-bn-relu twice followed by max
    pooling; the towers are concatenated and projected to a 2048-d embedding
    ending in a BatchNormalization layer named 'bn'.

    NOTE(review): relies on a module-level `vocab` for the embedding input
    dimension - confirm it is defined before this function is called.
    NOTE: `classes` is accepted for API compatibility but not used.
    """
    main_input = Input(shape=(64,), dtype='float64')
    embedder = Embedding(len(vocab) + 1, 256, input_length=64)
    embed = embedder(main_input)
    # The three towers were copy-pasted verbatim except for the kernel size;
    # build them in a loop instead (creation order is unchanged).
    towers = []
    for kernel_size in (3, 4, 5):
        t = Conv1D(256, kernel_size, padding='same')(embed)
        t = BatchNormalization()(t)
        t = Activation('relu')(t)
        t = Conv1D(128, kernel_size, padding='same')(t)
        t = BatchNormalization()(t)
        t = Activation('relu')(t)
        towers.append(MaxPooling1D(pool_size=4)(t))
    conc = Concatenate()(towers)
    flat = Flatten()(conc)
    drop = Dropout(0.5)(flat)
    fc = Dense(2048)(drop)
    bn = BatchNormalization(name='bn')(fc)
    return Model(inputs=main_input, outputs=bn)
def text_cnnmodel_base(index, classes):
    """Frozen copy of text_cnnmodel whose layer names carry an `index`
    suffix, so several copies can coexist in one graph."""
    base_model = text_cnnmodel(classes)
    suffix = "_" + str(index)
    for layer in base_model.layers:
        layer.trainable = False
        layer.name = layer.name + suffix
    return Model(inputs=base_model.input, outputs=base_model.output)
#es = EarlyStopping(monitor='val_loss', patience=1)
#model.fit(x=X_train,y=Y_train,epochs=20,batch_size=32,validation_data=(X_val, Y_val),callbacks=[es])
#tt=build_pspnet(102, 50, input_shape=(224,224), activation='softmax')
def mult_text_cnnmodel(classes):
    """Ensemble of ten frozen text-CNN branches averaged into one output.

    Each branch is an independently named copy of text_cnnmodel; the model
    takes ten token-id inputs and returns the element-wise average of the
    branch embeddings.
    """
    # The original built ten identical branches with copy-pasted code;
    # a loop produces the same branches in the same order.
    branch_inputs, branch_features = [], []
    for branch_index in range(10):
        branch = text_cnnmodel_base(branch_index, classes)
        branch_inputs.append(branch.input)
        branch_features.append(branch.output)
    outs = Average()(branch_features)
    model = Model(inputs=branch_inputs, outputs=outs, name='mult_text_cnnmodel')
    # model.summary()
    return model
def fake_ResNet50_new(classes):
fake_base_model1=fake_ResNet50_base(0,input_shape = (224, 224, 3),classes=200)
temp_feature1=fake_base_model1.output
in1=fake_base_model1.input
fake_base_model2=fake_ResNet50_base(1,input_shape = (224, 224, 3),classes=200)
temp_feature2=fake_base_model2.output
in2=fake_base_model2.input
fake_base_model3=fake_ResNet50_base(2,input_shape = (224, 224, 3),classes=200)
temp_feature3=fake_base_model3.output
in3=fake_base_model3.input
fake_base_model4=fake_ResNet50_base(3,input_shape = (224, 224, 3),classes=200)
temp_feature4=fake_base_model4.output
in4=fake_base_model4.input
fake_base_model5=fake_ResNet50_base(4,input_shape = (224, 224, 3),classes=200)
temp_feature5=fake_base_model5.output
in5=fake_base_model5.input
fake_base_model6=fake_ResNet50_base(5,input_shape = (224, 224, 3),classes=200)
temp_feature6=fake_base_model6.output
in6=fake_base_model6.input
fake_base_model7=fake_ResNet50_base(6,input_shape = (224, 224, 3),classes=200)
temp_feature7=fake_base_model7.output
in7=fake_base_model7.input
fake_base_model8=fake_ResNet50_base(7,input_shape = (224, 224, 3),classes=200)
temp_feature8=fake_base_model8.output
in8=fake_base_model8.input
fake_base_model9=fake_ResNet50_base(8,input_shape = (224, 224, 3),classes=200)
temp_feature9=fake_base_model9.output
in9=fake_base_model9.input
fake_base_model10=fake_ResNet50_base(9,input_shape = (224, 224, 3),classes=200)
temp_feature10=fake_base_model10.output
in10=fake_base_model10.input
| |
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from utils import utils
from commons import similarity_calculator
from boosting_decision_making.boosting_decision_maker import BoostingDecisionMaker
import logging
import numpy as np
from datetime import datetime
from collections import deque
logger = logging.getLogger("analyzerApp.boosting_featurizer")
class BoostingFeaturizer:
    def __init__(self, all_results, config, feature_ids,
                 weighted_log_similarity_calculator=None,
                 features_dict_with_saved_objects=None):
        """Prepare search results for feature extraction.

        Args:
            all_results: list of (query_log, elasticsearch_response) pairs.
            config: analyzer configuration dict driving filtering, similarity
                calculation and auxiliary model lookup.
            feature_ids: feature ids to compute, either a list of ints or a
                range string (e.g. "0-5,7").
            weighted_log_similarity_calculator: optional weighted similarity
                model passed through to the SimilarityCalculator.
            features_dict_with_saved_objects: optional mapping of feature id
                to a saved encoder object used by `_encode_into_vector`.
        """
        self.config = config
        self.previously_gathered_features = {}
        self.models = {}
        self.features_dict_with_saved_objects = {}
        if features_dict_with_saved_objects is not None:
            self.features_dict_with_saved_objects = features_dict_with_saved_objects
        self.similarity_calculator = similarity_calculator.SimilarityCalculator(
            self.config,
            weighted_similarity_calculator=weighted_log_similarity_calculator)
        # Feature ids may arrive as a compact range string.
        if type(feature_ids) == str:
            self.feature_ids = utils.transform_string_feature_range_into_list(feature_ids)
        else:
            self.feature_ids = feature_ids
        self.fields_to_replace_with_merged_logs = [
            "message", "detected_message",
            "detected_message_without_params_extended",
            "message_without_params_extended",
            "message_extended",
            "detected_message_extended",
            "message_without_params_and_brackets",
            "detected_message_without_params_and_brackets"]
        # feature id -> (method, keyword args, prerequisite feature ids).
        self.feature_functions = {
            0: (self._calculate_score, {}, []),
            1: (self._calculate_place, {}, []),
            3: (self._calculate_max_score_and_pos, {"return_val_name": "max_score_pos"}, []),
            5: (self._calculate_min_score_and_pos, {"return_val_name": "min_score_pos"}, []),
            7: (self._calculate_percent_count_items_and_mean, {"return_val_name": "cnt_items_percent"}, []),
            9: (self._calculate_percent_issue_types, {}, []),
            11: (self._calculate_similarity_percent, {"field_name": "message"}, []),
            12: (self.is_only_merged_small_logs, {}, []),
            13: (self._calculate_similarity_percent, {"field_name": "merged_small_logs"}, []),
            14: (self._has_test_item_several_logs, {}, []),
            15: (self._has_query_several_logs, {}, []),
            18: (self._calculate_similarity_percent, {"field_name": "detected_message"}, []),
            19: (self._calculate_similarity_percent, {"field_name": "detected_message_with_numbers"}, []),
            23: (self._calculate_similarity_percent, {"field_name": "stacktrace"}, []),
            25: (self._calculate_similarity_percent, {"field_name": "only_numbers"}, []),
            26: (self._calculate_max_score_and_pos, {"return_val_name": "max_score"}, []),
            27: (self._calculate_min_score_and_pos, {"return_val_name": "min_score"}, []),
            28: (self._calculate_percent_count_items_and_mean,
                 {"return_val_name": "mean_score"}, []),
            29: (self._calculate_similarity_percent, {"field_name": "message_params"}, []),
            34: (self._calculate_similarity_percent, {"field_name": "found_exceptions"}, []),
            35: (self._is_all_log_lines, {}, []),
            36: (self._calculate_similarity_percent, {"field_name": "detected_message_extended"}, []),
            37: (self._calculate_similarity_percent,
                 {"field_name": "detected_message_without_params_extended"}, []),
            38: (self._calculate_similarity_percent, {"field_name": "stacktrace_extended"}, []),
            40: (self._calculate_similarity_percent, {"field_name": "message_without_params_extended"}, []),
            41: (self._calculate_similarity_percent, {"field_name": "message_extended"}, []),
            42: (self.is_the_same_test_case, {}, []),
            43: (self.has_the_same_test_case_in_all_results, {}, []),
            48: (self.is_text_of_particular_defect_type, {"label_type": "ab"}, []),
            49: (self.is_text_of_particular_defect_type, {"label_type": "pb"}, []),
            50: (self.is_text_of_particular_defect_type, {"label_type": "si"}, []),
            51: (self.predict_particular_defect_type, {}, []),
            52: (self._calculate_similarity_percent, {"field_name": "namespaces_stacktrace"}, []),
            53: (self._calculate_similarity_percent,
                 {"field_name": "detected_message_without_params_and_brackets"}, []),
            55: (self._calculate_similarity_percent,
                 {"field_name": "potential_status_codes"}, []),
            56: (self._calculate_similarity_percent, {"field_name": "launch_name"}, []),
            57: (self.is_launch_id_the_same, {}, []),
            # Feature 58 is backed by an auxiliary boosting model; its
            # prerequisite features are whatever that model requires.
            58: (self._calculate_model_probability,
                 {"model_folder": self.config["boosting_model"]},
                 self.get_necessary_features(self.config["boosting_model"])),
            59: (self._calculate_similarity_percent, {"field_name": "found_tests_and_methods"}, []),
            61: (self._calculate_similarity_percent, {"field_name": "test_item_name"}, []),
            64: (self._calculate_decay_function_score, {"field_name": "start_time"}, []),
            65: (self._calculate_test_item_logs_similar_percent, {}, []),
            66: (self._count_test_item_logs, {}, []),
            67: (self._encode_into_vector,
                 {"field_name": "launch_name", "feature_name": 67, "only_query": True}, []),
            68: (self._encode_into_vector,
                 {"field_name": "detected_message", "feature_name": 68, "only_query": False}, []),
            69: (self._encode_into_vector,
                 {"field_name": "stacktrace", "feature_name": 69, "only_query": False}, []),
            70: (self._encode_into_vector,
                 {"field_name": "launch_name", "feature_name": 70, "only_query": True}, []),
            71: (self._encode_into_vector,
                 {"field_name": "test_item_name", "feature_name": 71, "only_query": False}, []),
            72: (self._encode_into_vector,
                 {"field_name": "unique_id", "feature_name": 72, "only_query": True}, []),
            73: (self._encode_into_vector,
                 {"field_name": "found_exceptions", "feature_name": 73, "only_query": True}, [])
        }
        fields_to_calc_similarity = self.find_columns_to_find_similarities_for()
        all_results = self._perform_additional_text_processing(all_results)
        # Filtering pipeline (order matters): drop hits below the
        # min-should-match thresholds for each configured field...
        if "filter_min_should_match" in self.config and len(self.config["filter_min_should_match"]) > 0:
            self.similarity_calculator.find_similarity(
                all_results,
                self.config["filter_min_should_match"] + ["merged_small_logs"])
            for field in self.config["filter_min_should_match"]:
                all_results = self.filter_by_min_should_match(all_results, field=field)
        # ...then keep hits matching ANY of the configured fields...
        if "filter_min_should_match_any" in self.config and\
                len(self.config["filter_min_should_match_any"]) > 0:
            self.similarity_calculator.find_similarity(
                all_results,
                self.config["filter_min_should_match_any"] + ["merged_small_logs"])
            all_results = self.filter_by_min_should_match_any(
                all_results,
                fields=self.config["filter_min_should_match_any"])
        # Stats must be computed before the similarity-based filter below.
        self.test_item_log_stats = self._calculate_stats_by_test_item_ids(all_results)
        if "filter_by_all_logs_should_be_similar" in self.config:
            if self.config["filter_by_all_logs_should_be_similar"]:
                all_results = self.filter_by_all_logs_should_be_similar(all_results)
        if "filter_by_unique_id" in self.config and self.config["filter_by_unique_id"]:
            all_results = self.filter_by_unique_id(all_results)
        # Similarities are calculated by default unless explicitly disabled.
        if "calculate_similarities" not in self.config or self.config["calculate_similarities"]:
            self.similarity_calculator.find_similarity(
                all_results,
                fields_to_calc_similarity)
        self.raw_results = all_results
        self.all_results = self.normalize_results(all_results)
        self.scores_by_issue_type = None
        self.defect_type_predict_model = None
        self.used_model_info = set()
        # These features depend on external models/encoders and may not be
        # reused from a previous gathering pass.
        self.features_to_recalculate_always = set([51, 58] + list(range(67, 74)))
def _count_test_item_logs(self):
scores_by_issue_type = self.find_most_relevant_by_type()
sim_logs_num_scores = {}
for issue_type in scores_by_issue_type:
sim_logs_num_scores[issue_type] = len(self.all_results)
return sim_logs_num_scores
def _calculate_test_item_logs_similar_percent(self):
scores_by_issue_type = self.find_most_relevant_by_type()
sim_logs_num_scores = {}
for issue_type in scores_by_issue_type:
test_item_id = scores_by_issue_type[issue_type]["mrHit"]["_source"]["test_item"]
sim_logs_num_scores[issue_type] = 0.0
if test_item_id in self.test_item_log_stats:
sim_logs_num_scores[issue_type] = self.test_item_log_stats[test_item_id]
return sim_logs_num_scores
def _calculate_stats_by_test_item_ids(self, all_results):
test_item_log_stats = {}
for log, res in all_results:
for r in res["hits"]["hits"]:
if r["_source"]["test_item"] not in test_item_log_stats:
test_item_log_stats[r["_source"]["test_item"]] = 0
test_item_log_stats[r["_source"]["test_item"]] += 1
all_logs = len(all_results)
if all_logs:
for test_item_id in test_item_log_stats:
test_item_log_stats[test_item_id] /= all_logs
return test_item_log_stats
def _perform_additional_text_processing(self, all_results):
for log, res in all_results:
for r in res["hits"]["hits"]:
if "found_tests_and_methods" in r["_source"]:
r["_source"]["found_tests_and_methods"] = utils.preprocess_found_test_methods(
r["_source"]["found_tests_and_methods"])
return all_results
def _calculate_decay_function_score(self, field_name):
scores_by_issue_type = self.find_most_relevant_by_type()
dates_by_issue_types = {}
for issue_type in scores_by_issue_type:
field_date = scores_by_issue_type[issue_type]["mrHit"]["_source"][field_name]
field_date = datetime.strptime(field_date, '%Y-%m-%d %H:%M:%S')
compared_field_date = scores_by_issue_type[issue_type]["compared_log"]["_source"][field_name]
compared_field_date = datetime.strptime(compared_field_date, '%Y-%m-%d %H:%M:%S')
if compared_field_date < field_date:
field_date, compared_field_date = compared_field_date, field_date
dates_by_issue_types[issue_type] = np.exp(
np.log(self.config["time_weight_decay"]) * ((compared_field_date - field_date).days) / 7)
return dates_by_issue_types
    def _encode_into_vector(self, field_name, feature_name, only_query):
        """Encode a text field of the best candidates into 0/1 vectors using
        the saved encoder registered for `feature_name`.

        When `only_query` is False, the query log's and the candidate's
        vectors are averaged element-wise. Returns a dict
        issue_type -> list of floats; NOTE: returns an empty *list* (not a
        dict) when the encoder is missing or mis-configured.
        """
        if feature_name not in self.features_dict_with_saved_objects:
            logger.error(self.features_dict_with_saved_objects)
            logger.error("Feature '%s' has no encoder" % feature_name)
            return []
        # The saved encoder is bound to one specific field; refuse mismatches.
        if field_name != self.features_dict_with_saved_objects[feature_name].field_name:
            logger.error(field_name)
            logger.error("Field name '%s' is not the same as in the settings '%s'" % (
                field_name, self.features_dict_with_saved_objects[feature_name].field_name))
            return []
        scores_by_issue_type = self.find_most_relevant_by_type()
        encodings_by_issue_type = {}
        issue_types, gathered_data = [], []
        for issue_type in scores_by_issue_type:
            field_data = scores_by_issue_type[issue_type]["compared_log"]["_source"][field_name]
            issue_types.append(issue_type)
            gathered_data.append(field_data)
            # In pair mode, the candidate's text follows the query's, so
            # entries 2*i and 2*i+1 belong to issue_types[i].
            if not only_query:
                gathered_data.append(
                    scores_by_issue_type[issue_type]["mrHit"]["_source"][field_name])
        if gathered_data:
            # NOTE(review): `.transform(...).toarray()` suggests an
            # sklearn-style vectorizer returning a sparse matrix - confirm.
            encoded_data = self.features_dict_with_saved_objects[feature_name].transform(
                gathered_data).toarray()
            # Binarize: any non-zero weight becomes 1.0.
            encoded_data[encoded_data != 0.0] = 1.0
            for idx in range(len(issue_types)):
                if only_query:
                    encodings_by_issue_type[issue_types[idx]] = list(encoded_data[idx])
                else:
                    encodings_by_issue_type[issue_types[idx]] = list(
                        (encoded_data[2 * idx] + encoded_data[2 * idx + 1]) / 2)
        return encodings_by_issue_type
def _calculate_model_probability(self, model_folder=""):
if not model_folder.strip():
return []
if model_folder not in self.models:
logger.error("Model folder is not found: '%s'", model_folder)
return []
feature_ids = self.models[model_folder].get_feature_ids()
feature_data = utils.gather_feature_list(self.previously_gathered_features, feature_ids, to_list=True)
predicted_labels, predicted_labels_probability = self.models[model_folder].predict(
feature_data)
predicted_probability = []
for res in predicted_labels_probability:
predicted_probability.append(float(res[1]))
return [[round(r, 2)] for r in predicted_probability]
def get_necessary_features(self, model_folder):
if not model_folder.strip():
return[]
if model_folder not in self.models:
try:
self.models[model_folder] = BoostingDecisionMaker(folder=model_folder)
return self.models[model_folder].get_feature_ids()
except Exception as err:
logger.debug(err)
return []
return self.models[model_folder].get_feature_ids()
def fill_prevously_gathered_features(self, feature_list, feature_ids):
self.previously_gathered_features = utils.fill_prevously_gathered_features(
feature_list, feature_ids)
def get_used_model_info(self):
return list(self.used_model_info)
def set_defect_type_model(self, defect_type_model):
self.defect_type_predict_model = defect_type_model
    def predict_particular_defect_type(self):
        """Probability, per issue type, that the query text matches the
        candidate's defect type according to the defect-type model.

        Returns a dict issue_type -> probability in [0, 1]; 0.0 when the
        model is not applicable or prediction fails.
        """
        scores_by_issue_type = self.find_most_relevant_by_type()
        result = {}
        for issue_type in scores_by_issue_type:
            compared_log = scores_by_issue_type[issue_type]["compared_log"]
            det_message = compared_log["_source"]["detected_message_without_params_extended"]
            mr_hit = scores_by_issue_type[issue_type]["mrHit"]
            issue_type_to_compare = mr_hit["_source"]["issue_type"]
            det_message = utils.clean_from_brackets(det_message)
            result[issue_type] = 0.0
            try:
                # The first two letters of the issue type select the generic
                # sub-model (e.g. 'ab', 'pb', 'si').
                model_to_use = issue_type_to_compare.lower()[:2]
                # 'nd'/'ti' types are skipped - presumably no model exists
                # for them; confirm against the defect-type model's config.
                if model_to_use in ["nd", "ti"]:
                    continue
                # Prefer a model trained for the exact issue type when one exists.
                if issue_type_to_compare in self.defect_type_predict_model.models:
                    model_to_use = issue_type_to_compare
                res, res_prob = self.defect_type_predict_model.predict(
                    [det_message], model_to_use)
                # Only a binary predictor yields a usable positive-class probability.
                result[issue_type] = res_prob[0][1] if len(res_prob[0]) == 2 else 0.0
                self.used_model_info.update(self.defect_type_predict_model.get_model_info())
            except Exception as err:
                # Best effort: prediction failures leave the score at 0.0.
                logger.error(err)
        return result
def is_text_of_particular_defect_type(self, label_type):
scores_by_issue_type = self.find_most_relevant_by_type()
issue_type_stats = {}
for issue_type in scores_by_issue_type:
mr_hit = scores_by_issue_type[issue_type]["mrHit"]
rel_item_issue_type = mr_hit["_source"]["issue_type"]
issue_type_stats[issue_type] = int(label_type == rel_item_issue_type.lower()[:2])
return issue_type_stats
def filter_by_all_logs_should_be_similar(self, all_results):
new_results = []
for log, res in all_results:
new_elastic_res = []
for r in res["hits"]["hits"]:
if r["_source"]["test_item"] in self.test_item_log_stats:
if self.test_item_log_stats[r["_source"]["test_item"]] > 0.99:
new_elastic_res.append(r)
new_results.append((log, {"hits": {"hits": new_elastic_res}}))
return new_results
def filter_by_unique_id(self, all_results):
new_results = []
for log, res in all_results:
unique_id_dict = {}
for r in res["hits"]["hits"]:
if r["_source"]["unique_id"] not in unique_id_dict:
unique_id_dict[r["_source"]["unique_id"]] = []
unique_id_dict[r["_source"]["unique_id"]].append(
(r["_id"], int(r["_score"]), datetime.strptime(
r["_source"]["start_time"], '%Y-%m-%d %H:%M:%S')))
log_ids_to_take = set()
for unique_id in unique_id_dict:
unique_id_dict[unique_id] = sorted(
unique_id_dict[unique_id],
key=lambda x: (x[1], x[2]),
reverse=True)
scores_used = set()
for sorted_score in unique_id_dict[unique_id]:
if sorted_score[1] not in scores_used:
log_ids_to_take.add(sorted_score[0])
scores_used.add(sorted_score[1])
new_elastic_res = []
for elastic_res in res["hits"]["hits"]:
if elastic_res["_id"] in log_ids_to_take:
new_elastic_res.append(elastic_res)
new_results.append((log, {"hits": {"hits": new_elastic_res}}))
return new_results
def is_launch_id_the_same(self):
scores_by_issue_type = self.find_most_relevant_by_type()
num_of_logs_issue_type = {}
for issue_type in scores_by_issue_type:
rel_item_launch_id = scores_by_issue_type[issue_type]["mrHit"]["_source"]["launch_id"]
queiried_item_launch_id = scores_by_issue_type[issue_type]["compared_log"]["_source"]["launch_id"]
num_of_logs_issue_type[issue_type] = int(rel_item_launch_id == queiried_item_launch_id)
return num_of_logs_issue_type
def is_the_same_test_case(self):
scores_by_issue_type = self.find_most_relevant_by_type()
num_of_logs_issue_type = {}
for issue_type in scores_by_issue_type:
rel_item_unique_id = scores_by_issue_type[issue_type]["mrHit"]["_source"]["unique_id"]
queiried_item_unique_id = scores_by_issue_type[issue_type]["compared_log"]["_source"]["unique_id"]
if not rel_item_unique_id.strip() and not queiried_item_unique_id.strip():
num_of_logs_issue_type[issue_type] = 0
else:
num_of_logs_issue_type[issue_type] = int(rel_item_unique_id == queiried_item_unique_id)
return num_of_logs_issue_type
def has_the_same_test_case_in_all_results(self):
scores_by_issue_type = self.find_most_relevant_by_type()
num_of_logs_issue_type = {}
has_the_same_test_case = 0
for issue_type in scores_by_issue_type:
rel_item_unique_id = scores_by_issue_type[issue_type]["mrHit"]["_source"]["unique_id"]
queiried_item_unique_id = scores_by_issue_type[issue_type]["compared_log"]["_source"]["unique_id"]
if not rel_item_unique_id.strip():
continue
if rel_item_unique_id == queiried_item_unique_id:
has_the_same_test_case = 1
break
for issue_type in scores_by_issue_type:
num_of_logs_issue_type[issue_type] = has_the_same_test_case
return num_of_logs_issue_type
def find_columns_to_find_similarities_for(self):
fields_to_calc_similarity = set()
for feature in self.feature_ids:
method_params = self.feature_functions[feature]
if "field_name" in method_params[1]:
fields_to_calc_similarity.add(method_params[1]["field_name"])
return list(fields_to_calc_similarity)
def _is_all_log_lines(self):
scores_by_issue_type = self._calculate_score()
num_of_logs_issue_type = {}
for issue_type in scores_by_issue_type:
num_of_logs_issue_type[issue_type] = int(self.config["number_of_log_lines"] == -1)
return num_of_logs_issue_type
def is_only_merged_small_logs(self):
scores_by_issue_type = self.find_most_relevant_by_type()
similarity_percent_by_type = {}
for issue_type in scores_by_issue_type:
group_id = (scores_by_issue_type[issue_type]["mrHit"]["_id"],
scores_by_issue_type[issue_type]["compared_log"]["_id"])
sim_obj = self.similarity_calculator.similarity_dict["message"][group_id]
similarity_percent_by_type[issue_type] = int(sim_obj["both_empty"])
return similarity_percent_by_type
def filter_by_min_should_match(self, all_results, field="message"):
new_results = []
for log, res in all_results:
new_elastic_res = []
for elastic_res in res["hits"]["hits"]:
group_id = (elastic_res["_id"], log["_id"])
| |
load data that will be used for evaluating the recognition
# accuracy of the base categories.
data_base = np.load(file_train_categories_val_phase_data)
labels_base = load_data(file_train_categories_val_phase_labels)
# load data that will be use for evaluating the few-shot recogniton
# accuracy on the novel categories.
data_novel = np.load(file_val_categories_val_phase_data)
labels_novel = load_data(file_val_categories_val_phase_labels)
else:
data_base, labels_base, data_novel, labels_novel = self. prepare_data_and_labels_val()
self.data = np.concatenate(
[data_base, data_novel], axis=0)
self.labels = labels_base + labels_novel
self.label2ind = buildLabelIndex(self.labels)
self.labelIds = sorted(self.label2ind.keys())
self.num_cats = len(self.labelIds)
self.labelIds_base = buildLabelIndex(labels_base).keys()
self.labelIds_novel = buildLabelIndex(labels_novel).keys()
self.num_cats_base = len(self.labelIds_base)
self.num_cats_novel = len(self.labelIds_novel)
intersection = set(self.labelIds_base) & set(self.labelIds_novel)
assert (len(intersection) == 0)
else:
raise ValueError('Not valid phase {0}'.format(self.phase))
# mean_pix = [x / 255.0 for x in [120.39586422, 115.59361427, 104.54012653]]
# std_pix = [x / 255.0 for x in [70.68188272, 68.27635443, 72.54505529]]
normalize = transforms.Normalize(mean=self.mean_pix, std=self.std_pix)
if (self.phase == 'test' or self.phase == 'val') or (do_not_use_random_transf == True):
if load_data_from_file:
self.transform = transforms.Compose([
lambda x: np.asarray(x),
transforms.ToTensor(),
normalize
])
else:
self.transform = transforms.Compose([
lambda x: np.asarray(x),
transforms.ToTensor(),
normalize
])
else:
if load_data_from_file:
self.transform = transforms.Compose([
#transforms.RandomCrop(self.image_res, padding=8),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x),
transforms.ToTensor(),
normalize
])
else:
self.transform = transforms.Compose([
#transforms.RandomCrop(self.image_res, padding=8),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x),
transforms.ToTensor(),
normalize
])
# roidb_ni_fname = '/dccstor/jsdata1/data/inloc_roidb_ni_s.pkl'
# with open(roidb_ni_fname, 'rb')as fid:
# self.roidb_ni = pickle.load(fid)
def __getitem__(self, index):
xml, label = self.data[index], self.labels[index]
# if False: # check class name
# train_classes = open(self.training_classes_list_path).read().splitlines()
# test_classes = open(self.test_classes_list_path).read().splitlines()
# val_classes = open("/dccstor/jsdata1/data/inloc_Amit_val_classes.txt").read().splitlines()
# n_code = os.path.basename(os.path.dirname(xml))
# class_name = folder_name_to_class_name[n_code]
# grp_cnt = 0
# if class_name in train_classes:
# print('getting image from train classes')
# grp_cnt+=1
# if class_name in test_classes:
# print('getting image from test classes')
# grp_cnt += 1
# if class_name in val_classes:
# print('getting image from val classes')
# grp_cnt += 1
# if grp_cnt==0:
# print('image came from unidentified class list !!!!!!!!!!!!!!!!!!!!!!')
# if grp_cnt>1:
# print('image came from more than one class list !!!!!!!!!!!!!!!!!!!!!!')
if self.phase == 'test':
img = xml.replace('.xml','.JPEG').replace('Annotations/CLS-LOC','Data/CLS-LOC')
# doing this so that it is consistent with all other datasets
# to return a PIL Image
gt_entry, img = load_imagenet_annotation(xml, img, label)
# show_gt_boxes(img,gt_entry['boxes'],[str(i) for i in gt_entry['gt_classes']],save_file_path='/dccstor/jsdata1/dev/tmp.jpg')
# DB_entry['gt_classes'],DB_entry['boxes'],DB_entry['gt_names']
else:
from PIL import Image
# im = Image.open(img_filename)
img = Image.open(xml).convert('RGB')
gt_entry = None
old_size = img.size #[columns, rows]
if self.crop_style==1:
larger = 0 if img.size[0] > img.size[1] else 1
new_size = int((self.image_res * img.size[abs(larger - 1)]) / img.size[larger])
resize_transform = transforms.Resize(new_size)
img = resize_transform(img)
if gt_entry is not None:
gt_entry['boxes'] = gt_entry['boxes'].astype('float')
gt_entry['boxes']*=float(new_size)/float(min(old_size))
gt_entry['boxes'] = gt_entry['boxes'].round().astype('int16')
pad_width_left = int((self.image_res - img.size[0]) / 2)
pad_width_right = int(pad_width_left + ((self.image_res - img.size[0]) % 2))
pad_width_top = int((self.image_res - img.size[1]) / 2)
pad_width_bottom = int(pad_width_top + ((self.image_res - img.size[1]) % 2))
pad_transform = transforms.Pad((pad_width_left, pad_width_top, pad_width_right, pad_width_bottom),
padding_mode=self.pad_mode)
img = pad_transform(img)
if gt_entry is not None:
gt_entry['boxes'][:, [0, 2]]+= pad_width_left
gt_entry['boxes'][:, [1, 3]] += pad_width_top
elif self.crop_style==2:
downsize_tfm = transforms.Compose([
transforms.Resize(self.image_res), # resize to have the smaller dimension equal to image_res
transforms.CenterCrop(self.image_res) # crop the other dimension to image_res
])
img = downsize_tfm(img)
img_scale = float(self.image_res)/float(min(old_size))
if gt_entry is not None:
gt_entry['boxes'] = gt_entry['boxes'].astype('float')*img_scale
if old_size[1]>old_size[0]: #rows>columns, portrait. crop top-bottom. img_size is now [img_scale*old_size[1],image_res]
crop_height= int((img_scale*old_size[1] - self.image_res) / 2)
if gt_entry is not None:
gt_entry['boxes'][:, [1, 3]] -= crop_height
else: # crop left-right
crop_width = int((img_scale*old_size[0] -self.image_res) / 2)
if gt_entry is not None:
gt_entry['boxes'][:, [0, 2]] -= crop_width
if gt_entry is not None:
gt_entry['boxes'] = gt_entry['boxes'].round().astype('int16')
else:
os.error('Imagenet_loc __getitem__: Unrecognized crop stype')
#show_gt_boxes(img, gt_entry['boxes'], [str(i) for i in gt_entry['gt_classes']], save_file_path='/dccstor/jsdata1/dev/tmp.jpg')
if self.transform is not None:
img = self.transform(img)
if gt_entry is not None:
boxes = np.pad(gt_entry['boxes'],((0,10-gt_entry['boxes'].shape[0]),(0,0)),'constant')
else:
boxes = None
if self.phase == 'test':
return img, label, torch.tensor(boxes)
else:
return img, label
def __len__(self):
return len(self.data)
def prepare_data_and_labels_train(self):
# transform = transforms.Compose([
# transforms.RandomResizedCrop(args.image_size),
# transforms.RandomHorizontalFlip(),
# transforms.ColorJitter(
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.2),
# transforms.ToTensor(),
# normalize])
# get train classes indices
ClassIdxDictTrainTrain = load_data('./datasets/ImageNet/ImageNetClassIdxDictTrainCatTrainPhase.pkl',)
# save train cat train phase data to file
data_train_train = []
labels_train_train = []
for i in ClassIdxDictTrainTrain.keys():
for j, idx in enumerate(ClassIdxDictTrainTrain[i]):
path, label = self.samples[idx]
data_train_train += [path]
labels_train_train += [label]
if self.debug and j == 50:
break
print("data train cat train phase shape: {}".format(len(data_train_train)))
print("labels train cat train phase shape: {}".format(len(labels_train_train)))
self.data = data_train_train
self.labels = labels_train_train
def prepare_data_and_labels_val(self):
# transform = transforms.Compose([
# transforms.RandomResizedCrop(args.image_size),
# transforms.RandomHorizontalFlip(),
# transforms.ColorJitter(
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.2),
# transforms.ToTensor(),
# normalize])
# get train classes indices
#ClassIdxDictTrainVal = load_data('/dccstor/alfassy/StarNet/data/ImageNetClassIdxDictTrainCatValPhase.pkl')
ClassIdxDictTrainVal = load_data('./datasets/ImageNet/ImageNetClassIdxDictTrainCatValPhase.pkl')
# save train cat train phase data to file
data_train_val = []
labels_train_val = []
for i in ClassIdxDictTrainVal.keys():
for j, idx in enumerate(ClassIdxDictTrainVal[i]):
path, label = self.samples[idx]
data_train_val += [path]
labels_train_val += [label]
if self.debug and j == 50:
break
print("data train cat val phase shape: {}".format(len(data_train_val)))
print("labels train cat val phase shape: {}".format(len(labels_train_val)))
# get validation classes indices
# val_indices = load_data('/dccstor/alfassy/StarNet/data/ImageNetValClasses.pkl')
# ClassIdxDict = load_data('/dccstor/alfassy/StarNet/data/ImageNetClassIdxDict.pkl'
# save val cat val phase data to file
data_val = []
labels_val = []
for i in val_indices:
for j, idx in enumerate(ClassIdxDict[i]):
path, label = self.samples[idx]
data_val += [path]
labels_val += [label]
if self.debug and j == 50:
break
print("data val shape: {}".format(len(data_val)))
print("labels val shape: {}".format(len(labels_val)))
return data_train_val, labels_train_val, data_val, labels_val
def prepare_data_and_labels_test(self):
# transform = transforms.Compose([
# transforms.RandomResizedCrop(args.image_size),
# transforms.RandomHorizontalFlip(),
# transforms.ColorJitter(
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.2),
# transforms.ToTensor(),
# normalize])
# get train classes indices
base_folder = assert_folder('./datasets/ImageNet')
train_classes_path = os.path.join(base_folder, 'ImageNetTrainClasses.pkl')
ClassIdxDict_Test_path = os.path.join(base_folder, 'ImageNetClassIdxDictTest.pkl')
TestClasses_path = os.path.join(base_folder, 'ImageNetTestClasses.pkl')
train_indices = load_data(train_classes_path)
ClassIdxDictTest = load_data(ClassIdxDict_Test_path)
# save train cat train phase data to file
data_test_train = []
labels_test_train = []
for i in train_indices:
for j, idx in enumerate(ClassIdxDictTest[i]):
path, label = self.samples[idx]
data_test_train += [path]
labels_test_train += [label]
if self.debug and j == 50:
break
print("data train cat test phase shape: {}".format(len(data_test_train)))
print("labels train cat test phase shape: {}".format(len(labels_test_train)))
# get validation classes indices
test_indices = load_data(TestClasses_path)
# save val cat val phase data to file
data_test_test = []
labels_test_test = []
for i in test_indices:
for j, idx in enumerate(ClassIdxDictTest[i]):
path, label = self.samples[idx]
data_test_test += [path]
labels_test_test += [label]
print("data val shape: {}".format(len(data_test_test)))
print("labels val shape: {}".format(len(labels_test_test)))
return data_test_train, labels_test_train, data_test_test, labels_test_test
def create_data_dictionaries_train_val(self):
base_folder = assert_folder('./datasets/ImageNet')
#val_classes_list_path = os.path.join(base_folder,'imagenet_val_classes.txt')
train_classes_path = os.path.join(base_folder,'ImageNetTrainClasses.pkl')
ClassIdxDict_TrainCatTrainPhase_path = os.path.join(base_folder,'ImageNetClassIdxDictTrainCatTrainPhase.pkl')
ClassIdxDict_TrainCatValPhase_path = os.path.join(base_folder,'ImageNetClassIdxDictTrainCatValPhase.pkl')
IdxDict_path = os.path.join(base_folder,'ImageNetClassIdxDict.pkl')
ValClasses_path = os.path.join(base_folder,'ImageNetValClasses.pkl')
train_classes_joseph = open(self.training_classes_list_path).read().splitlines()
# get train classes indices
train_indices = []
for class_name_joseph in train_classes_joseph:
for folder_name, class_name in folder_name_to_class_name.items():
if class_name_joseph == class_name:
train_indices += [self.class_to_idx[folder_name]]
print("train indices num: {}".format(len(train_indices)))
print("train classes list len: {}".format(len(train_classes_joseph)))
with open(train_classes_path, 'wb') as f:
pickle.dump(train_indices, f)
f.close()
print(train_indices)
# Dictionary - Keys: labels(int), Values: list of indices of images tagged as $key
ClassIdxDict = {el: [] for el in range(1000)}
for idx, (path, label) in enumerate(self.samples):
ClassIdxDict[label] += [idx]
# divide train data between train and val phase
random.seed(0)
ClassIdxDictTrainTrain = {el: [] for el in train_indices}
ClassIdxDictTrainVal = {el: [] for el in train_indices}
for label in train_indices:
ClassIdxDictTrainVal[label] = random.sample(ClassIdxDict[label], int(len(ClassIdxDict[label]) * 0.1))
ClassIdxDictTrainTrain[label] = [idx for idx in ClassIdxDict[label] if
idx not in ClassIdxDictTrainVal[label]]
with open(ClassIdxDict_TrainCatTrainPhase_path, 'wb') as f:
pickle.dump(ClassIdxDictTrainTrain, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
print(ClassIdxDictTrainTrain.keys())
with open(ClassIdxDict_TrainCatValPhase_path, 'wb') as f:
pickle.dump(ClassIdxDictTrainVal, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
print(ClassIdxDictTrainVal.keys())
with open(IdxDict_path, 'wb') as f:
pickle.dump(ClassIdxDict, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
print(ClassIdxDict.keys())
# val_classes_list = open(val_classes_list_path).read().splitlines()
# val_indices = []
# for class_name_val in val_classes_list:
# for folder_name, class_name in folder_name_to_class_name.items():
# if class_name_val == class_name:
# val_indices += [self.class_to_idx[folder_name]]
# print("val indices num: {}".format(len(val_indices)))
# print("val classes num: {}".format(len(val_classes_list)))
# with open(ValClasses_path, 'wb') as f:
# pickle.dump(val_indices, f)
# f.close()
# print(val_indices)
def create_data_dictionaries_test(self):
base_folder = assert_folder('./datasets/ImageNet')
# load train classes indices
train_classes_path = os.path.join(base_folder, 'ImageNetTrainClasses.pkl')
ClassIdxDict_Test_path = os.path.join(base_folder, 'ImageNetClassIdxDictTest.pkl')
TestClasses_path = os.path.join(base_folder,'ImageNetTestClasses.pkl')
train_indices = load_data(train_classes_path)
print(train_indices)
# Dictionary - Keys: labels(int), Values: | |
)
):
continue
# Resolve lookups - use lookup variable instead of index variable
if variable_name.endswith('_index') | hasattr(variable, 'lookup'): # Index variable found
# Use lookup variable name for output
try:
short_name = variable.lookup # Use lookup attribute
except AttributeError:
short_name = re.sub('_index$', '', variable_name, re.IGNORECASE) # Use associated variable name
try:
lookup_variable = self.nc_dataset.variables[short_name]
except:
logger.warning('Unable to access lookup variable "{}" for index variable "{}"'.format(short_name, variable_name))
continue
variable_attributes = lookup_variable.__dict__
aseg_gdf_format, dtype, columns, width_specifier, decimal_places, python_format = variable2aseg_gdf_format(lookup_variable)
else: # Non-lookup or line-indexed variable
short_name = variable_name
variable_attributes = variable.__dict__
aseg_gdf_format, dtype, columns, width_specifier, decimal_places, python_format = variable2aseg_gdf_format(variable)
# Map variable name to standard ASEG-GDF field name if required
short_name = self.settings['aseg_field_mapping'].get(short_name) or short_name
try:
read_chunk_size = int(variable.chunking()[0])
except:
read_chunk_size = CACHE_CHUNK_ROWS # Use default chunking for reads
field_definition = {'variable_name': variable_name,
'short_name': short_name,
'dtype': dtype,
'chunk_size': read_chunk_size,
'columns': columns,
'format': aseg_gdf_format,
'width_specifier': width_specifier,
'decimal_places': decimal_places,
'python_format': python_format
}
fill_value = variable_attributes.get('_FillValue') # System attribute
if fill_value is not None:
field_definition['fill_value'] = fill_value
long_name = variable_attributes.get('long_name')
if long_name is not None:
field_definition['long_name'] = long_name
# Set variable attributes in field definition
variable_attribute_dict = {attribute_name: variable_attributes.get(key.upper())
for key, attribute_name in self.settings['variable_attributes'].items()
if variable_attributes.get(key.upper()) is not None
}
if variable_attribute_dict:
field_definition['variable_attributes'] = variable_attribute_dict
self.field_definitions.append(field_definition)
logger.debug('self.field_definitions: {}'.format(pformat(self.field_definitions)))
# Read overriding field definition values from settings
if self.settings.get('field_definitions'):
for field_definition in self.field_definitions:
overriding_field_definition = self.settings['field_definitions'].get(field_definition['short_name'])
if overriding_field_definition:
field_definition.update(overriding_field_definition)
logger.debug('self.field_definitions: {}'.format(pformat(self.field_definitions)))
def write_dfn_file():
'''
Helper function to output .dfn file
'''
            def write_defns(dfn_file):
                """
                Helper function to write multiple DEFN lines.

                Writes one DEFN record per entry of self.field_definitions,
                appending optional UNITS/NULL/NAME attributes and any extra
                ASEG-GDF attributes configured in settings, then the mandatory
                'END DEFN' record.
                """
                self.defn = 0  # reset DEFN number
                for field_definition in self.field_definitions:
                    short_name = field_definition['short_name']
                    optional_attribute_list = []
                    units = field_definition.get('units')
                    if units:
                        optional_attribute_list.append('UNITS={units}'.format(units=units))
                    fill_value = field_definition.get('fill_value')
                    if fill_value is not None:
                        # Format the null value exactly as data values are formatted
                        optional_attribute_list.append('NULL=' + field_definition['python_format'].format(fill_value).strip())
                    long_name = field_definition.get('long_name')
                    if long_name:
                        optional_attribute_list.append('NAME={long_name}'.format(long_name=long_name))
                    # Check for additional ASEG-GDF attributes defined in settings
                    variable_attributes = field_definition.get('variable_attributes')
                    if variable_attributes:
                        for aseg_gdf_attribute, netcdf_attribute in self.settings['variable_attributes'].items():
                            attribute_value = variable_attributes.get(netcdf_attribute)
                            if attribute_value is not None:
                                optional_attribute_list.append('{aseg_gdf_attribute}={attribute_value}'.format(aseg_gdf_attribute=aseg_gdf_attribute,
                                                                                                               attribute_value=attribute_value
                                                                                                               ))
                    if optional_attribute_list:
                        definition = ' , '.join(optional_attribute_list)
                    else:
                        definition = None
                    self.write_record2dfn_file(dfn_file,
                                               rt='',
                                               name=short_name,
                                               aseg_gdf_format=field_definition['format'],
                                               definition=definition,
                                               )
                # Write 'END DEFN'
                self.write_record2dfn_file(dfn_file,
                                           rt='',
                                           name='END DEFN',
                                           aseg_gdf_format=''
                                           )
                return  # End of function write_defns
            def write_proj(dfn_file):
                """
                Helper function to write PROJ lines describing the CRS.
                From standard:
                DEFN 1 ST=RECD,RT=PROJ; RT: A4
                DEFN 2 ST=RECD,RT=PROJ; COORDSYS: A40: NAME=projection name, POSC projection name
                DEFN 3 ST=RECD,RT=PROJ; DATUM: A40: NAME=datum name, EPSG compliant ellipsoid name
                DEFN 4 ST=RECD,RT=PROJ; MAJ_AXIS: D12.1: UNIT=m, NAME=major_axis, Major axis in units
                relevant to the ellipsoid definition
                DEFN 5 ST=RECD,RT=PROJ; INVFLATT: D14.9: NAME=inverse flattening, 1/f inverse of flattening
                DEFN 6 ST=RECD,RT=PROJ; PRIMEMER: F10.1: UNIT=deg, NAME=prime_meridian, Location of prime
                meridian relative to Greenwich
                DEFN 7 ST=RECD,RT=PROJ; PROJMETH: A30: NAME=projection_method, eg. Transverse Mercator,
                Lambert etc
                DEFN 8 ST=RECD,RT=PROJ; PARAM1: D14.0: NAME=Proj_par1, 1st projecton paramater See Table 1
                DEFN 9 ST=RECD,RT=PROJ; PARAM2: D14.0: NAME=Proj_par2, 2nd projection parameter
                DEFN 10 ST=RECD,RT=PROJ; PARAM3: D14.0: NAME=Proj_par3, 3rd projection parameter
                DEFN 11 ST=RECD,RT=PROJ; PARAM4: D14.0: NAME=Proj_par4, 4th projection parameter
                DEFN 12 ST=RECD,RT=PROJ; PARAM5: D14.0: NAME=Proj_par5, 5th projection parameter
                DEFN 13 ST=RECD,RT=PROJ; PARAM6: D14.0: NAME=Proj_par6, 6th projection parameter
                DEFN 14 ST=RECD,RT=PROJ; PARAM7: D14.0: NAME=Proj_par7, 7th projection parameter
                DEFN 15 ST=RECD,RT=PROJ; END DEFN
                From sample file:
                DEFN 1 ST=RECD,RT=PROJ; RT:A4
                DEFN 2 ST=RECD,RT=PROJ; PROJNAME:A30: COMMENT=GDA94 / MGA zone 54
                DEFN 3 ST=RECD,RT=PROJ; ELLPSNAM:A30: COMMENT=GRS 1980
                DEFN 4 ST=RECD,RT=PROJ; MAJ_AXIS: D12.1: UNIT=m, COMMENT=6378137.000000
                DEFN 5 ST=RECD,RT=PROJ; ECCENT: D12.9: COMMENT=298.257222
                DEFN 6 ST=RECD,RT=PROJ; PRIMEMER: F10.1: UNIT=deg, COMMENT=0.000000
                DEFN 7 ST=RECD,RT=PROJ; PROJMETH: A30: COMMENT=Transverse Mercator
                DEFN 8 ST=RECD,RT=PROJ; PARAM1: D14.0: COMMENT= 0.000000
                DEFN 9 ST=RECD,RT=PROJ; PARAM2: D14.0: COMMENT= 141.000000
                DEFN 10 ST=RECD,RT=PROJ; PARAM3: D14.0: COMMENT= 0.999600
                DEFN 11 ST=RECD,RT=PROJ; PARAM4: D14.0: COMMENT= 500000.000000
                DEFN 12 ST=RECD,RT=PROJ; PARAM5: D14.0: COMMENT=10000000.00000
                DEFN 13 ST=RECD,RT=PROJ; PARAM6: D14.0:
                DEFN 14 ST=RECD,RT=PROJ; PARAM7: D14.0:
                DEFN 15 ST=RECD,RT=PROJ; END DEFN
                PROJGDA94 / MGA zone 54 GRS 1980 6378137.0000 298.257222 0.000000 Transverse Mercator 0.000000 141.000000 0.999600 500000.000000 10000000.00000
                """
                # Pull CRS components from the OGR SpatialReference
                geogcs = self.spatial_ref.GetAttrValue('geogcs')  # e.g. 'GDA94'
                projcs = self.spatial_ref.GetAttrValue('projcs')  # e.g. 'UTM Zone 54, Southern Hemisphere'
                ellipse_name = self.spatial_ref.GetAttrValue('spheroid', 0)
                major_axis = float(self.spatial_ref.GetAttrValue('spheroid', 1))
                prime_meridian = float(self.spatial_ref.GetAttrValue('primem', 1))
                inverse_flattening = float(self.spatial_ref.GetInvFlattening())
                #eccentricity = self.spatial_ref.GetAttrValue('spheroid', 2) # Non-standard definition same as inverse_flattening?
                if self.spatial_ref.IsProjected():
                    if projcs.startswith(geogcs):
                        projection_name = projcs
                    else:
                        # Strip characters that would break the DEFN syntax
                        projection_name = geogcs + ' / ' + re.sub('[\:\,\=]+', '', projcs)  # e.g. 'GDA94 / UTM Zone 54, Southern Hemisphere'
                    projection_method = self.spatial_ref.GetAttrValue('projection').replace('_', ' ')
                    # Extract (name, value) pairs of PARAMETER[...] entries from the WKT
                    projection_parameters = [(key, float(value))
                                             for key, value in re.findall('PARAMETER\["(.+)",(\d+\.?\d*)\]', self.spatial_ref.ExportToPrettyWkt())
                                             ]
                else:  # Unprojected CRS
                    projection_name = geogcs
                    projection_method = None
                    projection_parameters = None
                self.defn = 0  # reset DEFN number
                # write 'DEFN 1 ST=RECD,RT=PROJ; RT:A4'
                self.write_record2dfn_file(dfn_file,
                                           rt='PROJ',
                                           name='RT',
                                           aseg_gdf_format='A4'
                                           )
                self.write_record2dfn_file(dfn_file,
                                           rt='PROJ',
                                           name='COORDSYS',
                                           aseg_gdf_format='A40',
                                           definition='NAME={projection_name}, Projection name'.format(projection_name=projection_name)
                                           )
                self.write_record2dfn_file(dfn_file,
                                           rt='PROJ',
                                           name='DATUM',
                                           aseg_gdf_format='A40',
                                           definition='NAME={ellipse_name}, Ellipsoid name'.format(ellipse_name=ellipse_name)
                                           )
                self.write_record2dfn_file(dfn_file,
                                           rt='PROJ',
                                           name='MAJ_AXIS',
                                           aseg_gdf_format='D12.1',
                                           definition='UNIT={unit}, NAME={major_axis}, Major axis'.format(unit='m', major_axis=major_axis)
                                           )
                self.write_record2dfn_file(dfn_file,
                                           rt='PROJ',
                                           name='INVFLATT',
                                           aseg_gdf_format='D14.9',
                                           definition='NAME={inverse_flattening}, 1/f inverse of flattening'.format(inverse_flattening=inverse_flattening)
                                           )
                self.write_record2dfn_file(dfn_file,
                                           rt='PROJ',
                                           name='PRIMEMER',
                                           aseg_gdf_format='F10.1',
                                           definition='UNIT={unit}, NAME={prime_meridian}, Location of prime meridian'.format(unit='degree', prime_meridian=prime_meridian)
                                           )
                #===============================================================================
                # # Non-standard definitions
                # self.write_record2dfn_file(dfn_file,
                #                            rt='PROJ',
                #                            name='ELLPSNAM',
                #                            aseg_gdf_format='A30',
                #                            definition='NAME={ellipse_name}, Non-standard definition for ellipse name'.format(ellipse_name=ellipse_name)
                #                            )
                #
                # self.write_record2dfn_file(dfn_file,
                #                            rt='PROJ',
                #                            name='PROJNAME',
                #                            aseg_gdf_format='A40',
                #                            definition='NAME={projection_name}, Non-standard definition for projection name'.format(projection_name=projection_name)
                #                            )
                #
                # self.write_record2dfn_file(dfn_file,
                #                            rt='PROJ',
                #                            name='ECCENT',
                #                            aseg_gdf_format='D12.9',
                #                            definition='NAME={eccentricity}, Non-standard definition for ellipsoidal eccentricity'.format(eccentricity=eccentricity)
                #                            )
                #===============================================================================
                # PROJMETH and the PARAMn records only apply to projected CRS
                if projection_method:
                    self.write_record2dfn_file(dfn_file,
                                               rt='PROJ',
                                               name='PROJMETH',
                                               aseg_gdf_format='A30',
                                               definition='NAME={projection_method}, projection method'.format(projection_method=projection_method)
                                               )
                    # Write all projection parameters starting from DEFN 8
                    param_no = 0
                    for param_name, param_value in projection_parameters:
                        param_no += 1
                        self.write_record2dfn_file(dfn_file,
                                                   rt='PROJ',
                                                   name='PARAM{param_no}'.format(param_no=param_no),
                                                   aseg_gdf_format='D14.0',  #TODO: Investigate whether this is OK - it looks dodgy to me
                                                   definition='NAME={param_value}, {param_name}'.format(param_value=param_value, param_name=param_name)
                                                   )
                # Write 'END DEFN'
                self.write_record2dfn_file(dfn_file,
                                           rt='PROJ',
                                           name='END DEFN',
                                           aseg_gdf_format=''
                                           )
                #TODO: Write fixed length PROJ line at end of file
                return  # End of function write_proj
# Create, write and close .dat file
dfn_file = open(self.dfn_out_path, 'w')
dfn_file.write('DEFN ST=RECD,RT=COMM;RT:A4;COMMENTS:A76\n') # TODO: Check this first line
write_defns(dfn_file)
write_proj(dfn_file)
dfn_file.close()
self.info_output('Finished writing .dfn file {}'.format(self.dfn_out_path))
        def write_dat_file(cache_chunk_rows=None, point_mask=None):
            '''
            Helper function to output .dat file.

            Streams rows from the netCDF variables in chunks of
            cache_chunk_rows (default CACHE_CHUNK_ROWS) and writes one
            formatted ASEG-GDF line per point.  point_mask optionally
            restricts the output to a subset of points.
            '''
            def chunk_buffer_generator(point_mask=None):
                '''
                Generator to yield all line strings across all point variables for specified row range
                '''
                def chunk_line_generator(start_index, end_index, point_mask=None):
                    '''
                    Helper Generator to yield line strings for specified rows across all point variables
                    '''
                    # One python format per output column (a multi-column
                    # field repeats its format once per column)
                    python_format_list = []
                    for field_definition in self.field_definitions:
                        for _column_index in range(field_definition['columns']):
                            python_format_list.append(field_definition['python_format'])
                    logger.debug('python_format_list: {}'.format(python_format_list))
                    value_count = len(python_format_list)
                    logger.debug('Reading rows {:n}-{:n}'.format(start_index+1, end_index))
                    line_cache.read_points(start_index, end_index, point_mask=point_mask)
                    logger.debug('Preparing ASEG-GDF lines for rows {:n}-{:n}'.format(start_index+1, end_index))
                    for value_list in line_cache.chunk_buffer_generator():
                        logger.debug('value_list: {}'.format(value_list))
                        # Turn list of values into a string using python_formats
                        yield ''.join([python_format_list[value_index].format(value_list[value_index])
                                       for value_index in range(value_count)]).lstrip()
                # Start of chunk_buffer_generator
                line_cache = RowCache(self)  # Create cache for multiple rows
                # Process all chunks
                point_count = 0
                for chunk_index in range(self.total_points // cache_chunk_rows + 1):
                    for line in chunk_line_generator(start_index=chunk_index*cache_chunk_rows,
                                                     end_index=min((chunk_index+1)*cache_chunk_rows,
                                                                   self.total_points
                                                                   ),
                                                     point_mask=point_mask
                                                     ):
                        point_count += 1
                        # Report progress once every line_report_increment rows
                        if point_count == point_count // self.line_report_increment * self.line_report_increment:
                            self.info_output('{:n} / {:n} rows written'.format(point_count, self.total_points))
                        logger.debug('line: {}'.format(line))
                        yield line
                        # Optional hard cap on output rows (debugging aid)
                        if POINT_LIMIT and (point_count >= POINT_LIMIT):
                            break
                    if POINT_LIMIT and (point_count >= POINT_LIMIT):
                        break
                self.info_output('{:n} rows output'.format(point_count))
            # Start of write_dat_file function
            cache_chunk_rows = cache_chunk_rows or CACHE_CHUNK_ROWS
            # Create, write and close .dat file
            dat_out_file = open(self.dat_out_path, mode='w')
            logger.debug('Writing lines to {}'.format(self.dat_out_path))
            for line in chunk_buffer_generator(point_mask):
                dat_out_file.write(line + '\n')
            dat_out_file.close()
            self.info_output('Finished writing .dat file {}'.format(self.dat_out_path))
# Start of convert2aseg_gdf function
self.dat_out_path = dat_out_path or os.path.splitext(self.netcdf_in_path)[0] + '.dat'
self.dfn_out_path = dfn_out_path or os.path.splitext(dat_out_path)[0] + '.dfn'
get_field_definitions()
write_dfn_file()
write_dat_file(point_mask=point_mask)
def main():
'''
Main function
'''
def get_args():
"""
Handles all the arguments that are passed into the script
:return: Returns a parsed version of the arguments.
"""
parser = argparse.ArgumentParser(description='Convert netCDF file to ASEG-GDF2')
parser.add_argument("-s", "--settings",
help="Path to settings file",
type=str,
dest="settings_path")
parser.add_argument("-r", "--crs",
help="Coordinate Reference System string (e.g. GDA94, EPSG:4283) for output",
type=str,
dest="crs")
parser.add_argument('-d', '--debug', action='store_const', const=True, default=False,
help='output debug information. Default is no debug info')
parser.add_argument('-v', '--verbose', action='store_const', const=True, default=False,
help='output verbosity. | |
rootEventselector(sourceList, destFileName, destPathSplit, \
compress=None, recreate=False, first=0, last=-1, selectionString="",
branchinclude="", branchexclude=""):
# Check arguments
if sourceList == [] or destFileName == "": return 1
if recreate and destFileName in sourceList:
logging.error("cannot recreate destination file if this is also a source file")
return 1
# Open destination file
destFile = openROOTFileCompress(destFileName, compress, recreate)
if not destFile: return 1
# Loop on the root file
retcode = 0
for fileName, pathSplitList in sourceList:
retcode += _copyTreeSubsets(fileName, pathSplitList, destFile, destPathSplit, \
first, last, selectionString, branchinclude, branchexclude)
destFile.Close()
return retcode
# End of ROOTEVENTSELECTOR
##########
##########
# ROOTLS
# Ansi characters
ANSI_BOLD = "\x1B[1m"
ANSI_BLUE = "\x1B[34m"
ANSI_GREEN = "\x1B[32m"
ANSI_END = "\x1B[0m"
# Needed for column width calculation
ANSI_BOLD_LENGTH = len(ANSI_BOLD+ANSI_END)
ANSI_BLUE_LENGTH = len(ANSI_BLUE+ANSI_END)
ANSI_GREEN_LENGTH = len(ANSI_GREEN+ANSI_END)
# Terminal and platform booleans
IS_TERMINAL = sys.stdout.isatty()
IS_WIN32 = sys.platform == 'win32'
def isSpecial(ansiCode, string):
    """Wrap 'string' in the given ANSI code when writing to a real terminal
    on a non-Windows platform; otherwise return it unchanged."""
    use_ansi = IS_TERMINAL and not IS_WIN32
    return (ansiCode + string + ANSI_END) if use_ansi else string
def write(string,indent=0,end=""):
"""Use sys.stdout.write to write the string with an indentation
equal to indent and specifying the end character"""
sys.stdout.write(" "*indent+string+end)
TREE_TEMPLATE = "{0:{nameWidth}}"+"{1:{titleWidth}}{2:{memoryWidth}}"
def _recursifTreePrinter(tree, indent):
    """Recursively print the name, title and total bytes of every branch of
    'tree', indenting sub-branches by two extra spaces per level."""
    branches = tree.GetListOfBranches()
    if len(branches) == 0:
        return
    # Column widths derived from the longest name/title at this level
    widths = {
        "nameWidth": max(len(branch.GetName()) for branch in branches) + 2,
        "titleWidth": max(len(branch.GetTitle()) for branch in branches) + 4,
        "memoryWidth": 1,
    }
    for branch in branches:
        fields = [
            branch.GetName(),
            "\"" + branch.GetTitle() + "\"",
            str(branch.GetTotBytes()),
        ]
        write(TREE_TEMPLATE.format(*fields, **widths), indent, end="\n")
        _recursifTreePrinter(branch, indent + 2)
def _prepareTime(time):
    """Zero-pad a HHMMSS integer into a six character string
    ex : 174512 for 17h 45m 12s
    ex : 094023 for 09h 40m 23s"""
    padded = str(time).zfill(6)
    # Keep only the last six characters (matches the original behaviour for
    # inputs longer than six digits)
    return padded[-6:]
MONTH = {1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'Jun', \
7:'Jul',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'}
LONG_TEMPLATE = \
isSpecial(ANSI_BOLD,"{0:{classWidth}}")+"{1:{timeWidth}}" + \
"{2:{nameWidth}}{3:{titleWidth}}"
def _rootLsPrintLongLs(keyList, indent, treeListing):
    """Print a list of TKey in columns
    pattern : classname, datetime, name and title

    If treeListing is set, also print the branch structure of TTree keys and
    the full contents of THnSparse keys."""
    if len(keyList) > 0:  # Width informations
        maxCharClass = max([len(key.GetClassName()) for key in keyList])
        maxCharTime = 12
        maxCharName = max([len(key.GetName()) for key in keyList])
        dic = { \
            "classWidth": maxCharClass+2, \
            "timeWidth": maxCharTime+2, \
            "nameWidth": maxCharName+2, \
            "titleWidth": 1}
        # BUG FIX: removed 'date = ROOT.Long(0)' -- the value was always
        # overwritten inside the loop below, and ROOT.Long no longer exists
        # in Python 3 builds of PyROOT.
        for key in keyList:
            datime = key.GetDatime()
            time = datime.GetTime()
            date = datime.GetDate()
            year = datime.GetYear()
            time = _prepareTime(time)
            # e.g. 'Jan 01 17:45 2019 '
            rec = \
                [key.GetClassName(), \
                 MONTH[int(str(date)[4:6])]+" "+str(date)[6:]+ \
                 " "+time[:2]+":"+time[2:4]+" "+str(year)+" ", \
                 key.GetName(), \
                 "\""+key.GetTitle()+"\""]
            write(LONG_TEMPLATE.format(*rec, **dic), indent, end="\n")
            if treeListing and isTreeKey(key):
                tree = key.ReadObj()
                _recursifTreePrinter(tree, indent+2)
            if treeListing and isTHnSparseKey(key):
                hs = key.ReadObj()
                hs.Print('all')
##
# The code of the getTerminalSize function can be found here :
# https://gist.github.com/jtriley/1108174
# Thanks jtriley !!
import os
import shlex
import struct
import platform
import subprocess
def getTerminalSize():
    """ getTerminalSize()
    - get width and height of console
    - works on linux,os x,windows,cygwin(windows)
    originally retrieved from:
    http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python"""
    current_os = platform.system()
    size = None
    if current_os == 'Windows':
        size = _get_terminal_size_windows()
        if size is None:
            # needed for window's python in cygwin's xterm!
            size = _get_terminal_size_tput()
    if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
        size = _get_terminal_size_linux()
    if size is None:
        # none of the platform-specific probes worked
        size = (80, 25)  # default value
    return size
def _get_terminal_size_windows():
    """Query the Windows console size via the Win32 API.

    Returns (width, height), or None on any failure (including running on a
    non-Windows platform, where the ctypes import of windll fails)."""
    try:
        from ctypes import windll, create_string_buffer
        # std handles: stdin is -10, stdout is -11, stderr is -12
        handle = windll.kernel32.GetStdHandle(-12)
        info = create_string_buffer(22)
        if windll.kernel32.GetConsoleScreenBufferInfo(handle, info):
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom,
             maxx, maxy) = struct.unpack("hhhhHhhhhhh", info.raw)
            return right - left + 1, bottom - top + 1
    except:
        pass
def _get_terminal_size_tput():
    """Query the terminal size with the 'tput' utility.

    Returns (cols, rows), or None if tput is unavailable or fails.
    src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
    """
    try:
        # BUG FIX: check_call returns the process exit status (0), not its
        # output, so cols/rows always came out as 0.  check_output captures
        # the value tput prints.
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
        return (cols, rows)
    except:
        pass
def _get_terminal_size_linux():
    """Return (columns, rows) on Linux/OS X/cygwin, or None on failure.

    Tries, in order: the TIOCGWINSZ ioctl on stdin/stdout/stderr, the same
    ioctl on the controlling terminal, then the LINES/COLUMNS environment
    variables."""
    def ioctl_GWINSZ(fd):
        # Ask the kernel for the window size of fd; returns (rows, cols)
        # or None when fd is not a tty (or fcntl/termios are unavailable).
        try:
            import fcntl
            import termios
            cr = struct.unpack('hh',
                               fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
            return cr
        except:
            pass
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            # Fall back to the controlling terminal device
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except:
            pass
    if not cr:
        try:
            # Last resort: environment variables (strings; converted below)
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except:
            return None
    # cr is (rows, cols); return (cols, rows)
    return int(cr[1]), int(cr[0])
# End of getTerminalSize code
##
def _rootLsPrintSimpleLs(keyList, indent, oneColumn):
    """Print the key names in ls-style columns.

    - blue for directories
    - green for trees
    :param keyList: list of TKey objects to display
    :param indent: number of leading spaces on each printed line
    :param oneColumn: if True, force a single column regardless of width
    """
    # This code is adapted from the pprint_list function here :
    # http://stackoverflow.com/questions/25026556/output-list-like-ls
    # Thanks hawkjo !!
    if len(keyList) == 0:
        return
    (term_width, term_height) = getTerminalSize()
    term_width = term_width - indent
    min_chars_between = 2
    min_element_width = min(len(key.GetName()) for key in keyList) \
                        + min_chars_between
    max_element_width = max(len(key.GetName()) for key in keyList) \
                        + min_chars_between
    if max_element_width >= term_width:
        ncol, col_widths = 1, [1]
    else:
        # Start with max possible number of columns and reduce until it fits.
        # BUG FIX: use floor division so ncol stays an int under Python 3
        # (true division would make range(ncol) below raise TypeError).
        ncol = 1 if oneColumn else min(len(keyList),
                                       term_width // min_element_width)
        while True:
            col_widths = \
                [max(len(key.GetName()) + min_chars_between
                     for j, key in enumerate(keyList) if j % ncol == i)
                 for i in range(ncol)]
            if sum(col_widths) <= term_width:
                break
            else:
                ncol -= 1
    for i, key in enumerate(keyList):
        if i % ncol == 0:
            write("", indent)  # indentation
        # Don't add spaces after the last element of the line or of the list
        if (i + 1) % ncol != 0 and i != len(keyList) - 1:
            if not IS_TERMINAL:
                write(key.GetName().ljust(col_widths[i % ncol]))
            elif isDirectoryKey(keyList[i]):
                # the ANSI escape codes add invisible characters, so the
                # padding width must be widened by their length
                write(isSpecial(ANSI_BLUE, key.GetName()).ljust(
                    col_widths[i % ncol] + ANSI_BLUE_LENGTH))
            elif isTreeKey(keyList[i]):
                write(isSpecial(ANSI_GREEN, key.GetName()).ljust(
                    col_widths[i % ncol] + ANSI_GREEN_LENGTH))
            else:
                write(key.GetName().ljust(col_widths[i % ncol]))
        else:  # No spaces after the last element of the line or of the list
            if not IS_TERMINAL:
                write(key.GetName())
            elif isDirectoryKey(keyList[i]):
                write(isSpecial(ANSI_BLUE, key.GetName()))
            elif isTreeKey(keyList[i]):
                write(isSpecial(ANSI_GREEN, key.GetName()))
            else:
                write(key.GetName())
    write('\n')
def _rootLsPrint(keyList, indent, oneColumn, longListing, treeListing):
    """Forward to the long or the simple ls-style printer, depending on
    which display options were requested."""
    useLongFormat = longListing or treeListing
    if useLongFormat:
        _rootLsPrintLongLs(keyList, indent, treeListing)
    else:
        _rootLsPrintSimpleLs(keyList, indent, oneColumn)
def _rootLsProcessFile(fileName, pathSplitList, manySources, indent, \
                       oneColumn, longListing, treeListing):
    '''rootls main routine for one file looping over paths in the file
    sorts out directories and key, and loops over all paths, then forwards to
    (_rootLsPrintLongLs or _rootLsPrintSimpleLs) - split in _rootLsPrint
    args:
        oneColumn (bool):
        longListing (bool):
        treeListing (bool):
        indent (int): how many columns the printout should be indented globally
        manySources (bool): if more than one file is printed
        fileName (str): the root file name
        pathSplitList: a list of subdirectories,
            each being represented as a list itself with a string per level
            e.g.
            [['Method_BDT','BDT']]
    Returns:
        retcode (int): 0 in case of success, 1 if the file could not be opened
    '''
    retcode = 0
    rootFile = openROOTFile(fileName)
    if not rootFile: return 1
    keyList,dirList = keyClassSpliter(rootFile,pathSplitList)
    # keyList lists the TKey objects from pathSplitList
    # dirList is 'just the pathSplitList' for what aren't TKeys
    # When several source files are printed, prefix the listing with the file name
    if manySources: write("{0} :".format(fileName)+"\n")
    _rootLsPrint(keyList, indent, oneColumn, longListing, treeListing)
    # Loop on the directories
    manyPathSplits = len(pathSplitList) > 1
    indentDir = 2 if manyPathSplits else 0
    for pathSplit in dirList:
        keyList = getKeyList(rootFile,pathSplit)
        keyListSort(keyList)
        # When several paths were requested, print each directory path as a header
        if manyPathSplits: write("{0} :".format("/".join(pathSplit)),indent,end="\n")
        _rootLsPrint(keyList, indent+indentDir, oneColumn, longListing, treeListing)
    rootFile.Close()
    return retcode
def rootLs(sourceList, oneColumn=False, longListing=False, treeListing=False):
    '''rootls main routine for an arbitrary number of files
    args:
        oneColumn (bool):
        longListing (bool):
        treeListing (bool):
        sourceList: a list of tuples with one list element per file
           the first tuple entry being the root file,
           the second a list of subdirectories,
               each being represented as a list itself with a string per level
           e.g.
           rootls tutorial/tmva/TMVA.root:Method_BDT/BDT turns into
           [('tutorials/tmva/TMVA.root', [['Method_BDT','BDT']])]
    returns:
        retcode (int): 0 in case of success
    '''
    # Check arguments
    if sourceList == []:
        return 1
    # process the files in alphabetical order of their names
    tupleListSort(sourceList)
    severalFiles = len(sourceList) > 1
    globalIndent = 2 if severalFiles else 0
    # Loop on the ROOT files, accumulating per-file return codes
    retcode = 0
    for rootFileName, pathSplitList in sourceList:
        retcode += _rootLsProcessFile(rootFileName, pathSplitList,
                                      severalFiles, globalIndent,
                                      oneColumn, longListing, treeListing)
    return retcode
# End of ROOTLS
##########
##########
# ROOTMKDIR
# Error message template for the rootmkdir routines; formatted with the
# path of the directory that could not be created.
MKDIR_ERROR = "cannot create directory '{0}'"
def _createDirectories(rootFile,pathSplit,parents):
"""Same behaviour as createDirectory but allows the possibility
to build an whole path recursively with the option \"parents\" """
retcode = 0
lenPathSplit = len(pathSplit)
if lenPathSplit == 0:
pass
elif parents:
for i in range(lenPathSplit):
currentPathSplit = pathSplit[:i+1]
if not (isExisting(rootFile,currentPathSplit) \
and isDirectory(rootFile,currentPathSplit)):
retcode += createDirectory(rootFile,currentPathSplit)
else:
doMkdir = True
for i in range(lenPathSplit-1):
currentPathSplit = pathSplit[:i+1]
if not (isExisting(rootFile,currentPathSplit) \
and isDirectory(rootFile,currentPathSplit)):
doMkdir = False
break
if | |
%s: Trans. prob. is INCREASED by a factor of %.4f; P %s %f, MD = %f, CI(%f, %f)'
% (states[si], states[sj], Mod[si-1, sj-1], s, val, md, cil, cih))
else:
print('%s -> %s: Trans. prob. is increased by a factor of %.4f; P %s %f, MD = %f, CI(%f, %f)'
% (states[si], states[sj], Mod[si-1, sj-1], s, val, md, cil, cih))
else:
val = p
if val == 1:
s = '>'
val = 1 - 1.0 / nboot
if val == 0:
s = '<'
val = 1.0 / nboot
Sig[si-1, sj-1] = val
# division by 2.0 to make the test two-sided!
if val < alpha:#/2.0:
print('%s -> %s: Trans. prob. is DECREASED by a factor of %.4f; P %s %f, MD = %f, CI(%f, %f)'
% (states[si], states[sj], Mod[si - 1, sj - 1], s, val, md, cil, cih))
else:
print('%s -> %s: Trans. prob. is decreased by a factor of %.4f; P %s %f, MD = %f, CI(%f, %F)'
% (states[si], states[sj], Mod[si - 1, sj - 1], s, val, md, cil, cih))
############################################################################################################
if len(fig_file) > 0:
save_figure(fig_file)
return M_us, Laser, Base
def build_markov_matrix_blocks(MX, tdown, large_bin, overlap_mode=False):
    """
    pMX = build_markov_matrix_blocks(MX, down, large_bin)
    build sequence of Markov matrices; build one Matrix for each large bin (block) of
    duration $large_bin by using smaller bins of duration $down;
    i.e. $down <= $large_bin
    :param MX: np.array, with time bins on fine (tdown) time scale
    :param tdown: fine time scale
    :param large_bin: coarse time scale
    :param overlap_mode: if True, include the last fine time, bordering the next large bin
    :return: pMX, 3x3xtime np.array, series of markov matrices; time is the third dimension
    """
    n_fine = MX.shape[1]                    # number of time bins on fine time scale
    bins_per_block = int(large_bin / tdown) # fine bins contained in one block
    n_blocks = int(n_fine / bins_per_block) # number of coarse (large) time bins
    n_trials = MX.shape[0]
    pMX = np.zeros((3, 3, n_blocks))
    for blk in range(n_blocks):
        lo = blk * bins_per_block
        hi = lo + bins_per_block + (1 if overlap_mode else 0)
        block = MX[:, lo:hi]
        counts = np.zeros((3, 3))   # transition counts state_i -> state_j
        origins = np.zeros((3,))    # how often each state was a transition origin
        width = block.shape[1]
        for trial in range(n_trials):
            seq = block[trial, :]
            for t in range(width - 1):
                a = int(seq[t]) - 1
                b = int(seq[t + 1]) - 1
                counts[a, b] += 1
                origins[a] += 1
        # row-normalize the counts into transition probabilities
        for a in range(3):
            counts[a, :] = counts[a, :] / origins[a]
        pMX[:, :, blk] = counts
    return pMX
def transition_markov_strength(ppath, rec_file, laser_tend, tdown, dur, bootstrap_mode=0,
backup='', pstationary=False, stats_lastpoint=True, paired_stats=True, nboot=1000, ma_thr=0):
"""
Cumulative transition probabilities:
How likely is is that a specific brain state transitions happens within a given time interval?
The function compares the cumulative probabilities during baseline (including time points
from -$pre till laser onset) with those during the laser stimulation interval (of duratin $laser_tend)
The brainstate is downsampled to time bins of duration $tdown.
See also function &quantify_transition()
NOTE: The CDF for a transition from si -> sj within time interval X is defined as P(si -> sj, t <= X)
I define the CDF for the discrete timepoint tdown as P(si -> sj, t <= tdown).
and count whether a brain state change happened between bin [-todown, 0[ and bin [0, tdown].
In other words the brain state right before the laser serves as si P(si -> sj, t <= X)
Example call:
sleepy.transition_markov_strength(ppath, recs, 120, 120, 20, np.arange(0, 121, 20))
:param ppath: base folder with sleep recordings
:param rec_file: file OR list of recordings
:param pre: time before laser onset [s]
:param laser_tend: duration of laser stimulation
:param tdown: downsample brainstate sequence to $tdown time bins; same as parameter tdown in sleepy.transition_analysis()
:param dur: list/vector with cumulative (=increasing) time intervals for each the cum. probabilities are computed
:param bootstrap_mode:
bootstrap_mode == 0: Take inter-mouse variance and inter-trial variance (of each mouse) into account.
That is, bootstrapping re-models the variance expected when re-doing the same
experimental design (same mouse number and total trial number).
To account for potentially different number of trials per mouse, resample the data
during each iteration the following way: Assume that there are n laser trials from m mice;
randomly select (with replacment) ntrial mice; then select from each mouse randomly one trial.
bootstrap_mode == 1: Only take inter-trial variance (of each mouse) into account; That is,
bootstrapping models the variance expected when redoing the experiment
with exactly the same mice.
If unsure, use bootstrap_mode = 0
:param backup: if a recording is not located in $ppath, the function looks for it in folder $backup
:param pstationary: if True, we assume that the laser induced changes in transition probabilities
are stationary; i.e. they are constant acorss the laser stimulation interval.
:param stats_lastpoint: if True, test whether the laser time point during laser (at laser_tend) is significantly different
from the corresponding time point in the baseline interval; if False, compare the averages of the cumulative distributions
to each other.
:param paired_stats: if True, treat baseline interval and following laser interval as paired statistics.
If False, treat baseline and laser interval as independent.
:param nboot: number of bootstrap iterations; >1000 recommended; but for a quick check just set to ~100
:param ma_thr: if > 0, set wake periods < $ma_thr to NREM.
:return:
"""
alpha = 0.05
fontsize = 12
# Note: We increase the preceding baseline period by one bin, to account for the
# fact that for the laser period, we also use one bin right before the laser as starting point.
pre = laser_tend + tdown
if type(rec_file) == str:
E = load_recordings(ppath, rec_file)[1]
else:
E = rec_file
post = pre + laser_tend
# set path for each recording: either ppath or backup
rec_paths = {}
mouse_ids = {}
for m in E:
idf = re.split('_', m)[0]
if len(backup) == 0:
rec_paths[m] = ppath
else:
if os.path.isdir(os.path.join(ppath, m)):
rec_paths[m] = ppath
else:
rec_paths[m] = backup
if not idf in mouse_ids:
mouse_ids[idf] = 1
mouse_ids = list(mouse_ids.keys())
# Dict: Mouse_id --> all trials of this mouse
MouseMx = {idf:[] for idf in mouse_ids}
for m in E:
trials = _whole_mx(rec_paths[m], m, pre, post, tdown, tstart=0, tend=-1, ma_thr=ma_thr)
idf = re.split('_', m)[0]
MouseMx[idf] += trials
ntdown = len(trials[0])
for idf in mouse_ids:
MouseMx[idf] = np.array(MouseMx[idf])
# dict mouse_id --> number of trials
num_trials = {k:len(MouseMx[k]) for k in MouseMx}
# number of all trials
ntrials = sum(list(num_trials.values()))
# number of mice
nmice = len(mouse_ids)
# states
cum_base = dict()
cum_laser = dict()
bounds_bsl = dict()
bounds_lsr = dict()
states = {1:'R', 2:'W', 3:'N'}
for si in range(1,4):
for sj in range(1,4):
id = states[si] + states[sj]
cum_base[id] = np.zeros((nboot,len(dur)))
cum_laser[id] = np.zeros((nboot,len(dur)))
bounds_bsl[id] = np.zeros((2, len(dur)))
bounds_lsr[id] = np.zeros((2,len(dur)))
# that's the matrix used for computation in each bootstrap iteration
if bootstrap_mode == 1:
# each mouse contributes the same number of trials
mouse_trials = int(np.mean(list(num_trials.values())))
MXsel = np.zeros((mouse_trials*len(mouse_ids), ntdown), dtype='int')
else:
MXsel = np.zeros((ntrials, ntdown), dtype='int')
for b in range(nboot):
if (b % 100) == 0:
print('done with iteration %d' % b)
if bootstrap_mode == 1:
i = 0
for idf in mouse_ids:
num = num_trials[idf]
itrials = rand.randint(0, num, (mouse_trials,))
sel = MouseMx[idf][itrials,:]
MXsel[i*mouse_trials:(i+1)*mouse_trials,:] = sel
i += 1
else:
irand_mice = rand.randint(0, nmice, ntrials)
i=0
for j in irand_mice:
idf = mouse_ids[j]
itrial = rand.randint(0, num_trials[idf])
MXsel[i,:] = MouseMx[idf][itrial,:]
i+=1
base_boot = np.zeros((3,3, len(dur)))
lsr_boot = np.zeros((3,3, len(dur)))
k=0
for d in dur:
base_boot[:,:,k] = quantify_transition(MXsel.astype('int'), pre, laser_tend, tdown, False, d, pstationary=pstationary)
lsr_boot[:,:,k] = quantify_transition(MXsel.astype('int'), pre, laser_tend, tdown, True, d, pstationary=pstationary)
k+=1
for si in states:
for sj in states:
id = states[si] + states[sj]
cum_base[id][b,:] = base_boot[si-1, sj-1,:]
cum_laser[id][b,:] = lsr_boot[si-1, sj-1,:]
# bounds_bsl[id][0,:] is the lower bounds
for si in states:
for sj in states:
id = states[si] + states[sj]
bounds_bsl[id][0,:] = np.sort(cum_base[id], axis=0)[int(nboot*(alpha / 2)),:]
bounds_bsl[id][1,:] = np.sort(cum_base[id], axis=0)[-int(nboot * (alpha / 2)), :]
bounds_lsr[id][0,:] = np.sort(cum_laser[id], axis=0)[int(nboot*(alpha / 2)),:]
bounds_lsr[id][1,:] = np.sort(cum_laser[id], axis=0)[-int(nboot * (alpha / 2)), | |
i in range(times):
if are_segments_synchronized():
return
time.sleep(sleeptime)
raise Exception('segments are not in sync after %d seconds' % (times * sleeptime))
@when('at least one segment is resynchronized')
@then('at least one segment is resynchronized')
@given('at least one segment is resynchronized')
def impl(context):
    """Poll until at least one segment reports resynchronized; fail after times*sleeptime seconds."""
    times = 30       # number of polling attempts
    sleeptime = 10   # seconds between attempts
    for i in range(times):
        if is_any_segment_resynchronized():
            return
        time.sleep(sleeptime)
    raise Exception('segments are not in resync after %d seconds' % (times * sleeptime))
@when('table "{table_list}" is assumed to be in dirty state in "{dbname}"')
@then('table "{table_list}" is assumed to be in dirty state in "{dbname}"')
@given('table "{table_list}" is assumed to be in dirty state in "{dbname}"')
def impl(context, table_list, dbname):
    """Dirty each comma-separated table by modifying its data, then snapshot it for later verification."""
    tables = table_list.split(',')
    for t in tables:
        modify_data(context, t.strip(), dbname)
        backup_data(context, t.strip(), dbname)
    # also record the database's distribution policy once, after all tables are dirtied
    get_distribution_policy(dbname)
@given('all the data from "{dbname}" is saved for verification')
@when('all the data from "{dbname}" is saved for verification')
@then('all the data from "{dbname}" is saved for verification')
def impl(context, dbname):
    """Snapshot the entire database contents for later comparison."""
    backup_db_data(context, dbname)
@then(
    'partition "{partition}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"')
@when(
    'partition "{partition}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"')
@given(
    'partition "{partition}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"')
@then(
    'partition "{partition}" in partition level "{partitionlevel}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"')
@when(
    'partition "{partition}" in partition level "{partitionlevel}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"')
@given(
    'partition "{partition}" in partition level "{partitionlevel}" of partition table "{table_list}" is assumed to be in dirty state in "{dbname}" in schema "{schema}"')
def impl(context, partition, table_list, dbname, schema, partitionlevel=1):
    """Dirty the named partition of each listed table and snapshot it for verification."""
    tables = table_list.split(',')
    for t in tables:
        part_t = get_partition_names(schema, t.strip(), dbname, partitionlevel, partition)
        # BUG FIX: the old code merely printed the (empty) result and then
        # crashed on part_t[0][0] with an IndexError; fail fast with a clear
        # message instead.
        if len(part_t) < 1 or len(part_t[0]) < 1:
            raise Exception('No partition %s found for table %s in database %s: %s'
                            % (partition, t.strip(), dbname, part_t))
        dirty_table_name = part_t[0][0].strip()
        modify_partition_data(context, dirty_table_name, dbname, int(partition))
        backup_data(context, dirty_table_name, dbname)
def validate_timestamp(ts):
    """Raise if ts is not a valid 14-character numeric dump timestamp
    (YYYYMMDDHHMMSS); return None when it is."""
    try:
        int(ts)
    except Exception:
        raise Exception('Timestamp is not valid %s' % ts)
    if len(ts) != 14:
        raise Exception('Timestamp is invalid %s' % ts)
@when('the subdir from gpcrondump is stored')
@then('the subdir from gpcrondump is stored')
def impl(context):
    """Parse 'Dump subdirectory = <dir>' from the captured output into context.backup_subdir."""
    stdout = context.stdout_message
    for line in stdout.splitlines():
        if 'Dump subdirectory' in line:
            # line looks like 'Dump subdirectory = <dir>'; take everything after '='
            log_msg, delim, subdir = line.partition('=')
            context.backup_subdir = subdir.strip()
            return
    raise Exception('Dump subdirectory not found %s' % stdout)
def get_timestamp_from_output(context):
    """Extract and validate the dump timestamp ('Timestamp key = ...') from
    the captured command output; raise if none is present."""
    stdout = context.stdout_message
    for line in stdout.splitlines():
        if 'Timestamp key = ' not in line:
            continue
        log_msg, delim, timestamp = line.partition('=')
        ts = timestamp.strip()
        validate_timestamp(ts)
        return ts
    raise Exception('Timestamp not found %s' % stdout)
@given('the full backup timestamp from gpcrondump is stored')
@when('the full backup timestamp from gpcrondump is stored')
@then('the full backup timestamp from gpcrondump is stored')
def impl(context):
    """Store the full backup timestamp on the context and persist it to the timestamp JSON file."""
    context.full_backup_timestamp = get_timestamp_from_output(context)
    _write_timestamp_to_json(context)
@when('the timestamp from gpcrondump is stored')
@then('the timestamp from gpcrondump is stored')
def impl(context):
    """Store the backup timestamp on the context and persist it to the timestamp JSON file."""
    context.backup_timestamp = get_timestamp_from_output(context)
    _write_timestamp_to_json(context)
@when('the timestamp is labeled "{lbl}"')
def impl(context, lbl):
    """Remember the current output timestamp under a user-supplied label (in-memory only)."""
    if not 'timestamp_labels' in global_labels:
        global_labels['timestamp_labels'] = {}
    global_labels['timestamp_labels'][lbl] = get_timestamp_from_output(context)
@when('the timestamp for scenario "{scenario_number}" is labeled "{lbl}"')
def impl(context, scenario_number, lbl):
    """Label the current output timestamp under a per-scenario label group and persist it to JSON."""
    labels_key = 'timestamp_labels' + scenario_number
    if not labels_key in global_labels:
        global_labels[labels_key] = {}
    global_labels[labels_key][lbl] = get_timestamp_from_output(context)
    _write_label_to_json(context, labels_key, lbl)
@given('there is a list to store the incremental backup timestamps')
def impl(context):
    """Initialise the per-scenario list that accumulates incremental backup timestamps."""
    context.inc_backup_timestamps = []
@given('there is a list to store the backup timestamps')
def impl(context):
    """Initialise the per-scenario list that accumulates backup timestamps."""
    context.backup_timestamp_list = []
@then('the timestamp from gpcrondump is stored in a list')
@when('the timestamp from gpcrondump is stored in a list')
def impl(context):
    """Store the backup timestamp and also append it to the incremental-timestamp list."""
    context.backup_timestamp = get_timestamp_from_output(context)
    context.inc_backup_timestamps.append(context.backup_timestamp)
    _write_timestamp_to_json(context)
@when('the timestamp for database dumps "{db_list}" are stored')
def impl(context, db_list):
    """Store per-database dump timestamps and persist them (grouped by scenario) to the timestamp JSON file."""
    context.db_timestamps = get_timestamp_from_output_for_db(context)
    scenario_name = context._stack[0]['scenario'].name
    # dict.has_key() was removed in Python 3; `in` works identically in both.
    if scenario_name not in global_timestamps:
        global_timestamps[scenario_name] = list()
    # list(...) is a no-op on Python 2 but makes the dict-values view
    # JSON-serializable on Python 3.
    global_timestamps[scenario_name].append(list(context.db_timestamps.values()))
    with open(timestamp_json, 'w') as outfile:
        json.dump(global_timestamps, outfile)
def get_timestamp_from_output_for_db(context):
    """Parse per-database dump timestamps from the captured output.

    Scans stdout for alternating 'Target database = <db>' and
    'Dump key = <timestamp>' lines and returns {db_name: timestamp}.
    Raises if a timestamp appears without a preceding database line,
    or if no timestamps are found at all.
    """
    db_timestamps = {}
    ts = None
    database = None
    stdout = context.stdout_message
    for line in stdout.splitlines():
        if 'Target database' in line:
            log_msg, delim, database = line.partition('=')
            db = database.strip()
        elif 'Dump key ' in line:
            log_msg, delim, timestamp = line.partition('=')
            ts = timestamp.strip()
            validate_timestamp(ts)
            # TODO: database could be an empty string; need to check result of line.partition()
            if database is None:
                raise Exception('Database not found for timestamp "%s"' % ts)
            db_timestamps[db] = ts
            # reset so each 'Dump key' must be paired with its own database line
            database = None
    if not db_timestamps:
        raise Exception('No Timestamps found')
    return db_timestamps
@then('verify data integrity of database "{dbname}" between source and destination system, work-dir "{dirname}"')
def impl(context, dbname, dirname):
    """Run every .sql file in dirname against both the gptransfer source and
    destination clusters and fail on any gpdiff difference.

    Connection details come from the GPTRANSFER_{SOURCE,DEST}_* environment
    variables; per-query .ans/.out/.diff files are written next to the .sql.
    """
    dbconn_src = 'psql -p $GPTRANSFER_SOURCE_PORT -h $GPTRANSFER_SOURCE_HOST -U $GPTRANSFER_SOURCE_USER -d %s' % dbname
    dbconn_dest = 'psql -p $GPTRANSFER_DEST_PORT -h $GPTRANSFER_DEST_HOST -U $GPTRANSFER_DEST_USER -d %s' % dbname
    for filename in os.listdir(dirname):
        if filename.endswith('.sql'):
            filename_prefix = os.path.splitext(filename)[0]
            ans_file_path = os.path.join(dirname, filename_prefix + '.ans')
            out_file_path = os.path.join(dirname, filename_prefix + '.out')
            diff_file_path = os.path.join(dirname, filename_prefix + '.diff')
            # run the command to get the exact data from the source system
            command = '%s -f %s > %s' % (dbconn_src, os.path.join(dirname, filename), ans_file_path)
            run_command(context, command)
            # run the command to get the data from the destination system, locally
            command = '%s -f %s > %s' % (dbconn_dest, os.path.join(dirname, filename), out_file_path)
            run_command(context, command)
            gpdiff_cmd = 'gpdiff.pl -w -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gpd_init=test/behave/mgmt_utils/steps/data/global_init_file %s %s > %s' % (
                ans_file_path, out_file_path, diff_file_path)
            run_command(context, gpdiff_cmd)
            # gpdiff exits non-zero when the outputs differ
            if context.ret_code != 0:
                with open(diff_file_path, 'r') as diff_file:
                    diff_file_contents = diff_file.read()
                raise Exception(
                    "Found difference between source and destination system, see %s. \n Diff contents: \n %s" % (
                        diff_file_path, diff_file_contents))
@then('run post verifying workload under "{dirname}"')
def impl(context, dirname):
    """Run every .sql file in dirname against the gptransfer destination and
    compare each result against its pre-recorded .ans file with gpdiff.

    Unlike the source-vs-destination check above, failures are detected by a
    second pass over the .diff files (non-empty diff => mismatch).
    """
    for filename in os.listdir(dirname):
        if filename.endswith('.sql'):
            filename_prefix = os.path.splitext(filename)[0]
            ans_file_path = os.path.join(dirname, filename_prefix + '.ans')
            out_file_path = os.path.join(dirname, filename_prefix + '.out')
            diff_file_path = os.path.join(dirname, filename_prefix + '.diff')
            # run the command to get the data from the destination system, locally
            dbconn = 'psql -d template1 -p $GPTRANSFER_DEST_PORT -U $GPTRANSFER_DEST_USER -h $GPTRANSFER_DEST_HOST'
            command = '%s -f %s > %s' % (dbconn, os.path.join(dirname, filename), out_file_path)
            run_command(context, command)
            gpdiff_cmd = 'gpdiff.pl -w -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gpd_init=test/behave/mgmt_utils/steps/data/global_init_file %s %s > %s' % (
                ans_file_path, out_file_path, diff_file_path)
            run_command(context, gpdiff_cmd)
    for filename in os.listdir(dirname):
        full_filename_path = os.path.join(dirname, filename)
        if filename.endswith('.diff') and os.path.getsize(full_filename_path) > 0:
            with open(full_filename_path, 'r') as diff_file:
                diff_file_contents = diff_file.read()
            # if there is some difference generated into the diff file, raise expception
            raise Exception(
                "Found difference between source and destination system, see %s. \n Diff contents: \n %s" % (
                    full_filename_path, diff_file_contents))
@then('verify that the incremental file has the stored timestamp')
def impl(context):
    """Verify the increments file of the stored full backup contains exactly the stored incremental timestamp."""
    inc_file_name = 'gp_dump_%s_increments' % context.full_backup_timestamp
    # dump files live under db_dumps/<YYYYMMDD>/ -- the date prefix of the timestamp
    subdirectory = context.full_backup_timestamp[0:8]
    full_path = os.path.join(master_data_dir, 'db_dumps', subdirectory, inc_file_name)
    if not os.path.isfile(full_path):
        raise Exception("Can not find increments file: %s" % full_path)
    contents = ""
    with open(full_path) as fd:
        contents = fd.read().strip()
    if context.backup_timestamp != contents:
        raise Exception(
            "The increments file '%s' does not contain the timestamp %s" % (full_path, context.backup_timestamp))
def check_increments_file_for_list(context, location):
    """Verify the increments file under `location` lists exactly the
    timestamps collected in context.inc_backup_timestamps (order-insensitive).

    :param location: dump root directory containing db_dumps/<YYYYMMDD>/
    """
    inc_file_name = 'gp_dump_%s_increments' % context.full_backup_timestamp
    subdirectory = context.full_backup_timestamp[0:8]
    full_path = os.path.join(location, 'db_dumps', subdirectory, inc_file_name)
    if not os.path.isfile(full_path):
        raise Exception("Can not find increments file: %s" % full_path)
    found_timestamps = []
    contents = ""
    with open(full_path) as fd:
        contents = fd.read()
    # one timestamp per line; skip blank lines
    for line in contents.splitlines():
        line = line.strip()
        if not line:
            continue
        found_timestamps.append(line)
    # compare as sorted lists so file ordering does not matter
    found_timestamps = sorted(found_timestamps)
    context.inc_backup_timestamps = sorted(context.inc_backup_timestamps)
    if found_timestamps != context.inc_backup_timestamps:
        print "Found timestamps: "
        print found_timestamps
        print "Expected timestamps: "
        print context.inc_backup_timestamps
        raise Exception("Expected timestamps not found")
@then('verify that the incremental file in "{location}" has all the stored timestamps')
def impl(context, location):
    """Check the increments file under the given dump location."""
    check_increments_file_for_list(context, location)
@then('verify that the incremental file has all the stored timestamps')
def impl(context):
    """Check the increments file under the default master data directory."""
    check_increments_file_for_list(context, master_data_dir)
@then('verify that the plan file is created for the latest timestamp')
def impl(context):
    """Verify exactly one restore plan file exists for the newest incremental timestamp."""
    context.inc_backup_timestamps = sorted(context.inc_backup_timestamps)
    latest_ts = context.inc_backup_timestamps[-1]
    plan_file_dir = os.path.join(master_data_dir, 'db_dumps', latest_ts[0:8])
    # NOTE(review): the leading '/' makes a '//' in the pattern -- presumably
    # harmless because plan_file_dir is already absolute; confirm.
    plan_file_count = len(glob.glob('/%s/*%s*_plan' % (plan_file_dir, latest_ts)))
    if plan_file_count != 1:
        raise Exception('Expected only one plan file, found %s' % plan_file_count)
    filename = '%s/gp_restore_%s_plan' % (plan_file_dir, latest_ts)
    if not os.path.exists(filename):
        raise Exception('Plan file %s not created for the latest timestamp' % filename)
@then('the timestamp from gp_dump is stored and subdir is "{subdir}"')
def impl(context, subdir):
    """Record the gp_dump timestamp (last token of the 'Timestamp Key:' line) and the given subdir."""
    stdout = context.stdout_message
    context.backup_subdir = subdir
    for line in stdout.splitlines():
        if 'Timestamp Key: ' in line:
            context.backup_timestamp = line.split()[-1]
            validate_timestamp(context.backup_timestamp)
            return
    raise Exception('Timestamp not found %s' % stdout)
@when('the state files are generated under "{dir}" for stored "{backup_type}" timestamp')
@then('the state files are generated under "{dir}" for stored "{backup_type}" timestamp')
def impl(context, dir, backup_type):
dump_dir = dir | |
**params)
self._spec_log = []
if self.timestamp == (0,)*9:
self.timestamp = tuple(time.localtime())
self.pprint_args(['batch_name','args','command'],
['description', 'tag', 'output_directory',
'subdir','metadata'])
self.dynamic = isinstance(args, DynamicArgs)
def get_root_directory(self, timestamp=None):
"""
A helper method that supplies the root directory name given a
timestamp.
"""
if timestamp is None: timestamp = self.timestamp
if self.timestamp_format is not None:
root_name = (time.strftime(self.timestamp_format, timestamp)
+ '-' + self.batch_name)
else:
root_name = self.batch_name
path = os.path.join(self.output_directory,
*(self.subdir+[root_name]))
return os.path.abspath(path)
    def _append_log(self, specs):
        """
        Append launched specifications to the batch log file.

        The log contains the tids and corresponding specifications
        used during launch with the specifications in JSON format.

        :param specs: list of (tid, specification) pairs.
        """
        self._spec_log += specs # This should be removed
        log_path = os.path.join(self.root_directory, ("%s.log" % self.batch_name))
        core.Log.write_log(log_path, [spec for (_, spec) in specs], allow_append=True)
def _record_info(self, setup_info=None):
"""
All launchers should call this method to write the info file
at the end of the launch. The .info file is saved given
setup_info supplied by _setup_launch into the
root_directory. When called without setup_info, the existing
info file is updated with the end-time.
"""
info_path = os.path.join(self.root_directory, ('%s.info' % self.batch_name))
if setup_info is None:
try:
with open(info_path, 'r') as info_file:
setup_info = json.load(info_file)
except:
setup_info = {}
setup_info.update({'end_time' : tuple(time.localtime())})
else:
setup_info.update({
'end_time' : None,
'metadata' : self.metadata
})
with open(info_path, 'w') as info_file:
json.dump(setup_info, info_file, sort_keys=True, indent=4)
    def _setup_launch(self):
        """
        Method to be used by all launchers that prepares the root
        directory and generate basic launch information for command
        templates to use (including a registered timestamp).

        Returns a dict describing the launch: batch identity, platform
        details and the varying/constant keys of the argument specifier.
        """
        self.root_directory = self.get_root_directory()
        if not os.path.isdir(self.root_directory):
            os.makedirs(self.root_directory)
        platform_dict = {}
        python_version = (platform.python_implementation()
                          + platform.python_version())
        platform_dict['platform'] = platform.platform()
        platform_dict['python_version'] = python_version
        # lancet_version is defined at module level (not visible here);
        # presumably the installed lancet release string -- TODO confirm.
        platform_dict['lancet_version'] = str(lancet_version)
        return {'root_directory': self.root_directory,
                'batch_name': self.batch_name,
                'batch_tag': self.tag,
                'batch_description': self.description,
                'launcher': repr(self),
                'platform' : platform_dict,
                'timestamp': self.timestamp,
                'timestamp_format': self.timestamp_format,
                'varying_keys': self.args.varying_keys,
                'constant_keys': self.args.constant_keys,
                'constant_items': self.args.constant_items}
def _setup_streams_path(self):
streams_path = os.path.join(self.root_directory, "streams")
try: os.makedirs(streams_path)
except: pass
# Waiting till these directories exist (otherwise potential qstat error)
while not os.path.isdir(streams_path): pass
return streams_path
    def _launch_process_group(self, process_commands, streams_path):
        """
        Launches processes defined by process_commands, but only
        executes max_concurrency processes at a time; if a process
        completes and there are still outstanding processes to be
        executed, the next processes are run until max_concurrency is
        reached again.

        :param process_commands: iterable of (command, tid) pairs, where
            command is a subprocess.Popen-compatible argument.
        :param streams_path: directory receiving per-process stdout/stderr files.
        """
        processes = {}
        def check_complete_processes(wait=False):
            """
            Returns True if a process completed, False otherwise.
            Optionally allows waiting for better performance (avoids
            sleep-poll cycle if possible).
            """
            result = False
            # list creates copy of keys, as dict is modified in loop
            for proc in list(processes):
                if wait: proc.wait()
                if proc.poll() is not None:
                    # process is done, free up slot
                    self.debug("Process %d exited with code %d."
                               % (processes[proc]['tid'], proc.poll()))
                    processes[proc]['stdout'].close()
                    processes[proc]['stderr'].close()
                    del processes[proc]
                    result = True
            return result
        for cmd, tid in process_commands:
            self.debug("Starting process %d..." % tid)
            job_timestamp = time.strftime('%H%M%S')
            basename = "%s_%s_tid_%d" % (self.batch_name, job_timestamp, tid)
            stdout_handle = open(os.path.join(streams_path, "%s.o.%d"
                                              % (basename, tid)), "wb")
            stderr_handle = open(os.path.join(streams_path, "%s.e.%d"
                                              % (basename, tid)), "wb")
            proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle)
            processes[proc] = { 'tid' : tid,
                                'stdout' : stdout_handle,
                                'stderr' : stderr_handle }
            if self.max_concurrency:
                # max_concurrency reached, wait until more slots available
                # (blocking wait is only used when a single process remains,
                # since then any completion necessarily frees a slot)
                while len(processes) >= self.max_concurrency:
                    if not check_complete_processes(len(processes)==1):
                        time.sleep(0.1)
        # Wait for all processes to complete
        while len(processes) > 0:
            if not check_complete_processes(True):
                time.sleep(0.1)
    def __call__(self):
        """
        Call to start Launcher execution. Typically invoked by
        review_and_launch but may be called directly by the user.

        Iterates over the argument specifier group by group, assigning a
        monotonically increasing tid to each specification, logging each
        group and running it through _launch_process_group. For dynamic
        argument specifiers, feeds the last group's tids back into the
        specifier between groups. Finally updates the info file and, if
        set, invokes the reduction function over the accumulated log.
        """
        launchinfo = self._setup_launch()
        streams_path = self._setup_streams_path()
        self.command.finalize(launchinfo)
        self._record_info(launchinfo)
        last_tid = 0
        last_tids = []
        for gid, groupspecs in enumerate(self.args):
            # tids are globally unique across groups
            tids = list(range(last_tid, last_tid+len(groupspecs)))
            last_tid += len(groupspecs)
            allcommands = [self.command(
                self.command._formatter(spec), tid, launchinfo) \
                for (spec,tid) in zip(groupspecs,tids)]
            self._append_log(list(zip(tids,groupspecs)))
            self.message("Group %d: executing %d processes..." % (gid, len(allcommands)))
            self._launch_process_group(zip(allcommands,tids), streams_path)
            last_tids = tids[:]
            if self.dynamic:
                # dynamic specifiers may generate the next group from results
                self.args.update(last_tids, launchinfo)
        self._record_info()  # stamps the end time into the info file
        if self.reduction_fn is not None:
            self.reduction_fn(self._spec_log, self.root_directory)
def summary(self):
    """
    A succinct summary of the Launcher configuration. Unlike the
    repr, a summary does not have to be complete but must supply
    key information relevant to the user.
    """
    # Assemble the report first, then emit it line by line.
    lines = ["Type: %s" % self.__class__.__name__,
             "Batch Name: %r" % self.batch_name]
    if self.tag:
        lines.append("Tag: %s" % self.tag)
    lines.append("Root directory: %r" % self.get_root_directory())
    lines.append("Maximum concurrency: %s" % self.max_concurrency)
    if self.description:
        lines.append("Description: %s" % self.description)
    for line in lines:
        print(line)
class QLauncher(Launcher):
"""
Launcher that operates with Grid Engine using default arguments
chosen to be suitable for a typical cluster (tested on
the Edinburgh University Eddie cluster).
One of the main features of this class is that it is non-blocking
- it alway exits shortly after invoking qsub. This means that the
script is not left running or waiting for long periods of time.
By convention the standard output and error streams go to the
corresponding folders in the 'streams' subfolder of the root
directory - any -o or -e qsub options will be overridden. The job
name (the -N flag) is specified automatically and any user value
will be ignored.
"""
qsub_switches = param.List(default=['-V', '-cwd'], doc = '''
Specifies the qsub switches (flags without arguments) as a list
of strings. By default the -V switch is used to exports all
environment variables in the host environment to the batch job.''')
qsub_flag_options = param.Dict(default={'-b':'y'}, doc='''
Specifies qsub flags and their corresponding options as a
dictionary. Valid values may be strings or lists of string. If
a plain Python dictionary is used, the keys are alphanumerically
sorted, otherwise the dictionary is assumed to be an
OrderedDict (Python 2.7+ or param.external.OrderedDict) and the
key ordering will be preserved.
By default the -b (binary) flag is set to 'y' to allow binaries
to be directly invoked. Note that the '-' is added to the key
if missing (to make into a valid flag) so you can specify using
keywords in the dict constructor: ie. using
qsub_flag_options=dict(key1=value1, key2=value2, ....)''')
def __init__(self, batch_name, args, command, **params):
    """Initialise qsub-specific launch state on top of Launcher."""
    super(QLauncher, self).__init__(batch_name, args, command, **params)
    # Launch bookkeeping, populated once __call__ runs.
    self._launchinfo = None
    self._spec_log = []
    # Task-id tracking across successive collate/launch cycles.
    self.last_tid = 0
    self.last_tids = []
    self.collate_count = 0
    self.spec_iter = iter(self.args)
    # Grid Engine schedules jobs itself, so the inherited local
    # concurrency throttle is disabled.
    self.max_concurrency = None  # Inherited
def _qsub_args(self, override_options, cmd_args, append_options=[]):
"""
Method to generate Popen style argument list for qsub using
the qsub_switches and qsub_flag_options parameters. Switches
are returned first. The qsub_flag_options follow in keys()
ordered if not a vanilla Python dictionary (ie. a Python 2.7+
or param.external OrderedDict). Otherwise the keys are sorted
alphanumerically. Note that override_options is a list of
key-value pairs.
"""
opt_dict = type(self.qsub_flag_options)()
opt_dict.update(self.qsub_flag_options)
opt_dict.update(override_options)
if type(self.qsub_flag_options) == dict: # Alphanumeric sort if vanilla Python dictionary
ordered_options = [(k, opt_dict[k]) for k in sorted(opt_dict)]
else:
ordered_options = list(opt_dict.items())
ordered_options += append_options
unpacked_groups = [[(k,v) for v in val] if type(val)==list else [(k,val)]
for (k,val) in ordered_options]
unpacked_kvs = [el for group in unpacked_groups for el in group]
# Adds '-' if missing (eg, keywords in dict constructor) and flattens lists.
ordered_pairs = [(k,v) if (k[0]=='-') else ('-%s' % (k), v)
for (k,v) in unpacked_kvs]
ordered_options = [[k]+([v] if type(v) == str else list(v)) for (k,v) in ordered_pairs]
flattened_options = [el for kvs in ordered_options for el in kvs]
return (['qsub'] + self.qsub_switches
+ flattened_options + [pipes.quote(c) for c in cmd_args])
def __call__(self):
    """
    Main entry point for the launcher. Collects the static
    information about the launch, sets up the stdout and stderr
    stream output directories and issues the first call to
    collate_and_launch().
    """
    self._launchinfo = self._setup_launch()
    self.command.finalize(self._launchinfo)
    self.job_timestamp = time.strftime('%H%M%S')
    # Route qsub's stdout/stderr streams into the streams subfolder.
    output_dir = self._setup_streams_path()
    for flag in ('-o', '-e'):
        self.qsub_flag_options[flag] = output_dir
    self.collate_and_launch()
    self._record_info(self._launchinfo)
def collate_and_launch(self):
"""
Method that collates the previous jobs and launches the next
block of concurrent jobs when using DynamicArgs. This method
is invoked on initial launch and then subsequently via a
commandline call (to Python via qsub) to collate the
previously run jobs and launch the next block of jobs.
"""
try: specs = next(self.spec_iter)
except StopIteration:
self.qdel_batch()
if self.reduction_fn is not None:
self.reduction_fn(self._spec_log, self.root_directory)
self._record_info()
return
tid_specs = [(self.last_tid + i, spec) for (i,spec) in enumerate(specs)]
self.last_tid += len(specs)
self._append_log(tid_specs)
# Updating the argument specifier
if self.dynamic:
self.args.update(self.last_tids, self._launchinfo)
self.last_tids = [tid for (tid,_) in tid_specs]
output_dir = self.qsub_flag_options['-o']
error_dir | |
== 'fx'`` force
`w_time_range` to be ``True``.
If ``self.table_id = '*'`` or set multi values and you
want force <time_range> part to be omitted, set
``w_time_range=False`` explicitly.
Examples:
Usual case;
>>> str(drs.DRS(**drs.sample_attrs).fileName())
'tas_Amon_MIROC6_piControl_r1i1p1f1_gn_320001-329912.nc'
With ``sub_experiment_id``;
>>> str(drs.DRS(**drs.sample_attrs_w_subexp).fileName())
'rsdscs_Amon_IPSL-CM6A-LR_dcppC-atl-pacemaker_s1950-r1i1p1f1_gr_192001-201412.nc'
No ``time_range``;
>>> str(drs.DRS(**drs.sample_attrs_no_time_range).fileName(w_time_range=False))
'areacella_fx_MIROC6_historical_r1i1p1f1_gn.nc'
With prefix;
>>> prefix=Path('/data/CMIP6/')
>>> str(drs.DRS(**drs.sample_attrs).fileName(prefix))
'/data/CMIP6/tas_Amon_MIROC6_piControl_r1i1p1f1_gn_320001-329912.nc'
Invalid value for valid attribute;
>>> attrs = {k: v for k, v in drs.sample_attrs.items()}
>>> attrs.update({'table_id': 'invalid'})
>>> str(drs.DRS(**attrs).fileName())
'tas_*_MIROC6_piControl_r1i1p1f1_gn_320001-329912.nc'
>>> str(drs.DRS(**attrs).fileName(allow_asterisk=False))
Traceback (most recent call last):
...
AttributeError: 'DRS' object has no attribute 'table_id'
Missing attributes;
>>> attrs = {k: v for k, v in drs.sample_attrs.items()}
>>> del attrs['time_range']
>>> str(drs.DRS(**attrs).fileName())
'tas_Amon_MIROC6_piControl_r1i1p1f1_gn_*.nc'
>>> str(drs.DRS(**attrs).fileName(allow_asterisk=False))
Traceback (most recent call last):
...
AttributeError: 'DRS' object has no attribute 'time_range'
Allow multi values;
>>> attrs = {k: v for k, v in drs.sample_attrs.items()}
>>> attrs.update({'experiment_id':'amip, piControl'})
>>> str(drs.DRS(**attrs).fileName())
'tas_Amon_MIROC6_{amip,piControl}_r1i1p1f1_gn_320001-329912.nc'
"""
attr = {}
for a in self.filenameAttribs + self.filenameAttribsOptional:
try:
v = getattr(self, a)
except AttributeError:
if (allow_asterisk):
v = '*'
else:
raise
if type(v) is list:
v = '{'+','.join(v)+'}'
attr[a] = v
if (attr['table_id'] == 'fx'):
w_time_range = False
if w_time_range:
f = ("{variable_id}_{table_id}_{source_id}_{experiment_id}"
"_{member_id}_{grid_label}_{time_range}.nc").format(**attr)
else:
f = ("{variable_id}_{table_id}_{source_id}_{experiment_id}"
"_{member_id}_{grid_label}.nc").format(**attr)
f = Path(f)
if (prefix):
f = Path(prefix) / f
return f
def fileNameList(self, prefix=None):
    """
    Returns a list of filenames constructed by the instance member
    attributes that may contain '*' and/or braces.

    Returns:
        list of str: filenames

    Examples:
        >>> attrs = {k: v for k, v in drs.sample_attrs.items()}
        >>> attrs.update({'experiment_id':'amip, piControl'})
        >>> del attrs['time_range']
        >>> str(drs.DRS(**attrs).fileName())
        'tas_Amon_MIROC6_{amip,piControl}_r1i1p1f1_gn_*.nc'
        >>> dlist = drs.DRS(**attrs).fileNameList() # doctest: +SKIP
        >>> [str(d) for d in dlist] # doctest: +SKIP
        ['tas_Amon_MIROC6_amip_r1i1p1f1_gn_*.nc',
        'tas_Amon_MIROC6_piControl_r1i1p1f1_gn_*.nc']

        The last example will return ``[]`` if expanded files do not
        exist.
    """
    pattern = self.fileName(prefix=prefix)
    # Expand braces first, then glob each expansion; only files that
    # actually exist are returned.
    matches = []
    for expanded in braceexpand(str(pattern)):
        matches.extend(glob.glob(expanded))
    return matches
def dirName(self, prefix=None, allow_asterisk=True):
    """
    Construct directory name by DRS from :class:`DRS` instance members.

    If `allow_asterisk` is ``True``, invalid or missing attributes
    are replaced by ``'*'``.

    If you want glob/brace expaned list, use :meth:`dirNameList` instead.

    Args:
        prefix (Path-like): prepend to the result path.
        allow_asterisk: allow result contains ``*`` or not.
    Raises:
        AttributeError: any attributes are missing or invalid and
            ``allow_asterisk=False``
    Returns:
        Path-like : directory name

    Examples:
        Usual case;

        >>> str(drs.DRS(**drs.sample_attrs).dirName())
        'CMIP6/CMIP/MIROC/MIROC6/piControl/r1i1p1f1/Amon/tas/gn/v20181212'

        With ``sub_experiment_id``;

        >>> str(drs.DRS(**drs.sample_attrs_w_subexp).dirName())
        'CMIP6/DCPP/IPSL/IPSL-CM6A-LR/dcppC-atl-pacemaker/s1950-r1i1p1f1/Amon/rsdscs/gr/v20190110'

        Invalid value for valid attribute;

        >>> attrs = {k:v for k,v in drs.sample_attrs.items()}
        >>> attrs['table_id'] = 'invalid'
        >>> str(drs.DRS(**attrs).dirName())
        'CMIP6/CMIP/MIROC/MIROC6/piControl/r1i1p1f1/*/tas/gn/v20181212'
        >>> str(drs.DRS(**attrs).dirName(allow_asterisk=False))
        Traceback (most recent call last):
            ...
        AttributeError: 'DRS' object has no attribute 'table_id'

        Missing attributes;

        >>> attrs = {k:v for k,v in drs.sample_attrs.items()}
        >>> del attrs['experiment_id']
        >>> str(drs.DRS(**attrs).dirName(prefix='/data/'))
        '/data/CMIP6/CMIP/MIROC/MIROC6/*/r1i1p1f1/Amon/tas/gn/v20181212'
        >>> str(drs.DRS(**attrs).dirName(prefix='/data/', allow_asterisk=False))
        Traceback (most recent call last):
            ...
        AttributeError: 'DRS' object has no attribute 'experiment_id'

        Allow multi values;

        >>> attrs = {k: v for k, v in drs.sample_attrs.items()}
        >>> attrs.update({'experiment_id':'amip, piControl'})
        >>> str(drs.DRS(**attrs).dirName())
        'CMIP6/CMIP/MIROC/MIROC6/{amip,piControl}/r1i1p1f1/Amon/tas/gn/v20181212'
    """
    parts = {}
    for name in self.dirnameAttribs:
        try:
            value = getattr(self, name)
        except AttributeError:
            if not allow_asterisk:
                raise
            value = '*'
        # A multi-valued attribute becomes a brace pattern.
        if type(value) is list:
            value = '{' + ','.join(value) + '}'
        parts[name] = value
    # DRS-mandated component order for the directory hierarchy.
    order = ("mip_era", "activity_id", "institution_id", "source_id",
             "experiment_id", "member_id", "table_id", "variable_id",
             "grid_label", "version")
    result = Path(*[parts[key] for key in order])
    if prefix:
        result = Path(prefix) / result
    return result
def dirNameList(self, prefix=None):
    """
    Return list of directory name constructed by DRS from
    :class:`DRS` instance members, that contains asterisk and/or
    braces.

    Args:
        prefix(path-like): dirname to prepend.
    Returns:
        list of path-like: directory names
    Note:
        Non-existent directories are omitted.

    Examples:
        >>> attrs = {k: v for k, v in drs.sample_attrs.items()}
        >>> attrs.update({'experiment_id':'amip, piControl'})
        >>> del attrs['version']
        >>> str(drs.DRS(**attrs).dirName())
        'CMIP6/CMIP/MIROC/MIROC6/{amip,piControl}/r1i1p1f1/Amon/tas/gn/*'
        >>> res = drs.DRS(**attrs).dirNameList(prefix='/data')
        >>> ref = [Path('/data/CMIP6/CMIP/MIROC/MIROC6/amip/r1i1p1f1/Amon/tas/gn/v20181214'),
        ...        Path('/data/CMIP6/CMIP/MIROC/MIROC6/piControl/r1i1p1f1/Amon/tas/gn/v20181212')]
        >>> print(ref == res)
        True

        The last example will return ``[]`` if expanded directories do
        not exist.
    """
    pattern = self.dirName(prefix=prefix)  # may contain '*' and braces
    # Brace-expand first, then glob each expansion; only directories
    # that actually exist are yielded by iglob.
    found = []
    for candidate in braceexpand(str(pattern)):
        found.extend(Path(hit) for hit in glob.iglob(candidate))
    return found
def splitFileName(self, fname, validate=False):
    """Split filename to attributes for DRS.

    If ``validate=False``, just split only. So if the `fname`
    consist of the same number of components with DRS-valid
    filename, no error happens. You should set `validate=True` or
    use :meth:`isValidValueForAttr` by yourself.

    Args:
        fname (Path-like) : filename
        validate(bool) : validate the resulting attribute/value pair
    Raises:
        ValueError: if `fname` is invalid for DRS.
    Returns:
        dict: attribute and it's value
    Note:
        Instance members keep untouched, give :meth:`set` the
        result of this method.

    Examples:
        >>> fname = "tas_Amon_MIROC6_piControl_r1i1p1f1_gn_320001-329912.nc"
        >>> drs.DRS().splitFileName(fname)
        {'experiment_id': 'piControl', 'grid_label': 'gn', 'source_id': 'MIROC6', 'table_id': 'Amon', 'time_range': '320001-329912', 'variable_id': 'tas', 'variant_label': 'r1i1p1f1'}
        >>> fname='invalid_very_long_file_name.nc'
        >>> drs.DRS().splitFileName(fname)
        Traceback (most recent call last):
            ...
        ValueError: not follow the name template: "invalid_very_long_file_name.nc"
        >>> fname='invalid_but_same_length_with_drs.nc'
        >>> drs.DRS().splitFileName(fname)
        {'experiment_id': 'length', 'grid_label': 'drs', 'source_id': 'same', 'table_id': 'but', 'variable_id': 'invalid', 'variant_label': 'with'}
        >>> drs.DRS().splitFileName(fname, validate=True)
        Traceback (most recent call last):
            ...
        ValueError: "length" is invalid for <experiment_id>
    """
    try:
        (variable_id, table_id, source_id, experiment_id, member_id,
         grid_label) = Path(fname).stem.split('_', 5)
    except ValueError:
        raise ValueError(f'not follow the name template: "{fname}"')
    # Explicit lookup table replaces the previous eval() over local
    # variable names: same result, but no dynamic evaluation.
    candidates = {'variable_id': variable_id,
                  'table_id': table_id,
                  'source_id': source_id,
                  'experiment_id': experiment_id,
                  'member_id': member_id,
                  'grid_label': grid_label}
    # The last component may be "<grid_label>_<time_range>".
    try:
        grid_label, time_range = grid_label.split('_')
    except ValueError:
        pass  # no time_range component
    else:
        candidates['grid_label'] = grid_label
        candidates['time_range'] = time_range
    # <member_id> is "<sub_experiment_id>-<variant_label>" or just
    # "<variant_label>".
    try:
        sub_experiment_id, variant_label = member_id.split('-')
    except ValueError:
        candidates['variant_label'] = member_id
    else:
        candidates['sub_experiment_id'] = sub_experiment_id
        candidates['variant_label'] = variant_label
    # Keep only the attributes this class declares as required; the
    # iteration order of requiredAttribs fixes the result dict order.
    res = {a: candidates[a] for a in self.requiredAttribs if a in candidates}
    if validate:
        for a, v in res.items():
            if not self.isValidValueForAttr(v, a):
                raise ValueError(f'"{v}" is invalid for <{a}>')
    return res
def splitDirName(self, dname, validate=False):
    """Split dirname to attributes for DRS.

    If ``validate=False``, just split only. So if the `dname`
    consist of the same number of components with DRS-valid
    directory name, no error happens. You should set
    `validate=True` or use :meth:`isValidValueForAttr` by
    yourself.

    Args:
        dname (path-like) : directory name
        validate(bool) : validate the resulting attribute/value pair
    Returns:
        dict: attribute and it's value
    Note:
        Instance members keep untouched, give :meth:`set` the
        result of this method.

    Examples:
        >>> dname = 'CMIP6/CMIP/MIROC/MIROC6/piControl/r1i1p1f1/Amon/tas/gn/v20181212'
        >>> drs.DRS().splitDirName(dname)
        {'activity_id': 'CMIP', 'experiment_id': 'piControl', 'grid_label': 'gn', 'institution_id': 'MIROC', 'mip_era': 'CMIP6', 'source_id': 'MIROC6', 'table_id': 'Amon', 'variable_id': 'tas', 'variant_label': 'r1i1p1f1', 'version': 'v20181212', 'prefix': ''}

        With `prefix`;

        >>> dname = ('/work/data/CMIP6/CMIP6/CMIP/MIROC/MIROC6/piControl/r1i1p1f1/Amon/tas/gn/v20181212')
        >>> drs.DRS().splitDirName(dname)
        {'activity_id': 'CMIP', 'experiment_id': 'piControl', 'grid_label': 'gn', 'institution_id': 'MIROC', 'mip_era': 'CMIP6', 'source_id': 'MIROC6', 'table_id': 'Amon', 'variable_id': 'tas', 'variant_label': 'r1i1p1f1', 'version': 'v20181212', 'prefix': '/work/data/CMIP6'}

        Invalid case;

        >>> dname = 'Some/Invalid/Path'
        >>> drs.DRS().splitDirName(dname)
        Traceback (most recent call last):
            ...
        ValueError: Invalid dirname: "Some/Invalid/Path"
        >>> dname = 'Some/Invalid/but/has/occasionally/the/same/number/of/component/'
        >>> drs.DRS().splitDirName(dname)
        {'activity_id': 'Invalid', 'experiment_id': 'occasionally', 'grid_label': 'of', 'institution_id': 'but', 'mip_era': 'Some', 'source_id': 'has', 'table_id': 'same', 'variable_id': 'number', 'variant_label': 'the', 'version': 'component', 'prefix': ''}
        >>> drs.DRS().splitDirName(dname, validate=True)
        Traceback (most recent call last):
            ...
        ValueError: "Invalid" is invalid for <activity_id>
    """
    d = Path(dname)
    # Unpack the last ten path components in reverse order.
    try:
        (version, grid_label, variable_id, table_id, member_id,
         experiment_id, source_id, institution_id, activity_id,
         mip_era) = d.parts[-1:-11:-1]
    except ValueError:
        raise ValueError(f'Invalid dirname: "{dname}"')
    # Explicit lookup table replaces the previous eval() over local
    # variable names: same result, but no dynamic evaluation.
    candidates = {'version': version,
                  'grid_label': grid_label,
                  'variable_id': variable_id,
                  'table_id': table_id,
                  'member_id': member_id,
                  'experiment_id': experiment_id,
                  'source_id': source_id,
                  'institution_id': institution_id,
                  'activity_id': activity_id,
                  'mip_era': mip_era}
    # <member_id> is "<sub_experiment_id>-<variant_label>" or just
    # "<variant_label>".
    try:
        sub_experiment_id, variant_label = member_id.split('-')
    except ValueError:
        candidates['variant_label'] = member_id
    else:
        candidates['sub_experiment_id'] = sub_experiment_id
        candidates['variant_label'] = variant_label
    res = {k: candidates[k] for k in self.requiredAttribs if k in candidates}
    if validate:
        for a, v in res.items():
            if not self.isValidValueForAttr(v, a):
                raise ValueError(f'"{v}" is invalid for <{a}>')
    # Everything before the ten DRS components is the caller's prefix.
    if len(d.parts) > 10:
        res["prefix"] = str(Path(*d.parts[:-10]))
    else:
        res["prefix"] = ''
    return res
def isValidPath(self, path, directory=False, separated=False):
"""
Check if given `path` is DRS compliant.
`path` may be a URL obtained by ESGF Search function. See
:mod:`cmiputil.esgfsearch` for details.
Args:
path (Path-like) : pathname to be checked
directory (bool) : treat `path` is a directory
separated (bool) : return a tuple of two dicts
Returns:
bool or list of bool : valid or not (see below)
If `separate` is True, return a tuple of two dicts, first
element is for the filename, second is for the directory name,
both dicts' key/value shows that each attributes are valid or
not. If `directory` is ``True``, first elements is ``{'all': True}``.
Examples:
>>> ourl = ('http://vesg.ipsl.upmc.fr/thredds/fileServer/cmip6/DCPP/'
... 'IPSL/IPSL-CM6A-LR/dcppC-pac-pacemaker/s1920-r1i1p1f1/'
... 'Amon/rsdscs/gr/v20190110/rsdscs_Amon_IPSL-CM6A-LR_'
... 'dcppC-pac-pacemaker_s1920-r1i1p1f1_gr_192001-201412.nc')
>>> drs.DRS().isValidPath(url)
True
>>> drs.DRS().isValidPath(url, separated=True)
({'experiment_id': True, 'grid_label': True, 'source_id': True, 'sub_experiment_id': True, 'table_id': True, 'time_range': True, 'variable_id': True, 'variant_label': True}, {'activity_id': True, 'experiment_id': True, 'grid_label': True, 'institution_id': True, 'mip_era': True, 'source_id': True, 'sub_experiment_id': True, 'table_id': True, 'variable_id': True, 'variant_label': True, 'version': True})
>>> url = | |
places.nm",
)
zalias = zips.alias("main_zip")
qlat = select(
[zips.c.latitude], zips.c.zipcode == zalias.c.zipcode
).as_scalar()
qlng = select(
[zips.c.longitude], zips.c.zipcode == zalias.c.zipcode
).as_scalar()
q = select(
[
places.c.id,
places.c.nm,
zalias.c.zipcode,
func.latlondist(qlat, qlng).label("dist"),
],
order_by=["dist", places.c.nm],
)
self.assert_compile(
q,
"SELECT places.id, places.nm, "
"main_zip.zipcode, latlondist((SELECT "
"zips.latitude FROM zips WHERE "
"zips.zipcode = main_zip.zipcode), (SELECT "
"zips.longitude FROM zips WHERE "
"zips.zipcode = main_zip.zipcode)) AS dist "
"FROM places, zips AS main_zip ORDER BY "
"dist, places.nm",
)
a1 = table2.alias("t2alias")
s1 = select([a1.c.otherid], table1.c.myid == a1.c.otherid).as_scalar()
j1 = table1.join(table2, table1.c.myid == table2.c.otherid)
s2 = select([table1, s1], from_obj=j1)
self.assert_compile(
s2,
"SELECT mytable.myid, mytable.name, "
"mytable.description, (SELECT "
"t2alias.otherid FROM myothertable AS "
"t2alias WHERE mytable.myid = "
"t2alias.otherid) AS anon_1 FROM mytable "
"JOIN myothertable ON mytable.myid = "
"myothertable.otherid",
)
def test_label_comparison_one(self):
    # Comparing against a labeled expression compiles to the
    # underlying expression, not the label name.
    labeled = func.lala(table1.c.myid).label("foo")
    stmt = select([labeled], labeled == 5)
    self.assert_compile(
        stmt,
        "SELECT lala(mytable.myid) AS foo FROM mytable "
        "WHERE lala(mytable.myid) = :param_1",
    )
def test_label_comparison_two(self):
    # A binary operation on a label renders its element, never the
    # label name itself.
    expr = label("bar", column("foo", type_=String)) + "foo"
    self.assert_compile(expr, "foo || :param_1")
def test_order_by_labels_enabled(self):
    """ORDER BY renders plain label names when the dialect supports
    simple order-by-label (the default), but only for exact Label
    constructs that also appear in the columns clause."""
    lab1 = (table1.c.myid + 12).label("foo")
    lab2 = func.somefunc(table1.c.name).label("bar")
    dialect = default.DefaultDialect()

    # plain labels in ORDER BY render by name
    self.assert_compile(
        select([lab1, lab2]).order_by(lab1, desc(lab2)),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY foo, bar DESC",
        dialect=dialect,
    )

    # the function embedded label renders as the function
    self.assert_compile(
        select([lab1, lab2]).order_by(func.hoho(lab1), desc(lab2)),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY hoho(mytable.myid + :myid_1), bar DESC",
        dialect=dialect,
    )

    # binary expressions render as the expression without labels
    self.assert_compile(
        select([lab1, lab2]).order_by(lab1 + "test"),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY mytable.myid + :myid_1 + :param_1",
        dialect=dialect,
    )

    # labels within functions in the columns clause render
    # with the expression
    self.assert_compile(
        select([lab1, func.foo(lab1)]).order_by(lab1, func.foo(lab1)),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "foo(mytable.myid + :myid_1) AS foo_1 FROM mytable "
        "ORDER BY foo, foo(mytable.myid + :myid_1)",
        dialect=dialect,
    )

    lx = (table1.c.myid + table1.c.myid).label("lx")
    ly = (func.lower(table1.c.name) + table1.c.description).label("ly")

    # both labels in the columns clause order by name
    self.assert_compile(
        select([lx, ly]).order_by(lx, ly.desc()),
        "SELECT mytable.myid + mytable.myid AS lx, "
        "lower(mytable.name) || mytable.description AS ly "
        "FROM mytable ORDER BY lx, ly DESC",
        dialect=dialect,
    )

    # expression isn't actually the same thing (even though label is)
    self.assert_compile(
        select([lab1, lab2]).order_by(
            table1.c.myid.label("foo"), desc(table1.c.name.label("bar"))
        ),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY mytable.myid, mytable.name DESC",
        dialect=dialect,
    )

    # it's also an exact match, not aliased etc.
    self.assert_compile(
        select([lab1, lab2]).order_by(
            desc(table1.alias().c.name.label("bar"))
        ),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY mytable_1.name DESC",
        dialect=dialect,
    )

    # but! it's based on lineage
    lab2_lineage = lab2.element._clone()
    self.assert_compile(
        select([lab1, lab2]).order_by(desc(lab2_lineage.label("bar"))),
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
        "ORDER BY bar DESC",
        dialect=dialect,
    )

    # here, 'name' is implicitly available, but w/ #3882 we don't
    # want to render a name that isn't specifically a Label elsewhere
    # in the query
    self.assert_compile(
        select([table1.c.myid]).order_by(table1.c.name.label("name")),
        "SELECT mytable.myid FROM mytable ORDER BY mytable.name",
    )

    # as well as if it doesn't match
    self.assert_compile(
        select([table1.c.myid]).order_by(
            func.lower(table1.c.name).label("name")
        ),
        "SELECT mytable.myid FROM mytable ORDER BY lower(mytable.name)",
    )
def test_order_by_labels_disabled(self):
    # With supports_simple_order_by_label switched off, ORDER BY
    # always renders the full expressions, never the label names.
    foo = (table1.c.myid + 12).label("foo")
    bar = func.somefunc(table1.c.name).label("bar")
    dialect = default.DefaultDialect()
    dialect.supports_simple_order_by_label = False

    columns = (
        "SELECT mytable.myid + :myid_1 AS foo, "
        "somefunc(mytable.name) AS bar FROM mytable "
    )
    self.assert_compile(
        select([foo, bar]).order_by(foo, desc(bar)),
        columns
        + "ORDER BY mytable.myid + :myid_1, somefunc(mytable.name) DESC",
        dialect=dialect,
    )
    self.assert_compile(
        select([foo, bar]).order_by(func.hoho(foo), desc(bar)),
        columns
        + "ORDER BY hoho(mytable.myid + :myid_1), "
        "somefunc(mytable.name) DESC",
        dialect=dialect,
    )
def test_no_group_by_labels(self):
    # GROUP BY never substitutes label names; the full expressions
    # are always rendered.
    foo = (table1.c.myid + 12).label("foo")
    bar = func.somefunc(table1.c.name).label("bar")
    stmt = select([foo, bar]).group_by(foo, bar)
    self.assert_compile(
        stmt,
        "SELECT mytable.myid + :myid_1 AS foo, somefunc(mytable.name) "
        "AS bar FROM mytable GROUP BY mytable.myid + :myid_1, "
        "somefunc(mytable.name)",
        dialect=default.DefaultDialect(),
    )
def test_conjunctions(self):
    """and_() / or_() compile with correct precedence, parenthesize
    nested disjunctions, and accept text() fragments and generator
    arguments."""
    a, b, c = text("a"), text("b"), text("c")
    x = and_(a, b, c)
    # a conjunction of text() fragments is Boolean-typed
    assert isinstance(x.type, Boolean)
    assert str(x) == "a AND b AND c"
    self.assert_compile(
        select([x.label("foo")]), "SELECT a AND b AND c AS foo"
    )
    self.assert_compile(
        and_(
            table1.c.myid == 12,
            table1.c.name == "asdf",
            table2.c.othername == "foo",
            text("sysdate() = today()"),
        ),
        "mytable.myid = :myid_1 AND mytable.name = :name_1 "
        "AND myothertable.othername = "
        ":othername_1 AND sysdate() = today()",
    )
    # a nested or_() is parenthesized within the enclosing and_()
    self.assert_compile(
        and_(
            table1.c.myid == 12,
            or_(
                table2.c.othername == "asdf",
                table2.c.othername == "foo",
                table2.c.otherid == 9,
            ),
            text("sysdate() = today()"),
        ),
        "mytable.myid = :myid_1 AND (myothertable.othername = "
        ":othername_1 OR myothertable.othername = :othername_2 OR "
        "myothertable.otherid = :otherid_1) AND sysdate() = "
        "today()",
        checkparams={
            "othername_1": "asdf",
            "othername_2": "foo",
            "otherid_1": 9,
            "myid_1": 12,
        },
    )

    # test a generator
    self.assert_compile(
        and_(
            conj for conj in [table1.c.myid == 12, table1.c.name == "asdf"]
        ),
        "mytable.myid = :myid_1 AND mytable.name = :name_1",
    )
def test_nested_conjunctions_short_circuit(self):
    """test that empty or_(), and_() conjunctions are collapsed by
    an enclosing conjunction."""
    t = table("t", column("x"))
    cases = [
        (
            and_(t.c.x == 5, or_(and_(or_(t.c.x == 7)))),
            "SELECT t.x FROM t WHERE t.x = :x_1 AND t.x = :x_2",
        ),
        (
            and_(or_(t.c.x == 12, and_(or_(t.c.x == 8)))),
            "SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2",
        ),
        (
            and_(
                or_(
                    or_(t.c.x == 12),
                    and_(or_(), or_(and_(t.c.x == 8)), and_()),
                )
            ),
            "SELECT t.x FROM t WHERE t.x = :x_1 OR t.x = :x_2",
        ),
    ]
    for criterion, expected in cases:
        self.assert_compile(select([t]).where(criterion), expected)
def test_true_short_circuit(self):
    # A standalone true() renders "1 = 1" on dialects without native
    # booleans and "true" on dialects with them; a criterion-free
    # select has no WHERE clause at all.
    t = table("t", column("x"))
    non_native = default.DefaultDialect(supports_native_boolean=False)
    native = default.DefaultDialect(supports_native_boolean=True)

    self.assert_compile(
        select([t]).where(true()),
        "SELECT t.x FROM t WHERE 1 = 1",
        dialect=non_native,
    )
    self.assert_compile(
        select([t]).where(true()),
        "SELECT t.x FROM t WHERE true",
        dialect=native,
    )
    self.assert_compile(
        select([t]),
        "SELECT t.x FROM t",
        dialect=native,
    )
def test_distinct(self):
    # Three equivalent spellings of SELECT DISTINCT col...
    plain = "SELECT DISTINCT mytable.myid FROM mytable"
    for stmt in (
        select([table1.c.myid.distinct()]),
        select([distinct(table1.c.myid)]),
        select([table1.c.myid]).distinct(),
    ):
        self.assert_compile(stmt, plain)

    # ...and two equivalent spellings of count(DISTINCT col).
    counted = "SELECT count(DISTINCT mytable.myid) AS count_1 FROM mytable"
    for stmt in (
        select([func.count(table1.c.myid.distinct())]),
        select([func.count(distinct(table1.c.myid))]),
    ):
        self.assert_compile(stmt, counted)
def test_where_empty(self):
    # An empty conjunction contributes no WHERE clause at all.
    for empty_conjunction in (and_(), or_()):
        self.assert_compile(
            select([table1.c.myid]).where(empty_conjunction),
            "SELECT mytable.myid FROM mytable",
        )
def test_order_by_nulls(self):
    # NULLS FIRST / NULLS LAST modifiers render after each ORDER BY
    # element, combining freely with ASC/DESC.
    prefix = (
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY "
    )
    oid = table2.c.otherid
    oname = table2.c.othername
    cases = [
        (
            [oid, oname.desc().nullsfirst()],
            "myothertable.otherid, "
            "myothertable.othername DESC NULLS FIRST",
        ),
        (
            [oid, oname.desc().nullslast()],
            "myothertable.otherid, "
            "myothertable.othername DESC NULLS LAST",
        ),
        (
            [oid.nullslast(), oname.desc().nullsfirst()],
            "myothertable.otherid NULLS LAST, "
            "myothertable.othername DESC NULLS FIRST",
        ),
        (
            [oid.nullsfirst(), oname.desc()],
            "myothertable.otherid NULLS FIRST, "
            "myothertable.othername DESC",
        ),
        (
            [oid.nullsfirst(), oname.desc().nullslast()],
            "myothertable.otherid NULLS FIRST, "
            "myothertable.othername DESC NULLS LAST",
        ),
    ]
    for order_by, tail in cases:
        self.assert_compile(table2.select(order_by=order_by), prefix + tail)
def test_orderby_groupby(self):
    """ORDER BY / GROUP BY rendering, via both select() keyword
    arguments and the generative methods; passing None to the
    generative form clears the clause."""
    self.assert_compile(
        table2.select(
            order_by=[table2.c.otherid, asc(table2.c.othername)]
        ),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid, "
        "myothertable.othername ASC",
    )

    self.assert_compile(
        table2.select(
            order_by=[table2.c.otherid, table2.c.othername.desc()]
        ),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid, "
        "myothertable.othername DESC",
    )

    # generative order_by
    self.assert_compile(
        table2.select()
        .order_by(table2.c.otherid)
        .order_by(table2.c.othername.desc()),
        "SELECT myothertable.otherid, myothertable.othername FROM "
        "myothertable ORDER BY myothertable.otherid, "
        "myothertable.othername DESC",
    )

    # order_by(None) discards any previously established ORDER BY
    self.assert_compile(
        table2.select()
        .order_by(table2.c.otherid)
        .order_by(table2.c.othername.desc())
        .order_by(None),
        "SELECT myothertable.otherid, myothertable.othername "
        "FROM myothertable",
    )

    self.assert_compile(
        select(
            [table2.c.othername, func.count(table2.c.otherid)],
            group_by=[table2.c.othername],
        ),
        "SELECT myothertable.othername, "
        "count(myothertable.otherid) AS count_1 "
        "FROM myothertable GROUP BY myothertable.othername",
    )

    # generative group by
    self.assert_compile(
        select(
            [table2.c.othername, func.count(table2.c.otherid)]
        ).group_by(table2.c.othername),
        "SELECT myothertable.othername, "
        "count(myothertable.otherid) AS count_1 "
        "FROM myothertable GROUP BY myothertable.othername",
    )

    # group_by(None) likewise clears the GROUP BY clause
    self.assert_compile(
        select([table2.c.othername, func.count(table2.c.otherid)])
        .group_by(table2.c.othername)
        .group_by(None),
        "SELECT myothertable.othername, "
        "count(myothertable.otherid) AS count_1 "
        "FROM myothertable",
    )

    # GROUP BY and ORDER BY combined
    self.assert_compile(
        select(
            [table2.c.othername, func.count(table2.c.otherid)],
            group_by=[table2.c.othername],
            order_by=[table2.c.othername],
        ),
        "SELECT myothertable.othername, "
        "count(myothertable.otherid) AS count_1 "
        "FROM myothertable "
        "GROUP BY myothertable.othername ORDER BY myothertable.othername",
    )
def test_custom_order_by_clause(self):
    # A dialect's statement compiler may append arbitrary text to
    # the ORDER BY clause via order_by_clause().
    class OrderByCompiler(PGCompiler):
        def order_by_clause(self, select, **kw):
            inner = super(OrderByCompiler, self).order_by_clause(
                select, **kw
            )
            return inner + " CUSTOMIZED"

    class OrderByDialect(PGDialect):
        name = "custom"
        statement_compiler = OrderByCompiler

    self.assert_compile(
        select([table1.c.myid]).order_by(table1.c.myid),
        "SELECT mytable.myid FROM mytable ORDER BY "
        "mytable.myid CUSTOMIZED",
        dialect=OrderByDialect(),
    )
def test_custom_group_by_clause(self):
class CustomCompiler(PGCompiler):
def group_by_clause(self, select, **kw):
return (
super(CustomCompiler, self).group_by_clause(select, **kw)
+ " CUSTOMIZED"
)
class CustomDialect(PGDialect):
name = "custom"
statement_compiler = CustomCompiler
stmt = select([table1.c.myid]).group_by(table1.c.myid)
self.assert_compile(
stmt,
| |
# -*- coding: utf-8 -*-
from ebu_tt_live.bindings.raw._ebuttdt import *
from ebu_tt_live.bindings.raw import _ebuttdt as ebuttdt_raw
from datetime import timedelta
from decimal import Decimal
import re, logging
import six
from pyxb.exceptions_ import SimpleTypeValueError, SimpleFacetValueError
from ebu_tt_live.errors import TimeFormatOverflowError, ExtentMissingError
from ebu_tt_live.strings import ERR_TIME_FORMAT_OVERFLOW, ERR_SEMANTIC_VALIDATION_TIMING_TYPE, ERR_1DIM_ONLY, \
ERR_2DIM_ONLY
from .pyxb_utils import get_xml_parsing_context
from .validation.base import SemanticValidationMixin
from .validation.presentation import SizingValidationMixin
log = logging.getLogger(__name__)
def _get_time_members(checked_time):
hours, seconds = divmod(checked_time.seconds, 3600)
hours += checked_time.days * 24
minutes, seconds = divmod(seconds, 60)
milliseconds, _ = divmod(checked_time.microseconds, 1000)
return hours, minutes, seconds, milliseconds
class _TimedeltaBindingMixin(object):
    """
    Wiring in timedelta assignment and conversion operators.

    Subclasses override ``_compatible_timebases`` to declare which ttp:timeBase
    values are legal for each timing attribute, and provide
    ``as_timedelta``/``from_timedelta`` conversions.
    """

    # For each timing attribute a list of timeBases is specified, which represents the valid timeBase, timing attribute
    # and timing type semantic constraint.
    _compatible_timebases = {
        'begin': [],
        'dur': [],
        'end': []
    }

    @classmethod
    def _int_or_none(cls, value):
        # Coerce to int, mapping None (TypeError) to 0. A malformed string
        # (ValueError) is deliberately left to propagate.
        try:
            return int(value)
        except TypeError:
            return 0

    @classmethod
    def compatible_timebases(cls):
        """Return the timeBase compatibility mapping for this timing type."""
        return cls._compatible_timebases

    @classmethod
    def _ConvertArguments_vx(cls, args, kw):
        """
        This hook is called before the type in question is instantiated. This is meant to do some normalization
        of input parameters and convert them to tuple. In this function we check the timeBase and the attribute name
        against our compatible_timebases mapping inside the timing type class. If an invalid scenario is encountered
        SimpleTypeValueError is raised, which effectively prevents the timingType union to instantiate the type.

        :raises pyxb.SimpleTypeValueError:
        :param args:
        :param kw:
        :return: tuple of converted input parameters.
        """
        result = []
        # In parsing mode check timebase compatibility at instantiation time. This prevents pyxb instantiating
        # the wrong type given 2 types having overlapping values in a union as it happens in full and limited
        # clock timing types.
        context = get_xml_parsing_context()
        if context is not None:
            # This means we are in XML parsing context. There should be a timeBase and a timing_attribute_name in the
            # context object.
            time_base = context['timeBase']
            # It is possible for a timing type to exist as the value of an element not an attribute,
            # in which case no timing_attribute_name is in the context; in that case don't attempt
            # to validate the data against a timebase. At the moment this only affects the
            # documentStartOfProgramme metadata element.
            if 'timing_attribute_name' in context:
                timing_att_name = context['timing_attribute_name']
                if time_base not in cls._compatible_timebases[timing_att_name]:
                    log.debug(ERR_SEMANTIC_VALIDATION_TIMING_TYPE.format(
                        attr_name=timing_att_name,
                        attr_type=cls,
                        attr_value=args,
                        time_base=time_base
                    ))
                    # BUG FIX: use the directly imported SimpleTypeValueError;
                    # the bare ``pyxb`` module name is not imported by this
                    # module, so ``pyxb.SimpleTypeValueError`` relied on the
                    # star import above leaking it.
                    raise SimpleTypeValueError(ERR_SEMANTIC_VALIDATION_TIMING_TYPE.format(
                        attr_name=timing_att_name,
                        attr_type=cls,
                        attr_value=args,
                        time_base=time_base
                    ))
        for item in args:
            if isinstance(item, timedelta):
                result.append(cls.from_timedelta(item))
            else:
                result.append(item)
        return tuple(result)

    @property
    def timedelta(self):
        """The value converted to a datetime.timedelta."""
        return self.as_timedelta(self)
def cells_to_pixels(cells_in, root_extent, cell_resolution):
    """Convert a cell-based sizing value to pixels.

    Requires a pixel-based root extent; raises ExtentMissingError otherwise.
    Returns a 2-tuple for 2-dimensional input, a 1-tuple (vertical only)
    for 1-dimensional input.
    """
    if not isinstance(root_extent, PixelExtentType):
        raise ExtentMissingError(root_extent)
    if cells_in.horizontal is None:
        # 1-dimensional: preserve the original API's 1-tuple shape.
        return (cells_in.vertical * root_extent.vertical / cell_resolution.vertical,)
    horizontal_px = cells_in.horizontal * root_extent.horizontal / cell_resolution.horizontal
    vertical_px = cells_in.vertical * root_extent.vertical / cell_resolution.vertical
    return horizontal_px, vertical_px
def pixels_to_cells(pixels_in, root_extent, cell_resolution):
    """Convert a pixel-based sizing value to cells.

    Requires a pixel-based root extent; raises ExtentMissingError otherwise.
    Returns a 2-tuple for 2-dimensional input, a 1-tuple (vertical only)
    for 1-dimensional input.
    """
    if not isinstance(root_extent, PixelExtentType):
        raise ExtentMissingError(root_extent)
    if pixels_in.horizontal is None:
        # 1-dimensional: preserve the original API's 1-tuple shape.
        return (pixels_in.vertical * cell_resolution.vertical / root_extent.vertical,)
    horizontal_cells = pixels_in.horizontal * cell_resolution.horizontal / root_extent.horizontal
    vertical_cells = pixels_in.vertical * cell_resolution.vertical / root_extent.vertical
    return horizontal_cells, vertical_cells
def named_color_to_rgba(named_color):
    """Map a named color (case-insensitive) to its '#rrggbbaa' hex form.

    Raises KeyError for names outside the supported set.
    """
    rgba_by_name = {
        "transparent": "00000000",
        "black": "000000ff",
        "silver": "c0c0c0ff",
        "gray": "808080ff",
        "white": "ffffffff",
        "maroon": "800000ff",
        "red": "ff0000ff",
        "purple": "800080ff",
        "fuchsia": "ff00ffff",
        "magenta": "ff00ffff",
        "green": "008000ff",
        "lime": "00ff00ff",
        "olive": "808000ff",
        "yellow": "ffff00ff",
        "navy": "000080ff",
        "blue": "0000ffff",
        "teal": "008080ff",
        "aqua": "00ffffff",
        "cyan": "00ffffff"
    }
    return '#' + rgba_by_name[named_color.lower()]
def convert_cell_region_to_percentage(cells_in, cell_resolution):
    """Express a 2-dimensional cell value as a 'H% V%' string of the cell grid."""
    horizontal_pct = round(float(cells_in.horizontal) / float(cell_resolution.horizontal) * 100, 2)
    vertical_pct = round(float(cells_in.vertical) / float(cell_resolution.vertical) * 100, 2)
    return '{}% {}%'.format(horizontal_pct, vertical_pct)
def convert_pixel_region_to_percentage(pixels_in, extent):
    """Express a 2-dimensional pixel value as a 'H% V%' string of the extent."""
    horizontal_pct = round(float(pixels_in.horizontal) / float(extent.horizontal) * 100, 2)
    vertical_pct = round(float(pixels_in.vertical) / float(extent.vertical) * 100, 2)
    return '{}% {}%'.format(horizontal_pct, vertical_pct)
class TwoDimSizingMixin(object):
    """Parsing/formatting helpers shared by 1- and 2-dimensional sizing types.

    Subclasses supply ``_groups_regex`` (splits the string value into its one
    or two numeric components) plus the ``_1dim_format``/``_2dim_format``
    templates; a ``None`` template marks that dimensionality as unsupported
    for the subclass.
    """

    # Regex whose match groups yield the numeric component(s) of the value.
    _groups_regex = None
    # Format template for 1-dimensional values (None if unsupported).
    _1dim_format = None
    # Format template for 2-dimensional values (None if unsupported).
    _2dim_format = None

    @classmethod
    def as_tuple(cls, instance):
        """Parse *instance* into (first, second) floats; second is None for 1-dim types."""
        if cls._2dim_format is None:
            first, second = cls._groups_regex.match(instance).groups()[0], None
        else:
            first, second = cls._groups_regex.match(instance).groups()
        if second is not None:
            second = float(second)
        return float(first), second

    @classmethod
    def from_tuple(cls, instance):
        """Format a 1- or 2-element tuple, raising if that dimensionality is unsupported."""
        if len(instance) > 1:
            if cls._2dim_format is None:
                raise SimpleTypeValueError(cls, ERR_1DIM_ONLY.format(
                    type=cls
                ))
            return cls._2dim_format.format(*instance)
        else:
            if cls._1dim_format is None:
                raise SimpleTypeValueError(cls, ERR_2DIM_ONLY.format(
                    type=cls
                ))
            return cls._1dim_format.format(*instance)

    @property
    def horizontal(self):
        # TODO: Caching of tuple
        # None for 1-dimensional values: the single component is the vertical one.
        tup_value = self.as_tuple(self)
        if tup_value[1] is not None:
            return tup_value[0]
        else:
            return None

    @property
    def vertical(self):
        # For 1-dimensional values the single parsed component is the vertical one.
        tup_value = self.as_tuple(self)
        if tup_value[1] is not None:
            return tup_value[1]
        else:
            return tup_value[0]

    @classmethod
    def _ConvertArguments_vx(cls, args, kw):
        # pyxb instantiation hook: fold consecutive numeric arguments into
        # formatted string values (pairs where possible, a trailing single
        # otherwise); non-numeric arguments pass through unchanged.
        result = []
        current_pair = []
        for item in args:
            if isinstance(item, int) or isinstance(item, float):
                current_pair.append(item)
                if len(current_pair) > 1:
                    result.append(cls.from_tuple(tuple(current_pair)))
                    current_pair = []
            else:
                result.append(item)
        if len(current_pair) > 0:
            result.append(cls.from_tuple(tuple(current_pair)))
        return tuple(result)

    def __eq__(self, other):
        # Component-wise comparison for same-typed values; plain string
        # comparison against text. NOTE(review): NotImplemented (truthy) is
        # returned for other types and no __ne__/__hash__ accompany this
        # __eq__ — confirm that is intended.
        if type(self) == type(other) and self.horizontal == other.horizontal and self.vertical == other.vertical:
            return True
        elif isinstance(other, six.text_type):
            return str(self) == str(other)
        else:
            return NotImplemented
class TimecountTimingType(_TimedeltaBindingMixin, ebuttdt_raw.timecountTimingType):
    """
    Extending the string type with conversions to and from timedelta
    """

    # NOTE: Update this regex should the spec change about this type
    _groups_regex = re.compile('(?P<numerator>[0-9]+(?:\\.[0-9]+)?)(?P<unit>h|ms|s|m)')

    # TODO: Consult and restrict this in an intuitive way to avoid awkward timing type combinations on the timing attributes.
    _compatible_timebases = {
        'begin': ['clock', 'media'],
        'dur': ['clock', 'media'],
        'end': ['clock', 'media']
    }

    @classmethod
    def as_timedelta(cls, instance):
        """
        Group expression with regex than switch on unit to create timedelta.

        :param instance: timecount string, e.g. '1.5h', '30s', '250ms'
        :return: datetime.timedelta
        """
        numerator, unit = cls._groups_regex.match(instance).groups()
        numerator = float(numerator)
        if unit == 's':
            return timedelta(seconds=numerator)
        elif unit == 'm':
            return timedelta(minutes=numerator)
        elif unit == 'h':
            return timedelta(hours=numerator)
        elif unit == 'ms':
            return timedelta(milliseconds=numerator)
        else:
            # Unreachable while the regex only admits h|ms|s|m.
            raise SimpleTypeValueError()

    @classmethod
    def from_timedelta(cls, instance):
        """
        Convert to one dimensional value.
        Find the smallest unit and create value using that.
        Consistency is ensured to the millisecond. Below that the number will be trimmed.

        :param instance: datetime.timedelta
        :return: timecount string
        """
        # Get the edge case out of the way even though validation will catch a 0 duration later
        if not instance:
            return '0s'
        hours, minutes, seconds, milliseconds = _get_time_members(instance)
        multiplier = 1
        numerator = 0
        unit = None
        if milliseconds:
            unit = 'ms'
            numerator += milliseconds
            multiplier *= 1000  # For the next level values so that the algo does not need to look back
        if unit or seconds:
            if not unit:
                unit = 's'
            numerator += seconds * multiplier
            multiplier *= 60
        if unit or minutes:
            if not unit:
                unit = 'm'
            numerator += minutes * multiplier
            multiplier *= 60
        if unit or hours:
            if not unit:
                unit = 'h'
            numerator += hours * multiplier
        if unit is None:
            # BUG FIX: a non-zero timedelta below one millisecond truncates to
            # all-zero members, which previously fell through and produced the
            # malformed string '0None'. Emit a well-formed zero value instead.
            return '0s'
        return '{}{}'.format(numerator, unit)
# Register the extended class so pyxb instantiates it in place of the raw
# generated timecountTimingType when parsing documents.
ebuttdt_raw.timecountTimingType._SetSupersedingClass(TimecountTimingType)
class FullClockTimingType(SemanticValidationMixin, _TimedeltaBindingMixin, ebuttdt_raw.fullClockTimingType):
    """
    Extending the string type with conversions to and from timedelta
    """

    _compatible_timebases = {
        'begin': ['media'],
        'dur': ['media'],
        'end': ['media']
    }
    # BUG FIX: raw string — '\.' in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Python);
    # the compiled pattern is unchanged.
    _groups_regex = re.compile(r'([0-9][0-9]+):([0-5][0-9]):([0-5][0-9]|60)(?:\.([0-9]+))?')

    @classmethod
    def compatible_timebases(cls):
        # Identical to the mixin implementation; retained for compatibility.
        return cls._compatible_timebases

    @classmethod
    def as_timedelta(cls, instance):
        """
        Using regex parse value and create timedelta

        :param instance: 'hh...:mm:ss(.fff)?' clock string
        :return: datetime.timedelta
        """
        hours_str, minutes_str, seconds_str, seconds_fraction_str = cls._groups_regex.match(instance).groups()
        # Right-pad/truncate the seconds fraction to exactly 3 digits so it reads as milliseconds.
        milliseconds = seconds_fraction_str and cls._int_or_none('{:0<3}'.format(seconds_fraction_str)[:3]) or 0
        return timedelta(hours=cls._int_or_none(hours_str),
                         minutes=cls._int_or_none(minutes_str),
                         seconds=cls._int_or_none(seconds_str),
                         milliseconds=milliseconds)

    @classmethod
    def from_timedelta(cls, instance):
        """
        Generate full clock type from timedelta

        :param instance: datetime.timedelta
        :return: 'hh:mm:ss(.fff)?' clock string
        """
        hours, minutes, seconds, milliseconds = _get_time_members(instance)
        if milliseconds:
            return '{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}'.format(
                hours=hours,
                minutes=minutes,
                seconds=seconds,
                milliseconds=milliseconds
            )
        else:
            return '{hours:02d}:{minutes:02d}:{seconds:02d}'.format(
                hours=hours,
                minutes=minutes,
                seconds=seconds
            )
# Register the extended class so pyxb instantiates it in place of the raw
# generated fullClockTimingType when parsing documents.
ebuttdt_raw.fullClockTimingType._SetSupersedingClass(FullClockTimingType)
class LimitedClockTimingType(_TimedeltaBindingMixin, ebuttdt_raw.limitedClockTimingType):
    """
    Extending the string type with conversions to and from timedelta
    """

    _compatible_timebases = {
        'begin': ['clock'],
        'dur': ['clock'],
        'end': ['clock']
    }
    # BUG FIX: raw string — '\.' in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Python);
    # the compiled pattern is unchanged. Hours are limited to two digits.
    _groups_regex = re.compile(r'([0-9][0-9]):([0-5][0-9]):([0-5][0-9]|60)(?:\.([0-9]+))?')

    @classmethod
    def as_timedelta(cls, instance):
        """
        Using regex parse value and create timedelta

        :param instance: 'hh:mm:ss(.fff)?' clock string
        :return: datetime.timedelta
        """
        hours_str, minutes_str, seconds_str, seconds_fraction_str = cls._groups_regex.match(instance).groups()
        # Right-pad/truncate the seconds fraction to exactly 3 digits so it reads as milliseconds.
        milliseconds = seconds_fraction_str and cls._int_or_none('{:0<3}'.format(seconds_fraction_str)[:3]) or 0
        return timedelta(hours=cls._int_or_none(hours_str),
                         minutes=cls._int_or_none(minutes_str),
                         seconds=cls._int_or_none(seconds_str),
                         milliseconds=milliseconds)

    @classmethod
    def from_timedelta(cls, instance):
        """
        Generate limited clock type from timedelta

        :param instance: datetime.timedelta
        :raises TimeFormatOverflowError: if the duration exceeds 99 hours
        :return: 'hh:mm:ss(.fff)?' clock string
        """
        hours, minutes, seconds, milliseconds = _get_time_members(instance)
        # We have our most significant value. Time for range check
        if hours > 99:
            raise TimeFormatOverflowError(ERR_TIME_FORMAT_OVERFLOW)
        if milliseconds:
            return '{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}'.format(
                hours=hours,
                minutes=minutes,
                seconds=seconds,
                milliseconds=milliseconds
            )
        else:
            return '{hours:02d}:{minutes:02d}:{seconds:02d}'.format(
                hours=hours,
                minutes=minutes,
                seconds=seconds
            )
# Register the extended class so pyxb instantiates it in place of the raw
# generated limitedClockTimingType when parsing documents.
ebuttdt_raw.limitedClockTimingType._SetSupersedingClass(LimitedClockTimingType)
# NOTE: Some of the code below includes handling of SMPTE time base, which was removed from version 1.0 of the specification.
# Here comes the tricky one. The SMPTE requires knowledge about frames. The top level tt element knows the frameRate.
# Unfortunately the conversion methods run before the object gets created let alone inserted into a document structure.
# The conversion paradigm of storing data in the xml | |
sanitized_identifier[11:14]
if ( len(sanitized_identifier) > 14 ):
readable_identifier += "-" + sanitized_identifier[14:18]
return readable_identifier
def truncate_to_study_id( identifier ):
    """Return the study portion (first 8 characters) of the sanitized identifier."""
    return sanitize_identifier( identifier )[:8]
def truncate_to_series_id( identifier ):
    """Return the series portion (first 11 characters) of the sanitized identifier."""
    return sanitize_identifier( identifier )[:11]
def truncate_to_station_id( identifier ):
    """Return the station portion (first 14 characters) of the sanitized identifier."""
    return sanitize_identifier( identifier )[:14]
def add_study( cursor, flow_class_id, year, study_number, study_type_id,
               outlier=False, note_ids=None, study_external_ids=None, ):
    """Insert a study row plus its notes and external identifiers.

    Returns the generated study_id.
    """
    # None sentinels instead of mutable default arguments ([] / {}), which
    # are shared between calls.
    note_ids = [] if note_ids is None else note_ids
    study_external_ids = {} if study_external_ids is None else study_external_ids
    study_id = identify_study( flow_class_id, year, study_number )
    cursor.execute(
    """
    INSERT INTO studies( study_id, flow_class_id, year, study_number,
                         study_type_id, outlier )
    VALUES( ?, ?, ?, ?, ?, ? );
    """,
    (
        study_id,
        str(flow_class_id),
        int(year),
        int(study_number),
        str(study_type_id),
        int(outlier),
    )
    )
    for note_id in note_ids:
        cursor.execute(
        """
        INSERT INTO study_notes( study_id, note_id )
        VALUES( ?, ? );
        """,
        (
            study_id,
            int(note_id),
        )
        )
    for compilation_id in study_external_ids:
        cursor.execute(
        """
        INSERT INTO study_external_ids( study_id, compilation_id,
                                        study_external_id )
        VALUES( ?, ?, ? );
        """,
        (
            study_id,
            int(compilation_id),
            study_external_ids[compilation_id],
        )
        )
    return study_id
def update_study_description( cursor, study_id, study_description ):
    """Replace the free-text description of an existing study."""
    params = (
        study_description.strip(),
        sanitize_identifier(study_id),
    )
    cursor.execute(
    """
    UPDATE studies
    SET study_description=?
    WHERE study_id=?;
    """,
    params
    )
def update_study_provenance( cursor, study_id, study_provenance ):
    """Replace the provenance text of an existing study."""
    # BUG FIX: the statement previously ended with 'WHERE study_id=?:' — the
    # stray colon (instead of a semicolon) is invalid SQL, so this update
    # always raised an error. Compare update_study_description above.
    cursor.execute(
    """
    UPDATE studies
    SET study_provenance=?
    WHERE study_id=?;
    """,
    (
        study_provenance.strip(),
        sanitize_identifier(study_id),
    )
    )
def create_value_types_list( value_type_id ):
    """Expand VT_BOTH_AVERAGES into its two concrete average types; otherwise wrap the id in a list."""
    if ( value_type_id != VT_BOTH_AVERAGES ):
        return [ value_type_id ]
    return [ VT_DENSITY_WEIGHTED_AVERAGE,
             VT_UNWEIGHTED_AVERAGE, ]
def set_study_value( cursor, study_id, quantity_id, value,
                     value_type_id=VT_UNAVERAGED_VALUE,
                     meastech_ids=None, meastech_set=PRIMARY_MT_SET,
                     corrected=False, outlier=False, note_ids=None ):
    """Insert a study-level value (with uncertainty, measurement techniques
    and notes) for each expanded value type."""
    # None sentinels instead of mutable default arguments shared between calls.
    meastech_ids = [] if meastech_ids is None else meastech_ids
    note_ids = [] if note_ids is None else note_ids
    study_value, study_uncertainty = split_float( value )
    for value_type_id in create_value_types_list( value_type_id ):
        cursor.execute(
        """
        INSERT INTO study_values( study_id, quantity_id,
                                  value_type_id, meastech_set,
                                  study_value, study_uncertainty,
                                  corrected, outlier )
        VALUES( ?, ?, ?, ?, ?, ?, ?, ? );
        """,
        (
            sanitize_identifier(study_id),
            str(quantity_id),
            value_type_id,
            meastech_set,
            study_value,
            study_uncertainty,
            int(corrected),
            int(outlier),
        )
        )
        for meastech_id in meastech_ids:
            cursor.execute(
            """
            INSERT INTO study_values_mt( study_id, quantity_id, value_type_id,
                                         meastech_set, meastech_id )
            VALUES( ?, ?, ?, ?, ? );
            """,
            (
                sanitize_identifier(study_id),
                str(quantity_id),
                value_type_id,
                meastech_set,
                meastech_id,
            )
            )
        for note_id in note_ids:
            cursor.execute(
            """
            INSERT INTO study_value_notes( study_id, quantity_id,
                                           value_type_id, meastech_set,
                                           note_id )
            VALUES( ?, ?, ?, ?, ? );
            """,
            (
                sanitize_identifier(study_id),
                str(quantity_id),
                value_type_id,
                meastech_set,
                int(note_id),
            )
            )
def get_study_value( cursor, study_id, quantity_id,
                     value_type_id=VT_ANY_AVERAGE,
                     meastech_set=PRIMARY_MT_SET, ):
    """Fetch a study value and its uncertainty, resolving VT_ANY_AVERAGE to
    whichever average type is actually stored."""
    clean_study_id = sanitize_identifier(study_id)
    final_value_type_id = value_type_id
    if ( value_type_id == VT_ANY_AVERAGE ):
        cursor.execute(
        """
        SELECT value_type_id
        FROM study_values
        WHERE study_id=? AND quantity_id=? AND meastech_set=?;
        """,
        (
            clean_study_id,
            str(quantity_id),
            meastech_set,
        )
        )
        available_types = [ str(row[0]) for row in cursor.fetchall() ]
        final_value_type_id = pick_any_average_value_type( available_types )
    cursor.execute(
    """
    SELECT study_value, study_uncertainty
    FROM study_values
    WHERE study_id=? AND quantity_id=? AND value_type_id=? AND meastech_set=?;
    """,
    (
        clean_study_id,
        str(quantity_id),
        final_value_type_id,
        meastech_set,
    )
    )
    return fetch_float( cursor )
def add_study_source( cursor, study_id, citation_key, source_classification ):
    """Link a study to a literature source with its classification."""
    params = (
        sanitize_identifier(study_id),
        str(citation_key),
        int(source_classification),
    )
    cursor.execute(
    """
    INSERT INTO study_sources( study_id, citation_key, source_classification )
    VALUES( ?, ?, ? );
    """,
    params
    )
def add_series( cursor, flow_class_id, year, study_number, series_number,
                number_of_dimensions, coordinate_system_id, outlier=False,
                note_ids=None, series_external_ids=None, ):
    """Insert a series row plus its notes and external identifiers.

    Returns the generated series_id.
    """
    # None sentinels instead of mutable default arguments shared between calls.
    note_ids = [] if note_ids is None else note_ids
    series_external_ids = {} if series_external_ids is None else series_external_ids
    series_id = identify_series(
        flow_class_id,
        year,
        study_number,
        series_number,
    )
    study_id = identify_study(
        flow_class_id,
        year,
        study_number,
    )
    cursor.execute(
    """
    INSERT INTO series( series_id, study_id, series_number, number_of_dimensions,
                        coordinate_system_id, outlier )
    VALUES( ?, ?, ?, ?, ?, ? );
    """,
    (
        series_id,
        study_id,
        int(series_number),
        int(number_of_dimensions),
        str(coordinate_system_id),
        int(outlier),
    )
    )
    for note_id in note_ids:
        cursor.execute(
        """
        INSERT INTO series_notes( series_id, note_id )
        VALUES( ?, ? );
        """,
        (
            series_id,
            int(note_id),
        )
        )
    for compilation_id in series_external_ids:
        cursor.execute(
        """
        INSERT INTO series_external_ids( series_id, compilation_id,
                                         series_external_id )
        VALUES( ?, ?, ? );
        """,
        (
            series_id,
            int(compilation_id),
            series_external_ids[compilation_id],
        )
        )
    return series_id
def update_series_geometry( cursor, series_id, geometry_id ):
    """Set the geometry classification of an existing series."""
    params = (
        str(geometry_id),
        sanitize_identifier(series_id),
    )
    cursor.execute(
    """
    UPDATE series
    SET geometry_id=?
    WHERE series_id=?;
    """,
    params
    )
def update_series_description( cursor, series_id, series_description ):
    """Replace the free-text description of an existing series."""
    params = (
        series_description.strip(),
        sanitize_identifier(series_id),
    )
    cursor.execute(
    """
    UPDATE series
    SET series_description=?
    WHERE series_id=?;
    """,
    params
    )
def set_series_value( cursor, series_id, quantity_id, value,
                      value_type_id=VT_UNAVERAGED_VALUE,
                      meastech_ids=None, meastech_set=PRIMARY_MT_SET,
                      corrected=False, outlier=False, note_ids=None ):
    """Insert a series-level value (with uncertainty, measurement techniques
    and notes) for each expanded value type."""
    # None sentinels instead of mutable default arguments shared between calls.
    meastech_ids = [] if meastech_ids is None else meastech_ids
    note_ids = [] if note_ids is None else note_ids
    series_value, series_uncertainty = split_float( value )
    for value_type_id in create_value_types_list( value_type_id ):
        cursor.execute(
        """
        INSERT INTO series_values( series_id, quantity_id,
                                   value_type_id, meastech_set,
                                   series_value, series_uncertainty,
                                   corrected, outlier )
        VALUES( ?, ?, ?, ?, ?, ?, ?, ? );
        """,
        (
            sanitize_identifier(series_id),
            str(quantity_id),
            value_type_id,
            meastech_set,
            series_value,
            series_uncertainty,
            int(corrected),
            int(outlier),
        )
        )
        for meastech_id in meastech_ids:
            cursor.execute(
            """
            INSERT INTO series_values_mt( series_id, quantity_id,
                                          value_type_id, meastech_set,
                                          meastech_id )
            VALUES( ?, ?, ?, ?, ? );
            """,
            (
                sanitize_identifier(series_id),
                str(quantity_id),
                value_type_id,
                meastech_set,
                meastech_id,
            )
            )
        for note_id in note_ids:
            cursor.execute(
            """
            INSERT INTO series_value_notes( series_id, quantity_id,
                                            value_type_id, meastech_set,
                                            note_id )
            VALUES( ?, ?, ?, ?, ? );
            """,
            (
                sanitize_identifier(series_id),
                str(quantity_id),
                value_type_id,
                meastech_set,
                int(note_id),
            )
            )
def get_series_value( cursor, series_id, quantity_id,
                      value_type_id=VT_ANY_AVERAGE,
                      meastech_set=PRIMARY_MT_SET, ):
    """Fetch a series value and its uncertainty, resolving VT_ANY_AVERAGE to
    whichever average type is actually stored."""
    clean_series_id = sanitize_identifier(series_id)
    final_value_type_id = value_type_id
    if ( value_type_id == VT_ANY_AVERAGE ):
        cursor.execute(
        """
        SELECT value_type_id
        FROM series_values
        WHERE series_id=? AND quantity_id=? AND meastech_set=?;
        """,
        (
            clean_series_id,
            str(quantity_id),
            meastech_set,
        )
        )
        available_types = [ str(row[0]) for row in cursor.fetchall() ]
        final_value_type_id = pick_any_average_value_type( available_types )
    cursor.execute(
    """
    SELECT series_value, series_uncertainty
    FROM series_values
    WHERE series_id=? AND quantity_id=? AND value_type_id=? AND meastech_set=?;
    """,
    (
        clean_series_id,
        str(quantity_id),
        final_value_type_id,
        meastech_set,
    )
    )
    return fetch_float( cursor )
def add_station( cursor, flow_class_id, year, study_number, series_number,
                 station_number, outlier=False, note_ids=None,
                 station_external_ids=None, ):
    """Insert a station row plus its notes and external identifiers.

    Returns the generated station_id.
    """
    # None sentinels instead of mutable default arguments shared between calls.
    note_ids = [] if note_ids is None else note_ids
    station_external_ids = {} if station_external_ids is None else station_external_ids
    station_id = identify_station(
        flow_class_id,
        year,
        study_number,
        series_number,
        station_number,
    )
    series_id = identify_series(
        flow_class_id,
        year,
        study_number,
        series_number,
    )
    study_id = identify_study(
        flow_class_id,
        year,
        study_number,
    )
    cursor.execute(
    """
    INSERT INTO stations( station_id, series_id, study_id, station_number,
                          outlier )
    VALUES( ?, ?, ?, ?, ? );
    """,
    (
        station_id,
        series_id,
        study_id,
        int(station_number),
        int(outlier),
    )
    )
    for note_id in note_ids:
        cursor.execute(
        """
        INSERT INTO station_notes( station_id, note_id )
        VALUES( ?, ? );
        """,
        (
            station_id,
            int(note_id),
        )
        )
    for compilation_id in station_external_ids:
        cursor.execute(
        """
        INSERT INTO station_external_ids( station_id, compilation_id,
                                          station_external_id )
        VALUES( ?, ?, ? );
        """,
        (
            station_id,
            int(compilation_id),
            station_external_ids[compilation_id],
        )
        )
    return station_id
def set_station_value( cursor, station_id, quantity_id, value,
                       value_type_id=VT_UNAVERAGED_VALUE,
                       meastech_ids=None, meastech_set=PRIMARY_MT_SET,
                       corrected=False, outlier=False, note_ids=None ):
    """Insert a station-level value (with uncertainty, measurement techniques
    and notes) for each expanded value type."""
    # None sentinels instead of mutable default arguments shared between calls.
    meastech_ids = [] if meastech_ids is None else meastech_ids
    note_ids = [] if note_ids is None else note_ids
    station_value, station_uncertainty = split_float( value )
    for value_type_id in create_value_types_list( value_type_id ):
        cursor.execute(
        """
        INSERT INTO station_values( station_id, quantity_id,
                                    value_type_id, meastech_set,
                                    station_value, station_uncertainty,
                                    corrected, outlier )
        VALUES( ?, ?, ?, ?, ?, ?, ?, ? );
        """,
        (
            sanitize_identifier(station_id),
            str(quantity_id),
            value_type_id,
            meastech_set,
            station_value,
            station_uncertainty,
            int(corrected),
            int(outlier),
        )
        )
        for meastech_id in meastech_ids:
            cursor.execute(
            """
            INSERT INTO station_values_mt( station_id, quantity_id,
                                           value_type_id, meastech_set,
                                           meastech_id )
            VALUES( ?, ?, ?, ?, ? );
            """,
            (
                sanitize_identifier(station_id),
                str(quantity_id),
                value_type_id,
                meastech_set,
                meastech_id,
            )
            )
        for note_id in note_ids:
            cursor.execute(
            """
            INSERT INTO station_value_notes( station_id, quantity_id,
                                             value_type_id, meastech_set,
                                             note_id )
            VALUES( ?, ?, ?, ?, ? );
            """,
            (
                sanitize_identifier(station_id),
                str(quantity_id),
                value_type_id,
                meastech_set,
                int(note_id),
            )
            )
def get_points_at_station( cursor, station_id ):
    """Return the identifiers (as strings) of all points at a station."""
    cursor.execute(
    """
    SELECT point_id
    FROM points
    WHERE station_id=?;
    """,
    (
        station_id,
    )
    )
    return [ str(row[0]) for row in cursor.fetchall() ]
def set_constant_profile( cursor, station_id, quantity_id, value,
                          value_type_id=VT_UNAVERAGED_VALUE,
                          meastech_ids=None, meastech_set=PRIMARY_MT_SET,
                          corrected=False, outlier=False, note_ids=[] ):
    """Set the same value for a quantity at every point of a station."""
    # None sentinel instead of a mutable default argument shared between calls.
    meastech_ids = [] if meastech_ids is None else meastech_ids
    for point_id in get_points_at_station( cursor, station_id ):
        set_point_value(
            cursor,
            point_id,
            quantity_id,
            value,
            value_type_id=value_type_id,
            meastech_ids=meastech_ids,
            meastech_set=meastech_set,
            # BUG FIX: was hard-coded to False, silently discarding the
            # caller's `corrected` argument.
            corrected=corrected,
            outlier=outlier,
            note_ids=note_ids,
        )
def get_station_value( cursor, station_id, quantity_id,
                       value_type_id=VT_ANY_AVERAGE,
                       meastech_set=PRIMARY_MT_SET, ):
    """Fetch a station value and its uncertainty, resolving VT_ANY_AVERAGE to
    whichever average type is actually stored."""
    clean_station_id = sanitize_identifier(station_id)
    final_value_type_id = value_type_id
    if ( value_type_id == VT_ANY_AVERAGE ):
        cursor.execute(
        """
        SELECT value_type_id
        FROM station_values
        WHERE station_id=? AND quantity_id=? AND meastech_set=?;
        """,
        (
            clean_station_id,
            str(quantity_id),
            meastech_set,
        )
        )
        available_types = [ str(row[0]) for row in cursor.fetchall() ]
        final_value_type_id = pick_any_average_value_type( available_types )
    cursor.execute(
    """
    SELECT station_value, station_uncertainty
    FROM station_values
    WHERE station_id=? AND quantity_id=? AND value_type_id=? AND meastech_set=?;
    """,
    (
        clean_station_id,
        str(quantity_id),
        final_value_type_id,
        meastech_set,
    )
    )
    return fetch_float( cursor )
def add_point( cursor, flow_class_id, year, study_number, series_number,
               station_number, point_number, point_label_id=None,
               outlier=False, note_ids=[], point_external_ids={}, ):
    # Insert a point row plus its notes and external identifiers.
    # NOTE(review): mutable default arguments ([] / {}) — harmless here since
    # they are only iterated, but None sentinels would be safer.
    # NOTE(review): unlike add_study/add_series/add_station no `return point_id`
    # is visible at the end of this function — confirm against the full file.
    point_id = identify_point(
        flow_class_id,
        year,
        study_number,
        series_number,
        station_number,
        point_number,
    )
    station_id = identify_station(
        flow_class_id,
        year,
        study_number,
        series_number,
        station_number,
    )
    series_id = identify_series(
        flow_class_id,
        year,
        study_number,
        series_number,
    )
    study_id = identify_study(
        flow_class_id,
        year,
        study_number,
    )
    cursor.execute(
    """
    INSERT INTO points( point_id, station_id, series_id, study_id,
                        point_number, point_label_id, outlier )
    VALUES( ?, ?, ?, ?, ?, ?, ? );
    """,
    (
        point_id,
        station_id,
        series_id,
        study_id,
        int(point_number),
        point_label_id,
        int(outlier),
    )
    )
    for note_id in note_ids:
        cursor.execute(
        """
        INSERT INTO point_notes( point_id, note_id )
        VALUES( ?, ? );
        """,
        (
            point_id,
            int(note_id),
        )
        )
    for compilation_id in point_external_ids:
        cursor.execute(
        """
        INSERT INTO point_external_ids( point_id, compilation_id,
                                        point_external_id )
        VALUES( ?, ?, ? );
        """,
        (
            point_id,
            int(compilation_id),
            point_external_ids[compilation_id],
        )
        )
| |
"""Configuration for a stack."""
# Copyright (C) 2015 <NAME>, <NAME> and <NAME>.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
import networkx
from faucet.conf import Conf, test_config_condition
class Stack(Conf):
"""Stores state related to DP stack information, this includes the current elected root as that
is technically a fixed allocation for this DP Stack instance."""
defaults = {
# Sets the root priority value of the current DP with stacking
'priority': None,
# Use the stack route algorithms, will be forced true if routing is enabled
'route_learning': False,
# Number of update time intervals for a down stack node to still be considered healthy
'down_time_multiple': 3,
# Minimum percentage value of required UP stack ports for this stack
# node to be considered healthy
'min_stack_health': 1.0,
# Minimum percentage value of required UP LACP ports for this stack
# node to be considered healthy
'min_lacp_health': 1.0,
}
defaults_types = {
'priority': int,
'route_learning': bool,
'down_time_multiple': int,
'min_stack_health': float,
'min_lacp_health': float,
}
def __init__(self, _id, dp_id, name, canonical_port_order, lacp_down_ports, lacp_ports, conf):
"""
Constructs a new stack object
Args:
_id (str): Name of the configuration key
dp_id (int): DP ID of the DP that holds this stack instance
name (str): Name of the DP that holds this stack instance
canonical_port_order (func): Function to order ports in a standardized way
lacp_down_ports (func): Returns a tuple of the not UP LACP ports for this stack node
lacp_ports (func): Returns a tuple of all LACP ports for this stack node
conf (dict): Stack configuration
"""
self.name = name
# Function to order ports in a standardized way
self.canonical_port_order = canonical_port_order
self.lacp_down_ports = lacp_down_ports
self.lacp_ports = lacp_ports
# Stack configuration options
self.priority = None
self.route_learning = None
self.down_time_multiple = None
self.min_stack_health = None
self.min_lacp_health = None
# Ports that have stacking configured
self.ports = []
# Stack graph containing all the DPs & ports in the stacking topology
self.graph = None
# Additional stacking information
self.root_name = None
self.roots_names = None
self.root_flood_reflection = None
# Whether the stack node is currently healthy
# dyn_healthy_info := (<running>, <stack ports>, <lacp ports>)
self.dyn_healthy_info = (False, 0.0, 0.0)
self.dyn_healthy = False
super().__init__(_id, dp_id, conf)
def clone_dyn_state(self, prev_stack, dps=None):
"""Copy dyn state from the old stack instance when warm/cold starting"""
if prev_stack:
self.dyn_healthy = prev_stack.dyn_healthy
self.dyn_healthy_info = prev_stack.dyn_healthy_info
if dps:
stack_port_dps = [dp for dp in dps if dp.stack_ports()]
for dp in stack_port_dps:
for port in dp.stack_ports():
port_up = False
if port.is_stack_up():
port_up = True
elif port.is_stack_init() and port.stack['port'].is_stack_up():
port_up = True
self.modify_link(dp, port, add=port_up)
def live_timeout_healthy(self, last_live_time, now, update_time):
"""
Determines the timeout of the current stack node, and whether
the current stack node can be considered healthy according to
the `down_time_multiple` number of stack root update time intervals.
Args:
last_live_time (float): Last known live time for this current stack node
now (float): Current time
update_time (int): Update time interval
Return:
bool: If node down time is still in update time interval threshold; considered healthy,
float: Time elapsed since timed out
"""
# Time elapsed for the number of safe down time multiples before considered unhealthy
down_time = self.down_time_multiple * update_time
# Final time at which nodes are still considered healthy
health_timeout = now - down_time
# If node last known live time was greater than the health timeout then it is healthy
timeout_healthy = last_live_time >= health_timeout
return timeout_healthy, health_timeout
def stack_port_healthy(self):
"""
Determines the percentage of UP stack ports, and whether
the current stack node can be considered healthy according to
the `min_stack_health` configuration option.
Return:
bool: Whether threshold from DOWN stack ports is met; considered healthy,
float: Percentage of stack ports UP out of all stack ports
"""
down_ports = self.down_ports()
all_ports = self.ports
if len(all_ports) == 0:
return True, 1.0
percentage = 1.0 - (float(len(down_ports) / float(len(all_ports))))
stack_ports_healthy = percentage >= self.min_stack_health
return stack_ports_healthy, percentage
def lacp_port_healthy(self):
"""
Determines the percentage of UP LACP ports, and whether
the current stack node can be considered healthy according to
the `min_lacp_health` configuration option.
Return:
bool: Whether threshold from DOWN LACP ports is met; considered healthy,
float: Percentage of LACP ports UP out of all lacp ports
"""
down_ports = self.lacp_down_ports()
all_ports = self.lacp_ports()
if len(all_ports) == 0:
return True, 1.0
percentage = 1.0 - (float(len(down_ports) / float(len(all_ports))))
lacp_ports_healthy = percentage >= self.min_lacp_health
return lacp_ports_healthy, percentage
    def update_health(self, now, dp_last_live_time, update_time):
        """
        Determines whether the current stack node is healthy

        Args:
            now (float): Current time
            dp_last_live_time (dict): Last live time value for each DP, keyed by DP name
            update_time (int): Stack root update interval time
        Return:
            bool: Current stack node health state,
            str: Reason for the current state
        """
        reason = ''
        # Missing DP defaults to 0, i.e. never seen alive.
        last_live_time = dp_last_live_time.get(self.name, 0)
        timeout_healthy, health_timeout = self.live_timeout_healthy(
            last_live_time, now, update_time)
        if not timeout_healthy:
            # Too long since DP last running, if DP not running then
            # number of UP stack or LACP ports should be 0
            reason += 'last running %us ago (timeout %us)' % (now - last_live_time, health_timeout)
            self.dyn_healthy_info = (False, 0.0, 0.0)
            self.dyn_healthy = False
            return self.dyn_healthy, reason
        reason += 'running %us ago' % (now - last_live_time)
        if reason:
            reason += ', '
        stack_ports_healthy, stack_percentage = self.stack_port_healthy()
        if not stack_ports_healthy:
            # The number of DOWN stack ports surpasses the threshold for DOWN stack port tolerance
            reason += 'stack ports %s (%.0f%%) not up' % (
                list(self.down_ports()), (1.0 - stack_percentage) * 100.0)
        else:
            reason += '%.0f%% stack ports running' % (stack_percentage * 100.0)
        if self.lacp_ports():
            if reason:
                reason += ', '
            lacp_ports_healthy, lacp_percentage = self.lacp_port_healthy()
            if not lacp_ports_healthy:
                # The number of DOWN LACP ports surpasses the threshold for DOWN LACP port tolerance
                reason += 'lacp ports %s (%.0f%%) not up' % (
                    list(self.lacp_down_ports()), (1.0 - lacp_percentage) * 100.0)
            else:
                reason += '%.0f%% lacp ports running' % (lacp_percentage * 100.0)
        else:
            # No LACP ports in node, so default to 100% UP & don't report information
            lacp_ports_healthy = True
            lacp_percentage = 0.0
        # Record the component health so nominate_stack_root can rank nodes.
        self.dyn_healthy_info = (timeout_healthy, stack_percentage, lacp_percentage)
        if timeout_healthy and stack_ports_healthy and lacp_ports_healthy:
            self.dyn_healthy = True
        else:
            self.dyn_healthy = False
        return self.dyn_healthy, reason
@staticmethod
def nominate_stack_root(stacks):
"""Return stack names in priority order and the chosen root"""
def health_priority(stack):
# Invert the health priority info so it is sorted correctly
# in relation to priority and the binary health
invert_info = (1.0 - stack.dyn_healthy_info[1],
1.0 - stack.dyn_healthy_info[2])
return (not stack.dyn_healthy, *invert_info, stack.priority, stack.dp_id)
stack_priorities = sorted(stacks, key=health_priority)
priority_names = tuple(stack.name for stack in stack_priorities)
nominated_name = priority_names[0]
return priority_names, nominated_name
def resolve_topology(self, dps, meta_dp_state):
"""
Resolve & verify correct inter-DP stacking config
Args:
dps (list): List of configured DPs
meta_dp_state (MetaDPState): Provided if reloading when choosing a new root DP
"""
stack_dps = [dp for dp in dps if dp.stack is not None]
stack_priority_dps = [dp for dp in stack_dps if dp.stack.priority]
stack_port_dps = [dp for dp in dps if dp.stack_ports()]
if not stack_priority_dps:
test_config_condition(stack_dps, 'stacking enabled but no root DP')
return
if not self.ports:
return
for dp in stack_priority_dps:
test_config_condition(not isinstance(dp.stack.priority, int), (
'stack priority must be type %s not %s' % (
int, type(dp.stack.priority))))
test_config_condition(dp.stack.priority <= 0, (
'stack priority must be > 0'))
self.roots_names, self.root_name = self.nominate_stack_root(
[dp.stack for dp in stack_priority_dps])
if meta_dp_state:
# If meta_dp_state exists, then we are reloading a new instance of the stack
# for a new 'dynamically' chosen root
if meta_dp_state.stack_root_name in self.roots_names:
self.root_name = meta_dp_state.stack_root_name
for | |
isinstance(args[0], tuple):
return [symbol, (None, [oplist, args[0]]) + kwargs]
else:
return [symbol, (None, args + kwargs)]
method = [m for m in methods_supporting_dtype if symbol.find(m, 0) == 0]
if len(method) == 1:
symbol = symbol.replace(method[0], method[0] + dtype)
return [symbol, args + kwargs]
# def _ClassDef(self, node):
# """class ClassDef(name, bases, keywords, starargs, kwargs, body,
# decorator_list)
# `name` is a raw string for the class name.
# `bases` is a list of nodes for explicitly specified base classes.
# `keywords` is a list of keyword nodes, principally for `metaclass`.
# Other keywords will be passed to the metaclass, as per PEP-3115.
# `starargs` removed in python 3.5.
# `kwargs` removed in Python 3.5.
# `body` is a list of nodes representing the code within the class
# definition.
# `decorator_list` is the list of decorators to be applied, stored
# outermost first (i.e. the first in the list will be applied last).
# """
# PhySL.defined_classes[node.name] = {}
# if node.bases:
# raise NotImplementedError("Phylanx does not support inheritance.")
# class_body = list(self._apply_rule(m) for m in node.body)
# return class_body
def _Compare(self, node):
    """class Compare(left, ops, comparators)
    A comparison of two or more values.
    `left` is the first value in the comparison
    `ops` is the list of operators
    `comparators` is the list of values after the first (`left`).
    """
    if len(node.ops) == 1:
        left = self._apply_rule(node.left)
        op = get_symbol_info(node, self._apply_rule(node.ops[0]))
        right = self._apply_rule(node.comparators[0])
        return [op, (left, right)]
    # if we're dealing with more than one comparison, we canonicalize the
    # comparisons in to the form of chained logical ands. e.g., a < b < c
    # becomes: ([__and ((__lt b, c), (__lt a, b))])
    # TODO: Make sure to respect Python operator precedence.
    comparison = []
    # Walk the chain right-to-left, starting at i == 1: the first
    # comparison (node.left <ops[0]> comparators[0]) is appended after
    # the loop. Starting the range at 0 (as this code previously did)
    # emitted a spurious, operand-swapped duplicate of ops[0]
    # (e.g. (__lt c, b) for a < b < c).
    for i in range(1, len(node.ops)):
        op = self._apply_rule(node.ops[-i])
        left = self._apply_rule(node.comparators[-i - 1])
        right = self._apply_rule(node.comparators[-i])
        if comparison:
            comparison = ['__and', (*comparison, (op, (left, right)))]
        else:
            comparison = [(op, (left, right))]
    # Finally fold in the left-most comparison (node.left vs the first
    # comparator).
    op = self._apply_rule(node.ops[0])
    left = self._apply_rule(node.left)
    right = self._apply_rule(node.comparators[0])
    if comparison:
        return ['__and', (*comparison, (op, (left, right)))]
    return [op, (left, right)]
def _comprehension(self, node):
    """class comprehension(target, iter, ifs, is_async)
    One `for` clause in a comprehension.
    `target` is the reference bound to each element (a Name or Tuple).
    `iter` is the object being iterated over.
    `ifs` is a list of filter expressions and `is_async` marks async
    comprehensions; neither is used here.
    """
    # The caller (e.g. _ListComp) consumes this as a small mapping.
    return {
        'target': self._apply_rule(node.target),
        'iter': self._apply_rule(node.iter),
    }
def _Constant(self, node):
"""A constant value."""
import sys
if sys.version_info.minor <= 7:
return self._apply_rule(node.value)
# starting V3.8 string and number literals are represented as _Constant
# nodes
if isinstance(node.value, str):
return self._Str(node)
# special integral values need special handling
name_constants = {None: 'nil', False: 'false', True: 'true'}
if isinstance(node.value, bool):
return name_constants[node.value]
if node.value is None:
return name_constants[node.value]
# everything that's not a string can be directly passed on
return '%s' % node.value
def _Div(self, node):
"""Leaf node, returning raw string of the 'division' operation."""
return '__div'
def _Eq(self, node):
"""Leaf node, returning raw string of the 'equality' operation."""
return '__eq'
def _Expr(self, node):
    """class Expr(value)
    `value` holds one of the other nodes (rules).
    """
    # An expression statement is just its wrapped expression.
    return self._apply_rule(node.value)
def _ExtSlice(self, node):
    """class ExtSlice(dims)
    Advanced (multi-dimensional) slicing.
    `dims` holds a list of `Slice` and `Index` nodes, each translated
    independently; the result is the list of translations.
    """
    return [self._apply_rule(dim) for dim in node.dims]
def _block(self, node):
    """Return the PhySL representation of a block of statements.

    A single (non-list) node becomes a 1-tuple of its translation. A
    list of nodes is translated element-wise; a single-statement list
    stays a bare tuple, while longer lists are wrapped in ['block', ...].
    """
    if not isinstance(node, list):
        return (self._apply_rule(node), )
    statements = tuple(map(self._apply_rule, node))
    if len(node) == 1:
        return statements
    return ['block', statements]
def _For(self, node):
    """class For(target, iter, body, orelse)
    A for loop.
    `target` holds the variable(s) the loop assigns to, as a single Name,
    Tuple or List node.
    `iter` holds the item to be looped over, again as a single node.
    `body` contain lists of nodes to execute.
    `orelse` same as `body`, however, those in orelse are executed if the
    loop finishes normally, rather than via a break statement.

    NOTE(review): `orelse` is currently ignored (see the commented-out
    lines at the bottom) -- loops with an `else` clause silently drop it.
    """
    # this lookup table helps us to choose the right mapping function based on the
    # type of the iteration space (list, range, or prange).
    mapping_function = {
        'list': 'for_each',
        'slice': 'for_each',
        'range': 'for_each',
        'prange': 'parallel_map'
    }
    target = self._apply_rule(node.target)
    # TODO: **MAP**
    # target_name = target.split('$', 1)[0]
    # self.defined.add(target_name)
    iteration_space = self._apply_rule(node.iter)
    # Special case: iterating a zip() -- lower to for_each over a fused
    # iterator, with per-iteration index bindings prepended to the body.
    if isinstance(iteration_space, list) and iteration_space[0].startswith('zip'):
        iter_space, indices = physl_zip(node)
        symbol = get_symbol_info(node, 'for_each')
        body = self._block(node.body)
        body = ['block', (*indices, *body)]
        op = get_symbol_info(node, 'lambda')
        return [symbol, ([op, ('__physl_iterator', body)], iter_space)]
    # extract the type of the iteration space- used as the lookup key in
    # `mapping_function` dictionary above.
    if isinstance(iteration_space, list):
        # Symbols look like 'range$<line>$<col>'; the part before '$' is
        # the primitive name used as the lookup key.
        symbol_name = mapping_function[iteration_space[0].split('$', 1)[0]]
        symbol = get_symbol_info(node, symbol_name)
        # replace keyword `prange` to `range` for compatibility with Phylanx.
        iteration_space[0] = iteration_space[0].replace('prange', 'range')
    else:
        symbol = get_symbol_info(node, 'for_each')
    body = self._block(node.body)
    # orelse = self._block(node.orelse)
    op = get_symbol_info(node, 'lambda')
    # The loop body becomes a lambda over `target`, applied across the
    # iteration space by for_each/parallel_map.
    return [symbol, ([op, (target, body)], iteration_space)]
    # return [symbol, (target, iteration_space, body, orelse)]
def _FunctionDef(self, node):
    """class FunctionDef(name, args, body, decorator_list, returns)
    `name` is a raw string of the function name.
    `args` is a arguments node.
    `body` is the list of nodes inside the function.
    `decorator_list` is the list of decorators to be applied, stored
    outermost first (i.e. the first in the list will be applied last).
    `returns` is the return annotation (Python 3 only).
    Notes:
        We ignore decorator_list and returns.
    """
    op = get_symbol_info(node, 'define')
    symbol = get_symbol_info(node, node.name)
    args = self._apply_rule(node.args)
    body = self._block(node.body)
    lambda_op = get_symbol_info(node, 'lambda')
    if (args):
        return [op, (symbol, args, body)]
    else:
        # A zero-argument function is defined as a named nullary lambda.
        return [op, (symbol, (lambda_op, (body,)))]
def _Gt(self, node):
"""Leaf node, returning raw string of the 'greater than' operation."""
return '__gt'
def _GtE(self, node):
"""Leaf node, returning raw string of the 'greater than or equal' operation."""
return '__ge'
def _If(self, node):
    """class If(test, body, orelse)
    `test` holds a single node, such as a Compare node; `body` and
    `orelse` each hold a list of statement nodes.
    """
    symbol = get_symbol_info(node, 'if')
    condition = self._apply_rule(node.test)
    then_branch = self._block(node.body)
    else_branch = self._block(node.orelse)
    return [symbol, (condition, then_branch, else_branch)]
def _IfExp(self, node):
    """class IfExp(test, body, orelse)
    A conditional expression (`a if test else b`).

    The translation is identical to an `if` statement's (same fields,
    same PhySL form), so delegate to _If instead of duplicating it.
    """
    return self._If(node)
def _In(self, node):
    # Python's membership operator has no PhySL equivalent.
    raise Exception("`In` operator is not defined in Phylanx.")
def _Index(self, node):
    """class Index(value)
    Simple subscripting with a single value.
    """
    # A tuple index must stay a tuple -- it is not turned into a PhySL
    # list here.
    if isinstance(node.value, ast.Tuple):
        return tuple(self._apply_rule(element) for element in node.value.elts)
    return self._apply_rule(node.value)
def _Is(self, node):
    # Identity comparison has no PhySL equivalent.
    raise Exception("`Is` operator is not defined in Phylanx.")
def _IsNot(self, node):
    # Negated identity comparison has no PhySL equivalent.
    raise Exception("`IsNot` operator is not defined in Phylanx.")
def _Lambda(self, node):
    """class Lambda(args, body)
    `body` is a single node.
    """
    symbol = get_symbol_info(node, 'lambda')
    args = self._apply_rule(node.args)
    body = self._block(node.body)
    if args:
        return [symbol, (args, body)]
    else:
        # NOTE(review): `(body)` is NOT a 1-tuple -- the parentheses are
        # redundant, so the zero-arg form nests `body` one level less
        # than the `(args, body)` form. `_block` already returns a tuple
        # for a single node, which may be why this works; confirm against
        # the PhySL consumer before "fixing".
        return [symbol, (body)]
def _List(self, node):
    """class List(elts, ctx) -- a list literal.

    Each element is translated and the whole literal becomes a PhySL
    `list` primitive applied to the translated elements.
    """
    op = get_symbol_info(node, 'list')
    items = tuple(self._apply_rule(element) for element in node.elts)
    return [op, items]
def _ListComp(self, node):
    """class ListComp(elt, generators)
    `elt` (or key and value) is a single node representing the part that
    will be evaluated for each item.
    `generators` is a list of comprehension nodes.
    """
    if len(node.generators) > 1:
        raise NotImplementedError("Nested comprehensions is not yet supported!")
    elt = self._apply_rule(node.elt)
    # _comprehension returns {'target': ..., 'iter': ...} for the single
    # `for` clause.
    loop = self._apply_rule(node.generators[0])
    target = loop['target']
    iter_space = loop['iter']
    # Special case: comprehension over zip() -- fuse the iterables and
    # prepend per-iteration bindings to the mapped body.
    if isinstance(iter_space, list) and iter_space[0].startswith('zip'):
        iter_space, iterators = physl_zip([target, iter_space])
        symbol = get_symbol_info(node, 'fmap')
        body = ['block', (*iterators, elt)]
        op = get_symbol_info(node, 'lambda')
        return [symbol, ([op, ('__physl_iterator', body)], iter_space)]
    # General case: [elt for target in iter] lowers to
    # fmap(lambda target: elt, iter).
    lambda_ = ['lambda', (target, elt)]
    fmap = ['fmap', (lambda_, iter_space)]
    return fmap
def _Lt(self, node):
"""Leaf node, returning raw string of the 'less than' operation."""
return '__lt'
def _LtE(self, node):
"""Leaf node, returning raw string of the 'less than or equal' operation."""
| |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Main classification script, heavily modified from:
https://github.com/dmlc/gluon-nlp/blob/v0.9.x/scripts/bert/finetune_classifier.py
"""
import io
import os
import time
import json
import argparse
import random
import logging
import warnings
import collections
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.contrib.amp import amp
from mxnet.gluon import HybridBlock, nn
import gluonnlp as nlp
from utils.finetune_utils import ALL_TASKS_DIC, EXTRA_METRICS
from bort.bort import BortClassifier, get_bort_model
from utils.finetune_utils import preprocess_data, do_log
def get_parser():
    """Build the argparse parser for Bort fine-tuning runs.

    Returns:
        argparse.ArgumentParser covering task selection, optimization
        hyper-parameters, checkpoint locations and dataset paths.

    NOTE(review): `--use_scheduler` and `--no_distributed` use
    `type=bool`, so any non-empty string on the command line (including
    "False") parses as True -- `action='store_true'`-style flags would be
    safer, but changing them would alter the CLI; flagging only.
    """
    parser = argparse.ArgumentParser(description='Bort fine-tune examples for various NLU tasks.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--epochs', type=int, default=3,
                        help='number of epochs.')
    parser.add_argument('--ramp_up_epochs', type=int,
                        default=3, help='number of ramp up epochs.')
    parser.add_argument('--training_steps', type=int,
                        help='The total training steps. Note that if specified, epochs will be ignored.')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='Batch size. Number of examples per gpu in a minibatch.')
    parser.add_argument('--dev_batch_size', type=int, default=8,
                        help='Batch size for dev set and test set')
    parser.add_argument('--init', type=str, default='uniform', choices=['gaussian', 'uniform', 'orthogonal', 'xavier'],
                        help='Initialization distribution.')
    parser.add_argument('--prob', type=float, default=0.5,
                        help='The probability around which to center the distribution.')
    parser.add_argument('--lr', type=float, default=3e-5,
                        help='Initial learning rate')
    parser.add_argument('--epsilon', type=float, default=1e-6,
                        help='Small value to avoid division by 0')
    parser.add_argument('--warmup_ratio', type=float, default=0.1,
                        help='ratio of warmup steps used in NOAM\'s stepsize schedule')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--dropout', type=float, default=0.1)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--max_len', type=int, default=512,
                        help='Maximum length of the sentence pairs')
    parser.add_argument('--seed', type=int, default=2, help='Random seed')
    parser.add_argument('--gpu', type=int, default=None,
                        help='Which GPU to use for fine-tuning.')
    parser.add_argument('--use_scheduler', default=True, type=bool)
    parser.add_argument('--accumulate', type=int, default=None,
                        help='The number of batches for gradients accumulation to simulate large batch size. ')
    parser.add_argument('--log_interval', type=int,
                        default=10, help='report interval')
    parser.add_argument('--no_distributed', default=False, type=bool)
    parser.add_argument('--task_name', type=str, choices=ALL_TASKS_DIC.keys())
    parser.add_argument('--dataset', type=str, default='openwebtext_ccnews_stories_books_cased',
                        choices=['book_corpus_wiki_en_uncased', 'book_corpus_wiki_en_cased',
                                 'openwebtext_book_corpus_wiki_en_uncased', 'wiki_multilingual_uncased',
                                 'wiki_multilingual_cased', 'wiki_cn_cased',
                                 'openwebtext_ccnews_stories_books_cased'])
    parser.add_argument('--pretrained_parameters', type=str,
                        default=None, help='Pre-trained Bort model parameter file.')
    parser.add_argument('--model_parameters', type=str, default=None)
    parser.add_argument('--output_dir', type=str, default='./output_dir',
                        help='The output directory to where the model params will be written.')
    parser.add_argument('--only_inference', action='store_true',
                        help='If set, we skip training and only perform inference on dev and test data.')
    parser.add_argument('--dtype', type=str, default='float32',
                        choices=['float32', 'float16'], help='The data type for training.')
    parser.add_argument('--early_stop', type=int, default=None,
                        help='Whether to perform early stopping based on the metric on dev set.')
    parser.add_argument('--multirc_test_location', type=str, required=False, default="/home/ec2-user/.mxnet/datasets/superglue_multirc/test.jsonl",
                        help='Location of the MultiRC test set, in case it is not in the default location.')
    parser.add_argument('--record_test_location', type=str, required=False, default="/home/ec2-user/.mxnet/datasets/superglue_record/test.jsonl",
                        help='Location of the ReCoRD test set, in case it is not in the default location.')
    parser.add_argument('--record_dev_location', type=str, required=False, default="/home/ec2-user/.mxnet/datasets/superglue_record/val.jsonl",
                        help='Location of the ReCoRD dev set, in case it is not in the default location.')
    parser.add_argument('--race_dataset_location', type=str, required=False, default=None,
                        help='Location of the RACE dataset, in case it is not in the default location.')
    return parser
def load_and_setup_model(task, args):
    """Build the Bort classifier/regressor for *task* and load its weights.

    Args:
        task: GluonNLP task object; ``task.class_labels`` is None for
            regression tasks (e.g. STS-B).
        args: parsed command-line arguments (see get_parser()).

    Returns:
        (model, tokenizer, loss_function, vocabulary) tuple.

    NOTE(review): relies on a module-level ``ctx`` (mx.Context) being set
    before this function runs -- confirm against the script's __main__.
    """
    pretrained_parameters = args.pretrained_parameters
    model_parameters = args.model_parameters
    dataset = args.dataset
    # Previously read the bare global `only_inference`, which raises
    # NameError unless __main__ happened to define it; args carries the flag.
    if args.only_inference and not model_parameters:
        warnings.warn(
            'model_parameters is not set. Randomly initialized model will be used for inference.')
    # Only download pretrained weights when no explicit parameter file is given.
    get_pretrained = pretrained_parameters is None and model_parameters is None
    # STS-B is a regression task and STSBTask().class_labels returns None
    do_regression = not task.class_labels
    if do_regression:
        num_classes = 1
        loss_function = gluon.loss.L2Loss()
    else:
        num_classes = len(task.class_labels)
        loss_function = gluon.loss.SoftmaxCELoss()
    bort, vocabulary = get_bort_model("bort_4_8_768_1024", dataset_name=dataset,
                                      pretrained=get_pretrained, ctx=ctx,
                                      use_pooler=True, use_decoder=False,
                                      use_classifier=False)
    # TODO: CoLA uses a different classifier!
    model = BortClassifier(bort, dropout=args.dropout, num_classes=num_classes)
    # argparse `choices` guarantees args.init is one of these four values.
    if args.init == "gaussian":
        initializer = mx.init.Normal(args.prob)
    elif args.init == "uniform":
        initializer = mx.init.Uniform(args.prob)
    elif args.init == "orthogonal":
        initializer = mx.init.Orthogonal(scale=args.prob)
    else:  # "xavier"
        initializer = mx.init.Xavier()
    if not model_parameters:
        model.classifier.initialize(init=initializer, ctx=ctx)
    # Load checkpoints: the pretrained backbone first, then (optionally) a
    # full fine-tuned model, which overrides it.
    if pretrained_parameters:
        logging.info('loading Bort params from %s', pretrained_parameters)
        nlp.utils.load_parameters(
            model.bort, pretrained_parameters, ctx=ctx, ignore_extra=True,
            cast_dtype=True)
    if model_parameters:
        logging.info('loading model params from %s', model_parameters)
        nlp.utils.load_parameters(
            model, model_parameters, ctx=ctx, cast_dtype=True)
    # Ensure the output directory exists (was the bare global `output_dir`).
    nlp.utils.mkdir(args.output_dir)
    tokenizer = nlp.data.GPT2BPETokenizer()
    return model, tokenizer, loss_function, vocabulary
def setup_logger(args):
    """Configure the root logger to emit INFO+ to console and a per-task file.

    The log file is ``log_<task_name>.txt`` in the current directory.

    Args:
        args: parsed command-line arguments; only ``args.task_name`` is read.
    """
    log = logging.getLogger()
    log.setLevel(logging.INFO)
    logging.captureWarnings(True)
    # Previously used the bare global `task_name` (NameError unless
    # __main__ pre-seeded it); take it from args instead.
    fh = logging.FileHandler('log_{0}.txt'.format(args.task_name))
    formatter = logging.Formatter(fmt='%(levelname)s:%(name)s:%(asctime)s %(message)s',
                                  datefmt='%H:%M:%S')
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(formatter)
    log.addHandler(console)
    log.addHandler(fh)
def train(metric):
    """Training function.

    Runs the fine-tuning loop (unless `only_inference`), evaluating on the
    dev sets after every epoch, checkpointing per epoch, reloading the best
    checkpoint at the end, and finally running inference on the test sets.

    Args:
        metric: mxnet metric object updated with (label, prediction) pairs.

    NOTE(review): this function depends on many module-level names set up
    elsewhere in the script (model, lr, epsilon, batch_size, accumulate,
    num_train_examples, train_data, dev_data_list, test_data_list,
    loss_function, do_regression, is_master_node, only_inference, ctx,
    task_name, output_dir, and -- in the distributed path -- hvd). Confirm
    __main__ seeds them before calling.
    """
    if not only_inference:
        logging.info(
            'Now we are doing Bort classification training on %s!', ctx)
        all_model_params = model.collect_params()
        optimizer_params = {'learning_rate': lr,
                            'epsilon': epsilon, 'wd': args.weight_decay}
        trainer = gluon.Trainer(all_model_params, "bertadam", optimizer_params,
                                update_on_kvstore=False)
        if args.dtype == 'float16':
            amp.init_trainer(trainer)
        # NOTE(review): epoch_number is computed but never used below --
        # the epoch loop iterates args.epochs regardless.
        epoch_number = args.ramp_up_epochs
        step_size = batch_size * accumulate if accumulate else batch_size
        num_train_steps = int(num_train_examples / step_size * args.ramp_up_epochs)
        if args.training_steps:
            num_train_steps = args.training_steps
            epoch_number = 9999
        logging.info('training steps=%d', num_train_steps)
        warmup_ratio = args.warmup_ratio
        num_warmup_steps = int(num_train_steps * warmup_ratio)
        step_num = 0
        # Do not apply weight decay on LayerNorm and bias terms
        for _, v in model.collect_params('.*beta|.*gamma|.*bias').items():
            v.wd_mult = 0.0
        # Collect differentiable parameters
        params = [p for p in all_model_params.values() if p.grad_req != 'null']
        # Set grad_req if gradient accumulation is required
        if accumulate and accumulate > 1:
            for p in params:
                p.grad_req = 'add'
    # track best eval score
    metric_history = []
    best_metric = None
    patience = args.early_stop
    tic = time.time()
    # NOTE(review): finish_flag is never set to True anywhere below, so
    # its break is dead code -- presumably a leftover; confirm.
    finish_flag = False
    for epoch_id in range(args.epochs):
        if args.early_stop and patience == 0:
            logging.info('Early stopping at epoch %d', epoch_id)
            break
        if finish_flag:
            break
        if not only_inference:
            metric.reset()
            step_loss = 0
            tic = time.time()
            all_model_params.zero_grad()
            for batch_id, seqs in enumerate(train_data):
                # learning rate schedule
                if args.use_scheduler:
                    if step_num < num_warmup_steps:
                        new_lr = lr * step_num / num_warmup_steps
                    else:
                        non_warmup_steps = step_num - num_warmup_steps
                        offset = non_warmup_steps / \
                            (num_train_steps - num_warmup_steps)
                        new_lr = max(1e-7, lr - offset * lr)
                    trainer.set_learning_rate(new_lr)
                # forward and backward
                with mx.autograd.record():
                    if args.no_distributed:
                        input_ids, segment_ids, valid_length, label = seqs
                    else:
                        # NOTE(review): `hvd` (horovod) is not imported in
                        # this file's visible imports -- presumably set up
                        # in __main__ for the distributed path; confirm.
                        input_ids, segment_ids, valid_length, label = seqs[
                            hvd.rank()]
                    out = model(input_ids.as_in_context(ctx),
                                valid_length.as_in_context(ctx).astype('float32'))
                    ls = loss_function(out, label.as_in_context(ctx)).mean()
                if args.dtype == 'float16':
                    with amp.scale_loss(ls, trainer) as scaled_loss:
                        mx.autograd.backward(scaled_loss)
                else:
                    ls.backward()
                # update
                if not accumulate or (batch_id + 1) % accumulate == 0:
                    trainer.allreduce_grads()
                    nlp.utils.clip_grad_global_norm(params, 1)
                    trainer.update(accumulate if accumulate else 1)
                    step_num += 1
                    if accumulate and accumulate > 1:
                        # set grad to zero for gradient accumulation
                        all_model_params.zero_grad()
                step_loss += ls.asscalar()
                if not do_regression:
                    label = label.reshape((-1))
                metric.update([label], [out])
                if (batch_id + 1) % (args.log_interval) == 0:
                    if is_master_node:
                        do_log(batch_id, len(train_data), metric, step_loss, args.log_interval,
                               epoch_id=epoch_id, learning_rate=trainer.learning_rate)
                    step_loss = 0
            mx.nd.waitall()
        # inference on dev data
        tmp_metric = []
        for segment, dev_data in dev_data_list:
            if is_master_node:
                metric_nm, metric_val = evaluate(
                    dev_data, metric, segment, epoch=epoch_id)
                # Early-stopping bookkeeping: reset patience on improvement,
                # otherwise decrement it (when early stopping is enabled).
                if best_metric is None or metric_val >= best_metric:
                    best_metric = metric_val
                    patience = args.early_stop
                else:
                    if args.early_stop is not None:
                        patience -= 1
                tmp_metric += metric_val
        if is_master_node:
            # For multi-valued tasks (e.g., MNLI), we maximize the average of
            # the metrics.
            metric_history.append(
                (epoch_id, metric_nm + ["average"], metric_val + [sum(tmp_metric) / len(tmp_metric)]))
        if not only_inference and is_master_node:
            ckpt_name = 'model_bort_{0}_{1}.params'.format(task_name, epoch_id)
            params_saved = os.path.join(output_dir, ckpt_name)
            nlp.utils.save_parameters(model, params_saved)
            logging.info('params saved in: %s', params_saved)
            toc = time.time()
            logging.info('Time cost=%.2fs', toc - tic)
            tic = toc
    if not only_inference and is_master_node:
        # Reload the checkpoint of the best epoch (sorted by first metric
        # value, descending) before running on the test sets.
        metric_history.sort(key=lambda x: x[-1][0], reverse=True)
        epoch_id, metric_nm, metric_val = metric_history[0]
        ckpt_name = 'model_bort_{0}_{1}.params'.format(task_name, epoch_id)
        params_saved = os.path.join(output_dir, ckpt_name)
        nlp.utils.load_parameters(model, params_saved)
        metric_str = 'Best model at epoch {}. Validation metrics:'.format(
            epoch_id)
        metric_str += ','.join([i + ':%.4f' for i in metric_nm])
        logging.info(metric_str, *metric_val)
    # inference on test data
    for segment, test_data in test_data_list:
        if is_master_node:
            test(test_data, segment, acc=metric_val[0])
def evaluate(loader_dev, metric, segment, epoch=0):
"""Evaluate the model on validation dataset."""
logging.info('Now we are doing evaluation on %s with %s.', segment, ctx)
metric.reset()
step_loss = 0
counter = 0
tic = time.time()
all_results = collections.defaultdict(list)
if "RACE" in args.task_name:
from utils.finetune_utils import process_RACE_answers, RACEHash
race_dev_data = RACEHash(
args.race_dataset_location, args.task_name, segment="dev")
results = []
if "ReCoRD" in args.task_name:
from utils.finetune_utils import process_ReCoRD_answers, ReCoRDHash
record_dev_data = ReCoRDHash(args.record_dev_location)
results = []
for batch_id, seqs in enumerate(loader_dev):
input_ids, segment_ids, valid_length, label = seqs
out = model(input_ids.as_in_context(ctx),
valid_length.as_in_context(ctx).astype('float32'))
ls = loss_function(out, label.as_in_context(ctx)).mean()
step_loss += ls.asscalar()
if not do_regression:
label = label.reshape((-1))
metric.update([label], [out])
for example_id, (l, p) in enumerate(zip(label, out)):
all_results[counter].append([[l], [p]])
counter += 1
if "RACE" in args.task_name:
indices = | |
# Copyright (c) 2018, 2019, Nordic Semiconductor ASA
#
# SPDX-License-Identifier: Apache-2.0
'''West configuration file handling.
West follows Git-like conventions for configuration file locations.
There are three types of configuration file: system-wide files apply
to all users on the current machine, global files apply to the current
user, and local files apply to the current west workspace.
System files:
- Linux: ``/etc/westconfig``
- macOS: ``/usr/local/etc/westconfig``
- Windows: ``%PROGRAMDATA%\\west\\config``
Global files:
- Linux: ``~/.westconfig`` or (if ``$XDG_CONFIG_HOME`` is set)
``$XDG_CONFIG_HOME/west/config``
- macOS: ``~/.westconfig``
- Windows: ``.westconfig`` in the user's home directory, as determined
by os.path.expanduser.
Local files:
- Linux, macOS, Windows: ``<workspace-topdir>/.west/config``
You can override these files' locations with the ``WEST_CONFIG_SYSTEM``,
``WEST_CONFIG_GLOBAL``, and ``WEST_CONFIG_LOCAL`` environment variables.
Configuration values from later configuration files override configuration
from earlier ones. Local values have highest precedence, and system values
lowest.
'''
import configparser
import os
from pathlib import PureWindowsPath, Path
import platform
from enum import Enum
from typing import Any, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING
import warnings
from west.util import west_dir, WestNotFound, PathType
def _configparser(): # for internal use
return configparser.ConfigParser(allow_no_value=True)
class _InternalCF:
    # For internal use only; convenience interface for reading and
    # writing INI-style [section] key = value configuration files,
    # but presenting a west-style section.key = value style API.

    @staticmethod
    def from_path(path: Optional[Path]) -> Optional['_InternalCF']:
        # Factory that tolerates a missing file: returns None when there
        # is nothing to wrap.
        return _InternalCF(path) if path and path.exists() else None

    def __init__(self, path: Path):
        # path: the config file backing this object; every set()/delete()
        # is written straight back to it.
        self.path = path
        self.cp = _configparser()
        read_files = self.cp.read(path, encoding='utf-8')
        if len(read_files) != 1:
            raise FileNotFoundError(path)

    def __contains__(self, option: str) -> bool:
        # option is in west's 'section.key' form.
        section, key = option.split('.', 1)
        return section in self.cp and key in self.cp[section]

    def get(self, option: str):
        '''Get *option* as a string; KeyError if unset.'''
        return self._get(option, self.cp.get)

    def getboolean(self, option: str):
        '''Get *option* coerced to bool; KeyError if unset.'''
        return self._get(option, self.cp.getboolean)

    def getint(self, option: str):
        '''Get *option* coerced to int; KeyError if unset.'''
        return self._get(option, self.cp.getint)

    def getfloat(self, option: str):
        '''Get *option* coerced to float; KeyError if unset.'''
        return self._get(option, self.cp.getfloat)

    def _get(self, option, getter):
        # Translate configparser's missing-section/-option errors into a
        # KeyError carrying the full 'section.key' option string.
        section, key = option.split('.', 1)

        try:
            return getter(section, key)
        except (configparser.NoOptionError, configparser.NoSectionError):
            raise KeyError(option)

    def set(self, option: str, value: Any):
        '''Set *option* to *value* and write the file back immediately.'''
        section, key = option.split('.', 1)

        if section not in self.cp:
            self.cp[section] = {}

        self.cp[section][key] = value

        with open(self.path, 'w', encoding='utf-8') as f:
            self.cp.write(f)

    def delete(self, option: str):
        '''Delete *option* and write the file back; KeyError if unset.'''
        section, key = option.split('.', 1)

        # Check the key as well as the section so a missing key raises
        # KeyError(option) -- consistent with _get() -- rather than the
        # bare KeyError(key) that `del` would raise.
        if section not in self.cp or key not in self.cp[section]:
            raise KeyError(option)

        del self.cp[section][key]
        # Drop the section entirely once its last key is gone.
        if not self.cp[section].items():
            del self.cp[section]

        with open(self.path, 'w', encoding='utf-8') as f:
            self.cp.write(f)
class ConfigFile(Enum):
    '''Types of west configuration file.

    Enumeration members:

    - SYSTEM: system level configuration shared by all users
    - GLOBAL: global or user-wide configuration
    - LOCAL: per-workspace configuration
    - ALL: all three of the above, where applicable
    '''
    # Values are arbitrary identifiers; only member identity is used.
    ALL = 1
    SYSTEM = 2
    GLOBAL = 3
    LOCAL = 4
class Configuration:
'''Represents the available configuration options and their values.
Allows getting, setting, and deleting configuration options
in the system, global, and local files.
Sets take effect immediately and are not protected against
concurrent gets. The caller is responsible for any necessary
mutual exclusion.
'''
def __init__(self, topdir: Optional[PathType] = None):
    '''Load the system, global, and workspace configurations and
    make them available for the user.

    :param topdir: workspace location; may be None

    Each layer's _InternalCF is None when its file does not exist;
    the local layer may additionally have no path at all when there
    is no workspace.
    '''
    # search_for_local=False: only an explicit topdir (or the
    # WEST_CONFIG_LOCAL override handled by _location) yields a local
    # path; we do not walk up the directory tree here.
    local_path = _location(ConfigFile.LOCAL, topdir=topdir,
                           search_for_local=False) or None

    self._system_path = Path(_location(ConfigFile.SYSTEM))
    self._global_path = Path(_location(ConfigFile.GLOBAL))
    self._local_path = Path(local_path) if local_path is not None else None

    self._system = _InternalCF.from_path(self._system_path)
    self._global = _InternalCF.from_path(self._global_path)
    self._local = _InternalCF.from_path(self._local_path)
def get(self, option: str,
        default: Optional[str] = None,
        configfile: ConfigFile = ConfigFile.ALL) -> Optional[str]:
    '''Get a configuration option's value as a string.

    :param option: option to get, in 'foo.bar' form
    :param default: default value to return if option is missing
    :param configfile: type of config file look for the value in
    '''
    # Delegate to _get, which walks the relevant files in precedence order.
    return self._get(lambda cf: cf.get(option), default, configfile)
def getboolean(self, option: str,
               default: bool = False,
               configfile: ConfigFile = ConfigFile.ALL) -> bool:
    '''Get a configuration option's value as a bool.

    The configparser module's conversion to boolean is applied
    to any value discovered. Invalid values raise ValueError.

    :param option: option to get, in 'foo.bar' form
    :param default: default value to return if option is missing
    :param configfile: type of config file to look for the value in
    '''
    return self._get(lambda cf: cf.getboolean(option), default, configfile)
def getint(self, option: str,
           default: Optional[int] = None,
           configfile: ConfigFile = ConfigFile.ALL) -> Optional[int]:
    '''Get a configuration option's value as an int.

    :param option: option to get, in 'foo.bar' form
    :param default: default value to return if option is missing
    :param configfile: type of config file to look for the value in
    '''
    return self._get(lambda cf: cf.getint(option), default, configfile)
def getfloat(self, option: str,
             default: Optional[float] = None,
             configfile: ConfigFile = ConfigFile.ALL) -> Optional[float]:
    '''Get a configuration option's value as a float.

    :param option: option to get, in 'foo.bar' form
    :param default: default value to return if option is missing
    :param configfile: type of config file to look for the value in
    '''
    return self._get(lambda cf: cf.getfloat(option), default, configfile)
def _get(self, getter, default, configfile):
    """Apply *getter* to each relevant file; first hit wins.

    Files are visited in precedence order (local, global, system, as
    selected by *configfile*); missing files and missing options are
    skipped. Returns *default* when nothing matched.
    """
    for candidate in self._whence(configfile):
        if candidate is None:
            # That layer's file does not exist.
            continue
        try:
            return getter(candidate)
        except KeyError:
            pass
    return default
def _whence(self, configfile):
    """Return the _InternalCF layers to consult, highest precedence first."""
    if configfile == ConfigFile.ALL:
        # Local overrides global overrides system; the local layer is
        # only present inside a workspace.
        layers = [self._global, self._system]
        if self._local is not None:
            layers.insert(0, self._local)
        return layers
    if configfile == ConfigFile.SYSTEM:
        return [self._system]
    if configfile == ConfigFile.GLOBAL:
        return [self._global]
    if configfile == ConfigFile.LOCAL:
        if self._local is None:
            raise RuntimeError('local configuration file not found')
        return [self._local]
    raise ValueError(configfile)
def set(self, option: str, value: Any,
        configfile: ConfigFile = ConfigFile.LOCAL) -> None:
    '''Set a configuration option's value.

    The write to the configuration file takes effect
    immediately. No concurrency protection is performed against
    concurrent access from the time that this Configuration object
    was created. If the file may have been modified since that
    time, either create a new Configuration object before using
    this method or lose the intervening modifications.

    :param option: option to set, in 'foo.bar' form
    :param value: value to set option to
    :param configfile: type of config file to set the value in

    :raises ValueError: for ConfigFile.ALL, or for ConfigFile.LOCAL
        outside a workspace (and without WEST_CONFIG_LOCAL)
    '''
    if configfile == ConfigFile.ALL:
        # We need a real configuration file; ALL doesn't make sense here.
        raise ValueError(configfile)
    elif configfile == ConfigFile.LOCAL:
        if self._local_path is None:
            raise ValueError(f'{configfile}: file not found; retry in a '
                             'workspace or set WEST_CONFIG_LOCAL')
        # Create the backing file on first write; _create returns a
        # usable _InternalCF.
        if not self._local_path.exists():
            self._local = self._create(self._local_path)
        if TYPE_CHECKING:
            assert self._local
        self._local.set(option, value)
    elif configfile == ConfigFile.GLOBAL:
        if not self._global_path.exists():
            self._global = self._create(self._global_path)
        if TYPE_CHECKING:
            assert self._global
        self._global.set(option, value)
    elif configfile == ConfigFile.SYSTEM:
        if not self._system_path.exists():
            self._system = self._create(self._system_path)
        if TYPE_CHECKING:
            assert self._system
        self._system.set(option, value)
    else:
        # Shouldn't happen.
        assert False, configfile
@staticmethod
def _create(path: Path) -> _InternalCF:
    # Create an empty configuration file at *path*, making any missing
    # parent directories, then load it as an _InternalCF.
    path.parent.mkdir(parents=True, exist_ok=True)
    path.touch(exist_ok=True)
    ret = _InternalCF.from_path(path)
    if TYPE_CHECKING:
        # The assert only narrows the Optional for the type checker;
        # it is a no-op at runtime (TYPE_CHECKING is False).
        assert ret
    return ret
def delete(self, option: str,
           configfile: Optional[ConfigFile] = None) -> None:
    '''Remove *option* from one or more configuration files.

    :param option: option to remove, in 'foo.bar' form
    :param configfile: ConfigFile.ALL removes the option from every
                       file where it is set; None removes it only from
                       the highest-precedence file that has it; any
                       other value removes it from that specific file.

    Raises KeyError when *option* is not set in the selected file(s).
    '''
    if configfile is None or configfile == ConfigFile.ALL:
        # Walk files from highest to lowest precedence.
        deletions = 0
        for cf in (self._local, self._global, self._system):
            if not cf or option not in cf:
                continue
            cf.delete(option)
            if configfile is None:
                # Only the highest-precedence occurrence is removed.
                return
            deletions += 1
        if deletions == 0:
            raise KeyError(option)
        return
    per_file = {ConfigFile.LOCAL: self._local,
                ConfigFile.GLOBAL: self._global,
                ConfigFile.SYSTEM: self._system}
    if configfile not in per_file:
        raise RuntimeError(f'bad configfile {configfile}')
    cf = per_file[configfile]
    if not cf:
        raise KeyError(option)
    cf.delete(option)
def _copy_to_configparser(self, cp: configparser.ConfigParser) -> None:
    # Internal API for main to use to maintain backwards
    # compatibility for existing extensions using the legacy
    # function-and-global-state APIs.
    #
    # Files are merged lowest precedence first so that later (higher
    # precedence) files overwrite earlier values.
    for source in (self._system, self._global, self._local):
        if not source:
            continue
        for section, contents in source.cp.items():
            if section == 'DEFAULT':
                continue
            if section not in cp:
                cp.add_section(section)
            for key, value in contents.items():
                cp[section][key] = value
def items(self, configfile: ConfigFile = ConfigFile.ALL
          ) -> Iterable[Tuple[str, Any]]:
    '''Iterator of option, value pairs.'''
    if configfile == ConfigFile.ALL:
        # Merge lowest precedence first so local values win.
        merged: dict = {}
        merged.update(self._system_as_dict)
        merged.update(self._global_as_dict)
        merged.update(self._local_as_dict)
        return merged.items()
    per_file = {
        ConfigFile.SYSTEM: lambda: self._system_as_dict,
        ConfigFile.GLOBAL: lambda: self._global_as_dict,
        ConfigFile.LOCAL: lambda: self._local_as_dict,
    }
    if configfile in per_file:
        return per_file[configfile]().items()
    raise RuntimeError(configfile)
@property
def _system_as_dict(self):
    # Flattened dict view of the system-wide configuration file.
    return self._cf_to_dict(self._system)
@property
def _global_as_dict(self):
    # Flattened dict view of the per-user (global) configuration file.
    return self._cf_to_dict(self._global)
@property
def _local_as_dict(self):
| |
of the posts that match our criteria
try:
cursor.execute(query)
except Exception as e:
# Display information
print('Failed to report posts: {:s}'.format(str(e)))
# Specify that we failed to deliver
return False
# Compute the number of posts that the user needs
need = g_PostCount * 2 if userid in g_MultiList else g_PostCount
# Was there any post that matched our criteria?
if not cursor.rowcount:
# Send this user a PM explaining that he has no posts in this period of time
return SendPrivateMessage(g_BotUserID, userid, '0 posts made {:s}'.format(dtsub), 'You do not have any posts made {:s}. Your monthly posts requirement is [b]{:d}[/b] posts.'.format(dtmsg, need))
# Grab the post count
count = int(cursor.rowcount)
# Generate the message header
msg = 'You have [b]{:d}[/b] post(s) made {:s}. Your monthly posts requirement is [b]{:d}[/b] posts.\n[hr]\nHere is a list of the posts included in this count:\n[list]\n'.format(count, dtmsg, need)
# Build a list of posts that were included in this count
for row in cursor:
#Generate the post URL
url = g_PostURL.format(tid=int(row[1]), pid=int(row[0]))
# Generate the element list and append it
msg += '[*][url={0}]{1}[/url]\n'.format(url, row[2])
# Close the list
msg += '\n[/list]'
# Finally, send the PM to the user and return the result
return SendPrivateMessage(g_BotUserID, userid, '{:d} post(s) made {:s}'.format(count, dtsub), msg)
# At this point everything should have worked
return True
# -----------------------------------------------------------------------------------------------
# Report the number of posts after a certain date-time.
def ReportUserPostsAfter(userid, dtm):
    """Report the posts a user made after the date-time *dtm* via PM."""
    # Import globals
    global g_IgnoredForums
    # Convert the cut-off date-time into a UNIX time-stamp
    stamp = int(time.mktime(dtm.timetuple()))
    # NOTE(review): values are interpolated straight into the SQL text.
    # userid/stamp are {:d}-constrained ints, which limits injection, but
    # g_IgnoredForums must be trusted; also `NOT IN ('{:s}')` compares fid
    # against a single quoted string unless g_IgnoredForums carries its own
    # quoting — verify against the configured value.
    query = ("SELECT pid, tid, subject FROM mybb_posts "
             "WHERE uid = {:d} AND dateline > {:d} "
             "AND fid NOT IN ('{:s}')").format(userid, stamp, g_IgnoredForums)
    # Hand off to the shared implementation
    when = str(dtm)
    return ReportUserPostsImpl(userid, query,
                               'after {:s}'.format(when),
                               'after [b]{:s}[/b]'.format(when))
# -----------------------------------------------------------------------------------------------
# Report the number of posts after a certain date-time.
def ReportUserPostsBetween(userid, bdt, edt):
    """Report the posts a user made between *bdt* and *edt* via PM."""
    # Import globals
    global g_IgnoredForums
    # Convert both date-times into UNIX time-stamps
    begin = int(time.mktime(bdt.timetuple()))
    end = int(time.mktime(edt.timetuple()))
    # NOTE(review): same string-built SQL caveats as ReportUserPostsAfter.
    query = ("SELECT pid, tid, subject FROM mybb_posts "
             "WHERE uid = {:d} AND dateline BETWEEN {:d} AND {:d} "
             "AND fid NOT IN ('{:s}')").format(userid, begin, end,
                                               g_IgnoredForums)
    # Hand off to the shared implementation
    span_plain = 'between {:s} and {:s}'.format(str(bdt), str(edt))
    span_bb = 'between [b]{:s}[/b] and [b]{:s}[/b]'.format(str(bdt), str(edt))
    return ReportUserPostsImpl(userid, query, span_plain, span_bb)
# -----------------------------------------------------------------------------------------------
# Retrieve the name of the specified user identifier.
def FetchUserName(userid):
    """Return the user name for *userid*, or None on error or unknown id."""
    # Import globals
    global g_Db
    # Obtain a database cursor and proceed to query the database
    with closing(g_Db.cursor()) as cursor:
        # Select the name of the specified user
        try:
            # uid is {:d}-constrained to an int, which limits injection,
            # but a parameterized query would still be preferable if the
            # driver supports it.
            cursor.execute("SELECT username FROM mybb_users WHERE uid = {:d} LIMIT 1".format(userid))
        except Exception as e:
            # Display information
            print('Failed to acquire user name: {:s}'.format(str(e)))
            # Specify that we failed to deliver
            return None
        # We selected one row so let's try and get just that
        row = cursor.fetchone()
        # Fix: identity comparison with None (PEP 8) instead of `== None`.
        return None if row is None else str(row[0])
# -----------------------------------------------------------------------------------------------
# Generate the report to send to the administrator who requested the alert/warn
def GenerateAdminReport(adminid, status, warn, bdt, edt):
    """Send the requesting administrator a PM summarizing a post count run.

    :param adminid: user id of the administrator who receives the report
    :param status: iterable of dicts with keys 'userid', 'error', 'made',
                   'need' and (when 'error' is set) 'reason'
    :param warn: truthy for a "warning" run, falsy for an "alert" run
    :param bdt: begin of the reporting period (datetime)
    :param edt: end of the reporting period (datetime)
    :returns: the result of SendPrivateMessage
    """
    # Import globals
    global g_UserURL, g_BotUserID
    # The type of report (fix: truthiness test instead of `== True`)
    rtype = 'warning' if warn else 'alert'
    # Open the lists of users which completed their post count, didn't,
    # or failed to receive the notice
    clist = '\nThe list of users who completed their posts:\n[list]\n'
    flist = '\nThe list of users who failed to complete their posts:\n[list]\n'
    elist = '\nThe list of users who the counter failed to send the notice:\n[list]\n'
    # The number of users in each of the three categories
    cnum, fnum, enum = 0, 0, 0
    # Start generating the lists
    for us in status:
        # The URL to this user profile (fix: stray trailing ';' removed)
        url = g_UserURL.format(uid=us['userid'])
        # The name of the user
        name = FetchUserName(us['userid'])
        # Did we fail to send a notice to this user?
        if us['error']:
            # Add it to the list of users which the counter failed to notify
            elist += '[*][url={0}]{1}[/url]: [b]{2}[/b]\n'.format(url, name, us['reason'])
            enum += 1
        # Did this user complete his posts?
        elif us['made'] >= us['need']:
            # Add it to the list of users which completed their posts
            clist += '[*][url={0}]{1}[/url]: [b]{2} / {3}[/b]\n'.format(url, name, us['made'], us['need'])
            cnum += 1
        else:
            # Add it to the list of users which didn't complete their posts
            flist += '[*][url={0}]{1}[/url]: [b]{2} / {3}[/b]\n'.format(url, name, us['made'], us['need'])
            fnum += 1
    # Placeholder entries for empty categories
    if cnum <= 0:
        clist += '[*]No user completed their posts.\n'
    if fnum <= 0:
        flist += '[*]No user failed to complete their posts.\n'
    if enum <= 0:
        elist += '[*]All users received their post notice.\n'
    # Close the BBCode lists
    clist += '[/list]\n'
    flist += '[/list]\n'
    elist += '[/list]\n'
    # Generate the report message to send to the administrator
    msg = 'Here is the report of the post {0} between ([b]{1}[/b]) and ([b]{2}[/b]).\n[hr]{3}[hr]{4}[hr]{5}'.format(rtype, str(bdt), str(edt), clist, flist, elist)
    # Finally, send the message to the administrator
    return SendPrivateMessage(g_BotUserID, adminid, 'Report of post {0} between {1} and {2}'.format(rtype, str(bdt), str(edt)), msg)
# -----------------------------------------------------------------------------------------------
# Generate the message to be sent as a alert.
def GenerateAlertMessage(admin, bdate, edate, ndate, made, need, cursor):
    """Build the BBCode body of a missing-posts alert PM.

    :param admin: name of the giveaway manager signing the message
    :param bdate: begin of the counted period (datetime)
    :param edate: end of the counted period (datetime)
    :param ndate: date inside the next month (supplies month/year header)
    :param made: number of posts the user made in the period
    :param need: number of posts required
    :param cursor: iterable of (pid, tid, subject) rows for counted posts
    :returns: the complete message text
    """
    # Import globals
    global g_PostURL
    # Start with an empty list of posts
    plist = ""
    # Did the user make any posts?
    if made > 0:
        # Open the post list
        plist += '\n[color=#333333][size=small][font=Arial]The posts that were included in this count are:\n[list]\n'
        # Build a list of posts that were included in this count
        for row in cursor:
            # Generate the post URL
            url = g_PostURL.format(tid=int(row[1]), pid=int(row[0]))
            # Generate the element list and append it
            plist += '[*][url={0}]{1}[/url]\n'.format(url, row[2])
        # Close the list
        plist += '\n[/list]\n[/font][/size][/color]\n'
    # Fix: 'ndate' was formatted from bdate by mistake. The key is not
    # referenced by the template today, so this is behavior-safe, but it
    # now carries the intended value.
    return """[font=Arial][color=#333333][size=small]Dear respected FreeVPS Directory & Discussion Forum Member & VPS Owner!
[/size][/color][/font]
[color=#333333][size=small][font=Arial]I am contacting you because you've missing posts that have to be made up between ([b]{bdate}[/b]) and ([b]{edate}[/b]) to keep your VPS for the month ([b]{nextm} {nexty}[/b])
[/font][/size][/color]
[color=#333333][size=small][font=Arial]Amount: [b]{posts}[/b] Post(s). Required: [b]{required}[/b] Post(s).
[/font][/size][/color]
{postlist}
[color=#333333][size=small][font=Arial]If you believe you have received this message in error, eg, you have already made up your posts, or the posts were counted incorrectly please let me know before ([b]{edate}[/b]) and I will double check your posts.[/font][/size][/color]
[color=#333333][size=small][font=Arial]You are being given time until ([b]{edate}[/b]) to make up all your missing posts and [b]reply to this message to confirm[/b] that you've made up all the missing posts.[/font][/size][/color]
[color=#333333][size=small][font=Arial][b]The last moment to message me back is ({edate}). Any messages received after that may not be accepted![/b][/font][/size][/color]
[color=#333333][size=small][font=Arial][b]VPSs of members who have [color=red]failed to make up their missing posts, will be terminated before the next giveaway[/color] and will be made available to other members during the next giveaway.[/b]
[/font][/size][/color]
[color=#333333][size=small][font=Arial]Note that posts in the "SPAM/Testing" and "Introductions" forums DO NOT count towards your post count as we've disabled the post count there a long time ago.[/font][/size][/color]
[color=#333333][size=small][font=Arial]Only real and valid excuses with proper proof will be accepted. Do not use this to get out of making posts unless you have a genuine and applicable reason for doing so.
[/font][/size][/color]
[color=#333333][size=small][font=Arial]Yours sincerely,[/font][/size][/color]
[color=#333333][size=small][font=Arial][b]{manager}[/b][/font][/size][/color]
[font=Arial][color=#333333][size=small]Giveaway Manager[/size][/color][/font]
[font=Arial][color=#333333][size=small]FreeVPS Directory & Discussion Staff & Administration[/size][/color][/font]""".format(
        bdate=bdate.strftime("%d/%B/%Y %H:%M:%S"),
        edate=edate.strftime("%d/%B/%Y %H:%M:%S"),
        ndate=ndate.strftime("%d/%B/%Y %H:%M:%S"),
        nextm=ndate.strftime("%B"),
        nexty=ndate.year,
        posts=made,
        required=need,
        manager=admin,
        postlist=plist
    )
# -----------------------------------------------------------------------------------------------
# Generate the message to be sent as an warning.
def GenerateWarningMessage(admin, bdate, edate, ndate, made, need, cursor):
# Import globals
global g_PostURL
# Start with an empty list of posts
plist = ""
# Did the user made any posts?
| |
# -*- coding: utf-8 -*-
import os
from sympy.printing.pycode import PythonCodePrinter
from sympy import Abs, Mul, Symbol, conjugate
from Definitions import mSymbol, splitPow
from Logging import loggingCritical
class PythonExport():
def __init__(self, model, latexSubs=None, cpp=None):
    """Prepare the Python export of *model*'s RGEs.

    :param model: the PyR@TE model object being exported
    :param latexSubs: optional {symbol: latex-string} substitutions
                      (fix: mutable default argument replaced by None)
    :param cpp: truthy to enable the C++-backed solver output
    :raises TypeError: when the model's RGE set is inconsistent
    """
    if latexSubs is None:
        latexSubs = {}
    # Sanitize the model name into a valid Python identifier
    self._Name = model._Name.replace('-', '').replace('+', '')
    if self._Name[0].isdigit():
        self._Name = '_' + self._Name
    self.model = model
    self.string = ""
    self.stringRun = ""
    # BetaFunc definition
    self.betaFactor = model.betaFactor
    self.betaExponent = str(model.betaExponent(Symbol('n')))
    self.translation = {'GaugeCouplings': 'Gauge Couplings',
                        'Yukawas': 'Yukawa Couplings',
                        'QuarticTerms': 'Quartic Couplings',
                        'TrilinearTerms' : 'Trilinear Couplings',
                        'ScalarMasses': 'Scalar Mass Couplings',
                        'FermionMasses': 'Fermion Mass Couplings',
                        'Vevs': 'Vacuum-expectation Values'}
    self.cListNames = {k: v.replace(' ', '') for k, v in self.translation.items()}
    self.cListNames['Vevs'] = 'Vevs'
    self.allCouplings = {}
    # Fix: the dead `self.couplingStructure = {}` initialisation was
    # removed; it was immediately overwritten by this comprehension.
    self.couplingStructure = {pycode(model.allCouplings[k][1]): v for k, v in model.couplingStructure.items()}
    self.conjugatedCouplings = {}
    self.cDic = {}
    # Abort early if the model's RGE set cannot be exported consistently
    self.inconsistentRGset = (model.NonZeroCouplingRGEs != {} or model.NonZeroDiagRGEs != {})
    if self.inconsistentRGset:
        raise TypeError("The RGE set is inconsistent. Please refer to the latex output.")
    self.gaugeFixing = False
    self.RGfileString = {}
    self.allBetaFunctions = {}
    # Initialize the latex substitutions
    self.latex = {pycode(k): v for k, v in latexSubs.items()}
    # Fix the symbolic gen numbers
    self.symbolicGens = []
    self.genFix = ''
    for p in model.Particles.values():
        if isinstance(p.gen, Symbol) and p.gen not in self.symbolicGens:
            self.symbolicGens.append(p.gen)
    if self.symbolicGens != []:
        self.genFix = ' = '.join([str(el) for el in self.symbolicGens]) + ' = 3'
    self.cpp = cpp if cpp is not None else False
    # Build the generated module text
    self.preamble(model)
    self.RGsolver(model)
def write(self, path):
    """Write the generated files into <path>/PythonOutput.

    Creates the solver module (<Name>.py), the beta-function file
    (RGEs.py) and the driver script (run.py). Each failure is logged
    via loggingCritical and aborts the remaining writes.
    """
    tmpDir = os.getcwd()
    outDir = os.path.join(path, 'PythonOutput')
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    # First : write the Python solver module.
    # Fix: `with` guarantees the handle is closed even when the write
    # fails (the original leaked the handle on a failed write).
    fileName = os.path.join(outDir, self._Name + '.py')
    try:
        with open(fileName, 'w') as f:
            f.write(self.string)
    except Exception:
        loggingCritical('ERROR while creating the Python output file. Skipping.')
        return
    # Then, create the file containing the expression of the beta-functions
    fileName = os.path.join(outDir, 'RGEs.py')
    try:
        with open(fileName, 'w') as f:
            f.write(self.RGEfileString())
    except Exception:
        loggingCritical('ERROR while creating the Python RGE file. Skipping.')
        return
    # Finally create and write the run.py file. runString() must run from
    # inside the output directory; restore the cwd even if it raises.
    os.chdir(outDir)
    try:
        self.runString(self.model, outDir)
    finally:
        os.chdir(tmpDir)
    fileName = os.path.join(outDir, 'run.py')
    try:
        with open(fileName, 'w') as f:
            f.write(self.stringRun)
    except Exception:
        loggingCritical('ERROR while creating the Python run file. Skipping.')
        return
def preamble(self, model):
    """Append the generated module's header banner, its imports and the
    Coupling helper class to self.string.

    The triple-quoted literals below ARE the generated output; their
    exact text (including the embedded Coupling class) is emitted
    verbatim into <Name>.py.
    """
    # Banner fields, padded to a fixed 53-character width below
    name = 'Model : ' + model._Name
    auth = 'Author : ' + model._Author
    date = 'Date : ' + model._Date
    self.string += f"""\
#########################################################
## This file was automatically generated by PyR@TE 3 ##
### ###
## ##
# {name+(53-len(name))*' '+'#'}
# {auth+(53-len(auth))*' '+'#'}
# {date+(53-len(date))*' '+'#'}
#########################################################
"""
    # Imports of the generated module; the ctypes/math lines are only
    # emitted when the C++ backend is enabled.
    # NOTE(review): in the generated getInitialValue below, the
    # splitCplx matrix branch builds a list but appears to drop a
    # `return` — verify against the PyR@TE upstream source.
    self.string += """
import os
import time
import numpy as np
from sympy import flatten
from scipy.integrate import ode
import matplotlib.pyplot as plt
""" + ('from math import ceil\nfrom ctypes import cdll, c_double, c_int' if self.cpp else '') + """
class Coupling():
    couplings = {}
    def __init__(self, name, couplingType, latex=None, shape = (), fromMat=None, cplx=False, isCplx=False, init=0, pos=None):
        self.name = name
        self.type = couplingType
        if latex is not None:
            self.latex = latex
        else:
            self.latex = self.name
        self.shape = shape
        self.is_matrix = ( shape != () )
        self.nb = self.shape[0]*self.shape[1] if self.is_matrix else 1
        self.cplx = cplx
        self.isCplx = isCplx
        self.initialValue = init if shape == () else np.zeros(shape)
        if fromMat is not None:
            self.pos = pos
            self.latex = '{' + fromMat.latex + '}' + self.name.replace(fromMat.name, '')
            return
        if couplingType not in self.couplings:
            self.couplings[couplingType] = []
        self.pos = sum([c.nb for cList in self.couplings.values() for c in cList])
        self.couplings[couplingType].append(self)
    def as_explicit(self, toList=False):
        if not self.is_matrix:
            return self
        nameFunc = lambda x: self.name+'_{' + str(1 + x // self.shape[1]) + str(1 + x % self.shape[1]) + '}'
        initFunc = lambda x: list(self.initialValue)[x // self.shape[1]][x % self.shape[1]]
        arrayFunc = np.vectorize(lambda x: Coupling(nameFunc(x), self.type, fromMat=self, init=initFunc(x), pos=self.pos+x, isCplx=self.isCplx))
        array = arrayFunc(np.reshape(range(self.nb), self.shape))
        if not toList:
            return array
        return [*array.flat]
    def getInitialValue(self, splitCplx=False):
        if self.is_matrix:
            if splitCplx and self.isCplx:
                [(np.real(el), np.imag(el)) for el in self.initialValue.flat]
            else:
                return [*self.initialValue.flat]
        if splitCplx and self.isCplx:
            return (np.real(self.initialValue), np.imag(self.initialValue))
        if not self.isCplx and np.imag(self.initialValue) != 0:
            raise ValueError(f"Error: the coupling {self.name} should not take complex values")
        return self.initialValue
"""
def RGsolver(self, model):
s = '''
class RGEsolver():
""" This class contains the RGEs of the model, as well as pre-defined functions
used to solve and plot them.
The three following arguments may be provided:
- initialScale:
The energy scale at which the initial values are given
- tmin, tmax :
The lower and upper energy scales between which the running couplings are computed and plotted
The initialScale can be different from tmin and tmax, the only requirement being that the initial value of the
couplings are all given at the same scale."""
translation = {'GaugeCouplings': 'Gauge Couplings',
'Yukawas': 'Yukawa Couplings',
'QuarticTerms': 'Quartic Couplings',
'TrilinearTerms' : 'Trilinear Couplings',
'ScalarMasses': 'Scalar Mass Couplings',
'FermionMasses': 'Fermion Mass Couplings',
'Vevs': 'Vacuum-expectation Values'}
def __init__(self, name, initialScale = 0, tmin = 0, tmax = 20):
if initialScale < tmin or initialScale > tmax:
exit(f"The initial running scale must lie in the interval [tmin={tmin}, tmax={tmax}]")
self.name = name
Coupling.couplings = {}
self.initialScale = initialScale
self.tmin = tmin
self.tmax = tmax
self.kappa = lambda n: 1/(4*np.pi)**(''' + self.betaExponent + ''')
self.kappaString = '1/(4*np.pi)**(''' + self.betaExponent + ''')'
self.tList = []
self.solutions = {}
'''
s += "self.loops = " + pycode({k:v for k,v in model.loopDic.items() if k in model.toCalculate}, end='\n'+22*' ')
if self.genFix != '':
s += """
# Fix the symbolic generation numbers
""" + self.genFix
s += self.couplingsDefinition(model)
s += """
self.couplings = Coupling.couplings
self.matrixCouplings = {c.name: np.vectorize(lambda x: x.name)(c.as_explicit())
for cList in self.couplings.values()
for c in cList if c.is_matrix}"""
if self.cpp:
s += """
self.cppArrayToCouplings = []"""
s += """
def extractCouplings(self, couplingsArray, couplingType):
ret = []
for c in self.couplings[couplingType]:
if not c.is_matrix:
ret.append(couplingsArray[c.pos])
else:
ret.append(np.matrix(np.reshape([couplingsArray[p] for p in range(c.pos, c.pos+c.nb)], c.shape)))
return ret
"""
if self.gaugeFixing:
s += """
def fixGauge(self, xi):
self.xiGauge = xi
"""
s += self.RGEs(model)
s += r'''
def printInitialConditions(self, returnString=False):
""" This function displays the current running scheme and the initial values of the couplings.
Its output may be copy-pasted 'as-is' by user to modify these parameters before solving the RGEs."""
# Display the running scheme
outputString = "\n# Running scheme :\n\n"
s = f"{self.name}.loops = "
outputString += s + str(self.loops).replace(', ', ',\n ' + ' '*len(s)) + '\n'
# Display the initial values of the couplings
for cType, cList in self.couplings.items():
outputString += f"\n# {self.translation[cType]}\n\n"
for c in cList:
s = f"{self.name}.{c.name}.initialValue = "
if not c.is_matrix:
s += str(c.initialValue)
else:
sVal = '['
sVal += (',\n ' + len(s)*' ').join([ str(el).replace(' ', ', ') for el in c.initialValue])
sVal += ']\n'
s += sVal
outputString += s + '\n'
if returnString:
return outputString
print(outputString)
'''
s += r'''
##################
# Solve function #
##################
def solve(self, step=.1, Npoints=None):
""" This function performs the actual solving of the system of RGEs, using scipy.ode.
Either the step of the numerical integration may be provided by the user with 'step=[value]',
OR the number of integration points with 'Npoints=[integer value]'."""
self.allCouplings = flatten([c.as_explicit(toList=True) for cList in self.couplings.values() for c in cList])
time0 = time.time()
y0 = flatten([c.getInitialValue() for c in self.allCouplings])
tmin = self.tmin
tmax = self.tmax
t0 = self.initialScale
if Npoints is None:
dt = step
else:
dt = (tmax-tmin)/(Npoints-1)
solutions = {}
for c in self.allCouplings:
solutions[c.name] = []
tList = []
solver = ode(self.betaFunction).set_integrator('zvode', method='bdf')
solver.set_initial_value(y0, t0)
# Solve upwards
while solver.successful() and solver.t < tmax + dt/2:
tList.append(solver.t)
for i, c in enumerate(self.allCouplings):
y = solver.y[i]
if abs(y.imag) > 1e-10 and not c.cplx:
c.cplx = True
elif y.imag == 0:
y = y.real
solutions[c.name].append(y)
solver.integrate(solver.t+dt)
if t0 > tmin:
# If t0 > tmin, complete the solving going downwards
solutions2 = {}
for c in self.allCouplings:
solutions2[c.name] = []
tList2 = []
solver.set_initial_value(y0, t0)
# Solve downwards
while solver.successful() and solver.t > tmin + dt/2:
solver.integrate(solver.t-dt)
tList2.append(solver.t)
for i, c in enumerate(self.allCouplings):
y = solver.y[i]
if abs(y.imag) > 1e-10 and not c.cplx:
c.cplx = True
elif y.imag == 0:
y = y.real
solutions2[c.name].append(y)
# Combine the two regions
tList = tList2[::-1] + tList
for c in self.allCouplings:
solutions[c.name] = solutions2[c.name][::-1] + solutions[c.name]
self.tList, self.solutions | |
None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
class ConfigValue(Type):
    # Auto-generated wrapper for the Juju 'ConfigValue' API type.
    _toSchema = {'source': 'source', 'value': 'value'}
    _toPy = {'source': 'source', 'value': 'value'}
    def __init__(self, source=None, value=None, **unknown_fields):
        '''
        source : str
        value : Any
        '''
        # Validate arguments against known Juju API types.
        if source is not None and not isinstance(source, (bytes, str)):
            raise Exception("Expected source_ to be a str, received: {}".format(type(source)))
        self.source = source
        self.value = value
        self.unknown_fields = unknown_fields
class Constraints(Type):
    # Auto-generated wrapper for the Juju 'Constraints' API type.
    _toSchema = {'count': 'Count', 'pool': 'Pool', 'size': 'Size'}
    _toPy = {'Count': 'count', 'Pool': 'pool', 'Size': 'size'}
    def __init__(self, count=None, pool=None, size=None, **unknown_fields):
        '''
        count : int
        pool : str
        size : int
        '''
        # Validate arguments against known Juju API types.
        if count is not None and not isinstance(count, int):
            raise Exception("Expected count_ to be a int, received: {}".format(type(count)))
        if pool is not None and not isinstance(pool, (bytes, str)):
            raise Exception("Expected pool_ to be a str, received: {}".format(type(pool)))
        if size is not None and not isinstance(size, int):
            raise Exception("Expected size_ to be a int, received: {}".format(type(size)))
        self.count = count
        self.pool = pool
        self.size = size
        self.unknown_fields = unknown_fields
class ConstraintsResult(Type):
    # Auto-generated wrapper for the Juju 'ConstraintsResult' API type.
    _toSchema = {'constraints': 'constraints', 'error': 'error'}
    _toPy = {'constraints': 'constraints', 'error': 'error'}
    def __init__(self, constraints=None, error=None, **unknown_fields):
        '''
        constraints : Value
        error : Error
        '''
        # Decode nested JSON payloads into their wrapper types.
        constraints = Value.from_json(constraints) if constraints else None
        error = Error.from_json(error) if error else None
        # Validate arguments against known Juju API types.
        if constraints is not None and not isinstance(constraints, (dict, Value)):
            raise Exception("Expected constraints_ to be a Value, received: {}".format(type(constraints)))
        if error is not None and not isinstance(error, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error)))
        self.constraints = constraints
        self.error = error
        self.unknown_fields = unknown_fields
class ConstraintsResults(Type):
    # Auto-generated wrapper for the Juju 'ConstraintsResults' API type.
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}
    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~ConstraintsResult]
        '''
        # Decode each element of the JSON payload into its wrapper type.
        results = [ConstraintsResult.from_json(o) for o in results or []]
        # Validate arguments against known Juju API types.
        if results is not None and not isinstance(results, (bytes, str, list)):
            raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results)))
        self.results = results
        self.unknown_fields = unknown_fields
class ConsumeApplicationArg(Type):
    # Auto-generated wrapper for the Juju 'ConsumeApplicationArg' API type.
    # _toSchema/_toPy map between Python attribute names and wire names.
    _toSchema = {'application_alias': 'application-alias', 'application_description': 'application-description', 'applicationofferdetails': 'ApplicationOfferDetails', 'bindings': 'bindings', 'endpoints': 'endpoints', 'external_controller': 'external-controller', 'macaroon': 'macaroon', 'offer_name': 'offer-name', 'offer_url': 'offer-url', 'offer_uuid': 'offer-uuid', 'source_model_tag': 'source-model-tag', 'spaces': 'spaces', 'users': 'users'}
    _toPy = {'ApplicationOfferDetails': 'applicationofferdetails', 'application-alias': 'application_alias', 'application-description': 'application_description', 'bindings': 'bindings', 'endpoints': 'endpoints', 'external-controller': 'external_controller', 'macaroon': 'macaroon', 'offer-name': 'offer_name', 'offer-url': 'offer_url', 'offer-uuid': 'offer_uuid', 'source-model-tag': 'source_model_tag', 'spaces': 'spaces', 'users': 'users'}
    def __init__(self, applicationofferdetails=None, application_alias=None, application_description=None, bindings=None, endpoints=None, external_controller=None, macaroon=None, offer_name=None, offer_url=None, offer_uuid=None, source_model_tag=None, spaces=None, users=None, **unknown_fields):
        '''
        applicationofferdetails : ApplicationOfferDetails
        application_alias : str
        application_description : str
        bindings : typing.Mapping[str, str]
        endpoints : typing.Sequence[~RemoteEndpoint]
        external_controller : ExternalControllerInfo
        macaroon : Macaroon
        offer_name : str
        offer_url : str
        offer_uuid : str
        source_model_tag : str
        spaces : typing.Sequence[~RemoteSpace]
        users : typing.Sequence[~OfferUserDetails]
        '''
        # Decode nested JSON payloads into their wrapper types; plain
        # scalars and mappings are stored as received.
        applicationofferdetails_ = ApplicationOfferDetails.from_json(applicationofferdetails) if applicationofferdetails else None
        application_alias_ = application_alias
        application_description_ = application_description
        bindings_ = bindings
        endpoints_ = [RemoteEndpoint.from_json(o) for o in endpoints or []]
        external_controller_ = ExternalControllerInfo.from_json(external_controller) if external_controller else None
        macaroon_ = Macaroon.from_json(macaroon) if macaroon else None
        offer_name_ = offer_name
        offer_url_ = offer_url
        offer_uuid_ = offer_uuid
        source_model_tag_ = source_model_tag
        spaces_ = [RemoteSpace.from_json(o) for o in spaces or []]
        users_ = [OfferUserDetails.from_json(o) for o in users or []]
        # Validate arguments against known Juju API types.
        if applicationofferdetails_ is not None and not isinstance(applicationofferdetails_, (dict, ApplicationOfferDetails)):
            raise Exception("Expected applicationofferdetails_ to be a ApplicationOfferDetails, received: {}".format(type(applicationofferdetails_)))
        if application_alias_ is not None and not isinstance(application_alias_, (bytes, str)):
            raise Exception("Expected application_alias_ to be a str, received: {}".format(type(application_alias_)))
        if application_description_ is not None and not isinstance(application_description_, (bytes, str)):
            raise Exception("Expected application_description_ to be a str, received: {}".format(type(application_description_)))
        if bindings_ is not None and not isinstance(bindings_, dict):
            raise Exception("Expected bindings_ to be a Mapping, received: {}".format(type(bindings_)))
        if endpoints_ is not None and not isinstance(endpoints_, (bytes, str, list)):
            raise Exception("Expected endpoints_ to be a Sequence, received: {}".format(type(endpoints_)))
        if external_controller_ is not None and not isinstance(external_controller_, (dict, ExternalControllerInfo)):
            raise Exception("Expected external_controller_ to be a ExternalControllerInfo, received: {}".format(type(external_controller_)))
        if macaroon_ is not None and not isinstance(macaroon_, (dict, Macaroon)):
            raise Exception("Expected macaroon_ to be a Macaroon, received: {}".format(type(macaroon_)))
        if offer_name_ is not None and not isinstance(offer_name_, (bytes, str)):
            raise Exception("Expected offer_name_ to be a str, received: {}".format(type(offer_name_)))
        if offer_url_ is not None and not isinstance(offer_url_, (bytes, str)):
            raise Exception("Expected offer_url_ to be a str, received: {}".format(type(offer_url_)))
        if offer_uuid_ is not None and not isinstance(offer_uuid_, (bytes, str)):
            raise Exception("Expected offer_uuid_ to be a str, received: {}".format(type(offer_uuid_)))
        if source_model_tag_ is not None and not isinstance(source_model_tag_, (bytes, str)):
            raise Exception("Expected source_model_tag_ to be a str, received: {}".format(type(source_model_tag_)))
        if spaces_ is not None and not isinstance(spaces_, (bytes, str, list)):
            raise Exception("Expected spaces_ to be a Sequence, received: {}".format(type(spaces_)))
        if users_ is not None and not isinstance(users_, (bytes, str, list)):
            raise Exception("Expected users_ to be a Sequence, received: {}".format(type(users_)))
        # Store the (decoded, validated) values on the instance.
        self.applicationofferdetails = applicationofferdetails_
        self.application_alias = application_alias_
        self.application_description = application_description_
        self.bindings = bindings_
        self.endpoints = endpoints_
        self.external_controller = external_controller_
        self.macaroon = macaroon_
        self.offer_name = offer_name_
        self.offer_url = offer_url_
        self.offer_uuid = offer_uuid_
        self.source_model_tag = source_model_tag_
        self.spaces = spaces_
        self.users = users_
        self.unknown_fields = unknown_fields
class ConsumeApplicationArgs(Type):
    # Auto-generated wrapper for the Juju 'ConsumeApplicationArgs' API type.
    _toSchema = {'args': 'args'}
    _toPy = {'args': 'args'}
    def __init__(self, args=None, **unknown_fields):
        '''
        args : typing.Sequence[~ConsumeApplicationArg]
        '''
        # Decode each element of the JSON payload into its wrapper type.
        args = [ConsumeApplicationArg.from_json(o) for o in args or []]
        # Validate arguments against known Juju API types.
        if args is not None and not isinstance(args, (bytes, str, list)):
            raise Exception("Expected args_ to be a Sequence, received: {}".format(type(args)))
        self.args = args
        self.unknown_fields = unknown_fields
class ConsumeApplicationResult(Type):
    # Auto-generated wrapper for the Juju 'ConsumeApplicationResult' API type.
    _toSchema = {'error': 'error', 'local_name': 'local-name'}
    _toPy = {'error': 'error', 'local-name': 'local_name'}
    def __init__(self, error=None, local_name=None, **unknown_fields):
        '''
        error : Error
        local_name : str
        '''
        # Decode the nested error payload, if any.
        error = Error.from_json(error) if error else None
        # Validate arguments against known Juju API types.
        if error is not None and not isinstance(error, (dict, Error)):
            raise Exception("Expected error_ to be a Error, received: {}".format(type(error)))
        if local_name is not None and not isinstance(local_name, (bytes, str)):
            raise Exception("Expected local_name_ to be a str, received: {}".format(type(local_name)))
        self.error = error
        self.local_name = local_name
        self.unknown_fields = unknown_fields
class ConsumeApplicationResults(Type):
    _toSchema = {'results': 'results'}
    _toPy = {'results': 'results'}

    def __init__(self, results=None, **unknown_fields):
        '''
        results : typing.Sequence[~ConsumeApplicationResult]
        '''
        # Decode each wire-format entry; a missing/None sequence becomes [].
        results_ = [ConsumeApplicationResult.from_json(o) for o in results or []]
        # Validate arguments against known Juju API types.
        if results_ is not None:
            if not isinstance(results_, (bytes, str, list)):
                raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
        self.results = results_
        self.unknown_fields = unknown_fields
class ConsumeOfferDetails(Type):
    _toSchema = {'external_controller': 'external-controller', 'macaroon': 'macaroon', 'offer': 'offer'}
    _toPy = {'external-controller': 'external_controller', 'macaroon': 'macaroon', 'offer': 'offer'}

    def __init__(self, external_controller=None, macaroon=None, offer=None, **unknown_fields):
        '''
        external_controller : ExternalControllerInfo
        macaroon : Macaroon
        offer : ApplicationOfferDetails
        '''
        # Decode the nested JSON payloads into their wrapper types; None stays None.
        external_controller_ = ExternalControllerInfo.from_json(external_controller) if external_controller else None
        macaroon_ = Macaroon.from_json(macaroon) if macaroon else None
        offer_ = ApplicationOfferDetails.from_json(offer) if offer else None
        # Validate arguments against known Juju API types.
        if external_controller_ is not None:
            if not isinstance(external_controller_, (dict, ExternalControllerInfo)):
                raise Exception("Expected external_controller_ to be a ExternalControllerInfo, received: {}".format(type(external_controller_)))
        if macaroon_ is not None:
            if not isinstance(macaroon_, (dict, Macaroon)):
                raise Exception("Expected macaroon_ to be a Macaroon, received: {}".format(type(macaroon_)))
        if offer_ is not None:
            if not isinstance(offer_, (dict, ApplicationOfferDetails)):
                raise Exception("Expected offer_ to be a ApplicationOfferDetails, received: {}".format(type(offer_)))
        self.external_controller = external_controller_
        self.macaroon = macaroon_
        self.offer = offer_
        self.unknown_fields = unknown_fields
class ConsumeOfferDetailsResult(Type):
_toSchema = {'consumeofferdetails': 'ConsumeOfferDetails', 'error': 'error', 'external_controller': 'external-controller', 'macaroon': 'macaroon', 'offer': 'offer'}
_toPy = {'ConsumeOfferDetails': 'consumeofferdetails', 'error': 'error', 'external-controller': 'external_controller', 'macaroon': 'macaroon', 'offer': 'offer'}
def __init__(self, consumeofferdetails=None, error=None, external_controller=None, macaroon=None, offer=None, **unknown_fields):
'''
consumeofferdetails : ConsumeOfferDetails
error : Error
external_controller : ExternalControllerInfo
macaroon : Macaroon
offer : ApplicationOfferDetails
'''
consumeofferdetails_ = ConsumeOfferDetails.from_json(consumeofferdetails) if consumeofferdetails else None
error_ = Error.from_json(error) if error else None
external_controller_ = ExternalControllerInfo.from_json(external_controller) if external_controller else None
macaroon_ = Macaroon.from_json(macaroon) if macaroon else None
offer_ = ApplicationOfferDetails.from_json(offer) if offer else None
# Validate arguments | |
import numpy as np
import helper
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from datetime import datetime
# Build the Neural Network
# Components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
#
# - model_inputs
# - process_decoder_input
# - encoding_layer
# - decoding_layer_train
# - decoding_layer_infer
# - decoding_layer
# - seq2seq_model
def model_inputs():
    """
    Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.

    :return: Tuple (input, targets, learning rate, keep probability, target sequence length,
             max target sequence length, source sequence length)
    """
    # Token-id matrices of shape (batch, time).
    inputs = tf.placeholder(tf.int32, shape=[None, None], name='input')
    targets = tf.placeholder(tf.int32, shape=[None, None], name='targets')
    # Scalar hyperparameters fed at run time.
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    keep_probability = tf.placeholder(tf.float32, name='keep_prob')
    # Per-example sequence lengths, plus the batch-wide maximum target length.
    target_seq_len = tf.placeholder(tf.int32, shape=[None], name='target_sequence_length')
    source_seq_len = tf.placeholder(tf.int32, shape=[None], name='source_sequence_length')
    max_target_len = tf.reduce_max(target_seq_len, name='max_target_len')
    return (inputs, targets, learning_rate, keep_probability,
            target_seq_len, max_target_len, source_seq_len)
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for decoding: drop the last token of every target
    sequence and prepend the <GO> token.
    :param target_data: Target Placeholder
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param batch_size: Batch Size
    :return: Preprocessed target data
    """
    go_id = target_vocab_to_int['<GO>']
    # Slice off the final token of each row: [:, :-1].
    trimmed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    # One <GO> token per batch row, concatenated in front.
    go_tokens = tf.fill([batch_size, 1], go_id)
    return tf.concat([go_tokens, trimmed], 1)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
                   source_sequence_length, source_vocab_size,
                   encoding_embedding_size):
    """
    Create encoding layer
    :param rnn_inputs: Inputs for the RNN
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param keep_prob: Dropout keep probability
    :param source_sequence_length: a list of the lengths of each sequence in the batch
    :param source_vocab_size: vocabulary size of source data
    :param encoding_embedding_size: embedding size of source data
    :return: tuple (RNN output, RNN state)
    """
    # Look up learned embeddings for the source token ids.
    embedded = tf.contrib.layers.embed_sequence(
        rnn_inputs, source_vocab_size, encoding_embedding_size)

    def build_cell(size):
        # LSTM cell with fixed-seed uniform init, wrapped with output dropout.
        lstm = tf.contrib.rnn.LSTMCell(
            size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)

    # Stack num_layers cells into one multi-layer RNN cell.
    stacked_cell = tf.contrib.rnn.MultiRNNCell(
        [build_cell(rnn_size) for _ in range(num_layers)])
    # dynamic_rnn returns (outputs, final_state).
    return tf.nn.dynamic_rnn(
        stacked_cell, embedded,
        sequence_length=source_sequence_length, dtype=tf.float32)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
                         target_sequence_length, max_summary_length,
                         output_layer, keep_prob):
    """
    Create a decoding layer for training
    :param encoder_state: Encoder State
    :param dec_cell: Decoder RNN Cell
    :param dec_embed_input: Decoder embedded input
    :param target_sequence_length: The lengths of each sequence in the target batch
    :param max_summary_length: The length of the longest sequence in the batch
    :param output_layer: Function to apply the output layer
    :param keep_prob: Dropout keep probability (accepted but not used in this implementation)
    :return: BasicDecoderOutput containing training logits and sample_id
    """
    # Teacher forcing: feed the ground-truth embeddings at every step.
    helper = tf.contrib.seq2seq.TrainingHelper(
        inputs=dec_embed_input,
        sequence_length=target_sequence_length,
        time_major=False)
    decoder = tf.contrib.seq2seq.BasicDecoder(
        cell=dec_cell,
        helper=helper,
        initial_state=encoder_state,
        output_layer=output_layer)
    # dynamic_decode returns a tuple; index 0 is the decoder output.
    outputs = tf.contrib.seq2seq.dynamic_decode(
        decoder=decoder,
        impute_finished=True,
        maximum_iterations=max_summary_length)[0]
    return outputs
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
                         end_of_sequence_id, max_target_sequence_length,
                         vocab_size, output_layer, batch_size, keep_prob):
    """
    Create a decoding layer for inference
    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN Cell
    :param dec_embeddings: Decoder embeddings
    :param start_of_sequence_id: GO ID
    :param end_of_sequence_id: EOS Id
    :param max_target_sequence_length: Maximum length of target sequences
    :param vocab_size: Size of decoder/target vocabulary (accepted but not used here)
    :param output_layer: Function to apply the output layer
    :param batch_size: Batch size
    :param keep_prob: Dropout keep probability (accepted but not used here)
    :return: BasicDecoderOutput containing inference logits and sample_id
    """
    # One <GO> token per batch row to seed greedy decoding.
    start_tokens = tf.tile(
        tf.constant([start_of_sequence_id], dtype=tf.int32),
        [batch_size],
        name='start_tokens')
    # At each step, feed back the embedding of the argmax token.
    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        embedding=dec_embeddings,
        start_tokens=start_tokens,
        end_token=end_of_sequence_id)
    decoder = tf.contrib.seq2seq.BasicDecoder(
        cell=dec_cell,
        helper=helper,
        initial_state=encoder_state,
        output_layer=output_layer)
    # dynamic_decode returns a tuple; index 0 is the decoder output.
    return tf.contrib.seq2seq.dynamic_decode(
        decoder=decoder,
        impute_finished=True,
        maximum_iterations=max_target_sequence_length)[0]
def decoding_layer(dec_input, encoder_state,
                   target_sequence_length, max_target_sequence_length,
                   rnn_size,
                   num_layers, target_vocab_to_int, target_vocab_size,
                   batch_size, keep_prob, decoding_embedding_size):
    """
    Create decoding layer
    :param dec_input: Decoder input
    :param encoder_state: Encoder state
    :param target_sequence_length: The lengths of each sequence in the target batch
    :param max_target_sequence_length: Maximum length of target sequences
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param target_vocab_size: Size of target vocabulary
    :param batch_size: The size of the batch
    :param keep_prob: Dropout keep probability
    :param decoding_embedding_size: Decoding embedding size
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Embedding matrix shared by the training lookup and the inference helper.
    dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)

    def build_cell(size):
        # NOTE(review): unlike the encoder, no DropoutWrapper here — confirm intended.
        return tf.contrib.rnn.LSTMCell(
            size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))

    dec_cell = tf.contrib.rnn.MultiRNNCell(
        [build_cell(rnn_size) for _ in range(num_layers)])
    # Projects decoder outputs to per-token scores over the target vocabulary.
    output_layer = Dense(
        target_vocab_size,
        kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))

    # Training decoder.
    with tf.variable_scope("decode"):
        training_decoder_output = decoding_layer_train(
            encoder_state, dec_cell, dec_embed_input,
            target_sequence_length, max_target_sequence_length,
            output_layer, keep_prob)
    # Inference decoder shares the weights trained above (reuse=True).
    with tf.variable_scope("decode", reuse=True):
        inference_decoder_output = decoding_layer_infer(
            encoder_state, dec_cell, dec_embeddings,
            target_vocab_to_int['<GO>'], target_vocab_to_int['<EOS>'],
            max_target_sequence_length, target_vocab_size,
            output_layer, batch_size, keep_prob)
    return training_decoder_output, inference_decoder_output
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
                  source_sequence_length, target_sequence_length,
                  max_target_sentence_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers, target_vocab_to_int):
    """
    Build the Sequence-to-Sequence part of the neural network
    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch Size
    :param source_sequence_length: Sequence Lengths of source sequences in the batch
    :param target_sequence_length: Sequence Lengths of target sequences in the batch
    :param max_target_sentence_length: Maximum length of target sequences
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Encode the source; only the final state feeds the decoder (no attention here).
    _, enc_state = encoding_layer(input_data, rnn_size, num_layers, keep_prob,
                                  source_sequence_length, source_vocab_size,
                                  enc_embedding_size)
    # Prepend <GO> and drop the last token to form the decoder's training inputs.
    dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
    # Build both the training and inference decoders over the encoder state.
    return decoding_layer(dec_input, enc_state,
                          target_sequence_length, max_target_sentence_length,
                          rnn_size, num_layers, target_vocab_to_int,
                          target_vocab_size, batch_size, keep_prob,
                          dec_embedding_size)
def pad_sentence_batch(sentence_batch, pad_int):
    """Pad sentences with <PAD> so that each sentence of a batch has the same length.

    :param sentence_batch: list of sentences, each a list of token ids
    :param pad_int: id of the <PAD> token to append on the right
    :return: list of equal-length sentences (empty list for an empty batch)
    """
    # default=0 makes an empty batch return [] instead of raising ValueError.
    max_sentence = max((len(sentence) for sentence in sentence_batch), default=0)
    return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
    """Yield padded (sources, targets, source lengths, target lengths) batches.

    Trailing examples that do not fill a whole batch are dropped.
    """
    full_batches = len(sources) // batch_size
    for start in range(0, full_batches * batch_size, batch_size):
        stop = start + batch_size
        # Pad each slice so every row in the batch has the same length.
        padded_sources = np.array(pad_sentence_batch(sources[start:stop], source_pad_int))
        padded_targets = np.array(pad_sentence_batch(targets[start:stop], target_pad_int))
        # After padding, each row's length is the batch-wide maximum.
        source_lengths = [len(row) for row in padded_sources]
        target_lengths = [len(row) for row in padded_targets]
        yield padded_sources, padded_targets, source_lengths, target_lengths
def get_accuracy(target, logits):
"""
Calculate accuracy
"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0, 0), (0, max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0, 0), (0, max_seq - logits.shape[1])],
'constant')
return np.mean(np.equal(target, logits))
# --- Training hyperparameters ---
epochs = 21
batch_size = 1024
rnn_size = 128  # hidden units per LSTM layer
num_layers = 2  # stacked LSTM layers in encoder and decoder
encoding_embedding_size = 100
decoding_embedding_size = 100
learning_rate = 0.01
keep_probability = 0.75  # dropout keep probability used during training
display_step = 20  # presumably the progress-report interval; its use is outside this view
save_path = 'checkpoints/dev'
# Token-id corpora and vocab mappings produced by the preprocessing step.
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
# NOTE(review): computed from source_int_text although the name says "target" — confirm intended.
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
# Build the training graph.
train_graph = tf.Graph()
with train_graph.as_default():
    input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
    input_shape = tf.shape(input_data)
    # Source sequences are fed reversed — presumably the seq2seq input-reversal trick; confirm.
    train_logits, inference_logits = seq2seq_model(
        tf.reverse(input_data, [-1]),
        targets,
        keep_prob,
        batch_size,
        source_sequence_length,
        target_sequence_length,
        max_target_sequence_length,
        len(source_vocab_to_int),
        len(target_vocab_to_int),
        encoding_embedding_size,
        decoding_embedding_size,
        rnn_size,
        num_layers,
        target_vocab_to_int
    )
    # Name the output tensors so they can be fetched by name later (e.g. after
    # restoring from a checkpoint).
    training_logits = tf.identity(train_logits.rnn_output, name='logits')
    inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
    # Mask positions beyond each target's length so padding does not affect the loss.
    masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
    with tf.name_scope("optimization"):
        # Loss function: masked sequence cross-entropy.
        cost = tf.contrib.seq2seq.sequence_loss(
            training_logits,
            targets,
            masks)
        # Optimizer
        optimizer = tf.train.AdamOptimizer(lr)
        # Gradient Clipping: clip each gradient element to [-1, 1]; gradients that
        # are None (variables not reached by the loss) are dropped.
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients)
# Timestamped TensorBoard log directory for this run.
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "./logs/"
logdir = "{}/run-{}-lstm".format(root_logdir, now)
# Split data to training and validation sets:
# the first batch_size examples are held out for validation.
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
# Pre-build the single validation batch once, outside the training loop.
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths) = next(
    get_batches(valid_source,
                valid_target,
                batch_size,
                source_vocab_to_int['<PAD>'],
                target_vocab_to_int['<PAD>']))
# Write the graph definition for TensorBoard.
writer = tf.summary.FileWriter(logdir, graph=train_graph)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, | |
aug_det.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# varying scales
aug = iaa.Affine(scale={"x": (0.5, 1.5), "y": (0.5, 1.5)}, translate_px=0,
rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 2, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8) * 100
image = image[:, :, np.newaxis]
images_list = [image]
images = np.array([image])
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.8)
assert nb_changed_aug_det == 0
aug = iaa.Affine(scale=iap.Uniform(0.7, 0.9))
assert isinstance(aug.scale, iap.Uniform)
assert isinstance(aug.scale.a, iap.Deterministic)
assert isinstance(aug.scale.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.scale.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.scale.b.value < 0.9 + 1e-8
# ---------------------
# translate
# ---------------------
# move one pixel to the right
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move one pixel to the right
# with backend = skimage
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage")
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = skimage
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage")
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = skimage, order=ALL
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage", order=ia.ALL)
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = skimage, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="skimage", order=[0, 1, 3])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = cv2, order=list
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="cv2", order=[0, 1, 3])
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the right
# with backend = cv2, order=StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px={"x": 1, "y": 0}, rotate=0, shear=0, backend="cv2", order=iap.Choice([0, 1, 3]))
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
# move one pixel to the bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": 0, "y": 1}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the right
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0.3333, "y": 0}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[1, 2] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# move 33% (one pixel) to the bottom
aug = iaa.Affine(scale=1.0, translate_percent={"x": 0, "y": 0.3333}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# 0-1px to left/right and 0-1px to top/bottom
aug = iaa.Affine(scale=1.0, translate_px={"x": (-1, 1), "y": (-1, 1)}, rotate=0, shear=0)
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
centers_aug = np.copy(image).astype(np.int32) * 0
centers_aug_det = np.copy(image).astype(np.int32) * 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert len(observed_aug[0].nonzero()[0]) == 1
assert len(observed_aug_det[0].nonzero()[0]) == 1
centers_aug += (observed_aug[0] > 0)
centers_aug_det += (observed_aug_det[0] > 0)
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
assert (centers_aug > int(nb_iterations * (1/9 * 0.6))).all()
assert (centers_aug < int(nb_iterations * (1/9 * 1.4))).all()
aug = iaa.Affine(translate_percent=iap.Uniform(0.7, 0.9))
assert isinstance(aug.translate, iap.Uniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert 0.7 - 1e-8 < aug.translate.a.value < 0.7 + 1e-8
assert 0.9 - 1e-8 < aug.translate.b.value < 0.9 + 1e-8
aug = iaa.Affine(translate_px=iap.DiscreteUniform(1, 10))
assert isinstance(aug.translate, iap.DiscreteUniform)
assert isinstance(aug.translate.a, iap.Deterministic)
assert isinstance(aug.translate.b, iap.Deterministic)
assert aug.translate.a.value == 1
assert aug.translate.b.value == 10
# ---------------------
# translate heatmaps
# ---------------------
heatmaps = ia.HeatmapsOnImage(
np.float32([
[0.0, 0.5, 0.75],
[0.0, 0.5, 0.75],
[0.75, 0.75, 0.75],
]),
shape=(3, 3, 3)
)
arr_expected_1px_right = np.float32([
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.75, 0.75],
])
aug = iaa.Affine(translate_px={"x": 1})
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# should still use mode=constant cval=0 even when other settings chosen
aug = iaa.Affine(translate_px={"x": 1}, cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
aug = iaa.Affine(translate_px={"x": 1}, mode="edge", cval=255)
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == heatmaps.shape
assert heatmaps.min_value - 1e-6 < observed.min_value < heatmaps.min_value + 1e-6
assert heatmaps.max_value - 1e-6 < observed.max_value < heatmaps.max_value + 1e-6
assert np.array_equal(observed.get_arr(), arr_expected_1px_right)
# ---------------------
# rotate
# ---------------------
# rotate by 90 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=90, shear=0)
aug_det = aug.to_deterministic()
image = np.zeros((3, 3, 1), dtype=np.uint8)
image_aug = np.copy(image)
image[1, :] = 255
image_aug[0, 1] = 255
image_aug[1, 1] = 255
image_aug[2, 1] = 255
images = np.array([image])
images_aug = np.array([image_aug])
images_list = [image]
images_aug_list = [image_aug]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=1)], shape=base_img.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=1, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=1, y=2)], shape=base_img.shape)]
observed = aug.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug_det.augment_images(images)
observed[observed >= 100] = 255
observed[observed < 100] = 0
assert np.array_equal(observed, images_aug)
observed = aug.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug_det.augment_images(images_list)
observed[0][observed[0] >= 100] = 255
observed[0][observed[0] < 100] = 0
assert array_equal_lists(observed, images_aug_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints_aug)
# rotate by StochasticParameter
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=iap.Uniform(10, 20), shear=0)
assert isinstance(aug.rotate, iap.Uniform)
assert isinstance(aug.rotate.a, iap.Deterministic)
assert aug.rotate.a.value == 10
assert isinstance(aug.rotate.b, iap.Deterministic)
assert aug.rotate.b.value == 20
# random rotation 0-364 degrees
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=(0, 364), shear=0)
aug_det = aug.to_deterministic()
last_aug = | |
0.5*m.b166*m.b318 + 0.5*m.b166*m.b334
+ 0.5*m.b166*m.b337 + 0.5*m.b166*m.b357 + 0.5*m.b166*m.b374 + 0.5*m.b166*m.b383 + 0.5*m.b166*
m.b397 + 0.5*m.b166*m.b402 + 0.5*m.b166*m.b410 + 0.5*m.b166*m.b411 + 0.5*m.b166*m.b505 + 0.5*
m.b166*m.b509 + 0.5*m.b166*m.b510 + 0.5*m.b166*m.b530 + 0.5*m.b166*m.b536 + 0.5*m.b166*m.b544 +
0.5*m.b166*m.b547 + 0.5*m.b166*m.b553 + 0.5*m.b166*m.b562 + 0.5*m.b166*m.b569 + 0.5*m.b166*m.b574
+ 0.5*m.b166*m.b576 + 0.5*m.b166*m.b583 + 0.5*m.b166*m.b586 + 0.5*m.b166*m.b591 + 0.5*m.b166*
m.b602 + 0.5*m.b166*m.b605 + 0.5*m.b166*m.b641 + 0.5*m.b166*m.b645 + 0.5*m.b166*m.b648 + 0.5*
m.b166*m.b650 + 0.5*m.b166*m.b656 + 0.5*m.b166*m.b658 + 0.5*m.b166*m.b662 + 0.5*m.b166*m.b666 +
m.b166*m.x842 + 0.5*m.b167*m.b168 + 0.5*m.b167*m.b170 + m.b167*m.b171 + 0.5*m.b167*m.b172 + 0.5*
m.b167*m.b173 + 0.5*m.b167*m.b175 + 0.5*m.b167*m.b176 + 0.5*m.b167*m.b177 + 0.5*m.b167*m.b178 +
0.5*m.b167*m.b179 + 0.5*m.b167*m.b183 + 0.5*m.b167*m.b186 + 0.5*m.b167*m.b251 + 0.5*m.b167*m.b263
+ 0.5*m.b167*m.b271 + 0.5*m.b167*m.b284 + 0.5*m.b168*m.b171 + 0.5*m.b168*m.b172 + m.b168*m.b175
+ 0.5*m.b168*m.b176 + 0.5*m.b168*m.b178 + 0.5*m.b168*m.b183 + 0.5*m.b168*m.b186 + 0.5*m.b168*
m.b251 + 0.5*m.b168*m.b263 + 0.5*m.b168*m.b271 + 0.5*m.b168*m.b284 + m.b168*m.x846 + m.b169*
m.b174 + 0.5*m.b169*m.b176 + 0.5*m.b169*m.b178 + 0.5*m.b169*m.b183 + 0.5*m.b169*m.b184 + 0.5*
m.b169*m.b186 + 0.5*m.b170*m.b171 + 0.5*m.b170*m.b173 + 0.5*m.b170*m.b177 + 0.5*m.b170*m.b179 +
0.5*m.b171*m.b172 + 0.5*m.b171*m.b173 + 0.5*m.b171*m.b175 + 0.5*m.b171*m.b176 + 0.5*m.b171*m.b177
+ 0.5*m.b171*m.b178 + 0.5*m.b171*m.b179 + 0.5*m.b171*m.b183 + 0.5*m.b171*m.b186 + 0.5*m.b171*
m.b251 + 0.5*m.b171*m.b263 + 0.5*m.b171*m.b271 + 0.5*m.b171*m.b284 + 0.5*m.b172*m.b175 + 0.5*
m.b172*m.b176 + 0.5*m.b172*m.b178 + 0.5*m.b172*m.b183 + 0.5*m.b172*m.b186 + 0.5*m.b172*m.b251 +
0.5*m.b172*m.b263 + 0.5*m.b172*m.b271 + 0.5*m.b172*m.b284 + m.b172*m.x848 + m.b173*m.b177 + 0.5*
m.b173*m.b179 + 0.5*m.b173*m.b185 + 0.5*m.b173*m.b682 + 0.5*m.b173*m.b685 + 0.5*m.b173*m.b698 +
0.5*m.b173*m.b711 + 0.5*m.b173*m.b713 + 0.5*m.b173*m.b715 + 0.5*m.b173*m.b720 + 0.5*m.b173*m.b730
+ 0.5*m.b173*m.b740 + 0.5*m.b173*m.b741 + 0.5*m.b173*m.b743 + 0.5*m.b173*m.b744 + 0.5*m.b173*
m.b745 + 0.5*m.b173*m.b748 + 0.5*m.b173*m.b751 + 0.5*m.b173*m.b756 + 0.5*m.b173*m.b763 + 0.5*
m.b173*m.b766 + 0.5*m.b173*m.b768 + 0.5*m.b173*m.b769 + 0.5*m.b173*m.b770 + 0.5*m.b173*m.b785 +
0.5*m.b173*m.b786 + 0.5*m.b173*m.b791 + 0.5*m.b173*m.b812 + 0.5*m.b173*m.b813 + 0.5*m.b173*m.b817
+ 0.5*m.b173*m.b819 + 0.5*m.b173*m.b825 + 0.5*m.b173*m.b827 + 0.5*m.b174*m.b176 + 0.5*m.b174*
m.b178 + 0.5*m.b174*m.b183 + 0.5*m.b174*m.b184 + 0.5*m.b174*m.b186 + 0.5*m.b175*m.b176 + 0.5*
m.b175*m.b178 + 0.5*m.b175*m.b183 + 0.5*m.b175*m.b186 + 0.5*m.b175*m.b251 + 0.5*m.b175*m.b263 +
0.5*m.b175*m.b271 + 0.5*m.b175*m.b284 + m.b175*m.x846 + m.b176*m.b178 + m.b176*m.b183 + m.b176*
m.b186 + 0.5*m.b176*m.b251 + 0.5*m.b176*m.b263 + 0.5*m.b176*m.b271 + 0.5*m.b176*m.b284 + 0.5*
m.b177*m.b179 + 0.5*m.b177*m.b185 + 0.5*m.b177*m.b682 + 0.5*m.b177*m.b685 + 0.5*m.b177*m.b698 +
0.5*m.b177*m.b711 + 0.5*m.b177*m.b713 + 0.5*m.b177*m.b715 + 0.5*m.b177*m.b720 + 0.5*m.b177*m.b730
+ 0.5*m.b177*m.b740 + 0.5*m.b177*m.b741 + 0.5*m.b177*m.b743 + 0.5*m.b177*m.b744 + 0.5*m.b177*
m.b745 + 0.5*m.b177*m.b748 + 0.5*m.b177*m.b751 + 0.5*m.b177*m.b756 + 0.5*m.b177*m.b763 + 0.5*
m.b177*m.b766 + 0.5*m.b177*m.b768 + 0.5*m.b177*m.b769 + 0.5*m.b177*m.b770 + 0.5*m.b177*m.b785 +
0.5*m.b177*m.b786 + 0.5*m.b177*m.b791 + 0.5*m.b177*m.b812 + 0.5*m.b177*m.b813 + 0.5*m.b177*m.b817
+ 0.5*m.b177*m.b819 + 0.5*m.b177*m.b825 + 0.5*m.b177*m.b827 + m.b178*m.b183 + m.b178*m.b186 +
0.5*m.b178*m.b251 + 0.5*m.b178*m.b263 + 0.5*m.b178*m.b271 + 0.5*m.b178*m.b284 + 0.5*m.b179*m.b256
+ 0.5*m.b179*m.b257 + 0.5*m.b179*m.b264 + 0.5*m.b179*m.b269 + 0.5*m.b179*m.b283 + 0.5*m.b180*
m.b181 + 0.5*m.b180*m.b252 + 0.5*m.b180*m.b253 + 0.5*m.b180*m.b265 + 0.5*m.b180*m.b298 + 0.5*
m.b180*m.b300 + 0.5*m.b180*m.b318 + 0.5*m.b180*m.b334 + 0.5*m.b180*m.b337 + 0.5*m.b180*m.b357 +
0.5*m.b180*m.b374 + 0.5*m.b180*m.b383 + 0.5*m.b180*m.b397 + 0.5*m.b180*m.b402 + 0.5*m.b180*m.b410
+ 0.5*m.b180*m.b411 + 0.5*m.b180*m.b505 + 0.5*m.b180*m.b509 + 0.5*m.b180*m.b510 + 0.5*m.b180*
m.b530 + 0.5*m.b180*m.b536 + 0.5*m.b180*m.b544 + 0.5*m.b180*m.b547 + 0.5*m.b180*m.b553 + 0.5*
m.b180*m.b562 + 0.5*m.b180*m.b569 + 0.5*m.b180*m.b574 + 0.5*m.b180*m.b576 + 0.5*m.b180*m.b583 +
0.5*m.b180*m.b586 + 0.5*m.b180*m.b591 + 0.5*m.b180*m.b602 + 0.5*m.b180*m.b605 + 0.5*m.b180*m.b641
+ 0.5*m.b180*m.b645 + 0.5*m.b180*m.b648 + 0.5*m.b180*m.b650 + 0.5*m.b180*m.b656 + 0.5*m.b180*
m.b658 + 0.5*m.b180*m.b662 + 0.5*m.b180*m.b666 + m.b180*m.x843 + 0.5*m.b181*m.b252 + 0.5*m.b181*
m.b253 + 0.5*m.b181*m.b265 + 0.5*m.b181*m.b298 + 0.5*m.b181*m.b300 + 0.5*m.b181*m.b318 + 0.5*
m.b181*m.b334 + 0.5*m.b181*m.b337 + 0.5*m.b181*m.b357 + 0.5*m.b181*m.b374 + 0.5*m.b181*m.b383 +
0.5*m.b181*m.b397 + 0.5*m.b181*m.b402 + 0.5*m.b181*m.b410 + 0.5*m.b181*m.b411 + 0.5*m.b181*m.b505
+ 0.5*m.b181*m.b509 + 0.5*m.b181*m.b510 + 0.5*m.b181*m.b530 + 0.5*m.b181*m.b536 + 0.5*m.b181*
m.b544 + 0.5*m.b181*m.b547 + 0.5*m.b181*m.b553 + 0.5*m.b181*m.b562 + 0.5*m.b181*m.b569 + 0.5*
m.b181*m.b574 + 0.5*m.b181*m.b576 + 0.5*m.b181*m.b583 + 0.5*m.b181*m.b586 + 0.5*m.b181*m.b591 +
0.5*m.b181*m.b602 + 0.5*m.b181*m.b605 + 0.5*m.b181*m.b641 + 0.5*m.b181*m.b645 + 0.5*m.b181*m.b648
+ 0.5*m.b181*m.b650 + 0.5*m.b181*m.b656 + 0.5*m.b181*m.b658 + 0.5*m.b181*m.b662 + 0.5*m.b181*
m.b666 + 0.5*m.b182*m.b252 + 0.5*m.b182*m.b253 + 0.5*m.b182*m.b265 + 0.5*m.b182*m.b298 + 0.5*
m.b182*m.b300 + 0.5*m.b182*m.b320 + 0.5*m.b182*m.b329 + 0.5*m.b182*m.b356 + 0.5*m.b182*m.b359 +
0.5*m.b182*m.b373 + 0.5*m.b182*m.b375 + 0.5*m.b182*m.b377 + 0.5*m.b182*m.b391 + 0.5*m.b182*m.b392
+ 0.5*m.b182*m.b395 + 0.5*m.b182*m.b396 + 0.5*m.b182*m.b408 + 0.5*m.b182*m.b414 + 0.5*m.b182*
m.b420 + 0.5*m.b182*m.b421 + 0.5*m.b182*m.b438 + 0.5*m.b182*m.b442 + 0.5*m.b182*m.b444 + 0.5*
m.b182*m.b451 + 0.5*m.b182*m.b454 + 0.5*m.b182*m.b462 + 0.5*m.b182*m.b473 + 0.5*m.b182*m.b479 +
0.5*m.b182*m.b481 + 0.5*m.b182*m.b486 + 0.5*m.b182*m.b489 + 0.5*m.b182*m.b520 + 0.5*m.b182*m.b524
+ 0.5*m.b182*m.b540 + 0.5*m.b182*m.b541 + 0.5*m.b182*m.b550 + 0.5*m.b182*m.b552 + 0.5*m.b182*
m.b556 + 0.5*m.b182*m.b563 + 0.5*m.b182*m.b568 + 0.5*m.b182*m.b578 + 0.5*m.b182*m.b585 + 0.5*
m.b182*m.b588 + 0.5*m.b182*m.b601 + 0.5*m.b182*m.b606 + 0.5*m.b182*m.b609 + 0.5*m.b182*m.b613 +
0.5*m.b182*m.b618 + 0.5*m.b182*m.b620 + 0.5*m.b182*m.b624 + 0.5*m.b182*m.b625 + 0.5*m.b182*m.b630
+ 0.5*m.b182*m.b631 + 0.5*m.b182*m.b636 + 0.5*m.b182*m.b647 + 0.5*m.b182*m.b651 + m.b183*m.b186
+ 0.5*m.b183*m.b251 + 0.5*m.b183*m.b263 + 0.5*m.b183*m.b271 + 0.5*m.b183*m.b284 + 0.5*m.b184*
m.b251 + 0.5*m.b184*m.b263 + 0.5*m.b184*m.b271 + 0.5*m.b184*m.b284 + 0.5*m.b185*m.b682 + 0.5*
m.b185*m.b685 + 0.5*m.b185*m.b698 + 0.5*m.b185*m.b711 + 0.5*m.b185*m.b713 + 0.5*m.b185*m.b715 +
0.5*m.b185*m.b720 + 0.5*m.b185*m.b730 + 0.5*m.b185*m.b740 + 0.5*m.b185*m.b741 + 0.5*m.b185*m.b743
+ 0.5*m.b185*m.b744 + 0.5*m.b185*m.b745 + 0.5*m.b185*m.b748 + 0.5*m.b185*m.b751 + 0.5*m.b185*
m.b756 + 0.5*m.b185*m.b763 + 0.5*m.b185*m.b766 + 0.5*m.b185*m.b768 + 0.5*m.b185*m.b769 + 0.5*
m.b185*m.b770 + 0.5*m.b185*m.b785 + 0.5*m.b185*m.b786 + 0.5*m.b185*m.b791 + 0.5*m.b185*m.b812 +
0.5*m.b185*m.b813 + 0.5*m.b185*m.b817 + 0.5*m.b185*m.b819 + 0.5*m.b185*m.b825 + 0.5*m.b185*m.b827
+ m.b185*m.x849 + 0.5*m.b186*m.b251 + 0.5*m.b186*m.b263 + 0.5*m.b186*m.b271 + 0.5*m.b186*m.b284
+ m.b187*m.b691 + m.b187*m.b701 + m.b187*m.b710 + m.b187*m.b716 + m.b187*m.b718 + m.b187*m.b725
+ m.b187*m.b727 + m.b187*m.b733 + m.b187*m.b734 + m.b187*m.b746 + m.b187*m.b750 + m.b187*m.b753
+ m.b187*m.b764 + m.b187*m.b783 + m.b187*m.b792 + m.b187*m.b793 + m.b187*m.b797 + m.b187*m.b806
+ m.b187*m.b828 + m.b187*m.b830 + m.b187*m.b833 + m.b188*m.b326 + m.b188*m.b407 + m.b188*m.b412
+ m.b188*m.b427 + m.b188*m.b434 + m.b188*m.b449 + m.b188*m.b483 + m.b188*m.b545 + m.b188*m.b546
+ m.b188*m.b557 + m.b188*m.b589 + m.b188*m.b590 + m.b188*m.b615 + m.b188*m.b621 + m.b188*m.b659
+ m.b188*m.b663 + m.b189*m.b250 + m.b189*m.b258 + m.b189*m.b259 + m.b189*m.b260 + m.b189*m.b272
+ m.b189*m.b276 + m.b189*m.b279 + m.b189*m.b281 + m.b189*m.b291 + m.b189*m.b303 + m.b189*m.b324
+ m.b189*m.b351 + m.b189*m.b355 + m.b189*m.b372 + m.b189*m.b376 + m.b189*m.b383 + m.b189*m.b390
+ m.b189*m.b394 + m.b189*m.b423 + m.b189*m.b424 + m.b189*m.b428 + m.b189*m.b458 + m.b189*m.b467
+ m.b189*m.b477 + m.b189*m.b482 + m.b189*m.b488 + m.b189*m.b490 + m.b189*m.b497 + m.b189*m.b499
+ m.b189*m.b500 + m.b189*m.b526 + m.b189*m.b530 + m.b189*m.b531 + m.b189*m.b562 + m.b189*m.b566
+ m.b189*m.b570 + m.b189*m.b572 + m.b189*m.b574 + m.b189*m.b587 + m.b189*m.b603 + m.b189*m.b605
+ m.b189*m.b608 + m.b189*m.b623 + m.b189*m.b628 + m.b189*m.b664 + m.b189*m.b670 + m.b189*m.b673
+ m.b189*m.b674 + m.b189*m.b676 + m.b189*m.b681 + m.b190*m.b682 + m.b190*m.b706 + m.b190*m.b723
+ m.b190*m.b748 + m.b190*m.b754 + m.b190*m.b778 + m.b190*m.b784 + m.b190*m.b791 + m.b190*m.b812
+ m.b190*m.b813 + m.b191*m.b726 + m.b191*m.b729 + m.b191*m.b757 + m.b191*m.b758 + m.b191*m.b761
+ m.b191*m.b780 + m.b191*m.b790 + m.b191*m.b809 + m.b191*m.b823 + m.b191*m.b826 + m.b192*m.b683
+ m.b192*m.b684 + m.b192*m.b693 + m.b192*m.b697 + m.b192*m.b709 + m.b192*m.b711 + m.b192*m.b719
+ m.b192*m.b730 + m.b192*m.b741 + m.b192*m.b763 + m.b192*m.b794 + m.b192*m.b805 + m.b192*m.b825
+ m.b193*m.b692 + m.b193*m.b693 + m.b193*m.b694 + m.b193*m.b697 + m.b193*m.b699 + m.b193*m.b703
+ m.b193*m.b705 + m.b193*m.b706 + m.b193*m.b708 + m.b193*m.b709 + m.b193*m.b722 + m.b193*m.b723
+ m.b193*m.b732 + m.b193*m.b742 + m.b193*m.b754 + m.b193*m.b772 + m.b193*m.b774 + m.b193*m.b777
+ m.b193*m.b778 + m.b193*m.b779 + m.b193*m.b784 + m.b193*m.b794 + m.b193*m.b795 + m.b193*m.b796
+ m.b193*m.b800 + m.b193*m.b801 + m.b193*m.b802 + m.b193*m.b805 + m.b193*m.b810 + m.b193*m.b815
+ m.b193*m.b824 + m.b193*m.b829 + m.b193*m.b831 + m.b193*m.b832 + m.b193*m.b835 + m.b195*m.b698
+ m.b195*m.b713 + m.b195*m.b715 + m.b195*m.b718 + m.b195*m.b721 + m.b195*m.b722 + m.b195*m.b732
+ m.b195*m.b735 + m.b195*m.b740 + m.b195*m.b742 + m.b195*m.b746 + m.b195*m.b747 + m.b195*m.b753
+ m.b195*m.b769 + m.b195*m.b776 + m.b195*m.b782 + m.b195*m.b793 + m.b195*m.b806 + m.b195*m.b818
+ m.b195*m.b824 + m.b195*m.b831 + m.b196*m.b381 + m.b196*m.b386 + m.b196*m.b393 + m.b196*m.b428
+ m.b196*m.b440 + m.b196*m.b450 + m.b196*m.b458 + m.b196*m.b465 + m.b196*m.b471 + m.b196*m.b490
+ m.b196*m.b526 + m.b196*m.b549 + m.b196*m.b559 + m.b196*m.b561 + m.b196*m.b567 + m.b196*m.b581
+ m.b196*m.b582 + m.b196*m.b595 + m.b196*m.b611 + m.b196*m.b614 + m.b196*m.b619 + m.b196*m.b626
+ m.b196*m.b627 + m.b196*m.b632 + m.b196*m.b635 + m.b196*m.b657 + m.b196*m.b661 + m.b196*m.b670
+ m.b196*m.b672 + m.b197*m.b728 + m.b197*m.b762 + m.b197*m.b767 + m.b197*m.b783 + m.b197*m.b803
+ m.b197*m.b822 + m.b198*m.b702 + m.b198*m.b703 + m.b198*m.b716 + m.b198*m.b725 + m.b198*m.b764
+ m.b198*m.b772 + m.b198*m.b773 + m.b198*m.b774 + m.b198*m.b781 + m.b198*m.b787 + m.b198*m.b797
+ m.b198*m.b815 + m.b198*m.b820 + m.b198*m.b821 + m.b198*m.b833 + m.b198*m.b835 + m.b199*m.b686
+ m.b199*m.b687 + m.b199*m.b688 + m.b199*m.b689 + m.b199*m.b704 + m.b199*m.b714 + m.b199*m.b717
+ m.b199*m.b737 + | |
<filename>generators/generate_pybind11_bindings.py
import os
import platform
import shutil
import sys
from collections import Counter
from collections import defaultdict, OrderedDict
from os.path import join
from typing import List, Dict, Set
from CppHeaderParser import CppHeaderParser
from CppHeaderParser.CppHeaderParser import CppMethod
import generators.dependency_tree
from generators.config import common_includes, PCL_BASE, PATH_LOADER, PATH_MODULES, MODULES_TO_BUILD, \
HEADERS_TO_SKIP, ATTRIBUTES_TO_SKIP, CLASSES_TO_IGNORE, METHODS_TO_SKIP, SUBMODULES_TO_SKIP, EXPLICIT_INCLUDES, \
SPECIALIZED_TEMPLATED_TYPES_TO_SKIP
from generators.definitions.function import generate_function_definitions, get_methods_defined_outside
from generators.definitions.method import split_methods_by_type
from generators.definitions.submodule_loader import generate_loader
from generators.definitions.templated_class import ClassDefinition
from generators.instantiations import Instantiations
from generators.point_types_utils import unpack_yaml_point_types
from generators.utils import make_header_include_name, sort_headers_by_dependencies, \
generate_main_loader, make_namespace_class, read_header_file
def filter_methods_for_parser_errors(methods):
    """Drop methods whose parsed name is a type keyword.

    CppHeaderParser sometimes mis-parses declarations so that "void" or "bool"
    ends up as the method name; such entries are parser noise, not real methods.
    """
    # idiomatic `not in` instead of `not m["name"] in (...)`
    return [m for m in methods if m["name"] not in ("void", "bool")]
def filter_methods_to_skip(methods):
    """Remove blacklisted methods and callback setters we cannot bind.

    A method is kept unless it is listed in METHODS_TO_SKIP, or it is a
    "...Callback" method whose signature is not a single boost::function
    parameter (the only callback shape the bindings support).
    """
    kept = []
    for method in methods:
        if (method["parent"]["name"], method["name"]) in METHODS_TO_SKIP:
            continue
        if "Callback" in method["name"]:
            params = method["parameters"]
            takes_boost_function = (
                len(params) == 1 and params[0]["type"].startswith("boost::function")
            )
            if not takes_boost_function:
                continue
        kept.append(method)
    return kept
def same_parameters(p1: Dict, p2: Dict) -> bool:
    """True when two parsed parameters agree on every identity field.

    Only the fields that identify a parameter are compared; other parse
    artifacts (e.g. the raw "type" string) are deliberately ignored.
    """
    for field in ("constant", "name", "raw_type", "reference", "static"):
        if p1[field] != p2[field]:
            return False
    return True
def same_methods(m1: CppMethod, m2: CppMethod) -> bool:
    """True when two parsed methods look like the same declaration.

    Compares the name, the return type (working around a CppHeaderParser bug),
    and the full parameter list of both methods.
    """
    if m1["name"] != m2["name"]:
        return False
    # bug in CppHeaderParser
    # in "void ImageGrabber<PointT>::publish", "void ImageGrabber<PointT>::" is the return type
    path = m1.get("path", m2.get("path"))
    class_token = path[path.rfind(":") + 1:]
    if class_token not in m1["rtnType"] and class_token not in m2["rtnType"]:
        return False
    # every parameter of m1 must have a counterpart in m2
    for param in m1["parameters"]:
        if not any(same_parameters(param, other) for other in m2["parameters"]):
            return False
    # counterpart check plus equal arity => same parameter lists
    return len(m1["parameters"]) == len(m2["parameters"])
def private_methods_defined_outside(private_methods: List[CppMethod],
                                    methods_declared_outside: List[CppMethod]) -> List[CppMethod]:
    """Return the private methods that also appear among out-of-class definitions.

    Each private method is kept at most once, on its first match.
    """
    return [m_private for m_private in private_methods
            if any(same_methods(m_private, m_outside)
                   for m_outside in methods_declared_outside)]
def generate_class_definitions(main_classes,
                               module,
                               header_name,
                               path,
                               needs_overloading: List[str],
                               methods_defined_outside: List[CppMethod]) -> str:
    """Generate the C++ pybind11 definition source for all classes of one header.

    :param main_classes: parsed classes to emit definitions for
    :param module: pcl module name the header belongs to
    :param header_name: file name of the header inside the module
    :param path: header path relative to the PCL base directory
    :param needs_overloading: method names requiring explicit overload casts
    :param methods_defined_outside: methods declared in a class but defined
        outside it (their private declarations still need bindings)
    :return: the generated C++ translation unit as a single string
    """
    text = []
    a = text.append
    a(common_includes)
    a(EXPLICIT_INCLUDES.get((module, header_name), ""))
    a(make_header_include_name(module, header_name, path))
    a("")
    # emit a using-directive for every non-pcl namespace seen in these classes
    namespaces = {c["namespace"] for c in main_classes}
    for namespace in namespaces:
        if namespace != "pcl":
            a("using namespace %s;" % namespace)
    a("\n")
    for class_ in main_classes:
        methods = class_["methods"]["public"]
        methods = filter_methods_for_parser_errors(methods)
        methods = filter_methods_to_skip(methods)
        # private/protected methods that are defined outside the class body
        # still need to be bound
        private_and_protected = class_["methods"]["private"] + class_["methods"]["protected"]
        methods += private_methods_defined_outside(private_and_protected, methods_defined_outside)
        class_properties = [p for p in class_["properties"]["public"]
                            if "using" not in p["type"]
                            and "union" not in p["type"]]
        # members of nested anonymous unions are exposed as properties of the parent
        union_properties = [p for nested_class in class_["nested_classes"]
                            for p in nested_class["properties"]["public"]
                            if "union" in nested_class["name"]]
        class_properties += union_properties
        class_properties = filter_class_properties(module, header_name, class_["name"], class_properties)
        constructors, variables, others = split_methods_by_type(methods, class_properties,
                                                                needs_overloading)
        # abstract / unimplemented classes get no constructors bound
        if not class_["can_be_instantiated"]:
            constructors = []
        class_def = ClassDefinition(class_, constructors, variables, others, module)
        a(class_def.to_class_function_definition())
        a("")
    return "\n".join(text)
def filter_class_properties(module, header, class_name, properties):
    """Drop nameless properties and those blacklisted in ATTRIBUTES_TO_SKIP.

    :param module: pcl module name
    :param header: header file name
    :param class_name: class whose properties are being filtered
    :param properties: parsed property dicts
    :return: the filtered property list
    """
    # ignore properties without a name
    named = [p for p in properties if p["name"]]
    to_ignore = ATTRIBUTES_TO_SKIP.get((module, header, class_name))
    if to_ignore is None:
        return named
    return [p for p in named if p["name"] not in to_ignore]
def get_main_classes(header, module, header_name):
    """Collect, filter and name-sort the bindable classes of a parsed header.

    Template specializations are not supported: known-skippable ones are
    silently dropped, unknown ones produce a warning. Classes listed in
    CLASSES_TO_IGNORE are dropped as well.
    """
    candidates = [c for c in header.classes.values()
                  if c["namespace"] in ("pcl", "pcl::" + module)]
    kept = []
    for class_ in candidates:
        is_specialization = class_.get("template") and "<" in class_["name"]
        if is_specialization:
            skip = any(("<%s>" % type_) in class_["name"]
                       for type_ in SPECIALIZED_TEMPLATED_TYPES_TO_SKIP)
            if not skip:
                message = "Warning: Template class specialization not implemented for class %s in %s"
                print(message % (class_["name"], header_name))
        elif (module, header_name, class_["name"]) in CLASSES_TO_IGNORE:
            continue
        else:
            kept.append(class_)
    return sorted(kept, key=lambda c: c["name"])
def get_functions(header, module):
    """Return the header's module-level free functions, name-sorted and filtered."""
    pcl_namespaces = ("pcl",
                      "pcl::",
                      "pcl::%s" % module,
                      "pcl::%s::" % module)
    functions = [f for f in header.functions if f["namespace"] in pcl_namespaces]
    functions.sort(key=lambda f: f["name"])
    return filter_module_level_functions(functions)
def filter_module_level_functions(functions: List[CppMethod]):
    """Drop functions we cannot bind.

    A function is skipped when it returns a const value or when any parameter
    mentions a templated type from SPECIALIZED_TEMPLATED_TYPES_TO_SKIP.
    """
    kept = []
    for f in functions:
        if f.get("returns_const"):
            continue
        uses_skipped_type = any(type_ in param["type"]
                                for param in f["parameters"]
                                for type_ in SPECIALIZED_TEMPLATED_TYPES_TO_SKIP)
        if not uses_skipped_type:
            kept.append(f)
    return kept
def get_variables(header):
    """Return the header's variables that carry a default value, name-sorted.

    Entries whose parsed type is exactly "using" (alias declarations that
    CppHeaderParser reports as variables) are excluded.
    """
    with_defaults = [v for v in header.variables
                     if v.get("defaultValue") and v.get("type") != 'using']
    return sorted(with_defaults, key=lambda v: v["name"])
def get_enums(header):
    """Return the header's named enums sorted by name (anonymous enums dropped)."""
    named = (e for e in header.enums if e.get("name"))
    return sorted(named, key=lambda e: e["name"])
def read_header(header_path, skip_macros=None):
    """Parse one header file with CppHeaderParser and return the parsed object.

    Deliberately sequential: CppHeaderParser does not appear to be thread safe,
    so headers are parsed one at a time.

    :param header_path: path of the header to read
    :param skip_macros: macro names to strip before parsing (None means none)
    """
    if skip_macros is None:
        skip_macros = []
    source = read_header_file(header_path, skip_macros)
    CppHeaderParser.debug = False
    return CppHeaderParser.CppHeader(source, argType="string")
def clean():
    """Delete previously generated output: the loader file and the modules tree."""
    try:
        os.remove(PATH_LOADER)
    except FileNotFoundError:
        # nothing generated yet on a fresh checkout; that's fine
        pass
    if os.path.exists(PATH_MODULES):
        shutil.rmtree(PATH_MODULES)
def check_if_needs_overloading(main_classes):
    """Determine, per module, which method names need explicit overload casts.

    A method name needs overloading when it appears two or more times within a
    single class (counting all access levels). Names are collected per module.

    :param main_classes: dict mapping (module, header) -> list of parsed classes
    :return: dict mapping module name -> list of method names to overload
    """
    classes_by_module = defaultdict(list)
    for (module, _), classes in main_classes.items():
        classes_by_module[module] += classes

    needs_overloading = {}
    for module, classes in classes_by_module.items():
        overloaded = []
        for class_ in classes:
            # count every method name across all access specifiers;
            # (the original rebound the Counter variable inside its own
            # .items() loop — renamed to avoid the shadowing)
            counts = Counter(m["name"]
                             for methods in class_["methods"].values()
                             for m in methods)
            overloaded.extend(name for name, n in counts.items() if n >= 2)
        needs_overloading[module] = overloaded
    return needs_overloading
def get_headers(modules=None, skip_modules=None):
    """Enumerate the (module, header_name, relative_path) triples to generate.

    Walks each requested module directory under PCL_BASE (skipping submodules
    in SUBMODULES_TO_SKIP), adds the top-level pcl headers, and finally drops
    anything listed in HEADERS_TO_SKIP.
    """
    def headers_in_module(module):
        found = []
        for base, _folders, files in os.walk(join(PCL_BASE, module)):
            if any(base.endswith(sub) for sub in SUBMODULES_TO_SKIP):
                continue
            relative_base = os.path.abspath(base).replace(PCL_BASE, "")[1:]
            found.extend([f, join(relative_base, f)]
                         for f in files if f.endswith(".h"))
        return found

    if modules is None:
        modules = MODULES_TO_BUILD
    if skip_modules is not None:
        modules = [m for m in modules if m not in skip_modules]

    candidates = [(module, header_name, path)
                  for module in modules
                  for header_name, path in headers_in_module(module)]
    # headers living directly in PCL_BASE belong to the "" (base) module
    candidates += [("", f, f) for f in os.listdir(PCL_BASE) if f.endswith(".h")]
    return [(module, header_name, path)
            for module, header_name, path in candidates
            if (module, header_name) not in HEADERS_TO_SKIP]
def get_pure_virtual_methods(class_: CppHeaderParser.CppClass) -> Set[str]:
    """Names of the class's pure-virtual methods, across all access specifiers."""
    names = set()
    for access in ("private", "protected", "public"):
        names.update(m["name"] for m in class_["methods"][access]
                     if m["pure_virtual"])
    return names
def get_all_class_methods_not_pure_virtual(class_: CppHeaderParser.CppClass) -> Set[str]:
    """Names of the class's concrete (non-pure-virtual) methods, all access levels."""
    names = set()
    for access in ("private", "protected", "public"):
        names.update(m["name"] for m in class_["methods"][access]
                     if not m["pure_virtual"])
    return names
def flag_instantiatable_class(dependency_tree, main_classes):
    """Mark each parsed class with a "can_be_instantiated" flag.

    A class is instantiable when it is not abstract and every pure-virtual
    method declared in its (abstract) bases has a concrete implementation
    somewhere in the inheritance chain.

    NOTE(review): matching is done by method *name* only — overload signatures
    are not compared; confirm that is sufficient for the bound classes.
    """
    # index every known class by its fully-qualified name for base lookups
    main_classes_by_name_namespace = {make_namespace_class(c["namespace"], c["name"]): c
                                      for classes in main_classes.values() for c in classes}
    for module, header_name in main_classes:
        for class_ in main_classes[(module, header_name)]:
            can_be_instantiated = True
            if class_["abstract"]:
                can_be_instantiated = False
            else:
                # check if any pure virtual method is not implemented
                all_implemented_inherited_methods = get_all_class_methods_not_pure_virtual(class_)
                namespace_class = make_namespace_class(class_["namespace"], class_["name"])
                # first pass: gather every concrete method implemented anywhere
                # along the inheritance chain
                for base_name_nsp in dependency_tree.breadth_first_iterator(namespace_class):
                    base_class = main_classes_by_name_namespace.get(base_name_nsp)
                    if base_class:
                        base_class_methods = get_all_class_methods_not_pure_virtual(base_class)
                        all_implemented_inherited_methods.update(base_class_methods)
                # second pass: an abstract base with a pure-virtual method that
                # never got implemented makes the class non-instantiable
                for base_name_nsp in dependency_tree.breadth_first_iterator(namespace_class):
                    base_class = main_classes_by_name_namespace.get(base_name_nsp)
                    if base_class and base_class["abstract"]:
                        base_pure_virtual_methods = get_pure_virtual_methods(base_class)
                        if base_pure_virtual_methods - all_implemented_inherited_methods:
                            can_be_instantiated = False
            # record the verdict on the parsed class dict for the generators
            class_["can_be_instantiated"] = can_be_instantiated
def load_yaml_point_types(not_every_point_type):
    """Load the generated point-type YAML and merge in the manual extras.

    :param not_every_point_type: forwarded to unpack_yaml_point_types to limit
        the instantiated point-type combinations
    :return: dict mapping class name to its point-type data
    """
    classes_point_types = unpack_yaml_point_types("point_types_generated.yml", not_every_point_type)
    extra_point_types = unpack_yaml_point_types("point_types_extra.yml")
    for k, v in extra_point_types.items():
        # NOTE(review): when the key already exists, v is appended as a single
        # extra element, but when it is missing, v itself becomes the value —
        # the two branches produce differently shaped entries. Presumably
        # intentional (add one combination vs. define the full list) — confirm.
        if k in classes_point_types:
            classes_point_types[k].append(v)
        else:
            classes_point_types[k] = v
    return classes_point_types
def make_module_dirs(modules):
    """Ensure an output directory exists under PATH_MODULES for every module.

    Uses makedirs(exist_ok=True) instead of a check-then-create pair, which
    avoids the race between os.path.exists and os.makedirs.
    """
    for module in modules:
        os.makedirs(join(PATH_MODULES, module), exist_ok=True)
def is_file_different(path, text):
    """Return True when the file at *path* does not already contain *text*.

    Prints the file name on a difference so regeneration activity is visible.
    The file handle is now closed deterministically via a context manager
    (the original leaked it).
    """
    with open(path) as f:
        current = f.read()
    if current != text:
        print("File is different: %s" % os.path.split(path)[1])
        return True
    return False
def write_if_different(files_to_write, delete_others):
    """Sync the generated modules tree with *files_to_write*, touching disk minimally.

    Existing files are rewritten only when their content changed; on-disk files
    absent from *files_to_write* are deleted when *delete_others* is True;
    genuinely new files are created at the end.

    Fixes vs. the original: file handles are closed via context managers, and
    a path is recorded as handled whenever it already exists on disk — before,
    unchanged files were not recorded and got rewritten by the "new files"
    pass anyway, defeating the is_file_different mtime preservation.

    :param files_to_write: dict mapping absolute path -> intended file content
    :param delete_others: delete on-disk files that are not in files_to_write
    """
    handled = []
    for base, _folder, files in os.walk(PATH_MODULES):
        for f in files:
            path = join(base, f)
            if path in files_to_write:
                if is_file_different(path, files_to_write[path]):
                    with open(path, "w") as out:
                        out.write(files_to_write[path])
                handled.append(path)
            elif delete_others:
                os.remove(path)
                print("Deleted: " + path)
    # write new files
    for path, text in files_to_write.items():
        if path not in handled:
            with open(path, "w") as out:
                out.write(text)
def delete_other_dirs(modules):
    """Remove generated module directories that are no longer being built."""
    for entry in os.listdir(PATH_MODULES):
        path = join(PATH_MODULES, entry)
        if os.path.isdir(path) and entry not in modules:
            shutil.rmtree(path, ignore_errors=True)
def write_stuff_if_needed(generated_headers: OrderedDict, delete_others=True):
    """Write generated .hpp files and loader .cpp files, touching disk only on change.

    :param generated_headers: OrderedDict mapping (module, header_name) to the
        generated C++ text (falsy text means "nothing to write")
    :param delete_others: also remove stale files and module directories
    """
    modules = {module for module, _ in generated_headers.keys()}
    make_module_dirs(modules)

    files_to_write = {}
    # hpp
    for (module, header_name), text in generated_headers.items():
        if text:
            files_to_write[join(PATH_MODULES, module, header_name + "pp")] = text
    # loaders
    loader_modules = defaultdict(list)
    for (module, header_name), text in generated_headers.items():
        if text:
            loader_modules[module or "base"].append(header_name)
    for module, headers in loader_modules.items():
        loader_path = join(PATH_MODULES, "_%s_loader.cpp" % module)
        files_to_write[loader_path] = generate_loader(module, headers)
    files_to_write[PATH_LOADER] = generate_main_loader(loader_modules)

    write_if_different(files_to_write, delete_others)
    if delete_others:
        delete_other_dirs(modules)
def generate(headers_to_generate, skip_macros, not_every_point_type=False) -> OrderedDict:
"""
:return: OrderedDict
"""
main_classes, module_functions, module_variables, module_enums = {}, {}, {}, {}
for module, header_name, path in headers_to_generate[:]:
header_full_path = join(PCL_BASE, path) if path else join(PCL_BASE, module, header_name)
header = read_header(header_full_path, skip_macros)
main_classes[(module, header_name)] = get_main_classes(header, module, header_name)
module_functions[(module, header_name)] = get_functions(header, module)
module_variables[(module, header_name)] = get_variables(header)
module_enums[(module, header_name)] = get_enums(header)
classes = [c for module, header, path in headers_to_generate
for c in main_classes[(module, header)]]
dependency_tree = generators.dependency_tree.DependencyTree(classes)
loaded_point_types = load_yaml_point_types(not_every_point_type)
classes_point_types: OrderedDict = dependency_tree.get_point_types_with_dependencies(loaded_point_types)
classes_sorted_base_first = list(dependency_tree.leaf_iterator())
def index_for_class(class_):
return classes_sorted_base_first.index(make_namespace_class(class_["namespace"], class_["name"]))
# sort classes inside modules based on inheritance
for module, header in main_classes:
main_classes[(module, | |
local algos
last_papers = prev_papers
available_papers = all_vecs[:]
rank_err = 0
if ranking_type == "CRP":
predicted_order = ranking_model.rank_on_clusters(last_papers, available_papers)
else:
predicted_order = ranking_func(last_papers, available_papers)
prediction_error = error_func(predicted_order, next_papers)
rank_err += prediction_error
random_order = get_random(last_papers, available_papers)
random_rank_err = error_func(random_order, next_papers)
rank_diff_per_timestep[ranking_func].append(random_rank_err - prediction_error)
rank_err = rank_err / len(
curr_papers
) # Just take the average rank error at timestep?
cumulative_err[ranking_func] += rank_err
prev_papers = curr_papers
for paper in emerged_papers:
available_papers.remove(
list(paper)
) # Not using list comprehension so duplicates are preserved
if ranking_type == "CRP":
ranking_model.update_clusters(prev_papers, len(prev_papers))
return cumulative_err / num_timesteps, rank_diff_per_timestep
def get_rank_score_avg(predicted: List, actual: List) -> float:
    """Mean index in *predicted* at which the *actual* vectors appear.

    Lower is better: 0.0 means every actual vector was ranked first.
    Annotations fixed: *predicted* is used with list.index (not an ndarray)
    and the division yields a float, not an int.
    """
    ranks = [predicted.index(list(vec)) for vec in actual]
    return sum(ranks) / len(ranks)
def get_rank_score_best(predicted: List, actual: List) -> int:
    """Smallest (best) index in *predicted* among the *actual* vectors.

    Annotation fixed: *predicted* is used with list.index, so it is a List,
    not an np.ndarray.
    """
    return min(predicted.index(list(vec)) for vec in actual)
def get_rank_score_worst(predicted: List, actual: List) -> int:
    """Largest (worst) index in *predicted* among the *actual* vectors.

    Annotation fixed: *predicted* is used with list.index, so it is a List,
    not an np.ndarray.
    """
    return max(predicted.index(list(vec)) for vec in actual)
def indicator(rand_ranks: List, model_ranks: List) -> List:
    """Element-wise win indicator.

    1 where the model ranked strictly better (lower) than random, else 0.
    """
    return [int(rand > model) for rand, model in zip(rand_ranks, model_ranks)]
def get_probability_score_multi(emergence_order: dict, all_vecs: List, ranking_funcs: dict, ranking_types: List, carry_error: bool = False, suppress_print: bool = True) -> tuple:
    """Accumulate log-likelihood scores for several ranking models at once.

    At each timestep every model ranks the not-yet-emerged papers by
    similarity; the papers that actually emerge next are scored under a
    softmax over those similarities, with same-batch competitors excluded
    and the distribution renormalised.

    :param emergence_order: dict timestep -> list of paper vectors emerging then
    :param all_vecs: every paper vector in the corpus
    :param ranking_funcs: dict model name -> ranking function accepting
        (last_papers, available_papers, keep_sim=True)
    :param ranking_types: "global"/"local" flag per model, aligned with the
        iteration order of ranking_funcs
    :param carry_error: if True, roll each model forward on its own
        predictions instead of the attested papers
    :param suppress_print: silence progress output
    :return: (log_Ls, order) — accumulated log likelihood per model, and the
        model names in the order they were scored
    """
    last_timestep = max(emergence_order.keys())
    log_Ls = [0] * len(ranking_funcs)
    order = []
    # one emerged-papers history per model
    emerged_papers = [[] for i in range(len(ranking_funcs))]
    if not suppress_print:
        print("TOTAL PAPERS: ", len(all_vecs))
    for t in range(last_timestep):
        if not suppress_print:
            print("TIMESTEP: ", t)
        for i, name in enumerate(ranking_funcs):
            curr_papers = emergence_order[t]
            next_papers = emergence_order[t + 1]
            if not carry_error:
                emerged_papers[i].extend(curr_papers)
                last_papers = emerged_papers[i]
                if ranking_types[i] == "local":
                    # local models only condition on the latest batch
                    last_papers = curr_papers
            else:
                if t == 0:
                    emerged_papers[i].extend(emergence_order[0])
                last_papers = emerged_papers[i]
            available_papers = all_vecs[:]
            for paper in emerged_papers[i]:
                available_papers.remove(
                    list(paper)
                )  # Not using list comprehension so duplicates are preserved
            pred_and_sim = ranking_funcs[name](last_papers, available_papers, keep_sim=True)
            pred, sim = zip(*pred_and_sim)
            log_L = 0
            sim_softmax = softmax(sim)
            if not carry_error:
                for vec in next_papers:
                    # zero out the other papers emerging in this same batch,
                    # renormalise, then read off this paper's probability
                    next_indices = [pred.index(v) for v in next_papers]
                    found_index = pred.index(vec)
                    next_indices_excluded_self = [i for i in next_indices if i != found_index]
                    sim_others_excluded = [prob if i not in next_indices_excluded_self else 0 for i, prob in enumerate(sim_softmax)]
                    sim_others_excluded = [prob/sum(sim_others_excluded) for prob in sim_others_excluded]
                    log_L += np.log(sim_others_excluded[found_index])
            if carry_error:
                for vec in pred[:len(next_papers)]:
                    # NOTE(review): closest_ind is computed but never used here
                    # (the single-model variant records it in `tails`) — confirm
                    closest_ind = np.argpartition(distance.cdist(np.array([vec]), np.asarray(emerged_papers[i]), metric="cosine"), 0)[0][0]
                    emerged_papers[i].append(vec)
            log_Ls[i] += log_L
            order.append(name)
    return log_Ls, order
def get_probability_score(emergence_order: dict, all_vecs: dict, ranking_func: Callable, ranking_type: str = "global", carry_error: bool = False, labels: List = [], return_log_L_only: bool = False, suppress_print: bool = True) -> tuple:
    """Log-likelihood of the attested emergence sequence under one ranking model.

    At each timestep the model ranks the not-yet-emerged papers; the papers
    that actually emerge next are scored under a softmax over the model's
    similarities, with same-batch competitors excluded and renormalised.

    NOTE(review): `labels` is never used in this body, and its mutable default
    `[]` is a Python anti-pattern — consider `labels=None`; confirm no caller
    relies on it.

    :param emergence_order: dict timestep -> list of paper vectors emerging then
    :param all_vecs: every paper vector in the corpus
    :param ranking_func: callable (last_papers, available_papers, keep_sim=True)
    :param ranking_type: "global" conditions on all emerged papers, "local"
        only on the latest batch
    :param carry_error: roll the model forward on its own predictions instead
        of the attested papers
    :param return_log_L_only: return just the scalar log likelihood
    :param suppress_print: silence progress output
    :return: log_L, or (log_L, emerged_papers, tails); `tails` is only filled
        in carry_error mode (nearest-emerged-paper index per prediction)
    """
    last_timestep = max(emergence_order.keys())
    log_L = 0
    emerged_papers = []
    tails = []
    if not suppress_print:
        print("TOTAL PAPERS: ", len(all_vecs))
    for t in range(last_timestep):
        curr_papers = emergence_order[t]
        next_papers = emergence_order[t + 1]
        if not carry_error:
            emerged_papers.extend(curr_papers)
            last_papers = emerged_papers
            if ranking_type == "local":
                # local models only condition on the latest batch
                last_papers = curr_papers
        else:
            if t == 0:
                emerged_papers.extend(emergence_order[0])
            last_papers = emerged_papers
        available_papers = all_vecs[:]
        for paper in emerged_papers:
            available_papers.remove(
                list(paper)
            )  # Not using list comprehension so duplicates are preserved
        pred_and_sim = ranking_func(last_papers, available_papers, keep_sim=True)
        pred, sim = zip(*pred_and_sim)
        sim_softmax = softmax(sim)
        if not carry_error:
            for vec in next_papers:
                # zero out the other papers emerging in this same batch,
                # renormalise, then read off this paper's probability
                next_indices = [pred.index(v) for v in next_papers]
                found_index = pred.index(vec)
                next_indices = [i for i in next_indices if i != found_index]
                sim_others_excluded = [prob if i not in next_indices else 0 for i, prob in enumerate(sim_softmax)]
                sim_others_excluded = [prob/sum(sim_others_excluded) for prob in sim_others_excluded]
                log_L += np.log(sim_others_excluded[found_index])
        if carry_error:
            for vec in pred[:len(next_papers)]:
                # record which already-emerged paper each prediction is closest to
                closest_ind = np.argpartition(distance.cdist(np.array([vec]), np.asarray(emerged_papers), metric="cosine"), 0)[0][0]
                tails.append(closest_ind)
                emerged_papers.append(vec)
    if return_log_L_only:
        return log_L
    return log_L, emerged_papers, tails
def get_probability_score_crp(emergence_order: dict, all_vecs: dict, crp_model: Generic, return_log_L_only: bool = False, suppress_print: bool = False) -> tuple:
    """Log-likelihood of the emergence sequence under a CRP cluster model.

    Mirrors get_probability_score, but lets the CRP model absorb each batch of
    newly emerged papers (update_clusters) before ranking the remainder.

    :param emergence_order: dict timestep -> list of paper vectors emerging then
    :param all_vecs: every paper vector in the corpus
    :param crp_model: object exposing update_clusters() and
        rank_on_clusters_custom()
    :param return_log_L_only: return just the scalar log likelihood
    :param suppress_print: silence progress output
    :return: log_L, or (log_L, emerged_papers, tails); tails stays empty here
        and exists only for signature parity with get_probability_score
    """
    last_timestep = max(emergence_order.keys())
    log_L = 0
    emerged_papers = []
    # BUG FIX: `tails` was never initialised, so the final return raised
    # NameError whenever return_log_L_only was False.
    tails = []
    if not suppress_print:
        print("TOTAL PAPERS: ", len(all_vecs))
    for t in range(last_timestep):
        curr_papers = emergence_order[t]
        if t != 0:
            # let the CRP absorb the papers that emerged at this timestep
            crp_model.update_clusters(curr_papers, len(curr_papers))
        next_papers = emergence_order[t + 1]
        emerged_papers.extend(curr_papers)
        last_papers = emerged_papers
        available_papers = all_vecs[:]
        for paper in emerged_papers:
            available_papers.remove(
                list(paper)
            )  # Not using list comprehension so duplicates are preserved
        pred_and_sim = crp_model.rank_on_clusters_custom(available_papers)
        pred, sim = zip(*pred_and_sim)
        sim_softmax = softmax(sim)
        for vec in next_papers:
            # exclude same-batch competitors, renormalise, score this paper
            next_indices = [pred.index(v) for v in next_papers]
            found_index = pred.index(vec)
            next_indices = [i for i in next_indices if i != found_index]
            sim_others_excluded = [prob if i not in next_indices else 0 for i, prob in enumerate(sim_softmax)]
            sim_others_excluded = [prob/sum(sim_others_excluded) for prob in sim_others_excluded]
            log_L += np.log(sim_others_excluded[found_index])
    if return_log_L_only:
        return log_L
    return log_L, emerged_papers, tails
def get_probability_rand(emergence_order: dict) -> float:
    """Log-likelihood of the attested sequence under a uniform random ranker.

    Each paper emerging at a timestep counts as a uniform draw from the papers
    not yet emerged, with the rest of its own batch excluded from the pool.

    :param emergence_order: dict timestep -> list of papers emerging then
    :return: total log likelihood of the sequence
    """
    batch_sizes = make_timesteps(emergence_order)
    log_L = 0
    remaining = sum(batch_sizes[1:])
    for batch in batch_sizes[1:]:
        # each of the `batch` papers has probability 1/(pool excluding the
        # rest of its own batch)
        for _ in range(batch):
            log_L += np.log(1 / (remaining - batch + 1))
        remaining -= batch
    return log_L
def make_timesteps(emergence_order: dict) -> List:
    """Batch sizes per timestep: how many papers emerged at each t in 0..max.

    Return annotation fixed: this returns a list of counts, not a float.

    :param emergence_order: dict timestep -> list of papers emerging then
    :return: list where element t is len(emergence_order[t])
    """
    return [len(emergence_order[t])
            for t in range(max(emergence_order.keys()) + 1)]
def shuffle_test(n_iter: int, target_val: int, emergence_order: dict, vecs_filename: str, order_filename: str, return_raw_counts: bool = False) -> tuple:
    """Permutation test comparing a model score against random orderings.

    Runs predict_seq with a random ranking n_iter times, comparing each
    random score to target_val.

    Returns (p value, avg rank diff, upper 95% CI, lower 95% CI,
    avg rank diff per timestep), or raw [lower, higher] counts when
    return_raw_counts is True.
    """
    higher = 0
    lower = 0
    cumulative_rank_diffs = []
    trial_timestep_rank_diff = None
    _attested_order = get_attested_order(vecs_filename)
    _emergence_order = get_emergence_order(order_filename)

    for _ in range(n_iter):
        attested_order = _attested_order
        emergence_order = _emergence_order
        random.seed()  # re-seed from system entropy for each trial
        rand_val, rank_diff_at_timestep = predict_seq(
            attested_order, emergence_order, get_random, get_rank_score_avg
        )
        cumulative_rank_diffs.append(rand_val - target_val)
        # Bugfix: identity comparison with None (was `== None`).
        if trial_timestep_rank_diff is None:
            trial_timestep_rank_diff = rank_diff_at_timestep
        else:
            trial_timestep_rank_diff = [sum(x) for x in zip(trial_timestep_rank_diff, rank_diff_at_timestep)]
        if rand_val > target_val:
            higher += 1
        else:
            lower += 1

    avg_rank_diff = sum(cumulative_rank_diffs) / len(cumulative_rank_diffs)
    avg_rank_diff_timesteps = [i / n_iter for i in trial_timestep_rank_diff]
    upper_conf_interval, lower_conf_interval = sms.DescrStatsW(
        cumulative_rank_diffs
    ).tconfint_mean()

    if return_raw_counts:
        return [lower, higher]
    return (
        float(lower) / n_iter,
        avg_rank_diff,
        upper_conf_interval,
        lower_conf_interval,
        avg_rank_diff_timesteps
    )
def shuffle_test_multi(n_iter: int, target_val_list: List, emergence_order: dict, vecs_filename: str, order_filename: str, return_raw_counts: bool = False) -> tuple:
    """Permutation test against random orderings for several target values.

    Returns (pvals, avg_rank_diffs, upper CIs, lower CIs,
    avg rank diff per timestep per target), or raw [lower, higher] counts
    for the LAST target when return_raw_counts is True.
    """
    cumulative_rank_diffs = []
    trial_timestep_rank_diff = None
    _attested_order = get_attested_order(vecs_filename)
    _emergence_order = get_emergence_order(order_filename)
    pvals = []
    pvals_ratio = []
    avg_rank_diffs = []
    avg_rank_diff_timesteps_multi = []
    upper_conf_intervals = []
    upper_conf_intervals_ratio = []
    lower_conf_intervals = []
    lower_conf_intervals_ratio = []
    expected_wins = []
    # NOTE(review): cumulative_rank_diffs, trial_timestep_rank_diff and
    # expected_wins are NOT reset between targets, so each later target's
    # statistics include the earlier targets' samples — confirm this
    # aggregation is intended.
    for target_val in target_val_list:
        higher = 0
        lower = 0
        for i in range(n_iter):
            print(i)  # progress output
            attested_order = _attested_order
            emergence_order = _emergence_order
            random.seed()
            rand_val, rank_diff_at_timestep, win_ratio = predict_seq(
                attested_order, emergence_order, get_random, get_rank_score_avg
            )
            cumulative_rank_diffs.append(rand_val - target_val)
            expected_wins.append(win_ratio)
            # Bugfix: identity comparison with None (was `== None`).
            if trial_timestep_rank_diff is None:
                trial_timestep_rank_diff = rank_diff_at_timestep
            else:
                trial_timestep_rank_diff = [sum(x) for x in zip(trial_timestep_rank_diff, rank_diff_at_timestep)]
            if rand_val > target_val:
                higher += 1
            else:
                lower += 1
        avg_rank_diff = sum(cumulative_rank_diffs) / len(cumulative_rank_diffs)
        avg_rank_diffs.append(avg_rank_diff)
        # Choose the tail of the test based on the sign of the mean difference.
        if avg_rank_diff >= 0:
            pvals.append(float(lower) / n_iter)
        else:
            pvals.append(float(higher) / n_iter)
        avg_rank_diff_timesteps = [i / n_iter for i in trial_timestep_rank_diff]
        avg_rank_diff_timesteps_multi.append(avg_rank_diff_timesteps)
        upper_conf_interval, lower_conf_interval = sms.DescrStatsW(
            cumulative_rank_diffs
        ).tconfint_mean()
        p_val_wins = ttest_1samp(expected_wins, 0.5)
        pvals_ratio.append(p_val_wins)
        u_c_interval, l_c_interval = sms.DescrStatsW(expected_wins).tconfint_mean()
        upper_conf_intervals.append(upper_conf_interval)
        lower_conf_intervals.append(lower_conf_interval)
        upper_conf_intervals_ratio.append(u_c_interval)
        lower_conf_intervals_ratio.append(l_c_interval)
    if return_raw_counts:
        return [lower, higher]
    return (
        pvals,
        avg_rank_diffs,
        upper_conf_intervals,
        lower_conf_intervals,
        avg_rank_diff_timesteps_multi
    )
def shuffle_test_expected_vals(n_iter: int, ranking_funcs: dict, model_order: dict, vecs_filename: str, order_filename: str) -> tuple:
_attested_order = get_attested_order(vecs_filename, vecs_col=2, multicols=True)
_emergence_order = get_emergence_order(order_filename, vecs_col=2, multicols=True)
memoized_preds = {}
win_ratios = {}
for func_name in ranking_funcs:
model_type = "local" if func_name == "Local" else "global"
expected_ratio, pred_ranks_vec = | |
# source repository: maaku/elements (provenance marker; not valid Python as bare text)
#!/usr/bin/env python2
import sys, os, json, traceback, decimal
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../python-bitcoinrpc"))
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from rotating_consensus import RotatingConsensus
from threading import Lock, current_thread
from time import sleep
from constants import FedpegConstants
from httplib import CannotSendRequest
import socket
settings = FedpegConstants()
port = 14242

# Two RPC connections per daemon: index 0 for the main thread, index 1 for
# the other thread (selected via thread_id()).
sidechain = [AuthServiceProxy(settings.sidechain_url), AuthServiceProxy(settings.sidechain_url)]
# We need to do a rescan on bitcoin, so we set a huge timeout
bitcoin = [AuthServiceProxy(settings.bitcoin_url, timeout=60*10), AuthServiceProxy(settings.bitcoin_url)]

# txid_concat -> set of input sets already signed for, replayed across restarts.
spent_from_history = {}
open('spent_from.log', 'a').close()  # Touch file (create if not already present)
with open('spent_from.log') as f:
    for line in f.readlines():
        # NOTE(review): eval() trusts the local log file's contents; each line
        # is expected to be "['txid_concat', <set of input pairs>]".
        l = eval(line)
        if l[0] not in spent_from_history:
            spent_from_history[l[0]] = set()
        spent_from_history[l[0]].add(l[1])
# Synchronous append-only handle used to persist new spent_from entries.
spent_from_log = os.open("spent_from.log", os.O_CREAT | os.O_WRONLY | os.O_SYNC | os.O_DSYNC | os.O_APPEND)
def check_reset_connections():
    """Ping both daemons' RPC endpoints, rebuilding any dead connection.

    Returns True when both RPCs responded; False when at least one
    connection had to be re-created (caller should retry later).
    """
    global sidechain, bitcoin
    connections_good = True
    try:
        sidechain[thread_id()].getblockcount()
    except (CannotSendRequest, socket.timeout):
        sidechain[thread_id()] = AuthServiceProxy(settings.sidechain_url)
        connections_good = False
    try:
        bitcoin[thread_id()].getblockcount()
    except (CannotSendRequest, socket.timeout):
        bitcoin[thread_id()] = AuthServiceProxy(settings.bitcoin_url)
        connections_good = False
    return connections_good
# If there are two outputs to the same destination, the first output must fully
# confirm before we allow the second to process.
# This is really for ease of developer headache, though we could change some
# indexes and allow this
map_lock = Lock()  # guards outputs_pending/outputs_waiting/utxos/fraud_check_map/donated_funds

# withdraw metadata map: {"txid_concat": sidechain_txid_concat, "sidechain_height": height,
#     "script_gen": p2sh_script_in_asm_for_bitcoin-tx, "script_match": p2sh_script_in_hex,
#     "value": value, "spent_from": set({frozenset({(bitcoin_txid, bitcoin_vout), ...}), ...})}
# (spent_from is a set of sets of inputs used in every tx which was signed and had output)
# sidechain txid_concat -> withdraw metadata map
outputs_pending = {}
# withdraw_target_p2sh_script_hex -> txid_concat (for withdraw_claims_pending use)
outputs_pending_by_p2sh_hex = {}
# withdraw_target_p2sh_script_hex -> [withdraw metadata map, ...]
outputs_waiting = {}

# utxo metadata map: {"redeem_info": redeem_info_for_bitcoin_signrawtransaction,
#     "privateKey": gen_private_key, "value": Decimal(value),
#     "spent_by": set(), "donated_map": {frozenset({(bitcoin_txid, bitcoin_vout), ...}): value}}
# spent_by is a set of sidechain txid_concats which can be used to look up in outputs_pending
# donated_map is a map from input sets to the value taken from donated_funds as a fee
# (bitcoin_txid, bitcoin_vout) -> utxo metadata map
utxos = {}

# height -> list of (sidechain_txid, vout) we need to ensure are spent by fraud proofs
fraud_check_map = {}

donated_funds = 0  # running total of donated value available to pay fees

# Guards manual_check_set (txids processed out-of-band by sign_withdraw_tx).
manual_check_lock = Lock()
manual_check_set = set()

main_thread = current_thread()
def thread_id():
    """Index into the per-thread RPC connection lists: 0 on the main thread, 1 elsewhere."""
    return 0 if current_thread() == main_thread else 1
def check_raise(cond):
    """Raise a generic Exception when *cond* is falsy (poor man's assert)."""
    if cond:
        return
    raise Exception("assertion failed")
def trigger_bitcoin_rescan():
    """Force bitcoind to rescan the chain by importing a fresh throwaway key.

    Uses contracthashtool to derive a private key from random salt, then
    imports it with rescan=True (blocking; relies on the long RPC timeout).
    """
    # TODO: Replace with a really random one, instead
    cht = os.popen("%s %s -c -p %s -a SALT -n %s" % (settings.contracthashtool_path, settings.cht_testnet_arg, settings.functionary_private_key, os.urandom(16).encode("hex")))
    # Output line offset shifts by one on testnet builds of the tool.
    useless_private_key = cht.read().split("\n")[0 + settings.is_testnet][16:]
    check_raise(cht.close() == None)
    # Trigger a rescan by importing something useless and new
    sys.stdout.write("Now triggering a full wallet rescan of the bitcoin chain...")
    sys.stdout.flush()
    bitcoin[thread_id()].importprivkey(useless_private_key, "", True)
    print("done")
def process_bitcoin_tx_for_utxos(tx, is_donation=False, manual_check=False):
    """Record outputs of *tx* paying the functionary address into utxos.

    is_donation additionally credits the output value to donated_funds.
    manual_check marks (or clears) txids processed out of band so the
    regular scan does not double-process them.
    """
    global donated_funds
    # Bugfix: the original released manual_check_lock only on the
    # fall-through path; the early `return` left the lock held forever.
    manual_check_lock.acquire()
    try:
        if not manual_check and tx["txid"] in manual_check_set:
            manual_check_set.remove(tx["txid"])
            return
        elif manual_check:
            manual_check_set.add(tx["txid"])
    finally:
        manual_check_lock.release()

    # Go through the outputs, adding any coins sent to the raw functionary address to utxos
    for nout, outp in enumerate(tx["vout"]):
        if outp["scriptPubKey"]["type"] == "scripthash" and outp["scriptPubKey"]["addresses"][0] == settings.redeem_script_address:
            txo = tx["vout"][nout]
            map_lock.acquire()
            print("Got %s UTXO sent to raw functioanry address (change or donation): %s:%d" % ("new" if (tx["txid"], nout) not in utxos else "existing", tx["txid"], nout))
            utxos[(tx["txid"], nout)] = {"redeem_info": {"txid": tx["txid"], "vout": nout, "scriptPubKey": outp["scriptPubKey"]["hex"], "redeemScript": settings.redeem_script}, "privateKey": settings.functionary_private_key, "value": decimal.Decimal(outp["value"]), "spent_by": set(), "donated_map": {}}
            if is_donation:
                print("Got donation of %s, now possibly paying fees" % str(outp["value"]))
                donated_funds = donated_funds + outp["value"]
            map_lock.release()
def sign_withdraw_tx(tx_hex, txid_concat_list):
    """Validate a proposed withdraw transaction and return our signature.

    tx_hex must pay each pending withdraw in txid_concat_list (in order)
    with a final change output back to the functionary address.  Every
    consistency check failure raises via check_raise.  On success, records
    the input set in each output's spent_from (persisted to the log) and
    returns the partially-signed transaction hex.
    """
    global donated_funds
    tx_raw = bitcoin[thread_id()].decoderawtransaction(tx_hex)
    # Only sign withdraws buried at least 6 blocks deep in the sidechain.
    max_sidechain_height = sidechain[thread_id()].getblockcount() - 6
    # One output per withdraw plus the trailing change output.
    check_raise(len(tx_raw["vout"]) == len(txid_concat_list) + 1)
    check_raise(tx_raw["vout"][-1]["scriptPubKey"]["type"] == "scripthash")
    check_raise(tx_raw["vout"][-1]["scriptPubKey"]["addresses"][0] == settings.redeem_script_address)
    tx_value = decimal.Decimal(0)
    privKeys = []
    redeemScripts = []
    inputs_set = set()
    input_size = 0
    for inp in tx_raw["vin"]:
        if (inp["txid"], inp["vout"]) not in utxos:
            # To-functionary UTXOs are only added after sufficient confirmations,
            # so we may need to find them here.
            spent_tx = bitcoin[thread_id()].getrawtransaction(inp["txid"], 1)
            process_bitcoin_tx_for_utxos(spent_tx, manual_check=True)
            check_raise((inp["txid"], inp["vout"]) in utxos)
        utxo = utxos[(inp["txid"], inp["vout"])]
        redeemScripts.append(utxo["redeem_info"])
        privKeys.append(utxo["privateKey"])
        tx_value = tx_value + decimal.Decimal(utxo["value"])
        inputs_set.add((inp["txid"], inp["vout"]))
        input_size = input_size + len(inp["scriptSig"]["hex"])/2
        if len(inp["scriptSig"]["hex"])/2 >= 0xfd:
            input_size += 2
    txid_concat_set = set()
    for i, txid_concat in enumerate(txid_concat_list):
        check_raise(txid_concat in outputs_pending)
        output = outputs_pending[txid_concat]
        check_raise(output["sidechain_height"] <= max_sidechain_height)
        tx_vout = tx_raw["vout"][i]
        check_raise(tx_vout["scriptPubKey"]["hex"] == output["script_match"])
        check_raise(decimal.Decimal(tx_vout["value"]) == output["value"])
        tx_value = tx_value - decimal.Decimal(tx_vout["value"])
        # Any replacement must conflict with every previous attempt so only
        # one of them can ever confirm.
        for input_set in output["spent_from"]:
            check_raise(not inputs_set.isdisjoint(input_set))
        txid_concat_set.add(txid_concat)
    # scriptSig is OP_0 x*(1-byte pushlen + 73-byte max-sized signature) + redeemScript push
    # if it triggers a long var-int for the scriptlen we have to include that, too
    RS_push_size = len(settings.redeem_script) / 2
    RS_push_size += 1 if RS_push_size <= 0x4b else (2 if RS_push_size <= 0xff else 3)
    scriptSig_size = 1 + 74 * settings.sigs_required + RS_push_size
    if scriptSig_size >= 0xfd:
        scriptSig_size += 2
    # Allow roughly 1 satoshi/byte of final tx size, capped by donations.
    fee_allowed = len(tx_hex)/2 - input_size + scriptSig_size * len(tx_raw["vin"])
    fee_allowed = min(fee_allowed, donated_funds * 100000000)
    fee_paid = tx_value - decimal.Decimal(tx_raw["vout"][-1]["value"])
    check_raise(fee_paid * 100000000 <= fee_allowed)
    donated_funds = donated_funds - fee_paid
    inputs_set = frozenset(inputs_set)
    for txid_concat in txid_concat_list:
        output = outputs_pending[txid_concat]
        if inputs_set not in output["spent_from"]:
            output["spent_from"].add(inputs_set)
            # Persist so restarts keep refusing non-conflicting re-spends.
            os.write(spent_from_log, "['%s', %s]\n" % (txid_concat, repr(inputs_set)))
    old_paid_memory = -1
    for inp in tx_raw["vin"]:
        utxo = utxos[(inp["txid"], inp["vout"])]
        utxo["spent_by"] = utxo["spent_by"] | txid_concat_set
        old_paid = 0
        if inputs_set in utxo["donated_map"]:
            old_paid = utxo["donated_map"][inputs_set]
        # All inputs of the same input set must agree on the fee already taken.
        if old_paid_memory == -1:
            old_paid_memory = old_paid
        elif old_paid != old_paid_memory:
            print("Internal data structure inconsistency!")
            sys.exit(1)
        utxo["donated_map"][inputs_set] = fee_paid + old_paid
    return bitcoin[thread_id()].signrawtransaction(tx_hex, redeemScripts, privKeys)["hex"]
class WatchPeerController(RotatingConsensus):
    """Rotating-consensus participant that assembles, co-signs and
    broadcasts withdraw transactions to the bitcoin chain."""

    round_local_tx_hex = ""  # our signed tx for the current round ("" = none)

    def gen_master_msg(self):
        """As round master: build a withdraw tx and propose it to peers.

        Returns JSON [signed_tx_hex, txid_concat_list], or None when there
        is nothing to do or the RPC connections needed resetting.
        """
        if not check_reset_connections():
            return None
        map_lock.acquire()
        try:
            # Require withdraws to be 8 blocks deep before proposing them.
            max_sidechain_height = sidechain[thread_id()].getblockcount() - 8
            txid_concat_list_untried = []
            txid_concat_list_retries = []
            command_untried = '%s %s -create' % (settings.bitcoin_tx_path, settings.btc_testnet_arg)
            command_retries = command_untried
            input_sets_retries = set()
            input_pairs_retries = set()
            for txid_concat in outputs_pending:
                output = outputs_pending[txid_concat]
                if output["sidechain_height"] > max_sidechain_height:
                    continue
                if len(output["spent_from"]) == 0:
                    # Fresh withdraw, never attempted before.
                    command_untried = command_untried + ' outscript=%.16g:"%s"' % (output["value"], output["script_gen"])
                    txid_concat_list_untried.append(txid_concat)
                elif len(txid_concat_list_untried) == 0:
                    # Retry path: only when every previous attempt's inputs
                    # are still unspent (no attempt can confirm anymore).
                    all_still_spendable = True
                    for input_set in output["spent_from"]:
                        for input_pair in input_set:
                            if bitcoin[thread_id()].gettxout(input_pair[0], input_pair[1], True) == None:
                                all_still_spendable = False
                                break
                        if not all_still_spendable:
                            break
                    if all_still_spendable:
                        command_retries = command_retries + ' outscript=%.16g:"%s"' % (output["value"], output["script_gen"])
                        txid_concat_list_retries.append(txid_concat)
                        input_sets_retries = input_sets_retries | output["spent_from"]
                        for input_set in output["spent_from"]:
                            input_pairs_retries = input_pairs_retries | input_set
            if len(txid_concat_list_untried) != 0:
                txid_concat_list = txid_concat_list_untried
                command = command_untried
            elif len(txid_concat_list_retries) != 0:
                # Greedy hitting-set: pick inputs covering every previous
                # attempt so the retry conflicts with all of them.
                inputs_required = []
                while len(input_sets_retries) != 0:
                    e = max(input_pairs_retries, key=lambda x: len([i for i in input_sets_retries if x in i]))
                    inputs_required.append(e)
                    input_sets_retries = set([x for x in input_sets_retries if e not in x])
                for input_pair in inputs_required:
                    command_retries = command_retries + ' in="%s":%d' % (input_pair[0], input_pair[1])
                txid_concat_list = txid_concat_list_retries
                command = command_retries
            else:
                return None
            cht = os.popen(command)
            tx_hex = cht.read().split("\n")[0]
            check_raise(cht.close() == None)
            funded_tx = bitcoin[thread_id()].fundrawtransaction(tx_hex, True)
            tx_raw = bitcoin[thread_id()].decoderawtransaction(funded_tx["hex"])
            change_value = decimal.Decimal(funded_tx["fee"]) + decimal.Decimal(tx_raw["vout"][funded_tx["changepos"]]["value"])
            # Replace the wallet's change output with one to the functionary
            # address; its value is patched in below after fee calculation.
            cht = os.popen('%s %s %s delout=%d outaddr=%s:%s' % (settings.bitcoin_tx_path, settings.btc_testnet_arg, funded_tx["hex"], funded_tx["changepos"], "0", settings.redeem_script_address))
            tx_hex = cht.read().split("\n")[0]
            check_raise(cht.close() == None)
            # Estimate final size including each input's future scriptSig.
            redeem_script_push_size = len(settings.redeem_script)/2
            if redeem_script_push_size <= 0x4b:
                redeem_script_push_size += 1
            elif redeem_script_push_size <= 0xff:
                redeem_script_push_size += 2
            else:
                redeem_script_push_size += 3
            input_size = 1 + 74 * settings.sigs_required + redeem_script_push_size
            if input_size >= 0xfd:
                input_size += 2
            pay_fee = decimal.Decimal(len(tx_hex)/2 + input_size * len(tx_raw["vin"])) / decimal.Decimal(100000000)
            pay_fee = min(pay_fee, funded_tx["fee"])
            if pay_fee > donated_funds:
                pay_fee = 0
            print("Paying fee of %s" % str(pay_fee))
            change_value = change_value - pay_fee
            cht = os.popen('%s %s %s delout=%d outaddr=%s:%s' % (settings.bitcoin_tx_path, settings.btc_testnet_arg, tx_hex, len(tx_raw["vout"]) - 1, change_value, settings.redeem_script_address))
            tx_hex = cht.read().split("\n")[0]
            check_raise(cht.close() == None)
            self.round_local_tx_hex = sign_withdraw_tx(tx_hex, txid_concat_list)
            return json.dumps([self.round_local_tx_hex, txid_concat_list])
        finally:
            map_lock.release()

    def recv_master_msg(self, msg):
        """As a peer: validate and co-sign the master's proposed tx."""
        msg_decoded = json.loads(msg)
        map_lock.acquire()
        try:
            self.round_local_tx_hex = sign_withdraw_tx(msg_decoded[0], msg_decoded[1])
            return self.round_local_tx_hex
        finally:
            map_lock.release()

    def round_done(self, peer_messages):
        """Merge peers' signatures with ours; broadcast if fully signed."""
        txn_concat = self.round_local_tx_hex
        check_raise(txn_concat != "")
        input_list = []
        for inp in bitcoin[thread_id()].decoderawtransaction(txn_concat)["vin"]:
            input_list.append((inp["txid"], inp["vout"]))
        for msg in peer_messages:
            try:
                # Peers must have signed the exact same inputs, in order.
                for i, inp in enumerate(bitcoin[thread_id()].decoderawtransaction(msg[1])["vin"]):
                    check_raise(input_list[i] == (inp["txid"], inp["vout"]))
                txn_concat = txn_concat + msg[1]
            except:
                print("Peer %s sent invalid transaction" % msg[0])
        # signrawtransaction merges the signatures from the concatenated hexes.
        res = bitcoin[thread_id()].signrawtransaction(txn_concat)
        print("Final round result:")
        print(res)
        if res["complete"]:
            bitcoin[thread_id()].sendrawtransaction(res["hex"])
        return

    def round_failed(self):
        """Reset per-round state after a failed consensus round."""
        self.round_local_tx_hex = ""
        return
def process_sidechain_tx_for_utxos(tx, height):
    """Import bitcoin UTXOs referenced by a sidechain tx's withdrawout outputs.

    Derives the contract-tweaked redeem script and private key with
    contracthashtool, imports the address into bitcoind, and records the
    UTXO in the utxos map.  A UTXO seen a second time is queued in
    fraud_check_map for fraud-proof checking.
    """
    for vout, output in enumerate(tx["vout"]):
        if output["scriptPubKey"]["type"] == "withdrawout":
            outp = output["scriptPubKey"]["asm"].split(" ")
            check_raise(len(outp) == 16)
            bitcoin_tx = outp[2]
            bitcoin_raw_tx = bitcoin[thread_id()].getrawtransaction(bitcoin_tx, 1)
            txo = bitcoin_raw_tx["vout"][int(outp[3])]
            # The contract hash rides in the corresponding input's scriptSig.
            inp = tx["vin"][vout]["scriptSig"]["asm"].split(" ")
            contract = inp[2]
            # Derive the tweaked redeem script / address for this contract.
            cht = os.popen("%s %s -g -r %s -f %s" % (settings.contracthashtool_path, settings.cht_testnet_arg, settings.redeem_script, contract))
            cht_out = cht.read()
            check_raise(cht.close() == None)
            # Output line offsets shift by one on testnet builds of the tool.
            modified_redeem_script = cht_out.split("\n")[2 + settings.is_testnet][24:]
            modified_address = cht_out.split("\n")[3 + settings.is_testnet][40:]
            bitcoin[thread_id()].importaddress(modified_redeem_script, "", False, True)
            # Derive the matching tweaked private key.
            cht = os.popen("%s %s -c -p %s -f %s" % (settings.contracthashtool_path, settings.cht_testnet_arg, settings.functionary_private_key, contract))
            gen_private_key = cht.read().split("\n")[0 + settings.is_testnet][16:]
            check_raise(cht.close() == None)
            outp[3] = int(outp[3])
            map_lock.acquire()
            already_had = (bitcoin_tx, outp[3]) in utxos
            utxos[(bitcoin_tx, outp[3])] = {"redeem_info": {"txid": bitcoin_tx, "vout": outp[3], "scriptPubKey": txo["scriptPubKey"]["hex"], "redeemScript": modified_redeem_script}, "privateKey": gen_private_key, "value": decimal.Decimal(txo["value"]), "spent_by": set(), "donated_map": {}}
            if already_had:
                # Same bitcoin TXO claimed twice -> candidate for a fraud proof.
                if height not in fraud_check_map:
                    fraud_check_map[height] = []
                fraud_check_map[height].append((tx["txid"], vout))
            map_lock.release()
            print("Got %s UTXO (%s:%d) from sidechain tx %s:%d" % ("new" if not already_had else "existing", bitcoin_tx, outp[3], tx["txid"], vout))
def process_sidechain_tx_for_withdraw(tx, height):
    """Queue withdraw outputs of a sidechain tx for payout on bitcoin.

    New withdraws go into outputs_pending; a second withdraw to the same
    P2SH target waits in outputs_waiting until the first fully confirms.
    """
    for vout, output in enumerate(tx["vout"]):
        if output["scriptPubKey"]["type"] == "withdraw":
            outp = output["scriptPubKey"]["asm"].split(" ")
            if len(outp) == 5 and outp[2] == settings.inverse_bitcoin_genesis_hash and outp[3] == settings.secondScriptPubKeyHash:
                check_raise(outp[1] == "OP_DROP" and outp[4] == "OP_WITHDRAWPROOFVERIFY")
                # "P2SH" ASCII marker (hex 50325348) prefixes the contract hash.
                if outp[0][0:8] != "50325348":
                    continue
                contract = outp[0][8:]
                check_raise(len(contract) == 40)
                p2sh_script = "OP_HASH160 0x14%s OP_EQUAL" % outp[0][8:]
                p2sh_hex = "a914%s87" % outp[0][8:]
                txid_concat = tx["txid"] + ":" + str(vout)
                value = decimal.Decimal(output["value"])
                # Restore any persisted spent_from sets for this withdraw.
                if txid_concat in spent_from_history:
                    output = {"txid_concat": txid_concat, "sidechain_height": height, "script_gen": p2sh_script, "script_match": p2sh_hex, "value": value, "spent_from": spent_from_history[txid_concat]}
                else:
                    output = {"txid_concat": txid_concat, "sidechain_height": height, "script_gen": p2sh_script, "script_match": p2sh_hex, "value": value, "spent_from": set()}
                # We track the set of inputs (from the utxos map) from which we've sent the withdraw,
                # freely signing double-spends, but never allowing two non-conflicting withdraws
                map_lock.acquire()
                if txid_concat in outputs_pending:
                    print("Re-ran process_sidechain_tx_for_withdraw with existing withdraw: %s???" % txid_concat)
                    sys.exit(1)
                if p2sh_hex in outputs_pending_by_p2sh_hex:
                    # Same destination already pending: park this one.
                    if p2sh_hex in outputs_waiting:
                        outputs_waiting[p2sh_hex].append(output)
                    else:
                        outputs_waiting[p2sh_hex] = [output]
                    print("Got new txo for withdraw (waiting on previous tx %s): %s" % (txid_concat, outputs_pending_by_p2sh_hex[p2sh_hex]))
                    map_lock.release()
                    continue
                outputs_pending[txid_concat] = output
                outputs_pending_by_p2sh_hex[p2sh_hex] = txid_concat
                print("Got new txo for withdraw: %s (to %s with value %s)" % (txid_concat, p2sh_hex, str(value)))
                map_lock.release()
def process_sidechain_blockchain(min_height, | |
from collections import namedtuple, OrderedDict
import re
import os
import functools
import stat
import sys
import zipfile
import io
import csv
import string
import codecs
import datetime
import time
import argparse
import fnmatch
EOL = '\n'
COLSEP = '\t'
COLUMN_SEPARATOR = COLSEP

g_logs = []              # accumulated log lines; get with get_log()
g_args = None            # parsed CLI namespace; get with get_args()
g_currentProgress = None  # label of the item currently being processed
g_numProgress = 0         # items processed so far (progress())
g_logfp = sys.stderr      # stream log() writes to

# save on start to auto-log at end
g_scriptname = None

WEEKDAYS = [ 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun' ]
class bcolors:
    """ANSI terminal escape sequences for colored/styled output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def space_with_nbsp(text):
    """Replace every space in *text* with the HTML &nbsp; entity."""
    return '&nbsp;'.join(text.split(' '))
def split_xdid(xdid):
    """Split an xdid like 'nyt2015-07-01' into (pubid, year, month, day).

    Returns the regex match's groups tuple; on no match returns a
    4-element list of Nones (kept as-is for backward compatibility —
    the old docstring incorrectly claimed plain None).
    Fixed: raw-string the regex so the \\d escapes are not deprecated.
    """
    m = re.match(r'([a-z]+)(\d{4})-(\d{2})-(\d{2})', xdid)
    return m.groups() if m else [None, None, None, None]
def br_with_n(text):
    """Replace HTML <br> tags (any form, any case) with newlines."""
    pattern = re.compile(r'<br.*?>', flags=re.IGNORECASE)
    return pattern.sub('\n', text)
def get_log():
    """Return all accumulated log lines as a single EOL-terminated string."""
    return EOL.join(g_logs) + EOL
def log(s, minverbose=0, severity='INFO'):
    """Write one log line to g_logfp and remember it in g_logs.

    WARNING/ERROR lines are colorized when the log stream is a terminal.
    """
    sev = severity.lower()
    if g_logfp.isatty():  # Colors only for a tty terminal
        if sev == 'warning':
            s = bcolors.WARNING + s + bcolors.ENDC
        if sev == 'error':
            s = bcolors.FAIL + s + bcolors.ENDC
    if g_logfp:
        g_logfp.write("%s: %s\n" % (severity.upper(), s))
    g_logs.append("%s: [%s] %s" % (g_currentProgress or g_scriptname, severity.upper(), s))
def info(_s, _m=0):
    """Log *_s* at info severity."""
    log(_s, minverbose=_m, severity='info')
def warn(_s, _m=0):
    """Log *_s* at warning severity (colorized on a tty)."""
    log(_s, minverbose=_m, severity='warning')
def error(_s, _m=0):
    """Log *_s* at error severity (colorized on a tty)."""
    log(_s, minverbose=_m, severity='error')
def summary(_s, _m=0):
    """Log *_s* at summary severity."""
    log(_s, minverbose=_m, severity='summary')
# print without logging if -d
def debug(s):
    """Print *s* directly (no log entry) only when --debug was given."""
    if g_args.debug:
        print(" " + s)
def progress(rest=None, every=1):
    """Update a single-line tty progress indicator.

    With *rest*, increments the counter and reprints it every *every*
    calls; called with no argument, resets the counter and ends the line.
    No-op when stdout is not a tty.
    """
    global g_currentProgress, g_numProgress
    if not sys.stdout.isatty():
        return
    if rest:
        g_numProgress += 1
        g_currentProgress = rest
        if g_numProgress % every == 0:
            # \r rewrites the same terminal line in place.
            print("\r% 6d %s " % (g_numProgress, rest), end="")
            sys.stdout.flush()
    else:
        g_currentProgress = ""
        g_numProgress = 0
        print()
        sys.stdout.flush()
def args_parser(desc=""):
    """Log the full command-line invocation and return a fresh ArgumentParser."""
    log("[%s]: %s" % (desc, " ".join(sys.argv)))
    return argparse.ArgumentParser(description=desc)
def get_args(desc="", parser=None):
global g_args, g_scriptname
g_scriptname = parse_pathname(sys.argv[0]).base
if g_args:
return g_args
if not parser:
parser = args_parser(desc)
parser.add_argument('inputs', nargs='*', help='toplevel input(s)')
parser.add_argument('-o', '--output', dest='output', action='store')
parser.add_argument('-q', '--quiet', dest='verbose', action='store_const', const=-1, default=0)
parser.add_argument('-v', '--verbose', dest='verbose', action='count', default=0)
parser.add_argument('-d', '--debug', dest='debug', action='store_true', default=False, help='abort on exception')
parser.add_argument('-c', '--corpus', dest='corpusdir', default='crosswords', help='corpus source')
g_args = parser.parse_args()
return g_args
def find_files(*paths, **kwargs):
    """Yield (filename, contents), discarding the mtime from find_files_with_time()."""
    for filename, contents, _mtime in find_files_with_time(*paths, **kwargs):
        yield filename, contents
def to_timet(y, mon=1, d=1, h=0, m=0, s=0):
    """Convert a local calendar date/time to a unix timestamp (float)."""
    moment = datetime.datetime(y, mon, d, h, m, s)
    return time.mktime(moment.timetuple())
def generate_zip_files(data):
    """Yield (name, contents, unix_time) for each member of zip bytes *data*.

    Members come out in filename order; a corrupt archive logs an error
    and yields nothing.
    """
    try:
        archive = zipfile.ZipFile(io.BytesIO(data))
        for entry in sorted(archive.infolist(), key=lambda e: e.filename):
            yield entry.filename, archive.read(entry), to_timet(*entry.date_time)
    except zipfile.BadZipfile as e:
        error("generate_zip_files(): %s" % str(e))
# walk all 'paths' recursively and yield (filename, contents) for non-hidden files
def find_files_with_time(*paths, **kwargs):
    """Yield (filename, contents_bytes, mtime) for files under *paths*.

    Keyword args:
        ext: only yield names ending with this extension.
        strip_toplevel: drop the leading directory from toplevel zip
            member names (default True).
    Zip archives — given directly or found during a directory walk — are
    expanded in place; dotfiles are skipped with a log message.
    """
    ext = kwargs.get("ext")
    should_strip_toplevel = kwargs.get("strip_toplevel", True)
    for path in paths:
        try:
            if stat.S_ISDIR(os.stat(path).st_mode):
                # handle directories
                for thisdir, subdirs, files in os.walk(path):
                    for fn in sorted(files):
                        fullfn = os.path.join(thisdir, fn)
                        if fn.endswith('.zip'):
                            # Nested zip: yield members as "outer.zip:member".
                            for zipfn, zipdata, zipdt in generate_zip_files(open(fullfn, 'rb').read()):
                                if ext and not zipfn.endswith(ext):
                                    continue
                                yield fn + ":" + zipfn, zipdata, zipdt
                        elif ext and not fn.endswith(ext):  # only looking for one particular ext, don't log
                            continue
                        else:
                            progress(fullfn)
                            if fn[0] == ".":
                                info("ignoring dotfile")
                                continue
                            yield fullfn, open(fullfn, 'rb').read(), filetime(fullfn)
            elif path.endswith('.zip'):
                for zipfn, zipdata, zipdt in generate_zip_files(open(path, 'rb').read()):
                    if ext and not zipfn.endswith(ext):  # as above
                        continue
                    progress(zipfn)
                    if should_strip_toplevel:
                        zipfn = strip_toplevel(zipfn)
                    yield zipfn, zipdata, zipdt
            else:
                if ext and not path.endswith(ext):
                    continue
                # handle individual files
                fullfn = path
                yield fullfn, open(fullfn, 'rb').read(), filetime(fullfn)
        except FileNotFoundError as e:
            error("find_files_with_time(): %s" % str(e))
    # reset progress indicator after processing all files
    progress()
def filetime(fn):
    """Return *fn*'s modification time, falling back to now if stat fails.

    Fixed: catch OSError instead of a bare except, which also swallowed
    KeyboardInterrupt/SystemExit.
    """
    try:
        return os.path.getmtime(fn)
    except OSError:
        return time.time()
def iso8601(timet=None):
    """Return the local date 'YYYY-MM-DD' for unix time *timet* (falsy -> now)."""
    t = timet or time.time()
    return datetime.datetime.fromtimestamp(int(t)).isoformat(' ').split(' ')[0]
# YYYY-MM-DD to datetime.date
def datestr_to_datetime(s):
    """Parse a 'YYYY-MM-DD' string into a datetime.date.

    On failure, logs an error and returns today's date instead
    (re-raises when --debug is set).
    """
    try:
        return datetime.date(*[int(x) for x in s.split("-")])
    except Exception as e:
        error("datestr_to_datetime(): %s" % str(e))
        if g_args.debug:
            raise
        dt = datetime.date.today()
    return dt
def parse_xdid(path):
    """Return the text between *path*'s final '/' and final '.'.

    Raises ValueError when either separator is absent (same as before).
    """
    start = path.rindex('/') + 1
    end = path.rindex('.')
    return path[start:end]
def parse_pathname(path):
    """Split *path* into a Pathname(path, base, ext, filename) tuple.

    Unlike os.path.splitext, ext keeps every dotted suffix
    ('file.xml.1' -> '.xml.1'); base strips only the last suffix.
    """
    # Fix to properly split names like file.xml.1
    pieces = os.path.basename(path).split(os.extsep)
    ext = os.extsep + os.extsep.join(pieces[1:])
    dirname, fn = os.path.split(path)
    if not fn:
        ext = ''
    base = os.path.splitext(fn)[0]
    Pathname = namedtuple('Pathname', 'path base ext filename')
    return Pathname(path=dirname, base=base, ext=ext, filename=fn)
def parse_pubid(fn):
    """Return the leading alphabetic publisher id of *fn*'s base name, lowercased."""
    base = parse_pathname(fn).base
    match = re.search("(^[A-Za-z]*)", base)
    return match.group(1).lower()
def construct_date(y, m, d):
    """Build a datetime.date from loosely-formatted year/month/day values.

    Two-digit years are mapped into 1900..current-year; out-of-range
    components log via debug() and yield None.
    """
    thisyear = datetime.datetime.today().year
    year, mon, day = int(y), int(m), int(d)
    if 1900 < year <= thisyear:
        pass
    elif year < 100:
        # Century guess: 00..(thisyear-2000) -> 20xx, otherwise 19xx.
        if 0 <= year <= thisyear - 2000:
            year += 2000
        else:
            year += 1900
    else:
        debug("year outside 1900-%s: '%s'" % (thisyear, y))
        return None
    if not 1 <= mon <= 12:
        debug("bad month '%s'" % m)
        return None
    if not 1 <= day <= 31:
        debug("bad day %s" % d)
        return None
    return datetime.date(year, mon, day)
def parse_iso8601(s):
    """Return the first YYYY-MM-DD-shaped substring of *s*, or None."""
    m = re.search(r'\d+(-\d+(-\d+))', s)
    return m.group(0) if m else None
def parse_seqnum(s):
    """Return the first (optionally negative) dashed number triple in *s*, or None."""
    m = re.search(r'-?\d+(-\d+(-\d+))', s)
    return m.group(0) if m else None
# from original filename
def parse_date_from_filename(fn):
    """Extract a date embedded in *fn*'s base name; None if absent/invalid."""
    base = parse_pathname(fn).base
    m = re.search("(\d{2,4})-?(\d{2})-?(\d{2})", base)
    if m:
        g1, g2, g3 = m.groups()
        # try YYMMDD first, then MMDDYY
        return construct_date(g1, g2, g3) or construct_date(g3, g1, g2)
def clean_filename(fn):
    """Return *fn*'s base name with spaces, quotes and backslashes replaced by '_'."""
    base = parse_pathname(fn).base
    for bad in " \"'\\":
        base = base.replace(bad, '_')
    return base
# newext always includes the '.' so it can be removed entirely with newext=""
def replace_ext(fn, newext):
    """Swap *fn*'s last extension for *newext* (which includes the dot)."""
    return os.path.splitext(fn)[0] + newext
class AttrDict(dict):
    """A dict whose keys are also readable/writable as attributes."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Alias __dict__ to self so attribute and item access share storage.
        self.__dict__ = self
#class AttrDict(dict):
# __getattr__ = dict.__getitem__
# __setattr__ = dict.__setitem__
def autoconvert(v):
    """Map None to '', digit strings to int, anything else unchanged."""
    if v is None:
        return ''
    return int(v) if v.isdigit() else v
# should always include header row
# returns a sequence of mappings or tuples, depending on whether objname is specified
def parse_tsv_data(contents, objname=None):
    """Generate one AttrDict per data row of tab-separated *contents*.

    The first row is the header.  When *objname* is given, values are
    auto-converted (digit strings -> int).
    """
    csvreader = csv.DictReader(contents.splitlines(), delimiter=COLUMN_SEPARATOR, quoting=csv.QUOTE_NONE, skipinitialspace=True)
    if objname:
        if not csvreader.fieldnames:
            return
        # NOTE(review): this namedtuple is never used afterwards — despite the
        # comment above, rows are always yielded as AttrDicts.  Its only
        # effect is raising if header names aren't identifiers; confirm
        # before removing.
        nt = namedtuple(objname, " ".join(csvreader.fieldnames))
    for row in csvreader:
        if objname:
            r = AttrDict((k, autoconvert(v)) for k, v in row.items())
        else:
            r = AttrDict(row)
        yield r
def parse_tsv(fn, objname=None):
    """Read a UTF-8 TSV file into a dict of rows.

    Errors are logged and yield an empty dict (re-raised under --debug).
    """
    try:
        fp = codecs.open(fn, encoding='utf-8')
        rows = parse_tsv_data(fp.read(), objname)
        # NOTE(review): r[0] indexes each AttrDict row by the literal key 0,
        # not by its first column — confirm rows actually carry such a key.
        return dict((r[0], r) for r in rows)
    except Exception as e:
        error("parse_tsv('%s') %s" % (fn, str(e)))
        if g_args.debug:
            raise
    return {}
def parse_tsv_rows(fn, objname=None):
    """Read a UTF-8 TSV file into a list of AttrDict rows.

    Errors are logged and yield an empty list (re-raised under --debug).
    """
    try:
        fp = codecs.open(fn, encoding='utf-8')
        return [r for r in parse_tsv_data(fp.read(), objname)]
    except Exception as e:
        error("parse_tsv_rows('%s'): %s" % (fn, str(e)))
        if g_args.debug:
            raise
    return []
class OutputZipFile(zipfile.ZipFile):
    """Write-only zip archive that prefixes entries with *toplevel* and
    appends the accumulated log as <scriptname>.log on destruction."""

    def __init__(self, fnzip, toplevel="", log=True):
        zipfile.ZipFile.__init__(self, fnzip, 'w', allowZip64=True)
        self.toplevel = toplevel
        self.log = log  # whether __del__ appends the log file

    def write_file(self, fn, contents, timet=None):
        """Store *contents* under toplevel/fn with mtime *timet* (default: now)."""
        if not timet:
            timet = time.time()
        fullfn = os.path.join(self.toplevel, fn)
        zi = zipfile.ZipInfo(fullfn, datetime.datetime.fromtimestamp(timet).timetuple())
        zi.external_attr = 0o444 << 16  # unix mode bits: mark entry read-only
        zi.compress_type = zipfile.ZIP_DEFLATED
        self.writestr(zi, contents)
        if g_args.debug:
            debug("wrote %s to %s" % (fullfn, self.filename))

    def write(self, data):
        # Direct stream writes are disallowed; use write_file() instead.
        raise Exception("can't write directly to .zip")

    def __del__(self):
        if self.log:
            self.write_file(g_scriptname + ".log", get_log().encode('utf-8'))
        zipfile.ZipFile.__del__(self)
class OutputFile:
    """Plain-stream counterpart of OutputZipFile: writes everything to one fp."""

    def __init__(self, outfp=None):
        self.toplevel = "xd"
        self.outfp = outfp

    def write_file(self, fn, contents, timet=None):
        """Append *contents* preceded by a '--- fn ---' separator (timet ignored)."""
        self.outfp.write("\n--- %s ---\n" % fn)
        self.outfp.write(contents)

    def write(self, data):
        self.outfp.write(data)

    def write_row(self, fields):
        """Write one tab-separated, EOL-terminated row."""
        self.write(COLUMN_SEPARATOR.join(fields) + EOL)

    def write_html(self, fn, innerhtml, title=""):
        """Wrap *innerhtml* with the site header/footer and write it ASCII-escaped."""
        from .html import html_header, html_footer
        basepagename = parse_pathname(fn).path
        htmlstr = html_header(current_url=basepagename, title=title) + innerhtml + html_footer()
        # Round-trip through ascii with xmlcharrefreplace to entity-escape non-ASCII.
        self.write(htmlstr.encode("ascii", 'xmlcharrefreplace').decode("ascii"))
def strip_toplevel(fn):
    """Drop *fn*'s leading path component, if it has one."""
    _head, sep, rest = fn.partition("/")
    return rest if sep else fn
def disambiguate_fn(fn, all_filenames):
    """Return *fn*, suffixing its base with a, b, c... until it's not in all_filenames.

    Raises IndexError if all 26 suffixes are exhausted.
    """
    p = parse_pathname(fn)
    # append a, b, c, etc until finding one that hasn't been taken already
    i = 0
    while fn in all_filenames:
        info('%s already in use, disambiguating' % fn)
        fn = os.path.join(p.path, p.base + string.ascii_lowercase[i] + p.ext)
        i += 1
    return fn
class OutputDirectory:
def __init__(self, toplevel_dir):
self.toplevel = toplevel_dir
self.files = {}
def exists(self, fn):
fullfn = os.path.join(self.toplevel, fn) # prepend our toplevel
return os.path.exists(fullfn)
def open_file(self, fn, mode='w'):
if fn in self.files:
if mode == 'a':
# just keep appending to same file
return self.files[fn]
if mode == 'w':
# make a new file with a disambiguated filename
fn = disambiguate_fn(fn, self.files)
fullfn = os.path.join(self.toplevel, fn) # prepend our toplevel
# make parent dirs
try:
os.makedirs(parse_pathname(fullfn).path)
except Exception as e:
pass # log("%s: %s" % (type(e), str(e)))
f = codecs.open(fullfn, mode, encoding='utf-8')
if mode[0] == 'a':
self.files[fn] = f
elif mode[0] | |
add data to object
if freqs is not None and power_spectra is not None:
self.add_data(freqs, power_spectra, freq_range)
# If 'verbose', print out a marker of what is being run
if self.verbose and not progress:
print('Running FOOOFGroup across {} power spectra.'.format(len(self.power_spectra)))
# Run linearly
if n_jobs == 1:
self._reset_group_results(len(self.power_spectra))
for ind, power_spectrum in \
_progress(enumerate(self.power_spectra), progress, len(self)):
self._fit(power_spectrum=power_spectrum)
self.group_results[ind] = self._get_results()
# Run in parallel
else:
self._reset_group_results()
n_jobs = cpu_count() if n_jobs == -1 else n_jobs
with Pool(processes=n_jobs) as pool:
self.group_results = list(_progress(pool.imap(partial(_par_fit, fg=self),
self.power_spectra),
progress, len(self.power_spectra)))
# Clear the individual power spectrum and fit results of the current fit
self._reset_data_results(clear_spectrum=True, clear_results=True)
def drop(self, inds):
    """Drop one or more model fit results from the object.

    Parameters
    ----------
    inds : int or array_like of int or array_like of bool
        Indices to drop model fit results for.
        If a boolean mask, True indicates indices to drop.

    Notes
    -----
    Dropped fits are replaced by null results, so the shape of the
    stored group results is preserved.
    """
    for index in check_inds(inds):
        # Pull the fit out as a FOOOF object, blank its results, and
        # store the now-null results back in the same slot.
        nulled = self.get_fooof(index)
        nulled._reset_data_results(clear_results=True)
        self.group_results[index] = nulled.get_results()
def get_results(self):
    """Return the results run across a group of power spectra.

    Returns
    -------
    list
        The stored per-spectrum model fit results.
    """
    return self.group_results
def get_params(self, name, col=None):
    """Return model fit parameters for specified feature(s).

    Parameters
    ----------
    name : {'aperiodic_params', 'peak_params', 'gaussian_params', 'error', 'r_squared'}
        Name of the data field to extract across the group.
    col : {'CF', 'PW', 'BW', 'offset', 'knee', 'exponent'} or int, optional
        Column name / index to extract from selected data, if requested.
        Only used for name of {'aperiodic_params', 'peak_params', 'gaussian_params'}.

    Returns
    -------
    out : ndarray
        Requested data.

    Raises
    ------
    NoModelError
        If there are no model fit results available.
    ValueError
        If the input for the `col` input is not understood.

    Notes
    -----
    For further description of the data you can extract, check the FOOOFResults documentation.
    """
    if not self.has_model:
        raise NoModelError("No model fit results are available, can not proceed.")
    # Allow for shortcut alias, without adding `_params`
    if name in ['aperiodic', 'peak', 'gaussian']:
        name = name + '_params'
    # If col specified as string, get mapping back to integer
    # NOTE(review): the str check must come first -- bool is a subclass of
    # int, so a bool col would fall into the int branch.
    if isinstance(col, str):
        col = get_indices(self.aperiodic_mode)[col]
    elif isinstance(col, int):
        if col not in [0, 1, 2]:
            raise ValueError("Input value for `col` not valid.")
    # Pull out the requested data field from the group data
    # As a special case, peak_params are pulled out in a way that appends
    # an extra column, indicating which FOOOF run each peak comes from
    if name in ('peak_params', 'gaussian_params'):
        out = np.array([np.insert(getattr(data, name), 3, index, axis=1)
                        for index, data in enumerate(self.group_results)])
        # This updates index to grab selected column, and the last column
        # This last column is the 'index' column (FOOOF object source)
        if col is not None:
            col = [col, -1]
    else:
        out = np.array([getattr(data, name) for data in self.group_results])
    # Some data can end up as a list of separate arrays
    # If so, concatenate it all into one 2d array
    if isinstance(out[0], np.ndarray):
        out = np.concatenate([arr.reshape(1, len(arr)) \
            if arr.ndim == 1 else arr for arr in out], 0)
    # Select out a specific column, if requested
    if col is not None:
        out = out[:, col]
    return out
@copy_doc_func_to_method(plot_fg)
def plot(self, save_fig=False, file_name=None, file_path=None):
    # Thin delegation to the plotting function; the decorator copies
    # plot_fg's docstring onto this method.
    plot_fg(self, save_fig, file_name, file_path)
@copy_doc_func_to_method(save_report_fg)
def save_report(self, file_name, file_path=None):
    # Thin delegation; docstring is copied from save_report_fg.
    save_report_fg(self, file_name, file_path)
@copy_doc_func_to_method(save_fg)
def save(self, file_name, file_path=None, append=False,
         save_results=False, save_settings=False, save_data=False):
    # Thin delegation; docstring is copied from save_fg.
    save_fg(self, file_name, file_path, append, save_results, save_settings, save_data)
def load(self, file_name, file_path=None):
    """Load FOOOFGroup data from file.

    Parameters
    ----------
    file_name : str
        File to load data from.
    file_path : str, optional
        Path to directory to load from. If None, loads from current directory.
    """
    # Clear results so as not to have possible prior results interfere
    self._reset_group_results()
    power_spectra = []
    for ind, data in enumerate(load_jsonlines(file_name, file_path)):
        self._add_from_dict(data)
        # If settings are loaded, check and update based on the first line
        if ind == 0:
            self._check_loaded_settings(data)
        # If power spectra data is part of loaded data, collect to add to object
        if 'power_spectrum' in data.keys():
            power_spectra.append(data['power_spectrum'])
        # If results part of current data added, check and update object results
        if set(OBJ_DESC['results']).issubset(set(data.keys())):
            self._check_loaded_results(data)
            self.group_results.append(self._get_results())
    # Reconstruct frequency vector, if information is available to do so
    if self.freq_range:
        self._regenerate_freqs()
    # Add power spectra data, if they were loaded
    if power_spectra:
        self.power_spectra = np.array(power_spectra)
    # Reset peripheral data from last loaded result, keeping freqs info
    self._reset_data_results(clear_spectrum=True, clear_results=True)
def get_fooof(self, ind, regenerate=True):
    """Get a FOOOF object for a specified model fit.

    Parameters
    ----------
    ind : int
        The index of the FOOOFResults in FOOOFGroup.group_results to load.
    regenerate : bool, optional, default: True
        Whether to regenerate the model fits from the given fit parameters.
        (The docstring previously claimed a default of False; the actual
        signature default is True.)

    Returns
    -------
    fm : FOOOF
        The FOOOFResults data loaded into a FOOOF object.
    """
    # Initialize a FOOOF object, with same settings & check data mode as current FOOOFGroup
    fm = FOOOF(*self.get_settings(), verbose=self.verbose)
    fm.set_check_data_mode(self._check_data)
    # Add data for specified single power spectrum, if available
    # The power spectrum is inverted back to linear, as it is re-logged when added to FOOOF
    if self.has_data:
        fm.add_data(self.freqs, np.power(10, self.power_spectra[ind]))
    # If no power spectrum data available, copy over data information & regenerate freqs
    else:
        fm.add_meta_data(self.get_meta_data())
    # Add results for specified power spectrum, regenerating full fit if requested
    fm.add_results(self.group_results[ind])
    if regenerate:
        fm._regenerate_model()
    return fm
def get_group(self, inds):
    """Get a FOOOFGroup object with the specified sub-selection of model fits.

    Parameters
    ----------
    inds : array_like of int or array_like of bool
        Indices to extract from the object.
        If a boolean mask, True indicates indices to select.

    Returns
    -------
    FOOOFGroup
        The requested selection of results data loaded into a new FOOOFGroup object.
    """
    # Normalize the indices encoding to a list of int.
    selected = check_inds(inds)
    # New group object inherits this group's settings.
    sub_group = FOOOFGroup(*self.get_settings(), verbose=self.verbose)
    if self.has_data:
        # Spectra are stored logged; un-log here since add_data re-logs them.
        sub_group.add_data(self.freqs, np.power(10, self.power_spectra[selected, :]))
    else:
        # Without spectra, carry over the metadata so freqs can be regenerated.
        sub_group.add_meta_data(self.get_meta_data())
    # Copy across the chosen per-spectrum fit results.
    sub_group.group_results = [self.group_results[index] for index in selected]
    return sub_group
def print_results(self, concise=False):
    """Print out FOOOFGroup results.

    Parameters
    ----------
    concise : bool, optional, default: False
        Whether to print the report in a concise mode, or not.
    """
    report = gen_results_fg_str(self, concise)
    print(report)
def _fit(self, *args, **kwargs):
    """Create an alias to FOOOF.fit for FOOOFGroup object, for internal use."""
    # The group-level `fit` overrides the parent's; this alias keeps the
    # single-spectrum fit reachable from group internals and parallel workers.
    super().fit(*args, **kwargs)
def _get_results(self):
    """Create an alias to FOOOF.get_results for FOOOFGroup object, for internal use."""
    # Bypasses the group-level get_results override to fetch the results
    # of the single spectrum currently held by the parent object.
    return super().get_results()
def _check_width_limits(self):
    """Check and warn about bandwidth limits / frequency resolution interaction."""
    # Only check & warn on the first power spectrum, to avoid spamming
    # standard output for every spectrum in the group.
    # NOTE(review): equality of the first sample is used as a proxy for
    # "currently fitting the first spectrum" -- this can misfire if two
    # spectra happen to share the same first value.
    if self.power_spectra[0, 0] == self.power_spectrum[0]:
        super()._check_width_limits()
###################################################################################################
###################################################################################################
def _par_fit(power_spectrum, fg):
    """Helper function for running in parallel.

    Parameters
    ----------
    power_spectrum : array
        Single power spectrum to fit (presumably 1d -- TODO confirm
        against the caller's pool.imap over self.power_spectra).
    fg : FOOOFGroup
        Group object (a per-worker copy) whose settings drive the fit.

    Returns
    -------
    The fit results for this spectrum, as returned by fg._get_results().
    """
    fg._fit(power_spectrum=power_spectrum)
    return fg._get_results()
def _progress(iterable, progress, n_to_run):
"""Add a progress bar to an iterable to be processed.
Parameters
----------
iterable : list or iterable
Iterable object to potentially apply progress tracking to.
progress : {None, 'tqdm', 'tqdm.notebook'}
Which kind of progress bar to use. If None, no progress bar is used.
n_to_run : int
Number of jobs to complete.
Returns
-------
pbar : iterable or tqdm object
Iterable object, with tqdm progress functionality, if requested.
Raises
------
ValueError
If the input for `progress` is not understood.
Notes
-----
The explicit `n_to_run` input is required as tqdm requires this in the parallel case.
The `tqdm` object that is potentially returned acts the same as the underlying iterable,
with the addition of printing out progress every time items are requested.
"""
# Check progress specifier is okay
tqdm_options = ['tqdm', 'tqdm.notebook']
if progress is not None and progress not in tqdm_options:
raise ValueError("Progress bar option not understood.")
# Set the display text for the progress bar
pbar_desc = 'Running FOOOFGroup'
# Use a tqdm, progress bar, | |
by side
# Share of coronavirus-related tokens in each POS collection, LEFT vs RIGHT.
coronavirus_in_my_collection_Nouns_Left = [
    word for word in Nouns_LEFT_clean if word in coronavirus]
len(coronavirus_in_my_collection_Nouns_Left)/len(Nouns_LEFT_clean) #0.0012387557935397597
coronavirus_in_my_collection_Nouns_Right = [
    word for word in Nouns_RIGHT_clean if word in coronavirus]
len(coronavirus_in_my_collection_Nouns_Right)/len(Nouns_RIGHT_clean) #0.001673182148777105
coronavirus_in_my_collection_Propernouns_Left = [
    word for word in Propernouns_LEFT_clean if word in coronavirus]
len(coronavirus_in_my_collection_Propernouns_Left)/len(Propernouns_LEFT_clean) #0.00806220021421586
coronavirus_in_my_collection_Propernouns_Right = [
    word for word in Propernouns_RIGHT_clean if word in coronavirus]
len(coronavirus_in_my_collection_Propernouns_Right)/len(Propernouns_RIGHT_clean) #0.010157396094479933
#### 7.4.1 - do I need to exclude pronouns from Nouns lists?
# Hand-built pronoun inventory, grouped by grammatical person.
first_pers_pronouns = ['i', 'we', 'me','us','mine','ours','my','our','myself','ourselves']
second_pers_pronouns = ['you','yours','your','yourself','yourselves']
third_pers_pronouns = ['he', 'she', 'it', 'they', 'her','him','them','hers','his','its','theirs','his','their','herself','himself','itself','themselves']
# NOTE(review): the lists contain duplicates ('his' twice above; 'that',
# 'what', 'everybody' repeated across lists below), so len(pronouns)
# overcounts the distinct inventory.  Duplicates are harmless for the
# membership tests these lists are used for.
other_pronouns = ['all','another','any','anybody','anyone','anything','both','each','either','everybody','everyone','everything','few','many','most','neither','nobody','none','noone','nothing','one','other','others','several','some','somebody','someone','something','such','that','these','this','those','what','whatrever','which','whichever','who','whoever','whom','whomever','whose','as','that','what','whatever','thou','thee','thy','thine','ye','eachother','everybody','naught','nought','somewhat','thyself','whatsoever','whence','where','whereby','wherever']
pronouns = first_pers_pronouns + second_pers_pronouns + third_pers_pronouns + other_pronouns
len(pronouns) #94
# Collect every token from the two collections that appears in the
# pronoun inventory, to gauge how much of each collection is pronouns.
pronouns_in_my_collection_Nouns = [
    word for word in my_collection_Nouns if word in pronouns]
len(pronouns_in_my_collection_Nouns) #929401
len(pronouns_in_my_collection_Nouns)/len(my_collection_Nouns) #0.09835795101580351 --> need to multiverse Nouns?
pronouns_in_my_collection_Propernouns = [
    word for word in my_collection_Propernouns if word in pronouns]
len(pronouns_in_my_collection_Propernouns) #15902
len(pronouns_in_my_collection_Propernouns)/len(my_collection_Propernouns) #0.005827493441229791 --> don't need to multiverse Propernouns
## --> re-plot Nouns without pronouns:
def remove_pronouns(wordlist, pronouns_to_drop=None):
    """Return *wordlist* with all pronoun tokens removed.

    Parameters
    ----------
    wordlist : list of str
        Tokens to filter.
    pronouns_to_drop : iterable of str, optional
        Words to remove; defaults to the module-level `pronouns` list.

    Notes
    -----
    Membership is tested against a set, so the filter is
    O(len(wordlist)) rather than O(len(wordlist) * len(pronouns)) --
    the inputs here are millions of tokens, so scanning the pronoun
    list per token was a real cost.
    """
    drop = set(pronouns if pronouns_to_drop is None else pronouns_to_drop)
    return [word for word in wordlist if word not in drop]
len(Nouns_LEFT_clean) #5665362
len(Nouns_RIGHT_clean) #3783808
# Strip pronouns and compare list sizes before/after.
Nouns_LEFT_clean_nopronouns = remove_pronouns(Nouns_LEFT_clean)
len(Nouns_LEFT_clean_nopronouns) #5074947
Nouns_RIGHT_clean_nopronouns = remove_pronouns(Nouns_RIGHT_clean)
len(Nouns_RIGHT_clean_nopronouns) #3444822
# Re-plot the 30 most common nouns per side, pronoun-free.
counts_Nouns_LEFT_clean_nopronouns = collections.Counter(Nouns_LEFT_clean_nopronouns)
len(counts_Nouns_LEFT_clean_nopronouns) #116784
counts_Nouns_LEFT_clean_nopronouns_30 = pd.DataFrame(counts_Nouns_LEFT_clean_nopronouns.most_common(30), columns=['words', 'count'])
counts_Nouns_LEFT_clean_nopronouns_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_LEFT_clean_nopronouns_30.sort_values(by='count').plot.barh(x='words',
                                                                        y='count',
                                                                        ax=ax,
                                                                        color="red")
ax.set_title("Common Nouns Found in LEFT Tweets (Including All non-pronoun Words without 's|'ll|'d|'ve|'m|'re, '#' & emojis, lowecased)")
plt.savefig('RESULTS/WordCounts/Nouns_LEFT_clean_3_mostcommon_nopronouns.png')
counts_Nouns_RIGHT_clean_nopronouns = collections.Counter(Nouns_RIGHT_clean_nopronouns)
len(counts_Nouns_RIGHT_clean_nopronouns) #93220
counts_Nouns_RIGHT_clean_nopronouns_30 = pd.DataFrame(counts_Nouns_RIGHT_clean_nopronouns.most_common(30), columns=['words', 'count'])
counts_Nouns_RIGHT_clean_nopronouns_30.head()
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_RIGHT_clean_nopronouns_30.sort_values(by='count').plot.barh(x='words',
                                                                         y='count',
                                                                         ax=ax,
                                                                         color="blue")
ax.set_title("Common Nouns Found in RIGHT Tweets (Including All non-pronoun Words without 's|'ll|'d|'ve|'m|'re, '#' & emojis, lowecased)")
plt.savefig('RESULTS/WordCounts/Nouns_RIGHT_clean_3_mostcommon_nopronouns.png')
#### 7.5 - see how big a proportion of my Propernouns/Nouns emojis constitute
# One character class covering the common emoji / pictograph ranges.
# Fix: the dingbats range U+2702-U+27B0 was listed twice in the original
# pattern; the duplicate entry is redundant inside a character class and
# has been removed (matching behaviour is unchanged).
emoji_pattern = re.compile("["
                           u"\U0001F600-\U0001F64F"  # emoticons
                           u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                           u"\U0001F680-\U0001F6FF"  # transport & map symbols
                           u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                           u"\U00002500-\U00002BEF"  # chinese char
                           u"\U00002702-\U000027B0"  # dingbats
                           u"\U000024C2-\U0001F251"
                           u"\U0001f926-\U0001f937"
                           u"\U00010000-\U0010ffff"
                           u"\u2640-\u2642"
                           u"\u2600-\u2B55"
                           u"\u200d"  # zero-width joiner
                           u"\u23cf"
                           u"\u23e9"
                           u"\u231a"
                           u"\ufe0f"  # variation selector-16
                           u"\u3030"
                           "]+", flags=re.UNICODE)
my_collection_Propernouns_uncleaned = Propernouns_LEFT + Propernouns_RIGHT
#need to use the uncleaned collection, because in the cleaned version I remove these emojis
emojis_in_Propernouns = [word for word in my_collection_Propernouns_uncleaned
                         if emoji_pattern.match(word)]
len(emojis_in_Propernouns) #46959
len(emojis_in_Propernouns)/len(my_collection_Propernouns_uncleaned) #0.01698819881101782
## --> emojis constitute only 1.7% of this collection
## --> maybe should still try to drop the entire tag if it contains an emoji - as the words followed by them are rarely propernouns
# BUG FIX: the original concatenated Nouns_LEFT (uncleaned) with
# Nouns_RIGHT_clean (cleaned).  Per the comment above, both sides must be
# UNCLEANED for the emoji proportion to be meaningful, so use Nouns_RIGHT.
my_collection_Nouns_uncleaned = Nouns_LEFT + Nouns_RIGHT
emojis_in_Nouns = [word for word in my_collection_Nouns_uncleaned
                   if emoji_pattern.match(word)]
len(emojis_in_Nouns) #6229 (recorded before the fix; will differ now)
len(emojis_in_Nouns)/len(my_collection_Nouns_uncleaned) #0.000658914835283985 (pre-fix value)
#######################################################
########### 8 - calculations ##########################
#######################################################
#P(word use | political affiliation) = #times word occurs in tweets of followers of this side / count of all words used in tweets of followers of this side
df_LEFT = pd.read_csv('RESULTS_LEFT_noun_frequency_2.csv', index_col=0)
df_LEFT.head()
total_tags_LEFT = df_LEFT['total_tags'].sum()
total_tags_LEFT #33017889
df_RIGHT = pd.read_csv('RESULTS_RIGHT_noun_frequency_2.csv', index_col=0)
df_LEFT.head()  # NOTE(review): probably meant df_RIGHT.head() -- copy-paste slip
total_tags_RIGHT = df_RIGHT['total_tags'].sum()
total_tags_RIGHT #22513236
# Per-word usage counts and proportions for each POS/side combination,
# written out as CSVs.  proportion = count(word) / total tag count for side.

#Nouns, LEFT
counts_Nouns_LEFT_clean = collections.Counter(Nouns_LEFT_clean) #dictionary
len(counts_Nouns_LEFT_clean) #116856 - same as len(set(Nouns_LEFT_clean))
data = [[word, counts_Nouns_LEFT_clean[word], counts_Nouns_LEFT_clean[word] / total_tags_LEFT]
        for word in set(Nouns_LEFT_clean)]  # one row per unique word
df_Nouns_proportions_LEFT = pd.DataFrame(columns=['word', 'count', 'proportion'], data=data)
df_Nouns_proportions_LEFT.to_csv('df_Nouns_proportions_LEFT.csv')

#Nouns, RIGHT
counts_Nouns_RIGHT_clean = collections.Counter(Nouns_RIGHT_clean)
len(counts_Nouns_RIGHT_clean) #93293
data = [[word, counts_Nouns_RIGHT_clean[word], counts_Nouns_RIGHT_clean[word] / total_tags_RIGHT]
        for word in set(Nouns_RIGHT_clean)]
df_Nouns_proportions_RIGHT = pd.DataFrame(columns=['word', 'count', 'proportion'], data=data)
df_Nouns_proportions_RIGHT.to_csv('df_Nouns_proportions_RIGHT.csv')

#Propernouns, LEFT
counts_Propernouns_LEFT_clean = collections.Counter(Propernouns_LEFT_clean) #dictionary
len(counts_Propernouns_LEFT_clean) #166338 - same as len(set(Propernouns_LEFT_clean))
data = [[word, counts_Propernouns_LEFT_clean[word], counts_Propernouns_LEFT_clean[word] / total_tags_LEFT]
        for word in set(Propernouns_LEFT_clean)]
df_Propernouns_proportions_LEFT = pd.DataFrame(columns=['word', 'count', 'proportion'], data=data)
df_Propernouns_proportions_LEFT.shape
df_Propernouns_proportions_LEFT.to_csv('df_Propernouns_proportions_LEFT.csv')

#Propernouns, RIGHT
counts_Propernouns_RIGHT_clean = collections.Counter(Propernouns_RIGHT_clean) #dictionary
len(counts_Propernouns_RIGHT_clean) #146319
data = [[word, counts_Propernouns_RIGHT_clean[word], counts_Propernouns_RIGHT_clean[word] / total_tags_RIGHT]
        for word in set(Propernouns_RIGHT_clean)]
df_Propernouns_proportions_RIGHT = pd.DataFrame(columns=['word', 'count', 'proportion'], data=data)
df_Propernouns_proportions_RIGHT.to_csv('df_Propernouns_proportions_RIGHT.csv')
#### re-import data
df_Propernouns_proportions_LEFT = pd.read_csv('df_Propernouns_proportions_LEFT.csv', index_col=0)
df_Propernouns_proportions_RIGHT = pd.read_csv('df_Propernouns_proportions_RIGHT.csv', index_col=0)
df_Nouns_proportions_LEFT = pd.read_csv('df_Nouns_proportions_LEFT.csv', index_col=0)
df_Nouns_proportions_RIGHT = pd.read_csv('df_Nouns_proportions_RIGHT.csv', index_col=0)
df_Propernouns_proportions_LEFT.shape #(166338, 3)
df_Propernouns_proportions_RIGHT.shape #(146319, 3)
df_Nouns_proportions_LEFT.shape #(116856, 3)
df_Nouns_proportions_RIGHT.shape #(93293, 3)
# Keep only words seen more than 10 times.  (The original comment said
# "drop words with counts<20", which did not match the `>10` filter
# actually applied below.)
df_Propernouns_proportions_LEFT = df_Propernouns_proportions_LEFT[df_Propernouns_proportions_LEFT['count']>10]
df_Propernouns_proportions_LEFT.shape #(13378, 3)
df_Propernouns_proportions_RIGHT = df_Propernouns_proportions_RIGHT[df_Propernouns_proportions_RIGHT['count']>10]
df_Propernouns_proportions_RIGHT.shape #(10943, 3)
df_Nouns_proportions_LEFT = df_Nouns_proportions_LEFT[df_Nouns_proportions_LEFT['count']>10]
df_Nouns_proportions_LEFT.shape #(17368, 3)
df_Nouns_proportions_RIGHT = df_Nouns_proportions_RIGHT[df_Nouns_proportions_RIGHT['count']>10]
df_Nouns_proportions_RIGHT.shape #(14468, 3)
#### NOW display them by highest proportion first
df_Propernouns_proportions_LEFT.head()
df_Propernouns_proportions_LEFT_sorted = df_Propernouns_proportions_LEFT.sort_values(by = 'count', ascending=False)
df_Propernouns_proportions_LEFT_sorted.head()
df_Propernouns_proportions_RIGHT.head()
df_Propernouns_proportions_RIGHT_sorted = df_Propernouns_proportions_RIGHT.sort_values(by = 'count', ascending=False)
df_Propernouns_proportions_LEFT_sorted.head()  # NOTE(review): likely meant ..._RIGHT_sorted.head()
#re-set index as 'word' so I can loop over specific words?
# FIX: the original `df_Propernouns_LEFT-RIGHT = pd.DataFrame()` is a
# SyntaxError -- '-' cannot appear in a Python identifier -- and the loop
# that followed wrote into `.values` of an empty frame (an IndexError).
# Build the frame in one shot instead: one column of words and one column
# holding the LEFT proportion (intended as the basis for a LEFT-RIGHT
# comparison -- TODO confirm whether the RIGHT proportion should be
# subtracted here once both sides are aligned by word).
df_Propernouns_LEFT_RIGHT = pd.DataFrame({
    'word': df_Propernouns_proportions_LEFT['word'].values,
    'LEFT-RIGHT': df_Propernouns_proportions_LEFT['proportion'].values,
})
################################################################################
#### 9 - trying to understand why centrality is related to noun/propernoun use :
################################################################################
## --> analyse words used by 10 MOST hubs-central users
#find 10 most hubs-central users in df
# NOTE: this reuses / overwrites the df_LEFT and df_RIGHT names from
# section 8 with a different dataset (network centrality scores).
df = pd.read_csv('RESULTS_df_multiverse_DIRECTED.csv', index_col=0)
df.head()
df.shape
df_LEFT = df[df['side']=='LEFT']
df_LEFT.shape
df_LEFT.head()
df_RIGHT = df[df['side']=='RIGHT']
df_RIGHT.shape
df_LEFT_sorted = df_LEFT.sort_values(by='hubs', ascending=False) #most central at the top
df_LEFT_sorted = df_LEFT_sorted.head(10)
LEFT_central_ids = list(df_LEFT_sorted['user_id_str'])
LEFT_central_ids
#now manually save these ids into 'ark-tweet-nlp-0.3.2/outputs_conll/LEFT/most_central'
df_RIGHT_sorted = df_RIGHT.sort_values(by='hubs', ascending=False) #most central at the top
df_RIGHT_sorted = df_RIGHT_sorted.head(10)
RIGHT_central_ids = list(df_RIGHT_sorted['user_id_str'])
RIGHT_central_ids
#now manually save these ids into 'ark-tweet-nlp-0.3.2/outputs_conll/RIGHT/most_central'
## 1. LEFT
# Parse the CoNLL-style tagger output for the 10 most central users of
# one side, collecting proper-noun and common-noun tokens.  The script is
# run once per side, manually editing the path and target lists (see the
# "CHANGE to LEFT/RIGHT" markers).
os.chdir(os.path.expanduser("~"))
os.chdir('ark-tweet-nlp-0.3.2/outputs_conll/LEFT/most_central') #do this once
errors = []
Propernoun_tags = ['^', 'Z', 'M']  # ARK tagger proper-noun tag set
Noun_tags = ['N', 'S', 'L']        # ARK tagger common-noun tag set
Propernouns_LEFT_central = [] #skip this at the second run
Nouns_LEFT_central = [] #skip this at the second run
Propernouns_RIGHT_central = []
Nouns_RIGHT_central = []
counter = 0
for txt_file in glob.glob("*.txt"):
    counter+=1
    #extract user_id from file name, e.g. 'tweets_<id>.txt'
    user_id = txt_file.split("tweets_")[1]
    user_id = user_id.split(".txt")[0]
    with open(txt_file, 'r') as f:
        try:
            # Tweets are separated by blank lines; each line within a
            # tweet is a tab-separated (token, tag, confidence) triple.
            # NOTE(review): the try wraps the whole per-file loop, so one
            # malformed line skips the REST of that file's tweets, not
            # just the bad line.
            for tweet in f.read().split('\n\n'): #for every tweet from this user
                lines = tweet.split('\n')
                lines_split = [x.split('\t') for x in lines]
                for triple in lines_split:
                    if triple[1] in Propernoun_tags:
                        Propernouns_LEFT_central.append(triple[0]) #CHANGE to LEFT/RIGHT
                    elif triple[1] in Noun_tags:
                        Nouns_LEFT_central.append(triple[0]) #CHANGE to LEFT/RIGHT
        except IndexError as e:
            errors.append({user_id: {tweet: e}})
    print(f'finished file {counter}')
len(Propernouns_LEFT_central) #363
len(errors) #10 - 1 for each file - the trailing blank line has no tag field
len(Nouns_LEFT_central) #1668
Propernouns_LEFT_central[0]
##NOW re-run this with RIGHT
len(Propernouns_RIGHT_central) #431
len(errors) #10 - 1 for each file - the trailing blank line has no tag field
len(Nouns_RIGHT_central) #1546
Nouns_RIGHT_central[0]
#now clean these
def clean_wordlist(wordlist, emoji_remover=None):
    """Normalise a token list.

    Lowercases, strips whitespace, drops '#', removes possessive /
    contraction clitics ('s, 'll, 'd, 've, 'm, 're), strips emojis, and
    drops tokens that end up empty.

    Parameters
    ----------
    wordlist : list of str
        Raw tokens.
    emoji_remover : callable, optional
        Function mapping a token to a token with emojis removed;
        defaults to the module-level `remove_emoji` defined earlier.

    Fixes vs. the original: the comprehension variable no longer shadows
    the stdlib `string` module, and the clitic regex is compiled once
    instead of on every token.
    """
    strip_emoji = remove_emoji if emoji_remover is None else emoji_remover
    clitic_re = re.compile(r"((’|')(s|ll|d|ve|m|re))")
    cleaned = [word.lower().strip().replace("#", "") for word in wordlist]
    cleaned = [clitic_re.sub("", word) for word in cleaned]
    cleaned = [strip_emoji(word) for word in cleaned]
    # Drop empty strings left behind by emoji-only tokens.
    return list(filter(None, cleaned))
Propernouns_LEFT_central_clean = clean_wordlist(Propernouns_LEFT_central)
Nouns_LEFT_central_clean = clean_wordlist(Nouns_LEFT_central)
Propernouns_RIGHT_central_clean = clean_wordlist(Propernouns_RIGHT_central)
Nouns_RIGHT_central_clean = clean_wordlist(Nouns_RIGHT_central)
len(Propernouns_LEFT_central_clean) #363 --> 362
# NOTE(review): the next three checks inspect the UNCLEANED lists, though
# the "-->" comments suggest the cleaned ones were intended.
len(Nouns_LEFT_central) #1668 --> 1668
len(Propernouns_RIGHT_central) #431 --> 431
len(Nouns_RIGHT_central) #1546 --> 1546
###visualise & save most common words for these 10 users
os.chdir(os.path.expanduser("~"))
#Propernouns
counts_Propernouns_LEFT_c = collections.Counter(Propernouns_LEFT_central_clean)
counts_Propernouns_LEFT_c_30 = pd.DataFrame(counts_Propernouns_LEFT_c.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Propernouns_LEFT_c_30.sort_values(by='count').plot.barh(x='words',
                                                               y='count',
                                                               ax=ax,
                                                               color="red")
ax.set_title("Common Propernouns Found in Tweets of 10 most LEFT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Propernouns_LEFT_10mostcentral_mostcommon_clean.png')
counts_Propernouns_RIGHT_c = collections.Counter(Propernouns_RIGHT_central_clean)
counts_Propernouns_RIGHT_c_30 = pd.DataFrame(counts_Propernouns_RIGHT_c.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Propernouns_RIGHT_c_30.sort_values(by='count').plot.barh(x='words',
                                                                y='count',
                                                                ax=ax,
                                                                color="blue")
ax.set_title("Common Propernouns Found in Tweets of 10 most RIGHT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Propernouns_RIGHT_10mostcentral_mostcommon_clean.png')
#Nouns
counts_Nouns_LEFT_c = collections.Counter(Nouns_LEFT_central_clean)
counts_Nouns_LEFT_c_30 = pd.DataFrame(counts_Nouns_LEFT_c.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_LEFT_c_30.sort_values(by='count').plot.barh(x='words',
                                                         y='count',
                                                         ax=ax,
                                                         color="red")
ax.set_title("Common Nouns Found in Tweets of 10 most LEFT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Nouns_LEFT_10mostcentral_mostcommon_clean.png')
counts_Nouns_RIGHT_c = collections.Counter(Nouns_RIGHT_central_clean)
counts_Nouns_RIGHT_c_30 = pd.DataFrame(counts_Nouns_RIGHT_c.most_common(30), columns=['words', 'count'])
fig, ax = plt.subplots(figsize=(8, 8))
counts_Nouns_RIGHT_c_30.sort_values(by='count').plot.barh(x='words',
                                                          y='count',
                                                          ax=ax,
                                                          color="blue")
ax.set_title("Common Nouns Found in Tweets of 10 most RIGHT-central users (Including All Words cleaned)")
plt.savefig('RESULTS/WordCounts/Nouns_RIGHT_10mostcentral_mostcommon_clean.png')
## --> analyse words used by 10 LEAST hubs-central users
#find 10 least hubs-central users in df
#1. re-import df
df = pd.read_csv('RESULTS_df_multiverse_DIRECTED.csv', index_col=0)
df.shape
df_LEFT = df[df['side']=='LEFT']
df_RIGHT = df[df['side']=='RIGHT']
df_LEFT_sorted = df_LEFT.sort_values(by='hubs', ascending=False) #most central at the top
df_RIGHT_sorted = df_RIGHT.sort_values(by='hubs', ascending=False) #most central at the top
# tail(10) of a descending sort = the 10 LEAST central users.
df_LEFT_sorted = df_LEFT_sorted.tail(10)
LEFT_leastcentral_ids = list(df_LEFT_sorted['user_id_str'])
LEFT_leastcentral_ids
#now manually save these ids into 'ark-tweet-nlp-0.3.2/outputs_conll/LEFT/least_central'
df_RIGHT_sorted = df_RIGHT_sorted.tail(10)
RIGHT_leastcentral_ids = list(df_RIGHT_sorted['user_id_str'])
RIGHT_leastcentral_ids
#now manually save these ids into 'ark-tweet-nlp-0.3.2/outputs_conll/RIGHT/least_central'
## 1. LEFT; 2. RIGHT
# Same extraction loop as for the most-central users above: run once per
# side, manually editing the path and the target lists.
os.chdir(os.path.expanduser("~"))
os.chdir('ark-tweet-nlp-0.3.2/outputs_conll/LEFT/least_central') #do this once
errors = []
Propernoun_tags = ['^', 'Z', 'M']
Noun_tags = ['N', 'S', 'L']
Propernouns_LEFT_leastcentral = []
Nouns_LEFT_leastcentral = []
Propernouns_RIGHT_leastcentral = []
Nouns_RIGHT_leastcentral = []
counter = 0
for txt_file in glob.glob("*.txt"):
    counter+=1
    #extract user_id from file name
    user_id = txt_file.split("tweets_")[1]
    user_id = user_id.split(".txt")[0]
    with open(txt_file, 'r') as f:
        try:
            for tweet in f.read().split('\n\n'): #for every tweet from this user
                lines = tweet.split('\n') #each line is a tab-separated (token, tag, confidence) triple
                lines_split = [x.split('\t') for x in lines]
                for triple in lines_split:
                    if triple[1] in Propernoun_tags:
                        Propernouns_LEFT_leastcentral.append(triple[0])#CHANGE to LEFT/RIGHT
                    elif triple[1] in Noun_tags:
                        Nouns_LEFT_leastcentral.append(triple[0]) #CHANGE to LEFT/RIGHT
        except IndexError as e:
            errors.append({user_id: {tweet: e}})
    print(f'finished file {counter}')
len(Propernouns_LEFT_leastcentral) #1139
len(errors) #10 - 1 for each file - the trailing blank line has no tag field
len(Nouns_LEFT_leastcentral) #3375
Propernouns_LEFT_leastcentral[0] #'Newhaven'
len(Propernouns_RIGHT_leastcentral) #894
len(errors) #10 - 1 for each file - the trailing blank line has no tag field
len(Nouns_RIGHT_leastcentral) #3424
#now clean these using functions defined above
Propernouns_LEFT_leastcentral_clean = clean_wordlist(Propernouns_LEFT_leastcentral)
Nouns_LEFT_leastcentral_clean = clean_wordlist(Nouns_LEFT_leastcentral)
Propernouns_RIGHT_leastcentral_clean = clean_wordlist(Propernouns_RIGHT_leastcentral)
Nouns_RIGHT_leastcentral_clean = clean_wordlist(Nouns_RIGHT_leastcentral)
len(Propernouns_LEFT_leastcentral_clean) #1139 --> 1112
len(Nouns_LEFT_leastcentral_clean) #3375 --> 3370
len(Propernouns_RIGHT_leastcentral_clean) # 894 --> 683
len(Nouns_RIGHT_leastcentral_clean) #3424 --> 3090
###visualise & | |
"
"orders_1.user_id AS orders_1_user_id, "
"orders_1.address_id AS orders_1_address_id, "
"orders_1.description AS orders_1_description, "
"orders_1.isopen AS orders_1_isopen, items_1.id AS items_1_id, "
"items_1.description AS items_1_description FROM users "
"JOIN orders ON users.id = orders.user_id, "
"orders AS orders_1 LEFT OUTER JOIN (order_items AS order_items_1 "
"JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
"ON orders_1.id = order_items_1.order_id ORDER BY items_1.id",
)
class SubqueryTest(fixtures.MappedTest):
    # Tests eager loading when a column_property is driven by a
    # (possibly labeled) correlated scalar subquery.
    run_deletes = "each"
    @classmethod
    def define_tables(cls, metadata):
        # users_table 1--* tags_table; each tag carries two float scores.
        Table(
            "users_table",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(16)),
        )
        Table(
            "tags_table",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("user_id", Integer, ForeignKey("users_table.id")),
            Column("score1", sa.Float),
            Column("score2", sa.Float),
        )
    # (labeled, labelname): explicit label, anonymous label, no label.
    @testing.combinations(
        (True, "score"),
        (True, None),
        (False, None),
    )
    def test_label_anonymizing(self, labeled, labelname):
        """Eager loading works with subqueries with labels,

        Even if an explicit labelname which conflicts with a label on the
        parent.

        There's not much reason a column_property() would ever need to have a
        label of a specific name (and they don't even need labels these days),
        unless you'd like the name to line up with a name that you may be
        using for a straight textual statement used for loading instances of
        that type.
        """
        tags_table, users_table = (
            self.tables.tags_table,
            self.tables.users_table,
        )
        class User(fixtures.ComparableEntity):
            # Python-side equivalent of the mapped query_score, used below
            # to verify the SQL-computed value.
            @property
            def prop_score(self):
                return sum([tag.prop_score for tag in self.tags])
        class Tag(fixtures.ComparableEntity):
            @property
            def prop_score(self):
                return self.score1 * self.score2
        # SQL-side expressions mirroring the properties above; user_score
        # is a subquery correlated to users_table.
        tag_score = tags_table.c.score1 * tags_table.c.score2
        user_score = sa.select(
            sa.func.sum(tags_table.c.score1 * tags_table.c.score2)
        ).where(
            tags_table.c.user_id == users_table.c.id,
        )
        if labeled:
            # A None labelname produces an anonymous label.
            tag_score = tag_score.label(labelname)
            user_score = user_score.label(labelname)
        else:
            user_score = user_score.scalar_subquery()
        mapper(
            Tag,
            tags_table,
            properties={"query_score": sa.orm.column_property(tag_score)},
        )
        mapper(
            User,
            users_table,
            properties={
                "tags": relationship(Tag, backref="user", lazy="joined"),
                "query_score": sa.orm.column_property(user_score),
            },
        )
        session = fixture_session()
        session.add(
            User(
                name="joe",
                tags=[
                    Tag(score1=5.0, score2=3.0),
                    Tag(score1=55.0, score2=1.0),
                ],
            )
        )
        session.add(
            User(
                name="bar",
                tags=[
                    Tag(score1=5.0, score2=4.0),
                    Tag(score1=50.0, score2=1.0),
                    Tag(score1=15.0, score2=2.0),
                ],
            )
        )
        session.flush()
        session.expunge_all()
        # The SQL-computed score must agree with the Python-side value.
        for user in session.query(User).all():
            eq_(user.query_score, user.prop_score)
        def go():
            u = session.query(User).filter_by(name="joe").one()
            eq_(u.query_score, u.prop_score)
        # Eagerly-joined tags + column_property should load in ONE query.
        self.assert_sql_count(testing.db, go, 1)
class CorrelatedSubqueryTest(fixtures.MappedTest):
    """tests for #946, #947, #948.

    The "users" table is joined to "stuff", and the relationship
    would like to pull only the "stuff" entry with the most recent date.

    Exercises a variety of ways to configure this.
    """

    # another argument for joinedload learning about inner joins
    __requires__ = ("correlated_outer_joins",)

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "users",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(50)),
        )
        Table(
            "stuff",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("date", Date),
            Column("user_id", Integer, ForeignKey("users.id")),
        )

    @classmethod
    def insert_data(cls, connection):
        # Three users; user1 and user3 have several "stuff" rows so the
        # most-recent-date selection is meaningful (rows 2, 4, 5 win).
        stuff, users = cls.tables.stuff, cls.tables.users
        connection.execute(
            users.insert(),
            [
                {"id": 1, "name": "user1"},
                {"id": 2, "name": "user2"},
                {"id": 3, "name": "user3"},
            ],
        )
        connection.execute(
            stuff.insert(),
            [
                {"id": 1, "user_id": 1, "date": datetime.date(2007, 10, 15)},
                {"id": 2, "user_id": 1, "date": datetime.date(2007, 12, 15)},
                {"id": 3, "user_id": 1, "date": datetime.date(2007, 11, 15)},
                {"id": 4, "user_id": 2, "date": datetime.date(2008, 1, 15)},
                {"id": 5, "user_id": 3, "date": datetime.date(2007, 6, 15)},
                {"id": 6, "user_id": 3, "date": datetime.date(2007, 3, 15)},
            ],
        )

    # The eight tests below drive _do_test with every combination of
    # (labeled, ondate, aliasstuff).
    def test_labeled_on_date_noalias(self):
        self._do_test(True, True, False)

    def test_scalar_on_date_noalias(self):
        self._do_test(False, True, False)

    def test_labeled_on_limitid_noalias(self):
        self._do_test(True, False, False)

    def test_scalar_on_limitid_noalias(self):
        self._do_test(False, False, False)

    def test_labeled_on_date_alias(self):
        self._do_test(True, True, True)

    def test_scalar_on_date_alias(self):
        self._do_test(False, True, True)

    def test_labeled_on_limitid_alias(self):
        self._do_test(True, False, True)

    def test_scalar_on_limitid_alias(self):
        self._do_test(False, False, True)

    def _do_test(self, labeled, ondate, aliasstuff):
        """Configure the "most recent stuff" relationship per the flags,
        then verify joined eager loading emits the expected statement
        counts and results.

        :param labeled: label the correlated subquery vs. scalar_subquery()
        :param ondate: join on MAX(date) vs. ORDER BY date DESC LIMIT 1
        :param aliasstuff: alias the "stuff" table inside the subquery
        """
        stuff, users = self.tables.stuff, self.tables.users

        class User(fixtures.ComparableEntity):
            pass

        class Stuff(fixtures.ComparableEntity):
            pass

        mapper(Stuff, stuff)
        if aliasstuff:
            salias = stuff.alias()
        else:
            # if we don't alias the 'stuff' table within the correlated
            # subquery,
            # it gets aliased in the eager load along with the "stuff" table
            # to "stuff_1".
            # but it's a scalar subquery, and this doesn't actually matter
            salias = stuff
        if ondate:
            # the more 'relational' way to do this, join on the max date
            stuff_view = (
                select(func.max(salias.c.date).label("max_date"))
                .where(salias.c.user_id == users.c.id)
                .correlate(users)
            )
        else:
            # a common method with the MySQL crowd, which actually might
            # perform better in some
            # cases - subquery does a limit with order by DESC, join on the id
            stuff_view = (
                select(salias.c.id)
                .where(salias.c.user_id == users.c.id)
                .correlate(users)
                .order_by(salias.c.date.desc())
                .limit(1)
            )
        # can't win on this one
        if testing.against("mssql"):
            operator = operators.in_op
        else:
            operator = operators.eq
        if labeled:
            stuff_view = stuff_view.label("foo")
            operator = operators.eq
        else:
            stuff_view = stuff_view.scalar_subquery()
        if ondate:
            mapper(
                User,
                users,
                properties={
                    "stuff": relationship(
                        Stuff,
                        primaryjoin=and_(
                            users.c.id == stuff.c.user_id,
                            operator(stuff.c.date, stuff_view),
                        ),
                    )
                },
            )
        else:
            mapper(
                User,
                users,
                properties={
                    "stuff": relationship(
                        Stuff,
                        primaryjoin=and_(
                            users.c.id == stuff.c.user_id,
                            operator(stuff.c.id, stuff_view),
                        ),
                    )
                },
            )
        sess = fixture_session()

        def go():
            eq_(
                sess.query(User)
                .order_by(User.name)
                .options(joinedload(User.stuff))
                .all(),
                [
                    User(name="user1", stuff=[Stuff(id=2)]),
                    User(name="user2", stuff=[Stuff(id=4)]),
                    User(name="user3", stuff=[Stuff(id=5)]),
                ],
            )

        # joinedload: users + newest stuff in a single statement
        self.assert_sql_count(testing.db, go, 1)
        sess = fixture_session()

        def go():
            eq_(
                sess.query(User).order_by(User.name).first(),
                User(name="user1", stuff=[Stuff(id=2)]),
            )

        # no joinedload option: lazy load adds a second statement
        self.assert_sql_count(testing.db, go, 2)
        sess = fixture_session()

        def go():
            eq_(
                sess.query(User)
                .order_by(User.name)
                .options(joinedload(User.stuff))
                .first(),
                User(name="user1", stuff=[Stuff(id=2)]),
            )

        self.assert_sql_count(testing.db, go, 1)
        sess = fixture_session()

        def go():
            eq_(
                sess.query(User)
                .filter(User.id == 2)
                .options(joinedload(User.stuff))
                .one(),
                User(name="user2", stuff=[Stuff(id=4)]),
            )

        self.assert_sql_count(testing.db, go, 1)
class CyclicalInheritingEagerTestOne(fixtures.MappedTest):
    """Two polymorphic hierarchies whose subclasses reference each other
    through mutually joined-eager relationships; exercises a historical
    endless-loop condition in eager load setup.
    """

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "t1",
            metadata,
            Column(
                "c1", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("c2", String(30)),
            Column("type", String(30)),
        )
        Table(
            "t2",
            metadata,
            Column(
                "c1", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("c2", String(30)),
            Column("type", String(30)),
            # NOTE: the column is deliberately named "t1.id" (with a dot)
            Column("t1.id", Integer, ForeignKey("t1.c1")),
        )

    def test_basic(self):
        t2, t1 = self.tables.t2, self.tables.t1

        class T(object):
            pass

        class SubT(T):
            pass

        class T2(object):
            pass

        class SubT2(T2):
            pass

        # single-table inheritance on both sides; SubT <-> SubT2 are linked
        # by a bidirectional joined-eager relationship.
        mapper(T, t1, polymorphic_on=t1.c.type, polymorphic_identity="t1")
        mapper(
            SubT,
            None,
            inherits=T,
            polymorphic_identity="subt1",
            properties={
                "t2s": relationship(
                    SubT2,
                    lazy="joined",
                    backref=sa.orm.backref("subt", lazy="joined"),
                )
            },
        )
        mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity="t2")
        mapper(SubT2, None, inherits=T2, polymorphic_identity="subt2")
        # testing a particular endless loop condition in eager load setup
        fixture_session().query(SubT).all()
class CyclicalInheritingEagerTestTwo(
    fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
    """Joined-table inheritance where two subclasses of the same base
    reference each other; verifies wildcard joined eager loading both at
    the SQL-compilation level and against a live database.
    """

    __dialect__ = "default"

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class PersistentObject(Base):
            __tablename__ = "persistent"
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )

        class Movie(PersistentObject):
            __tablename__ = "movie"
            id = Column(Integer, ForeignKey("persistent.id"), primary_key=True)
            director_id = Column(Integer, ForeignKey("director.id"))
            title = Column(String(50))

        class Director(PersistentObject):
            __tablename__ = "director"
            id = Column(Integer, ForeignKey("persistent.id"), primary_key=True)
            movies = relationship("Movie", foreign_keys=Movie.director_id)
            name = Column(String(50))

    def test_from_subclass(self):
        # joinedload("*") from the Director subclass must alias the
        # (persistent JOIN movie) target on the right side.
        Director = self.classes.Director
        s = fixture_session()
        self.assert_compile(
            s.query(Director).options(joinedload("*")),
            "SELECT director.id AS director_id, "
            "persistent.id AS persistent_id, "
            "director.name AS director_name, movie_1.id AS movie_1_id, "
            "persistent_1.id AS persistent_1_id, "
            "movie_1.director_id AS movie_1_director_id, "
            "movie_1.title AS movie_1_title "
            "FROM persistent JOIN director ON persistent.id = director.id "
            "LEFT OUTER JOIN "
            "(persistent AS persistent_1 JOIN movie AS movie_1 "
            "ON persistent_1.id = movie_1.id) "
            "ON director.id = movie_1.director_id",
        )

    def test_integrate(self):
        Director = self.classes.Director
        Movie = self.classes.Movie
        session = Session(testing.db)
        rscott = Director(name="<NAME>")
        alien = Movie(title="Alien")
        brunner = Movie(title="Blade Runner")
        rscott.movies.append(brunner)
        rscott.movies.append(alien)
        session.add_all([rscott, alien, brunner])
        session.commit()
        close_all_sessions()
        # the eager load should pull the director plus both movies,
        # i.e. exactly three objects into the session
        self.d = session.query(Director).options(joinedload("*")).first()
        assert len(list(session)) == 3
class CyclicalInheritingEagerTestThree(
    fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
    """A self-referential joined-inheritance hierarchy: Director has two
    FKs to its own base table, requiring an explicit inherit_condition;
    checks compiled SQL with and without eager-load depth.
    """

    __dialect__ = "default"
    # compile-only tests; no tables are actually created
    run_create_tables = None

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class PersistentObject(Base):
            __tablename__ = "persistent"
            id = Column(
                Integer, primary_key=True, test_needs_autoincrement=True
            )
            __mapper_args__ = {"with_polymorphic": "*"}

        class Director(PersistentObject):
            __tablename__ = "director"
            id = Column(Integer, ForeignKey("persistent.id"), primary_key=True)
            # second FK to "persistent", hence the explicit
            # inherit_condition below to disambiguate the inheritance join
            other_id = Column(Integer, ForeignKey("persistent.id"))
            name = Column(String(50))
            other = relationship(
                PersistentObject,
                primaryjoin=other_id == PersistentObject.id,
                lazy=False,
            )
            __mapper_args__ = {"inherit_condition": id == PersistentObject.id}

    def test_gen_query_nodepth(self):
        PersistentObject = self.classes.PersistentObject
        sess = fixture_session()
        self.assert_compile(
            sess.query(PersistentObject),
            "SELECT persistent.id AS persistent_id, "
            "director.id AS director_id,"
            " director.other_id AS director_other_id, "
            "director.name AS director_name FROM persistent "
            "LEFT OUTER JOIN director ON director.id = persistent.id",
        )

    def test_gen_query_depth(self):
        PersistentObject = self.classes.PersistentObject
        Director = self.classes.Director
        sess = fixture_session()
        self.assert_compile(
            sess.query(PersistentObject).options(joinedload(Director.other)),
            "SELECT persistent.id AS persistent_id, "
            "director.id AS director_id, "
            "director.other_id AS director_other_id, "
            "director.name AS director_name, persistent_1.id AS "
            "persistent_1_id, director_1.id AS director_1_id, "
            "director_1.other_id AS director_1_other_id, "
            "director_1.name AS director_1_name "
            "FROM persistent LEFT OUTER JOIN director "
            "ON director.id = persistent.id "
            "LEFT OUTER JOIN (persistent AS persistent_1 "
            "LEFT OUTER JOIN director AS director_1 ON "
            "director_1.id = persistent_1.id) "
            "ON director.other_id = persistent_1.id",
        )
class LoadFromJoinedInhWUnion(
fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
"""test for #6595"""
__dialect__ = "default"
run_create_tables = None
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Tag(Base):
__tablename__ = "tags"
id = Column(Integer, primary_key=True)
name = Column(String(50), primary_key=True)
sample_id = Column("sample_id", Integer, ForeignKey("sample.id"))
class BaseDataFile(Base):
__tablename__ = "base_data_file"
id = Column(Integer, primary_key=True)
type = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "base_data_file",
"polymorphic_on": type,
}
class Sample(BaseDataFile):
__tablename__ = "sample"
__mapper_args__ = {"polymorphic_identity": "sample"}
id = Column(
Integer,
ForeignKey("base_data_file.id"),
primary_key=True,
)
tags = | |
<gh_stars>10-100
import os
import sys
import pickle
import argparse
import time
from torch import optim
from torch.utils.tensorboard import SummaryWriter
sys.path.append(os.getcwd())
from utils import *
from motion_pred.utils.config import Config
from motion_pred.utils.dataset_h36m_multimodal import DatasetH36M
from motion_pred.utils.dataset_humaneva_multimodal import DatasetHumanEva
from models.motion_pred_ours import *
from utils import util, valid_angle_check
def joint_loss(Y_g):
    """Diversity loss over the generated motion samples, per body part.

    :param Y_g: generated futures — assumed shape
        (t_pred, bs * nk**nparts, dim) based on the permute/view below;
        TODO confirm against the caller in `loss_function`.
    :returns: ``(loss, div)`` where ``loss`` is a list of two scalar
        tensors (one per body part, scaled by alpha/beta) and ``div`` is
        the mean pairwise L2 distance across all samples (no grad,
        diagnostic only).
    """
    # uses module globals: cfg, device (set up in __main__, not visible here)
    parts = cfg.nf_specs['parts']
    # expand each joint index p into its x/y/z coordinate column indices
    parts_idx = [(np.array(p) * 3).tolist() + (np.array(p) * 3 + 1).tolist() + (np.array(p) * 3 + 2).tolist()
                 for p in parts]
    nparts = len(parts)
    if 'alphas' in cfg.nf_specs.keys():
        alpha = cfg.nf_specs['alphas'][0]
        beta = cfg.nf_specs['alphas'][1]
    else:
        # default temperature-like scales for the two parts
        alpha = 100
        beta = 300
    loss = []
    # (T, B, D) -> (B, T, D), then split B into [bs, nk, nk] sample axes
    Y_g = Y_g.permute(1, 0, 2).contiguous()
    Y_g = Y_g.view([Y_g.shape[0] // cfg.nk ** nparts] + [cfg.nk] * nparts + [Y_g.shape[1], -1])
    # the indexing below is hard-wired to exactly two body parts
    assert nparts == 2
    # mask keeps strictly-upper-triangular sample pairs (i < j)
    mask = torch.tril(torch.ones([cfg.nk, cfg.nk], device=device)) == 0
    yt = Y_g[:, :, 0, ...][..., parts_idx[0]].reshape([Y_g.shape[0], cfg.nk, -1])
    # pdist = (yt[:, :, None] - yt[:, None, :]).abs()[:, mask]
    pdist = torch.cdist(yt, yt, p=1)[:, mask]
    # RBF-style penalty: large when samples are close, ~0 when far apart
    loss.append((-pdist / alpha).exp().mean())
    yt = Y_g[..., parts_idx[1]].reshape([Y_g.shape[0] * cfg.nk, cfg.nk, -1])
    # pdist = (yt[:, :, None] - yt[:, None, :]).abs()[:, mask]
    pdist = torch.cdist(yt, yt, p=1)[:, mask]
    loss.append((-pdist / beta).exp().mean())
    with torch.no_grad():
        # diagnostic: mean pairwise L2 distance over all nk**nparts samples
        mask = torch.tril(torch.ones([cfg.nk ** nparts, cfg.nk ** nparts], device=device)) == 0
        yt = Y_g.reshape([Y_g.shape[0], cfg.nk ** nparts, -1])
        pdist = torch.cdist(yt, yt, p=2)[:, mask]
        # loss.append(pdist.mean())
    return loss, pdist.mean()
def recon_loss(Y_g, Y, Y_mm):
    """Best-of-many reconstruction losses for generated futures.

    :param Y_g: generated futures, flattened over the sample axis.
    :param Y: ground-truth future.
    :param Y_mm: multimodal ground-truth futures (zero-padded entries
        are treated as invalid).
    :returns: ``(loss_recon, loss_recon_multi, ade)`` — squared-distance
        loss to the GT, to the closest valid multimodal GT, and the
        average displacement error diagnostic (no grad).
    """
    n_samples = cfg.nk ** len(cfg.nf_specs['parts'])
    # (T, bs*n_samples, D) -> (T, bs, n_samples, D)
    Y_g = Y_g.view(Y_g.shape[0], -1, n_samples, Y_g.shape[2])
    err = Y_g - Y.unsqueeze(2)
    sq_dist = err.pow(2).sum(dim=-1).sum(dim=0)
    # best-of-many: only the closest sample per batch element is penalized
    loss_recon = sq_dist.min(dim=1).values.mean()
    with torch.no_grad():
        ade = torch.norm(err, dim=-1).mean(dim=0).min(dim=1).values.mean()
    err_mm = Y_g[:, :, :, None, :] - Y_mm[:, :, None, :, :]
    # a modality is valid iff it is not entirely (near) zero
    valid = Y_mm.abs().sum(-1).sum(0) > 1e-6
    sq_dist_mm = err_mm.pow(2).sum(dim=-1).sum(dim=0)
    loss_recon_multi = sq_dist_mm.min(dim=1).values[valid].mean()
    if torch.isnan(loss_recon_multi):
        # no valid modalities at all -> contribute zero (keeps dtype/device)
        loss_recon_multi = torch.zeros_like(loss_recon)
    return loss_recon, loss_recon_multi, ade
def angle_loss(y):
    """Penalty for joint-angle cosines outside their valid range.

    For every named angle in the module-global ``valid_ang`` table,
    adds the mean squared violation of its lower/upper cosine bound.
    Bounds at or beyond +/-0.98 are treated as unconstrained.

    :param y: pose tensor; flattened to (-1, last_dim) before checking.
    :returns: scalar tensor loss (0 when all angles are valid).
    """
    flat = y.reshape([-1, y.shape[-1]])
    if cfg.dataset == 'h36m':
        ang_cos = valid_angle_check.h36m_valid_angle_check_torch(flat)
    else:
        ang_cos = valid_angle_check.humaneva_valid_angle_check_torch(flat)
    loss = tensor(0, dtype=dtype, device=device)
    for name in list(valid_ang.keys()):
        cos_vals = ang_cos[name]
        lo = valid_ang[name][0]
        if lo >= -0.98:
            below = cos_vals < lo
            if torch.any(below):
                loss += (cos_vals[below] - lo).pow(2).mean()
        hi = valid_ang[name][1]
        if hi <= 0.98:
            above = cos_vals > hi
            if torch.any(above):
                loss += (cos_vals[above] - hi).pow(2).mean()
    return loss
def loss_function(traj_est, traj, traj_multimodal, prior_lkh, prior_logdetjac):
    """Total generator loss plus individual terms for logging.

    :param traj_est: estimated trajectories, time-major; first ``t_his``
        frames reconstruct the history, the rest are predictions.
    :param traj: ground-truth trajectory (time-major).
    :param traj_multimodal: multimodal ground-truth futures.
    :param prior_lkh: log-likelihood of predictions under the pose prior.
    :param prior_logdetjac: log-det-Jacobian from the pose-prior flow
        (currently only logged, not part of the loss).
    :returns: ``(loss_r, stats)`` — the scalar training loss and a numpy
        array of per-term values ordered like ``loss_names`` in train().
    """
    # uses module globals: cfg, dataset, t_his, dtype, device
    lambdas = cfg.nf_specs['lambdas']
    parts = cfg.nf_specs['parts']
    nparts = len(parts)
    nj = dataset.traj_dim // 3
    # diversity loss
    Y_g = traj_est[t_his:]
    JL, div = joint_loss(Y_g)
    # reconstruction loss
    Y = traj[t_his:]
    Y_multimodal = traj_multimodal[t_his:]
    RECON, RECON_mm, ade = recon_loss(Y_g, Y, Y_multimodal)
    # recover history
    xest = traj_est[:t_his].reshape([t_his, cfg.batch_size, cfg.nk ** nparts, -1])
    xgt = traj[:t_his].unsqueeze(2)
    loss_x = torch.mean((xest - xgt).pow(2).sum(dim=-1))
    # maintain limb length
    parent = dataset.skeleton.parents()
    # re-insert the (removed) root joint as zeros before measuring limbs
    tmp = traj[0].reshape([cfg.batch_size, nj, 3])
    pgt = torch.zeros([cfg.batch_size, nj + 1, 3], dtype=dtype, device=device)
    pgt[:, 1:] = tmp
    limbgt = torch.norm(pgt[:, 1:] - pgt[:, parent[1:]], dim=2)[None, :, None, :]
    tmp = traj_est.reshape([-1, cfg.batch_size, cfg.nk ** nparts, nj, 3])
    pest = torch.zeros([tmp.shape[0], cfg.batch_size, cfg.nk ** nparts, nj + 1, 3], dtype=dtype, device=device)
    pest[:, :, :, 1:] = tmp
    limbest = torch.norm(pest[:, :, :, 1:] - pest[:, :, :, parent[1:]], dim=4)
    loss_limb = torch.mean((limbgt - limbest).pow(2).sum(dim=3))
    # angle loss
    loss_ang = angle_loss(Y_g)
    # weighted sum; lambdas indices are fixed by the config convention
    loss_r = loss_x * lambdas[0] + loss_limb * lambdas[1] \
        + JL[0] * lambdas[2] + JL[1] * lambdas[3] + RECON * lambdas[4] + RECON_mm * lambdas[5] \
        - prior_lkh.mean() * lambdas[6]  # - prior_logdetjac.mean() * lambdas[7]
    if loss_ang > 0:
        loss_r += loss_ang * lambdas[8]
    return loss_r, np.array([loss_r.item(), loss_x.item(), loss_limb.item(), loss_ang.item(),
                             JL[0].item(), JL[1].item(), div.item(), RECON.item(), RECON_mm.item(), ade.item(),
                             prior_lkh.mean().item(), prior_logdetjac.mean().item()])
def train(epoch):
    """One training epoch of the motion generator.

    Samples batches (with multimodal GT futures), encodes inputs with a
    padded DCT, draws per-part latent codes, runs the generator, scores
    random prediction frames under the pose prior and optimizes the
    combined loss from `loss_function`. Logs per-term averages to
    TensorBoard and the text logger.

    Uses module globals set up in __main__ (not visible in this chunk):
    model, optimizer, scheduler, dataset, cfg, device, dtype, t_his,
    t_pred, pose_prior, tb_logger, logger.
    """
    model.train()
    t_s = time.time()
    train_losses = 0
    train_grad = 0
    train_grad_d = 0  # NOTE(review): accumulated nowhere below; appears unused
    total_num_sample = 0
    n_modality = 10
    loss_names = ['LOSS', 'loss_cont', 'loss_limb', 'loss_ang', 'loss_DIV_L', 'loss_DIV_U', 'DIV',
                  'RECON', 'RECON_multi', "ADE", 'p(z)', 'logdet']
    generator = dataset.sampling_generator(num_samples=cfg.num_vae_data_sample, batch_size=cfg.batch_size,
                                           n_modality=n_modality)
    prior = torch.distributions.Normal(torch.tensor(0, dtype=dtype, device=device),
                                       torch.tensor(1, dtype=dtype, device=device))
    # generator_d = dataset.sampling_generator(num_samples=cfg.num_vae_data_sample, batch_size=cfg.batch_size)
    dct_m, idct_m = util.get_dct_matrix(t_pred + t_his)
    dct_m_all = dct_m.float().to(device)
    idct_m_all = idct_m.float().to(device)
    parts = cfg.nf_specs['parts']
    n_parts = len(parts)
    # history frames followed by the last history frame repeated t_pred times
    idx_pad = list(range(t_his)) + [t_his - 1] * t_pred
    k = 1  # NOTE(review): unused below
    for traj_np, traj_multimodal_np in generator:
        with torch.no_grad():
            # drop the root joint, reorder to (bs, joints, 3, time)
            traj_np = traj_np[..., 1:, :].transpose([0, 2, 3, 1])  # .reshape(traj_np.shape[0], traj_np.shape[1], -1)
            traj = tensor(traj_np, device=device, dtype=dtype)  # .permute(0, 2, 1).contiguous()
            bs, nj, _, _ = traj.shape
            # DCT-encode the padded history as the model input
            inp = traj.reshape([bs, -1, t_his + t_pred]).transpose(1, 2)
            inp = torch.matmul(dct_m_all[:cfg.n_pre], inp[:, idx_pad, :]).transpose(1, 2). \
                reshape([bs, nj, 3, -1]).reshape([bs, nj, -1])
            traj_multimodal_np = traj_multimodal_np[..., 1:, :]  # [bs, modality, seqn, jn, 3]
            traj_multimodal_np = traj_multimodal_np.reshape([bs, n_modality, t_his + t_pred, -1]).transpose(
                [2, 0, 1, 3])
            traj_multimodal = tensor(traj_multimodal_np, device=device, dtype=dtype)  # .permute(0, 2, 1).contiguous()
        # replicate the input once per latent sample combination
        inp = inp.unsqueeze(1).repeat([1, (cfg.nk ** n_parts), 1, 1]).reshape(
            [bs * (cfg.nk ** n_parts), nj, -1])
        # build the per-part latent grid: all nk x nk combinations
        z = None
        for _ in range(n_parts):
            if z is None:
                zt = torch.randn([bs, cfg.nk, 1, cfg.nf_specs['nz']], dtype=dtype, device=device)
                z = zt
            else:
                z = z.repeat_interleave(cfg.nk, dim=1)
                zt = torch.randn([bs, z.shape[1], 1, cfg.nf_specs['nz']], dtype=dtype, device=device)
                z = torch.cat([z, zt], dim=2)
        z = z.reshape([-1, n_parts, cfg.nf_specs['nz']])
        # train generator
        xt = model(inp, z)
        xt = xt.reshape([bs * (cfg.nk ** n_parts), nj, 3, -1]).reshape([bs * (cfg.nk ** n_parts), nj * 3, -1]) \
            .transpose(1, 2)
        # inverse DCT back to the time domain, time-major
        traj_est = torch.matmul(idct_m_all[:, :cfg.n_pre], xt).transpose(0, 1)
        traj = traj.reshape([bs, -1, t_his + t_pred]).permute([2, 0, 1])
        # to save computation
        # score only every 3rd predicted frame under the pose prior,
        # with a random phase in {0, 1, 2}
        ran = np.random.uniform()
        if ran > 0.67:
            traj_tmp = traj_est[t_his::3].reshape([-1, traj_est.shape[-1] // 3, 3])
            tmp = torch.zeros_like(traj_tmp[:, :1, :])
            traj_tmp = torch.cat([tmp, traj_tmp], dim=1)
            traj_tmp = util.absolute2relative_torch(traj_tmp, parents=dataset.skeleton.parents()).reshape(
                [-1, traj_est.shape[-1]])
        elif ran > 0.33:
            traj_tmp = traj_est[t_his + 1::3].reshape([-1, traj_est.shape[-1] // 3, 3])
            tmp = torch.zeros_like(traj_tmp[:, :1, :])
            traj_tmp = torch.cat([tmp, traj_tmp], dim=1)
            traj_tmp = util.absolute2relative_torch(traj_tmp, parents=dataset.skeleton.parents()).reshape(
                [-1, traj_est.shape[-1]])
        else:
            traj_tmp = traj_est[t_his + 2::3].reshape([-1, traj_est.shape[-1] // 3, 3])
            tmp = torch.zeros_like(traj_tmp[:, :1, :])
            traj_tmp = torch.cat([tmp, traj_tmp], dim=1)
            traj_tmp = util.absolute2relative_torch(traj_tmp, parents=dataset.skeleton.parents()).reshape(
                [-1, traj_est.shape[-1]])
        z, prior_logdetjac = pose_prior(traj_tmp)
        prior_lkh = prior.log_prob(z).sum(dim=-1)
        # prior_logdetjac = log_det_jacobian.sum(dim=2)
        loss, losses = loss_function(traj_est, traj, traj_multimodal, prior_lkh, prior_logdetjac)
        # if torch.isinf(loss):
        #     print(1)
        optimizer.zero_grad()
        loss.backward()
        grad_norm = torch.nn.utils.clip_grad_norm_(list(model.parameters()), max_norm=100)
        train_grad += grad_norm
        optimizer.step()
        train_losses += losses
        total_num_sample += 1
        # print(torch.cuda.memory_allocated()/1024/1024)
        # free large tensors before the next iteration to reduce peak memory
        del loss, z, inp, xt, traj_est
    # print(torch.cuda.memory_allocated())
    scheduler.step()
    # dt = time.time() - t_s
    train_losses /= total_num_sample
    lr = optimizer.param_groups[0]['lr']
    losses_str = ' '.join(['{}: {:.4f}'.format(x, y) for x, y in zip(loss_names, train_losses)])
    # average cost of log time 20s
    tb_logger.add_scalar('train_grad', train_grad / total_num_sample, epoch)
    for name, loss in zip(loss_names, train_losses):
        tb_logger.add_scalars(name, {'train': loss}, epoch)
    logger.info('====> Epoch: {} Time: {:.2f} {} lr: {:.5f}'.format(epoch, time.time() - t_s, losses_str, lr))
def val(epoch):
model.eval()
t_s = time.time()
train_losses = 0
total_num_sample = 0
n_modality = 10
loss_names = ['LOSS', 'loss_cont', 'loss_limb', 'loss_ang', 'loss_DIV_L', 'loss_DIV_U', 'DIV',
'RECON', 'RECON_multi', "ADE", 'p(z)', 'logdet']
generator = dataset_test.sampling_generator(num_samples=cfg.num_vae_data_sample, batch_size=cfg.batch_size)
prior = torch.distributions.Normal(torch.tensor(0, dtype=dtype, device=device),
torch.tensor(1, dtype=dtype, device=device))
with torch.no_grad():
dct_m, idct_m = util.get_dct_matrix(t_pred + t_his)
dct_m_all = dct_m.float().to(device)
idct_m_all = idct_m.float().to(device)
parts = cfg.nf_specs['parts']
n_parts = len(parts)
idx_pad = list(range(t_his)) + [t_his - 1] * t_pred
k = 1
for traj_np, traj_multimodal_np in generator:
traj_np = traj_np[..., 1:, :].transpose([0, 2, 3, 1]) # .reshape(traj_np.shape[0], traj_np.shape[1], -1)
traj = tensor(traj_np, device=device, dtype=dtype) # .permute(0, 2, 1).contiguous()
bs, nj, _, _ = traj.shape
inp = traj.reshape([bs, -1, t_his + t_pred]).transpose(1, 2)
inp = torch.matmul(dct_m_all[:cfg.n_pre], inp[:, idx_pad, :]).transpose(1, 2). \
reshape([bs, nj, 3, -1]).reshape([bs, nj, -1])
traj_multimodal_np = traj_multimodal_np[..., 1:, :] # [bs, modality, seqn, jn, 3]
traj_multimodal_np = traj_multimodal_np.reshape([bs, n_modality, t_his + t_pred, -1]).transpose(
[2, 0, 1, 3])
traj_multimodal = tensor(traj_multimodal_np, device=device, dtype=dtype) # .permute(0, 2, 1).contiguous()
inp = inp.unsqueeze(1).repeat([1, (cfg.nk ** n_parts), 1, 1]).reshape(
[bs * (cfg.nk ** n_parts), nj, -1])
z = None
for _ in range(n_parts):
if z is None:
zt = torch.randn([bs, cfg.nk, 1, cfg.nf_specs['nz']], dtype=dtype, device=device)
z = zt
else:
z = z.repeat_interleave(cfg.nk, dim=1)
zt = torch.randn([bs, z.shape[1], 1, cfg.nf_specs['nz']], dtype=dtype, device=device)
z = torch.cat([z, zt], dim=2)
z = z.reshape([-1, n_parts, cfg.nf_specs['nz']])
# train generator
xt = model(inp, z)
xt = xt.reshape([bs * (cfg.nk | |
<filename>src/blueprints/telegram_bot/_common/yandex_oauth.py
import base64
import secrets
from enum import Enum, auto
from typing import Union
from flask import current_app
import jwt
from src.api import yandex
from src.extensions import db
from src.database import (
User,
YandexDiskToken,
UserQuery,
ChatQuery
)
class InvalidState(Exception):
    """
    The provided OAuth state cannot be trusted
    (invalid Base64, missing data, wrong data, etc.).

    For security reasons the exact cause is not disclosed.
    """
class ExpiredInsertToken(Exception):
    """
    The provided insert token has expired.
    """
class InvalidInsertToken(Exception):
    """
    The insert token extracted from the state is not valid.

    Most likely a new state was requested and an old one was
    passed in for handling.
    """
class YandexRequestError(Exception):
    """
    An unexpected error occurred during a Yandex.OAuth HTTP request.
    """
class MissingData(Exception):
    """
    The requested data is missing.
    """
class YandexOAuthClient:
    """
    Base class for all Yandex.OAuth clients.

    Handles the OAuth ``state`` round-trip (a signed JWT wrapped in
    urlsafe Base64) and the access/refresh token exchange with Yandex.
    """
    def encode_state(self, user_id: int, insert_token: str) -> str:
        """
        :param user_id:
            DB id of the user this state belongs to.
        :param insert_token:
            One-time token authorizing modification of that user.

        :returns:
            JWT which should be used as a value for `state`
            Yandex.OAuth key. It is urlsafe base64 string.
        """
        token = jwt.encode(
            {
                "user_id": user_id,
                "insert_token": insert_token
            },
            current_app.secret_key.encode(),
            algorithm="HS256"
        )
        # PyJWT 1.x returns `bytes`, PyJWT >= 2.0 returns `str`;
        # normalize so `urlsafe_b64encode()` always receives bytes.
        if isinstance(token, str):
            token = token.encode()
        return base64.urlsafe_b64encode(token).decode()
    def decode_state(self, state: str) -> dict:
        """
        :param state:
            A state from `encode_state()`.

        :returns:
            A dict of arguments that were passed into `encode_state()`.

        :raises:
            `InvalidState`.
        """
        encoded_state = None
        try:
            encoded_state = base64.urlsafe_b64decode(
                state.encode()
            ).decode()
        except Exception:
            raise InvalidState()
        decoded_state = None
        try:
            # BUGFIX: PyJWT's `decode()` takes the plural `algorithms`
            # keyword with a list of accepted algorithms. The previous
            # singular `algorithm=` keyword is not part of the API —
            # PyJWT 2.x raises without `algorithms`, which made every
            # state decode fail here as `InvalidState`.
            decoded_state = jwt.decode(
                encoded_state,
                current_app.secret_key.encode(),
                algorithms=["HS256"]
            )
        except Exception:
            raise InvalidState()
        user_id = decoded_state.get("user_id")
        insert_token = decoded_state.get("insert_token")
        # BUGFIX: both values are mandatory. `any()` was used here
        # before, which wrongly accepted a state carrying only one
        # of the two fields.
        if not all((user_id, insert_token)):
            raise InvalidState()
        return {
            "user_id": user_id,
            "insert_token": insert_token
        }
    def get_user(self, user_id: int, insert_token: str) -> User:
        """
        :param user_id:
            DB id of needed user.
        :param insert_token:
            User will be returned only in case when provided
            insert token matches with one from DB. This means
            you are allowed to modify this DB user.
            Insert token of that user can be modified further by
            some another operation, so, you should call this function
            once and reuse returned result.

        :returns:
            DB user.

        :raises:
            `MissingData`, `ExpiredInsertToken`, `InvalidInsertToken`.
        """
        user = UserQuery.get_user_by_id(user_id)
        if (
            user is None or
            # for some reason `yandex_disk_token` not created,
            # it is not intended behavior.
            user.yandex_disk_token is None
        ):
            raise MissingData()
        db_insert_token = None
        try:
            db_insert_token = user.yandex_disk_token.get_insert_token()
        except Exception:
            raise ExpiredInsertToken()
        if (insert_token != db_insert_token):
            raise InvalidInsertToken()
        return user
    def request_access_token(self, code="", refresh_token="") -> dict:
        """
        Makes HTTP request to Yandex.OAuth API to get access token.

        - you should specify only one parameter:
        `code` or `refresh_token`. If specified both, then `code`
        will be selected. If nothing is specified, then an error
        will be thrown.

        :returns:
            `ok` indicates status of operation.
            If `ok = False`, then `error` will contain
            `title` and optional `description`.
            if `ok = True`, then `access_token`, `token_type`,
            `expires_in`, `refresh_token` will be provided.

        :raises:
            `YandexRequestError`.
        """
        response = None
        kwargs = {}
        if code:
            kwargs["grant_type"] = "authorization_code"
            kwargs["code"] = code
        elif refresh_token:
            kwargs["grant_type"] = "refresh_token"
            kwargs["refresh_token"] = refresh_token
        else:
            # ValueError is a subclass of Exception, so existing
            # broad handlers keep working
            raise ValueError("Either `code` or `refresh_token` is required")
        try:
            response = yandex.get_access_token(
                **kwargs
            )["content"]
        except Exception as error:
            raise YandexRequestError(str(error))
        if "error" in response:
            return {
                "ok": False,
                "error": {
                    "title": response["error"],
                    "description": response.get("error_description")
                }
            }
        return {
            "ok": True,
            "access_token": response["access_token"],
            "token_type": response["token_type"],
            "expires_in": response["expires_in"],
            "refresh_token": response["refresh_token"],
        }
    def _save_token_response(self, user: User, response: dict) -> None:
        """
        Stores a successful `request_access_token()` result on the
        user's `yandex_disk_token` row and clears the one-time
        insert token. Shared by `set_access_token()` and
        `refresh_access_token()`.

        - perform a DB commit in order to save changes!
        """
        user.yandex_disk_token.clear_insert_token()
        user.yandex_disk_token.set_access_token(
            response["access_token"]
        )
        user.yandex_disk_token.access_token_type = (
            response["token_type"]
        )
        user.yandex_disk_token.access_token_expires_in = (
            response["expires_in"]
        )
        user.yandex_disk_token.set_refresh_token(
            response["refresh_token"]
        )
    def set_access_token(self, user: User, code: str) -> dict:
        """
        Makes request to Yandex.OAuth server, gets access
        token and saves it.

        - on success clears insert token.
        - perform a DB commit in order to save changes!

        :param user:
            DB user.
        :param code:
            Code from Yandex which was given to user.

        :returns:
            `ok` which contains status of operation.
            `error` from Yandex in case of `ok = False`,
            `error` contains `title` and optional `description`.

        :raises:
            `YandexRequestError`.
        """
        response = self.request_access_token(code=code)
        if not response["ok"]:
            return response
        self._save_token_response(user, response)
        return {
            "ok": True
        }
    def refresh_access_token(self, user: User) -> dict:
        """
        Similar to `set_access_token()`, but uses user
        refresh token from DB.

        - perform DB commit in order to save changes!
        - `error` not always presented in case of `ok = False`.

        :raises:
            `YandexRequestError`.
        """
        refresh_token = user.yandex_disk_token.get_refresh_token()
        if refresh_token is None:
            return {
                "ok": False
            }
        response = self.request_access_token(refresh_token=refresh_token)
        if not response["ok"]:
            return response
        self._save_token_response(user, response)
        return {
            "ok": True
        }
    def clear_access_token(self, user: User) -> None:
        """
        Clears access token (and the refresh token tied to it).

        - perform DB commit in order to save changes!
        """
        user.yandex_disk_token.clear_access_token()
        user.yandex_disk_token.clear_refresh_token()
    def have_valid_access_token(self, user: User) -> bool:
        """
        :returns:
            User have valid (not expired) access token.
        """
        token = user.yandex_disk_token
        if not token:
            return False
        if not token.have_access_token():
            return False
        try:
            # there will be errors in case of
            # expired or invalid token
            token.get_access_token()
        except Exception:
            return False
        return True
    def create_insert_token(self, user: User) -> str:
        """
        Creates insert token (used to insert new data).

        WARNING: it clears all previous data
        (access token, refresh token, etc)!

        - perform DB commit in order to save changes!

        :returns:
            Created insert token.

        :raises:
            `MissingData` (DB data is corrupted or problems with DB).
        """
        user.yandex_disk_token.clear_all_tokens()
        user.yandex_disk_token.set_insert_token(
            secrets.token_hex(
                current_app.config[
                    "YANDEX_OAUTH_API_INSERT_TOKEN_BYTES"
                ]
            )
        )
        user.yandex_disk_token.insert_token_expires_in = (
            current_app.config[
                "YANDEX_OAUTH_API_INSERT_TOKEN_LIFETIME"
            ]
        )
        # it is necessary to check if we are able to get a
        # valid token after inserting
        insert_token = user.yandex_disk_token.get_insert_token()
        if insert_token is None:
            raise MissingData("Insert token is NULL")
        return insert_token
class YandexOAuthAutoCodeClient(YandexOAuthClient):
    """
    Implements https://yandex.ru/dev/oauth/doc/dg/reference/auto-code-client.html # noqa
    """
    def before_user_interaction(self, user: User) -> dict:
        """
        This function should be executed before user interaction.

        :returns:
            `status` that contains operation status. See `OperationStatus`
            documentation for more. In case of `status = CONTINUE_TO_URL`
            there will be both `url` and `lifetime`. User should open this
            url, after `lifetime` seconds this url will be expired.
            `state` is used to avoid handling of url, but you should
            already have valid code from Yandex.
            In case of any other `status` further user actions not needed
            because this user already have valid access token.

        :raises:
            `YandexRequestError`, `MissingData`.
        """
        # NOTE(review): `OperationStatus` is defined elsewhere in this
        # module (not visible in this chunk); presumably an Enum.
        # it can be not created if it is a new user
        if not user.yandex_disk_token:
            db.session.add(
                YandexDiskToken(user=user)
            )
        elif self.have_valid_access_token(user):
            return {
                "status": OperationStatus.HAVE_ACCESS_TOKEN
            }
        # silent refresh first: no user interaction needed if it works
        refresh_result = self.refresh_access_token(user)
        # if `ok = False`, then there can be useful error message
        # from Yandex with some description. At the moment
        # we will do nothing with it and just continue
        # with need of user interaction
        if refresh_result["ok"]:
            db.session.commit()
            return {
                "status": OperationStatus.ACCESS_TOKEN_REFRESHED
            }
        # otherwise start a fresh authorization round: one-time insert
        # token, signed state, and the Yandex authorization URL
        insert_token = self.create_insert_token(user)
        state = self.encode_state(user.id, insert_token)
        url = yandex.create_user_oauth_url(state)
        lifetime_in_seconds = (
            user.yandex_disk_token.insert_token_expires_in
        )
        db.session.commit()
        return {
            "status": OperationStatus.CONTINUE_TO_URL,
            "url": url,
            "lifetime": lifetime_in_seconds,
            "state": state
        }
    def after_success_redirect(self, state: str, code: str) -> dict:
        """
        Should be called after Yandex successful redirect
        (when there is both `code` and `state` parameters).
        Performs needed operations to end user authorization.

        :returns:
            `ok` which contains status of setting of access token.
            `error` from Yandex in case of `ok = False`,
            `error` contains `title` and optional `description`.
            If `ok = False`, you should notify user about occurred error
            and user should request new authorization link because old
            one will become invalid.
            `user` is DB user.

        :raises:
            - `InvalidState`, `ExpiredInsertToken`, `MissingData`,
            `InvalidInsertToken`, `YandexRequestError`.
            - Other errors (`Exception`) should be considered as
            internal server error.
        """
        data = self.decode_state(state)
        user = self.get_user(
            data["user_id"],
            data["insert_token"]
        )
        result = self.set_access_token(user, code)
        result["user"] = user
        if not result["ok"]:
            # invalidate the old authorization link on failure
            user.yandex_disk_token.clear_insert_token()
        db.session.commit()
        return result
    def after_error_redirect(self, state: str) -> None:
        """
        Should be called after Yandex error redirect
        (when there is both `error` and `state` parameters).

        - if function successfully ends, then old user authorization
        link will become invalid.

        :raises:
            `InvalidState`, `ExpiredInsertToken`,
            `InvalidInsertToken`, `MissingData`.
        """
        data = self.decode_state(state)
        user = self.get_user(
            data["user_id"],
            data["insert_token"]
        )
        user.yandex_disk_token.clear_insert_token()
        db.session.commit()
# It inherits `YandexOAuthAutoCodeClient`, not the base `YandexOAuthClient`,
# because we will use it exactly like `YandexOAuthAutoCodeClient`.
# We do this because `YandexOAuthConsoleClient` is intended mostly
# for use during development, so UX is not the key concern.
# However, it would be better to give this module a cleaner implementation later.
class YandexOAuthConsoleClient(YandexOAuthAutoCodeClient):
"""
Implements https://yandex.ru/dev/oauth/doc/dg/reference/console-client.html # noqa
"""
def handle_code(self, state: str, code: | |
else:
pytest.fail("Unknown job grouping 'service' value: {}".format(categories[service_or_provider]))
self.assert_equal_with_jobs_diffs(grouped_jobs["jobs"], expect) # noqa
    def test_get_jobs_valid_grouping_by_service(self):
        """
        Grouping by ``service`` must categorize jobs accordingly (detailed checks in the shared template method).
        """
        self.template_get_jobs_valid_grouping_by_service_provider("service")
    def test_get_jobs_valid_grouping_by_provider(self):
        """
        Grouping by ``provider`` must work as an alias to ``service`` and must be adjusted in-place in the
        response categories (detailed checks in the shared template method).
        """
        self.template_get_jobs_valid_grouping_by_service_provider("provider")
    def test_get_jobs_links_navigation(self):
        """
        Verifies that relation links update according to context in order to allow natural navigation between responses.
        """
        # sanity check: store contents must match fixtures for the paging expectations below to hold
        expect_jobs_total = len(self.job_info)
        expect_jobs_visible = len(list(filter(lambda j: VISIBILITY_PUBLIC in j.access, self.job_info)))
        assert len(self.job_store.list_jobs()) == expect_jobs_total, (
            "expected number of jobs mismatch, following test might not work"
        )
        path = get_path_kvp(sd.jobs_service.path, limit=1000)
        resp = self.app.get(path, headers=self.json_headers)
        assert len(resp.json["jobs"]) == expect_jobs_visible, "unexpected number of visible jobs"
        base_url = self.settings["weaver.url"]
        jobs_url = base_url + sd.jobs_service.path
        limit = 2  # expect 11 jobs to be visible, making 6 pages of 2 each (except last that is 1)
        last = 5  # zero-based index of last page
        last_page = "page={}".format(last)
        prev_last_page = "page={}".format(last - 1)
        limit_kvp = "limit={}".format(limit)
        # first page: no 'prev' link, but 'next'/'last' must be present
        path = get_path_kvp(sd.jobs_service.path, limit=limit)
        resp = self.app.get(path, headers=self.json_headers)
        links = get_links(resp.json["links"])
        assert resp.json["total"] == expect_jobs_visible
        assert len(resp.json["jobs"]) == limit
        assert links["alternate"] is None
        assert links["collection"] == jobs_url
        assert links["search"] == jobs_url
        assert links["up"] is None, "generic jobs endpoint doesn't have any parent collection"
        assert links["current"].startswith(jobs_url) and limit_kvp in links["current"] and "page=0" in links["current"]
        assert links["prev"] is None, "no previous on first page (default page=0 used)"
        assert links["next"].startswith(jobs_url) and limit_kvp in links["next"] and "page=1" in links["next"]
        assert links["first"].startswith(jobs_url) and limit_kvp in links["first"] and "page=0" in links["first"]
        assert links["last"].startswith(jobs_url) and limit_kvp in links["last"] and last_page in links["last"]
        # middle page: both 'prev' and 'next' must be present
        path = get_path_kvp(sd.jobs_service.path, limit=limit, page=2)
        resp = self.app.get(path, headers=self.json_headers)
        links = get_links(resp.json["links"])
        assert len(resp.json["jobs"]) == limit
        assert links["alternate"] is None
        assert links["collection"] == jobs_url
        assert links["search"] == jobs_url
        assert links["up"] is None, "generic jobs endpoint doesn't have any parent collection"
        assert links["current"].startswith(jobs_url) and limit_kvp in links["current"] and "page=2" in links["current"]
        assert links["prev"].startswith(jobs_url) and limit_kvp in links["prev"] and "page=1" in links["prev"]
        assert links["next"].startswith(jobs_url) and limit_kvp in links["next"] and "page=3" in links["next"]
        assert links["first"].startswith(jobs_url) and limit_kvp in links["first"] and "page=0" in links["first"]
        assert links["last"].startswith(jobs_url) and limit_kvp in links["last"] and last_page in links["last"]
        # last page: partially filled and no 'next' link
        path = get_path_kvp(sd.jobs_service.path, limit=limit, page=last)
        resp = self.app.get(path, headers=self.json_headers)
        links = get_links(resp.json["links"])
        assert len(resp.json["jobs"]) == 1, "last page should show only remaining jobs within limit"
        assert links["alternate"] is None
        assert links["collection"] == jobs_url
        assert links["search"] == jobs_url
        assert links["up"] is None, "generic jobs endpoint doesn't have any parent collection"
        assert links["current"].startswith(jobs_url) and limit_kvp in links["current"] and last_page in links["current"]
        assert links["prev"].startswith(jobs_url) and limit_kvp in links["prev"] and prev_last_page in links["prev"]
        assert links["next"] is None, "no next page on last"
        assert links["first"].startswith(jobs_url) and limit_kvp in links["first"] and "page=0" in links["first"]
        assert links["last"].startswith(jobs_url) and limit_kvp in links["last"] and last_page in links["last"]
        # process-filtered listing on the generic endpoint: 'alternate'/'up' rebase onto the process
        p_id = self.process_public.identifier  # 5 jobs with this process, but only 3 visible
        p_j_url = base_url + sd.process_jobs_service.path.format(process_id=p_id)
        p_url = base_url + sd.process_service.path.format(process_id=p_id)
        p_kvp = "process={}".format(p_id)
        path = get_path_kvp(sd.jobs_service.path, limit=1000, process=p_id)
        resp = self.app.get(path, headers=self.json_headers)
        assert len(resp.json["jobs"]) == 3, "unexpected number of visible jobs for specific process"
        path = get_path_kvp(sd.jobs_service.path, limit=limit, page=1, process=p_id)
        resp = self.app.get(path, headers=self.json_headers)
        links = get_links(resp.json["links"])
        assert len(resp.json["jobs"]) == 1, "last page should show only remaining jobs within limit"
        assert links["alternate"].startswith(p_j_url) and p_kvp not in links["alternate"]
        assert limit_kvp in links["alternate"] and "page=1" in links["alternate"], "alt link should also have filters"
        assert links["collection"] == jobs_url
        assert links["search"] == jobs_url
        assert links["up"] == p_url, "parent path should be indirectly pointing at process description from alt link"
        assert links["current"].startswith(jobs_url) and limit_kvp in links["current"] and "page=1" in links["current"]
        assert links["prev"].startswith(jobs_url) and limit_kvp in links["prev"] and "page=0" in links["prev"]
        assert links["next"] is None
        assert links["first"].startswith(jobs_url) and limit_kvp in links["first"] and "page=0" in links["first"]
        assert links["last"].startswith(jobs_url) and limit_kvp in links["last"] and "page=1" in links["last"]
        assert all(p_kvp in links[rel] for rel in ["current", "next", "prev", "first", "last"] if links[rel])
        # same filtered listing via the process-scoped endpoint: paging links rebase onto it instead
        path = get_path_kvp(sd.process_jobs_service.path.format(process_id=p_id), limit=limit, page=0)
        resp = self.app.get(path, headers=self.json_headers)
        links = get_links(resp.json["links"])
        assert len(resp.json["jobs"]) == limit
        assert links["alternate"].startswith(jobs_url) and "process={}".format(p_id) in links["alternate"]
        assert limit_kvp in links["alternate"] and "page=0" in links["alternate"], "alt link should also have filters"
        assert links["collection"] == p_j_url, "collection endpoint should rebase according to context process"
        assert links["search"] == jobs_url, "search endpoint should remain generic jobs even with context process used"
        assert links["up"] == p_url, "parent path should be directly pointing at process description"
        assert links["current"].startswith(p_j_url) and limit_kvp in links["current"] and "page=0" in links["current"]
        assert links["prev"] is None
        assert links["next"].startswith(p_j_url) and limit_kvp in links["next"] and "page=1" in links["next"]
        assert links["first"].startswith(p_j_url) and limit_kvp in links["first"] and "page=0" in links["first"]
        assert links["last"].startswith(p_j_url) and limit_kvp in links["last"] and "page=1" in links["last"]
        assert all(p_kvp not in links[rel] for rel in ["current", "next", "prev", "first", "last"] if links[rel])
        # limit larger than the total: a single page that is both first and last
        limit_over_total = expect_jobs_visible * 2
        limit_kvp = "limit={}".format(limit_over_total)
        path = get_path_kvp(sd.jobs_service.path, limit=limit_over_total)
        resp = self.app.get(path, headers=self.json_headers)
        links = get_links(resp.json["links"])
        assert len(resp.json["jobs"]) == expect_jobs_visible
        assert links["alternate"] is None
        assert links["collection"] == jobs_url
        assert links["search"] == jobs_url
        assert links["up"] is None, "generic jobs endpoint doesn't have any parent collection"
        assert links["current"].startswith(jobs_url) and limit_kvp in links["current"] and "page=0" in links["current"]
        assert links["prev"] is None, "no previous on first page (default page=0 used)"
        assert links["next"] is None, "no next page on last"
        assert links["first"].startswith(jobs_url) and limit_kvp in links["first"] and "page=0" in links["first"]
        assert links["last"].startswith(jobs_url) and limit_kvp in links["last"] and "page=0" in links["last"]
    def test_get_jobs_page_out_of_range(self):
        """
        Paging parameters outside the valid range must produce explicit 400 errors reporting the failing value.
        """
        resp = self.app.get(sd.jobs_service.path, headers=self.json_headers)
        total = resp.json["total"]
        limit = total // 2
        max_limit = 1 if 2 * limit == total else 2  # exact match or last page remainder
        bad_page = 4
        # page index beyond the last available page must be rejected with the valid range reported
        path = get_path_kvp(sd.jobs_service.path, page=bad_page, limit=limit)
        resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
        assert resp.status_code == 400
        assert resp.json["code"] == "JobInvalidParameter"
        assert "IndexError" in resp.json["error"]
        assert f"[0,{max_limit}]" in resp.json["description"]
        assert "page" in resp.json["value"] and resp.json["value"]["page"] == bad_page
        # note:
        #   Following errors are generated by schema validators (page min=0, limit min=1) rather than above explicit
        #   checks. They don't provide the range because the error can apply to more than just paging failing value
        #   is still explicitly reported though. Because comparisons happen at query param level, it reports str values.
        path = get_path_kvp(sd.jobs_service.path, page=-1, limit=limit)
        resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
        assert resp.status_code == 400
        assert resp.json["code"] == "JobInvalidParameter"
        assert "page" in str(resp.json["cause"]) and "less than minimum" in str(resp.json["cause"])
        assert "page" in resp.json["value"] and resp.json["value"]["page"] == str(-1)
        # zero limit is below the schema minimum (limit min=1)
        path = get_path_kvp(sd.jobs_service.path, page=0, limit=0)
        resp = self.app.get(path, headers=self.json_headers, expect_errors=True)
        assert resp.status_code == 400
        assert resp.json["code"] == "JobInvalidParameter"
        assert "limit" in str(resp.json["cause"]) and "less than minimum" in str(resp.json["cause"])
        assert "limit" in resp.json["value"] and resp.json["value"]["limit"] == str(0)
    def test_get_jobs_by_encrypted_email(self):
        """
        Verifies that literal email can be used as search criterion although not saved in plain text within db.
        """
        email = "<EMAIL>"
        body = {
            "inputs": [{"id": "test_input", "data": "test"}],
            "outputs": [{"id": "test_output", "transmissionMode": EXECUTE_TRANSMISSION_MODE_REFERENCE}],
            "mode": EXECUTE_MODE_ASYNC,
            "response": EXECUTE_RESPONSE_DOCUMENT,
            "notification_email": email
        }
        # submit a job carrying the notification email while mocking the actual execution
        with contextlib.ExitStack() as stack:
            for runner in mocked_process_job_runner():
                stack.enter_context(runner)
            path = "/processes/{}/jobs".format(self.process_public.identifier)
            resp = self.app.post_json(path, params=body, headers=self.json_headers)
            assert resp.status_code == 201
            assert resp.content_type == CONTENT_TYPE_APP_JSON
            job_id = resp.json["jobID"]
            # verify the email is not in plain text
            job = self.job_store.fetch_by_id(job_id)
            assert job.notification_email != email and job.notification_email is not None
            assert int(job.notification_email, 16) != 0  # email should be encrypted with hex string
            # the plain-text email must nonetheless work as a search filter
            path = get_path_kvp(sd.jobs_service.path, detail="true", notification_email=email)
            resp = self.app.get(path, headers=self.json_headers)
            assert resp.status_code == 200
            assert resp.content_type == CONTENT_TYPE_APP_JSON
            assert resp.json["total"] == 1, "Should match exactly 1 email with specified literal string as query param."
            assert resp.json["jobs"][0]["jobID"] == job_id
def test_get_jobs_by_type_process(self):
path = get_path_kvp(sd.jobs_service.path, type="process")
resp = self.app.get(path, headers=self.json_headers)
self.check_basic_jobs_info(resp)
expect_jobs = [self.job_info[i].id for i in [0, 2]] # idx=2 & idx>4 have 'service', only 0,2 are public
result_jobs = resp.json["jobs"]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs)
assert resp.json["total"] == len(expect_jobs)
def test_get_jobs_by_type_process_and_specific_process_id(self):
path = get_path_kvp(sd.jobs_service.path, type="process", process=self.process_public.identifier)
resp = self.app.get(path, headers=self.json_headers)
self.check_basic_jobs_info(resp)
assert len(resp.json["jobs"]) == 1
expect_jobs = [self.job_info[0].id]
result_jobs = resp.json["jobs"]
self.assert_equal_with_jobs_diffs(result_jobs, expect_jobs, message="expected only matching process")
def test_get_jobs_by_type_process_and_specific_service_name(self):
"""
Requesting provider ``type`` with a specific ``process`` identifier cannot yield any valid result (contradicts).
.. seealso::
Test :meth:`test_get_jobs_by_type_process_and_specific_process_id` that contains a valid match otherwise
for the given process identifier.
"""
path | |
if unexpected status code
"""
url = '{blobep}{container_name}/{blob_name}{saskey}'.format(
blobep=self.blobep, container_name=container_name,
blob_name=blob_name, saskey=self.saskey)
reqheaders = {'Content-MD5': content_md5}
reqparams = {'comp': 'block', 'blockid': blockid}
response = azure_request(
requests.put, url=url, params=reqparams, headers=reqheaders,
data=block, timeout=self.timeout)
response.raise_for_status()
if response.status_code != 201:
raise IOError(
'incorrect status code returned for put_block: {}'.format(
response.status_code))
def put_block_list(
self, container_name, blob_name, block_list,
x_ms_blob_content_type, x_ms_blob_content_md5):
"""Put block list for blob
Parameters:
container_name - container name
blob_name - name of blob
block_list - block list for blob
x_ms_blob_content_md5 - md5 hash for blob
Returns:
Nothing
Raises:
IOError if unexpected status code
"""
url = '{blobep}{container_name}/{blob_name}{saskey}'.format(
blobep=self.blobep, container_name=container_name,
blob_name=blob_name, saskey=self.saskey)
reqheaders = {'x-ms-blob-content-md5': x_ms_blob_content_md5}
if x_ms_blob_content_type is not None:
reqheaders['x-ms-blob-content-type'] = x_ms_blob_content_type
reqparams = {'comp': 'blocklist'}
body = ['<?xml version="1.0" encoding="utf-8"?><BlockList>']
for block in block_list:
body.append('<Latest>{}</Latest>'.format(block))
body.append('</BlockList>')
response = azure_request(
requests.put, url=url, params=reqparams, headers=reqheaders,
data=''.join(body), timeout=self.timeout)
response.raise_for_status()
if response.status_code != 201:
raise IOError(
'incorrect status code returned for put_block_list: {}'.format(
response.status_code))
def set_blob_properties(
self, container_name, blob_name, x_ms_blob_content_md5):
"""Sets blob properties (MD5 only)
Parameters:
container_name - container name
blob_name - name of blob
x_ms_blob_content_md5 - md5 hash for blob
Returns:
Nothing
Raises:
IOError if unexpected status code
"""
url = '{blobep}{container_name}/{blob_name}{saskey}'.format(
blobep=self.blobep, container_name=container_name,
blob_name=blob_name, saskey=self.saskey)
reqheaders = {'x-ms-blob-content-md5': x_ms_blob_content_md5}
reqparams = {'comp': 'properties'}
response = azure_request(
requests.put, url=url, params=reqparams, headers=reqheaders,
timeout=self.timeout)
response.raise_for_status()
if response.status_code != 200:
raise IOError('incorrect status code returned for '
'set_blob_properties: {}'.format(
response.status_code))
class BlobChunkWorker(threading.Thread):
    """Chunk worker for a Blob

    Consumes (resource, offset, length, ...) work items from an input queue
    and either uploads the chunk to Azure or downloads the corresponding
    byte range, reporting completion through an output queue.
    """
    def __init__(
            self, exc, s_in_queue, s_out_queue, args, blob_service,
            xfertoazure):
        """Blob Chunk worker Thread ctor
        Parameters:
            exc - exception list (shared across workers; non-empty means abort)
            s_in_queue - storage in queue
            s_out_queue - storage out queue
            args - program arguments
            blob_service - blob service
            xfertoazure - xfer to azure (direction)
        Returns:
            Nothing
        Raises:
            Nothing
        """
        threading.Thread.__init__(self)
        self._exc = exc
        self._in_queue = s_in_queue
        self._out_queue = s_out_queue
        self._pageblob = args.pageblob
        self._autovhd = args.autovhd
        self.blob_service = blob_service
        self.timeout = args.timeout
        self.xfertoazure = xfertoazure
    def run(self):
        """Thread code

        Loops over queued chunk descriptors until any worker records an
        exception in the shared list, then stops.

        Parameters:
            Nothing
        Returns:
            Nothing
        Raises:
            Nothing
        """
        while True:
            localresource, container, remoteresource, blockid, \
                offset, bytestoxfer, flock, filedesc = self._in_queue.get()
            try:
                if self.xfertoazure:
                    # upload block/page
                    self.putblobdata(
                        localresource, container, remoteresource, blockid,
                        offset, bytestoxfer, flock, filedesc)
                else:
                    # download range
                    self.getblobrange(
                        localresource, container, remoteresource, offset,
                        bytestoxfer, flock, filedesc)
            # pylint: disable=W0703
            except Exception as exc:
                # pylint: enable=W0703
                # record the failure for the coordinator; do not re-raise so
                # the completion below is still signalled
                self._exc.append(exc)
            self._out_queue.put(localresource)
            # any recorded exception (from this or another worker) stops us
            if len(self._exc) > 0:
                break
    def putblobdata(
            self, localresource, container, remoteresource, blockid, offset,
            bytestoxfer, flock, filedesc):
        """Puts data (blob or page) into Azure storage
        Parameters:
            localresource - name of local resource
            container - blob container
            remoteresource - name of remote resource
            blockid - block id (ignored for page blobs)
            offset - file offset
            bytestoxfer - number of bytes to xfer
            flock - file lock
            filedesc - file handle
        Returns:
            Nothing
        Raises:
            IOError if file cannot be read
        """
        # if bytestoxfer is zero, then we're transferring a zero-byte
        # file, use put blob instead of page/block ops
        if bytestoxfer == 0:
            contentmd5 = compute_md5_for_data_asbase64(b'')
            if as_page_blob(self._pageblob, self._autovhd, localresource):
                blob_type = 'PageBlob'
            else:
                blob_type = 'BlockBlob'
            azure_request(
                self.blob_service.put_blob, container_name=container,
                blob_name=remoteresource, blob=None, x_ms_blob_type=blob_type,
                x_ms_blob_content_md5=contentmd5,
                x_ms_blob_content_length=bytestoxfer,
                x_ms_blob_content_type=get_mime_type(localresource))
            return
        # read the file at specified offset, must take lock
        data = None
        with flock:
            closefd = False
            if not filedesc:
                filedesc = open(localresource, 'rb')
                closefd = True
            filedesc.seek(offset, 0)
            data = filedesc.read(bytestoxfer)
            if closefd:
                filedesc.close()
        if not data:
            raise IOError('could not read {}: {} -> {}'.format(
                localresource, offset, offset + bytestoxfer))
        # issue REST put
        if as_page_blob(self._pageblob, self._autovhd, localresource):
            aligned = page_align_content_length(bytestoxfer)
            # fill data to boundary
            if aligned != bytestoxfer:
                data = data.ljust(aligned, b'\0')
            # compute page md5
            contentmd5 = compute_md5_for_data_asbase64(data)
            # check if this page is empty
            if contentmd5 == _EMPTY_MAX_PAGE_SIZE_MD5:
                return
            elif len(data) != _MAX_BLOB_CHUNK_SIZE_BYTES:
                # shorter-than-max pages cannot use the precomputed empty-page
                # hash; compare against a zero buffer of the same length
                data_chk = b'\0' * len(data)
                data_chk_md5 = compute_md5_for_data_asbase64(data_chk)
                del data_chk
                if data_chk_md5 == contentmd5:
                    return
                del data_chk_md5
            # upload page range (HTTP range is end-inclusive, hence the -1)
            rangestr = 'bytes={}-{}'.format(offset, offset + aligned - 1)
            azure_request(
                self.blob_service.put_page, container_name=container,
                blob_name=remoteresource, page=data, x_ms_range=rangestr,
                x_ms_page_write='update', content_md5=contentmd5,
                timeout=self.timeout)
        else:
            # compute block md5
            contentmd5 = compute_md5_for_data_asbase64(data)
            azure_request(
                self.blob_service.put_block, container_name=container,
                blob_name=remoteresource, block=data, blockid=blockid,
                content_md5=contentmd5, timeout=self.timeout)
        del data
    def getblobrange(
            self, localresource, container, remoteresource, offset,
            bytestoxfer, flock, filedesc):
        """Get a segment of a blob using range offset downloading
        Parameters:
            localresource - name of local resource
            container - blob container
            remoteresource - name of remote resource
            offset - file offset
            bytestoxfer - number of bytes to xfer
            flock - file lock
            filedesc - file handle
        Returns:
            Nothing
        Raises:
            Nothing
        """
        # NOTE(review): unlike the upload path, this end offset has no -1;
        # since HTTP ranges are end-inclusive this requests bytestoxfer+1
        # bytes — verify the chunk-size computation in the caller accounts
        # for that.
        rangestr = 'bytes={}-{}'.format(offset, offset + bytestoxfer)
        blobdata = azure_request(
            self.blob_service.get_blob, timeout=self.timeout,
            container_name=container, blob_name=remoteresource,
            x_ms_range=rangestr)
        # write the downloaded range in place, must take lock
        with flock:
            closefd = False
            if not filedesc:
                filedesc = open(localresource, 'r+b')
                closefd = True
            filedesc.seek(offset, 0)
            filedesc.write(blobdata)
            if closefd:
                filedesc.close()
        del blobdata
def azure_request(req, timeout=None, *args, **kwargs):
    """Wrapper method to issue/retry requests to Azure, works with both
    the Azure Python SDK and Requests
    Parameters:
        req - request to issue
        timeout - timeout in seconds
        args - positional args to req
        kwargs - keyworded args to req
    Returns:
        result of request
    Raises:
        Any uncaught exceptions
        IOError if timeout
    """
    # BUGFIX: this previously used time.clock(), which measures CPU time on
    # POSIX (so the sleeps between retries never consumed the budget and the
    # timeout effectively never fired) and was removed in Python 3.8.
    # time.time() gives wall-clock seconds on both Python 2 and 3.
    start = time.time()
    while True:
        try:
            return req(*args, **kwargs)
        except requests.Timeout:
            # transient network timeout: fall through to retry
            pass
        except requests.HTTPError as exc:
            # only 5xx responses (minus 501/505, which will not recover)
            # are considered retryable
            if exc.response.status_code < 500 or \
                    exc.response.status_code == 501 or \
                    exc.response.status_code == 505:
                raise
        except socket.error as exc:
            # retry only transient connection-level failures
            if exc.errno != errno.ETIMEDOUT and \
                    exc.errno != errno.ECONNRESET and \
                    exc.errno != errno.ECONNREFUSED and \
                    exc.errno != errno.ECONNABORTED and \
                    exc.errno != errno.ENETRESET:
                raise
        except Exception as exc:
            # Azure SDK errors expose a .message with a throttling/server
            # error code on Python 2; anything else is not retryable
            try:
                if not ('TooManyRequests' in exc.message or
                        'InternalError' in exc.message or
                        'ServerBusy' in exc.message or
                        'OperationTimedOut' in exc.message):
                    raise
            except AttributeError:
                raise exc
        if timeout is not None and time.time() - start > timeout:
            raise IOError(
                'waited for {} for request {}, exceeded timeout of {}'.format(
                    time.time() - start, req.__name__, timeout))
        # jittered backoff before the next attempt
        time.sleep(random.randint(2, 5))
def create_dir_ifnotexists(dirname):
    """Create the directory ``dirname`` unless it is already present
    Parameters:
        dirname - name of directory to create
    Returns:
        Nothing
    Raises:
        Unhandled exceptions
    """
    try:
        os.makedirs(dirname)
    except OSError as err:
        # an already-existing directory is fine; anything else propagates
        if err.errno != errno.EEXIST:
            raise  # pragma: no cover
    else:
        print('created local directory: {}'.format(dirname))
def compute_md5_for_file_asbase64(filename, pagealign=False, blocksize=65536):
    """Compute the MD5 digest of a file and return it Base64-encoded
    Parameters:
        filename - filename to compute md5
        pagealign - align bytes for page boundary
        blocksize - block size in bytes
    Returns:
        MD5 for file encoded as Base64
    Raises:
        Nothing
    """
    md5 = hashlib.md5()
    with open(filename, 'rb') as infile:
        for chunk in iter(lambda: infile.read(blocksize), b''):
            if pagealign and len(chunk) < blocksize:
                # a trailing short chunk may need zero-padding to a page
                # boundary so the hash matches the uploaded content
                padded = page_align_content_length(len(chunk))
                if padded != len(chunk):
                    chunk = chunk.ljust(padded, b'\0')
            md5.update(chunk)
    encoded = base64.b64encode(md5.digest())
    # b64encode yields bytes on Python 3; convert to a native str there
    return encoded if _PY2 else str(encoded, 'ascii')
def get_mime_type(filename):
    """Guess a file's MIME content type from its name
    Parameters:
        filename - filename to guess the content-type
    Returns:
        A string of the form 'type/subtype',
        usable for a MIME content-type header
    Raises:
        Nothing
    """
    guessed, _ = mimetypes.guess_type(filename)
    # fall back to the generic binary type for unrecognized names
    return guessed if guessed is not None else 'application/octet-stream'
def compute_md5_for_data_asbase64(data):
    """Compute the MD5 digest of an in-memory buffer, Base64-encoded
    Parameters:
        data - data to compute MD5 hash over
    Returns:
        MD5 for data encoded as Base64
    Raises:
        Nothing
    """
    digest = hashlib.md5(data).digest()
    encoded = base64.b64encode(digest)
    # b64encode yields bytes on Python 3; convert to a native str there
    return encoded if _PY2 else str(encoded, 'ascii')
def page_align_content_length(length):
    """Round a content length up to the next page boundary
    Parameters:
        length - content length
    Returns:
        aligned byte boundary
    Raises:
        Nothing
    """
    remainder = length % _PAGEBLOB_BOUNDARY
    if remainder == 0:
        # already aligned
        return length
    return length + _PAGEBLOB_BOUNDARY - remainder
def as_page_blob(pageblob, autovhd, name):
    """Determines if the file should be a pageblob
    Parameters:
        pageblob - pageblob arg
        autovhd - autovhd arg
        name - file name
    Returns:
        True if file should be a pageblob
    Raises:
        Nothing
    """
    if pageblob:
        return True
    # with autovhd enabled, VHD images are implicitly treated as page blobs
    return bool(autovhd and name.lower().endswith('.vhd'))
def get_blob_listing(blob_service, args):
"""Convenience method for generating a blob listing of a container
Parameters:
blob_service - blob service
args - program arguments
Returns:
dictionary of blob -> list [content length, content md5]
Raises:
Nothing
"""
marker = None
blobdict = {}
while True:
try:
result = azure_request(
blob_service.list_blobs, timeout=args.timeout,
container_name=args.container, marker=marker,
maxresults=_MAX_LISTBLOBS_RESULTS)
except azure.common.AzureMissingResourceHttpError:
break
for blob in result:
blobdict[blob.name] = [
blob.properties.content_length, blob.properties.content_md5]
marker = result.next_marker
if marker is None or len(marker) < 1:
break
return | |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import distributions as dist
from layers import (CResnetBlockConv1d, CBatchNorm1d, CBatchNorm1d_legacy)
import resnet
import numpy as np
class VoxelDecoder64(nn.Module):
    ''' Voxel64 Decoder with batch normalization (BN) class.

    Decodes a latent vector z into a 64^3 occupancy volume through a stack
    of transposed 3D convolutions (2^3 -> 4^3 -> ... -> 64^3). The shared
    pipeline is factored into private helpers so that `forward`,
    `provide_middle_feature` and `predict_with_middle_feature` no longer
    duplicate it.

    Args:
        z_dim (int): input feature z dimension
        gf_dim (int): dimension of feature channel
    '''
    def __init__(self, z_dim=256, gf_dim=256):
        super(VoxelDecoder64, self).__init__()
        self.z_dim = z_dim
        self.gf_dim = gf_dim
        # project z to a (gf_dim, 2, 2, 2) seed volume
        self.fc_z = nn.Sequential(
            nn.Conv1d(self.z_dim, self.gf_dim * 2 * 2 * 2, 1),
            nn.BatchNorm1d(self.gf_dim * 2 * 2 * 2),
            nn.LeakyReLU(0.2)
        )
        self.deconv1 = nn.Sequential(
            nn.ConvTranspose3d(self.gf_dim, self.gf_dim, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm3d(self.gf_dim),
            nn.LeakyReLU(0.2)
        )
        self.deconv2 = nn.Sequential(
            nn.ConvTranspose3d(self.gf_dim, self.gf_dim//2, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm3d(self.gf_dim//2),
            nn.LeakyReLU(0.2)
        )
        self.deconv3 = nn.Sequential(
            nn.ConvTranspose3d(self.gf_dim//2, self.gf_dim//4, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm3d(self.gf_dim//4),
            nn.LeakyReLU(0.2)
        )
        self.deconv4 = nn.Sequential(
            nn.ConvTranspose3d(self.gf_dim//4, self.gf_dim//8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm3d(self.gf_dim//8),
            nn.LeakyReLU(0.2)
        )
        self.deconv5 = nn.Sequential(
            nn.ConvTranspose3d(self.gf_dim//8, self.gf_dim//16, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm3d(self.gf_dim//16),
            nn.LeakyReLU(0.2)
        )
        # final 1x1x1 projection to a single occupancy-logit channel
        self.deconv6 = nn.Sequential(
            nn.ConvTranspose3d(self.gf_dim//16, 1, kernel_size=1, stride=1, padding=0)
        )
        self.sigmoid = nn.Sigmoid()
    def _decode_to_feat(self, z):
        ''' Run the shared pipeline from z up to the 16^3 feature volume.

        Returns a tensor of shape (N, gf_dim//4, 16, 16, 16).
        '''
        z = z.contiguous().view(-1, self.z_dim, 1)
        net = self.fc_z(z)
        net = net.contiguous().view(-1, self.gf_dim, 2, 2, 2)
        net = self.deconv1(net)   # -> (N, gf_dim,    4,  4,  4)
        net = self.deconv2(net)   # -> (N, gf_dim//2, 8,  8,  8)
        return self.deconv3(net)  # -> (N, gf_dim//4, 16, 16, 16)
    def _decode_head(self, feat):
        ''' Upsample a 16^3 feature volume to 64^3 logits and sigmoid.

        Returns (logits, sigmoid), each of shape (N, 1, 64, 64, 64).
        '''
        net = self.deconv4(feat)  # -> (N, gf_dim//8,  32, 32, 32)
        net = self.deconv5(net)   # -> (N, gf_dim//16, 64, 64, 64)
        out = self.deconv6(net)   # -> (N, 1, 64, 64, 64)
        return out, self.sigmoid(out)
    def forward(self, z):
        ''' Decode z; returns (logits, sigmoid) of shape (N, 1, 64, 64, 64). '''
        return self._decode_head(self._decode_to_feat(z))
    def provide_middle_feature(self, z):  # provide
        ''' Return the 16^3 intermediate feature volume for z
        (used as a global feature for patch high-resolution prediction). '''
        return self._decode_to_feat(z)
    def predict_with_middle_feature(self, z):
        ''' Return both the 16^3 intermediate feature and the final
        sigmoid occupancy volume for z. '''
        feat = self._decode_to_feat(z)
        out, out_sigmoid = self._decode_head(feat)
        return feat, out_sigmoid
class PointDecoder(nn.Module):
    ''' Sample Point Decoder (PointSetGenerationV1).

    Maps a latent vector to a fixed-size 3D point cloud whose coordinates
    lie in the open interval (-0.5, 0.5).

    Args:
        z_dim (int): input feature z dimension
        npc (int): number of output points
    '''
    def __init__(self, z_dim=256, npc=512):
        super(PointDecoder, self).__init__()
        self.z_dim = z_dim
        self.pc_num = npc
        self.fc1 = nn.Conv1d(self.z_dim, 512, 1)
        self.fc2 = nn.Conv1d(512, 1024, 1)
        self.fc3 = nn.Conv1d(1024, 1024, 1)
        self.fc4 = nn.Conv1d(1024, self.pc_num*3, 1)
        self.relu = F.relu
    def forward(self, z):
        latent = z.contiguous().view(-1, self.z_dim, 1)
        hidden = self.relu(self.fc1(latent))
        hidden = self.relu(self.fc2(hidden))
        hidden = self.relu(self.fc3(hidden))
        # tanh bounds coordinates to (-1, 1); halving maps to (-0.5, 0.5)
        points = torch.tanh(self.fc4(hidden))/2.
        return points.contiguous().view(-1, self.pc_num, 3)
class DecoderCBatchNorm(nn.Module):
    ''' Decoder with conditional batch normalization (CBN) class.

    Predicts one occupancy logit per query point, conditioning every
    residual block (and the final normalization) on the latent code c.

    Args:
        dim (int): input dimension
        c_dim (int): dimension of latent conditioned code c
        hidden_size (int): hidden size of Decoder network
        leaky (bool): whether to use leaky ReLUs
        legacy (bool): whether to use the legacy structure
    '''
    def __init__(self, dim=3, c_dim=128,
                 hidden_size=256, leaky=False, legacy=False):
        super(DecoderCBatchNorm, self).__init__()
        # point-wise embedding of the input coordinates
        self.fc_p = nn.Conv1d(dim, hidden_size, 1)
        # five residual blocks, each conditioned on the latent code c
        self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        if not legacy:
            self.bn = CBatchNorm1d(c_dim, hidden_size)
        else:
            self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)
        # final 1x1 projection to a single logit channel
        self.fc_out = nn.Conv1d(hidden_size, 1, 1)
        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
    def forward(self, p, c, **kwargs):
        # assumes p is (batch, n_points, dim) — transposed to channels-first
        # for the Conv1d layers; TODO confirm against callers
        p = p.transpose(1, 2)
        # batch_size, D, T = p.size()
        net = self.fc_p(p)
        net = self.block0(net, c)
        net = self.block1(net, c)
        net = self.block2(net, c)
        net = self.block3(net, c)
        net = self.block4(net, c)
        out = self.fc_out(self.actvn(self.bn(net, c)))
        # drop the singleton channel: one logit per query point
        out = out.squeeze(1)
        return out
"""
##########################################define network##########################################
"""
class VoxelNetwork64(nn.Module):
    ''' Image-to-voxel network: a ResNet18 image encoder followed by the
    64^3 voxel decoder. '''
    def __init__(self, z_dim=256, gf_dim=256):
        super(VoxelNetwork64, self).__init__()
        self.z_dim = z_dim
        self.encoder = resnet.Resnet18(self.z_dim)
        self.decoder = VoxelDecoder64(z_dim=self.z_dim, gf_dim=gf_dim)
    def forward(self, x):
        # keep only the first three (RGB) channels before encoding
        rgb = x[:, :3, :, :].contiguous()
        latent = self.encoder(rgb)
        logits, probs = self.decoder(latent)
        return logits, probs
    def provide_middle_feature(self, img):
        latent = self.encoder(img)
        return self.decoder.provide_middle_feature(latent)
class VoxelRefineNetwork(nn.Module):
    """3D conv net refining a single-channel voxel occupancy volume.

    Input/output shape is [bs, 1, vox_size, vox_size, vox_size]; channels
    run 1 -> channel_dim -> channel_dim/2 -> channel_dim/4 -> 1. Also
    provides :meth:`voxel_updater` to blend per-point occupancy predictions
    into an existing grid.
    """
    def __init__(self, channel_dim=64, vox_size=64):
        super(VoxelRefineNetwork, self).__init__()
        self.channel_dim = channel_dim
        self.conv1 = nn.Sequential(
            nn.Conv3d(1, self.channel_dim, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm3d(self.channel_dim),
            nn.LeakyReLU(0.2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv3d(self.channel_dim, self.channel_dim//2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm3d(self.channel_dim//2),
            nn.LeakyReLU(0.2)
        )
        self.conv3 = nn.Sequential(
            nn.Conv3d(self.channel_dim // 2, self.channel_dim // 4, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm3d(self.channel_dim // 4),
            nn.LeakyReLU(0.2)
        )
        # final 1x1x1 conv producing single-channel logits (no norm/activation)
        self.conv4 = nn.Sequential(
            nn.Conv3d(self.channel_dim // 4, 1, kernel_size=1, stride=1, padding=0)
        )
        self.sigmoid = nn.Sigmoid()
        self.vox_size = vox_size
    def voxel_updater(self, vox_sigmoid, p, p_occ_sigmoid, occ_lambda=0.9): # vox_shape: [bs, 1, vs, vs, vs]
        """Blend per-point occupancy predictions into the voxel grid.

        Points ``p`` appear to lie in [-0.5, 0.5]^3 (shifted by +0.5 and
        scaled to voxel coordinates below) -- TODO confirm against callers.
        Each point gets a weight ``w = occ_lambda - |offset from its cell
        center|^2`` and its voxel becomes ``vox*(1-w) + occ*w``.
        """
        # pick volumetric positions/coordinates corresponding to points
        p = (p+0.5)*(self.vox_size-0.001) # exclude the 1*self.vox_size
        p_coord = p.type(torch.LongTensor) # shape: [bs, n, 3]
        # offset of each point from the center of its unit cell
        d = p.floor() + 0.5 - p # shape: [bs, n, 3]
        w = occ_lambda - torch.sum(d*d, dim=-1) # shape: [bs, n]
        # update occupancy values
        vox_update = vox_sigmoid.clone()
        bs = p_coord.size()[0]
        for i in range(0, bs):
            # NOTE(review): when several points land in the same voxel the
            # last write wins (in-place advanced-indexing assignment)
            vox_update[i, 0, p_coord[i, :, 0], p_coord[i, :, 1], p_coord[i, :, 2]] = \
                vox_sigmoid[i, 0, p_coord[i, :, 0], p_coord[i, :, 1], p_coord[i, :, 2]]*(1.-w[i]) + \
                p_occ_sigmoid[i, :]*w[i]
        return vox_update
    def forward(self, x):
        """Return refined (logits, sigmoid) for the voxel volume ``x``."""
        net = self.conv1(x)
        net = self.conv2(net)
        net = self.conv3(net)
        out = self.conv4(net)
        out_sigmoid = self.sigmoid(out)
        return out, out_sigmoid
    def predict_with_local_feature(self, x):
        """Like :meth:`forward` but also return the last hidden feature map."""
        feat = self.conv1(x)
        feat = self.conv2(feat)
        feat = self.conv3(feat)
        voxel = self.conv4(feat)
        voxel = self.sigmoid(voxel)
        return feat, voxel
class VoxelSuperResNetwork_16_64(nn.Module):
    """Upsample a voxel grid from 16^3 to 64^3 with 3D transposed convs.

    Two stride-2 deconvs quadruple the spatial resolution; two stride-1
    layers then clean up the features and produce single-channel logits.
    """

    def __init__(self, input_dim=1, gf_dim=128):
        super(VoxelSuperResNetwork_16_64, self).__init__()
        self.input_dim = input_dim
        self.gf_dim = gf_dim

        def up_block(c_in, c_out, k, s, p):
            # deconv + batch norm + LeakyReLU(0.2)
            return nn.Sequential(
                nn.ConvTranspose3d(c_in, c_out, kernel_size=k, stride=s,
                                   padding=p),
                nn.BatchNorm3d(c_out),
                nn.LeakyReLU(0.2))

        self.deconv1 = up_block(self.input_dim, self.gf_dim, 4, 2, 1)
        self.deconv2 = up_block(self.gf_dim, self.gf_dim//2, 4, 2, 1)
        self.deconv3 = up_block(self.gf_dim//2, self.gf_dim//4, 3, 1, 1)
        self.deconv4 = nn.Sequential(
            nn.ConvTranspose3d(self.gf_dim//4, 1, kernel_size=1, stride=1,
                               padding=0))
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return (logits, sigmoid) at 4x the input spatial resolution."""
        net = x
        for stage in (self.deconv1, self.deconv2, self.deconv3):
            net = stage(net)
        out = self.deconv4(net)
        return out, self.sigmoid(out)
class VoxelPatchSuperResNetwork(nn.Module):
    """Patch-wise voxel super-resolution with point-cloud occupancy blending.

    Each coarse 16^3 patch is upsampled to 64^3 by a
    ``VoxelSuperResNetwork_16_64``; per-point occupancy predictions falling
    inside the patch are blended in with weight ``w_update``; the result is
    refined by a ``VoxelRefineNetwork``.
    """
    def __init__(self, input_dim=1, gf_dim=64, w_update=0.7, begin_idx=None):
        super(VoxelPatchSuperResNetwork, self).__init__()
        self.input_dim = input_dim
        self.gf_dim = gf_dim
        # blending weight given to the point-predicted occupancy
        self.w_update = w_update
        if begin_idx is not None:
            self.begin_idx = begin_idx
        else:
            # default patch origins: 4x4x4 tiling of the 64^3 grid,
            # scaled by 4 into 256^3 coordinates
            begin_idx = []
            for i in range(0, 64, 16):
                for j in range(0, 64, 16):
                    for k in range(0, 64, 16):
                        begin_idx.append([i, j, k])
            self.begin_idx = np.array(begin_idx, dtype=np.int32) * 4
        self.generator = VoxelSuperResNetwork_16_64(input_dim=self.input_dim, gf_dim=self.gf_dim)
        self.refiner = VoxelRefineNetwork(channel_dim=gf_dim, vox_size=64)

    def forward(self, vox_patch, patch_idx, pc, pc_occ, b_return_patch_pc_idx=False):
        """Upsample, point-update and refine a batch of voxel patches.

        :param vox_patch: coarse patches, shape [patchNum, 1, 16, 16, 16]
        :param patch_idx: index of each patch into :attr:`begin_idx`
        :param pc: point cloud, assumed in [-0.5, 0.5]^3 (shifted by +0.5
            below -- TODO confirm against callers)
        :param pc_occ: per-point occupancy predictions, one per point
        :param b_return_patch_pc_idx: additionally return per-patch point
            coordinates
        :return: (vox_pre, vox_pre_sigmoid, vox_ref, vox_ref_sigmoid
                  [, pc_idx_patch])
        """
        patch_size = vox_patch.size()  # [opt.patchNum, 1, 16, 16, 16]
        vox_pre, vox_pre_sigmoid = self.generator(vox_patch)
        pc = pc.contiguous().view(-1, 3) + 0.5  # pc: [0, 1]
        # BUG FIX: previously `pc_occ = pc.contiguous().view(-1, 1)`, which
        # discarded the occupancy argument and blended reshaped point
        # coordinates into the voxels instead
        pc_occ = pc_occ.contiguous().view(-1, 1)
        pc_256 = np.array(pc.cpu().data.squeeze().numpy() * 256., dtype=np.int32)
        vox_pre_update = vox_pre_sigmoid.clone()
        pc_idx_patch = []
        for i in range(0, patch_size[0]):
            idx = self.begin_idx[patch_idx[i]]
            # points whose 256^3 coordinates fall inside this patch's 64^3 box
            pc_idx = np.where((pc_256[:, 0] >= idx[0]) & (pc_256[:, 0] < idx[0] + 64) &
                              (pc_256[:, 1] >= idx[1]) & (pc_256[:, 1] < idx[1] + 64) &
                              (pc_256[:, 2] >= idx[2]) & (pc_256[:, 2] < idx[2] + 64))
            if len(pc_idx) > 0 and len(pc_idx[0]) > 0:
                # map points into local patch coordinates in [0, 64)
                pc_patch = (pc[pc_idx] - (torch.from_numpy(np.array(idx, dtype=np.float32))/256.).cuda())/0.25 * 63.999
                pc_patch_coord = pc_patch.type(torch.LongTensor)  # shape: [n, 3]
                pc_occ_patch = pc_occ[pc_idx]  # shape: [n, 1]
                # blend predicted point occupancy into the upsampled patch
                vox_pre_update[i, 0, pc_patch_coord[:, 0], pc_patch_coord[:, 1], pc_patch_coord[:, 2]] = \
                    vox_pre_sigmoid[i, 0, pc_patch_coord[:, 0], pc_patch_coord[:, 1], pc_patch_coord[:, 2]] * \
                    (1.-self.w_update) + pc_occ_patch[:, 0] * self.w_update
                pc_idx_patch.append(pc_patch_coord)
            else:
                pc_idx_patch.append([])
        vox_ref, vox_ref_sigmoid = self.refiner(vox_pre_update)
        if b_return_patch_pc_idx:
            return vox_pre, vox_pre_sigmoid, vox_ref, vox_ref_sigmoid, pc_idx_patch
        else:
            return vox_pre, vox_pre_sigmoid, vox_ref, vox_ref_sigmoid
class PointSetGenerationNetwork(nn.Module):
def __init__(self, z_dim=256, n_pc=512):
super(PointSetGenerationNetwork, self).__init__()
self.z_dim = z_dim
self.n_pc = n_pc
self.encoder = resnet.Resnet18(self.z_dim)
self.decoder = PointDecoder(z_dim=self.z_dim, npc=self.n_pc)
| |
c2 = filt['c2']
r2 = filt['r2']
if 'r3' not in filt.keys():
r3 = 0
c3 = 0
else:
c3 = filt['c3']
r3 = filt['r3']
if 'r4' not in filt.keys():
r4 = 0
c4 = 0
else:
c4 = filt['c4']
r4 = filt['r4']
coeffs = calculateCoefficients( c1=c1,
c2=c2,
c3=c3,
c4=c4,
r2=r2,
r3=r3,
r4=r4,
flt_type=filt['flt_type'])
a = coeffs
t2 = filt['r2']*filt['c2']
if len(a) == 2:
# 2nd order
a.append(0)
a.append(0)
elif len(a) == 3:
# 3rd order
a.append(0)
else:
pass
# loop filter impedance
# z = (1 + s*t2)/(s*(a[3]*s**3 + a[2]*s**2 + a[1]*s + a[0]))
z = calculateZ( f,
t2,
a[0],
a[1],
a[2],
a[3] )
# G(s)
# g = kphi*kvco*z/s
g = calculateG( f, z, kphi, kvco )
# # Open-loop gain
g_ol = g/N
g_ol_db = 10*np.log10(np.absolute(g_ol))
# ph_ol = 180 + np.unwrap(np.angle(g_ol))*180/np.pi
ph_ol = np.unwrap(np.angle(g_ol))*180/np.pi
# # Closed-loop reference transfer gain
cl_r = (1.0/R)*(g/(1+g/N))
cl_r_db = 20*np.log10(np.absolute(cl_r))
# # Closed-loop VCO transfer gain
cl_vco = 1.0/(1+g/N)
cl_vco_db = 20*np.log10(np.absolute(cl_vco))
# convert gains and phases to lists
# cannot return numpy array to javascript
g = []
p = []
g.extend(g_ol_db)
p.extend(ph_ol)
fz, pz = getInterpolatedFzeroPzero( f, g, p )
ref_cl = []
vco_cl = []
ref_cl.extend(cl_r_db)
vco_cl.extend(cl_vco_db)
return f, g, p, fz, pz, ref_cl, vco_cl
def plotSimulatePhaseNoise():
    """Simulate a fixed example PLL and plot its phase-noise components.

    Runs :func:`simulatePhaseNoise` on a hard-coded 4th-order passive loop
    filter, plots the reference/VCO/PLL/flicker/total curves on a semilog
    axis, and returns the simulation results unchanged.
    """
    kphi = 5e-3
    kvco = 60e6
    N = 200
    R = 1
    fpfd = 10e6 / R
    flt = {
        'c1': 368e-12,
        'c2': 6.75e-9,
        'c3': 76.6e-12,
        'c4': 44.7e-12,
        'r2': 526,
        'r3': 1.35e3,
        'r4': 3.4e3,
        'flt_type': "passive"
    }
    f = [10, 100, 1e3, 10e3, 100e3, 1e6, 10e6, 100e6]
    refPnIn = [-138, -158, -163, -165, -165, -165, -165, -165]
    vcoPnIn = [-10, -30, -60, -90, -120, -140, -160, -162]
    pllFom = -227
    pllFlicker = -268
    f, refPn, vcoPn, icPn, icFlick, comp = simulatePhaseNoise(
        f, refPnIn, vcoPnIn, pllFom, pllFlicker, kphi, kvco, fpfd, N, R,
        filt=flt)
    fig, ax = plt.subplots()
    # thin component curves, thick composite
    for curve, color, label in ((refPn, 'r', 'ref'), (vcoPn, 'b', 'vco'),
                                (icPn, 'g', 'pll'), (icFlick, 'c', 'flick')):
        ax.semilogx(f, curve, color, label=label)
    ax.semilogx(f, comp, 'k', linewidth=2, label='total')
    ax.legend()
    plt.grid(True)
    plt.show()
    return f, refPn, vcoPn, icPn, icFlick, comp
def interp_semilogx(x, y, num_points, x_range=None):
    """Resample (x, y) onto num_points abscissae evenly spaced in log10(x).

    Essentially, given arrays x and y, increase the resolution to
    num_points with linear interpolation in (log10(x), y) space.

    :param x: array of x values (must be positive)
    :param y: array of y values (x and y need to be of equal length)
    :param num_points: int number of points for the entire range
    :param x_range: array of 2 elements: [x_lo, x_hi]
        this is the range of x values which will be returned; defaults to
        the span of x
    :return: tuple (xx, yy), each a list of length num_points
    """
    # work in log10(x) so the output grid is uniform on a semilog axis
    # (the original computed this list twice, as log_x and x_log)
    log_x = [math.log10(item) for item in x]
    if x_range is None:
        xmin = min(log_x)
        xmax = max(log_x)
    else:
        xmin = math.log10(min(x_range))
        xmax = math.log10(max(x_range))
    f = []
    y_interp = []
    for lx in np.linspace(xmin, xmax, num_points):
        f.append(10 ** lx)
        # interpolate linearly in (log10(x), y) space; interp_linear
        # returns (x, y) -- keep only the y value
        y_interp.append(interp_linear(log_x, y, lx)[1])
    return f, y_interp
def plot_interp_semilogx(x, y, num_points=10):
    """Plot (x, y) alongside its log-resampled version for a visual check."""
    xs, ys = interp_semilogx(x, y, num_points=num_points)
    plt.semilogx(x, y, '-bo')
    plt.semilogx(xs, ys, 'ro')
    plt.grid(True)
    plt.show()
def linspace(a, b, num_points):
    """Return a list of num_points floats linearly spaced from a to b.

    Both endpoints are included. Mirrors numpy.linspace for the
    single-point case by returning [a].

    :param a: start value
    :param b: end value
    :param num_points: number of samples (int, >= 1)
    :raises ValueError: if num_points < 1
    """
    if num_points < 1:
        raise ValueError('num_points must be >= 1')
    if num_points == 1:
        # BUG FIX: the original divided by (num_points - 1), raising
        # ZeroDivisionError for a single point
        return [float(a)]
    inc = (float(b) - float(a)) / (num_points - 1)
    return [a + i * inc for i in range(num_points)]
def plot_interp_linear(x, y, x_interp):
    """Plot the (x, y) samples and the interpolated point at x_interp."""
    xi, yi = interp_linear(x, y, x_interp)
    plt.plot(x, y, '-bo')
    plt.plot([xi], [yi], 'ro')
    plt.grid(True)
    plt.show()
def interp_linear(x, y, x_interp):
    """Linearly interpolate (or extrapolate) y at x_interp.

    Inside the sampled range the surrounding segment is used; outside it,
    the nearest end segment's slope is extended. If x_interp coincides with
    a sample, that sample pair is returned unchanged.

    Parameters
        x (list) - x values, ascending
        y (list) - y values
    Returns
        tuple (x, y) where x is x_interp and y is the interpolated value
    Raises
        ValueError - if x and y differ in length
    """
    if len(x) != len(y):
        raise ValueError('x and y arrays need to be the same length')
    xi = float(x_interp)

    def segment(i, j, anchor):
        # evaluate the line through samples i and j at xi, anchored at
        # sample `anchor` (point-slope form)
        slope = (y[j] - y[i]) / (x[j] - x[i])
        return xi, (xi - x[anchor]) * slope + y[anchor]

    if xi < x[0]:
        # below the range: extend the first segment's slope
        return segment(0, 1, 0)
    if xi > x[-1]:
        # above the range: extend the last segment's slope
        return segment(-2, -1, -1)
    for n in range(1, len(x)):
        if x[n] == xi:
            # exact hit on a sample point
            return x[n], y[n]
        if x[n] > xi:
            # first sample above xi: interpolate on segment [n-1, n]
            return segment(n - 1, n, n - 1)
def get_freq_points_per_decade(fstart, fstop, ptsPerDec):
    """ return an array of frequencies starting at the
    nearest decade of 10 from fstart and ending at the
    nearest decade of 10 at fstop. Each decade has
    ptsPerDec points.

    :Arguments:
    fstart (float)
    fstop (float)
    ptsPerDec (int) - must be >= 2

    The first element is 10**floor(log10(fstart)); each decade then
    contributes ptsPerDec - 1 linearly spaced points ending on the next
    decade boundary.
    """
    fstart = float(fstart)
    fstop = float(fstop)
    ptsPerDec = int(ptsPerDec)
    # number of decades spanned by [fstart, fstop]
    num_decades = int(round(math.log10(fstop / fstart) / math.log10(10), 0))
    istart = int(math.log10(fstart) / math.log10(10))
    ar = [10 ** istart]
    # BUG FIX: the loop previously ran range(istart, num_decades + 1), which
    # covers the requested span only when istart == 1 (e.g. for fstart=100,
    # fstop=10000 it emitted a single decade). Count num_decades decades
    # starting from istart instead.
    for i in range(istart, istart + num_decades):
        dec_lo = 10 ** i
        dec_hi = 10 ** (i + 1)
        inc = float(dec_hi - dec_lo) / float(ptsPerDec - 1)
        for j in range(1, ptsPerDec):
            ar.append(float(dec_lo + j * inc))
    return ar
def simulatePhaseNoise2(f,
                        refPn,
                        vcoPn,
                        pllFom,
                        kphi,
                        kvco,
                        fpfd,
                        N,
                        R,
                        filt=None,
                        coeffs=None,
                        numPts=1000 ):
    """Simulate PLL output phase noise on a dense log-spaced frequency grid.

    The input noise curves are first resampled to numPts points with
    interp_semilogx, then shaped by the closed-loop reference, chip and VCO
    transfer functions.

    :param f: offset frequencies of the input noise curves (Hz)
    :param refPn: reference phase noise at f (dBc/Hz)
    :param vcoPn: VCO phase noise at f (dBc/Hz)
    :param pllFom: synthesizer figure of merit (dB)
    :param kphi: phase-detector gain
    :param kvco: VCO gain (Hz/V)
    :param fpfd: phase-detector comparison frequency (Hz)
    :param N: feedback divider
    :param R: reference divider
    :param filt: loop-filter component dict (c1, c2, r2, optional c3/r3,
        c4/r4, and flt_type); used to derive coeffs when coeffs is None.
        NOTE(review): filt is still required for the t2 time constant even
        when coeffs is supplied.
    :param coeffs: precomputed filter coefficients (list, up to 4 entries)
    :param numPts: number of interpolated frequency points
    :return: (freq, refPn, vcoPn, icPn, compPn) lists of length numPts
    """
    if coeffs is None:
        c1 = filt['c1']
        c2 = filt['c2']
        r2 = filt['r2']
        # the 3rd/4th-order components are optional; default them to 0
        if 'r3' not in filt:
            r3 = 0
            c3 = 0
        else:
            c3 = filt['c3']
            r3 = filt['r3']
        if 'r4' not in filt:
            r4 = 0
            c4 = 0
        else:
            c4 = filt['c4']
            r4 = filt['r4']
        coeffs = calculateCoefficients(c1=c1,
                                       c2=c2,
                                       c3=c3,
                                       c4=c4,
                                       r2=r2,
                                       r3=r3,
                                       r4=r4,
                                       flt_type=filt['flt_type'])
    a = coeffs
    t2 = filt['r2']*filt['c2']
    # pad the coefficient list out to 4th order
    while len(a) < 4:
        a.append(0)
    # get smoothed curves for each phase noise component on a common grid
    freq, vcoPn = interp_semilogx(f, vcoPn, num_points=numPts)
    # BUG FIX: refPn must be resampled too; the original added the
    # len(f)-long refPn to the numPts-long cl_r_db (length mismatch)
    _, refPn = interp_semilogx(f, refPn, num_points=numPts)
    # loop filter impedance
    # z = (1 + s*t2)/(s*(a[3]*s**3 + a[2]*s**2 + a[1]*s + a[0]))
    z = calculateZ(freq,
                   t2,
                   a[0],
                   a[1],
                   a[2],
                   a[3])
    # G(s) = kphi*kvco*z/s
    g = calculateG(freq, z, kphi, kvco)
    # Closed-loop reference transfer gain
    cl_r = (1.0/R)*(g/(1+g/N))
    cl_r_db = 20*np.log10(np.absolute(cl_r))
    refPnOut = np.asarray(refPn) + cl_r_db
    refPn = []
    refPn.extend(refPnOut)
    # Closed-loop chip (PFD/charge-pump) transfer gain
    cl_ic = (g/(1+g/N))
    # BUG FIX: was 20*log10(|cl_r|) (copy/paste), which wrongly folded the
    # 1/R reference-divider factor into the chip-noise transfer
    cl_ic_db = 20*np.log10(np.absolute(cl_ic))
    icPnOut = pllFom + 10*np.log10(fpfd) + cl_ic_db
    icPn = []
    icPn.extend(icPnOut)
    # Closed-loop VCO transfer gain
    cl_vco = 1.0/(1+g/N)
    cl_vco_db = 20*np.log10(np.absolute(cl_vco))
    vcoPnOut = np.asarray(vcoPn) + cl_vco_db
    vcoPn = []
    vcoPn.extend(vcoPnOut)
    # power-sum the shaped components at each frequency for the composite
    compPn = [power_sum([refPnOut[i], vcoPnOut[i], icPnOut[i]])
              for i in range(len(freq))]
    return freq, refPn, vcoPn, icPn, compPn
def simulatePhaseNoise( f,
refPn,
vcoPn,
pllFom,
pllFlicker,
kphi,
kvco,
fpfd,
N,
R,
filt=None,
coeffs=None ):
""" simulate an arbitrary phase-locked loop using either
filter coefficients or component values. return 3 lists:
f (frequencies), g_ol (open-loop gain), phases (open-loop phases)
"""
if coeffs == None:
c1 = filt['c1']
c2 = filt['c2']
r2 = filt['r2']
if 'r3' not in filt.keys():
r3 = 0
c3 = 0
else:
c3 = filt['c3']
r3 = filt['r3']
if 'r4' not in filt.keys():
r4 = 0
c4 = 0
else:
c4 = filt['c4']
r4 = filt['r4']
coeffs = calculateCoefficients( c1=c1,
c2=c2,
c3=c3,
c4=c4,
r2=r2,
r3=r3,
r4=r4,
flt_type=filt['flt_type'])
a = coeffs
t2 = filt['r2']*filt['c2']
if len(a) == 2:
# 2nd order
a.append(0)
a.append(0)
elif len(a) == 3:
# 3rd order
a.append(0)
else:
pass
# loop filter impedance
z = calculateZ( f,
t2,
a[0],
a[1],
a[2],
a[3] )
# G(s)
g = calculateG( f, z, kphi, kvco )
# # Closed-loop reference transfer gain
cl_r = (1.0/R)*(g/(1+g/N))
cl_r_db = 20*np.log10(np.absolute(cl_r))
refPnOut = refPn + cl_r_db
refPn = []
refPn.extend( refPnOut )
| |
keys are `"ABCD"` and `"ABGH"`,
respectively, but only `"AB"` was transmitted before the experiment ended.
Therefore, each key in the dict maps to a list of experiments, in case
a single key (`"AB"`) maps to multiple experiments in the MCS data. The
user would then need to disambiguate between them.
Each item in the value is a 5-tuple of ``(start, end, handshake_len,
count_data, count)`` specific to the experiment. ``start`` and ``end``
are the slice from :attr:`data_indices_start` and :attr:`data_indices_end`
for the experiment. ``count_data`` is the slice from
:attr:`DigitalDataStore.count_data` for the experiment and ``count`` is the
:meth:`DigitalDataStore.get_count_ints` parsed ``count`` from that.
    ``handshake_len`` is the corresponding value from
:meth:`DigitalDataStore.get_handshake`, parsed from ``count``.
.. note::
This is only populated for experiments created by Ceed versions
greater than ``1.0.0.dev0``.
"""
def parse(
self, ceed_version, data, t_start: datetime.datetime, f,
find_start_from_ceed_time=False,
estimated_start: datetime.datetime = None,
pre_estimated_start: float = 0):
"""Parses the MCS data into the individual experiments.
:param ceed_version: The file's Ceed version string.
:param data: the upto 24-bit digital data recorded by MCS at the MCS
sampling rate (>> the Ced rate) along with the electrode data.
It is saved as :attr:`DigitalDataStore.data`.
:param t_start: The date/time that corresponds to the first data sample
index in ``data``. That's when the MCS data recording started.
:param f: The MCS recording sampling rate.
:param find_start_from_ceed_time: Whether to locate the Ceed experiment
in the MCS data using a Ceed time estimate or by finding the
handshaking pattern in the digital data. The time based approach
is not well tested, but can tuned if handshaking is not working.
:param estimated_start: The estimated time when the Ceed experiment
started.
:param pre_estimated_start: A fudge factor for ``estimated_start``. We
look for the experiment by ``pre_estimated_start`` before
``estimated_start``.
"""
self._parse_components(data)
self.reduce_samples(
t_start, f, find_start_from_ceed_time,
estimated_start, pre_estimated_start)
if ceed_version == '1.0.0.dev0':
return
self.parse_experiments()
    def reduce_samples(
            self, t_start: datetime.datetime, f,
            find_start_from_ceed_time=False,
            estimated_start: datetime.datetime = None,
            pre_estimated_start: float = 0):
        """Reduces the data from multiple samples per-frame, to one sample per
        frame, using the clock. See :meth:`parse`.

        Ceed (projector) generates data at about 120Hz, while MCS records data
        at many thousands of hertz. So each Ceed frame is saved repeatedly over
        many samples. This collapses it into a single sample per frame.
        We can do this because Ceed toggles the clock for each frame.

        Once reduced, it extracts the clock, short and long counter data and
        saves them into the class properties ready for use by the super class
        methods.

        :param t_start: date/time of the first MCS sample.
        :param f: MCS sampling rate (Hz).
        :param find_start_from_ceed_time: if True, skip ahead to the
            estimated Ceed start before reducing.
        :param estimated_start: estimated experiment start time.
        :param pre_estimated_start: seconds of margin searched before
            ``estimated_start``.
        :raises ValueError: if the estimated start predates the MCS data.
        :raises TypeError: if fewer than 10 samples remain to align with.
        """
        # data is already converted to normal lower bits
        clock_data = self.clock_data
        short_count_data = self.short_count_data
        count_data = self.count_data
        offset = 0
        if find_start_from_ceed_time:
            # seconds between MCS start and the (padded) Ceed start estimate
            offset = (estimated_start - t_start).total_seconds() - \
                float(pre_estimated_start)
            if offset < 0:
                raise ValueError(
                    'Ceed data is not in the mcs data, given the offset')
            # convert seconds to a sample index and drop everything before it
            offset = int(offset * f)
            clock_data = clock_data[offset:]
            short_count_data = short_count_data[offset:]
            count_data = count_data[offset:]
        # should have at least 10 samples. At 5k sampling rate it's reasonable
        if len(clock_data) < 10:
            raise TypeError(
                'There is not enough data in the mcs file to be able to align '
                'with Ceed')
        clock_change = np.argwhere(clock_data[1:] - clock_data[:-1]).squeeze()
        # indices in data where value is different from last (including 0)
        idx_start = np.array([0], dtype=clock_change.dtype)
        # indices in data where next value is different (including last value)
        idx_end = np.array([len(clock_data) - 1], dtype=clock_change.dtype)
        if len(clock_change):
            idx_start = np.concatenate((idx_start, clock_change + 1))
            idx_end = np.concatenate((clock_change, idx_end))
        # take value after clock changes: the second sample of each stable
        # run, or the run's only sample when the run is a single sample
        indices = np.minimum(idx_end - idx_start, 1) + idx_start
        # start at the first clock-high frame: drop a leading clock-low run
        s = 0 if clock_data[0] else 1
        indices = indices[s:]
        idx_start = idx_start[s:]
        idx_end = idx_end[s:]
        # indices in the original data
        self.data_indices_start = idx_start + offset
        self.data_indices_end = idx_end + offset
        # condensed data
        self.clock_data = clock_data[indices]
        self.short_count_data = short_count_data[indices]
        self.count_data = count_data[indices]
    def parse_experiments(self):
        """Given the data that has been reduced and split into the components
        with :meth:`reduce_samples`, it extracts the individual experiments
        into :attr:`experiments`.

        Candidate experiments are delimited by abnormally long frames (the
        blank frames between experiments); each candidate is then validated
        via the short counter, the full counter ints and the handshake
        before being stored.
        """
        # assuming the experiments recorded have at least two good frames,
        # otherwise we can't estimate expected frame size
        if self.n_parts_per_int <= 1:
            raise NotImplementedError(
                'Must break counter int into at least two parts so we can '
                'locate clock inverted values')
        # NOTE(review): max_shot_val is computed but never used below
        max_shot_val = 2 ** len(self.short_count_indices)
        count_data_full = self.count_data
        short_count_data_full = self.short_count_data
        clock_data_full = self.clock_data
        start = self.data_indices_start
        end = self.data_indices_end
        # per-frame durations in samples; the median is the typical frame
        diff = end - start
        med = np.median(diff)
        # each experiment is proceeded by 30-50 blank frames, so 10 is safe.
        # And we should never skip 10+ frames sequentially in a stable system
        # breaks is the indices between experiment - potential experiment start
        breaks = np.nonzero(diff >= (10 * med))[0]
        # if MCS stops recording during Ceed experiment we don't have a break
        if not len(breaks) and len(start):
            breaks = [len(start) - 1]
        experiments = self.experiments = defaultdict(list)
        # the start of the next (potential) experiment
        start_i = 0
        for break_i in breaks:
            s = start_i
            # the long frame is included in last experiment. If long frame is
            # clock low, first frame of next exp is high. If it's high, clock
            # goes low for some frames and then high, so high frame will be
            # first short frame
            e = break_i + 1
            # get section of this possible experiment
            count_data = count_data_full[s:e]
            short_count_data = short_count_data_full[s:e]
            clock_data = clock_data_full[s:e]
            start_i = e
            # need some data to work with
            if len(count_data) < 4:
                continue
            # need to start high
            if not clock_data[0]:
                count_data = count_data[1:]
                short_count_data = short_count_data[1:]
                s += 1
            try:
                # use short counter to see if missing frames, exclude final
                self.check_missing_frames(short_count_data[:-1], False, 1)
                # we don't drop last frame, but the frame extends too long post
                # experiment (i.e. last was clock low and it stayed clock low
                # until next experiment). And last item may be spurious
                end[e - 1] = start[e - 1] + med
                # chop off partial ints and get full ints, it's ok if last value
                # is spurious
                count, count_2d, count_inverted_2d = self.get_count_ints(
                    count_data, False, 1)
                # get handshake from full ints. Last val could be spurious, so
                # if it's part of the handshake, handshake is not complete
                handshake_data, handshake_len, n_handshake_ints, \
                    n_config_frames = self.get_handshake(count, False, 1)
                # check that full and partial ints match
                self.check_counter_consistency(
                    count_data, count_2d, count_inverted_2d, n_handshake_ints,
                    False, 1, True)
            except AlignmentException as e:
                # NOTE(review): this rebinds the slice-end variable ``e``;
                # harmless today because we ``continue`` immediately, but
                # fragile if code is ever added after the except block
                if self.debug:
                    logging.exception(e)
                continue
            # without a complete handshake this is not a usable experiment
            if not handshake_len:
                continue
            # the last count or handshake value could be spurious, but then it
            # won't match, which is ok because we need the full handshake and
            # when searching for handshake we anyway chop of end until empty
            # or found
            experiments[handshake_data].append((
                start[s:e], end[s:e], handshake_len, count_data, count))
class CeedDigitalData(DigitalDataStore):
"""Parses the Ceed data for an individual experiment as recorded in the
Ceed H5 file.
"""
def parse(
self, ceed_version, frame_bits: np.ndarray,
frame_counter: np.ndarray, start_t: datetime.datetime,
n_sub_frames: int, rendered_frames: np.ndarray
) -> None:
"""Parses the Ceed data into the class properties from the raw data.
:param ceed_version: The Ceed version string.
:param frame_bits: Array containing the 24 bits sent to MCS for each
Ceed frame.
:param frame_counter: Array containing the global Ceed counter
corresponding to each frame. This array includes frames that are
not actually rendered but dropped by Ceed.
:param start_t: The date/time when the experiment started.
:param n_sub_frames: The number of sub-frames in each frame (e.g. for
quad modes it's 4 or 12).
:param rendered_frames: Logical array of the same size as
``frame_counter`` but only those frames that were rendered are True.
"""
if ceed_version == '1.0.0.dev0':
self.parse_data_v1_0_0_dev0(frame_bits)
else:
self.parse_data(
frame_bits, frame_counter, start_t, n_sub_frames,
rendered_frames)
    def parse_data_v1_0_0_dev0(self, data: np.ndarray) -> None:
        """Parses the data (from :meth:`parse`) for Ceed version ``1.0.0.dev0``.

        Legacy files have no counter/handshake, so the frame bits are only
        split into their components (stored on the instance by the helper).
        """
        self._parse_components(data)
def parse_data(
self, frame_bits: np.ndarray, frame_counter: np.ndarray,
start_t: datetime.datetime, n_sub_frames: int,
rendered_frames: np.ndarray
) -> None:
"""Parses the data (from :meth:`parse`) for Ceed versions greater than
``1.0.0.dev0``.
| |
#!/usr/bin/env python3
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
import paramiko
'''
{
config: {
max_instance_count: 10,
min_instance_count: 0,
name: sprout_group, scaling_config_action: [
{ ns_config_primitive_name_ref: Sprout Scaling Ops, trigger: pre_scale_in},
{ ns_config_primitive_name_ref: Sprout Scaling Ops, trigger: post_scale_out}
],
vnfd_member: [{count: 1, member_vnf_index_ref: 6}]
},
nsr: {name: NS2},
trigger: post_scale_out,
vnfrs_in_group: [
{connection_points: [ {ip_address: 192.168.127.12, name: sprout_vnfd/sigport} ], name: NS2__sprout_group__1__sprout_vnfd__6,
rw_mgmt_ip: 10.66.217.196, rw_mgmt_port: 0},
vdur_data: [{vm_mgmt_ip: 10.0.217.137, vm_name: iovdu_0}]],
vnfrs_others: [
{connection_points: [{ip_address: 192.168.127.12, name: sprout_vnfd/sigport}],
name: NS2__sprout_group__1__sprout_vnfd__6, rw_mgmt_ip: 10.66.217.196, rw_mgmt_port: 0,
vdur_data: [{vm_mgmt_ip: 10.0.217.130, vm_name: iovdu_0}]}
{connection_points: [{ip_address: 192.168.3.11, name: dnsserver_vnfd/sigport}],
name: NS2__dnsserver_vnfd__5, rw_mgmt_ip: 10.66.217.190, rw_mgmt_port: 0,
vdur_data: [{vm_mgmt_ip: 10.0.217.130, vm_name: iovdu_0}]}
{connection_points: [{ip_address: 172.16.17.32, name: homesteadprov_vnfd/sigport}],
name: NS2__homesteadprov_vnfd__1, rw_mgmt_ip: 10.66.217.191, rw_mgmt_port: 0,
vdur_data: [{vm_mgmt_ip: 10.0.217.130, vm_name: iovdu_0}]}
{connection_points: [{ip_address: 192.168.127.12, name: sipp_vnfd/cp0}], name: NS2__sipp_vnfd__4,
rw_mgmt_ip: 10.66.217.192, rw_mgmt_port: 2022,
vdur_data: [{vm_mgmt_ip: 10.0.217.130, vm_name: iovdu_0}]}
{connection_points: [ {ip_address: 172.16.58.3, name: homesteadprov_vnfd/sigport}], name: NS2__homesteadprov_vnfd__2,
rw_mgmt_ip: 10.66.217.193, rw_mgmt_port: 0,
vdur_data: [{vm_mgmt_ip: 10.0.217.130, vm_name: iovdu_0}]}
{connection_points: [{ip_address: 172.16.31.10, name: homesteadprov_vnfd/sigport}], name: NS2__homesteadprov_vnfd__3,
rw_mgmt_ip: 10.66.217.194, rw_mgmt_port: 0,
vdur_data: [{vm_mgmt_ip: 10.0.217.130, vm_name: iovdu_0}]}
{connection_points: [{ip_address: 192.168.127.12, name: sprout_vnfd/sigport}], name: NS2__sprout_vnfd__6,
rw_mgmt_ip: 10.66.217.195, rw_mgmt_port: 0,
vdur_data: [{vm_mgmt_ip: 10.0.217.130, vm_name: iovdu_0}]}
]}
'''
class ConfigurationError(Exception):
    """Raised when a VNF configuration step (generated expect/shell script)
    exits with a non-zero status."""
    pass
def copy_file_ssh_sftp(logger, server, username, remote_dir, remote_file, local_file):
    """Copy local_file to remote_dir/remote_file on server over SFTP.

    :param logger: logger (unused here, kept for interface consistency)
    :param server: host to connect to
    :param username: ssh user name
    :param remote_dir: destination directory on the remote host
    :param remote_file: destination file name
    :param local_file: path of the local file to upload
    """
    sshclient = paramiko.SSHClient()
    sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    sshclient.load_system_host_keys(filename="/dev/null")
    sshclient.connect(server, username=username, password="<PASSWORD>")
    try:
        sftpclient = sshclient.open_sftp()
        try:
            sftpclient.put(local_file, remote_dir + '/' + remote_file)
        finally:
            # BUG FIX: the SFTP channel was never closed
            sftpclient.close()
    finally:
        # BUG FIX: close the ssh transport even when the transfer raises
        # (the original leaked the connection on error)
        sshclient.close()
def get_vnf_file(logger, file_name, d_name, d_id, d_type):
    """Resolve the path to a VNF script file.

    An absolute file_name is used as-is; otherwise the packaged scripts
    directory under $RIFT_ARTIFACTS is assembled from the descriptor type,
    id and name. When the resolved path does not exist, fall back to
    $RIFT_INSTALL/usr/bin/<file_name>.
    """
    logger.debug("Obtaining local file %s", file_name)
    if file_name.startswith('/'):
        # caller supplied a full path, use as is
        path = file_name
    else:
        path = os.path.join(os.environ['RIFT_ARTIFACTS'],
                            'launchpad/packages',
                            d_type, d_id, d_name,
                            'scripts', file_name)
    logger.debug("Checking for vnf file name at %s", path)
    if not os.path.exists(path):
        logger.debug("Did not find file %s", path)
        path = os.path.join(os.environ['RIFT_INSTALL'],
                            'usr/bin',
                            file_name)
    return path
def configure_sippscaleop(logger, run_dir, vnf_mgmt_ip, sipp_sig_ip, dns_vm_ip):
    """Restart the SIPp load generators so traffic covers all sprout nodes.

    Writes a timestamped expect script into ``run_dir`` that ssh-es into
    the SIPp VM, discovers the current sprout instances via DNS, sums the
    call rate of the running ``sipp-master`` processes, kills them and
    restarts one generator per sprout with the rate divided evenly.

    :param logger: logger for progress/debug output
    :param run_dir: directory where the generated script is written
    :param vnf_mgmt_ip: management IP of the SIPp VNF (ssh target)
    :param sipp_sig_ip: signaling IP the SIPp clients bind to (-i flag)
    :param dns_vm_ip: unused here  # NOTE(review): kept for call-site parity?
    :raises ConfigurationError: if the generated script exits non-zero
    """
    logger.debug("Starting SIPP scaleop ")
    sh_file = "{}/configure_sippscaleop-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
    logger.debug("Creating SIPP file %s", sh_file)
    # NOTE: the template below is an expect script; {{ }} are literal Tcl
    # braces escaped for str.format, {vnf_mgmt_ip}/{sipp_sig_ip} are filled in
    with open(sh_file, "w") as f:
        f.write(r'''#!/usr/bin/expect -f
set login "fedora"
set pw "<PASSWORD>"
set success 0
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@{vnf_mgmt_ip}
set spid $spawn_id
set timeout 60
set sipp_local_port 5060
expect -i $spid \
"*?assword:" {{
exp_send -i $spid "$pw\r"
if {{ $success == 0 }} {{
incr success -1
exp_continue
}}
}} "]$ " {{
set success 1
}} "yes/no" {{
exp_send -i $spid "yes\r"
exp_continue
}} timeout {{
set success -1
}}
send "export TERM=xterm\r"
expect "]$ "
# Get all sprouts
send "dig -t A sprout.test.com +noall +answer\r"
expect "]$ "
set dig_output $expect_out(buffer)
set sproutip_list [regexp -inline -all {{\d+\.\d+\.\d+\.\d+}} $dig_output]
set num_sprouts [llength $sproutip_list]
puts "Regex list for sprout ips: $sproutip_list"
send "ps -ef | grep sipp-master\r"
expect "]$ "
set sipp_process_output $expect_out(buffer)
puts "DEBUG $sipp_process_output"
set callrate_list [regexp -inline -all {{r\s+\d+\s+}} $sipp_process_output]
puts "Callrate_list: $callrate_list"
if {{[llength $callrate_list] == 0}} {{
puts "DEBUG6 Done"
exp_close -i $spid
exit 0
}}
set cumul_callrate 0
foreach callratestr $callrate_list {{
# "r 2"
puts "DEBUG2 $callratestr"
set wordlist [regexp -inline -all {{\S+}} $callratestr]
set ind_callrate [lindex $wordlist 1]
puts "DEBUG4 $ind_callrate"
puts "DEBUG5-1 $cumul_callrate"
set cumul_callrate [expr $cumul_callrate + $ind_callrate ]
puts "DEBUG5-2 $cumul_callrate"
}}
set per_sipp_call_rate [expr {{ $cumul_callrate/$num_sprouts }}]
# Kill existing sipp clients
send "pkill sipp-master \r"
expect "]$ "
send "rm -f /tmp/sipp_stats*.txt\r"
expect "]$ "
set filectr 1
foreach sproutip $sproutip_list {{
send "./sipp-master -sf reg_auth_dereg.xml -inf data1.csv -i {sipp_sig_ip} -p $sipp_local_port -key registrar test.com -l 0 -r $per_sipp_call_rate -t t1 $sproutip:5052 -trace_stat -stf /tmp/sipp_stats$filectr.txt -bg -fd 20\r"
expect "]$ "
incr sipp_local_port
incr filectr
}}
exp_close -i $spid
'''.format(vnf_mgmt_ip=vnf_mgmt_ip, sipp_sig_ip=sipp_sig_ip))
    os.chmod(sh_file, stat.S_IRWXU)
    cmd = "{sh_file} ".format(sh_file=sh_file)
    logger.debug("Executing shell cmd : %s", cmd)
    rc = subprocess.call(cmd, shell=True)
    if rc != 0:
        raise ConfigurationError("SIPP traffic start failed: {}".format(rc))
'''
script to configure DNS server for Sprout Scaleout
'''
def configure_dnsserver_sproutscaleout(logger, run_dir, dns_vnf_mgmt_ip, dns_vm_mgmt_ip, sproutinfo):
    """Add DNS records for a newly scaled-out sprout node.

    Writes a timestamped expect script into ``run_dir`` that ssh-es into
    the DNS VM and sed-inserts A/SRV/PTR records for the new sprout into
    the unbound config, then restarts unbound.

    :param logger: logger for progress/debug output
    :param run_dir: directory where the generated script is written
    :param dns_vnf_mgmt_ip: management IP of the DNS VNF (ssh target)
    :param dns_vm_mgmt_ip: DNS VM management IP
        # NOTE(review): passed to .format() as vm_mgmt_ip but not
        # referenced in the visible template
    :param sproutinfo: dict with the new sprout's 'mgmt_name',
        'local_mgmt_ip', 'sig_name' and 'sig_ip'
    :raises ConfigurationError: if the generated script exits non-zero
    """
    sh_file = "{}/configure_dnsserver-sproutscaleout{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
    logger.debug("Creating DNS server script file %s", sh_file)
    # NOTE: {{ }} below are literal Tcl braces escaped for str.format
    with open(sh_file, "w") as f:
        f.write(r'''#!/usr/bin/expect -f
set login "fedora"
set pw "<PASSWORD>"
set success 0
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@{vnf_mgmt_ip}
set spid $spawn_id
set timeout 60
expect -i $spid \
"*?assword:" {{
exp_send -i $spid "$pw\r"
if {{ $success == 0 }} {{
incr success -1
exp_continue
}}
}} "]$ " {{
set success 1
}} "yes/no" {{
exp_send -i $spid "yes\r"
exp_continue
}} timeout {{
set success -1
}}
send "sudo su\r"
expect "]# "
set sproutmgmtname {sprout_mgmt_name}
set sproutmgmtname2 [string map {{"_" ""}} $sproutmgmtname]
set sproutsigname {sprout_sig_name}
set sproutsigname2 [string map {{"_" ""}} $sproutsigname]
send "sed -i '/# Management A records/a local-data: \"$sproutmgmtname2.test.com. IN A {sprout_mgmt_ip}\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# A records for individual Clearwater nodes/a local-data: \"$sproutsigname2.test.com. IN A {sprout_sig_ip}\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# A record load-balancing/a local-data: \"sprout.test.com. IN A {sprout_sig_ip}\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# A record load-balancing/a local-data: \"sprout-mgmt.test.com. IN A {sprout_mgmt_ip}\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# A record load-balancing/a local-data: \"icscf.sprout.test.com. IN A {sprout_sig_ip}\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# S-CSCF cluster/a local-data: \"_sip._udp.sprout.test.com. IN SRV 0 0 5054 $sproutsigname2.test.com.\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# S-CSCF cluster/a local-data: \"_sip._tcp.sprout.test.com. IN SRV 0 0 5054 $sproutsigname2.test.com.\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# S-CSCF cluster/a local-data: \"_sip._udp.scscf.sprout.test.com. IN SRV 0 0 5054 $sproutsigname2.test.com.\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# S-CSCF cluster/a local-data: \"_sip._tcp.scscf.sprout.test.com. IN SRV 0 0 5054 $sproutsigname2.test.com.\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# I-CSCF cluster/a local-data: \"_sip._tcp.icscf.sprout.test.com. IN SRV 0 0 5052 $sproutsigname2.test.com.\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# I-CSCF cluster/a local-data: \"_sip._udp.icscf.sprout.test.com. IN SRV 0 0 5052 $sproutsigname2.test.com.\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# Reverse lookups for individual nodes/a local-data-ptr: \"{sprout_mgmt_ip} $sproutmgmtname2.test.com\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "sed -i '/# Reverse lookups for individual nodes/a local-data-ptr: \"{sprout_sig_ip} $sproutsigname2.test.com\"' /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "cat /etc/unbound/local.d/test.com.conf\r"
expect "]# "
send "service unbound restart\r"
expect "]# "
sleep 5
exp_close -i $spid
'''.format(vnf_mgmt_ip=dns_vnf_mgmt_ip, vm_mgmt_ip=dns_vm_mgmt_ip, sprout_mgmt_name=sproutinfo['mgmt_name'], sprout_mgmt_ip=sproutinfo['local_mgmt_ip'], sprout_sig_name=sproutinfo['sig_name'], sprout_sig_ip=sproutinfo['sig_ip']))
    os.chmod(sh_file, stat.S_IRWXU)
    cmd = "{sh_file}".format(sh_file=sh_file)
    logger.debug("Executing shell cmd : %s", cmd)
    rc = subprocess.call(cmd, shell=True)
    if rc != 0:
        raise ConfigurationError("Configuration of DNS entries in {} failed: {}".format(dns_vnf_mgmt_ip, rc))
'''
script to configure sprout VNF for scale-out operation
'''
def configure_sproutscaleout(logger, run_dir, vnf_mgmt_ip, vm_mgmt_ip, dns_mgmt_ip, dns_sig_ip, etcd_ip, sprout_mgmt_name):
sh_file = "{}/configure_sproutscaleout-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating sprout script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "clearwater"
set pw "!<PASSWORD>"
set success 0
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@{vnf_mgmt_ip}
set spid $spawn_id
set timeout 60
expect -i $spid \
"*?assword:" {{
exp_send -i $spid "$pw\r"
if {{ $success == 0 }} {{
incr success -1
exp_continue
}}
}} "~$ " {{
set success 1
}} "yes/no" {{
exp_send -i $spid "yes\r"
exp_continue
}} timeout {{
set success -1
}}
send "sudo su\r"
expect "clearwater# "
# Rename Host
set sproutname {sprout_mgmt_name}
set sproutname2 [string map {{"_" ""}} $sproutname]
send "echo $sproutname2.test.com > /etc/hostname\r"
expect "clearwater# "
send "hostname -F /etc/hostname\r"
expect "clearwater# "
# Update /etc/hosts if needed
send "TMPHOSTS=/etc/hosts.rift.new\r"
expect "clearwater# "
send "grep -v '127\[.\]0\[.\]1\[.\]1\[\[:space:\]\]' < /etc/hosts > \$TMPHOSTS\r"
expect "clearwater# "
send "mv \$TMPHOSTS /etc/hosts\r"
expect "clearwater# "
# Recreate SSH2 keys
send "export DEBIAN_FRONTEND=noninteractive\r"
expect "clearwater# "
send "dpkg-reconfigure openssh-server\r"
expect "clearwater# "
# Remove DHCP exit hook
send "mv /etc/dhcp/dhclient-exit-hooks.d/sethostname /tmp/sethostname.bkup\r"
expect "clearwater# "
# Update clearwater local config
send "sed -iE 's/public_hostname=.*/public_hostname=$sproutname2.test.com/' /etc/clearwater/local_config\r"
expect "clearwater# "
# Debug
send "cat /etc/clearwater/local_config\r"
expect "clearwater# "
# Update ETCD cluster clearwater local config
send "sed -iE 's/etcd_cluster=.*/etcd_cluster={etcd_ip}/' /etc/clearwater/local_config\r"
expect "clearwater# "
# Update signaling dns server
send "sed -iE 's/signaling_dns_server=.*/signaling_dns_server={dns_sig_ip}/' /etc/clearwater/local_config\r"
expect "clearwater# "
# Update /etc/resolv.conf
send "echo 'nameserver {dns_mgmt_ip}' > /etc/resolv.conf\r"
expect "clearwater# "
send "echo 'search test.com' >> /etc/resolv.conf\r"
expect "clearwater# "
# Update /etc/netns/signaling/resolv.conf
send "echo 'nameserver | |
args):
ret = self.performCustomStep("prepublish", args)
if not ret:
return ret
self.loadModifiedFiles(args)
# Commit any files that may have been added to the main repo.
# The custom prepublish step is responsible for performing the git add for any
# modified files.
# Submodules and nested subprojects are not handled here. If any files there are
# modified during the prepublish step, the git add *and* the git commit must be
# handled in the custom step.
try:
git.commit(" -m \"%s\"" % args["-m"])
except git.GrapeGitError:
pass
return self.checkInProgressLock(args)
def performCustomPostPublishSteps(self, args):
    """Run any project-defined custom steps for the 'postpublish' phase.

    Delegates to performCustomStep and returns whatever it returns.
    """
    stepName = "postpublish"
    return self.performCustomStep(stepName, args)
@staticmethod
def getModifiedFileList(public, topic, args):
    """Return the list of file names that differ between two refs.

    Parameters
    ----------
    public : str
        branch name or SHA to diff against.
    topic : str
        topic branch name or SHA.
    args : dict
        docopt argument dictionary; "--emailMaxFiles" caps how many file
        names are returned.

    Returns
    -------
    list of str
        changed paths; truncated to --emailMaxFiles entries with a
        "[ Additional files not shown ]" placeholder appended when the
        diff touches more files than that.
    """
    # Limit the number of updated files displayed per subproject.
    # docopt option values are strings; without the int() cast the
    # comparison below was str-vs-int (always False in Python 2, a
    # TypeError in Python 3).  The cast matches how --skipFirstLines is
    # handled elsewhere in this class.
    emailMaxFiles = int(args["--emailMaxFiles"])
    updatelist = git.diff("--name-only %s %s" % (public, topic)).split('\n')
    if len(updatelist) > emailMaxFiles:
        # Actually enforce the display limit before adding the marker;
        # the original appended the marker but never trimmed the list.
        updatelist = updatelist[:emailMaxFiles]
        updatelist.append("[ Additional files not shown ]")
    return updatelist
def loadModifiedFiles(self, args):
    """Populate self.progress["modifiedFiles"] with every file changed
    between --public and --topic: main repo, modified submodules (when
    --recurse is set), and all active nested subprojects.

    The result is cached in self.progress so a resumed publish does not
    recompute it.  Always returns True.
    """
    # Cached from a previous (possibly interrupted) run.
    if "modifiedFiles" in self.progress:
        return True
    wsdir = utility.workspaceDir()
    os.chdir(wsdir)
    public = args["--public"]
    topic = args["--topic"]
    # If both refs resolve to the same commit there is nothing to diff;
    # ask the user what to diff against instead.
    if git.SHA(public) == git.SHA(topic):
        public = utility.userInput("Please enter the branch name or SHA of the commit to diff against %s for the "
                                   "modified file list." % topic)
    self.progress["modifiedFiles"] = []
    # Get list of modified files in main repo
    self.progress["modifiedFiles"] += self.getModifiedFileList(public, topic, args)
    # Get list of modified files in submodules
    if args["--recurse"]:
        submodulePublic = args["--submodulePublic"]
        submodules = git.getModifiedSubmodules(public, topic)
        for sub in submodules:
            # Diff each submodule from inside its own working tree and
            # prefix paths so they are workspace-relative.
            os.chdir(os.path.join(wsdir, sub))
            self.progress["modifiedFiles"] += [sub + "/" + s for s in self.getModifiedFileList(submodulePublic, topic, args)]
            os.chdir(wsdir)
    # Get list of modified files in nested subprojects
    for nested in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes():
        os.chdir(os.path.join(wsdir, nested))
        modified = self.getModifiedFileList(public, topic, args)
        if len(modified) > 0:
            self.progress["modifiedFiles"] += [nested + "/" + s for s in modified]
        os.chdir(wsdir)
    return True
def loadVersion(self, args):
    """Ensure self.progress["version"] holds a version string.

    Reads the current version via the 'version' menu option and offers it
    as the default when prompting the user.  Always returns True.
    """
    if "version" not in self.progress:
        versionMenu = grapeMenu.menu()
        versionMenu.applyMenuChoice("version", ["read"])
        suggestion = versionMenu.getOption("version").ver
        self.progress["version"] = utility.userInput(
            "Please enter version string for this commit", suggestion)
    return True
def loadCommitMessage(self, args):
    """Ensure args["-m"] and self.progress["commitMsg"] hold the commit
    message used for the merge commit and notification e-mail.

    Message sources, in order of precedence: the cached progress entry,
    the --noUpdateLog placeholder, a <CommitMessageFile>, an explicit -m,
    or the open Pull Request's description.  The user confirms the final
    text.

    Returns True on success, False when no message could be obtained;
    raises Exception when the user rejects the proposed message.
    """
    if "reviewers" not in self.progress:
        # fill in the reviewers entry in progress, but don't check the review status.
        self.verifyCompletedReview(args)
    # Cached from a previous run: just mirror it into -m if needed.
    if "commitMsg" in self.progress:
        if not args["-m"]:
            args["-m"] = self.progress["commitMsg"]
        return True
    if args["--noUpdateLog"]:
        self.progress["commitMsg"] = "no details entered"
        return True
    # No source at all yet: offer the PR description, or let the user
    # name a file containing the message.
    if not args["<CommitMessageFile>"] and not args["-m"]:
        proceed = utility.userInput("No commit message entered. Would you like to use the Pull Request's "
                                    "description as your commit message? [y/n] \n(Enter 'n' to enter a file name with your commit message instead)", 'y')
        if not proceed:
            args["<CommitMessageFile>"] = utility.userInput("Enter the name of the file containing your commit "
                                                            "message: ")
    if args["<CommitMessageFile>"] and not args["-m"]:
        # commit message should come from the file
        commitMsgFile = args["<CommitMessageFile>"]
        try:
            with open(commitMsgFile, 'r') as f:
                commitMsg = f.readlines()+["\n"]
        except IOError as e:
            print(e.message)
            utility.printMsg("Could not read contents of %s" % commitMsgFile)
            args["<CommitMessageFile>"] = False
            return False
        if not args["--noReview"]:
            # Keep the Pull Request description in sync with the file.
            utility.printMsg("Updating Pull Request with commit msg...")
            self.markReview(args, ["--descr", commitMsgFile], "")
        else:
            utility.printMsg("Skipping update of pull request description from commit message")
    elif args["-m"]:
        commitMsg = [args["-m"]+"\n"]
    else:
        # Neither a file nor -m: fall back to the Pull Request description.
        if args["--noReview"]:
            utility.printMsg("Skipping retrieval of commit message from Pull Request description..")
            if not args["-m"]:
                print("File with commit message is required argument when publishing with --noReview and no -m "
                      "<msg> defined.")
                return False
        utility.printMsg("Retrieving pull request description for use as commit message...")
        atlassian = Atlassian.Atlassian(username=args["--user"], url=args["--bitbucketURL"], verify=args["--verifySSL"])
        repo = atlassian.project(args["--project"]).repo(args["--repo"])
        pullRequest = repo.getOpenPullRequest(args["--topic"], args["--public"])
        if pullRequest:
            commitMsg = pullRequest.description().splitlines(True)+['\n']
        else:
            commitMsg = ""
    # this will be used for the actual merge commit message.
    # Escape double quotes (the message is later embedded in a quoted -m
    # argument) and downgrade backticks to avoid shell substitution.
    escapedCommitMsg = ''.join(commitMsg).replace("\"", "\\\"")
    escapedCommitMsg = escapedCommitMsg.replace("`", "'")
    if escapedCommitMsg:
        args["-m"] = escapedCommitMsg
    else:
        utility.printMsg("WARNING: Commit message is empty. ")
    utility.printMsg("The following commit message will be used for email notification, merge commits, etc.\n"
                     "======================================================================")
    # Python 2 print statements; only the first 10 lines are previewed.
    print ''.join(commitMsg[:10])
    print "======================================================================"
    proceed = utility.userInput("Is the above message what you want for email notifications and merge commits? "
                                "['y','n']", 'y')
    if not proceed:
        utility.printMsg("Stopping. Either edit the message in your pull request, or pass in the name of a file "
                         "containing your message as an argument to grape publish.")
        # Abort the publish; clear both message sources so a retry re-prompts.
        e = Exception()
        e.message = "Invalid commit message."
        args["<CommitMessageFile>"] = False
        args["-m"] = False
        raise e
    else:
        self.progress["commitMsg"] = escapedCommitMsg
        args["-m"] = escapedCommitMsg
    return True
def updateLog(self, args):
    """Prepend the commit message (with a formatted entry header) to the
    log file named by --updateLog, and commit that change.

    No-op (beyond loading message/version) when --noUpdateLog is set or
    no log file is configured.  Returns the in-progress lock check
    result, or False when message/version loading failed.
    """
    if not (self.loadCommitMessage(args) and self.loadVersion(args)):
        return False
    commitMsg = self.progress["commitMsg"].split('\n')
    if args["--noUpdateLog"]:
        return True
    logFile = args["--updateLog"]
    cwd = os.getcwd()
    os.chdir(utility.workspaceDir())
    if logFile:
        # Expand the <date>, <user>, <version>, <reviewers> placeholders
        # in the configured entry header.
        header = args["--entryHeader"]
        header = header.replace("<date>", time.asctime())
        header = header.replace("<user>", git.config("--get user.name"))
        header = header.replace("<version>", self.progress["version"])
        header = header.replace("<reviewers>", self.progress["reviewers"])
        # The option value encodes line breaks as literal "\n".
        header = ["\n"]+header.split("\\n")
        commitMsg = header + commitMsg
        # Insert the new entry after the first --skipFirstLines lines
        # (e.g. to keep a fixed title block at the top of the log).
        numLinesToSkip = int(args["--skipFirstLines"])
        with open(logFile, 'r') as f:
            loglines = f.readlines()
        loglines.insert(numLinesToSkip, '\n'.join(commitMsg))
        with open(logFile, 'w') as f:
            f.writelines(loglines)
        git.commit("%s -m \"GRAPE publish: updated log file %s\"" % (logFile, logFile))
    os.chdir(cwd)
    return self.checkInProgressLock(args)
def tickVersion(self, args):
    """Tick (increment) the version number as part of publishing.

    Skipped entirely unless --tickVersion is set.  When reviews are in
    use, the Pull Request title is checked first: if it already contains
    the current version string, the tick is assumed to have happened on a
    previous publish attempt and is not repeated.

    Returns the combined result of the tick, the review-title update, and
    the in-progress lock check (True when nothing needed doing).
    """
    if not args["--tickVersion"]:
        return True
    menu = grapeMenu.menu()
    if not args["--noReview"]:
        atlassian = Atlassian.Atlassian(username=args["--user"], url=args["--bitbucketURL"], verify=args["--verifySSL"])
        repo = atlassian.project(args["--project"]).repo(args["--repo"])
        thisRequest = repo.getOpenPullRequest(args["--topic"], args["--public"])
        requestTitle = thisRequest.title()
        versionArgs = ["read"]
        menu.applyMenuChoice("version", versionArgs)
        currentVer = grapeMenu.menu().getOption("version").ver
        if currentVer in requestTitle:
            utility.printMsg("Current Version string already in pull request title. Assuming this is from "
                             "a previous call to grape publish. Not ticking version again.")
            return True
    ret = True
    if args["--tickVersion"]:
        # Tick without tagging; tagging is done later by tagVersion().
        versionArgs = ["tick", "--notag", "--public=%s" % args["--public"]]
        for arg in args["-T"]:
            versionArgs += [arg.strip()]
        ret = grapeMenu.menu().applyMenuChoice("version", versionArgs)
        self.progress["version"] = grapeMenu.menu().getOption("version").ver
        # Record the new version in the PR title so a retry detects the
        # tick already happened (the check above).
        ret = ret and self.markReviewWithVersionNumber(args)
    return ret and self.checkInProgressLock(args)
@staticmethod
def tagVersion(args):
    """Create the version tag (without ticking) and push tags for the
    main repo and every active nested subproject.

    No-op unless --tickVersion is set.  Returns the result of the
    version menu choice (True when tagging was skipped).
    """
    ret = True
    if args["--tickVersion"]:
        # Tag only: the tick itself was done earlier by tickVersion().
        versionArgs = ["tick", "--tag", "--notick", "--nocommit", "--tagNested"]
        for arg in args["-T"]:
            versionArgs += [arg.strip()]
        cwd = os.getcwd()
        wsdir = utility.workspaceDir()
        os.chdir(wsdir)
        ret = grapeMenu.menu().applyMenuChoice("version", versionArgs)
        # Push tags from each nested subproject, then from the main repo.
        for nested in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes():
            os.chdir(os.path.join(wsdir, nested))
            git.push("--tags origin")
            os.chdir(wsdir)
        git.push("--tags origin")
        os.chdir(cwd)
    return ret
def sendNotificationEmail(self, args):
    """Build the publish notification message and e-mail it.

    The message is assembled from the --emailHeader, commit message,
    modified-file list, and --emailFooter templates, with the <user>,
    <date>, <version>, <reviewers>, and <public> placeholders expanded.

    When --emailNotification is not "true", the message is only printed.
    Returns True on success, False when the commit message, version, or
    file list could not be loaded, or when the SMTP server is
    unreachable.
    """
    if not (self.loadCommitMessage(args) and self.loadVersion(args) and self.loadModifiedFiles(args)):
        return False
    # Write the contents of the mail file out to a temporary file.
    # mkstemp (unlike the insecure, race-prone mktemp) creates the file
    # atomically with restrictive permissions.
    fd, mailfile = tempfile.mkstemp()
    os.close(fd)
    try:
        with open(mailfile, 'w') as mf:
            date = time.asctime()
            emailHeader = args["--emailHeader"]
            emailHeader = emailHeader.replace("<user>", git.config("--get user.name"))
            emailHeader = emailHeader.replace("<date>", date)
            emailHeader = emailHeader.replace("<version>", self.progress["version"])
            emailHeader = emailHeader.replace("<reviewers>", self.progress["reviewers"])
            emailHeader = emailHeader.replace("<public>", args["--public"])
            # Option values encode line breaks as literal "\n".
            emailHeader = emailHeader.split("\\n")
            mf.write('\n'.join(emailHeader))
            comments = self.progress["commitMsg"]
            mf.write('\n')
            mf.write(comments)
            updatelist = self.progress["modifiedFiles"]
            if len(updatelist) > 0:
                mf.write("\nFILES UPDATED:\n")
                mf.write("\n".join(updatelist))
            mf.write('\n')
            emailFooter = args["--emailFooter"]
            emailFooter = emailFooter.replace("<user>", git.config("--get user.name"))
            emailFooter = emailFooter.replace("<date>", date)
            emailFooter = emailFooter.replace("<version>", self.progress["version"])
            emailFooter = emailFooter.replace("<reviewers>", self.progress["reviewers"])
            emailFooter = emailFooter.replace("<public>", args["--public"])
            emailFooter = emailFooter.split("\\n")
            mf.write('\n'.join(emailFooter))
        if not args["--emailNotification"].lower() == "true":
            # Notification disabled: just echo the message to the console.
            utility.printMsg("Skipping E-mail notification..")
            with open(mailfile, 'r') as mf:
                utility.printMsg("-- Begin update message --")
                utility.printMsg(mf.read())
                utility.printMsg("-- End update message --")
            return True
        # Open the file back up and attach it to a MIME message
        with open(mailfile, 'rb') as t:
            message = t.read()
        msg = MIMEText(message)
        # Use their email address from their git user profile.
        myemail = git.config("--get user.email")
        mailsubj = args["--emailSubject"]
        mailsubj = mailsubj.replace("<user>", git.config("--get user.name"))
        mailsubj = mailsubj.replace("<public>", args["--public"])
        mailsubj = mailsubj.replace("<version>", self.progress["version"])
        mailsubj = mailsubj.replace("<date>", date)
        sendto = args["--emailSendTo"]
        msg['Subject'] = mailsubj
        msg['From'] = myemail
        msg['To'] = sendto
        msg['CC'] = myemail
        # Send the message via the configured SMTP server (don't know if this
        # is necessary - localhost might work just as well)
        import socket
        try:
            s = smtplib.SMTP("nospam.llnl.gov", timeout=10)
        except socket.error as e:
            utility.printMsg("Failed to email: %s" % str(e))
            return False
        # Don't need to connect if we specified the
        # host in the SMTP constructor above...
        #s.connect()
        tolist = msg['To'].split(',')
        tolist.append(myemail)
        s.sendmail(msg['From'], tolist, msg.as_string())
        s.quit()
        return True
    finally:
        # Always remove the temp file; the original leaked it when
        # notification was skipped or when the SMTP connection failed.
        os.remove(mailfile)
def askWhetherToDelete(self, args):
    """Decide (once) whether the topic branch should be deleted after the
    publish completes.

    The decision is cached in self.progress under "<<doDelete>>" so a
    resumed publish does not re-prompt the user.
    """
    progressKey = "<<doDelete>>"
    if progressKey in self.progress:
        self.doDelete = self.progress[progressKey]
    topic = args["--topic"]
    if not self.doDelete:
        if args["--deleteTopic"].lower() == "true":
            prompt = "Once the publish is done, would you like to delete the branch %s ? \n[y/n]" % (topic)
            self.doDelete[topic] = utility.userInput(prompt, default='y')
        else:
            self.doDelete[topic] = False
    self.progress[progressKey] = self.doDelete
def deleteTopicBranch(self, args):
self.askWhetherToDelete(args)
if self.doDelete[args["--topic"]]:
utility.printMsg("Deleting %s" % args["--topic"])
grapeMenu.menu().applyMenuChoice("db", [args["--topic"]])
# If the branch was not deleted, offer to return to that branch
try:
# SHA will raise an exception if the branch has been deleted
if git.SHA(args["--topic"]):
checkout = utility.userInput("You are currently on %s. Would you like to checkout %s? [y,n]" | |
track_wires: Union[WireArray, List[WireArray]], *,
min_len_mode: Optional[MinLenMode] = None,
debug: bool = False) -> Union[Optional[WireArray],
List[Optional[WireArray]]]:
"""Connect all given WireArrays to the given WireArrays on adjacent layer.
Parameters
----------
wire_arr_list : Union[WireArray, List[WireArray]]
list of WireArrays to connect to track.
track_wires : Union[WireArray, List[WireArray]]
list of tracks as WireArrays.
min_len_mode : MinLenMode
the minimum length extension mode.
debug : bool
True to print debug messages.
Returns
-------
wire_arr : Union[Optional[WireArray], List[Optional[WireArray]]]
WireArrays representing the tracks created. None if nothing to do.
"""
ans = [] # type: List[Optional[WireArray]]
for warr in WireArray.wire_grp_iter(track_wires):
tr = self.connect_to_tracks(wire_arr_list, warr.track_id, track_lower=warr.lower,
track_upper=warr.upper, min_len_mode=min_len_mode,
debug=debug)
ans.append(tr)
if isinstance(track_wires, WireArray):
return ans[0]
return ans
def connect_differential_tracks(self, pwarr_list: Union[WireArray, List[WireArray]],
                                nwarr_list: Union[WireArray, List[WireArray]],
                                tr_layer_id: int, ptr_idx: TrackType, ntr_idx: TrackType, *,
                                width: int = 1, track_lower: Optional[int] = None,
                                track_upper: Optional[int] = None
                                ) -> Tuple[Optional[WireArray], Optional[WireArray]]:
    """Connect the given differential wires to two tracks symmetrically.

    Thin wrapper around connect_matching_tracks() that keeps the
    positive/negative connections symmetric so they have identical
    parasitics.

    Parameters
    ----------
    pwarr_list : Union[WireArray, List[WireArray]]
        positive signal wires to connect.
    nwarr_list : Union[WireArray, List[WireArray]]
        negative signal wires to connect.
    tr_layer_id : int
        track layer ID.
    ptr_idx : TrackType
        positive track index.
    ntr_idx : TrackType
        negative track index.
    width : int
        track width in number of tracks.
    track_lower : Optional[int]
        if given, extend track(s) to this lower coordinate.
    track_upper : Optional[int]
        if given, extend track(s) to this upper coordinate.

    Returns
    -------
    p_track : Optional[WireArray]
        the positive track.
    n_track : Optional[WireArray]
        the negative track.
    """
    p_track, n_track = self.connect_matching_tracks([pwarr_list, nwarr_list],
                                                    tr_layer_id, [ptr_idx, ntr_idx],
                                                    width=width,
                                                    track_lower=track_lower,
                                                    track_upper=track_upper)
    return p_track, n_track
def connect_differential_wires(self, pin_warrs: Union[WireArray, List[WireArray]],
                               nin_warrs: Union[WireArray, List[WireArray]],
                               pout_warr: WireArray, nout_warr: WireArray, *,
                               track_lower: Optional[int] = None,
                               track_upper: Optional[int] = None
                               ) -> Tuple[Optional[WireArray], Optional[WireArray]]:
    """Connect the given differential wires to two WireArrays symmetrically.

    The track layer, indices, and width are taken from the output
    WireArrays; the track extent covers the positive output wire plus any
    user-supplied bounds.  Symmetric connections give identical
    parasitics.

    Parameters
    ----------
    pin_warrs : Union[WireArray, List[WireArray]]
        positive signal wires to connect.
    nin_warrs : Union[WireArray, List[WireArray]]
        negative signal wires to connect.
    pout_warr : WireArray
        positive track wires.
    nout_warr : WireArray
        negative track wires.
    track_lower : Optional[int]
        if given, extend track(s) to this lower coordinate.
    track_upper : Optional[int]
        if given, extend track(s) to this upper coordinate.

    Returns
    -------
    p_track : Optional[WireArray]
        the positive track.
    n_track : Optional[WireArray]
        the negative track.
    """
    out_tid = pout_warr.track_id
    # Track extent: at least the positive output wire, widened by any
    # caller-provided bounds.
    tr_lower = pout_warr.lower if track_lower is None else min(track_lower, pout_warr.lower)
    tr_upper = pout_warr.upper if track_upper is None else max(track_upper, pout_warr.upper)
    return self.connect_differential_tracks(pin_warrs, nin_warrs, out_tid.layer_id,
                                            out_tid.base_index,
                                            nout_warr.track_id.base_index,
                                            width=out_tid.width,
                                            track_lower=tr_lower, track_upper=tr_upper)
def connect_matching_tracks(self, warr_list_list: List[Union[WireArray, List[WireArray]]],
                            tr_layer_id: int, tr_idx_list: List[TrackType], *,
                            width: int = 1,
                            track_lower: Optional[int] = None,
                            track_upper: Optional[int] = None,
                            min_len_mode: MinLenMode = MinLenMode.NONE
                            ) -> List[Optional[WireArray]]:
    """Connect wires to tracks with optimal matching.

    This method connects the wires to tracks in a way that minimizes the parasitic mismatches.

    Parameters
    ----------
    warr_list_list : List[Union[WireArray, List[WireArray]]]
        list of signal wires to connect.
    tr_layer_id : int
        track layer ID.
    tr_idx_list : List[TrackType]
        list of track indices, one per entry of warr_list_list.
    width : int
        track width in number of tracks.
    track_lower : Optional[int]
        if given, extend track(s) to this lower coordinate.
    track_upper : Optional[int]
        if given, extend track(s) to this upper coordinate.
    min_len_mode : MinLenMode
        the minimum length extension mode.

    Returns
    -------
    track_list : List[WireArray]
        list of created tracks.

    Raises
    ------
    ValueError
        if the input lists are empty or have mismatched lengths, or a
        wire is not on a layer adjacent to tr_layer_id.
    """
    # simple error checking
    num_tracks = len(tr_idx_list)  # type: int
    if num_tracks != len(warr_list_list):
        raise ValueError('Connection list parameters have mismatch length.')
    if num_tracks == 0:
        raise ValueError('Connection lists are empty.')

    # Sentinel seeds: replaced by real bounds via min()/max() below.
    if track_lower is None:
        track_lower = COORD_MAX
    if track_upper is None:
        track_upper = COORD_MIN

    # wbounds[0] / wbounds[1]: common [lower, upper] extent shared by all
    # wires on the layer below / above the track layer, so matched wires
    # end up with identical lengths.
    wbounds = [[COORD_MAX, COORD_MIN], [COORD_MAX, COORD_MIN]]
    for warr_list, tr_idx in zip(warr_list_list, tr_idx_list):
        tid = TrackID(tr_layer_id, tr_idx, width=width, grid=self._grid)
        for warr in WireArray.wire_grp_iter(warr_list):
            cur_lay_id = warr.layer_id
            if cur_lay_id == tr_layer_id + 1:
                wb_idx = 1
            elif cur_lay_id == tr_layer_id - 1:
                wb_idx = 0
            else:
                raise ValueError(
                    'WireArray layer {} cannot connect to layer {}'.format(cur_lay_id,
                                                                           tr_layer_id))

            # bnds[wb_idx] is the wire-layer extent, bnds[1 - wb_idx] the
            # track-layer extent implied by this connection.
            bnds = self._layout.connect_warr_to_tracks(warr.track_id, tid,
                                                       warr.lower, warr.upper)
            wbounds[wb_idx][0] = min(wbounds[wb_idx][0], bnds[wb_idx][0])
            wbounds[wb_idx][1] = max(wbounds[wb_idx][1], bnds[wb_idx][1])
            track_lower = min(track_lower, bnds[1 - wb_idx][0])
            track_upper = max(track_upper, bnds[1 - wb_idx][1])

    # fix min_len_mode
    track_lower, track_upper = self.fix_track_min_length(tr_layer_id, width, track_lower,
                                                         track_upper, min_len_mode)
    # extend wires so every member of each group shares the same extent
    ans = []
    for warr_list, tr_idx in zip(warr_list_list, tr_idx_list):
        for warr in WireArray.wire_grp_iter(warr_list):
            # maps layer below -> 0, layer above -> 1 (same mapping as the
            # wb_idx branch above).
            wb_idx = (warr.layer_id - tr_layer_id + 1) // 2
            self._layout.add_warr(warr.track_id, wbounds[wb_idx][0], wbounds[wb_idx][1])

        cur_tid = TrackID(tr_layer_id, tr_idx, width=width, grid=self._grid)
        warr = WireArray(cur_tid, track_lower, track_upper)
        self._layout.add_warr(cur_tid, track_lower, track_upper)
        ans.append(warr)

    self._use_color = True
    return ans
def draw_vias_on_intersections(self, bot_warr_list: Union[WireArray, List[WireArray]],
                               top_warr_list: Union[WireArray, List[WireArray]]) -> None:
    """Put a via at every crossing between the two groups of wires.

    Parameters
    ----------
    bot_warr_list : Union[WireArray, List[WireArray]]
        the bottom wires.
    top_warr_list : Union[WireArray, List[WireArray]]
        the top wires.
    """
    top_wires = list(WireArray.wire_grp_iter(top_warr_list))
    for bot in WireArray.wire_grp_iter(bot_warr_list):
        bot_tid = bot.track_id
        for top in top_wires:
            self._layout.add_via_on_intersections(bot_tid, top.track_id,
                                                  bot.lower, bot.upper,
                                                  top.lower, top.upper, True, True)
def mark_bbox_used(self, layer_id: int, bbox: BBox) -> None:
    """Marks the given bounding-box region as used in this Template.

    Not implemented yet; always raises ValueError.
    """
    # TODO: Fix this
    raise ValueError('Not implemented yet')
def do_max_space_fill(self, layer_id: int, bound_box: Optional[BBox] = None,
                      fill_boundary: bool = True) -> None:
    """Draw density fill on the given layer.

    Parameters
    ----------
    layer_id : int
        the layer to fill.
    bound_box : Optional[BBox]
        region to fill; defaults to this template's bounding box.
    fill_boundary : bool
        flag forwarded to the layout engine's fill routine.
    """
    region = self.bound_box if bound_box is None else bound_box
    space_info = self.grid.tech_info.get_max_space_fill_info(layer_id)
    self._layout.do_max_space_fill(layer_id, region, fill_boundary, space_info.info)
    self._use_color = True
def do_device_fill(self, fill_cls: Type[TemplateBase], **kwargs: Any) -> None:
    """Fill empty region with device fills.

    Parameters
    ----------
    fill_cls : Type[TemplateBase]
        the fill template class instantiated for each empty region.
    **kwargs : Any
        extra fill template parameters; 'width', 'height', and 'edges'
        are overwritten per region.

    Raises
    ------
    ValueError
        if bound_box is not set, or a committed instance has no edge
        information (recursing into the hierarchy is not implemented).
    """
    bbox = self.bound_box
    if bbox is None:
        raise ValueError('bound_box attribute is not set.')

    # R-tree of empty regions still to fill; starts as the whole template
    # box with empty ("no constraint") edge info.
    lookup = RTree()
    ed = ImmutableSortedDict()
    lookup.insert(None, bbox)
    # subtract instance bounding boxes
    for inst in self._instances.values():
        if inst.committed:
            inst_box = inst.bound_box
            inst_edges = inst.master.edge_info
            if inst_edges is None:
                # TODO: implement this. Need to recurse down instance hierarchy
                raise ValueError('Not implemented, see developer.')
            # save items in list, because we'll remove them from the index
            item_list = list(lookup.intersect_iter(inst_box))
            for box, item_id in item_list:
                if box.get_intersect(inst_box).is_physical():
                    # Remove the overlapped empty region; the helper
                    # re-inserts the leftover pieces around inst_box.
                    box_edges = cast(Optional[TemplateEdgeInfo], lookup.pop(item_id))
                    _update_device_fill_area(lookup, ed, inst_box, inst_edges, box, box_edges)

    # draw fill: one fill-template instance per remaining empty region
    cnt = 0
    for box, obj_id in lookup:
        kwargs['width'] = box.w
        kwargs['height'] = box.h
        kwargs['edges'] = lookup[obj_id]
        master = self.new_template(fill_cls, params=kwargs)
        self.add_instance(master, inst_name=f'XFILL{cnt}', xform=Transform(box.xl, box.yl))
        cnt += 1
def get_lef_options(self, options: Dict[str, Any], config: Mapping[str, Any]) -> None:
    """Populate the LEF options dictionary.

    Parameters
    ----------
    options : Dict[str, Any]
        the result LEF options dictionary; modified in place.
    config : Mapping[str, Any]
        the LEF configuration dictionary.

    Raises
    ------
    ValueError
        if this master is not finalized.
    """
    if not self.finalized:
        raise ValueError('This method only works on finalized master.')

    tech_info = self.grid.tech_info
    # Layers listed in the config are reported in detail; every other
    # routing layer up to top_layer is a cover layer.
    detail_layers = set(config.get('detail_layers', []))
    cover_layers = set(range(tech_info.bot_layer, self.top_layer + 1)) - detail_layers

    options['detailed_layers'] = [lay for lay_id in sorted(detail_layers)
                                  for lay, _ in tech_info.get_lay_purp_list(lay_id)]
    options['cover_layers'] = [lay for lay_id in sorted(cover_layers)
                               for lay, _ in tech_info.get_lay_purp_list(lay_id)]
    options['cell_type'] = config.get('cell_type', 'block')
def _update_device_fill_area(lookup: RTree, ed: Param, inst_box: BBox, inst_edges: TemplateEdgeInfo,
sp_box: BBox, sp_edges: Optional[TemplateEdgeInfo]) -> None:
# find instance edge with no constraints
cut_edge_dir: Optional[Direction2D] = None
cut_edge_dir_backup: Optional[Direction2D] = None
two_backup = False
# start at 1 so we prefer cutting horizontally
for edir in (Direction2D.SOUTH, Direction2D.EAST, Direction2D.NORTH, Direction2D.WEST):
if not inst_edges.get_edge_params(edir):
if inst_edges.get_edge_params(edir.flip()):
two_backup = cut_edge_dir_backup is not None
if not two_backup:
cut_edge_dir_backup = edir
else:
cut_edge_dir = edir
break
bxl = sp_box.xl
byl = sp_box.yl
bxh = sp_box.xh
byh = sp_box.yh
ixl = inst_box.xl
iyl = inst_box.yl
ixh = inst_box.xh
iyh = inst_box.yh
if sp_edges is None:
bel = beb = ber = bet = ed
else:
bel, beb, ber, bet = sp_edges.to_tuple()
iel, ieb, ier, iet = inst_edges.to_tuple()
sq_list = [(BBox(bxl, byl, ixl, iyl), (bel, beb, ed, ed)),
(BBox(ixl, byl, ixh, iyl), (ed, beb, ed, iet)),
(BBox(ixh, byl, bxh, iyl), (ed, beb, ber, ed)),
(BBox(ixh, iyl, bxh, iyh), (ier, ed, ber, ed)),
(BBox(ixh, iyh, bxh, byh), (ed, ed, ber, bet)),
(BBox(ixl, iyh, ixh, byh), (ed, ieb, ed, bet)),
(BBox(bxl, iyh, ixl, byh), (bel, ed, ed, bet)),
(BBox(bxl, iyl, ixl, iyh), (bel, ed, iel, ed)),
]
if cut_edge_dir is not None:
# found opposite edges with no constraints, we're | |
4: { 600: [-0.065, 0.063, 6.9e-4, -0.298],
831: [-0.034, 0.060, 6.9e-4, -0.196],
900: [-0.064, 0.083, 6.9e-4, -0.277],
1200: [-0.052, 0.122, 6.9e-4, -0.294],
'other': [-0.050, 0.080, 6.9e-4, -0.250] } }
# Return calibrated roll, yaw, and tilt
return orientation_coeffs[slider][_ruling][0], \
orientation_coeffs[slider][_ruling][1], \
tilt*(1-orientation_coeffs[slider][_ruling][2]) \
+ orientation_coeffs[slider][_ruling][3]
def mask_to_pixel_coordinates(self, x=None, y=None, wave=None, order=1, filename=None,
                              corners=False):
    r"""
    Convert the mask coordinates in mm to pixel coordinates on the
    DEIMOS detector.

    If not already instantiated, the :attr:`slitmask`,
    :attr:`grating`, :attr:`optical_model`, and :attr:`detector_map`
    attributes are instantiated.  If these are not instantiated, a
    file must be provided.  If no arguments are provided, the
    function expects these attributes to be set and will output the
    pixel coordinates for the centers of the slits in the
    :attr:`slitmask` at the central wavelength of the
    :attr:`grating`.

    Method generally expected to be executed in one of two modes:
        - Use the `filename` to read the slit mask and determine the
          detector positions at the central wavelength.
        - Specifically map the provided x, y, and wave values to the
          detector.

    If arrays are provided for both `x`, `y`, and `wave`, the
    returned objects have the shape :math:`N_\lambda\times S_x`,
    where :math:`S_x` is the shape of the x and y arrays.

    Args:
        x (array-like, optional):
            The x coordinates in the slit mask in mm.  Default is to
            use the center of the slits in the :attr:`slitmask`.
        y (array-like, optional):
            The y coordinates in the slit mask in mm.  Default is to
            use the center of the slits in the :attr:`slitmask`.
        wave (array-like, optional):
            The wavelengths in angstroms for the propagated
            coordinates.  Default is to use the central wavelength
            of the :attr:`grating`.
        order (:obj:`int`, optional):
            The grating order.  Default is 1.
        filename (:obj:`str`, optional):
            The filename to use to (re)instantiate the
            :attr:`slitmask` and :attr:`grating`.  Default is to use
            previously instantiated attributes.
        corners (:obj:`bool`, optional):
            Instead of using the centers of the slits in the
            :attr:`slitmask`, return the detector pixel coordinates
            for the corners of all slits.

    Returns:
        numpy.ndarray: Returns 5 arrays: (1-2) the x and y
        coordinates in the image plane in mm, (3) the detector
        (1-indexed) where the slit should land at the provided
        wavelength(s), and (4-5) the pixel coordinates (1-indexed)
        in the relevant detector.

    Raises:
        ValueError:
            Raised if the user provides one but not both of the x
            and y coordinates, if no coordinates are provided or
            available within the :attr:`slitmask`, or if the
            :attr:`grating` hasn't been defined and no file is
            provided.
    """
    # Cannot provide just one of x or y
    if x is None and y is not None or x is not None and y is None:
        raise ValueError('Must provide both x and y or neither to use slit mask.')

    # Use the file to update the slitmask (if no x coordinates are
    # provided) and the grating
    if filename is not None:
        if x is None and y is None:
            # Reset the slit mask
            self.get_slitmask(filename)
        # Reset the grating
        self.get_grating(filename)

    # Check that any coordinates are available
    if x is None and y is None and self.slitmask is None:
        raise ValueError('No coordinates; Provide them directly or instantiate slit mask.')

    # Make sure the coordinates are numpy arrays
    _x = None if x is None else np.atleast_1d(x)
    _y = None if y is None else np.atleast_1d(y)
    if _x is None:
        # Use all the slit centers or corners
        _x = self.slitmask.corners[...,0].ravel() if corners else self.slitmask.center[:,0]
        _y = self.slitmask.corners[...,1].ravel() if corners else self.slitmask.center[:,1]

    # Check that the grating is defined
    if self.grating is None:
        raise ValueError('Must define a grating first; provide a file or use get_grating()')

    # Instantiate the optical model or reset its grating
    if self.optical_model is None:
        self.optical_model = DEIMOSOpticalModel(self.grating)
    else:
        self.optical_model.reset_grating(self.grating)

    # Instantiate the detector map, if necessary
    self.get_detector_map()

    # Compute the detector image plane coordinates (mm)
    x_img, y_img = self.optical_model.mask_to_imaging_coordinates(_x, _y, wave=wave,
                                                                  order=order)
    # Reshape if computing the corner positions
    if corners:
        x_img = x_img.reshape(self.slitmask.corners.shape[:2])
        y_img = y_img.reshape(self.slitmask.corners.shape[:2])

    # Use the detector map to convert to the detector coordinates;
    # concatenates the (x_img, y_img) tuple with the (detector, x_pix,
    # y_pix) tuple returned by ccd_coordinates.
    return (x_img, y_img) + self.detector_map.ccd_coordinates(x_img, y_img)
class DEIMOSOpticalModel(OpticalModel):
    """Optical model for the Keck/DEIMOS spectrograph.

    Specializes the generic OpticalModel with DEIMOS' optical constants
    (names in parentheses refer to the variables in the original
    xidl/DEEP2 IDL code) and adds the tent-mirror reflection to the
    mask-to-grating ray trace.
    """
    # TODO: Are focal_r_surface (!R_IMSURF) and focal_r_curvature
    # (!R_CURV) supposed to be the same? If so, consolidate these into
    # a single number.
    def __init__(self, grating):
        """Instantiate the parent model with the DEIMOS constants and set
        up the tent-mirror reflection transform."""
        super(DEIMOSOpticalModel, self).__init__(
                    20018.4,             # Pupil distance in mm (!PPLDIST, !D_1)
                    2133.6,              # Radius of the image surface in mm (!R_IMSURF)
                    2124.71,             # Focal-plane radius of curvature in mm (!R_CURV)
                    2120.9,              # Mask radius of curvature in mm (!M_RCURV)
                    np.radians(6.),      # Mask tilt angle in radians (!M_ANGLE)
                    128.803,             # Mask y zero point in mm (!ZPT_YM)
                    3.378,               # Mask z zero-point in mm (!MASK_HT0)
                    2197.1,              # Collimator distance in mm (sys.COL_DST)
                    4394.2,              # Collimator radius of curvature in mm (!R_COLL)
                    -0.75,               # Collimator curvature constant (!K_COLL)
                    np.radians(0.002),   # Collimator tilt error in radians (sys.COL_ERR)
                    0.0,                 # Collimator tilt phi angle in radians (sys.COL_PHI)
                    grating,             # DEIMOS grating object
                    np.radians(2.752),   # Camera angle in radians (sys.CAM_ANG)
                    np.pi/2,             # Camera tilt phi angle in radians (sys.CAM_PHI)
                    382.0,               # Camera focal length in mm (sys.CAM_FOC)
                    DEIMOSCameraDistortion(),    # Object used to apply/remove camera distortions
                    np.radians(0.021),   # ICS rotation in radians (sys.MOS_ROT)
                    [-0.234, -3.822])    # Camera optical axis center in mm (sys.X_OPT,sys.Y_OPT)

        # Include tent mirror
        self.tent_theta = np.radians(71.5-0.5)  # Tent mirror theta angle (sys.TNT_ANG)
        self.tent_phi = np.radians(90.+0.081)   # Tent mirror phi angle (sys.TNT_PHI)

        #TENT MIRROR: this mirror is OK to leave in del-theta,phi
        self.tent_reflection \
                = OpticalModel.get_reflection_transform(self.tent_theta, self.tent_phi)

    def reset_grating(self, grating):
        """Replace the grating used by the model."""
        self.grating = grating

    def mask_coo_to_grating_input_vectors(self, x, y):
        """
        Propagate rays from the mask plane to the grating.

        Taken from xidl/DEEP2/spec2d/pro/model/pre_grating.pro

        Need to override parent class to add tent mirror reflection.
        """
        r = super(DEIMOSOpticalModel, self).mask_coo_to_grating_input_vectors(x, y)
        # Reflect off the tent mirror and return
        return OpticalModel.reflect(r, self.tent_reflection)
class DEIMOSCameraDistortion:
    """Apply or remove the DEIMOS camera optical distortion."""

    def __init__(self):
        # Even-order polynomial distortion coefficients.
        self.c0 = 1.
        self.c2 = 0.0457563
        self.c4 = -0.3088123
        self.c6 = -14.917

        # Tabulate the forward (distortion-removal) mapping on a dense grid
        # and invert it numerically with a linear interpolator.
        grid = np.linspace(-0.6, 0.6, 1000)
        distorted = self.remove_distortion(grid)
        self.interpolator = interpolate.interp1d(distorted, grid)

    def remove_distortion(self, x):
        """Remove the camera distortion from the input coordinate(s)."""
        xx = np.square(x)
        denom = self.c0 + xx * (self.c2 + xx * (self.c4 + xx * self.c6))
        return x / denom

    def apply_distortion(self, y):
        """Apply the camera distortion via the tabulated inverse mapping."""
        valid = (y > self.interpolator.x[0]) & (y < self.interpolator.x[-1])
        if not np.all(valid):
            warnings.warn('Some input angles outside of valid distortion interval!')
        result = np.zeros_like(y)
        # Out-of-range entries are left at zero, as in the reference code.
        result[valid] = self.interpolator(y[valid])
        return result
class DEIMOSDetectorMap(DetectorMap):
    """
    A map of the center coordinates and rotation of each CCD in DEIMOS.

    !! PIXEL COORDINATES ARE 1-INDEXED !!
    """
    def __init__(self):
        # Number of CCDs in the mosaic.
        self.nccd = 8

        # Pixels per chip along each dimension.
        self.npix = np.array([2048, 4096])

        # Physical CCD pixel size in mm.
        self.pixel_size = 0.015

        # Nominal inter-chip gap along each dimension in mm.
        self.ccd_gap = np.array([1, 0.1])

        # CCD edge width along each dimension in mm.
        self.ccd_edge = np.array([0.154, 0.070])

        # Effective chip extent in pixels, including edges and gaps.
        self.ccd_size = self.npix + (2*self.ccd_edge + self.ccd_gap)/self.pixel_size

        # Chip centers: (half-)integer grid positions scaled by the effective
        # chip size, plus measured per-chip offsets in mm.
        grid = np.array([[-1.5, -0.5], [-0.5, -0.5], [0.5, -0.5], [1.5, -0.5],
                         [-1.5, 0.5], [-0.5, 0.5], [0.5, 0.5], [1.5, 0.5]])
        shift = np.array([[-20.05, 14.12], [-12.64, 7.25], [0.00, 0.00], [-1.34, -19.92],
                          [-19.02, 16.46], [-9.65, 8.95], [1.88, 1.02], [4.81, -24.01]])
        self.ccd_center = grid * self.ccd_size[None, :] + shift

        # Per-chip rotation matrices built from the measured angles.
        self.rotation = np.radians([-0.082, 0.030, 0.0, -0.1206, 0.136, -0.06, -0.019, -0.082])
        cos_r = np.cos(self.rotation)
        sin_r = np.sin(self.rotation)
        self.rot_matrix = np.array([cos_r, -sin_r, sin_r, cos_r]).T.reshape(self.nccd, 2, 2)

        # ccd_geom.pro has offsets by sys.CN_XERR, but these are all 0.
'''
def deimos_image_sections(inp, det):
"""
Parse the image for the raw image shape and data sections
Args:
inp (str or `astropy.io.fits.HDUList`_ object):
det (int):
Returns:
tuple:
shape, dsec, osec, ext_items
ext_items is a large tuple of bits and pieces for other methods
ext_items = hdu, chips, postpix, image
"""
# Check for file; allow for extra .gz, etc. suffix
if isinstance(inp, str):
fil = glob.glob(inp + '*')
if len(fil) != 1:
msgs.error('Found | |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks_dev/dag.ipynb (unless otherwise specified).
__all__ = ['pickle_load', 'pickle_dump', 'recursive_dict', 'undefault_dict', 'recursive_assign', 'folder_to_dict',
'dict_to_folder', 'BaseDAG', 'model_to_dot', 'DAGEstimator']
# Cell
from warnings import warn
from shutil import rmtree
from pathlib import Path
import networkx as nx
from IPython import display
try:
import pydot_ng as pydot
except:
import pydot
from sklearn.base import BaseEstimator, clone
from .node import NodeEstimator, Input, Target, BaseInputNode, _validate_name
from .utils import remove_folder_or_file
# Cell
def pickle_load(path, **kwargs):
    '''
    Load a pickled object from ``path`` and return it.

    Extra ``kwargs`` are forwarded to the underlying ``load`` call.  Any
    failure is re-raised with the absolute file path appended to the
    message to ease debugging.
    '''
    # Fix: ``cloudpickle`` is used but never imported at module level, so the
    # original function raised NameError on every call.  Import it lazily and
    # fall back to the stdlib pickle (read-compatible with cloudpickle
    # streams) when cloudpickle is unavailable.
    try:
        import cloudpickle as _pickler
    except ImportError:
        import pickle as _pickler

    path = Path(path)
    try:
        with open(path.absolute(), 'rb') as f:
            obj = _pickler.load(f, **kwargs)
    except Exception as e:
        raise Exception(f'{str(e)} [{path.absolute()}]') from e
    return obj
def pickle_dump(obj, path, **kwargs):
    '''
    Serialize ``obj`` to ``path``.

    Extra ``kwargs`` are forwarded to the underlying ``dump`` call.  Any
    failure is re-raised with the absolute file path appended to the
    message to ease debugging.
    '''
    # Fix: ``cloudpickle`` is used but never imported at module level, so the
    # original function raised NameError on every call.  Import it lazily;
    # the stdlib-pickle fallback covers plain objects, though it cannot
    # serialize lambdas/closures the way cloudpickle can.
    try:
        import cloudpickle as _pickler
    except ImportError:
        import pickle as _pickler

    path = Path(path)
    try:
        with open(path.absolute(), 'wb') as f:
            _pickler.dump(obj, f, **kwargs)
    except Exception as e:
        raise Exception(f'{str(e)} [{path.absolute()}]') from e
    return
def recursive_dict():
    '''
    Return a ``defaultdict`` whose missing keys auto-create nested
    recursive dicts, allowing ``d['a']['b']['c'] = 1`` without setup.
    '''
    # Fix: ``defaultdict`` is never imported at module level, so the
    # original raised NameError when called; import it locally.
    from collections import defaultdict
    return defaultdict(recursive_dict)
def undefault_dict(dictionary):
    '''
    Recursively convert a (default)dict and all nested dict values into
    plain ``dict`` instances, and return the converted top-level dict.

    Fix: the original returned early — still as a ``defaultdict`` — as soon
    as it encountered the first non-dict value, leaving later keys
    unconverted; this version always converts the full structure.
    '''
    for key in dictionary:
        # ``defaultdict`` is a ``dict`` subclass, so a single isinstance
        # check covers both branches of the original code.
        if isinstance(dictionary[key], dict):
            dictionary[key] = undefault_dict(dictionary[key])
    return dict(dictionary)
def recursive_assign(dictionary, keys, value):
    '''
    Assign ``value`` to ``dictionary[keys[0]][keys[1]]...[keys[-1]]`` and
    return the (mutated) top-level dictionary.

    All intermediate keys are expected to already map to dict-like objects.
    '''
    target = dictionary
    # Walk down to the innermost container, then assign the final key.
    for key in keys[:-1]:
        target = target[key]
    target[keys[-1]] = value
    return dictionary
def folder_to_dict(root_folder, patterns = ['*.pickle','*.pkl','*.sav']):
    '''
    Recursively collect pickled files under ``root_folder`` that match any
    of ``patterns`` and return a dict mapping each file path (as ``str``)
    to its unpickled object.
    '''
    root = Path(root_folder)
    matches = []
    for pattern in patterns:
        matches.extend(root.rglob(pattern))
    return {str(filepath): pickle_load(filepath) for filepath in matches}
def dict_to_folder(dictionary, path = '.', assert_new_folder = True, override = False):
    '''
    Persist ``dictionary`` values as pickles in a folder tree under ``path``
    whose layout mirrors the dictionary keys (each key is a relative file
    path).

    With ``assert_new_folder`` the top-level directory of each key must not
    already exist.  Existing target files raise ``FileExistsError`` unless
    ``override`` is set, in which case they are overwritten with a warning.
    '''
    root_path = Path(path)
    for key, value in dictionary.items():
        relative = Path(key)
        if assert_new_folder:
            top_dir = root_path/relative.parts[0]
            if top_dir.exists():
                raise FileExistsError(f'{top_dir.absolute()} already exists.')
        target = root_path/relative
        if target.exists():
            if not override:
                raise FileExistsError(f'{target.absolute()} already exists.')
            warn(f'{target.absolute()} already exists and will be overriden.')
        pickle_dump(value, target)
    return
# Cell
def _traverse_upstream(output_node):
    '''
    traverses all nodes in the dag that ends at output_node.
    returns nodes and directed edges

    NOTE(review): ``Cacher`` is not imported or defined in this module, so
    this function raises NameError when called — confirm where Cacher lives.
    '''
    # The mutable default arguments act as shared accumulators: the
    # recursive call below deliberately omits ``traversed_edges`` so all
    # recursion levels append to the same default list.  The inner function
    # is re-defined on every outer call, so the defaults are fresh per call.
    def __traverse_upstream(node, traversed_nodes = [], traversed_edges = []):
        if not isinstance(node, (NodeEstimator, Input)):
            raise TypeError(f'Node should be instance of NodeEstimator or Input, got {type(node)}')
        node = Cacher(node, f'{node.name}.pkl') #append Cacher to graph
        traversed_nodes.append(node)
        if not isinstance(node.get(), BaseInputNode): #keep traversing until reaches an InputNode
            if not node.get().input_nodes:
                raise ValueError(f'{node.get().name} input nodes are empty. Populate {node.get().name} by calling the object and passing input_nodes')
            for input_node in node.get().input_nodes:
                #make input node a Cacher
                input_node = Cacher(input_node, f'{input_node.name}.pkl')
                traversed_edges.append((input_node, node))
                # NOTE(review): the recursive call wraps ``input_node`` in a
                # second Cacher inside the nested call — confirm the double
                # wrapping is intended.
                __traverse_upstream(input_node, traversed_nodes)
        return traversed_nodes, traversed_edges

    traversed_nodes, traversed_edges = __traverse_upstream(output_node)
    return traversed_nodes, traversed_edges
# Cell
class BaseDAG(BaseEstimator):
    """Base class for DAG estimators; currently only anchors the hierarchy
    on sklearn's ``BaseEstimator`` (see the commented-out design notes
    below in this module)."""
    pass
#class BaseDAG(BaseNode):
# '''
# a base class containing a safe __getattr__ method
# to ensure that every method and attribute is accessed safely inside
# pipeline, i.e. all serialized files are dumped under the same root folder with pipeline name
# '''
#
# def __getattribute__(self, attr):
# '''
# returns a wrapped session method or attribute.
# if attribute is property, opertaions are run safely inside PipelineSession aswell
# '''
#
# name = super().__getattribute__('name')
# safe_run = super().__getattribute__('_safe_run')
#
# #safely get attribute value, useful in case attr is property
# attr_value = safe_run(super().__getattribute__, name)(attr)
#
# #case where attribute is method
# if callable(attr_value):
# attr_value = safe_run(attr_value, name)
#
# return attr_value
#
# def _safe_run(self, method, name):
# '''
# runs a method or retrieves an attribute safely inside a PipelineSession
# '''
#
# def session_wrapped_method(*args, **kwargs):
# with PipelineSession(pipeline_name = name):
#
# result = method(*args, **kwargs)
#
# return result
#
# return session_wrapped_method
# Cell
def model_to_dot(graph, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96, subgraph=False, **kwargs):
    '''
    Render a networkx node graph as a ``pydot.Dot`` object.

    Input nodes are labelled with their name only; estimator nodes get a
    record-shaped label showing the node name (with a "(Frozen)" marker
    when applicable), the node class and the estimator class.  Extra
    ``kwargs`` are forwarded as graph-level dot attributes.
    '''
    dot = pydot.Dot()
    dot.set('rankdir', rankdir)
    dot.set('concentrate', True)
    dot.set('dpi', dpi)
    dot.set_node_defaults(shape='record')
    for option, value in kwargs.items():
        dot.set(option, value)

    for node in graph:
        if isinstance(node, Input):
            label = node.name
        else:
            name = node.name + ' (Frozen)' if node.frozen else node.name
            label = f'{{{name} | {{{node.__class__.__name__} | {str(node.estimator).split("(")[0]}}} }}'
        dot.add_node(pydot.Node(node.name, label=label))

    for src, dst in graph.edges:
        # Avoid duplicating edges already present in the dot graph.
        if not dot.get_edge(src.name, dst.name):
            dot.add_edge(pydot.Edge(src.name, dst.name))
    return dot
# Cell
from sklearn.base import clone
from copy import deepcopy
from sklearn.exceptions import NotFittedError
# Cell
#TODO: define best name for class
class DAGEstimator(BaseDAG):
    def __init__(self, output_node, name, dirpath = './_skdag_cache'):
        '''
        Build a DAG estimator by traversing and cloning the graph that ends
        at ``output_node``; all cloned nodes are renamed with ``name`` as a
        prefix.
        '''
        self.name = self._validate_name(name)
        # NOTE(review): ``dirpath`` is also defined below as a read-only
        # property (returning d6tflow.settings.dirpath/self.name), so this
        # assignment raises AttributeError — confirm the intended behavior.
        self.dirpath = Path(dirpath)
        # create node representations
        self._make_private_attributes(output_node)
        # The cloned output node is looked up by its prefixed name.
        self.output_node = [i for i in self.__graph if i.name == f'{self.name}__{output_node.name}'][0]
        return
    def _validate_name(self, name):
        # Placeholder: no validation is performed yet; the name is returned
        # as-is.  NOTE(review): ``_validate_name`` imported from .node is
        # unused here — confirm whether it should be applied.
        return name
    def __getitem__(self, item):
        '''Return the node registered under ``item`` (the prefixed node name).'''
        return self.nodes_dict[item]
def _make_private_attributes(self, output_node):
'''
creates object private attributes
'''
#node attributes
graph = self._make_graph(output_node, make_clones = True)
self.__graph = self._clone_graph(graph)
#make names with pipename prefix
[setattr(node, 'name', f'{self.name}__' + node.name) for node in self.__graph]
self.__nodes = [node for node in self.__graph]
self.__nodes_dict = {node.name:node for node in self.__graph}
self.__input_nodes = tuple([i for i in self.__graph if isinstance(i, Input)])
self.__input_nodes_dict = {i.name:i for i in self.__graph if isinstance(i, Input)}
return
def _make_graph(self, output_node, make_clones = True):
'''
creates a graph (DAG) traversing output node until reaches inputs
'''
nodes,edges = _traverse_upstream(output_node)
g = nx.DiGraph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
return g
    def _clone_graph(self, graph):
        '''
        Deep-copy every node of ``graph`` and rebuild the DAG over the
        clones, so this estimator owns its own node instances.
        '''
        clones = {}
        for node in nx.algorithms.dag.topological_sort(graph):
            clones[node.name] = deepcopy(node)
        # NOTE(review): assumes the final node in topological order is the
        # output node — confirm this holds for all graph shapes.
        output_clone = clones[node.name]
        #replace nodes by its clones
        for node in graph:
            if not isinstance(node, Input):
                # Re-calling the node with the cloned children rewires its
                # input_nodes to the clones.
                node = node(*[clones[child.name] for child in node.input_nodes])
        graph = self._make_graph(output_clone, make_clones = False)
        return graph
    def _make_inputs_dict(self, X, y):
        '''
        creates dict of (node.name,Input) and (node.name,Target) pairs
        returns dicts of X, y
        '''
        # NOTE(review): currently a pass-through placeholder — X and y are
        # returned unchanged despite the docstring; confirm the intended
        # name-mapping logic.
        return X, y
    def _populate_input_nodes(self, X, y):
        '''
        X and y should be a dict of node.name:value

        Fits every Input node with its entry from X and every Target node
        with its entry from y, in topological order; all other node types
        are left untouched.
        '''
        X, y = self._make_inputs_dict(X, y)
        for node in nx.algorithms.dag.topological_sort(self.graph):
            #populate inputs and static targets
            if isinstance(node, Input):
                node.fit(X[node.name])
                continue
            if isinstance(node, Target):
                node.fit(y[node.name])
                continue
        return
def _dask_fit_pipeline(self, inputs, targets, **kwargs):
'''
inputs and targets should be a dict of (node.name,Input/Target) pairs
'''
outputs = {}
targets = {}
estimators = {}
for node in nx.algorithms.dag.topological_sort(self.graph):
#populate inputs and targets. assume they are already fitted/populated
if isinstance(node, Input):
outputs[node.name] = delayed(node.transform)()
if isinstance(node, Target):
outputs[node.name] = delayed(node.transform)()
if isinstance(node, NodeEstimator): #ignore Input and Target nodes
X = (outputs[p.name] for p in node.input_nodes) #delayed object
y = targets[node.target_node.name] #also delayed object
estimators[node.name] = delayed(node.fit)(X,y)
outputs[node.name] = delayed(estimators[node.name].transform)(X)
return estimators
def _dask_transform_pipeline(self, inputs, targets, output_node = None, **kwargs):
'''
inputs should be a dict of (node.name,Input) pairs
'''
if output_node is None:
output_node = self.output_node
outputs = {}
for node in nx.algorithms.dag.topological_sort(self.graph):
#populate inputs and targets. assume they are already fitted/populated
if isinstance(node, Input):
outputs[node.name] = delayed(node.transform)()
if isinstance(node, Target):
outputs[node.name] = delayed(node.transform)()
if isinstance(node, NodeEstimator): #ignore Input and Target nodes
X = (outputs[p.name] for p in node.input_nodes) #delayed object
outputs[node.name] = delayed(node.transform)(X)
return outputs
    @property
    def dirpath(self,):
        # NOTE(review): ``d6tflow`` is not imported in this module, so
        # accessing this property raises NameError; it also shadows the
        # ``self.dirpath = Path(dirpath)`` assignment attempted in
        # __init__ — confirm which behavior is intended.
        return d6tflow.settings.dirpath/self.name
    @property
    def nodes(self,):
        """All nodes of the cloned, renamed DAG (list)."""
        return self.__nodes

    @property
    def nodes_dict(self,):
        """Mapping of prefixed node name -> node."""
        return self.__nodes_dict

    @property
    def graph(self,):
        """The underlying networkx DiGraph of cloned nodes."""
        return self.__graph

    @property
    def input_nodes(self,):
        """Tuple of the DAG's Input nodes."""
        return self.__input_nodes

    @property
    def input_nodes_dict(self,):
        """Mapping of prefixed input-node name -> Input node."""
        return self.__input_nodes_dict

    @property
    def y_loader(self,):
        # NOTE(review): ``self.__y_loader`` is never assigned anywhere in
        # this class as shown — accessing this property raises
        # AttributeError; confirm where it should be set.
        return self.__y_loader
def _reset_data(self,):
'''
reset input of transform nodes
'''
#reset inputs and intermediate inputs of transform nodes
for node in self.nodes:
if not node.output_path is None:
#reset input of transform nodes
remove_folder_or_file(node.output_path)
#reset y
self._reset_y_loader()
return
    def _set_y_loader(self, y):
        '''
        sets y_loader for NodeTransformers (skips Input)

        ``y`` may be a dict of (node name -> target) to assign per-node
        targets, or a single value shared by all transformer nodes.

        NOTE(review): ``input_task_factory`` and ``NodeTransformer`` are not
        imported or defined in this module as shown — confirm their source.
        '''
        if isinstance(y, dict):
            # Re-key the user-supplied dict with the pipeline-name prefix.
            y = {self.name + f'__{k}':v for k,v in y.items()}
            passed_keys = set(y)
            existing_keys = set(self.nodes_dict)
            wrong_passed_keys = passed_keys - existing_keys
            if wrong_passed_keys:
                raise ValueError(f'Unknown node names: {wrong_passed_keys}')
            #create input tasks
            default_loader = input_task_factory(self.name + f'__None', y = None)
            y_loader = {}
            for key in y:
                y_loader[key] = input_task_factory(f'{key}__target', y = y[key])
            for node in self.__graph:
                #if not input node, assign y
                if isinstance(node, NodeTransformer):
                    if node.name in y_loader:
                        node._set_y_loader(y_loader[node.name])
                    else:
                        # Nodes without an explicit target get the None loader.
                        node._set_y_loader(default_loader)
        else:
            # Single shared target for every transformer node.
            y_loader = input_task_factory(self.name + '__target', y = y)
            for node in self.__graph:
                #if not input node, assign y
                if isinstance(node, NodeTransformer):
                    node._set_y_loader(y_loader)
        return
def _reset_y_loader(self,):
'''
resets y_loader for NodeTransformers (skips Input)
'''
try:
self.y_loader().output().path.unlink()
except Exception as e:
warn(str(e))
pass
return
def _reset_estimators_states(self,):
'''
resets states (fitted objects) of nodes in DAG
'''
for node in self.nodes:
if not isinstance(node, Input):
if not node.estimator_path is None:
remove_folder_or_file(node.estimator_path)
return
def _check_graph(self,):
if not nx.is_connected(nx.Graph(self.__graph)):
raise | |
attribute telling until when the version
is valid. Not used if None. Default: None
- tofinder: a function(targetconnection, row, namemapping)
returning a value for the toatt. If not set, fromfinder is used
(note that if fromfinder is None, it is set to a default
function -- see the comments about fromfinder. The possibly
modified value is used here.) Default: None
- maxto: the value to use for toatt for new members. Default: None
- srcdateatt: the name of the attribute in the source data that
holds a date showing when a version is valid from. The data is
converted to a datetime by applying srcdateparser on it.
If not None, the date attribute is also used when comparing
a potential new version to the newest version in the DB.
If None, the date fields are not compared. Default: None
- srcdateparser: a function that takes one argument (a date in the
format scrdateatt has) and returns a datetime.datetime.
If srcdateatt is None, srcdateparser is not used.
Default: pyetlmr.ymdparser (i.e., the default value is a
function that parses a string of the form 'yyyy-MM-dd')
- type1atts: a sequence of attributes that should have type1 updates
applied. Default: ()
- cachesize: the maximum size of the cache. 0 disables caching
and values smaller than 0 allows unlimited caching
- idfinder: a function(row, namemapping) -> key value that assigns
a value to the primary key attribute based on the content of the
row and namemapping. If not given, it is assumed that the primary
key is an integer, and the assigned key value is then the current
maximum plus one.
- targetconnection: The ConnectionWrapper to use. If not given,
the default target connection is used.
"""
Dimension.__init__(self, name, key, attributes, lookupatts,
idfinder, defaultidvalue, None, targetconnection)
if not versionatt:
raise ValueError, 'A version attribute must be given'
self.versionatt = versionatt
self.fromatt = fromatt
if fromfinder is not None:
self.fromfinder = fromfinder
elif srcdateatt is not None: #and fromfinder is None
self.fromfinder = pyetlmr.datereader(srcdateatt, srcdateparser)
else: #fromfinder is None and srcdateatt is None
self.fromfinder = pyetlmr.today
self.toatt = toatt
if tofinder is None:
tofinder = self.fromfinder
self.tofinder = tofinder
self.maxto = maxto
self.srcdateatt = srcdateatt
self.srcdateparser = srcdateparser
self.type1atts = type1atts
self.caching = True
if cachesize > 0:
self.rowcache = FIFODict(cachesize)
self.keycache = FIFODict(cachesize)
elif cachesize < 0:
self.rowcache = {}
self.keycache = {}
else:
self.caching = False
# Check that versionatt, fromatt and toatt are also declared as
# attributes
for var in (versionatt, fromatt, toatt):
if var and var not in attributes:
raise ValueError, "%s not present in attributes argument" % \
(var,)
# Now extend the SQL from Dimension such that we use the versioning
self.keylookupsql += " ORDER BY %s DESC" % (versionatt,)
if toatt:
self.updatetodatesql = \
"UPDATE %s SET %s = %%(%s)s WHERE %s = %%(%s)s" % \
(name, toatt, toatt, key, key)
def lookup(self, row, namemapping={}):
""" Find the key for the newest version with the given values.
Arguments:
- row: a dict which must contain at least the lookup attributes
- namemapping: an optional namemapping (see module's documentation)
"""
res = Dimension.lookup(self, row, namemapping)
return res
    def scdlookup(self, row, namemapping={}):
        """Find the key for the version valid at the row's source date.

        Builds a lookup that, besides the ordinary lookup attributes,
        constrains the validity interval:
        fromatt <= srcdateatt < COALESCE(toatt, '9999-12-31').

        Arguments:
        - row: a dict which must contain the lookup attributes and srcdateatt
        - namemapping: an optional namemapping (see module's documentation)
        """
        keylookupsql = "SELECT " + self.key + " FROM " + self.name + " WHERE " + \
            " AND ".join(["%s = %%(%s)s" % (lv, lv) for lv in self.lookupatts]) + " AND %s<=%%(%s)s AND %%(%s)s<COALESCE(%s,'9999-12-31')" \
            % (self.fromatt, self.srcdateatt, self.srcdateatt, self.toatt)
        #rdt = self.srcdateparser(row[self.srcdateatt])
        #modref = self.targetconnection.getunderlyingmodule()
        #rowdate = modref.Date(rdt.year, rdt.month, rdt.day)
        #row['rowdate'] = rowdate
        #namemapping[self.fromatt] = 'rowdate'
        self.targetconnection.execute(keylookupsql, row, namemapping)
        # NOTE(review): assumes fetchonetuple() returns a row even when no
        # version matches (e.g. a tuple of Nones) — confirm for the
        # connection wrapper in use.
        keyvalue = self.targetconnection.fetchonetuple()[0]
        if keyvalue is None:
            keyvalue = self.defaultidvalue
        return keyvalue
    def ensure(self, row, namemapping={}):
        """Lookup or insert a version of a slowly changing dimension member.

        NB: Has side-effects on the given row.

        Arguments:
        - row: a dict containing the attributes for the member.
          key, versionatt, fromatt, and toatt are not required to be
          present but will be added (if defined).
        - namemapping: an optional namemapping (see module's documentation)
        """
        # Resolve the (possibly renamed) special attribute names up front.
        versionatt = (namemapping.get(self.versionatt) or self.versionatt)
        key = (namemapping.get(self.key) or self.key)
        if self.fromatt: # this protects us against None in namemapping.
            fromatt = (namemapping.get(self.fromatt) or self.fromatt)
        else:
            fromatt = None
        if self.toatt:
            toatt = (namemapping.get(self.toatt) or self.toatt)
        else:
            toatt = None
        if self.srcdateatt:
            srcdateatt = (namemapping.get(self.srcdateatt) or self.srcdateatt)
        else:
            srcdateatt = None

        # Get the newest version and compare to that
        keyval = self.lookup(row, namemapping)
        if keyval is None or keyval==self.defaultidvalue:
            # It is a new member. We add the first version.
            row[versionatt] = 1
            if fromatt and fromatt not in row:
                # strip("':date ") normalizes DB-specific date reprs to a
                # plain 'yyyy-mm-dd'-style string before storing.
                row[fromatt] = str(self.fromfinder(self.targetconnection,
                                               row, namemapping)).strip("':date ")
            if toatt and toatt not in row:
                row[toatt] = self.maxto
            row[key] = self.insert(row, namemapping)
            return row[key]
        else:
            # There is an existing version. Check if the attributes are
            # identical
            type1updates = {} # for type 1
            addnewversion = False # for type 2
            other = self.getbykey(keyval) # the full existing version
            for att in self.all:
                # Special (non-)handling of versioning and key attributes:
                if att in (self.key, self.versionatt, self.toatt):
                    # Don't compare these - we don't expect them to have
                    # meaningful values in row
                    continue
                # We may have to compare the "from dates"
                elif att == self.fromatt:
                    if self.srcdateatt is not None:
                        # We have to compare the dates in row[..] and other[..]
                        # so we have to make sure that the value from row is
                        # converted to a Date in the native DB format.
                        # As it may be impossible to compare these directly,
                        # we cast both to strings before comparing...
                        rdt = self.srcdateparser(row[srcdateatt])
                        modref = self.targetconnection.getunderlyingmodule()
                        rowdate = modref.Date(rdt.year, rdt.month, rdt.day)
                        if str(rowdate).strip("':date ") != other[self.fromatt]:
                            addnewversion = True
                    else: # self.srcdateatt is None and we don't compare dates
                        continue
                # Handling of "normal" attributes:
                else:
                    mapped = (namemapping.get(att) or att)
                    if row[mapped] != other[att]:
                        if att in self.type1atts:
                            type1updates[att] = row[mapped]
                        else:
                            addnewversion = True
                if addnewversion and not self.type1atts:
                    # We don't have to look for possible type 1 updates
                    # and we already know that a type 2 update is needed.
                    break
                #else: continue

            if len(type1updates) > 0:
                # Some type 1 updates were found
                self.__performtype1updates(type1updates, other)

            if addnewversion: # type 2
                # Make a new row version and insert it
                row.pop(key, None)
                row[versionatt] = other[self.versionatt] + 1
                if fromatt:
                    row[fromatt] = str(self.fromfinder(self.targetconnection,
                                                   row, namemapping)).strip("':date ")
                if toatt:
                    row[toatt] = self.maxto
                row[key] = self.insert(row, namemapping)
                # Update the todate attribute in the old row version in the DB.
                if toatt:
                    toattval = self.tofinder(self.targetconnection, row,
                                             namemapping)
                    self.targetconnection.execute(self.updatetodatesql,
                                     {self.key : keyval, self.toatt : toattval})
            else:
                # Update the row dict by giving version and dates and the key
                row[key] = keyval
                row[versionatt] = other[self.versionatt]
                if self.fromatt:
                    row[fromatt] = other[self.fromatt]
                if self.toatt:
                    row[toatt] = other[self.toatt]

            return row[key]
def _before_lookup(self, row, namemapping):
if self.caching:
namesinrow =[(namemapping.get(a) or a) for a in self.lookupatts]
searchtuple = tuple([row[n] for n in namesinrow])
return self.keycache.get(searchtuple, None)
def _after_lookup(self, row, namemapping, resultkey):
if self.caching and resultkey is not None:
namesinrow =[(namemapping.get(a) or a) for a in self.lookupatts]
searchtuple = tuple([row[n] for n in namesinrow])
self.keycache[searchtuple] = resultkey
def _before_getbykey(self, keyvalue):
if self.caching:
res = self.rowcache.get(keyvalue)
if res is not None:
return dict(zip(self.all, res))
return None
def _after_getbykey(self, keyvalue, resultrow):
if self.caching and resultrow[self.key] is not None:
# if resultrow[self.key] is None, no result was found in the db
self.rowcache[keyvalue] = tuple([resultrow[a] for a in self.all])
def _before_update(self, row, namemapping):
""" """
# We have to remove old values from the caches.
key = (namemapping.get(self.key) or self.key)
for att in self.lookupatts:
if (att in namemapping or att in row):
# A lookup attribute is changed. We don't know the old value
# and thus not if the old value is in | |
# Source: tests/examples/minlplib/supplychainp1_022020.py (repo: ouyang-w-19/decogo)
# MINLP written by GAMS Convert at 04/21/18 13:54:26
#
# Equation counts
# Total E G L N X C B
# 5301 861 840 3600 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 2941 2481 460 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 15041 15001 40 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.b1 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b2 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b3 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b4 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b5 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b6 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b7 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b8 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b9 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b10 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b11 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b12 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b13 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b14 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b15 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b16 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b17 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b18 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b19 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b20 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b21 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b22 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b23 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b24 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b25 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b26 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b27 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b28 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b29 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b30 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b31 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b32 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b33 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b34 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b35 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b36 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b37 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b38 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b39 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b40 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b41 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b42 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b43 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b44 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b45 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b46 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b47 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b48 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b49 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b50 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b51 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b52 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b53 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b54 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b55 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b56 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b57 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b58 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b59 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b60 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b61 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b62 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b63 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b64 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b65 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b66 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b67 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b68 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b69 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b70 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b71 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b72 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b73 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b74 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b75 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b76 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b77 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b78 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b79 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b80 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b81 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b82 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b83 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b84 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b85 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b86 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b87 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b88 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b89 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b90 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b91 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b92 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b93 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b94 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b95 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b96 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b97 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b98 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b99 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b100 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b101 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b102 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b103 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b104 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b105 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b106 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b107 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b108 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b109 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b110 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b111 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b112 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b113 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b114 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b115 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b116 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b117 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b118 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b119 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b120 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b121 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b122 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b123 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b124 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b125 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b126 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b127 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b128 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b129 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b130 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b131 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b132 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b133 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b134 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b135 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b136 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b137 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b138 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b139 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b140 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b141 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b142 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b143 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b144 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b145 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b146 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b147 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b148 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b149 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b150 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b151 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b152 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b153 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b154 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b155 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b156 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b157 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b158 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b159 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b160 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b161 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b162 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b163 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b164 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b165 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b166 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b167 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b168 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b169 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b170 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b171 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b172 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b173 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b174 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b175 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b176 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b177 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b178 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b179 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b180 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b181 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b182 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b183 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b184 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b185 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b186 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b187 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b188 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b189 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b190 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b191 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b192 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b193 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b194 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b195 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b196 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b197 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b198 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b199 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b200 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b201 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b202 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b203 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b204 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b205 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b206 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b207 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b208 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b209 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b210 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b211 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b212 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b213 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b214 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b215 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b216 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b217 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b218 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b219 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b220 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b221 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b222 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b223 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b224 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b225 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b226 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b227 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b228 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b229 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b230 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b231 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b232 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b233 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b234 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b235 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b236 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b237 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b238 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b239 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b240 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b241 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b242 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b243 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b244 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b245 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b246 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b247 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b248 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b249 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b250 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b251 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b252 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b253 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b254 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b255 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b256 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b257 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b258 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b259 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b260 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b261 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b262 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b263 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b264 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b265 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b266 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b267 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b268 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b269 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b270 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b271 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b272 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b273 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b274 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b275 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b276 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b277 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b278 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b279 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b280 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b281 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b282 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b283 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b284 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b285 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b286 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b287 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b288 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b289 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b290 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b291 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b292 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b293 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b294 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b295 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b296 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b297 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b298 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b299 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b300 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b301 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b302 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b303 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b304 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b305 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b306 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b307 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b308 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b309 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b310 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b311 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b312 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b313 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b314 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b315 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b316 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b317 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b318 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b319 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b320 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b321 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b322 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b323 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b324 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b325 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b326 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b327 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b328 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b329 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b330 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b331 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b332 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b333 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b334 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b335 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b336 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b337 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b338 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b339 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b340 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b341 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b342 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b343 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b344 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b345 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b346 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b347 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b348 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b349 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b350 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b351 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b352 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b353 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b354 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b355 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b356 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b357 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b358 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b359 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b360 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b361 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b362 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b363 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b364 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b365 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b366 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b367 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b368 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b369 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b370 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b371 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b372 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b373 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b374 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b375 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b376 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b377 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b378 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b379 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b380 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b381 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b382 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b383 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b384 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b385 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b386 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b387 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b388 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b389 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b390 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b391 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b392 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b393 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b394 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b395 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b396 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b397 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b398 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b399 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b400 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b401 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b402 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b403 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b404 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b405 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b406 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b407 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b408 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b409 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b410 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b411 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b412 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b413 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b414 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b415 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b416 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b417 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b418 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b419 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b420 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b421 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b422 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b423 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b424 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b425 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b426 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b427 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b428 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b429 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b430 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b431 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b432 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b433 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b434 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b435 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b436 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b437 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b438 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b439 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b440 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b441 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b442 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b443 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b444 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b445 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b446 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b447 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b448 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b449 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b450 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b451 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b452 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b453 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b454 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b455 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b456 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b457 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b458 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b459 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b460 = Var(within=Binary,bounds=(0,1),initialize=0)
m.x461 = Var(within=Reals,bounds=(0,6),initialize=0)
m.x462 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x463 = Var(within=Reals,bounds=(0,6),initialize=0)
m.x464 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x465 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x466 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x467 = Var(within=Reals,bounds=(0,9),initialize=0)
m.x468 = Var(within=Reals,bounds=(0,6),initialize=0)
m.x469 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x470 = Var(within=Reals,bounds=(0,9),initialize=0)
m.x471 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x472 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x473 = Var(within=Reals,bounds=(0,4),initialize=0)
m.x474 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x475 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x476 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x477 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x478 = Var(within=Reals,bounds=(0,9),initialize=0)
m.x479 = Var(within=Reals,bounds=(0,4),initialize=0)
m.x480 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x481 = Var(within=Reals,bounds=(0,6),initialize=0)
m.x482 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x483 = Var(within=Reals,bounds=(0,6),initialize=0)
m.x484 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x485 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x486 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x487 = Var(within=Reals,bounds=(0,9),initialize=0)
m.x488 = Var(within=Reals,bounds=(0,6),initialize=0)
m.x489 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x490 = Var(within=Reals,bounds=(0,9),initialize=0)
m.x491 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x492 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x493 = Var(within=Reals,bounds=(0,4),initialize=0)
m.x494 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x495 = Var(within=Reals,bounds=(0,8),initialize=0)
m.x496 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x497 = Var(within=Reals,bounds=(0,7),initialize=0)
m.x498 = Var(within=Reals,bounds=(0,9),initialize=0)
m.x499 = Var(within=Reals,bounds=(0,4),initialize=0)
m.x500 = Var(within=Reals,bounds=(0,5),initialize=0)
m.x501 = Var(within=Reals,bounds=(0,11),initialize=0)
m.x502 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x503 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x504 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x505 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x506 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x507 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x508 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x509 = Var(within=Reals,bounds=(0,11),initialize=0)
m.x510 = Var(within=Reals,bounds=(0,11),initialize=0)
m.x511 = Var(within=Reals,bounds=(0,11),initialize=0)
m.x512 = Var(within=Reals,bounds=(0,11),initialize=0)
m.x513 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x514 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x515 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x516 = Var(within=Reals,bounds=(0,11),initialize=0)
m.x517 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x518 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x519 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x520 = Var(within=Reals,bounds=(0,12),initialize=0)
m.x522 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x523 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x524 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x525 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x526 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x527 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x528 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x529 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x530 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x531 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x532 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x533 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x534 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x535 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x536 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x537 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x538 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x539 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x540 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x541 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x542 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x543 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x544 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x545 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x546 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x547 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x548 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x549 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x550 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x551 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x552 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x553 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x554 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x555 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x556 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x557 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x558 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x559 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x560 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x561 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x562 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x563 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x564 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x565 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x566 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x567 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x568 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x569 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x570 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x571 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x572 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x573 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x574 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x575 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x576 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x577 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x578 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x579 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x580 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x581 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x582 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x583 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x584 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x585 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x586 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x587 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x588 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x589 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x590 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x591 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x592 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x593 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x594 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x595 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x596 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x597 | |
+ 3)*ido);
tr2 = ref(cc,4*k*ido) + ref(cc,ido-1 + (4*k + 3)*ido);
tr3 = ref(cc,ido-1 + (4*k + 1)*ido) + ref(cc,ido-1 + (4*k + 1)*ido);
tr4 = ref(cc,(4*k + 2)*ido) + ref(cc,(4*k + 2)*ido);
ch[k*ido] = tr2 + tr3;
ch[(k + l1)*ido] = tr1 - tr4;
ch[(k + 2*l1)*ido] = tr2 - tr3;
ch[(k + 3*l1)*ido] = tr1 + tr4;
}
if (ido < 2) return;
if (ido != 2) {
for (k = 0; k < l1; ++k) {
for (i = 2; i < ido; i += 2) {
ic = ido - i;
ti1 = ref(cc,i + 4*k*ido) + ref(cc,ic + (4*k + 3)*ido);
ti2 = ref(cc,i + 4*k*ido) - ref(cc,ic + (4*k + 3)*ido);
ti3 = ref(cc,i + (4*k + 2)*ido) - ref(cc,ic + (4*k + 1)*ido);
tr4 = ref(cc,i + (4*k + 2)*ido) + ref(cc,ic + (4*k + 1)*ido);
tr1 = ref(cc,i - 1 + 4*k*ido) - ref(cc,ic - 1 + (4*k + 3)*ido);
tr2 = ref(cc,i - 1 + 4*k*ido) + ref(cc,ic - 1 + (4*k + 3)*ido);
ti4 = ref(cc,i - 1 + (4*k + 2)*ido) - ref(cc,ic - 1 + (4*k + 1)*ido);
tr3 = ref(cc,i - 1 + (4*k + 2)*ido) + ref(cc,ic - 1 + (4*k + 1)*ido);
ch[i - 1 + k*ido] = tr2 + tr3;
cr3 = tr2 - tr3;
ch[i + k*ido] = ti2 + ti3;
ci3 = ti2 - ti3;
cr2 = tr1 - tr4;
cr4 = tr1 + tr4;
ci2 = ti1 + ti4;
ci4 = ti1 - ti4;
ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*cr2 - wa1[i - 1]*ci2;
ch[i + (k + l1)*ido] = wa1[i - 2]*ci2 + wa1[i - 1]*cr2;
ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*cr3 - wa2[i - 1]*ci3;
ch[i + (k + 2*l1)*ido] = wa2[i - 2]*ci3 + wa2[i - 1]*cr3;
ch[i - 1 + (k + 3*l1)*ido] = wa3[i - 2]*cr4 - wa3[i - 1]*ci4;
ch[i + (k + 3*l1)*ido] = wa3[i - 2]*ci4 + wa3[i - 1]*cr4;
}
}
if (ido % 2 == 1) return;
}
for (k = 0; k < l1; k++) {
ti1 = ref(cc,(4*k + 1)*ido) + ref(cc,(4*k + 3)*ido);
ti2 = ref(cc,(4*k + 3)*ido) - ref(cc,(4*k + 1)*ido);
tr1 = ref(cc,ido-1 + 4*k*ido) - ref(cc,ido-1 + (4*k + 2)*ido);
tr2 = ref(cc,ido-1 + 4*k*ido) + ref(cc,ido-1 + (4*k + 2)*ido);
ch[ido-1 + k*ido] = tr2 + tr2;
ch[ido-1 + (k + l1)*ido] = sqrt2*(tr1 - ti1);
ch[ido-1 + (k + 2*l1)*ido] = ti2 + ti2;
ch[ido-1 + (k + 3*l1)*ido] = -sqrt2*(tr1 + ti1);
}
} /* radb4 */
/* radf5: radix-5 pass of the real forward FFT (FFTPACK-style rfftf).
 * Combines 5 interleaved sub-transforms of length l1 from cc into the
 * half-complex output ch; wa1..wa4 are precomputed twiddle-factor tables.
 * tr11/ti11 = cos/sin(2*pi/5), tr12/ti12 = cos/sin(4*pi/5).
 */
static void radf5(int ido, int l1, const Treal cc[], Treal ch[],
    const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[])
  {
    static const Treal tr11 = 0.309016994374947;
    static const Treal ti11 = 0.951056516295154;
    static const Treal tr12 = -0.809016994374947;
    static const Treal ti12 = 0.587785252292473;
    int i, k, ic;
    Treal ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, dr4, dr5,
      cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5;
    /* First element of each block (i == 0): purely real radix-5 butterfly,
     * no twiddle factors needed. */
    for (k = 0; k < l1; k++) {
      cr2 = ref(cc,(k + 4*l1)*ido) + ref(cc,(k + l1)*ido);
      ci5 = ref(cc,(k + 4*l1)*ido) - ref(cc,(k + l1)*ido);
      cr3 = ref(cc,(k + 3*l1)*ido) + ref(cc,(k + 2*l1)*ido);
      ci4 = ref(cc,(k + 3*l1)*ido) - ref(cc,(k + 2*l1)*ido);
      ch[5*k*ido] = ref(cc,k*ido) + cr2 + cr3;
      ch[ido-1 + (5*k + 1)*ido] = ref(cc,k*ido) + tr11*cr2 + tr12*cr3;
      ch[(5*k + 2)*ido] = ti11*ci5 + ti12*ci4;
      ch[ido-1 + (5*k + 3)*ido] = ref(cc,k*ido) + tr12*cr2 + tr11*cr3;
      ch[(5*k + 4)*ido] = ti12*ci5 - ti11*ci4;
    }
    if (ido == 1) return;
    /* Remaining (complex) elements: multiply by the twiddle factors
     * wa1..wa4 (dr*/di* pairs), then apply the radix-5 butterfly.
     * ic = ido - i mirrors i for the conjugate-symmetric output layout. */
    for (k = 0; k < l1; ++k) {
      for (i = 2; i < ido; i += 2) {
        ic = ido - i;
        dr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) + wa1[i - 1]*ref(cc,i + (k + l1)*ido);
        di2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido);
        dr3 = wa2[i - 2]*ref(cc,i - 1 + (k + 2*l1)*ido) + wa2[i - 1]*ref(cc,i + (k + 2*l1)*ido);
        di3 = wa2[i - 2]*ref(cc,i + (k + 2*l1)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + 2*l1)*ido);
        dr4 = wa3[i - 2]*ref(cc,i - 1 + (k + 3*l1)*ido) + wa3[i - 1]*ref(cc,i + (k + 3*l1)*ido);
        di4 = wa3[i - 2]*ref(cc,i + (k + 3*l1)*ido) - wa3[i - 1]*ref(cc,i - 1 + (k + 3*l1)*ido);
        dr5 = wa4[i - 2]*ref(cc,i - 1 + (k + 4*l1)*ido) + wa4[i - 1]*ref(cc,i + (k + 4*l1)*ido);
        di5 = wa4[i - 2]*ref(cc,i + (k + 4*l1)*ido) - wa4[i - 1]*ref(cc,i - 1 + (k + 4*l1)*ido);
        cr2 = dr2 + dr5;
        ci5 = dr5 - dr2;
        cr5 = di2 - di5;
        ci2 = di2 + di5;
        cr3 = dr3 + dr4;
        ci4 = dr4 - dr3;
        cr4 = di3 - di4;
        ci3 = di3 + di4;
        ch[i - 1 + 5*k*ido] = ref(cc,i - 1 + k*ido) + cr2 + cr3;
        ch[i + 5*k*ido] = ref(cc,i + k*ido) + ci2 + ci3;
        tr2 = ref(cc,i - 1 + k*ido) + tr11*cr2 + tr12*cr3;
        ti2 = ref(cc,i + k*ido) + tr11*ci2 + tr12*ci3;
        tr3 = ref(cc,i - 1 + k*ido) + tr12*cr2 + tr11*cr3;
        ti3 = ref(cc,i + k*ido) + tr12*ci2 + tr11*ci3;
        tr5 = ti11*cr5 + ti12*cr4;
        ti5 = ti11*ci5 + ti12*ci4;
        tr4 = ti12*cr5 - ti11*cr4;
        ti4 = ti12*ci5 - ti11*ci4;
        ch[i - 1 + (5*k + 2)*ido] = tr2 + tr5;
        ch[ic - 1 + (5*k + 1)*ido] = tr2 - tr5;
        ch[i + (5*k + 2)*ido] = ti2 + ti5;
        ch[ic + (5*k + 1)*ido] = ti5 - ti2;
        ch[i - 1 + (5*k + 4)*ido] = tr3 + tr4;
        ch[ic - 1 + (5*k + 3)*ido] = tr3 - tr4;
        ch[i + (5*k + 4)*ido] = ti3 + ti4;
        ch[ic + (5*k + 3)*ido] = ti4 - ti3;
      }
    }
  } /* radf5 */
static void radb5(int ido, int l1, const Treal cc[], Treal ch[],
const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[])
{
static const Treal tr11 = 0.309016994374947;
static const Treal ti11 = 0.951056516295154;
static const Treal tr12 = -0.809016994374947;
static const Treal ti12 = 0.587785252292473;
int i, k, ic;
Treal ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3,
ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5;
for (k = 0; k < l1; k++) {
ti5 = 2*ref(cc,(5*k + 2)*ido);
ti4 = 2*ref(cc,(5*k + 4)*ido);
tr2 = 2*ref(cc,ido-1 + (5*k + 1)*ido);
tr3 = 2*ref(cc,ido-1 + (5*k + 3)*ido);
ch[k*ido] = ref(cc,5*k*ido) + tr2 + tr3;
cr2 = ref(cc,5*k*ido) + tr11*tr2 + tr12*tr3;
cr3 = ref(cc,5*k*ido) + tr12*tr2 + tr11*tr3;
ci5 = ti11*ti5 + ti12*ti4;
ci4 = ti12*ti5 - ti11*ti4;
ch[(k + l1)*ido] = cr2 - ci5;
ch[(k + 2*l1)*ido] = cr3 - ci4;
ch[(k + 3*l1)*ido] = cr3 + ci4;
ch[(k + 4*l1)*ido] = cr2 + ci5;
}
if (ido == 1) return;
for (k = 0; k < l1; ++k) {
for (i = 2; i < ido; i += 2) {
ic = ido - i;
ti5 = ref(cc,i + (5*k + 2)*ido) + ref(cc,ic + (5*k + 1)*ido);
ti2 = ref(cc,i + (5*k + 2)*ido) - ref(cc,ic + (5*k + 1)*ido);
ti4 = ref(cc,i + (5*k + 4)*ido) + ref(cc,ic + (5*k + 3)*ido);
ti3 = ref(cc,i + (5*k + 4)*ido) - ref(cc,ic + (5*k + 3)*ido);
tr5 = ref(cc,i - 1 + (5*k + 2)*ido) - ref(cc,ic - 1 + (5*k + 1)*ido);
tr2 = ref(cc,i - 1 + (5*k + 2)*ido) + ref(cc,ic - 1 + (5*k + | |
*/
/* add_event() ! " */
/* get() ! " */
/* . */
/* . */
/* . */
/* flush_io() ! send get requests */
/* ! optional parallel processing */
/* . ! " */
/* . ! " */
/* pend_io() ! wait for replies from get requests */
/* . ! access to requested data */
/* . ! " */
/* pend_event() ! wait for requested events */
/* */
/************************************************************************/
/************************************************************************/
/* These routines wait for channel subscription events and call the */
/* functions specified with add_event when events occur. If the */
/* timeout is specified as 0 an infinite timeout is assumed. */
/* ca_flush_io() is called by this routine. If ca_pend_io () */
/* is called when no IO is outstanding then it will return immediately */
/* without processing. */
/************************************************************************/
/*
* ca_pend_event()
*
* timeOut R wait for this delay in seconds
*/
int ca_pend_event(ca_real timeOut);
/*
* ca_pend_io()
*
* timeOut R wait for this delay in seconds but return early
* if all get requests (or search requests with null
* connection handler pointer have completed)
*/
int ca_pend_io(ca_real timeOut);
/* calls ca_pend_io() if early is true otherwise ca_pend_event() is called */
int ca_pend (ca_real timeout, int early);
/*
* ca_test_io()
*
* returns TRUE when get requests (or search requests with null
* connection handler pointer) are outstanding
*/
int ca_test_io (void);
/************************************************************************/
/* Send out all outstanding messages in the send queue */
/************************************************************************/
/*
* ca_flush_io()
*/
int ca_flush_io();
/*
* ca_host_name_function()
*
* channel R channel identifier
*
* !!!! this function is _not_ thread safe !!!!
*/
const char * ca_host_name (chid channel);
/* thread safe version */
unsigned ca_get_host_name ( chid pChan,
char *pBuf, unsigned bufLength );
/*
* ca_replace_printf_handler ()
*
* for apps that want to change where ca formatted
* text output goes
*
* use two ifdef's for trad C compatibility
*
* ca_printf_func R pointer to new function called when
* CA prints an error message
*/
/*
typedef int caPrintfFunc (const char *pformat, va_list args);
int ca_replace_printf_handler (
caPrintfFunc *ca_printf_func
);
*/
/*
* CA synch groups
*
* This facility will allow the programmer to create
* any number of synchronization groups. The programmer might then
* interleave IO requests within any of the groups. Once The
* IO operations are initiated then the programmer is free to
* block for IO completion within any one of the groups as needed.
*/
/*
* ca_sg_create()
*
* create a sync group
*
* pgid W pointer to sync group id that will be written
*/
int ca_sg_create (CA_SYNC_GID * pgid);
/*
* ca_sg_delete()
*
* delete a sync group
*
* gid R sync group id
*/
int ca_sg_delete (const CA_SYNC_GID gid);
/*
* ca_sg_block()
*
* block for IO performed within a sync group to complete
*
* gid R sync group id
* timeout R wait for this duration prior to timing out
* and returning ECA_TIMEOUT
*/
int ca_sg_block (const CA_SYNC_GID gid, ca_real timeout);
/*
* ca_sg_test()
*
* test for sync group IO operations in progress
*
* gid R sync group id
*
* returns one of ECA_BADSYNCGRP, ECA_IOINPROGRESS, ECA_IODONE
*/
int ca_sg_test (const CA_SYNC_GID gid);
/*
* ca_sg_reset
*
* gid R sync group id
*/
int ca_sg_reset(const CA_SYNC_GID gid);
/*
* ca_sg_array_get()
*
* initiate a get within a sync group
* (essentially a ca_array_get() with a sync group specified)
*
* gid R sync group id
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue W channel value copied to this location
*/
int ca_sg_array_get
(
const CA_SYNC_GID gid,
chtype type,
unsigned long count,
chid chan,
void *pValue
);
/*
* ca_sg_array_put()
*
* initiate a put within a sync group
* (essentially a ca_array_put() with a sync group specified)
*
* gid R sync group id
* type R data type from db_access.h
* count R array element count
* chan R channel identifier
* pValue R new channel value copied from this location
*/
int ca_sg_array_put
(
const CA_SYNC_GID gid,
chtype type,
unsigned long count,
chid chan,
const void *pValue
);
/*
* ca_sg_stat()
*
* print status of a sync group
*
* gid R sync group id
*/
int ca_sg_stat (CA_SYNC_GID gid);
/*
* used when an auxillary thread needs to join a CA client context started
* by another thread
*/
struct ca_client_context * ca_current_context ();
int ca_attach_context ( struct ca_client_context * context );
int ca_client_status ( unsigned level );
int ca_context_status ( struct ca_client_context *, unsigned level );
const char * ca_message(long ca_status);
/*
* ca_version()
*
* returns the CA version string
*/
const char * ca_version (void);
""")
# alarm.h
ffi.cdef("""
#define NO_ALARM 0
/* ALARM SEVERITIES - must match menuAlarmSevr.dbd */
typedef enum {
epicsSevNone = NO_ALARM,
epicsSevMinor,
epicsSevMajor,
epicsSevInvalid,
ALARM_NSEV
} epicsAlarmSeverity;
/* ALARM STATUS - must match menuAlarmStat.dbd */
typedef enum {
epicsAlarmNone = NO_ALARM,
epicsAlarmRead,
epicsAlarmWrite,
epicsAlarmHiHi,
epicsAlarmHigh,
epicsAlarmLoLo,
epicsAlarmLow,
epicsAlarmState,
epicsAlarmCos,
epicsAlarmComm,
epicsAlarmTimeout,
epicsAlarmHwLimit,
epicsAlarmCalc,
epicsAlarmScan,
epicsAlarmLink,
epicsAlarmSoft,
epicsAlarmBadSub,
epicsAlarmUDF,
epicsAlarmDisable,
epicsAlarmSimm,
epicsAlarmReadAccess,
epicsAlarmWriteAccess,
ALARM_NSTATUS
} epicsAlarmCondition;
""")
# caeventmask.h
# Channel Access event-mask bit flags (combinable with |).  Note DBE_LOG has
# the same value as DBE_ARCHIVE (2) -- they are aliases of the same bit.
ffi.cdef("""
#define DBE_VALUE 1
#define DBE_ARCHIVE 2
#define DBE_LOG 2
#define DBE_ALARM 4
#define DBE_PROPERTY 8
""")
# epicsTypes.h
ffi.cdef("""
typedef int8_t epicsInt8;
typedef uint8_t epicsUInt8;
typedef int16_t epicsInt16;
typedef uint16_t epicsUInt16;
typedef epicsUInt16 epicsEnum16;
typedef int32_t epicsInt32;
typedef uint32_t epicsUInt32;
typedef int64_t epicsInt64;
typedef uint64_t epicsUInt64;
typedef float epicsFloat32;
typedef double epicsFloat64;
typedef epicsInt32 epicsStatus;
typedef struct {
unsigned length;
char *pString;
}epicsString;
/*
* !! Dont use this - it may vanish in the future !!
*
* Provided only for backwards compatibility with
* db_access.h
*
*/
typedef char epicsOldString[40];
""")
# epicsTime.h
ffi.cdef("""
/* epics time stamp for C interface*/
typedef struct epicsTimeStamp {
epicsUInt32 secPastEpoch; /* seconds since 0000 Jan 1, 1990 */
epicsUInt32 nsec; /* nanoseconds within second */
} epicsTimeStamp;
""")
# db_access.h
ffi.cdef("""
/*
* architecture independent types
*
* (so far this is sufficient for all archs we have ported to)
*/
typedef epicsOldString dbr_string_t;
typedef epicsUInt8 dbr_char_t;
typedef epicsInt16 dbr_short_t;
typedef epicsUInt16 dbr_ushort_t;
typedef epicsInt16 dbr_int_t;
typedef epicsUInt16 dbr_enum_t;
typedef epicsInt32 dbr_long_t;
typedef epicsUInt32 dbr_ulong_t;
typedef epicsFloat32 dbr_float_t;
typedef epicsFloat64 dbr_double_t;
typedef epicsUInt16 dbr_put_ackt_t;
typedef epicsUInt16 dbr_put_acks_t;
typedef epicsOldString dbr_stsack_string_t;
typedef epicsOldString dbr_class_name_t;
/* VALUES WITH STATUS STRUCTURES */
/* structure for a string status field */
struct dbr_sts_string {
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_string_t value; /* current value */
};
/* structure for a string status and ack field */
struct dbr_stsack_string{
dbr_ushort_t status; /* status of value */
dbr_ushort_t severity; /* severity of alarm */
dbr_ushort_t ackt; /* ack transient? */
dbr_ushort_t acks; /* ack severity */
dbr_string_t value; /* current value */
};
/* structure for an short status field */
struct dbr_sts_int{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t value; /* current value */
};
struct dbr_sts_short{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_short_t value; /* current value */
};
/* structure for a float status field */
struct dbr_sts_float{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_float_t value; /* current value */
};
/* structure for a enum status field */
struct dbr_sts_enum{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_enum_t value; /* current value */
};
/* structure for a char status field */
struct dbr_sts_char{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_char_t RISC_pad; /* RISC alignment */
dbr_char_t value; /* current value */
};
/* structure for a long status field */
struct dbr_sts_long{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_long_t value; /* current value */
};
/* structure for a double status field */
struct dbr_sts_double{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
dbr_long_t RISC_pad; /* RISC alignment */
dbr_double_t value; /* current value */
};
/* VALUES WITH STATUS AND TIME STRUCTURES */
/* structure for a string time field */
struct dbr_time_string{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_string_t value; /* current value */
};
/* structure for an short time field */
struct dbr_time_int{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_short_t RISC_pad; /* RISC alignment */
dbr_short_t value; /* current value */
};
struct dbr_time_short{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_short_t RISC_pad; /* RISC alignment */
dbr_short_t value; /* current value */
};
/* structure for a float time field */
struct dbr_time_float{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_float_t value; /* current value */
};
/* structure for a enum time field */
struct dbr_time_enum{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_short_t RISC_pad; /* RISC alignment */
dbr_enum_t value; /* current value */
};
/* structure for a char time field */
struct dbr_time_char{
dbr_short_t status; /* status of value */
dbr_short_t severity; /* severity of alarm */
epicsTimeStamp stamp; /* time stamp */
dbr_short_t RISC_pad0; /* RISC alignment */
dbr_char_t RISC_pad1; /* RISC alignment */
dbr_char_t value; /* current value */
};
/* structure for a long time field */
struct dbr_time_long{
dbr_short_t status; /* | |
<filename>alf/utils/dist_utils.py
# Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import numbers
import numpy as np
import math
import torch
import torch.distributions as td
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
import torch.nn as nn
import alf
import alf.nest as nest
from alf.tensor_specs import TensorSpec, BoundedTensorSpec
def get_invertable(cls):
    """Wrap a ``td.Transform`` subclass so that instances always cache.

    Returns a subclass of ``cls`` whose constructor forces ``cache_size=1``
    while forwarding every other argument unchanged.  With a cache size of
    one, torch remembers the most recent :math:`(x, y)` pair of a transform
    :math:`y=g(x)`, so a subsequent :math:`g^{-1}(y)` returns the cached
    :math:`x` exactly.  This sidesteps the training instabilities caused by
    transforms whose analytic inversion is numerically inaccurate (i.e.
    :math:`x \\neq g^{-1}(g(x))`).  For a chain of transformations
    :math:`G`, every element must be cached for the composite to satisfy
    :math:`x=G^{-1}(G(x))`.
    """

    class NewCls(cls):
        # Pin cache_size=1; all remaining constructor args pass through.
        __init__ = functools.partialmethod(cls.__init__, cache_size=1)

    return NewCls
# Cache-enabled (cache_size=1) variants of the stock torch transforms; see
# get_invertable() for why caching is required for reliable inversion.
AbsTransform = get_invertable(td.AbsTransform)
AffineTransform = get_invertable(td.AffineTransform)
ExpTransform = get_invertable(td.ExpTransform)
PowerTransform = get_invertable(td.PowerTransform)
SigmoidTransform = get_invertable(td.SigmoidTransform)
SoftmaxTransform = get_invertable(td.SoftmaxTransform)
@alf.configurable
class Softplus(td.Transform):
    r"""Bijector for :math:`y = s\,\log(1 + \exp(x/s))` (scaled softplus).

    Here :math:`s` is the ``hinge_softness``; with :math:`s = 1` this is the
    standard softplus :math:`\log(1 + \exp(x))`.  Code adapted from
    `pyro <https://docs.pyro.ai/en/latest/_modules/pyro/distributions/transforms/softplus.html>`_
    and `tensorflow <https://github.com/tensorflow/probability/blob/v0.12.2/tensorflow_probability/python/bijectors/softplus.py#L61-L189>`_.
    """
    domain = constraints.real
    codomain = constraints.positive
    bijective = True
    sign = +1

    def __init__(self, hinge_softness=1.):
        """
        Args:
            hinge_softness (float): positive scale controlling the transition
                slope; a higher softness gives a smoother bend from 0 to
                identity.
        """
        super().__init__(cache_size=1)
        self._hinge_softness = float(hinge_softness)
        assert self._hinge_softness > 0, "Must be a positive softness number!"

    def __eq__(self, other):
        if not isinstance(other, Softplus):
            return False
        return self._hinge_softness == other._hinge_softness

    def _call(self, x):
        # softplus(x, beta) = (1/beta) * log(1 + exp(beta*x)), with beta = 1/s.
        return nn.functional.softplus(x, beta=1. / self._hinge_softness)

    def _inverse(self, y):
        # x = s * log(exp(y/s) - 1), using expm1 for accuracy near zero.
        return (y / self._hinge_softness).expm1().log() * self._hinge_softness

    def log_abs_det_jacobian(self, x, y):
        # dy/dx = sigmoid(x/s), hence log|dy/dx| = -softplus(-x/s).
        return -nn.functional.softplus(-x / self._hinge_softness)
@alf.configurable
def Softlower(low, hinge_softness=1.):
    """Soft lower bound: ``softlower(x, low) = softplus(x - low) + low``.

    Implemented as a shift-down -> softplus -> shift-up pipeline.

    Args:
        low (float|Tensor): the lower bound
        hinge_softness (float): positive parameter controlling the transition
            slope; a higher softness gives a smoother transition from ``low``
            to identity.
    """
    shift_down = AffineTransform(loc=-low, scale=1.)
    soften = Softplus(hinge_softness=hinge_softness)
    shift_up = AffineTransform(loc=low, scale=1.)
    return td.transforms.ComposeTransform([shift_down, soften, shift_up])
@alf.configurable
def Softupper(high, hinge_softness=1.):
    """Soft upper bound: ``softupper(x, high) = -softplus(high - x) + high``.

    Implemented by reflecting about ``high``, applying softplus, and
    reflecting back.  Two separate reflection instances are used so each
    pipeline position keeps its own (x, y) cache.

    Args:
        high (float|Tensor): the upper bound
        hinge_softness (float): positive parameter controlling the transition
            slope; a higher softness gives a smoother transition from
            identity to ``high``.
    """
    reflect = AffineTransform(loc=high, scale=-1.)
    reflect_back = AffineTransform(loc=high, scale=-1.)
    return td.transforms.ComposeTransform(
        [reflect, Softplus(hinge_softness=hinge_softness), reflect_back])
@alf.configurable
def SoftclipTF(low, high, hinge_softness=1.):
    """Smoothly squash values into ``(low, high)``, TF-style.

    Composes Softlower, Softupper and Affine transforms, adapted from
    `tensorflow <https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/SoftClip>`_.
    Mathematically,

    .. code-block:: python

        clipped = softupper(softlower(x, low), high)
        softclip(x) = (clipped - high) / (high - softupper(low, high)) * (high - low) + high

    Softplus distortion makes ``softupper(low, high) < low``, so the final
    affine pair shrinks the interval by
    ``(high - low) / (high - softupper(low, high))`` to restore the lower
    bound; the resulting bijector can therefore be mildly asymmetric.

    Args:
        low (float|Tensor): the lower bound
        high (float|Tensor): the upper bound
        hinge_softness (float): positive parameter controlling the transition
            slope; a higher softness gives a smoother transition from ``low``
            to ``high``.
    """
    if not isinstance(low, torch.Tensor):
        low = torch.tensor(low)
    assert torch.all(high > low), "Invalid clipping range"
    # Where ``low`` itself lands after the soft upper bound; measures the
    # shrinkage that the rescaling below undoes.
    softupper_high_at_low = Softupper(high, hinge_softness=hinge_softness)(low)
    rescale = (high - low) / (high - softupper_high_at_low)
    return td.transforms.ComposeTransform([
        Softlower(low=low, hinge_softness=hinge_softness),
        Softupper(high=high, hinge_softness=hinge_softness),
        AffineTransform(loc=-high, scale=1.),
        AffineTransform(loc=high, scale=rescale)
    ])
@alf.configurable
class Softclip(td.Transform):
    r"""Transform via the mapping defined in ``alf.math_ops.softclip()``.
    Unlike ``SoftclipTF``, this transform is symmetric regarding the lower and
    upper bound when squashing.
    """
    domain = constraints.real
    codomain = constraints.real
    bijective = True
    sign = +1
    def __init__(self, low, high, hinge_softness=1.):
        """
        Args:
            low (float): the lower bound
            high (float): the upper bound
            hinge_softness (float): this positive parameter changes the transition
                slope. A higher softness results in a smoother transition from
                ``low`` to ``high``.
        """
        super().__init__(cache_size=1)
        self._hinge_softness = float(hinge_softness)
        assert self._hinge_softness > 0, "Must be a positive softness number!"
        self._l = float(low)
        self._h = float(high)
        # Instance attribute narrows the class-level ``codomain`` to the
        # actual clip interval [low, high].
        self.codomain = constraints.interval(self._l, self._h)
    def __eq__(self, other):
        # Equal iff the other transform clips to the same range with the same
        # softness.
        return (isinstance(other, Softclip)
                and self._hinge_softness == other._hinge_softness
                and self._l == other._l and self._h == other._h)
    def _call(self, x):
        # Forward pass delegates to the project helper; see
        # ``alf.math_ops.softclip()`` for the exact mapping.
        return alf.math.softclip(x, self._l, self._h, self._hinge_softness)
    def _inverse(self, y):
        """``y`` should be in ``[self._l, self._h]``. Note that when ``y`` is
        close to boundaries, this inverse function might have numerical issues.
        Since we use ``cache_size=1`` in the init function, here we don't clip
        ``y``.
        """
        s = self._hinge_softness
        # Analytic inverse written with expm1 for accuracy near the bounds.
        return (y + s * (((self._l - y) / s).expm1() / (
            (y - self._h) / s).expm1()).log())
    def log_abs_det_jacobian(self, x, y):
        r"""Compute ``log|dy/dx|``.
        """
        s = self._hinge_softness
        # dy/dx = 1 - sigmoid((l - x)/s) - sigmoid((x - h)/s), expressed via
        # the two logistic terms below.
        return (1 - 1 / (1 + ((x - self._l) / s).exp()) - 1 / (1 + (
            (self._h - x) / s).exp())).log()
@alf.configurable
class Softsign(td.Transform):
    r"""Softsign bijector mapping the reals into ``(-1, 1)``.

    The forward pass delegates to ``alf.math.softsign`` (per the inverse and
    Jacobian below, the mapping is :math:`y = x/(1+|x|)`).
    """
    domain = constraints.real
    codomain = constraints.interval(-1.0, 1.0)
    bijective = True
    sign = +1

    def __init__(self):
        super().__init__(cache_size=1)

    def __eq__(self, other):
        return isinstance(other, Softsign)

    def _call(self, x):
        return alf.math.softsign(x)

    def _inverse(self, y):
        r"""Piecewise inverse:

        .. math::
            x = \frac{y}{1-y}\ \text{if}\ y > 0,\qquad
            x = \frac{y}{1+y}\ \text{otherwise.}
        """
        positive_branch = y / (1 - y)
        negative_branch = y / (1 + y)
        return torch.where(y > 0, positive_branch, negative_branch)

    def log_abs_det_jacobian(self, x, y):
        r"""Both branches give :math:`\log|dy/dx| = -2\log(1+|x|)`."""
        return -2. * torch.log(1 + x.abs())
@alf.configurable
class StableTanh(td.Transform):
    r"""Invertible bijector computing :math:`y = \tanh(x)`, so
    :math:`y \in (-1, 1)`.

    Equivalent to chaining ``AffineTransform(loc=0, scale=2)``,
    ``SigmoidTransform()`` and ``AffineTransform(loc=-1, scale=2)``, but using
    ``StableTanh`` directly is more numerically stable than that composition.
    """
    domain = constraints.real
    codomain = constraints.interval(-1.0, 1.0)
    bijective = True
    sign = +1

    def __init__(self, cache_size=1):
        # Cache by default: atanh is numerically unstable near +/-1, so the
        # cached (x, y) pair gives exact inversion.
        super().__init__(cache_size=cache_size)

    def __eq__(self, other):
        return isinstance(other, StableTanh)

    def _call(self, x):
        return torch.tanh(x)

    def _inverse(self, y):
        # Based on https://github.com/tensorflow/agents/commit/dfb8c85a01d65832b05315928c010336df13f7b9#diff-a572e559b953f965c5c2cd1b9ded2c7b
        # 0.99999997 is the maximum magnitude for which atanh(y) is valid in
        # both float32 and float64, so in-range values are clamped to it.
        bounded = torch.where(
            torch.abs(y) <= 1.0, torch.clamp(y, -0.99999997, 0.99999997), y)
        return 0.5 * torch.log((1 + bounded) / (1 - bounded))

    def log_abs_det_jacobian(self, x, y):
        # log(d tanh(x)/dx) = 2*(log 2 - x - softplus(-2x)); this form avoids
        # overflow for large |x|.
        return 2.0 * (
            torch.log(torch.tensor(2.0, dtype=x.dtype, requires_grad=False)) -
            x - nn.functional.softplus(-2.0 * x))
# The pytorch kl_divergence has a bug
# (https://github.com/pytorch/pytorch/issues/34859)
# So we use our own:
@td.kl.register_kl(td.TransformedDistribution, td.TransformedDistribution)
def _kl_transformed_transformed(p, q):
if p.transforms != q.transforms:
raise NotImplementedError
if p.event_shape != q.event_shape:
raise NotImplementedError
return td.kl.kl_divergence(p.base_dist, q.base_dist)
class OUProcess(nn.Module):
"""A zero-mean Ornstein-Uhlenbeck process for generating noises."""
def __init__(self, initial_value, damping=0.15, stddev=0.2):
"""
The Ornstein-Uhlenbeck process is a process that generates temporally
correlated noise via a random walk with damping. This process describes
the velocity of a particle undergoing brownian motion in the presence of
friction. This can be useful for exploration in continuous action
environments with momentum.
The temporal update equation is:
.. code-block:: python
x_next = (1 - damping) * x + N(0, std_dev)
Args:
initial_value (Tensor): Initial value of the process.
damping (float): The rate at which the noise trajectory is damped
towards the mean. We must have :math:`0 <= damping | |
LOG.debug("Sending %s to switch %s", cmds, switch_ip)
response = self._session.post(
eapi_server_url,
verify=self._verify,
timeout=self._conn_timeout,
json=data)
try:
return response.json()['result']
except KeyError as e:
msg = ("Unexpected EAPI error - KeyError {} - result was {}"
"".format(e, response.json()))
LOG.info(msg)
raise arista_exc.AristaRpcError(msg=msg)
except requests.exceptions.ConnectTimeout:
msg = (_('Timed out while trying to connect to %(url)s') %
{'url': redacted_eapi_server_url})
LOG.warning(msg)
return None
except requests.exceptions.ReadTimeout:
msg = (_('Timed out while reading from %(url)s') %
{'url': redacted_eapi_server_url})
LOG.warning(msg)
return None
except requests.exceptions.ConnectionError as e:
msg = (_('Error while trying to connect to %(url)s'
'due to %(reason)s') %
{'url': redacted_eapi_server_url, 'reason': e})
LOG.warning(msg)
return None
except requests.exceptions.InvalidURL:
msg = (_('Ignore attempt to connect to invalid URL %(url)s') %
{'url': redacted_eapi_server_url})
LOG.warning(msg)
return None
except ValueError:
LOG.info("Ignoring invalid JSON response")
return None
except Exception as error:
msg = six.text_type(error)
LOG.warning(msg)
raise
def _validate_config(self, reason=''):
if len(cfg.CONF.ml2_arista.get('switch_info')) < 1:
msg = _('Required option - %s, '
'at least one switch must be specified ') % reason
LOG.exception(msg)
raise arista_exc.AristaConfigError(msg=msg)
def _maintain_connections(self):
switches = []
for s in cfg.CONF.ml2_arista.switch_info:
switch_ip, switch_user, switch_pass = s.split(":")
if switch_ip not in self._SERVER_BY_IP:
switches.append((switch_ip, switch_user, switch_pass))
if not switches:
return
server_by_ip = copy(self._SERVER_BY_IP)
server_by_id = copy(self._SERVER_BY_ID)
pool = Pool()
items = [s for s in pool.starmap(self._connect_to_switch, switches)
if s]
for switch_ip, system_id, server in items:
server_by_ip[switch_ip] = server
server_by_id[system_id] = server
AristaSwitchRPCMixin._SERVER_BY_ID = server_by_id
AristaSwitchRPCMixin._SERVER_BY_IP = server_by_ip
def _connect_to_switch(self, switch_ip, switch_user, switch_pass):
try:
def server(cmds):
return self._send_eapi_req(switch_ip, switch_user, switch_pass,
cmds)
@MEMOIZE
def get_lldp_info(_):
try:
ret = server(['show lldp local-info management 1'])
return EUI(ret[0]['chassisId'])
except (IndexError, TypeError, KeyError):
return None
system_id = get_lldp_info(switch_ip)
if not system_id:
get_lldp_info.invalidate(switch_ip)
LOG.warn("Could not connect to server %s",
switch_ip)
return
else:
return switch_ip, system_id, server
except (socket.error, HTTPException) as e:
LOG.warn("Could not connect to server %s due to %s",
switch_ip, e)
    @property
    def _server_by_id(self):
        # Read-only accessor for the class-level
        # {system id (EUI): eAPI server callable} mapping.
        return self._SERVER_BY_ID
def _get_id_by_server(self, server):
for _idx, _srv in self._SERVER_BY_ID.items():
if _srv == server:
return _idx
def _get_ip_by_server(self, server):
for _idx, _srv in self._SERVER_BY_IP.items():
if _srv == server:
return _idx
def _get_info_by_server(self, server):
return self._get_id_by_server(server), self._get_ip_by_server(server)
def _get_server_by_id(self, switch_id):
return switch_id and self._SERVER_BY_ID.get(EUI(switch_id))
def _get_server_by_ip(self, switch_ip):
return switch_ip and self._SERVER_BY_IP.get(switch_ip)
def _get_server(self, switch_info=None, switch_id=None):
server = (self._get_server_by_id(switch_id) or
self._get_server_by_ip(switch_info))
if server:
return server
self._maintain_connections()
return (self._get_server_by_id(switch_id) or
self._get_server_by_ip(switch_info))
class AristaSecGroupSwitchDriver(AristaSwitchRPCMixin):
"""Wraps Arista JSON RPC.
All communications between Neutron and EOS are over JSON RPC.
EOS - operating system used on Arista hardware
Command API - JSON RPC API provided by Arista EOS
"""
def __init__(self, neutron_db, http_session=None):
super(AristaSecGroupSwitchDriver, self).__init__(
http_session=http_session)
self._ndb = neutron_db
self.sg_enabled = cfg.CONF.ml2_arista.get('sec_group_support')
if not self.sg_enabled:
return
self._validate_config(_('when "sec_group_support" is enabled'))
self.max_rules = cfg.CONF.ml2_arista.get('lossy_consolidation_limit')
self._protocol_table = {
num: name[8:] for name, num in vars(socket).items()
if name.startswith("IPPROTO")
}
self.aclCreateDict = acl_cmd['acl']
self.aclApplyDict = acl_cmd['apply']
def _get_port_name(self, port, protocol=None):
try:
return socket.getservbyport(port, protocol)
except socket.error:
return port
    def _create_acl_on_eos(self, in_cmds, out_cmds, protocol, cidr,
                           from_port, to_port, direction):
        """Append EOS ACL-rule commands for one security-group rule.

        Mutates ``in_cmds``/``out_cmds`` in place (and also returns them) by
        formatting the command templates in ``self.aclCreateDict``.

        :param in_cmds: ingress ACL command list, appended in place
        :param out_cmds: egress ACL command list, appended in place
        :param protocol: 'icmp', 'dhcp' or an L4 protocol such as 'tcp'/'udp'
        :param cidr: remote prefix; normalized below to EOS syntax
        :param from_port: rule start port (for ICMP presumably the type --
            confirm against the acl_cmd templates); may be None for ICMP
        :param to_port: rule end port (for ICMP presumably the code --
            confirm against the acl_cmd templates); may be None for ICMP
        :param direction: 'ingress' or 'egress'
        """
        # Normalize the CIDR to EOS ACL syntax: 'any' for /0, 'host x' for a
        # single address.
        if cidr:
            if cidr == 'any' or cidr.endswith('/0'):
                cidr = 'any'
            elif cidr.endswith('/32'):
                cidr = 'host ' + cidr[:-3]
            elif '/' not in cidr:
                cidr = 'host ' + cidr
        if protocol == 'icmp':
            # ICMP rules require special processing
            # Classify by which of (type, code) are present; note this uses
            # ``is None`` checks, so port 0 counts as "specified".
            if from_port is None and to_port is None:
                rule = 'icmp_custom3'
            elif from_port is not None and to_port is not None:
                rule = 'icmp_custom2'
            elif from_port is not None and to_port is None:
                rule = 'icmp_custom1'
            else:
                msg = _('Invalid ICMP rule specified')
                LOG.exception(msg)
                raise arista_exc.AristaSecurityGroupError(msg=msg)
            rule_type = 'in'
            cmds = in_cmds
            if direction == 'egress':
                rule_type = 'out'
                cmds = out_cmds
            final_rule = rule_type + '_' + rule
            acl_dict = self.aclCreateDict[final_rule]
            # None port is problematic - should be replaced with 0
            if not from_port:
                from_port = 0
            if not to_port:
                to_port = 0
            for c in acl_dict:
                if rule == 'icmp_custom2':
                    cmds.append(c.format(cidr, from_port, to_port))
                else:
                    cmds.append(c.format(cidr, from_port))
            return in_cmds, out_cmds
        elif protocol == 'dhcp':
            # Not really a layer2 protocol
            # DHCP gets fixed templates in both directions.
            for c in self.aclCreateDict['in_dhcp_rule']:
                in_cmds.append(c.format(protocol, cidr, from_port, to_port))
            for c in self.aclCreateDict['out_dhcp_rule']:
                out_cmds.append(c.format(protocol, cidr, from_port, to_port))
            return in_cmds, out_cmds
        else:
            # Non ICMP rules processing here
            # Full-range rules (<=1 .. 65535) use the '_norange' templates.
            rule_ext = ''
            if from_port <= 1 and to_port == 65535:
                rule_ext = '_norange'
            flags = ''
            if direction == 'egress':
                if protocol == 'tcp':
                    # TCP return traffic is matched via the 'syn' flag, so no
                    # reverse ingress rule is needed.
                    flags = ' syn'
                    out_rule = self.aclCreateDict['out_rule_tcp' + rule_ext]
                    in_rule = []
                else:
                    # Non-TCP: allow replies on the ephemeral port range.
                    flags = ' range 32768 65535'
                    out_rule = self.aclCreateDict['out_rule' + rule_ext]
                    in_rule = self.aclCreateDict['out_rule_reverse' + rule_ext]
            else:
                in_rule = self.aclCreateDict['in_rule' + rule_ext]
                if protocol == 'tcp':
                    flags = ' syn'
                    out_rule = []
                else:
                    out_rule = self.aclCreateDict['in_rule_reverse' + rule_ext]
            for c in in_rule:
                in_cmds.append(c.format(protocol, cidr, from_port, to_port,
                                        flags).strip())
            for c in out_rule:
                out_cmds.append(c.format(protocol, cidr, from_port, to_port,
                                         flags).strip())
            return in_cmds, out_cmds
def _delete_acl_from_eos(self, name, server):
    """Delete an ACL from an Arista hardware device.

    :param name: name of the ACL to remove
    :param server: server endpoint on the Arista switch to be configured
    """
    # Render every 'delete_acl' template with the ACL name, then push
    # the resulting command batch to the switch.
    delete_cmds = [template.format(name)
                   for template in self.aclCreateDict['delete_acl']]
    self._run_openstack_sg_cmds(delete_cmds, server)
def _delete_acl_rule_from_eos(self, name,
                              protocol, cidr,
                              from_port, to_port,
                              direction, server):
    """Deletes an ACL rule from an Arista HW Device.

    Looks up the matching 'del_*' command templates in aclCreateDict,
    renders them with the rule parameters, and runs the batch against
    the switch.

    :param name: Name for the ACL
    :param protocol: rule protocol ('icmp' gets special handling)
    :param cidr: CIDR the rule matched on
    :param from_port: lower port bound (may be None for ICMP rules)
    :param to_port: upper port bound (may be None for ICMP rules)
    :param direction: 'ingress' or 'egress'
    :param server: Server endpoint on the Arista switch to be configured
    """
    cmds = []
    if protocol == 'icmp':
        # ICMP rules require special processing
        # NOTE(review): both-ports-set AND both-ports-None map to
        # 'icmp_custom2' here, while the create path uses a separate
        # 'icmp_custom3' template for the both-None case -- confirm the
        # delete templates are really shared between those two cases.
        if from_port and to_port or (not from_port and not to_port):
            rule = 'icmp_custom2'
        elif from_port and not to_port:
            rule = 'icmp_custom1'
        else:
            # to_port set without from_port is not a valid ICMP rule
            msg = _('Invalid ICMP rule specified')
            LOG.exception(msg)
            raise arista_exc.AristaSecurityGroupError(msg=msg)
        rule_type = 'del_in'
        if direction == 'egress':
            rule_type = 'del_out'
        final_rule = rule_type + '_' + rule
        acl_dict = self.aclCreateDict[final_rule]
        # None port is problematic - should be replaced with 0
        if not from_port:
            from_port = 0
        if not to_port:
            to_port = 0
        for c in acl_dict:
            if rule == 'icmp_custom2':
                cmds.append(c.format(name, cidr, from_port, to_port))
            else:
                # 'icmp_custom1' templates take only the ICMP type
                cmds.append(c.format(name, cidr, from_port))
    else:
        # Non-ICMP (e.g. tcp/udp) rule; a full-range rule uses the
        # '_norange' template variant.
        # NOTE(review): assumes from_port/to_port are non-None here --
        # the comparisons below would fail on None in Python 3; verify
        # the callers always supply concrete ports for non-ICMP rules.
        rule_ext = ''
        if from_port <= 1 and to_port == 65535:
            rule_ext = '_norange'
        acl_dict = self.aclCreateDict['del_in_acl_rule' + rule_ext]
        if direction == 'egress':
            acl_dict = self.aclCreateDict['del_out_acl_rule' + rule_ext]
        for c in acl_dict:
            cmds.append(c.format(name, protocol, cidr,
                                 from_port, to_port))
    self._run_openstack_sg_cmds(cmds, server)
def _apply_acl_on_eos(self, port_id, name, direction, server,
                      accumulator=None):
    """Apply an ACL to a port on an Arista hardware device.

    :param port_id: the port where the ACL needs to be applied
    :param name: name of the ACL
    :param direction: must contain "ingress" or "egress"
    :param server: server endpoint on the Arista switch to be configured
    :param accumulator: optional list; when supplied, the generated
        commands are appended to it for the caller to execute later
        instead of being run here
    """
    cmds = accumulator if accumulator is not None else []
    cmds.extend(template.format(port_id, name)
                for template in self.aclApplyDict[direction])
    # Execute immediately only when no (non-empty) accumulator was
    # handed in to collect the commands.
    if not accumulator:
        self._run_openstack_sg_cmds(cmds, server)
def _remove_acl_from_eos(self, port_id, name, direction, server):
    """Remove an ACL from a port on an Arista hardware device.

    :param port_id: the port the ACL is currently applied to
    :param name: name of the ACL
    :param direction: must contain "ingress" or "egress"
    :param server: server endpoint on the Arista switch to be configured
    """
    # Pick the removal templates for the requested direction, render
    # them, and push the batch to the switch.
    key = 'rm_egress' if direction == 'egress' else 'rm_ingress'
    cmds = [template.format(port_id, name)
            for template in self.aclApplyDict[key]]
    self._run_openstack_sg_cmds(cmds, server)
def _create_acl_rule(self, context, in_cmds, out_cmds, sgr,
security_group_ips=None):
"""Creates an ACL on Arista Switch.
For a given Security Group (ACL), it adds additional rule
Deals with multiple configurations - such as multiple switches
"""
# Only deal with valid protocols - skip the rest
if not sgr or sgr['protocol'] not in SUPPORTED_SG_PROTOCOLS:
return in_cmds, out_cmds
if sgr['ethertype'] is not None \
and sgr['ethertype'] not in SUPPORTED_SG_ETHERTYPES:
return in_cmds, out_cmds
if sgr['protocol'] is None:
protocols = SUPPORTED_SG_PROTOCOLS[0:3]
else:
protocols = [sgr['protocol']]
remote_ips = ['any']
remote_ip_prefix = sgr['remote_ip_prefix']
remote_group_id = sgr['remote_group_id']
if remote_ip_prefix:
remote_ips = [remote_ip_prefix]
elif remote_group_id:
security_group_ips = security_group_ips or {}
if remote_group_id not in security_group_ips:
fetched = db_lib.select_ips_for_remote_group(
context, [remote_group_id])
security_group_ips.update(fetched)
remote_ips = security_group_ips[remote_group_id]
for remote_ip in remote_ips:
for protocol in protocols:
min_port = sgr['port_range_min']
if protocol != 'icmp' and not min_port:
min_port = 0
max_port = sgr['port_range_max']
if not max_port and protocol != 'icmp':
max_port = 65535
in_cmds, | |
it will be most efficient to scan all entities
below the ancestor and load them into memory first.
Use ORDER_FIRST if the query has a sort order and the result set is large
or you only plan to fetch the first few results. In that case, we
shouldn't try to load all of the results into memory; instead, we should
scan the index for this property, which is in sorted order.
Note that hints are currently ignored in the v3 datastore!
Arg:
one of datastore.Query.[ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST]
Returns:
# this query
Query
"""
if hint not in [self.ORDER_FIRST, self.ANCESTOR_FIRST, self.FILTER_FIRST]:
raise datastore_errors.BadArgumentError(
'Query hint must be ORDER_FIRST, ANCESTOR_FIRST, or FILTER_FIRST.')
self.__hint = hint
return self
def Ancestor(self, ancestor):
    """Sets an ancestor for this query.

    Restricts the query to entities descended from the given entity:
    every result will have the ancestor as its parent, or parent's
    parent, and so on.

    Raises BadArgumentError or BadKeyError if ancestor is not an
    existing Entity or Key in the datastore.

    Args:
      # the key must be complete
      ancestor: Entity or Key

    Returns:
      # this query
      Query
    """
    ancestor_key = _GetCompleteKeyOrError(ancestor)
    self.__ancestor = ancestor_key
    return self
def IsKeysOnly(self):
    """Returns True when this query returns only keys, False otherwise."""
    return self.__keys_only
def GetCompiledCursor(self):
    """Returns the compiled cursor from the last run of this query.

    Raises AssertionError when the query has not been executed yet or
    when no compilation is available for this kind of query.
    """
    try:
        cursor = self.__last_iterator.GetCompiledCursor(self)
    except AttributeError:
        # No iterator yet (query never run) behaves like no cursor.
        cursor = None
    if not cursor:
        raise AssertionError('No cursor available, either this query has not '
                             'been executed or there is no compilation '
                             'available for this kind of query')
    return cursor
def GetCompiledQuery(self):
    """Returns the compiled query produced by the last run of this query.

    Raises AssertionError when the query has not been executed yet or
    when no compilation is available for this kind of query.
    """
    try:
        compiled = self.__compiled_query
    except AttributeError:
        # Attribute never set: treat the same as an empty compilation.
        compiled = None
    if not compiled:
        raise AssertionError('No compiled query available, either this query has '
                             'not been executed or there is no compilation '
                             'available for this kind of query')
    return compiled
def Run(self, **kwargs):
    """Runs this query and returns an iterator over its results.

    Raises BadFilterError for an invalid filter string, BadValueError
    for an invalid filter value, and BadQueryError when an IN filter is
    combined with a sort order on another property.

    If the number of wanted results is known in advance, prefer Get();
    it is more efficient.

    Args:
      limit: integer, limit for the query.
      offset: integer, offset for the query.
      prefetch_count: integer, number of results to return in the first query.
      next_count: number of results to return in subsequent next queries.
      rpc: datastore.RPC to use for this request.

    Returns:
      # an iterator that provides access to the query results
      Iterator
    """
    # Thin public wrapper around _Run.
    return self._Run(**kwargs)
def _Run(self, limit=None, offset=None,
         prefetch_count=None, next_count=None, **kwargs):
    """Runs this query with optional limit, offset and batch sizes.

    Identical to Run, plus the optional limit, offset, prefetch_count
    and next_count parameters, which must be integers >= 0. Not
    intended for application developers -- use Get() instead!

    Args:
      limit: integer, limit for the query.
      offset: integer, offset for the query.
      prefetch_count: integer, number of results to return in the first query.
      next_count: number of results to return in subsequent next queries.
      rpc: datastore.RPC to use for this request.

    Returns:
      # an iterator that provides access to the query results
      Iterator
    """
    rpc = GetRpcFromKwargs(kwargs)
    request_pb = self._ToPb(limit, offset, prefetch_count)
    iterator, compiled = Query._RunInternal(request_pb,
                                            next_count=next_count,
                                            rpc=rpc)
    # Remember the iterator and compilation so GetCompiledCursor /
    # GetCompiledQuery can report on this run later.
    self.__last_iterator = iterator
    self.__compiled_query = compiled
    return iterator
@staticmethod
def _RunInternal(request, next_count=None, rpc=None):
    """Runs the given request and wraps the result in an iterator.

    Args:
      request: datastore_pb.Query, the request to run.
      next_count: number of results to return in subsequent next queries.
      rpc: datastore.RPC to use for this request.

    Returns:
      (Iterator, datastore_pb.CompiledQuery), the iterator and compiled query
      that result from running the given request.
    """
    # Clone the RPC up front: the original is consumed by the RunQuery
    # call below, while the clone is handed to the Iterator for its
    # follow-up Next calls.
    if rpc:
        rpc_clone = rpc.clone()
    else:
        rpc_clone = None
    try:
        result = _MakeSyncCall('datastore_v3', 'RunQuery', request,
                               datastore_pb.QueryResult(), rpc)
    except apiproxy_errors.ApplicationError, err:
        # Translate the low-level error; if it turns out to be a
        # missing-index error, augment the message with the YAML for
        # the composite index this query needs.
        try:
            raise _ToDatastoreError(err)
        except datastore_errors.NeedIndexError, exc:
            yaml = datastore_index.IndexYamlForQuery(
                *datastore_index.CompositeIndexForQuery(request)[1:-1])
            raise datastore_errors.NeedIndexError(
                str(exc) + '\nThis query needs this index:\n' + yaml)
    iterator = Iterator(result, query_request_pb=request, batch_size=next_count,
                        rpc=rpc_clone)
    # Only return a compiled query when the backend provided one.
    if result.has_compiled_query():
        return iterator, result.compiled_query()
    else:
        return iterator, None
def Get(self, limit, offset=0, **kwargs):
    """Fetches and returns up to `limit` results of this query as a list.

    Entities that matched the query are returned in the query's sort
    order if one was specified; otherwise the order is undefined. The
    first `offset` matches are skipped, and at most `limit` entities
    are returned after that -- so limit=10, offset=5 over 20 matches
    yields entities 5 through 14. If nothing is left, the list is empty.

    When the number of wanted results is known in advance, this is more
    efficient than Run(), since it fetches all of the results at once
    and the datastore backend sets the limit on the underlying scan.

    Args:
      # the maximum number of entities to return
      int or long
      # the number of entities to skip
      int or long
      rpc: datastore.RPC to use for this request.

    Returns:
      # a list of entities
      [Entity, ...]
    """
    # Both arguments must be non-negative integers.
    if not isinstance(limit, (int, long)) or limit < 0:
        raise datastore_errors.BadArgumentError(
            'Argument to Get named \'limit\' must be an int greater than or '
            'equal to 0; received %s (a %s)' % (limit, typename(limit)))
    if not isinstance(offset, (int, long)) or offset < 0:
        raise datastore_errors.BadArgumentError(
            'Argument to Get named \'offset\' must be an int greater than or '
            'equal to 0; received %s (a %s)' % (offset, typename(offset)))
    # Prefetch exactly `limit` results so one round trip suffices.
    iterator = self._Run(limit=limit, offset=offset,
                         prefetch_count=limit, **kwargs)
    return iterator._Get(limit)
def Count(self, limit=1000, **kwargs):
    """Returns the number of entities this query matches.

    The count is cached; repeated Count() calls do not re-scan the
    datastore unless the query is changed.

    Args:
      limit, a number or None. If there are more results than this, stop
        short and just return this number. Providing this argument makes
        the count operation more efficient.
      rpc: datastore.RPC to use for this request.

    Returns:
      The number of results.
    """
    if not self.__cached_count:
        # Count by running the query with limit 0 and a huge offset:
        # the number of skipped results is the match count.
        effective_offset = _MAX_INT_32 if limit is None else limit
        skipped = self._Run(
            limit=0, offset=effective_offset, **kwargs)._SkippedResults()
        self.__cached_count = skipped
    return self.__cached_count
def __iter__(self):
    """Queries are not directly iterable; Run() returns the iterator."""
    raise NotImplementedError(
        'Query objects should not be used as iterators. Call Run() first.')
def __setitem__(self, filter, value):
    """Implements the [] operator. Used to set filters.

    If the filter string is empty or not a string, raises BadFilterError.
    If the value is not a supported type, raises BadValueError.
    """
    # Filter values given as tuples are normalized to lists before
    # validation.
    if isinstance(value, tuple):
        value = list(value)
    datastore_types.ValidateProperty(' ', value, read_only=True)
    match = self._CheckFilter(filter, value)
    property = match.group(1)
    operator = match.group(3)
    dict.__setitem__(self, filter, value)
    if (operator in self.INEQUALITY_OPERATORS and
        property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
        # Track the single property inequality filters apply to; the
        # assert enforces that all inequalities share one property.
        if self.__inequality_prop is None:
            self.__inequality_prop = property
        else:
            assert self.__inequality_prop == property
        self.__inequality_count += 1
    # Record the order in which each filter was first added.
    if filter not in self.__filter_order:
        self.__filter_order[filter] = self.__filter_counter
        self.__filter_counter += 1
    # Filters changed, so any cached Count() result is now stale.
    self.__cached_count = None
def setdefault(self, filter, value):
    """If the filter exists, returns its value. Otherwise sets it to value.

    If the property name is the empty string or not a string, raises
    BadPropertyError. If the value is not a supported type, raises
    BadValueError.
    """
    # Validate first so an invalid filter/value never mutates the query.
    datastore_types.ValidateProperty(' ', value)
    self._CheckFilter(filter, value)
    # A possible mutation invalidates any cached Count() result.
    self.__cached_count = None
    return dict.setdefault(self, filter, value)
def __delitem__(self, filter):
    """Implements the del [] operator. Used to remove filters.
    """
    dict.__delitem__(self, filter)
    del self.__filter_order[filter]
    # Removing a filter invalidates any cached Count() result.
    self.__cached_count = None
    # Re-parse the filter string to keep the inequality bookkeeping in
    # sync when an inequality filter is removed.
    # NOTE(review): unlike __setitem__, this path does not exclude the
    # unapplied-log-timestamp special property -- confirm such a filter
    # can never be deleted, or the asserts below could fire.
    match = Query.FILTER_REGEX.match(filter)
    property = match.group(1)
    operator = match.group(3)
    if operator in self.INEQUALITY_OPERATORS:
        assert self.__inequality_count >= 1
        assert property == self.__inequality_prop
        self.__inequality_count -= 1
        if self.__inequality_count == 0:
            self.__inequality_prop = None
def update(self, other):
"""Updates this query's filters from | |
Control, Number),
UnaryExpression(Keyword('not'), Boolean, Boolean),
UnaryExpression(Keyword('numberofenginesrtd'), Object, Number),
UnaryExpression(Keyword('numbertodate'), Array, Array),
UnaryExpression(Keyword('objectcurators'), Object, Array),
UnaryExpression(Keyword('objectfromnetid'), String, Object),
UnaryExpression(Keyword('objectparent'), Object, Object),
UnaryExpression(Keyword('onbriefinggroup'), String, Nothing),
UnaryExpression(Keyword('onbriefingnotes'), String, Nothing),
UnaryExpression(Keyword('onbriefingplan'), String, Nothing),
UnaryExpression(Keyword('onbriefingteamswitch'), String, Nothing),
UnaryExpression(Keyword('oncommandmodechanged'), Code, Nothing),
UnaryExpression(Keyword('oncommandmodechanged'), String, Nothing),
UnaryExpression(Keyword('oneachframe'), Code, Nothing),
UnaryExpression(Keyword('oneachframe'), String, Nothing),
UnaryExpression(Keyword('ongroupiconclick'), Code, Nothing),
UnaryExpression(Keyword('ongroupiconclick'), String, Nothing),
UnaryExpression(Keyword('ongroupiconoverenter'), Code, Nothing),
UnaryExpression(Keyword('ongroupiconoverenter'), String, Nothing),
UnaryExpression(Keyword('ongroupiconoverleave'), Code, Nothing),
UnaryExpression(Keyword('ongroupiconoverleave'), String, Nothing),
UnaryExpression(Keyword('onhcgroupselectionchanged'), Code, Nothing),
UnaryExpression(Keyword('onhcgroupselectionchanged'), String, Nothing),
UnaryExpression(Keyword('onmapsingleclick'), Code, Nothing),
UnaryExpression(Keyword('onmapsingleclick'), String, Nothing),
UnaryExpression(Keyword('onplayerconnected'), Code, Nothing),
UnaryExpression(Keyword('onplayerconnected'), String, Nothing),
UnaryExpression(Keyword('onplayerdisconnected'), Code, Nothing),
UnaryExpression(Keyword('onplayerdisconnected'), String, Nothing),
UnaryExpression(Keyword('onpreloadfinished'), Code, Nothing),
UnaryExpression(Keyword('onpreloadfinished'), String, Nothing),
UnaryExpression(Keyword('onpreloadstarted'), Code, Nothing),
UnaryExpression(Keyword('onpreloadstarted'), String, Nothing),
UnaryExpression(Keyword('onteamswitch'), Code, Nothing),
UnaryExpression(Keyword('onteamswitch'), String, Nothing),
UnaryExpression(Keyword('opendlcpage'), Number, Boolean),
UnaryExpression(Keyword('openmap'), Array, Boolean),
UnaryExpression(Keyword('openmap'), Boolean, Boolean),
UnaryExpression(Keyword('opensteamapp'), Number, Boolean),
UnaryExpression(Keyword('openyoutubevideo'), String, Boolean),
UnaryExpression(Keyword('owner'), Object, Number),
UnaryExpression(Keyword('param'), Array, Anything),
UnaryExpression(Keyword('params'), Array, Boolean),
UnaryExpression(Keyword('parsenumber'), String, Number),
UnaryExpression(Keyword('parsenumber'), Boolean, Number),
UnaryExpression(Keyword('parsesimplearray'), String, Array),
UnaryExpression(Keyword('parsetext'), String, String),
UnaryExpression(Keyword('pickweaponpool'), Object, Nothing),
UnaryExpression(Keyword('pitch'), Object, String),
UnaryExpression(Keyword('playableslotsnumber'), Side, Number),
UnaryExpression(Keyword('playersnumber'), Side, Number),
UnaryExpression(Keyword('playmission'), Array, Nothing),
UnaryExpression(Keyword('playmusic'), String, Nothing),
UnaryExpression(Keyword('playmusic'), Array, Nothing),
UnaryExpression(Keyword('playscriptedmission'), Array, Nothing),
UnaryExpression(Keyword('playsound'), String, Nothing),
UnaryExpression(Keyword('playsound'), Array, Nothing),
UnaryExpression(Keyword('playsound3d'), Array, Nothing),
UnaryExpression(Keyword('position'), Object, Array),
UnaryExpression(Keyword('position'), Location, Array),
UnaryExpression(Keyword('positioncameratoworld'), Array, Array),
UnaryExpression(Keyword('ppeffectcommitted'), String, Boolean),
UnaryExpression(Keyword('ppeffectcommitted'), Number, Boolean),
UnaryExpression(Keyword('ppeffectcreate'), Array, Anything),
UnaryExpression(Keyword('ppeffectdestroy'), Number, Nothing),
UnaryExpression(Keyword('ppeffectdestroy'), Array, Nothing),
UnaryExpression(Keyword('ppeffectenabled'), Number, Boolean),
UnaryExpression(Keyword('precision'), Object, Number),
UnaryExpression(Keyword('preloadcamera'), Array, Boolean),
UnaryExpression(Keyword('preloadsound'), String, Boolean),
UnaryExpression(Keyword('preloadtitleobj'), Array, Boolean),
UnaryExpression(Keyword('preloadtitlersc'), Array, Boolean),
UnaryExpression(Keyword('preprocessfile'), String, String),
UnaryExpression(Keyword('preprocessfilelinenumbers'), String, String),
UnaryExpression(Keyword('primaryweapon'), Object, String),
UnaryExpression(Keyword('primaryweaponitems'), Object, Array),
UnaryExpression(Keyword('primaryweaponmagazine'), Object, Array),
UnaryExpression(Keyword('priority'), Task, Number),
UnaryExpression(Keyword('private'), String, Nothing),
UnaryExpression(Keyword('private'), Array, Nothing),
UnaryExpression(Keyword('processdiarylink'), String, Nothing),
UnaryExpression(Keyword('progressloadingscreen'), Number, Nothing),
UnaryExpression(Keyword('progressposition'), Control, Number),
UnaryExpression(Keyword('publicvariable'), String, Nothing),
UnaryExpression(Keyword('publicvariableserver'), String, Nothing),
UnaryExpression(Keyword('putweaponpool'), Object, Nothing),
UnaryExpression(Keyword('queryitemspool'), String, Number),
UnaryExpression(Keyword('querymagazinepool'), String, Number),
UnaryExpression(Keyword('queryweaponpool'), String, Number),
UnaryExpression(Keyword('rad'), Number, Number),
UnaryExpression(Keyword('radiochannelcreate'), Array, Number),
UnaryExpression(Keyword('random'), Array, Number),
UnaryExpression(Keyword('random'), Number, Number),
UnaryExpression(Keyword('rank'), Object, String),
UnaryExpression(Keyword('rankid'), Object, Number),
UnaryExpression(Keyword('rating'), Object, Number),
UnaryExpression(Keyword('rectangular'), Location, Boolean),
UnaryExpression(Keyword('registeredtasks'), TeamMember, Array),
UnaryExpression(Keyword('reload'), Object, Nothing),
UnaryExpression(Keyword('reloadenabled'), Object, Boolean),
UnaryExpression(Keyword('remoteexec'), Array, Anything),
UnaryExpression(Keyword('remoteexeccall'), Array, Anything),
UnaryExpression(Keyword('remove3denconnection'), Array, Nothing),
UnaryExpression(Keyword('remove3deneventhandler'), Array, Nothing),
UnaryExpression(Keyword('remove3denlayer'), Number, Boolean),
UnaryExpression(Keyword('removeall3deneventhandlers'), String, Nothing),
UnaryExpression(Keyword('removeallactions'), Object, Nothing),
UnaryExpression(Keyword('removeallassigneditems'), Object, Nothing),
UnaryExpression(Keyword('removeallcontainers'), Object, Nothing),
UnaryExpression(Keyword('removeallcuratoraddons'), Object, Nothing),
UnaryExpression(Keyword('removeallcuratorcameraareas'), Object, Nothing),
UnaryExpression(Keyword('removeallcuratoreditingareas'), Object, Nothing),
UnaryExpression(Keyword('removeallhandgunitems'), Object, Nothing),
UnaryExpression(Keyword('removeallitems'), Object, Nothing),
UnaryExpression(Keyword('removeallitemswithmagazines'), Object, Nothing),
UnaryExpression(Keyword('removeallmissioneventhandlers'), String, Nothing),
UnaryExpression(Keyword('removeallmusiceventhandlers'), String, Nothing),
UnaryExpression(Keyword('removeallownedmines'), Object, Nothing),
UnaryExpression(Keyword('removeallprimaryweaponitems'), Object, Nothing),
UnaryExpression(Keyword('removeallweapons'), Object, Nothing),
UnaryExpression(Keyword('removebackpack'), Object, Nothing),
UnaryExpression(Keyword('removebackpackglobal'), Object, Nothing),
UnaryExpression(Keyword('removefromremainscollector'), Array, Nothing),
UnaryExpression(Keyword('removegoggles'), Object, Nothing),
UnaryExpression(Keyword('removeheadgear'), Object, Nothing),
UnaryExpression(Keyword('removemissioneventhandler'), Array, Nothing),
UnaryExpression(Keyword('removemusiceventhandler'), Array, Nothing),
UnaryExpression(Keyword('removeswitchableunit'), Object, Nothing),
UnaryExpression(Keyword('removeuniform'), Object, Nothing),
UnaryExpression(Keyword('removevest'), Object, Nothing),
UnaryExpression(Keyword('requiredversion'), String, Boolean),
UnaryExpression(Keyword('resetsubgroupdirection'), Object, Nothing),
UnaryExpression(Keyword('resources'), TeamMember, Array),
UnaryExpression(Keyword('restarteditorcamera'), Control, Nothing),
UnaryExpression(Keyword('reverse'), Array, Nothing),
UnaryExpression(Keyword('roadat'), Object, Object),
UnaryExpression(Keyword('roadat'), Array, Object),
UnaryExpression(Keyword('roadsconnectedto'), Object, Array),
UnaryExpression(Keyword('roledescription'), Object, String),
UnaryExpression(Keyword('ropeattachedobjects'), Object, Array),
UnaryExpression(Keyword('ropeattachedto'), Object, Object),
UnaryExpression(Keyword('ropeattachenabled'), Object, Boolean),
UnaryExpression(Keyword('ropecreate'), Array, Object),
UnaryExpression(Keyword('ropecut'), Array, Nothing),
UnaryExpression(Keyword('ropedestroy'), Object, Nothing),
UnaryExpression(Keyword('ropeendposition'), Object, Array),
UnaryExpression(Keyword('ropelength'), Object, Number),
UnaryExpression(Keyword('ropes'), Object, Array),
UnaryExpression(Keyword('ropeunwind'), Array, Nothing),
UnaryExpression(Keyword('ropeunwound'), Object, Boolean),
UnaryExpression(Keyword('rotorsforcesrtd'), Object, Array),
UnaryExpression(Keyword('rotorsrpmrtd'), Object, Array),
UnaryExpression(Keyword('round'), Number, Number),
UnaryExpression(Keyword('save3deninventory'), Array, Nothing),
UnaryExpression(Keyword('saveoverlay'), Control, Nothing),
UnaryExpression(Keyword('savevar'), String, Nothing),
UnaryExpression(Keyword('scopename'), String, Nothing),
UnaryExpression(Keyword('score'), Object, Number),
UnaryExpression(Keyword('scoreside'), Side, Number),
UnaryExpression(Keyword('screenshot'), String, Boolean),
UnaryExpression(Keyword('screentoworld'), Array, Array),
UnaryExpression(Keyword('scriptdone'), Script, Boolean),
UnaryExpression(Keyword('scriptname'), String, Nothing),
UnaryExpression(Keyword('scudstate'), Object, Number),
UnaryExpression(Keyword('secondaryweapon'), Object, String),
UnaryExpression(Keyword('secondaryweaponitems'), Object, Array),
UnaryExpression(Keyword('secondaryweaponmagazine'), Object, Array),
UnaryExpression(Keyword('selectbestplaces'), Array, Array),
UnaryExpression(Keyword('selectededitorobjects'), Control, Nothing),
UnaryExpression(Keyword('selectionnames'), Object, Array),
UnaryExpression(Keyword('selectmax'), Array, Anything),
UnaryExpression(Keyword('selectmin'), Array, Anything),
UnaryExpression(Keyword('selectplayer'), Object, Nothing),
UnaryExpression(Keyword('selectrandom'), Array, Anything),
UnaryExpression(Keyword('selectrandomweighted'), Array, Anything),
UnaryExpression(Keyword('sendaumessage'), Array, Nothing),
UnaryExpression(Keyword('sendudpmessage'), Array, Boolean),
UnaryExpression(Keyword('servercommand'), String, Boolean),
UnaryExpression(Keyword('servercommandavailable'), String, Boolean),
UnaryExpression(Keyword('servercommandexecutable'), String, Boolean),
UnaryExpression(Keyword('set3denattributes'), Array, Boolean),
UnaryExpression(Keyword('set3dengrid'), Array, Nothing),
UnaryExpression(Keyword('set3deniconsvisible'), Array, Nothing),
UnaryExpression(Keyword('set3denlinesvisible'), Array, Nothing),
UnaryExpression(Keyword('set3denmissionattributes'), Array, Nothing),
UnaryExpression(Keyword('set3denmodelsvisible'), Array, Nothing),
UnaryExpression(Keyword('set3denselected'), Array, Nothing),
UnaryExpression(Keyword('setacctime'), Number, Nothing),
UnaryExpression(Keyword('setaperture'), Number, Nothing),
UnaryExpression(Keyword('setaperturenew'), Array, Nothing),
UnaryExpression(Keyword('setarmorypoints'), Number, Nothing),
UnaryExpression(Keyword('setcamshakedefparams'), Array, Nothing),
UnaryExpression(Keyword('setcamshakeparams'), Array, Nothing),
UnaryExpression(Keyword('setcompassoscillation'), Array, Nothing),
UnaryExpression(Keyword('setcurrentchannel'), Number, Boolean),
UnaryExpression(Keyword('setcustommissiondata'), Array, Nothing),
UnaryExpression(Keyword('setcustomsoundcontroller'), Array, Boolean),
UnaryExpression(Keyword('setdate'), Array, Nothing),
UnaryExpression(Keyword('setdefaultcamera'), Array, Nothing),
UnaryExpression(Keyword('setdetailmapblendpars'), Array, Nothing),
UnaryExpression(Keyword('setgroupiconsselectable'), Boolean, Nothing),
UnaryExpression(Keyword('setgroupiconsvisible'), Array, Nothing),
UnaryExpression(Keyword('sethorizonparallaxcoef'), Number, Nothing),
UnaryExpression(Keyword('sethudmovementlevels'), Array, Nothing),
UnaryExpression(Keyword('setinfopanel'), Array, Boolean),
UnaryExpression(Keyword('setlocalwindparams'), Array, Nothing),
UnaryExpression(Keyword('setmouseposition'), Array, Nothing),
UnaryExpression(Keyword('setmusiceventhandler'), Array, Nothing),
UnaryExpression(Keyword('setobjectviewdistance'), Number, Nothing),
UnaryExpression(Keyword('setobjectviewdistance'), Array, Nothing),
UnaryExpression(Keyword('setplayable'), Object, Nothing),
UnaryExpression(Keyword('setplayerrespawntime'), Number, Nothing),
UnaryExpression(Keyword('setshadowdistance'), Number, Nothing),
UnaryExpression(Keyword('setsimulweatherlayers'), Number, Nothing),
UnaryExpression(Keyword('setstaminascheme'), String, Nothing),
UnaryExpression(Keyword('setstatvalue'), Array, Boolean),
UnaryExpression(Keyword('setsystemofunits'), Number, Nothing),
UnaryExpression(Keyword('setterraingrid'), Number, Nothing),
UnaryExpression(Keyword('settimemultiplier'), Number, Nothing),
UnaryExpression(Keyword('settrafficdensity'), Array, Nothing),
UnaryExpression(Keyword('settrafficdistance'), Number, Nothing),
UnaryExpression(Keyword('settrafficgap'), Array, Nothing),
UnaryExpression(Keyword('settrafficspeed'), Array, Nothing),
UnaryExpression(Keyword('setviewdistance'), Number, Nothing),
UnaryExpression(Keyword('setwind'), Array, Nothing),
UnaryExpression(Keyword('setwinddir'), Array, Nothing),
UnaryExpression(Keyword('showchat'), Boolean, Nothing),
UnaryExpression(Keyword('showcinemaborder'), Boolean, Nothing),
UnaryExpression(Keyword('showcommandingmenu'), String, Nothing),
UnaryExpression(Keyword('showcompass'), Boolean, Nothing),
UnaryExpression(Keyword('showcuratorcompass'), Boolean, Nothing),
UnaryExpression(Keyword('showgps'), Boolean, Nothing),
UnaryExpression(Keyword('showhud'), Boolean, Nothing),
UnaryExpression(Keyword('showhud'), Array, Nothing),
UnaryExpression(Keyword('showmap'), Boolean, Nothing),
UnaryExpression(Keyword('showpad'), Boolean, Nothing),
UnaryExpression(Keyword('showradio'), Boolean, Nothing),
UnaryExpression(Keyword('showscoretable'), Number, Nothing),
UnaryExpression(Keyword('showsubtitles'), Boolean, Boolean),
UnaryExpression(Keyword('showuavfeed'), Boolean, Nothing),
UnaryExpression(Keyword('showwarrant'), Boolean, Nothing),
UnaryExpression(Keyword('showwatch'), Boolean, Nothing),
UnaryExpression(Keyword('showwaypoints'), Boolean, Nothing),
UnaryExpression(Keyword('side'), Object, Side),
UnaryExpression(Keyword('side'), Group, Side),
UnaryExpression(Keyword('side'), Location, Side),
UnaryExpression(Keyword('simpletasks'), Object, Array),
UnaryExpression(Keyword('simulationenabled'), Object, Boolean),
UnaryExpression(Keyword('simulclouddensity'), Array, Number),
UnaryExpression(Keyword('simulcloudocclusion'), Array, Number),
UnaryExpression(Keyword('simulinclouds'), Array, Boolean),
UnaryExpression(Keyword('sin'), Number, Number),
UnaryExpression(Keyword('size'), Location, Array),
UnaryExpression(Keyword('sizeof'), String, Number),
UnaryExpression(Keyword('skill'), Object, Number),
UnaryExpression(Keyword('skiptime'), Number, Nothing),
UnaryExpression(Keyword('sleep'), Number, Nothing),
UnaryExpression(Keyword('sliderposition'), Control, Number),
UnaryExpression(Keyword('sliderposition'), Number, Number),
UnaryExpression(Keyword('sliderrange'), Control, Array),
UnaryExpression(Keyword('sliderrange'), Number, Array),
UnaryExpression(Keyword('slidersetposition'), Array, Nothing),
UnaryExpression(Keyword('slidersetrange'), Array, Nothing),
UnaryExpression(Keyword('slidersetspeed'), Array, Nothing),
UnaryExpression(Keyword('sliderspeed'), Control, Array),
UnaryExpression(Keyword('sliderspeed'), Number, Array),
UnaryExpression(Keyword('soldiermagazines'), Object, Array),
UnaryExpression(Keyword('someammo'), Object, Boolean),
UnaryExpression(Keyword('speaker'), Object, String),
UnaryExpression(Keyword('speed'), Object, Number),
UnaryExpression(Keyword('speedmode'), Object, String),
UnaryExpression(Keyword('speedmode'), Group, String),
UnaryExpression(Keyword('sqrt'), Number, Number),
UnaryExpression(Keyword('squadparams'), Object, Array),
UnaryExpression(Keyword('stance'), Object, String),
UnaryExpression(Keyword('startloadingscreen'), Array, Nothing),
UnaryExpression(Keyword('stopenginertd'), Object, Nothing),
UnaryExpression(Keyword('stopped'), Object, Boolean),
UnaryExpression(Keyword('str'), Type, String),
UnaryExpression(Keyword('supportinfo'), String, Array),
UnaryExpression(Keyword('surfaceiswater'), Array, Boolean),
UnaryExpression(Keyword('surfacenormal'), Array, Array),
UnaryExpression(Keyword('surfacetype'), Array, String),
UnaryExpression(Keyword('switch'), Type, SwitchType),
UnaryExpression(Keyword('switchcamera'), Object, Nothing),
UnaryExpression(Keyword('synchronizedobjects'), Object, Array),
UnaryExpression(Keyword('synchronizedtriggers'), Array, Array),
UnaryExpression(Keyword('synchronizedwaypoints'), Object, Array),
UnaryExpression(Keyword('synchronizedwaypoints'), Array, Array),
UnaryExpression(Keyword('systemchat'), String, Nothing),
UnaryExpression(Keyword('tan'), Number, Number),
UnaryExpression(Keyword('taskalwaysvisible'), Task, Boolean),
UnaryExpression(Keyword('taskchildren'), Task, Array),
UnaryExpression(Keyword('taskcompleted'), Task, Boolean),
UnaryExpression(Keyword('taskcustomdata'), Task, Array),
UnaryExpression(Keyword('taskdescription'), Task, Array),
UnaryExpression(Keyword('taskdestination'), Task, Array),
UnaryExpression(Keyword('taskhint'), Array, Nothing),
UnaryExpression(Keyword('taskmarkeroffset'), Object, Array),
UnaryExpression(Keyword('taskparent'), Task, Task),
UnaryExpression(Keyword('taskresult'), Task, Array),
UnaryExpression(Keyword('taskstate'), Task, String),
UnaryExpression(Keyword('tasktype'), Task, String),
UnaryExpression(Keyword('teammember'), Object, TeamMember),
UnaryExpression(Keyword('teamname'), TeamMember, String),
UnaryExpression(Keyword('teamtype'), TeamMember, String),
UnaryExpression(Keyword('terminate'), Script, Nothing),
UnaryExpression(Keyword('terrainintersect'), Array, Boolean),
UnaryExpression(Keyword('terrainintersectasl'), Array, Boolean),
UnaryExpression(Keyword('terrainintersectatasl'), Array, Array),
UnaryExpression(Keyword('text'), String, String),
UnaryExpression(Keyword('text'), Location, String),
UnaryExpression(Keyword('textlog'), Type, Nothing),
UnaryExpression(Keyword('textlogformat'), Array, Nothing),
UnaryExpression(Keyword('tg'), Number, Number),
UnaryExpression(Keyword('throw'), Type, Nothing),
UnaryExpression(Keyword('titlecut'), Array, Nothing),
UnaryExpression(Keyword('titlefadeout'), Number, Nothing),
UnaryExpression(Keyword('titleobj'), Array, Nothing),
UnaryExpression(Keyword('titlersc'), Array, Nothing),
UnaryExpression(Keyword('titletext'), Array, Nothing),
UnaryExpression(Keyword('toarray'), String, Array),
UnaryExpression(Keyword('tofixed'), Number, Nothing),
UnaryExpression(Keyword('tolower'), String, String),
UnaryExpression(Keyword('tostring'), Array, String),
UnaryExpression(Keyword('toupper'), String, String),
UnaryExpression(Keyword('triggeractivated'), Object, Boolean),
UnaryExpression(Keyword('triggeractivation'), Object, Array),
UnaryExpression(Keyword('triggerarea'), Object, Array),
UnaryExpression(Keyword('triggerattachedvehicle'), Object, Object),
UnaryExpression(Keyword('triggerstatements'), Object, Array),
UnaryExpression(Keyword('triggertext'), Object, String),
UnaryExpression(Keyword('triggertimeout'), Object, Array),
UnaryExpression(Keyword('triggertimeoutcurrent'), Object, Number),
UnaryExpression(Keyword('triggertype'), Object, String),
UnaryExpression(Keyword('try'), Code, TryType),
UnaryExpression(Keyword('tvadd'), Array, Number),
UnaryExpression(Keyword('tvclear'), Number, Nothing),
UnaryExpression(Keyword('tvclear'), Control, Nothing),
UnaryExpression(Keyword('tvcollapse'), Array, Nothing),
UnaryExpression(Keyword('tvcollapseall'), Number, Nothing),
UnaryExpression(Keyword('tvcollapseall'), Control, Nothing),
UnaryExpression(Keyword('tvcount'), Array, Number),
UnaryExpression(Keyword('tvcursel'), Number, Array),
UnaryExpression(Keyword('tvcursel'), Control, Array),
UnaryExpression(Keyword('tvdata'), Array, String),
UnaryExpression(Keyword('tvdelete'), Array, Nothing),
UnaryExpression(Keyword('tvexpand'), Array, Nothing),
UnaryExpression(Keyword('tvexpandall'), Number, Nothing),
UnaryExpression(Keyword('tvexpandall'), Control, Nothing),
UnaryExpression(Keyword('tvpicture'), Array, String),
UnaryExpression(Keyword('tvpictureright'), Array, String),
UnaryExpression(Keyword('tvsetcursel'), Array, Nothing),
UnaryExpression(Keyword('tvsetdata'), Array, Nothing),
UnaryExpression(Keyword('tvsetpicture'), Array, Nothing),
UnaryExpression(Keyword('tvsetpicturecolor'), Array, Nothing),
UnaryExpression(Keyword('tvsetpictureright'), Array, Nothing),
UnaryExpression(Keyword('tvsetpicturerightcolor'), Array, Nothing),
UnaryExpression(Keyword('tvsettext'), Array, String),
UnaryExpression(Keyword('tvsettooltip'), Array, Nothing),
UnaryExpression(Keyword('tvsetvalue'), Array, Nothing),
UnaryExpression(Keyword('tvsort'), Array, Nothing),
UnaryExpression(Keyword('tvsortbyvalue'), Array, Nothing),
UnaryExpression(Keyword('tvtext'), Array, String),
UnaryExpression(Keyword('tvtooltip'), Array, String),
UnaryExpression(Keyword('tvvalue'), Array, Number),
UnaryExpression(Keyword('type'), Task, String),
UnaryExpression(Keyword('type'), Location, String),
UnaryExpression(Keyword('typename'), Type, String),
UnaryExpression(Keyword('typeof'), Object, String),
UnaryExpression(Keyword('uavcontrol'), Object, Array),
UnaryExpression(Keyword('uisleep'), Number, Nothing),
UnaryExpression(Keyword('unassigncurator'), Object, Nothing),
UnaryExpression(Keyword('unassignteam'), Object, Nothing),
UnaryExpression(Keyword('unassignvehicle'), Object, Nothing),
UnaryExpression(Keyword('underwater'), Object, Boolean),
UnaryExpression(Keyword('uniform'), Object, String),
UnaryExpression(Keyword('uniformcontainer'), Object, Object),
UnaryExpression(Keyword('uniformitems'), Object, Array),
UnaryExpression(Keyword('uniformmagazines'), Object, Array),
UnaryExpression(Keyword('unitaddons'), String, Array),
UnaryExpression(Keyword('unitaimposition'), Object, Array),
UnaryExpression(Keyword('unitaimpositionvisual'), Object, Array),
UnaryExpression(Keyword('unitbackpack'), Object, Object),
UnaryExpression(Keyword('unitisuav'), Object, Boolean),
UnaryExpression(Keyword('unitpos'), Object, String),
UnaryExpression(Keyword('unitready'), Object, Boolean),
UnaryExpression(Keyword('unitready'), Array, Boolean),
UnaryExpression(Keyword('unitrecoilcoefficient'), Object, Number),
UnaryExpression(Keyword('units'), Group, Array),
UnaryExpression(Keyword('units'), Object, Array),
UnaryExpression(Keyword('unlockachievement'), String, Boolean),
UnaryExpression(Keyword('updateobjecttree'), Control, Nothing),
UnaryExpression(Keyword('useaiopermapobstructiontest'), Boolean, Nothing),
UnaryExpression(Keyword('useaisteeringcomponent'), Boolean, Nothing),
UnaryExpression(Keyword('vectordir'), Object, Array),
UnaryExpression(Keyword('vectordirvisual'), Object, Array),
UnaryExpression(Keyword('vectormagnitude'), Array, Number),
UnaryExpression(Keyword('vectormagnitudesqr'), Array, Number),
UnaryExpression(Keyword('vectornormalized'), Array, Array),
UnaryExpression(Keyword('vectorup'), Object, Array),
UnaryExpression(Keyword('vectorupvisual'), Object, Array),
UnaryExpression(Keyword('vehicle'), Object, Object),
UnaryExpression(Keyword('vehiclecargoenabled'), Object, Boolean),
UnaryExpression(Keyword('vehiclereceiveremotetargets'), Object, Boolean),
UnaryExpression(Keyword('vehiclereportownposition'), Object, Boolean),
UnaryExpression(Keyword('vehiclereportremotetargets'), Object, Boolean),
UnaryExpression(Keyword('vehiclevarname'), Object, String),
UnaryExpression(Keyword('velocity'), Object, Array),
UnaryExpression(Keyword('velocitymodelspace'), Object, Array),
UnaryExpression(Keyword('verifysignature'), String, Boolean),
UnaryExpression(Keyword('vest'), Object, String),
UnaryExpression(Keyword('vestcontainer'), Object, Object),
UnaryExpression(Keyword('vestitems'), Object, Array),
UnaryExpression(Keyword('vestmagazines'), Object, Array),
UnaryExpression(Keyword('visibleposition'), Object, Array),
UnaryExpression(Keyword('visiblepositionasl'), Object, Array),
UnaryExpression(Keyword('waituntil'), Code, Nothing),
UnaryExpression(Keyword('waypointattachedobject'), Array, Object),
UnaryExpression(Keyword('waypointattachedvehicle'), Array, Object),
UnaryExpression(Keyword('waypointbehaviour'), Array, String),
UnaryExpression(Keyword('waypointcombatmode'), Array, String),
UnaryExpression(Keyword('waypointcompletionradius'), Array, Number),
UnaryExpression(Keyword('waypointdescription'), Array, String),
UnaryExpression(Keyword('waypointforcebehaviour'), Array, Boolean),
UnaryExpression(Keyword('waypointformation'), Array, String),
UnaryExpression(Keyword('waypointhouseposition'), Array, Number),
| |
'''
end = min(car1['Frame #'].iloc[-1],car2['Frame #'].iloc[-1])
start = max(car1['Frame #'].iloc[0],car2['Frame #'].iloc[0])
if end <= start: # if no overlaps in time
return -1
car1 = car1.loc[(car1['Frame #'] >= start) & (car1['Frame #'] <= end)]
car2 = car2.loc[(car2['Frame #'] >= start) & (car2['Frame #'] <= end)]
pts = ['bbr_x','bbr_y', 'fbr_x','fbr_y','fbl_x','fbl_y','bbl_x', 'bbl_y']
Y1 = np.array(car1[pts]) # N x 8
Y2 = np.array(car2[pts])
IOU = 0
N = 0
for j in range(min(len(Y1), len(Y2))):
D1 = Y1[j,:]
# try:
D2 = Y2[j,:]
if ~np.isnan(np.sum([D1,D2])): # if no Nan in any measurements
p = Polygon([(D1[2*i],D1[2*i+1]) for i in range(int(len(D1)/2))])
q = Polygon([(D2[2*i],D2[2*i+1]) for i in range(int(len(D2)/2))])
if (p.intersects(q)):
N += 1
intersection_area = p.intersection(q).area
union_area = p.union(q).area
# print(intersection_area, union_area)
IOU += float(intersection_area/union_area)
else:
IOU += 0
if N == 0:
return -1
return IOU / N
def stitch_objects_jpda(df, THRESHOLD_1 = 2.5, mc=True):
    '''
    10/20/2021
    use JPDA, weighted average of all meas that fall into a gate (defined by IOU and mahalanobis distance)
    create new ID for meas out side of the gate

    Frame-by-frame association loop:
      1. flush tracks whose last box left the (extended) field of view,
         writing their matched rows to the output with a unified ID
      2. advance the remaining tracks by one prediction step
      3. for each track, fuse every gated measurement into one weighted
         average (weights = inverse Mahalanobis distance)
      4. start a new track for every valid measurement outside all gates

    Parameters:
        df: detections with 'Frame #', 'ID', the footprint columns in
            `pts` and image columns in `pts_img`; when mc is False a
            'camera' column is also required
        THRESHOLD_1: Mahalanobis-distance gate threshold
        mc: True for multi-camera data (x range taken from the data
            itself), False to use utils.get_camera_range()
    Returns:
        DataFrame of stitched detections, one unified ID per track.
        NOTE(review): tracks still inside the FOV when the video ends are
        never flushed to newdf, so their rows are dropped — confirm this
        is intended.
    '''
    # define the x,y range to keep track of cars in FOV (meter)
    if mc==True:
        xmin, xmax = min(df["x"].values)-10, max(df["x"].values)+10
    else:
        camera_id_list = df['camera'].unique()
        xmin, xmax, ymin, ymax = utils.get_camera_range(camera_id_list)
    xrange = xmax-xmin
    alpha = 0.4
    xmin, xmax = xmin - alpha*xrange, xmax + alpha*xrange # extended camera range for prediction
    ns = int(np.amin(np.array(df[['Frame #']]))) # start frame
    nf = int(np.amax(np.array(df[['Frame #']]))) # end frame
    tracks = dict() # a dictionary to store all current objects in view. key:ID, value:dataframe
    pts = ['bbr_x','bbr_y','fbr_x','fbr_y','fbl_x','fbl_y','bbl_x', 'bbl_y']
    pts_img = ["fbrx","fbry","fblx","fbly", "bbrx", "bbry", "bblx", "bbly", "ftrx", "ftry", "ftlx", "ftly", "btrx", "btry", "btlx", "btly"]
    newdf = pd.DataFrame()
    # NOTE(review): range(ns, nf) never processes the last frame nf — confirm off-by-one is intended.
    for k in range(ns,nf):
        print("\rFrame {}/{}".format(k,nf),end = "\r",flush = True)
        frame = df.loc[(df['Frame #'] == k)] # TODO: use groupby frame to save time
        y = np.array(frame[pts])
        notnan = ~np.isnan(y).any(axis=1)
        y = y[notnan] # remove rows with missing values (dim = mx8)
        frame = frame.iloc[notnan,:]
        frame = frame.reset_index(drop=True)
        m_box = len(frame)
        n_car = len(tracks)
        if (n_car > 0): # delete track that are out of view
            for car_id in list(tracks.keys()):
                # delete track if total matched frames <
                last_frame = tracks[car_id].iloc[-1]
                last_frame_x = np.array(last_frame[pts])[[0,2,4,6]]
                x1,x2 = min(last_frame_x),max(last_frame_x)
                frames = tracks[car_id]["Frame #"].values
                # rows with NaN Frame # are pure predictions; only matched rows are kept
                matched_bool = ~np.isnan(frames)
                frames_matched = tracks[car_id].loc[matched_bool]
                if (x1<xmin) or (x2>xmax):
                    if len(frames_matched) > 0: # TODO: this threshold could be a ratio
                        newid = frames_matched["ID"].iloc[0]
                        # NOTE(review): assigning into a .loc slice may hit pandas
                        # SettingWithCopyWarning — verify the write takes effect.
                        frames_matched["ID"] = newid #unify ID
                        newdf = pd.concat([newdf,frames_matched])
                    del tracks[car_id]
                    n_car -= 1
        if (m_box == 0) and (n_car == 0): # simply advance to the next frame
            continue
        elif (m_box == 0) and (n_car > 0): # if no measurements in current frame, simply predict
            x, tracks = predict_tracks_df(tracks)
        elif (m_box > 0) and (n_car == 0): # create new tracks (initialize)
            for i, row in frame.iterrows():
                row = frame.loc[i:i,:]
                tracks[row['ID'].iloc[0]] = row
        else:
            x, tracks = predict_tracks_df(tracks)
            n_car = len(tracks)
            curr_id = list(tracks.keys()) # should be n id's
            # score = np.ones([m_box,n_car])*(99)
            score_dist = np.zeros([m_box,n_car])
            score_iou = np.zeros([m_box,n_car])
            invalid_meas = []
            for m in range(m_box):
                for n in range(n_car):
                    score_dist[m,n] = dist_score(x[n],y[m],'maha')
                    score_iou[m,n], areaa, areab = iou(x[n],y[m],DIRECTION=False,AREA=True)
                if areab < 2: # invalid measurement: tiny box area, exclude from matching
                    score_dist[m,:] = 99
                    score_iou[m,:] = -1
                    invalid_meas.append(m)
            # Debug visualization of a hard-coded frame window; disabled here
            # (kept commented out, same as in stitch_objects_bm).
            # if (1260<k<1300):
            #     vis.plot_track(np.array(np.vstack(x), dtype=float), np.array(y,dtype=float), curr_id, frame["ID"].values, xmin,xmax, k)
            # if k == 409:
            #     print("")
            # matching: a measurement is in track n's gate if it is close in
            # Mahalanobis distance OR overlaps the predicted box at all
            gate = np.logical_or(score_dist<THRESHOLD_1, score_iou>0)
            for n in range(n_car):
                if any(gate[:,n]):
                    # calculate weighted average of all gated measurements
                    tracks[curr_id[n]] = tracks[curr_id[n]].reset_index(drop=True)
                    frames_in_gate = frame.iloc[gate[:,n]]
                    if len(frames_in_gate) == 1:
                        avg_meas = frames_in_gate
                    else:
                        # weights proportional to inverse Mahalanobis distance
                        w = 1/score_dist[gate[:,n],n]
                        w = w / w.sum(axis=0)
                        frame_vals = np.array(frames_in_gate[pts_img+pts])
                        avg_meas_vals = np.reshape(np.dot(w,frame_vals),(1,-1))
                        avg_meas = pd.DataFrame(data=avg_meas_vals, columns=pts_img + pts)
                        avg_meas["Frame #"] = k
                    tracks[curr_id[n]].drop(tracks[curr_id[n]].tail(1).index,inplace=True) # drop the last row (prediction)
                    tracks[curr_id[n]] = pd.concat([tracks[curr_id[n]], avg_meas],ignore_index=True)
            # measurements outside every gate spawn new tracks
            m_unassociated = np.where(np.sum(gate, axis=1)==0)[0]
            for m in m_unassociated:
                # !TODO: make sure the unassociated y[m] are not inside each other's gates
                if m not in invalid_meas:
                    new_id = frame['ID'].iloc[m]
                    new_meas = frame.loc[m:m]
                    tracks[new_id] = new_meas
    # print("Remove wrong direction", len(newdf))
    # newdf = utils.remove_wrong_direction_df(newdf)
    # print('Connect tracks', len(newdf)) # Frames of a track (ID) might be disconnected after DA
    # newdf = newdf.groupby("ID").apply(utils.connect_track).reset_index(drop=True)
    return newdf
def stitch_objects_bm(df, THRESHOLD_1 = 2.5, mc=True):
    '''
    bipartite matching based on Maha distance cost

    Frame-by-frame association: tracks are advanced by one prediction
    step, then measurements of the current frame are assigned to tracks
    with scipy.optimize.linear_sum_assignment on the Mahalanobis-distance
    cost matrix. Assignments outside the gate (distance >= THRESHOLD_1
    and no IOU overlap) are rejected; unmatched, valid measurements
    spawn new tracks.

    df: detections with 'Frame #', 'ID' and the footprint columns in
        `pts`; a 'camera' column is required when mc is False.
    THRESHOLD_1: Mahalanobis-distance gate threshold.
    mc: True for multi-camera data (x range derived from the data),
        False to use utils.get_camera_range().
    Returns the stitched detections as a single DataFrame.
    '''
    # define the x,y range to keep track of cars in FOV (meter)
    if mc==True:
        xmin, xmax = min(df["x"].values)-10,max(df["x"].values)+10
    else:
        camera_id_list = df['camera'].unique()
        xmin, xmax, ymin, ymax = utils.get_camera_range(camera_id_list)
    xrange = xmax-xmin
    alpha = 0.4
    xmin, xmax = xmin - alpha*xrange, xmax + alpha*xrange # extended camera range for prediction
    ns = int(np.amin(np.array(df[['Frame #']]))) # start frame
    nf = int(np.amax(np.array(df[['Frame #']]))) # end frame
    tracks = dict() # a dictionary to store all current objects in view. key:ID, value:dataframe
    pts = ['bbr_x','bbr_y','fbr_x','fbr_y','fbl_x','fbl_y','bbl_x', 'bbl_y']
    # pts_img = ["fbrx","fbry","fblx","fbly", "bbrx", "bbry", "bblx", "bbly", "ftrx", "ftry", "ftlx", "ftly", "btrx", "btry", "btlx", "btly"]
    newdf = pd.DataFrame()
    for k in range(ns,nf):
        print("\rFrame {}/{}".format(k,nf),end = "\r",flush = True)
        frame = df.loc[(df['Frame #'] == k)] # TODO: use groupby frame to save time
        y = np.array(frame[pts])
        notnan = ~np.isnan(y).any(axis=1)
        y = y[notnan] # remove rows with missing values (dim = mx8)
        frame = frame.iloc[notnan,:]
        frame = frame.reset_index(drop=True)
        m_box = len(frame)
        n_car = len(tracks)
        # NOTE(review): this reset runs BEFORE the deletion loop below reads
        # invalid_tracks, so the `car_id in invalid_tracks` test is always
        # False; additionally invalid_tracks stores column indices n while the
        # test compares track IDs. Both look like bugs — confirm the intent.
        invalid_tracks = set()
        if (n_car > 0): # delete track that are out of view
            for car_id in list(tracks.keys()):
                # delete track if total matched frames <
                last_frame = tracks[car_id].iloc[-1]
                last_frame_x = np.array(last_frame[pts])[[0,2,4,6]]
                x1,x2 = min(last_frame_x),max(last_frame_x)
                frames = tracks[car_id]["Frame #"].values
                # rows with NaN Frame # are pure predictions; keep matched rows only
                matched_bool = ~np.isnan(frames)
                frames_matched = tracks[car_id].loc[matched_bool]
                if (x1<xmin) or (x2>xmax) or (car_id in invalid_tracks):
                    if len(frames_matched) > 0: # TODO: this threshold could be a ratio
                        newid = frames_matched["ID"].iloc[0]
                        frames_matched["ID"] = newid #unify ID
                        newdf = pd.concat([newdf,frames_matched])
                    del tracks[car_id]
                    n_car -= 1
        if (m_box == 0) and (n_car == 0): # simply advance to the next frame
            continue
        elif (m_box == 0) and (n_car > 0): # if no measurements in current frame, simply predict
            x, tracks = predict_tracks_df(tracks)
        elif (m_box > 0) and (n_car == 0): # create new tracks (initialize)
            for i, row in frame.iterrows():
                row = frame.loc[i:i,:]
                tracks[row['ID'].iloc[0]] = row
        else:
            x, tracks = predict_tracks_df(tracks)
            n_car = len(tracks)
            curr_id = list(tracks.keys()) # should be n id's
            # score = np.ones([m_box,n_car])*(99)
            score_dist = np.zeros([m_box,n_car])
            score_iou =np.zeros([m_box,n_car])
            invalid_meas = set()
            invalid_tracks = set()
            for m in range(m_box):
                for n in range(n_car):
                    score_dist[m,n] = dist_score(x[n],y[m],'maha')
                    score_iou[m,n], areaa, areab = iou(x[n],y[m],DIRECTION=False,AREA=True)
                    # degenerate (near-zero-area) predicted boxes / measurements
                    if areaa < 0.5:
                        invalid_tracks.add(n)
                    if areab < 1:
                        invalid_meas.add(m)
            # if (1715<k<1760):
            #     vis.plot_track(np.array(np.vstack(x), dtype=float), np.array(y,dtype=float), curr_id, frame["ID"].values, xmin,xmax, k)
            # bipartite matching: minimize total Mahalanobis distance
            a,b = scipy.optimize.linear_sum_assignment(score_dist)
            # a measurement is gated to track n if close in distance OR overlapping
            gate = np.logical_or(score_dist<THRESHOLD_1, score_iou>0)
            matched_m = set()
            for i in range(len(a)):
                if gate[a[i]][b[i]]:
                    n,m = b[i], a[i]
                    tracks[curr_id[n]] = tracks[curr_id[n]].reset_index(drop=True)
                    avg_meas = frame.loc[m:m]
                    tracks[curr_id[n]].drop(tracks[curr_id[n]].tail(1).index,inplace=True) # drop the last row (prediction)
                    tracks[curr_id[n]] = pd.concat([tracks[curr_id[n]], avg_meas],ignore_index=True)
                    matched_m.add(m)
            # m_unassociated = np.where(np.sum(gate, axis=1)==0)[0]
            m_unassociated = set(np.arange(m_box))-matched_m
            for m in m_unassociated:
                # !TODO: make sure the unassociated y[m] are not inside each other's gates
                # NOTE(review): `all(gate[m,:])==False` lets a measurement that is
                # inside SOME (but not all) gates still start a new track —
                # possibly `any(...)` was intended; confirm.
                if (m not in invalid_meas) and (all(gate[m,:])==False) :
                    new_id = frame['ID'].iloc[m]
                    new_meas = frame.loc[m:m]
                    tracks[new_id] = new_meas
    # print("Remove wrong direction", len(newdf))
    # newdf = utils.remove_wrong_direction_df(newdf)
    # print('Connect tracks', len(newdf)) # Frames of a track (ID) might be disconnected after DA
    # newdf = newdf.groupby("ID").apply(utils.connect_track).reset_index(drop=True)
    return newdf
def stitch_objects_gnn(df, THRESHOLD_1 = 2.5, mc=True):
'''
find the best meas for each track
prioritize on tracks that have higher # meas matched
'''
# define the x,y range to keep track of cars in FOV (meter)
if mc==True:
xmin, xmax = min(df["x"].values)-10,max(df["x"].values)+10
else:
camera_id_list = df['camera'].unique()
xmin, xmax, ymin, ymax = utils.get_camera_range(camera_id_list)
xrange = xmax-xmin
alpha = 0.4
xmin, xmax = xmin - alpha*xrange, xmax + alpha*xrange # extended camera range for prediction
ns = int(np.amin(np.array(df[['Frame #']]))) # start frame
nf = int(np.amax(np.array(df[['Frame #']]))) # end frame
tracks = dict() # a dictionary to store all current | |
#!/usr/bin/env python2
"""
Synopsis:
Generate Python classes from XML Schema definition.
Input is read from in_xsd_file or, if "-" (dash) arg, from stdin.
Output is written to files named in "-o" and "-s" options.
Usage:
python generateDS.py [ options ] <xsd_file>
python generateDS.py [ options ] -
Options:
-h, --help Display this help information.
-o <outfilename> Output file name for data representation classes
-s <subclassfilename> Output file name for subclasses
-p <prefix> Prefix string to be pre-pended to the class names
-f Force creation of output files. Do not ask.
-a <namespaceabbrev> Namespace abbreviation, e.g. "xsd:".
Default = 'xs:'.
-b <behaviorfilename> Input file name for behaviors added to subclasses
-m Generate properties for member variables
--subclass-suffix="XXX" Append XXX to the generated subclass names.
Default="Sub".
--root-element="XXX" Assume XXX is root element of instance docs.
Default is first element defined in schema.
Also see section "Recognizing the top level
element" in the documentation.
--super="XXX" Super module name in subclass module. Default="???"
--validator-bodies=path Path to a directory containing files that provide
bodies (implementations) of validator methods.
--use-old-getter-setter Name getters and setters getVar() and setVar(),
instead of get_var() and set_var().
--user-methods= <module>,
-u <module> Optional module containing user methods. See
section "User Methods" in the documentation.
--no-dates Do not include the current date in the generated
files. This is useful if you want to minimize
the amount of (no-operation) changes to the
generated python code.
--no-versions Do not include the current version in the generated
files. This is useful if you want to minimize
the amount of (no-operation) changes to the
generated python code.
--no-process-includes Do not process included XML Schema files. By
default, generateDS.py will insert content
from files referenced by <include ... />
elements into the XML Schema to be processed.
--silence Normally, the code generated with generateDS
echoes the information being parsed. To prevent
the echo from occurring, use the --silence switch.
--namespacedef='xmlns:abc="http://www.abc.com"'
Namespace definition to be passed in as the
value for the namespacedef_ parameter of
the export_xml() method by the generated
parse() and parseString() functions.
Default=''.
--external-encoding=<encoding>
Encode output written by the generated export
methods using this encoding. Default, if omitted,
is the value returned by sys.getdefaultencoding().
Example: --external-encoding='utf-8'.
--member-specs=list|dict
Generate member (type) specifications in each
class: a dictionary of instances of class
MemberSpec_ containing member name, type,
and array or not. Allowed values are
"list" or "dict". Default: do not generate.
    -q, --no-questions       Do not ask questions, for example,
force overwrite.
--session=mysession.session
Load and use options from session file. You can
create session file in generateds_gui.py. Or,
copy and edit sample.session from the
distribution.
--version Print version and exit.
"""
from __future__ import print_function
## LICENSE
## Copyright (c) 2003 <NAME>
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
## CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
## TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
## SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from future import standard_library
standard_library.install_aliases()
from builtins import input
from builtins import range
from builtins import object
import sys
import os.path
import time
import getopt
import urllib.request, urllib.error, urllib.parse
import imp
from xml.sax import handler, make_parser
import xml.sax.xmlreader
import logging
import keyword
import io
import textwrap
from cctype import TypeGenerator
from ccmap import IFMapGenerator
from ccsvc import ServiceGenerator
# Default logger configuration
## logging.basicConfig(level=logging.DEBUG,
## format='%(asctime)s %(levelname)s %(message)s')
## import warnings
## warnings.warn('importing IPShellEmbed', UserWarning)
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\\nHit Ctrl-D to exit')
#
# Global variables etc.
#
#
# Do not modify the following VERSION comments.
# Used by updateversion.py.
##VERSION##
VERSION = '2.7c'
##VERSION##
class XsdParserGenerator(object):
def __init__(self):
self.Version = VERSION
self.GenerateProperties = 0
self.UseOldGetterSetter = 0
self.MemberSpecs = None
self.DelayedElements = []
self.DelayedElements_subclass = []
self.AlreadyGenerated = []
self.AlreadyGenerated_subclass = []
self.PostponedExtensions = []
self.ElementsForSubclasses = []
self.ElementDict = {}
self.Force = False
self.NoQuestions = False
self.Dirpath = []
self.ExternalEncoding = sys.getdefaultencoding()
self.genCategory = None
self.genLang = None
self.LangGenr = None
self.NamespacesDict = {}
self.Targetnamespace = ""
self.NameTable = {
'type': 'type_',
'float': 'float_',
'build': 'build_',
}
extras = ['self']
for kw in keyword.kwlist + extras:
self.NameTable[kw] = '%sxx' % kw
self.SubclassSuffix = 'Sub'
self.RootElement = None
self.AttributeGroups = {}
self.ElementGroups = {}
self.SubstitutionGroups = {}
#
# SubstitutionGroups can also include simple types that are
# not (defined) elements. Keep a list of these simple types.
# These are simple types defined at top level.
self.SimpleElementDict = {}
self.SimpleTypeDict = {}
self.ValidatorBodiesBasePath = None
self.UserMethodsPath = None
self.UserMethodsModule = None
self.XsdNameSpace = ''
self.CurrentNamespacePrefix = 'xs:'
self.AnyTypeIdentifier = '__ANY__'
self.TEMPLATE_HEADER = """\
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated %s by generateDS.py%s.
#
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%%d' %% input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%%s' %% input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%%f' %% input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%%s' %% input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%%e' %% input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%%s' %% input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%%s' %% input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%%s' %% input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
| |
<filename>video_prediction/savp/models/savp_model.py
import collections
import functools
import itertools
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest
from video_prediction.savp import ops, flow_ops
from video_prediction.savp.models import VideoPredictionModel
from video_prediction.savp.models import networks
from video_prediction.savp.ops import dense, pad2d, conv2d, flatten, tile_concat
from video_prediction.savp.rnn_ops import BasicConv2DLSTMCell, Conv2DGRUCell
from video_prediction.savp.utils import tf_utils
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
def posterior_fn(inputs, hparams):
    """Regress per-step latent mean and log-variance from frame pairs.

    Consecutive frames are stacked channel-wise (optionally concatenated
    with the broadcast action vector), encoded, optionally run through an
    RNN, and projected to `hparams.nz` latent dimensions.

    Returns a dict with keys 'zs_mu' and 'zs_log_sigma_sq'.
    """
    frames = inputs['images']
    # Pair each frame with its successor along the channel axis.
    pairs = tf.concat([frames[:-1], frames[1:]], axis=-1)
    if 'actions' in inputs:
        # Tile the action vector over the spatial dimensions before concat.
        pairs = tile_concat(
            [pairs, inputs['actions'][..., None, None, :]], axis=-1)
    feat = tf_utils.with_flat_batch(networks.encoder)(
        pairs, nef=hparams.nef, n_layers=hparams.n_layers, norm_layer=hparams.norm_layer)
    if hparams.use_e_rnn:
        with tf.variable_scope('layer_%d' % (hparams.n_layers + 1)):
            feat = tf_utils.with_flat_batch(dense, 2)(feat, hparams.nef * 4)
        if hparams.rnn == 'lstm':
            cell_cls = tf.contrib.rnn.BasicLSTMCell
        elif hparams.rnn == 'gru':
            cell_cls = tf.contrib.rnn.GRUCell
        else:
            raise NotImplementedError
        with tf.variable_scope('%s' % hparams.rnn):
            feat, _ = tf_utils.unroll_rnn(cell_cls(hparams.nef * 4), feat)
    with tf.variable_scope('z_mu'):
        z_mu = tf_utils.with_flat_batch(dense, 2)(feat, hparams.nz)
    with tf.variable_scope('z_log_sigma_sq'):
        z_log_sigma_sq = tf_utils.with_flat_batch(dense, 2)(feat, hparams.nz)
        # Clamp log-variance to keep downstream exp()/KL terms stable.
        z_log_sigma_sq = tf.clip_by_value(z_log_sigma_sq, -10, 10)
    return {'zs_mu': z_mu, 'zs_log_sigma_sq': z_log_sigma_sq}
def prior_fn(inputs, hparams):
    """Regress latent mean/log-variance conditioned on context frames only.

    Mirrors posterior_fn, but only the first `hparams.context_frames`
    frames are paired and encoded; the remaining prediction steps are
    fed to the RNN as zero features.

    Returns a dict with keys 'zs_mu' and 'zs_log_sigma_sq'.
    """
    frames = inputs['images']
    # Only consecutive context-frame pairs are visible to the prior.
    pairs = tf.concat([frames[:hparams.context_frames - 1], frames[1:hparams.context_frames]], axis=-1)
    if 'actions' in inputs:
        # Tile the action vector over the spatial dimensions before concat.
        pairs = tile_concat(
            [pairs, inputs['actions'][..., None, None, :]], axis=-1)
    feat = tf_utils.with_flat_batch(networks.encoder)(
        pairs, nef=hparams.nef, n_layers=hparams.n_layers, norm_layer=hparams.norm_layer)
    # Zero-pad along time so the RNN unrolls over the full horizon.
    padding = tf.zeros(tf.concat([[hparams.sequence_length - hparams.context_frames], tf.shape(feat)[1:]], axis=0))
    feat = tf.concat([feat, padding], axis=0)
    with tf.variable_scope('layer_%d' % (hparams.n_layers + 1)):
        feat = tf_utils.with_flat_batch(dense, 2)(feat, hparams.nef * 4)
    if hparams.rnn == 'lstm':
        cell_cls = tf.contrib.rnn.BasicLSTMCell
    elif hparams.rnn == 'gru':
        cell_cls = tf.contrib.rnn.GRUCell
    else:
        raise NotImplementedError
    with tf.variable_scope('%s' % hparams.rnn):
        feat, _ = tf_utils.unroll_rnn(cell_cls(hparams.nef * 4), feat)
    with tf.variable_scope('z_mu'):
        z_mu = tf_utils.with_flat_batch(dense, 2)(feat, hparams.nz)
    with tf.variable_scope('z_log_sigma_sq'):
        z_log_sigma_sq = tf_utils.with_flat_batch(dense, 2)(feat, hparams.nz)
        # Clamp log-variance to keep downstream exp()/KL terms stable.
        z_log_sigma_sq = tf.clip_by_value(z_log_sigma_sq, -10, 10)
    return {'zs_mu': z_mu, 'zs_log_sigma_sq': z_log_sigma_sq}
def discriminator_given_video_fn(targets, hparams):
    """Apply the enabled spectral-norm discriminators to a video tensor.

    targets: video tensor whose first two static dims are
        (sequence_length, batch_size) — presumably followed by spatial
        and channel dims; confirm against callers.
    hparams: provides clip_length, ndf, and the *_sn_*gan_weight flags
        that decide which discriminator branches are built.

    Returns a dict mapping 'discrim_{image,video,images}_sn_logits' /
    '..._feature%d' to the corresponding tensors (only for enabled
    branches).
    """
    sequence_length, batch_size = targets.shape.as_list()[:2]
    clip_length = hparams.clip_length
    # sample an image and apply the image discriminator on that frame
    # (one random time index per batch element)
    t_sample = tf.random_uniform([batch_size], minval=0, maxval=sequence_length, dtype=tf.int32)
    image_sample = tf.gather_nd(targets, tf.stack([t_sample, tf.range(batch_size)], axis=1))
    # sample a subsequence of length clip_length and apply the images/video discriminators on those frames
    t_start = tf.random_uniform([batch_size], minval=0, maxval=sequence_length - clip_length + 1, dtype=tf.int32)
    t_start_indices = tf.stack([t_start, tf.range(batch_size)], axis=1)
    # offsets [0..clip_length) in time, 0 in batch; broadcasting the two
    # index tensors yields a (clip_length, batch_size, 2) index grid
    t_offset_indices = tf.stack([tf.range(clip_length), tf.zeros(clip_length, dtype=tf.int32)], axis=1)
    indices = t_start_indices[None] + t_offset_indices[:, None]
    clip_sample = tf.gather_nd(targets, flatten(indices, 0, 1))
    clip_sample = tf.reshape(clip_sample, [clip_length] + targets.shape.as_list()[1:])
    outputs = {}
    if hparams.image_sn_gan_weight or hparams.image_sn_vae_gan_weight:
        # per-frame discriminator on the single sampled image
        with tf.variable_scope('image'):
            image_features = networks.image_sn_discriminator(image_sample, ndf=hparams.ndf)
            # last element of the feature list is the logits
            image_features, image_logits = image_features[:-1], image_features[-1]
            outputs['discrim_image_sn_logits'] = image_logits
            for i, image_feature in enumerate(image_features):
                outputs['discrim_image_sn_feature%d' % i] = image_feature
    if hparams.video_sn_gan_weight or hparams.video_sn_vae_gan_weight:
        # clip-level discriminator on the sampled subsequence
        with tf.variable_scope('video'):
            video_features = networks.video_sn_discriminator(clip_sample, ndf=hparams.ndf)
            video_features, video_logits = video_features[:-1], video_features[-1]
            outputs['discrim_video_sn_logits'] = video_logits
            for i, video_feature in enumerate(video_features):
                outputs['discrim_video_sn_feature%d' % i] = video_feature
    if hparams.images_sn_gan_weight or hparams.images_sn_vae_gan_weight:
        # image discriminator applied independently to every frame of the clip
        with tf.variable_scope('images'):
            images_features = tf_utils.with_flat_batch(networks.image_sn_discriminator)(clip_sample, ndf=hparams.ndf)
            images_features, images_logits = images_features[:-1], images_features[-1]
            outputs['discrim_images_sn_logits'] = images_logits
            for i, images_feature in enumerate(images_features):
                outputs['discrim_images_sn_feature%d' % i] = images_feature
    return outputs
def discriminator_fn(inputs, outputs, mode, hparams):
    """Run the video discriminator on real and generated sequences.

    Args:
        inputs: dict of ground-truth tensors; 'images' is indexed [1:] so the
            discriminator only sees frames the generator predicts.
        outputs: dict of generator outputs; uses 'gen_images' and, when the
            latent code is active, 'gen_images_enc'.
        mode: unused here, kept for interface parity with the generator fn.
        hparams: hyperparameters controlling nz / use_same_discriminator.

    Returns:
        OrderedDict mapping suffixed keys ('*_real', '*_fake', '*_enc_real',
        '*_enc_fake') to discriminator logits/features.
    """
    # do the encoder version first so that it isn't affected by the reuse_variables() call
    if hparams.nz == 0:
        # No latent code: the encoder-conditioned discriminator outputs are empty.
        discrim_outputs_enc_real = collections.OrderedDict()
        discrim_outputs_enc_fake = collections.OrderedDict()
    else:
        images_enc_real = inputs['images'][1:]
        images_enc_fake = outputs['gen_images_enc']
        if hparams.use_same_discriminator:
            # Share one set of discriminator variables between the prior and
            # encoder branches; reuse is turned on after the "real" pass.
            with tf.name_scope("real"):
                discrim_outputs_enc_real = discriminator_given_video_fn(images_enc_real, hparams)
            tf.get_variable_scope().reuse_variables()
            with tf.name_scope("fake"):
                discrim_outputs_enc_fake = discriminator_given_video_fn(images_enc_fake, hparams)
        else:
            # Separate 'encoder' variable scope; second call reuses it.
            with tf.variable_scope('encoder'), tf.name_scope("real"):
                discrim_outputs_enc_real = discriminator_given_video_fn(images_enc_real, hparams)
            with tf.variable_scope('encoder', reuse=True), tf.name_scope("fake"):
                discrim_outputs_enc_fake = discriminator_given_video_fn(images_enc_fake, hparams)
    images_real = inputs['images'][1:]
    images_fake = outputs['gen_images']
    with tf.name_scope("real"):
        discrim_outputs_real = discriminator_given_video_fn(images_real, hparams)
    # Fake pass reuses the variables created by the real pass.
    tf.get_variable_scope().reuse_variables()
    with tf.name_scope("fake"):
        discrim_outputs_fake = discriminator_given_video_fn(images_fake, hparams)
    # Tag each output with its branch so the merged dict has unique keys.
    discrim_outputs_real = OrderedDict([(k + '_real', v) for k, v in discrim_outputs_real.items()])
    discrim_outputs_fake = OrderedDict([(k + '_fake', v) for k, v in discrim_outputs_fake.items()])
    discrim_outputs_enc_real = OrderedDict([(k + '_enc_real', v) for k, v in discrim_outputs_enc_real.items()])
    discrim_outputs_enc_fake = OrderedDict([(k + '_enc_fake', v) for k, v in discrim_outputs_enc_fake.items()])
    outputs = [discrim_outputs_real, discrim_outputs_fake,
               discrim_outputs_enc_real, discrim_outputs_enc_fake]
    total_num_outputs = sum([len(output) for output in outputs])
    outputs = collections.OrderedDict(itertools.chain(*[output.items() for output in outputs]))
    assert len(outputs) == total_num_outputs  # ensure no output is lost because of repeated keys
    return outputs
class SAVPCell(tf.nn.rnn_cell.RNNCell):
    def __init__(self, inputs, mode, hparams, reuse=None):
        """Configure the per-step generator cell.

        Args:
            inputs: dict of time-major tensors; 'images' is
                (sequence_length, batch, height, width, channels).  May also
                contain 'pix_distribs', 'states' and 'zs'.
            mode: mode string; scheduled sampling is only active for 'train'.
            hparams: hyperparameters (ngf, nz, conv_rnn, transformation,
                schedule-sampling settings, ...).
            reuse: forwarded to the RNNCell base class for variable reuse.
        """
        super(SAVPCell, self).__init__(_reuse=reuse)
        self.inputs = inputs
        self.mode = mode
        self.hparams = hparams
        if self.hparams.where_add not in ('input', 'all', 'middle'):
            raise ValueError('Invalid where_add %s' % self.hparams.where_add)
        # Inputs are time-major: dim 0 is time, dim 1 is batch.
        batch_size = inputs['images'].shape[1].value
        image_shape = inputs['images'].shape.as_list()[2:]
        height, width, _ = image_shape
        scale_size = min(height, width)
        # Choose encoder/decoder depth from the resolution.  Each spec entry
        # is (out_channels, use_conv_rnn) for one scale level.
        if scale_size >= 256:
            self.encoder_layer_specs = [
                (self.hparams.ngf, False),
                (self.hparams.ngf * 2, False),
                (self.hparams.ngf * 4, True),
                (self.hparams.ngf * 8, True),
                (self.hparams.ngf * 8, True),
            ]
            self.decoder_layer_specs = [
                (self.hparams.ngf * 8, True),
                (self.hparams.ngf * 4, True),
                (self.hparams.ngf * 2, False),
                (self.hparams.ngf, False),
                (self.hparams.ngf, False),
            ]
        elif scale_size >= 128:
            self.encoder_layer_specs = [
                (self.hparams.ngf, False),
                (self.hparams.ngf * 2, True),
                (self.hparams.ngf * 4, True),
                (self.hparams.ngf * 8, True),
            ]
            self.decoder_layer_specs = [
                (self.hparams.ngf * 8, True),
                (self.hparams.ngf * 4, True),
                (self.hparams.ngf * 2, False),
                (self.hparams.ngf, False),
            ]
        elif scale_size >= 64:
            self.encoder_layer_specs = [
                (self.hparams.ngf, True),
                (self.hparams.ngf * 2, True),
                (self.hparams.ngf * 4, True),
            ]
            self.decoder_layer_specs = [
                (self.hparams.ngf * 2, True),
                (self.hparams.ngf, True),
                (self.hparams.ngf, False),
            ]
        elif scale_size >= 32:
            self.encoder_layer_specs = [
                (self.hparams.ngf, True),
                (self.hparams.ngf * 2, True),
            ]
            self.decoder_layer_specs = [
                (self.hparams.ngf, True),
                (self.hparams.ngf, False),
            ]
        else:
            raise NotImplementedError
        assert len(self.encoder_layer_specs) == len(self.decoder_layer_specs)
        # Each encoder level halves the spatial size, so the image must divide
        # evenly by 2**depth for the decoder to reconstruct the same shape.
        total_stride = 2 ** len(self.encoder_layer_specs)
        if (height % total_stride) or (width % total_stride):
            raise ValueError("The image has dimension (%d, %d), but it should be divisible "
                             "by the total stride, which is %d." % (height, width, total_stride))
        # output_size
        # One mask per transformed image plus one per optional background /
        # scratch image that is enabled in hparams.
        num_masks = self.hparams.last_frames * self.hparams.num_transformed_images + \
            int(bool(self.hparams.prev_image_background)) + \
            int(bool(self.hparams.first_image_background and not self.hparams.context_images_background)) + \
            int(bool(self.hparams.last_image_background and not self.hparams.context_images_background)) + \
            int(bool(self.hparams.last_context_image_background and not self.hparams.context_images_background)) + \
            (self.hparams.context_frames if self.hparams.context_images_background else 0) + \
            int(bool(self.hparams.generate_scratch_image))
        output_size = {
            'gen_images': tf.TensorShape(image_shape),
            'transformed_images': tf.TensorShape(image_shape + [num_masks]),
            'masks': tf.TensorShape([height, width, 1, num_masks]),
        }
        if 'pix_distribs' in inputs:
            num_motions = inputs['pix_distribs'].shape[-1].value
            output_size['gen_pix_distribs'] = tf.TensorShape([height, width, num_motions])
            output_size['transformed_pix_distribs'] = tf.TensorShape([height, width, num_motions, num_masks])
        if 'states' in inputs:
            output_size['gen_states'] = inputs['states'].shape[2:]
        if self.hparams.transformation == 'flow':
            output_size['gen_flows'] = tf.TensorShape([height, width, 2, self.hparams.last_frames * self.hparams.num_transformed_images])
            output_size['gen_flows_rgb'] = tf.TensorShape([height, width, 3, self.hparams.last_frames * self.hparams.num_transformed_images])
        self._output_size = output_size
        # state_size
        # Track a recurrent state for every scale level that uses a conv RNN;
        # encoder levels shrink the spatial size, decoder levels grow it back.
        conv_rnn_state_sizes = []
        conv_rnn_height, conv_rnn_width = height, width
        for out_channels, use_conv_rnn in self.encoder_layer_specs:
            conv_rnn_height //= 2
            conv_rnn_width //= 2
            if use_conv_rnn and not self.hparams.ablation_rnn:
                conv_rnn_state_sizes.append(tf.TensorShape([conv_rnn_height, conv_rnn_width, out_channels]))
        for out_channels, use_conv_rnn in self.decoder_layer_specs:
            conv_rnn_height *= 2
            conv_rnn_width *= 2
            if use_conv_rnn and not self.hparams.ablation_rnn:
                conv_rnn_state_sizes.append(tf.TensorShape([conv_rnn_height, conv_rnn_width, out_channels]))
        if self.hparams.conv_rnn == 'lstm':
            # LSTM cells carry a (c, h) pair per level instead of one tensor.
            conv_rnn_state_sizes = [tf.nn.rnn_cell.LSTMStateTuple(conv_rnn_state_size, conv_rnn_state_size)
                                    for conv_rnn_state_size in conv_rnn_state_sizes]
        state_size = {'time': tf.TensorShape([]),
                      'gen_image': tf.TensorShape(image_shape),
                      'last_images': [tf.TensorShape(image_shape)] * self.hparams.last_frames,
                      'conv_rnn_states': conv_rnn_state_sizes}
        if 'zs' in inputs and self.hparams.use_rnn_z and not self.hparams.ablation_rnn:
            rnn_z_state_size = tf.TensorShape([self.hparams.nz])
            if self.hparams.rnn == 'lstm':
                rnn_z_state_size = tf.nn.rnn_cell.LSTMStateTuple(rnn_z_state_size, rnn_z_state_size)
            state_size['rnn_z_state'] = rnn_z_state_size
        if 'pix_distribs' in inputs:
            state_size['gen_pix_distrib'] = tf.TensorShape([height, width, num_motions])
            state_size['last_pix_distribs'] = [tf.TensorShape([height, width, num_motions])] * self.hparams.last_frames
        if 'states' in inputs:
            state_size['gen_state'] = inputs['states'].shape[2:]
        self._state_size = state_size
        # Optionally make (only) the recurrent states trainable variables so
        # the model can learn its own initial state.
        if self.hparams.learn_initial_state:
            learnable_initial_state_size = {k: v for k, v in state_size.items()
                                            if k in ('conv_rnn_states', 'rnn_z_state')}
        else:
            learnable_initial_state_size = {}
        learnable_initial_state_flat = []
        for i, size in enumerate(nest.flatten(learnable_initial_state_size)):
            with tf.variable_scope('initial_state_%d' % i):
                state = tf.get_variable('initial_state', size,
                                        dtype=tf.float32, initializer=tf.zeros_initializer())
                learnable_initial_state_flat.append(state)
        self._learnable_initial_state = nest.pack_sequence_as(
            learnable_initial_state_size, learnable_initial_state_flat)
        # Scheduled sampling: a boolean (time, batch) mask saying whether each
        # step is fed the ground-truth frame instead of its own prediction.
        ground_truth_sampling_shape = [self.hparams.sequence_length - 1 - self.hparams.context_frames, batch_size]
        if self.hparams.schedule_sampling == 'none' or self.mode != 'train':
            ground_truth_sampling = tf.constant(False, dtype=tf.bool, shape=ground_truth_sampling_shape)
        elif self.hparams.schedule_sampling in ('inverse_sigmoid', 'linear'):
            if self.hparams.schedule_sampling == 'inverse_sigmoid':
                k = self.hparams.schedule_sampling_k
                start_step = self.hparams.schedule_sampling_steps[0]
                iter_num = tf.to_float(tf.train.get_or_create_global_step())
                prob = (k / (k + tf.exp((iter_num - start_step) / k)))
                prob = tf.cond(tf.less(iter_num, start_step), lambda: 1.0, lambda: prob)
            elif self.hparams.schedule_sampling == 'linear':
                start_step, end_step = self.hparams.schedule_sampling_steps
                step = tf.clip_by_value(tf.train.get_or_create_global_step(), start_step, end_step)
                prob = 1.0 - tf.to_float(step - start_step) / tf.to_float(end_step - start_step)
            # Draw per-step Bernoulli(prob) decisions for each batch element.
            log_probs = tf.log([1 - prob, prob])
            ground_truth_sampling = tf.multinomial([log_probs] * batch_size, ground_truth_sampling_shape[0])
            ground_truth_sampling = tf.cast(tf.transpose(ground_truth_sampling, [1, 0]), dtype=tf.bool)
            # Ensure that eventually, the model is deterministically
            # autoregressive (as opposed to autoregressive with very high probability).
            ground_truth_sampling = tf.cond(tf.less(prob, 0.001),
                                            lambda: tf.constant(False, dtype=tf.bool, shape=ground_truth_sampling_shape),
                                            lambda: ground_truth_sampling)
        else:
            raise NotImplementedError
        # Context frames are always fed from the ground truth.
        ground_truth_context = tf.constant(True, dtype=tf.bool, shape=[self.hparams.context_frames, batch_size])
        self.ground_truth = tf.concat([ground_truth_context, ground_truth_sampling], axis=0)
    @property
    def output_size(self):
        """Dict of per-step output TensorShapes, as computed in __init__."""
        return self._output_size
    @property
    def state_size(self):
        """Dict of recurrent-state TensorShapes, as computed in __init__."""
        return self._state_size
def zero_state(self, batch_size, dtype):
init_state = super(SAVPCell, self).zero_state(batch_size, dtype)
learnable_init_state = nest.map_structure(
lambda x: tf.tile(x[None], [batch_size] + [1] * x.shape.ndims), self._learnable_initial_state)
init_state.update(learnable_init_state)
init_state['last_images'] = [self.inputs['images'][0]] | |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import itertools
import operator
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import exceptions
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from neutron._i18n import _
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import provisioning_blocks
from neutron.extensions import segment as segment_ext
from neutron.plugins.common import utils as p_utils
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
class DhcpRpcCallback(object):
"""DHCP agent RPC callback in plugin implementations.
This class implements the server side of an rpc interface. The client
side of this interface can be found in
neutron.agent.dhcp.agent.DhcpPluginApi. For more information about
changing rpc interfaces, see doc/source/contributor/internals/rpc_api.rst.
"""
# API version history:
# 1.0 - Initial version.
# 1.1 - Added get_active_networks_info, create_dhcp_port,
# and update_dhcp_port methods.
# 1.2 - Removed get_dhcp_port. When removing a method (Making a
# backwards incompatible change) you would normally bump the
# major version. However, since the method was unused in the
# RPC client for many releases, it should be OK to bump the
# minor release instead and claim RPC compatibility with the
# last few client versions.
# 1.3 - Removed release_port_fixed_ip. It's not used by reference DHCP
# agent since Juno, so similar rationale for not bumping the
# major version as above applies here too.
# 1.4 - Removed update_lease_expiration. It's not used by reference
# DHCP agent since Juno, so similar rationale for not bumping the
# major version as above applies here too.
# 1.5 - Added dhcp_ready_on_ports.
# 1.6 - Removed get_active_networks. It's not used by reference
# DHCP agent since Havana, so similar rationale for not bumping
# the major version as above applies here too.
target = oslo_messaging.Target(
namespace=n_const.RPC_NAMESPACE_DHCP_PLUGIN,
version='1.6')
def _get_active_networks(self, context, **kwargs):
"""Retrieve and return a list of the active networks."""
host = kwargs.get('host')
limit = kwargs.get('limit')
marker = kwargs.get('marker')
plugin = directory.get_plugin()
if utils.is_extension_supported(
plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
if cfg.CONF.network_auto_schedule:
plugin.auto_schedule_networks(context, host)
nets = plugin.list_active_networks_on_active_dhcp_agent(
context, host, limit=limit, marker=marker, sorts=[('id', True)]
)
else:
filters = dict(admin_state_up=[True])
nets = plugin.get_networks(context, filters=filters,
limit=limit, marker=marker,
sorts=[('id', True)])
return nets
    def _port_action(self, plugin, context, port, action):
        """Perform port operations taking care of concurrency issues.

        :param plugin: the core plugin used to execute the operation
        :param port: port dict ({'port': {...}} for create, attributes for
            update); also consulted to build log messages on failure
        :param action: 'create_port' or 'update_port'
        :returns: the created/updated port, or None when an expected
            concurrent-deletion failure is swallowed
        """
        try:
            if action == 'create_port':
                return p_utils.create_port(plugin, context, port)
            elif action == 'update_port':
                return plugin.update_port(context, port['id'], port)
            else:
                msg = _('Unrecognized action')
                raise exceptions.Invalid(message=msg)
        except (db_exc.DBReferenceError,
                exceptions.NetworkNotFound,
                exceptions.SubnetNotFound,
                exceptions.InvalidInput,
                exceptions.IpAddressGenerationFailure) as e:
            # reraise=False by default: these errors usually mean the
            # network/subnet was deleted concurrently, which is benign here.
            with excutils.save_and_reraise_exception(reraise=False) as ctxt:
                if isinstance(e, exceptions.IpAddressGenerationFailure):
                    # Check if the subnet still exists and if it does not,
                    # this is the reason why the ip address generation failed.
                    # In any other unlikely event re-raise
                    try:
                        subnet_id = port['port']['fixed_ips'][0]['subnet_id']
                        plugin.get_subnet(context, subnet_id)
                    except exceptions.SubnetNotFound:
                        pass
                    else:
                        ctxt.reraise = True
                if ctxt.reraise:
                    # Log before save_and_reraise_exception re-raises on exit.
                    net_id = port['port']['network_id']
                    LOG.warning("Action %(action)s for network %(net_id)s "
                                "could not complete successfully: "
                                "%(reason)s",
                                {"action": action,
                                 "net_id": net_id,
                                 'reason': e})
def _group_by_network_id(self, res):
grouped = {}
keyfunc = operator.itemgetter('network_id')
for net_id, values in itertools.groupby(sorted(res, key=keyfunc),
keyfunc):
grouped[net_id] = list(values)
return grouped
    def get_active_networks_info(self, context, **kwargs):
        """Returns all the networks/subnets/ports in system.

        Each returned network dict is augmented with 'subnets',
        'non_local_subnets' and 'ports' lists for the requesting host.
        """
        host = kwargs.get('host')
        LOG.debug('get_active_networks_info from %s', host)
        networks = self._get_active_networks(context, **kwargs)
        plugin = directory.get_plugin()
        filters = {'network_id': [network['id'] for network in networks]}
        ports = plugin.get_ports(context, filters=filters)
        # default is to filter subnets based on 'enable_dhcp' flag
        if kwargs.get('enable_dhcp_filter', True):
            filters['enable_dhcp'] = [True]
        # NOTE(kevinbenton): we sort these because the agent builds tags
        # based on position in the list and has to restart the process if
        # the order changes.
        subnets = sorted(plugin.get_subnets(context, filters=filters),
                         key=operator.itemgetter('id'))
        # Handle the possibility that the dhcp agent(s) only has connectivity
        # inside a segment. If the segment service plugin is loaded and
        # there are active dhcp enabled subnets, then filter out the subnets
        # that are not on the host's segment.
        seg_plug = directory.get_plugin(
            segment_ext.SegmentPluginBase.get_plugin_type())
        seg_subnets = [subnet for subnet in subnets
                       if subnet.get('segment_id')]
        nonlocal_subnets = []
        if seg_plug and seg_subnets:
            host_segment_ids = seg_plug.get_segments_by_hosts(context, [host])
            # Gather the ids of all the subnets that are on a segment that
            # this host touches
            seg_subnet_ids = {subnet['id'] for subnet in seg_subnets
                              if subnet['segment_id'] in host_segment_ids}
            # Gather the ids of all the networks that are routed
            routed_net_ids = {seg_subnet['network_id']
                              for seg_subnet in seg_subnets}
            # Remove the subnets with segments that are not in the same
            # segments as the host. Do this only for the networks that are
            # routed because we want non-routed networks to work as
            # before.
            nonlocal_subnets = [subnet for subnet in seg_subnets
                                if subnet['id'] not in seg_subnet_ids]
            subnets = [subnet for subnet in subnets
                       if subnet['network_id'] not in routed_net_ids or
                       subnet['id'] in seg_subnet_ids]
        # Attach per-network groupings so the agent gets one self-contained
        # dict per network.
        grouped_subnets = self._group_by_network_id(subnets)
        grouped_nonlocal_subnets = self._group_by_network_id(nonlocal_subnets)
        grouped_ports = self._group_by_network_id(ports)
        for network in networks:
            network['subnets'] = grouped_subnets.get(network['id'], [])
            network['non_local_subnets'] = (
                grouped_nonlocal_subnets.get(network['id'], []))
            network['ports'] = grouped_ports.get(network['id'], [])
        return networks
    def get_network_info(self, context, **kwargs):
        """Retrieve and return extended information about a network.

        Returns the network dict augmented with sorted 'subnets',
        'non_local_subnets' and 'ports', or None when the network was
        deleted concurrently.
        """
        network_id = kwargs.get('network_id')
        host = kwargs.get('host')
        LOG.debug('Network %(network_id)s requested from '
                  '%(host)s', {'network_id': network_id,
                               'host': host})
        plugin = directory.get_plugin()
        try:
            network = plugin.get_network(context, network_id)
        except exceptions.NetworkNotFound:
            LOG.debug("Network %s could not be found, it might have "
                      "been deleted concurrently.", network_id)
            return
        filters = dict(network_id=[network_id])
        subnets = plugin.get_subnets(context, filters=filters)
        seg_plug = directory.get_plugin(
            segment_ext.SegmentPluginBase.get_plugin_type())
        nonlocal_subnets = []
        if seg_plug and subnets:
            seg_subnets = [subnet for subnet in subnets
                           if subnet.get('segment_id')]
            # If there are no subnets with segments, then this is not a routed
            # network and no filtering should take place.
            if seg_subnets:
                segment_ids = seg_plug.get_segments_by_hosts(context, [host])
                # There might be something to do if no segment_ids exist that
                # are mapped to this host. However, it seems that if this
                # host is not mapped to any segments and this is a routed
                # network, then this host shouldn't have even been scheduled
                # to.
                nonlocal_subnets = [subnet for subnet in seg_subnets
                                    if subnet['segment_id'] not in segment_ids]
                subnets = [subnet for subnet in seg_subnets
                           if subnet['segment_id'] in segment_ids]
        # NOTE(kevinbenton): we sort these because the agent builds tags
        # based on position in the list and has to restart the process if
        # the order changes.
        network['subnets'] = sorted(subnets, key=operator.itemgetter('id'))
        network['non_local_subnets'] = sorted(nonlocal_subnets,
                                              key=operator.itemgetter('id'))
        network['ports'] = plugin.get_ports(context, filters=filters)
        return network
@db_api.retry_db_errors
def release_dhcp_port(self, context, **kwargs):
"""Release the port currently being used by a DHCP agent."""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
LOG.debug('DHCP port deletion for %(network_id)s request from '
'%(host)s',
{'network_id': network_id, 'host': host})
plugin = directory.get_plugin()
plugin.delete_ports_by_device_id(context, device_id, network_id)
@oslo_messaging.expected_exceptions(exceptions.IpAddressGenerationFailure)
@db_api.retry_db_errors
@resource_registry.mark_resources_dirty
def create_dhcp_port(self, context, **kwargs):
"""Create and return dhcp port information.
If an expected failure occurs, a None port is returned.
"""
host = kwargs.get('host')
# Note(pbondar): Create deep copy of port to prevent operating
# on changed dict if RetryRequest is raised
port = copy.deepcopy(kwargs.get('port'))
LOG.debug('Create dhcp port %(port)s '
'from %(host)s.',
{'port': port,
'host': host})
port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
port['port'][portbindings.HOST_ID] = host
if 'mac_address' not in port['port']:
port['port']['mac_address'] = constants.ATTR_NOT_SPECIFIED
plugin = directory.get_plugin()
return self._port_action(plugin, context, port, 'create_port')
def _is_dhcp_agent_hosting_network(self, plugin, context, host,
network_id):
"""Check whether a DHCP agent (host) is hosting a network."""
agents = plugin.get_dhcp_agents_hosting_networks(context, [network_id],
hosts=[host])
return len(agents) != 0
@oslo_messaging.expected_exceptions(exceptions.IpAddressGenerationFailure)
@db_api.retry_db_errors
def update_dhcp_port(self, context, **kwargs):
"""Update the dhcp port."""
host = kwargs.get('host')
port = kwargs.get('port')
port['id'] = kwargs.get('port_id')
port['port'][portbindings.HOST_ID] = host
plugin = directory.get_plugin()
try:
network_id = port['port']['network_id']
old_port = plugin.get_port(context, port['id'])
if (old_port['device_id'] !=
constants.DEVICE_ID_RESERVED_DHCP_PORT and
old_port['device_id'] !=
utils.get_dhcp_agent_device_id(network_id, host) or
not self._is_dhcp_agent_hosting_network(plugin, | |
<filename>find_leaks.py
# -*- Mode: Python; tab-width: 4 -*-
# Copyright (c) 2002-2011 IronPort Systems and Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
find_leaks contains some useful utilities for tracking memory.
find_leaks.print_top_n (n)
Estimate the number of objects of each class throughout
the system by looking at the reference count of each class object.
find_leaks.process_size ()
Returns resident and virtual process size in bytes.
find_leaks.object_tracking_mixin
A mix-in class to keep track of all instances of its subclasses.
"""
import types
import sys
import string
import os
import getrusage
import gc
# Module-level handle to stdout (legacy Python 2 module).
out = sys.stdout
if hasattr (sys, 'getcounts'):
    # Only activate this if COUNT_ALLOCS is enabled in this python binary.
    # To make a COUNT_ALLOCS binary, follow these steps.
    # Go to the Python source tree.
    # gmake clean
    # gmake count_allocs
    # ./python.count_allocs
    # Snapshot of live-object counts from the previous call to live_changes().
    _prev_live = {}
    def live_changes (dont_print=0):
        """live_changes (dont_print=0) -> result
        This function takes a snapshot of objects currently in use.
        Calling it will show you if any new objects were created or
        if the object count for individual object types has changed.
        Typically you call it once, then do something, then call it
        again to see how things have changed in between.
        <dont_print>: If set, the changes are not sent to stdout, but
        instead or returned as a list of strings.
        """
        global _prev_live
        output = []
        # sys.getcounts() (COUNT_ALLOCS builds only) yields per-type
        # (name, allocated, freed, max_alloc) tuples; live = alloc - freed.
        for (name, alloced, freed, max_alloc) in sys.getcounts():
            live = alloced - freed
            if not _prev_live.has_key (name):
                line = "new: %s %d" % (name, live)
            elif _prev_live[name] != live:
                line = "change: %s %d" % (name, live - _prev_live[name])
            else:
                # Count unchanged since the last snapshot: nothing to report.
                line = None
            if line is not None:
                if dont_print:
                    output.append (line)
                else:
                    print line
            _prev_live[name] = live
        if dont_print:
            return output
def get_refcounts():
    """get_refcounts() -> counts
    Scan every imported module for (old-style) class objects and report
    each one's reference count.
    <counts>: List of (refcount, class) pairs, highest refcount first.
    """
    counts = {}
    # Collect every class object reachable from a module's namespace.
    for module in sys.modules.values():
        for name in dir(module):
            candidate = getattr (module, name)
            if type(candidate) is types.ClassType:
                counts[candidate] = sys.getrefcount (candidate)
    # Sort descending by refcount.
    pairs = [(refcount, klass) for (klass, refcount) in counts.items()]
    pairs.sort()
    pairs.reverse()
    return pairs
def find_all_types():
    """find_all_types() -> types
    Finds all type objects by scanning all imported modules.
    Note that this will miss any Type objects that are not in the module's
    global namespace.
    <types>: List of type objects, sorted by id for a stable order.
    """
    d = {}
    # collect all classes (dict used as a set to de-duplicate)
    for m in sys.modules.values():
        for sym in dir(m):
            o = getattr (m, sym)
            ot = type(o)
            if ot is types.TypeType:
                # top-level type object
                d[o] = None
            else:
                # Record the metatype of this object's type instead.
                d[type(ot)] = None
    all_types = d.keys()
    # Python 2 cmp-style sort; orders by object identity for determinism.
    all_types.sort (lambda a,b: cmp (id(a),id(b)))
    return all_types
def print_type_counts (n=20):
    """print_type_counts (n=20) -> None
    Print a list of the types with the highest refcount.
    <n>: Number of types to display. Set to 0 to display all of them.
    """
    # mstats is a project-local extension module; it tallies a histogram of
    # live instances per registered type.
    import mstats
    tl = find_all_types()
    mstats.initialize_type_table (tl)
    cl = mstats.get_type_hist()
    # Pair each count with its type, then sort ascending by count.
    sorted = zip (cl, tl)
    sorted.sort()
    if n:
        # Keep only the n largest counts, displayed largest first.
        sorted = sorted[-n:]
    sorted.reverse()
    for count, type in sorted:
        print '%10d %s' % (count, type)
def print_top_100():
    """print_top_100() -> None
    Alias to print_top_n(100).
    """
    return print_top_n(100)
def print_top_n(num):
    """print_top_n(num) -> None
    Display the classes with the highest refcount.
    <num>: Number of classes to display.
    """
    # get_refcounts() returns (refcount, class) pairs, highest first.
    for n, c in get_refcounts()[:num]:
        print '%10d %s' % (n, c.__name__)
class object_tracking_mixin:
    """object_tracking_mixin
    Base class for monitoring live instances of its subclasses.  Call
    _register_object() from your __init__; every live instance is then
    listed in the shared _addresses table until it is finalized.
    <_addresses>: Dictionary keyed by class object, whose value is a
    dictionary used as a set of live-instance addresses (id() values).
    """
    _addresses = {}
    def _register_object (self):
        # Record this instance's address under its concrete class.
        per_class = object_tracking_mixin._addresses.setdefault (self.__class__, {})
        per_class[id(self)] = 1
    def __del__ (self):
        # Drop the address entry when the instance is finalized.
        object_tracking_mixin._addresses[self.__class__].pop (id(self))
# Registry of wrapped objects keyed by their id(); keeps the wrapped object
# alive while any object_hiding_wrapper for it exists.
_ohw_addresses = {}
class object_hiding_wrapper:
    # Transparent proxy that hides the wrapped object behind an id lookup,
    # storing only the id in the wrapper's own __dict__ ('__ido' key).
    # Attribute reads/writes are forwarded to the real object via the
    # _ohw_addresses registry.
    def __init__ (self, obj):
        # Bypass __setattr__ by writing to __dict__ directly.
        self.__dict__['__ido'] = id(obj)
        _ohw_addresses[id(obj)] = obj
    def __getattr__ (self, attr):
        return getattr (_ohw_addresses[self.__dict__['__ido']], attr)
    def __setattr__ (self, attr, value):
        setattr (_ohw_addresses[self.__dict__['__ido']], attr, value)
    def __del__ (self):
        # Release the registry reference so the wrapped object can die too.
        del _ohw_addresses[self.__dict__['__ido']]
def process_size():
    """process_size() -> rsize, vsize
    Returns the resident and virtual size of the process according to the OS.
    Both values are in bytes.
    """
    # only works on FreeBSD
    if not os.path.exists('/proc/curproc'):
        raise NotImplementedError, "sorry, FreeBSD only right now"
    # read the memory map
    fd = open ('/proc/curproc/map')
    vsize = 0
    # XXX we can probably determine which are resident and use that
    # instead of getrusage, but I don't know how.
    while 1:
        line = fd.readline()
        if not line: break
        # Each map line starts with the region's start and end addresses
        # as hex strings.
        [first, second] = line.split ()[:2]
        startaddr = string.atol (first, 16)
        endaddr = string.atol (second, 16)
        vsize += endaddr - startaddr
    fd.close()
    # getrusage reports max RSS in kilobytes; convert to bytes.
    rsize = getrusage.getrusage() [3] * 1024L
    return rsize, vsize
def analyze_strings (cutoff=10, tmpdir='/tmp/'):
    """analyze_strings ([<cutoff>=10], [<tmpdir>='/tmp/']) => None
    dump all strings to a file, then build a histogram of all
    the duplicates with more than <cutoff> identical copies.
    Warning: may use lots of space in <tmpdir>...
    Note: requires /usr/bin/sort.
    """
    def NAME (kind):
        # Helper building the temp-file path for a given pipeline stage.
        return '%s%s.txt' % (
            os.path.join (tmpdir, 'all_strings'),
            '.' + kind
        )
    # mstats is a project-local extension module that can dump every live
    # string object to a file.
    import mstats
    print 'dumping... (%s)' % (NAME ('dump'))
    mstats.dump_strings (NAME ('dump'))
    print 'sorting...'
    # Sort with the external sort(1) so duplicates become adjacent lines.
    cmd = 'sort -T %s %s > %s' % (tmpdir, NAME ('dump'), NAME ('sorted'))
    if not os.system (cmd):
        os.unlink (NAME ('dump'))
        print 'building histogram...'
        f = open (NAME ('sorted'), 'rb')
        f2 = open (NAME ('hist'), 'wb')
        last = None
        count = 1
        total = 0
        # Run-length count adjacent identical lines in the sorted dump.
        while 1:
            l = f.readline()
            if not l:
                break
            elif l == last:
                count += 1
            else:
                if count >= cutoff:
                    f2.write ('%10d %r\n' % (count, last))
                    total += 1
                count = 1
            last = l
        # Flush the final run, which the loop above does not emit.
        if count >= cutoff:
            f2.write ('%10d %r\n' % (count, last))
            total += 1
        f2.close()
        f.close()
        os.unlink (NAME ('sorted'))
        if total:
            # Numeric sort on the count column for the final report.
            cmd = 'sort -T %s -n -k 1,1 %s > %s' % (tmpdir, NAME ('hist'), NAME ('sorted_hist'))
            if not os.system (cmd):
                print 'done. histogram is in %s' % (NAME ('sorted_hist'),)
            else:
                print 'error sorting histogram'
        else:
            print 'no strings duplicated over %d times' % (cutoff,)
            os.unlink (NAME ('hist'))
    else:
        print 'error sorting string dump'
def why_not_collected(obj, exclude=None):
"""why_not_collected(obj, exclude=None) -> None
If you call gc.collect(), and then determine that your object is not
collected as you expect it would (by seeing it in gc.get_objects()),
use this to figure out why.
<obj>: The object to investigate.
<exclude>: Optional list of object to avoid analyzing.
Typically you would call this with exclude=[dir(), globals()].
"""
to_visit = [obj]
visited = set()
visited.add(id(to_visit))
visited.add(id(visited))
if exclude:
for x in exclude:
visited.add(id(x))
while 1:
try:
obj = to_visit.pop()
except IndexError:
print 'done'
return
if id(obj) in visited:
continue
if type(obj) == types.FrameType:
continue
ref = gc.get_referrers(obj)
refcount = sys.getrefcount(obj)
# refcount is +1 because of call to getrefcount() has to INCREF it for
# the arguments.
refcount -= 1
if len(ref) < refcount:
print 'Leaky object: %r' % obj
print 'refcount is too high (%i) for number of referrers (%i).' % (refcount, len(ref))
elif len(ref) > refcount:
print 'Invalid reference count found for: %r' % obj
print 'refcount=%i but | |
lines and comments.
if line == "" or line.startswith("#"):
continue
entry = line.split(None, 1)
# Format is FILENAME *CHECKSUM
if len(entry) != 2:
logger.error("%s: Invalid %s manifest entry: %s", self, alg, line)
continue
entry_hash = entry[0]
entry_path = os.path.normpath(entry[1].lstrip("*"))
entry_path = _decode_filename(entry_path)
if entry_path in self.entries:
self.entries[entry_path][alg] = entry_hash
else:
self.entries[entry_path] = {}
self.entries[entry_path][alg] = entry_hash
    def _validate_structure(self):
        """Checks the structure of the bag, determining if it conforms to the
        BagIt spec. Returns true on success, otherwise it will raise
        a BagValidationError exception.
        """
        self._validate_structure_payload_directory()
        self._validate_structure_tag_files()
    def _validate_structure_payload_directory(self):
        """Raise BagValidationError unless the bag's data/ directory exists."""
        data_dir_path = os.path.join(self.path, "data")
        if not isdir(data_dir_path):
            raise BagValidationError("Missing data directory")
def _validate_structure_tag_files(self):
# Note: we deviate somewhat from v0.96 of the spec in that it allows
# other files and directories to be present in the base directory
if len(list(self.manifest_files())) == 0:
raise BagValidationError("Missing manifest file")
if "bagit.txt" not in os.listdir(self.path):
raise BagValidationError("Missing bagit.txt")
    def _validate_contents(self, processes=1, fast=False):
        """Validate payload contents.

        :param processes: number of worker processes for hashing (1 = serial)
        :param fast: when true, only check the Payload-Oxum byte/file counts
                     instead of re-hashing every payload file
        """
        if fast and not self.has_oxum():
            raise BagValidationError("cannot validate Bag with fast=True if Bag lacks a Payload-Oxum")
        self._validate_oxum()  # Fast
        if not fast:
            self._validate_entries(processes)  # *SLOW*
def _validate_oxum(self):
oxum = self.info.get('Payload-Oxum')
if oxum is None:
return
# If multiple Payload-Oxum tags (bad idea)
# use the first listed in bag-info.txt
if isinstance(oxum, list):
oxum = oxum[0]
byte_count, file_count = oxum.split('.', 1)
if not byte_count.isdigit() or not file_count.isdigit():
raise BagError("Invalid oxum: %s" % oxum)
byte_count = int(byte_count)
file_count = int(file_count)
total_bytes = 0
total_files = 0
for payload_file in self.payload_files():
payload_file = os.path.join(self.path, payload_file)
total_bytes += os.stat(payload_file).st_size
total_files += 1
if file_count != total_files or byte_count != total_bytes:
raise BagValidationError("Oxum error. Found %s files and %s bytes on disk; expected %s files and %s bytes." % (total_files, total_bytes, file_count, byte_count))
    def _validate_entries(self, processes):
        """
        Verify that the actual file contents match the recorded hashes stored in the manifest files
        """
        errors = list()
        # First we'll make sure there's no mismatch between the filesystem
        # and the list of files in the manifest(s)
        only_in_manifests, only_on_fs = self.compare_manifests_with_fs()
        for path in only_in_manifests:
            e = FileMissing(path)
            logger.warning(str(e))
            errors.append(e)
        for path in only_on_fs:
            e = UnexpectedFile(path)
            logger.warning(str(e))
            errors.append(e)
        # To avoid the overhead of reading the file more than once or loading
        # potentially massive files into memory we'll create a dictionary of
        # hash objects so we can open a file, read a block and pass it to
        # multiple hash objects
        available_hashers = set()
        for alg in self.algs:
            # hashlib.new raises ValueError for algorithms this runtime
            # doesn't provide; those are skipped with a warning.
            try:
                hashlib.new(alg)
                available_hashers.add(alg)
            except ValueError:
                logger.warning("Unable to validate file contents using unknown %s hash algorithm", alg)
        if not available_hashers:
            raise RuntimeError("%s: Unable to validate bag contents: none of the hash algorithms in %s are supported!" % (self, self.algs))
        # Worker processes ignore SIGINT so Ctrl-C is handled by the parent.
        def _init_worker():
            signal.signal(signal.SIGINT, signal.SIG_IGN)
        # One pickleable tuple per manifest entry for (multi)processing map.
        args = ((self.path, rel_path, hashes, available_hashers) for rel_path, hashes in list(self.entries.items()))
        try:
            if processes == 1:
                hash_results = list(map(_calc_hashes, args))
            else:
                try:
                    # processes == 0/None lets multiprocessing pick cpu_count().
                    pool = multiprocessing.Pool(processes if processes else None, _init_worker)
                    hash_results = pool.map(_calc_hashes, args)
                finally:
                    # NOTE(review): if Pool() itself raised, `pool` is unbound
                    # here and terminate() raises NameError — swallowed below.
                    try:
                        pool.terminate()
                    except:
                        # we really don't care about any exception in terminate()
                        pass
        # Any unhandled exceptions are probably fatal
        except:
            logger.exception("unable to calculate file hashes for %s", self)
            raise
        # Compare computed digests against the (lowercased) manifest values.
        for rel_path, f_hashes, hashes in hash_results:
            for alg, computed_hash in list(f_hashes.items()):
                stored_hash = hashes[alg]
                if stored_hash.lower() != computed_hash:
                    e = ChecksumMismatch(rel_path, alg, stored_hash.lower(), computed_hash)
                    logger.warning(str(e))
                    errors.append(e)
        if errors:
            raise BagValidationError("invalid bag", errors)
def _validate_bagittxt(self):
"""
Verify that bagit.txt conforms to specification
"""
bagit_file_path = os.path.join(self.path, "bagit.txt")
with open(bagit_file_path, 'r') as bagit_file:
first_line = bagit_file.readline()
if first_line.startswith(BOM):
raise BagValidationError("bagit.txt must not contain a byte-order mark")
class BagError(Exception):
    """Base class for all bag-related errors."""
class BagValidationError(BagError):
    """Validation failure, optionally carrying per-file error details."""

    def __init__(self, message, details=None):
        super(BagValidationError, self).__init__()
        self.message = message
        self.details = details if details is not None else []

    def __str__(self):
        if self.details:
            joined = " ; ".join(str(e) for e in self.details)
            return "%s: %s" % (self.message, joined)
        return self.message
class ManifestErrorDetail(BagError):
    """Base class for per-path manifest problems."""

    def __init__(self, path):
        super(ManifestErrorDetail, self).__init__()
        self.path = path
class ChecksumMismatch(ManifestErrorDetail):
    """A file whose computed hash differs from its manifest entry."""

    def __init__(self, path, algorithm=None, expected=None, found=None):
        # ManifestErrorDetail.__init__ already records self.path; the
        # duplicate assignment was removed.
        super(ChecksumMismatch, self).__init__(path)
        self.algorithm = algorithm
        self.expected = expected
        self.found = found

    def __str__(self):
        return "%s checksum validation failed (alg=%s expected=%s found=%s)" % (self.path, self.algorithm, self.expected, self.found)
class FileMissing(ManifestErrorDetail):
    """A path listed in a manifest that is absent from the filesystem."""

    def __str__(self):
        return "%s exists in manifest but not found on filesystem" % self.path
class UnexpectedFile(ManifestErrorDetail):
    """A filesystem path that no manifest accounts for."""

    def __str__(self):
        return "%s exists on filesystem but is not in manifest" % self.path
def _calc_hashes(args):
    """Worker: compute the requested hashes for one payload file.

    `args` is a (base_path, rel_path, hashes, available_hashes) tuple; it is
    packed into a single argument because multiprocessing.Pool.map passes one
    item per call (auto unpacking of sequences is illegal in Python 3).
    """
    base_path, rel_path, hashes, available_hashes = args
    full_path = os.path.join(base_path, rel_path)
    # Fresh hash objects per call; skip algorithms the runtime can't provide.
    f_hashers = {}
    for alg in hashes:
        if alg in available_hashes:
            f_hashers[alg] = hashlib.new(alg)
    try:
        f_hashes = _calculate_file_hashes(full_path, f_hashers)
    except BagValidationError as e:
        # Record the error text in place of a digest for every algorithm so
        # the caller reports it as a mismatch.
        f_hashes = dict((alg, str(e)) for alg in f_hashers)
    return rel_path, f_hashes, hashes
def _calculate_file_hashes(full_path, f_hashers):
"""
Returns a dictionary of (algorithm, hexdigest) values for the provided
filename
"""
if not os.path.exists(full_path):
raise BagValidationError("%s does not exist" % full_path)
try:
with open(full_path, 'rb') as f:
while True:
block = f.read(1048576)
if not block:
break
for i in list(f_hashers.values()):
i.update(block)
except IOError as e:
raise BagValidationError("could not read %s: %s" % (full_path, str(e)))
except OSError as e:
raise BagValidationError("could not read %s: %s" % (full_path, str(e)))
return dict(
(alg, h.hexdigest()) for alg, h in list(f_hashers.items())
)
def _load_tag_file(tag_file_name):
    """Parse a tag file into a dict; repeated tags collect into a list,
    in order of parsing, under the same key."""
    tags = {}
    with open(tag_file_name, 'r') as tag_file:
        for name, value in _parse_tags(tag_file):
            if name not in tags:
                tags[name] = value
            elif isinstance(tags[name], list):
                tags[name].append(value)
            else:
                # Second occurrence: promote the stored value to a list.
                tags[name] = [tags[name], value]
    return tags
def _parse_tags(tag_file):
    """Parses a tag file, according to RFC 2822. This
    includes line folding, permitting extra-long
    field values.

    See http://www.faqs.org/rfcs/rfc2822.html for
    more information.

    Yields (tag_name, tag_value) tuples.
    """
    tag_name = None
    tag_value = None
    # Line folding is handled by yielding values only after we encounter
    # the start of a new tag, or if we pass the EOF.
    for num, line in enumerate(tag_file):
        # If the first line carries a byte-order mark, drop it.
        if num == 0 and line.startswith(BOM):
            # BUG FIX: lstrip(BOM) treats BOM as a *character set* and would
            # also strip legitimate leading characters (and repeated BOM
            # chars); slicing removes exactly the one BOM prefix.
            line = line[len(BOM):]
        # Skip over any empty or blank lines.
        if len(line) == 0 or line.isspace():
            continue
        elif line[0].isspace() and tag_value is not None:  # folded line
            tag_value += line
        else:
            # Starting a new tag; yield the last one.
            if tag_name:
                yield (tag_name, tag_value.strip())
            if ':' not in line:
                raise BagValidationError("invalid line '%s' in %s" % (line.strip(),
                                         os.path.basename(tag_file.name)))
            parts = line.strip().split(':', 1)
            tag_name = parts[0].strip()
            tag_value = parts[1]
    # Passed the EOF. All done after this.
    if tag_name:
        yield (tag_name, tag_value.strip())
def _make_tag_file(bag_info_path, bag_info):
headers = list(bag_info.keys())
headers.sort()
with open(bag_info_path, 'w') as f:
for h in headers:
if isinstance(bag_info[h], list):
for val in bag_info[h]:
f.write("%s: %s\n" % (h, val))
else:
txt = bag_info[h]
# strip CR, LF and CRLF so they don't mess up the tag file
txt = re.sub(r'\n|\r|(\r\n)', '', txt)
f.write("%s: %s\n" % (h, txt))
def _make_manifest(manifest_file, data_dir, processes, algorithm='md5'):
    """Write a payload manifest for data_dir and return its Payload-Oxum.

    Returns:
        str: "<total_bytes>.<file_count>", the BagIt Payload-Oxum value.

    Raises:
        RuntimeError: if `algorithm` is not one of md5/sha1/sha256/sha512.
    """
    logger.info('writing manifest with %s processes', processes)
    # Dispatch table instead of the if/elif chain.
    manifest_line_funcs = {
        'md5': _manifest_line_md5,
        'sha1': _manifest_line_sha1,
        'sha256': _manifest_line_sha256,
        'sha512': _manifest_line_sha512,
    }
    try:
        manifest_line = manifest_line_funcs[algorithm]
    except KeyError:
        raise RuntimeError("unknown algorithm %s" % algorithm)
    if processes > 1:
        pool = multiprocessing.Pool(processes=processes)
        try:
            checksums = pool.map(manifest_line, _walk(data_dir))
        finally:
            # close/join moved into finally so workers are reaped even if
            # map() raises.
            pool.close()
            pool.join()
    else:
        checksums = list(map(manifest_line, _walk(data_dir)))
    num_files = 0
    total_bytes = 0
    with open(manifest_file, 'w') as manifest:
        for digest, filename, byte_count in checksums:
            num_files += 1
            total_bytes += byte_count
            manifest.write("%s %s\n" % (digest, _encode_filename(filename)))
        # NOTE: the original also called manifest.close() here; the with
        # statement already closes the file, so the redundant call was removed.
    return "%s.%s" % (total_bytes, num_files)
def _make_tagmanifest_file(alg, bag_dir):
    """Write tagmanifest-<alg>.txt covering every top-level file in bag_dir.

    Existing tagmanifest files are skipped so a tagmanifest never lists
    itself or a sibling tagmanifest.
    """
    tagmanifest_file = join(bag_dir, "tagmanifest-%s.txt" % alg)
    logger.info("writing %s", tagmanifest_file)
    checksums = []
    for f in listdir(bag_dir):
        if not isfile(join(bag_dir, f)):
            continue
        if re.match(r'^tagmanifest-.+\.txt$', f):
            continue
        m = _hasher(alg)
        with open(join(bag_dir, f), 'rb') as fh:
            while True:
                block = fh.read(16384)
                if not block:
                    break
                m.update(block)
        checksums.append((m.hexdigest(), f))
    # BUG FIX: tagmanifest_file already includes bag_dir; the original
    # re-joined it (open(join(bag_dir, tagmanifest_file))), which produces a
    # wrong bag_dir/bag_dir/... path whenever bag_dir is relative.
    with open(tagmanifest_file, 'w') as tagmanifest:
        for digest, filename in checksums:
            tagmanifest.write('%s %s\n' % (digest, filename))
def _walk(data_dir):
for dirpath, dirnames, filenames in os.walk(data_dir):
# if we don't sort here the order of entries is non-deterministic
# | |
# -*- coding: utf-8 -*-
"""
Zenoss template_router
"""
from zenossapi.apiclient import ZenossAPIClientError
from zenossapi.routers import ZenossRouter
class TemplateRouter(ZenossRouter):
    """
    Class for interacting with the Zenoss template router
    """

    def __init__(self, url, headers, ssl_verify):
        super(TemplateRouter, self).__init__(url, headers, ssl_verify,
                                             'template_router',
                                             'TemplateRouter')
        # uid/properties are populated by object subclasses (e.g.
        # ZenossTemplate); they stay None on the bare router.
        self.uid = None
        self.properties = None

    def __repr__(self):
        if self.uid:
            identifier = self.uid
        else:
            identifier = hex(id(self))
        return '<{0} object at {1}>'.format(
            type(self).__name__, identifier
        )

    @staticmethod
    def _qualify_path(path):
        """
        Roots a device class or object path under 'Devices', the Zenoss
        device tree root. Paths already rooted there are returned unchanged.
        (Extracted helper: this normalization was duplicated inline in six
        methods.)

        Arguments:
            path (str): Device class or object path

        Returns:
            str:
        """
        if path.startswith('Devices'):
            return path
        if path.startswith('/'):
            return 'Devices{0}'.format(path)
        return 'Devices/{0}'.format(path)

    def _get_properties(self, zobject):
        """
        Gets the properties of an object.

        Arguments:
            zobject (str): The uid of the Zenoss object (device, component,
                etc.) to get the properties of

        Returns:
            dict:
        """
        return self._router_request(
            self._make_request_data(
                'getInfo',
                dict(uid=zobject)
            )
        )

    def _get_template_by_uid(self, template_uid):
        """
        Gets a template by its full UID

        Arguments:
            template_uid (str): UID of the template

        Returns:
            ZenossTemplate:
        """
        template_data = self._router_request(
            self._make_request_data(
                'getInfo',
                dict(uid=template_uid)
            )
        )
        return ZenossTemplate(
            self.api_url,
            self.api_headers,
            self.ssl_verify,
            template_data['data']
        )

    def _get_data_source_by_uid(self, datasource_uid):
        """
        Get a data source by its full UID.

        Arguments:
            datasource_uid (str): UID of the data source to get

        Returns:
            ZenossDataSource:
        """
        data_source_data = self._router_request(
            self._make_request_data(
                'getDataSourceDetails',
                dict(uid=datasource_uid),
            )
        )
        return ZenossDataSource(
            self.api_url,
            self.api_headers,
            self.ssl_verify,
            data_source_data['record']
        )

    def _get_data_point_by_uid(self, datapoint_uid):
        """
        Get a data point by its full UID.

        Arguments:
            datapoint_uid (str): UID of the data point to get details for

        Returns:
            ZenossDataPoint:
        """
        dp_data = self._router_request(
            self._make_request_data(
                'getDataPointDetails',
                dict(uid=datapoint_uid),
            )
        )
        return ZenossDataPoint(
            self.api_url,
            self.api_headers,
            self.ssl_verify,
            dp_data['record']
        )

    def _get_threshold_by_uid(self, threshold_uid):
        """
        Gets a threshold by its full UID

        Arguments:
            threshold_uid (str): UID of the threshold

        Returns:
            ZenossThreshold:
        """
        threshold_data = self._router_request(
            self._make_request_data(
                'getThresholdDetails',
                dict(uid=threshold_uid)
            )
        )
        return ZenossThreshold(
            self.api_url,
            self.api_headers,
            self.ssl_verify,
            threshold_data['record']
        )

    def _get_graph_by_uid(self, graph_uid):
        """
        Get a graph by its full UID.

        Arguments:
            graph_uid (str): UID of the graph to get the definition of

        Returns:
            ZenossGraph:
        """
        graph_data = self._router_request(
            self._make_request_data(
                'getGraphDefinition',
                dict(uid=graph_uid),
            )
        )
        return ZenossGraph(
            self.api_url,
            self.api_headers,
            self.ssl_verify,
            graph_data['data']
        )

    def _find_templates_in_tree(self, templates_data):
        """
        Works through the dict structure returned by the Zenoss API for
        template queries and returns the defined templates from it.

        Arguments:
            templates_data (dict): Templates data returned by the API

        Returns:
            list: (uid, description) tuples
        """
        templates = []
        for node in templates_data['children']:
            if node['leaf']:
                # Only locally defined templates are collected here.
                if node['text'].find("Locally Defined") > -1:
                    templates.append((node['uid'].replace('/zport/dmd/', '', 1), node['qtip']))
            else:
                templates.extend(self._find_templates_in_tree(node))
        return templates

    def _set_properties(self, properties):
        """
        Sets arbitrary properties of any object.

        Arguments:
            properties (dict): Properties and values to set, including the
                target object's 'uid'

        Returns:
            dict:
        """
        return self._router_request(
            self._make_request_data(
                'setInfo',
                properties
            )
        )

    def set_properties(self, properties):
        """
        Sets properties of an object.

        Arguments:
            properties (dict): Properties and values to set

        Returns:
            dict:
        """
        if not isinstance(properties, dict):
            raise ZenossAPIClientError('Type error: Properties to set for {} must be a dict'.format(type(self).__name__))
        if not self.uid:
            return
        # BUG FIX: operate on a copy so the caller's dict is not mutated
        # with the injected 'uid' key.
        data = dict(properties)
        data['uid'] = self.uid
        properties_result = self._set_properties(data)
        # Mirror the accepted values onto this object's attributes.
        for prop in properties:
            if getattr(self, prop, False):
                setattr(self, prop, properties[prop])
            elif getattr(self, 'properties', False) and prop in self.properties:
                self.properties[prop] = properties[prop]
        return properties_result

    def get_all_templates(self):
        """
        Returns all defined templates.

        Returns:
            list(ZenossTemplate):
        """
        # Every template lives under the 'Devices' root, so querying that
        # device class covers all of them. (BUG FIX: the original passed
        # this method's own result — a list of ZenossTemplate objects —
        # back into _router_request, which is not a valid request payload.)
        return self.get_device_class_templates(device_class='Devices')

    def list_all_templates(self):
        """
        Returns all defined templates as a list of tuples containing the
        template UID and description.

        Returns:
            list(tuple):
        """
        # Same fix as get_all_templates: delegate to the device class query.
        return self.list_device_class_templates(device_class='Devices')

    def get_device_class_templates(self, device_class):
        """
        Gets the defined templates for a device class

        Arguments:
            device_class (str): Device class to get templates for

        Returns:
            list(ZenossTemplate):
        """
        templates_data = self._router_request(
            self._make_request_data(
                'getDeviceClassTemplates',
                dict(id=self._qualify_path(device_class)),
            )
        )
        templates = []
        for t in self._find_templates_in_tree(templates_data[0]):
            templates.append(
                self._get_template_by_uid(t[0])
            )
        return templates

    def list_device_class_templates(self, device_class):
        """
        Returns the defined templates for a device class as a list of
        tuples containing the template UID and description.

        Arguments:
            device_class (str): Device class to list templates for

        Returns:
            list(tuple):
        """
        templates_data = self._router_request(
            self._make_request_data(
                'getDeviceClassTemplates',
                dict(id=self._qualify_path(device_class)),
            )
        )
        return self._find_templates_in_tree(templates_data[0])

    def get_object_templates(self, zenoss_object):
        """
        Gets the templates bound to a specific object
        (monitored resource or component)

        Arguments:
            zenoss_object (str): The uid of the object, e.g.
                Devices/Server/Zuora/Aspose/devices/10.aspose.prod.slv.zuora

        Returns:
            list(ZenossTemplate):
        """
        templates_data = self._router_request(
            self._make_request_data(
                'getObjTemplates',
                dict(uid=self._qualify_path(zenoss_object)),
            )
        )
        templates = []
        for t in templates_data['data']:
            templates.append(
                self._get_template_by_uid(t['uid'].replace('/zport/dmd/', '', 1))
            )
        return templates

    def get_template(self, device_class, template):
        """
        Get a Zenoss template

        Arguments:
            device_class (str): Name of the device class where the template is defined
            template (str): Name of the template to get

        Returns:
            ZenossTemplate:
        """
        # Check to see if this is a local template instead of a device class
        # template (local templates live under .../devices/<device>).
        if "devices" in device_class:
            template_uid = '{0}/{1}'.format(device_class, template)
        else:
            template_uid = '{0}/rrdTemplates/{1}'.format(device_class, template)
        return self._get_template_by_uid(template_uid)

    def add_template(self, target, name):
        """
        Adds a template to a device class.

        Arguments:
            target (str): The uid of the target device class
            name (str): Unique name of the template to add

        Returns:
            ZenossTemplate:
        """
        target = self._qualify_path(target)
        if not target.endswith('rrdTemplates'):
            target = target + '/rrdTemplates'
        template_data = self._router_request(
            self._make_request_data(
                'addTemplate',
                dict(
                    id=name,
                    targetUid=target,
                )
            )
        )
        return self._get_template_by_uid(template_data['nodeConfig']['uid'].replace('/zport/dmd/', '', 1))

    def delete_template(self, device_class, template):
        """
        Removes a template.

        Arguments:
            device_class (str): Name of the device class where the template is defined
            template (str): Name of the template to remove

        Returns:
            dict:
        """
        device_class = self._qualify_path(device_class)
        template_uid = '{0}/rrdTemplates/{1}'.format(device_class, template)
        return self._router_request(
            self._make_request_data(
                'deleteTemplate',
                dict(uid=template_uid),
            )
        )

    def add_local_template(self, zenoss_object, name):
        """
        Adds a local template to an object.

        Arguments:
            zenoss_object (str): Uid of the object to add the local template to
            name (str): Unique name for the new local template

        Returns:
            ZenossTemplate:
        """
        template_data = self._router_request(
            self._make_request_data(
                'addLocalRRDTemplate',
                dict(
                    uid=self._qualify_path(zenoss_object),
                    templateName=name,
                )
            )
        )
        return self._get_template_by_uid(template_data['nodeConfig']['uid'].replace('/zport/dmd/', '', 1))

    def delete_local_template(self, zenoss_object, name):
        """
        Removes a local template from an object.

        Arguments:
            zenoss_object (str): Uid of the object to remove the local template from
            name (str): Unique name of the local template to remove

        Returns:
            dict:
        """
        return self._router_request(
            self._make_request_data(
                'removeLocalRRDTemplate',
                dict(
                    uid=self._qualify_path(zenoss_object),
                    templateName=name,
                )
            )
        )

    def get_data_source_types(self):
        """
        Gets the list of available data source types.

        Returns:
            list:
        """
        ds_types_data = self._router_request(
            self._make_request_data(
                'getDataSourceTypes',
                dict(query=''),
            )
        )
        return [ds_type['type'] for ds_type in ds_types_data['data']]

    def get_threshold_types(self):
        """
        Gets the list of available threshold types.

        Returns:
            list:
        """
        threshold_types_data = self._router_request(
            self._make_request_data(
                'getThresholdTypes',
                dict()
            )
        )
        return [t['type'] for t in threshold_types_data['data']]

    def add_data_point_to_graph(self, datapoint, graph, include_thresholds=False):
        """
        Adds a data point to a graph.

        Arguments:
            datapoint (str): Uid of the data point to add
            graph (str): Uid of the graph to add the data point to
            include_thresholds (bool): Set to True to include the related
                thresholds for the data point

        Returns:
            dict:
        """
        return self._router_request(
            self._make_request_data(
                'addDataPointToGraph',
                dict(
                    dataPointUid=datapoint,
                    graphUid=graph,
                    includeThresholds=include_thresholds,
                )
            )
        )
class ZenossTemplate(TemplateRouter):
"""
Class for Zenoss Template objects
"""
def __init__(self, url, headers, ssl_verify, template_data):
super(ZenossTemplate, self).__init__(url, headers, ssl_verify)
self.definition = template_data.setdefault('definition', None)
self.description = template_data.setdefault('description', None)
self.hidden = template_data.setdefault('hidden', False)
self.iconCls = template_data.setdefault('iconCls', None)
self.id = template_data.setdefault('id', None)
self.inspector_type = template_data.setdefault('inspector_type', None)
self.leaf = template_data.setdefault('leaf', True)
self.meta_type = template_data.setdefault('meta_type', 'RRDTemplate')
self.name = template_data['name']
self.qtip = template_data.setdefault('qtip', None)
self.targetPythonClass = template_data.setdefault('targetPythonClass', None)
self.text = template_data.setdefault('text', None)
if 'uid' in template_data:
self.uid = template_data['uid'].replace('/zport/dmd/', '', 1)
else:
self.uid = None
def copy(self, target):
"""
Copy a template to another device or device class.
Arguments:
target (str): Uid of the device or device class to copy to
Returns:
ZenossTemplate:
"""
if not target.endswith('rrdTemplates'):
target = target + '/rrdTemplates'
template_data = self._router_request(
self._make_request_data(
'copyTemplate',
dict(
uid=self.uid,
targetUid=target,
)
)
)
return ZenossTemplate(
self.api_url,
self.api_headers,
self.ssl_verify,
template_data['data']
)
def delete(self):
| |
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import tarfile
import uuid
import wagon
import yaml
import urllib
import shutil
import zipfile
import tempfile
import contextlib
from os import path
from setuptools import archive_util
from urllib2 import urlopen, URLError
from flask import request, current_app
from flask_restful import types
from flask_restful.reqparse import RequestParser
from manager_rest.constants import (FILE_SERVER_PLUGINS_FOLDER,
FILE_SERVER_SNAPSHOTS_FOLDER,
FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
FILE_SERVER_BLUEPRINTS_FOLDER,
FILE_SERVER_DEPLOYMENTS_FOLDER)
from manager_rest.deployment_update.manager import \
get_deployment_updates_manager
from manager_rest.archiving import get_archive_type
from manager_rest.storage.models import Plugin
from manager_rest.storage.models_states import SnapshotState
from manager_rest import config, chunked, manager_exceptions
from manager_rest.utils import (mkdirs,
get_formatted_timestamp,
current_tenant,
unzip,
files_in_folder,
remove)
from manager_rest.resource_manager import get_resource_manager
from manager_rest.constants import (CONVENTION_APPLICATION_BLUEPRINT_FILE,
SUPPORTED_ARCHIVE_TYPES)
class UploadedDataManager(object):
    """Template-method base class for receiving uploaded archives.

    Subclasses define the kind of data (e.g. 'snapshot'), where it is
    stored under the file server, and how its DB document is created;
    this class handles saving, extracting and moving the upload itself.
    """

    def receive_uploaded_data(self, data_id=None, **kwargs):
        # Entry point: save the upload to a temp path, create the document,
        # move the archive to its final directory, and always clean up.
        file_server_root = config.instance.file_server_root
        # NOTE(review): tempfile.mktemp is race-prone (the returned name may
        # be claimed before use); mkstemp would be safer — confirm.
        resource_target_path = tempfile.mktemp(dir=file_server_root)
        try:
            additional_inputs = self._save_file_locally_and_extract_inputs(
                resource_target_path,
                self._get_data_url_key(),
                self._get_kind())
            doc, dest_file_name = self._prepare_and_process_doc(
                data_id,
                file_server_root,
                resource_target_path,
                additional_inputs=additional_inputs,
                **kwargs)
            if not os.path.isfile(resource_target_path):
                # if the archive is a folder, we're copying its content,
                # so there is no meaning to a specific archive file name...
                dest_file_name = None
            self._move_archive_to_uploaded_dir(doc.id,
                                               file_server_root,
                                               resource_target_path,
                                               dest_file_name=dest_file_name)
            # 201 Created is handed back to the REST layer with the doc.
            return doc, 201
        finally:
            remove(resource_target_path)

    @classmethod
    def _extract_file_to_file_server(cls, archive_path, destination_root):
        """
        Extracting a package.

        :param destination_root: the root destination for the unzipped archive
        :param archive_path: the archive path
        :return: the generated (unique) name of the extracted app directory
        """
        # extract application to file server
        tempdir = tempfile.mkdtemp('-blueprint-submit')
        try:
            try:
                archive_util.unpack_archive(archive_path, tempdir)
            except archive_util.UnrecognizedFormat:
                raise manager_exceptions.BadParametersError(
                    'Blueprint archive is of an unrecognized format. '
                    'Supported formats are: {0}'.format(
                        SUPPORTED_ARCHIVE_TYPES))
            archive_file_list = os.listdir(tempdir)
            # The archive must contain a single top-level directory.
            if len(archive_file_list) != 1 or not os.path.isdir(
                    os.path.join(tempdir, archive_file_list[0])):
                raise manager_exceptions.BadParametersError(
                    'archive must contain exactly 1 directory')
            application_dir_base_name = archive_file_list[0]
            # generating temporary unique name for app dir, to allow multiple
            # uploads of apps with the same name (as it appears in the file
            # system, not the app name field inside the blueprint.
            # the latter is guaranteed to be unique).
            generated_app_dir_name = '{0}-{1}'.format(
                application_dir_base_name, uuid.uuid4())
            temp_application_dir = os.path.join(tempdir,
                                                application_dir_base_name)
            temp_application_target_dir = os.path.join(tempdir,
                                                       generated_app_dir_name)
            shutil.move(temp_application_dir, temp_application_target_dir)
            shutil.move(temp_application_target_dir, destination_root)
            return generated_app_dir_name
        finally:
            shutil.rmtree(tempdir)

    @staticmethod
    def _save_file_from_url(archive_target_path, data_url, data_type):
        # Reject requests that supply the archive in more than one way.
        if any([request.data,
                'Transfer-Encoding' in request.headers,
                'blueprint_archive' in request.files]):
            raise manager_exceptions.BadParametersError(
                "Can't pass both a {0} URL via query parameters, request body"
                ", multi-form and chunked.".format(data_type))
        try:
            # NOTE(review): binary archive written through a text-mode ('w')
            # handle; equivalent to 'wb' on Python 2/POSIX only — confirm.
            with contextlib.closing(urlopen(data_url)) as urlf:
                with open(archive_target_path, 'w') as f:
                    f.write(urlf.read())
        except URLError:
            raise manager_exceptions.ParamUrlNotFoundError(
                "URL {0} not found - can't download {1} archive"
                .format(data_url, data_type))
        except ValueError:
            # urlopen raises ValueError for malformed/unknown URL types.
            raise manager_exceptions.BadParametersError(
                "URL {0} is malformed - can't download {1} archive"
                .format(data_url, data_type))

    @staticmethod
    def _save_file_from_chunks(archive_target_path, data_type):
        if any([request.data,
                'blueprint_archive' in request.files]):
            raise manager_exceptions.BadParametersError(
                "Can't pass both a {0} URL via request body , multi-form "
                "and chunked.".format(data_type))
        # Reassemble the chunked transfer-encoding stream onto disk.
        with open(archive_target_path, 'w') as f:
            for buffered_chunked in chunked.decode(request.input_stream):
                f.write(buffered_chunked)

    @staticmethod
    def _save_file_content(archive_target_path, data_type):
        if 'blueprint_archive' in request.files:
            raise manager_exceptions.BadParametersError(
                "Can't pass both a {0} URL via request body , multi-form"
                .format(data_type))
        # The whole archive arrived in the request body.
        uploaded_file_data = request.data
        with open(archive_target_path, 'w') as f:
            f.write(uploaded_file_data)

    def _save_files_multipart(self, archive_target_path):
        """Save a multipart (form-data) upload; returns parsed inputs, if any."""
        inputs = {}
        for file_key in request.files:
            if file_key == 'inputs':
                content = request.files[file_key]
                # The file is a binary
                if 'application' in content.content_type:
                    content_payload = self._save_bytes(content)
                    # Handling yaml
                    if content.content_type == 'application/octet-stream':
                        # NOTE(review): yaml.load without an explicit Loader
                        # is unsafe on untrusted input — confirm provenance.
                        inputs = yaml.load(content_payload)
                    # Handling json
                    elif content.content_type == 'application/json':
                        # NOTE(review): content_payload is a str here, but
                        # json.load expects a file-like object; json.loads
                        # looks intended — verify.
                        inputs = json.load(content_payload)
                # The file is raw json
                elif 'text' in content.content_type:
                    inputs = json.load(content)
            elif file_key == 'blueprint_archive':
                self._save_bytes(request.files[file_key],
                                 archive_target_path)
        return inputs

    @staticmethod
    def _save_bytes(content, target_path=None):
        """
        content should support read() function if target isn't supplied,
        string rep is returned

        :param content:
        :param target_path:
        :return:
        """
        if not target_path:
            # NOTE(review): werkzeug FileStorage exposes read(), not
            # getvalue(); confirm what `content` actually is on this path.
            return content.getvalue().decode("utf-8")
        else:
            with open(target_path, 'wb') as f:
                f.write(content.read())

    def _save_file_locally_and_extract_inputs(self,
                                              archive_target_path,
                                              url_key,
                                              data_type='unknown'):
        """
        Retrieves the file specified by the request to the local machine.

        :param archive_target_path: the target of the archive
        :param data_type: the kind of the data (e.g. 'blueprint')
        :param url_key: if the data is passed as a url to an online resource,
        the url_key specifies what header points to the requested url.
        :return: dict of inputs parsed from the multipart form, if any
        """
        inputs = {}
        # Handling importing blueprint through url
        if url_key in request.args:
            self._save_file_from_url(archive_target_path,
                                     request.args[url_key],
                                     data_type)
        # handle receiving chunked blueprint
        elif 'Transfer-Encoding' in request.headers:
            self._save_file_from_chunks(archive_target_path, data_type)
        # handler receiving entire content through data
        elif request.data:
            self._save_file_content(archive_target_path, data_type)
        # handle inputs from form-data (for both the blueprint and inputs
        # in body in form-data format)
        if request.files:
            inputs = self._save_files_multipart(archive_target_path)
        return inputs

    def _move_archive_to_uploaded_dir(self,
                                      data_id,
                                      root_path,
                                      archive_path,
                                      dest_file_name=None):
        # Move (or, for directories, copy the contents of) the processed
        # archive into its permanent location under the file server.
        if not os.path.exists(archive_path):
            raise RuntimeError("Archive [{0}] doesn't exist - Cannot move "
                               "archive to uploaded {1}s "
                               "directory".format(archive_path,
                                                  self._get_kind()))
        uploaded_dir = os.path.join(
            root_path,
            self._get_target_dir_path(),
            data_id)
        if not os.path.isdir(uploaded_dir):
            os.makedirs(uploaded_dir)
        current_app.logger.info('uploading archive to: {0}'
                                .format(uploaded_dir))
        if os.path.isfile(archive_path):
            if not dest_file_name:
                archive_type = self._get_archive_type(archive_path)
                dest_file_name = '{0}.{1}'.format(data_id, archive_type)
            shutil.move(archive_path,
                        os.path.join(uploaded_dir, dest_file_name))
        else:
            # Archive is a directory: copy its top-level files over.
            for item in os.listdir(archive_path):
                shutil.copy(os.path.join(archive_path, item), uploaded_dir)

    def _get_kind(self):
        # e.g. 'snapshot' or 'deployment'; used in paths and error messages.
        raise NotImplementedError('Subclass responsibility')

    def _get_data_url_key(self):
        # Query-string key holding the archive URL, when passed by URL.
        raise NotImplementedError('Subclass responsibility')

    def _get_target_dir_path(self):
        # File-server-relative directory this kind of upload lives in.
        raise NotImplementedError('Subclass responsibility')

    def _get_archive_type(self, archive_path):
        # Archive extension ('zip', 'tar.gz', ...) for the stored file name.
        raise NotImplementedError('Subclass responsibility')

    def _prepare_and_process_doc(self,
                                 data_id,
                                 file_server_root,
                                 archive_target_path,
                                 additional_inputs,
                                 **kwargs):
        # Create the DB document; returns (doc, dest_file_name).
        raise NotImplementedError('Subclass responsibility')
class UploadedSnapshotsManager(UploadedDataManager):
    """Receives uploaded snapshot archives."""

    def _get_kind(self):
        return 'snapshot'

    def _get_data_url_key(self):
        return 'snapshot_archive_url'

    def _get_target_dir_path(self):
        return FILE_SERVER_SNAPSHOTS_FOLDER

    def _get_archive_type(self, archive_path):
        # Snapshot archives are always zip files.
        return 'zip'

    def _prepare_and_process_doc(self,
                                 data_id,
                                 file_server_root,
                                 archive_target_path,
                                 **kwargs):
        # Only a model row in UPLOADED state is needed; the base class
        # moves the archive into place. No dest file name override (None).
        snapshot = get_resource_manager().create_snapshot_model(
            data_id,
            status=SnapshotState.UPLOADED
        )
        return snapshot, None
class UploadedBlueprintsDeploymentUpdateManager(UploadedDataManager):
def _get_kind(self):
return 'deployment'
def _get_data_url_key(self):
return 'blueprint_archive_url'
def _get_target_dir_path(self):
return os.path.join(FILE_SERVER_DEPLOYMENTS_FOLDER,
current_tenant.name)
def _get_archive_type(self, archive_path):
return get_archive_type(archive_path)
def _prepare_and_process_doc(self,
data_id,
file_server_root,
archive_target_path,
additional_inputs=None,
**kwargs):
application_dir = self._extract_file_to_file_server(
archive_target_path,
file_server_root
)
return self._prepare_and_submit_blueprint(
file_server_root,
application_dir,
data_id,
additional_inputs), archive_target_path
def _move_archive_to_uploaded_dir(self, *args, **kwargs):
pass
@classmethod
def _prepare_and_submit_blueprint(cls,
file_server_root,
app_dir,
deployment_id,
additional_inputs=None):
app_file_name = cls._extract_application_file(file_server_root,
app_dir)
# add to deployment update manager (will also dsl_parse it)
try:
cls._process_plugins(file_server_root, app_dir)
update = get_deployment_updates_manager().stage_deployment_update(
deployment_id,
app_dir,
app_file_name,
additional_inputs=additional_inputs or {}
)
# Moving the contents of the app dir to the dest dir, while
# overwriting any file encountered
# create the destination root dir
file_server_deployment_root = \
os.path.join(file_server_root,
FILE_SERVER_DEPLOYMENTS_FOLDER,
current_tenant.name,
deployment_id)
app_root_dir = os.path.join(file_server_root, app_dir)
for root, dirs, files in os.walk(app_root_dir):
# Creates a corresponding dir structure in the deployment dir
dest_rel_dir = os.path.relpath(root, app_root_dir)
dest_dir = os.path.abspath(
os.path.join(file_server_deployment_root,
dest_rel_dir))
mkdirs(dest_dir)
# Calculate source dir
source_dir = os.path.join(file_server_root, app_dir, root)
for file_name in files:
source_file = os.path.join(source_dir, file_name)
relative_dest_path = os.path.relpath(source_file,
app_root_dir)
dest_file = os.path.join(file_server_deployment_root,
relative_dest_path)
shutil.copy(source_file, dest_file)
return update
finally:
shutil.rmtree(os.path.join(file_server_root, app_dir))
@classmethod
def _extract_application_file(cls, file_server_root, application_dir):
    """Resolve the blueprint file name inside the extracted application dir.

    Uses the `application_file_name` query parameter when present,
    otherwise falls back to the conventional blueprint file name.
    Raises BadParametersError when the chosen file does not exist.
    """
    full_application_dir = os.path.join(file_server_root, application_dir)
    if 'application_file_name' in request.args:
        application_file_name = urllib.unquote(
            request.args['application_file_name']).decode('utf-8')
        candidate = os.path.join(full_application_dir,
                                 application_file_name)
        if not os.path.isfile(candidate):
            raise manager_exceptions.BadParametersError(
                '{0} does not exist in the application '
                'directory'.format(application_file_name)
            )
    else:
        application_file_name = CONVENTION_APPLICATION_BLUEPRINT_FILE
        candidate = os.path.join(full_application_dir,
                                 application_file_name)
        if not os.path.isfile(candidate):
            raise manager_exceptions.BadParametersError(
                'application directory is missing blueprint.yaml and '
                'application_file_name query parameter was not passed')
    # Relative path from the file server root, since this path
    # is appended to the file server base uri.
    return application_file_name
@classmethod
def _process_plugins(cls, file_server_root, app_dir):
    """Zip every plugin directory found under <app_dir>/plugins."""
    plugins_directory = os.path.join(file_server_root, app_dir, 'plugins')
    if not os.path.isdir(plugins_directory):
        return  # no plugins bundled with this application
    for entry in os.listdir(plugins_directory):
        plugin_dir = os.path.join(plugins_directory, entry)
        if not os.path.isdir(plugin_dir):
            continue
        final_zip_name = '{0}.zip'.format(os.path.basename(plugin_dir))
        target_zip_path = os.path.join(file_server_root, app_dir,
                                       'plugins', final_zip_name)
        cls._zip_dir(plugin_dir, target_zip_path)
@classmethod
def _zip_dir(cls, dir_to_zip, target_zip_path):
    """Zip `dir_to_zip` (recursively) into `target_zip_path`.

    Archive entries are stored relative to the parent of `dir_to_zip`,
    so every entry starts with the directory's base name.
    """
    # `with` guarantees the zip file is closed even if writing fails --
    # the original try/finally expressed idiomatically.
    with zipfile.ZipFile(target_zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        plugin_dir_base_name = os.path.basename(dir_to_zip)
        # Length of the path prefix up to (and excluding) the base name;
        # stripping it yields the in-archive name.
        rootlen = len(dir_to_zip) - len(plugin_dir_base_name)
        for base, dirs, files in os.walk(dir_to_zip):
            for entry in files:
                fn = os.path.join(base, entry)
                zipf.write(fn, fn[rootlen:])
class UploadedBlueprintsManager(UploadedDataManager):
def _get_kind(self):
    """Entity kind handled by this manager."""
    return 'blueprint'
def _get_data_url_key(self):
    """Request-args key that may carry a remote blueprint archive URL."""
    return 'blueprint_archive_url'
def _get_target_dir_path(self):
    """Per-tenant folder where uploaded blueprints are stored."""
    tenant_folder = os.path.join(
        FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER, current_tenant.name)
    return tenant_folder
def _get_archive_type(self, archive_path):
    """Detect the format of the uploaded blueprint archive."""
    detected = get_archive_type(archive_path)
    return detected
def _prepare_and_process_doc(self,
                             data_id,
                             file_server_root,
                             archive_target_path,
                             **kwargs):
    """Extract the uploaded blueprint archive and submit the blueprint.

    Returns a (submitted blueprint, None) pair.
    """
    extracted_app_dir = self._extract_file_to_file_server(
        archive_target_path, file_server_root)
    requested_visibility = kwargs.get('visibility')
    submitted = self._prepare_and_submit_blueprint(
        file_server_root, extracted_app_dir, data_id, requested_visibility)
    return submitted, None
@classmethod
def _process_plugins(cls, file_server_root, blueprint_id):
plugins_directory = path.join(
file_server_root,
FILE_SERVER_BLUEPRINTS_FOLDER,
current_tenant.name,
blueprint_id,
"plugins")
if not path.isdir(plugins_directory):
return
plugins = [path.join(plugins_directory, directory)
for directory in os.listdir(plugins_directory)
if path.isdir(path.join(plugins_directory, directory))]
for plugin_dir in plugins:
final_zip_name = '{0}.zip'.format(path.basename(plugin_dir))
target_zip_path = path.join(plugins_directory, final_zip_name)
| |
for the end game buttons to either return to main menu or start a new game"""
if event.type == pygame.MOUSEBUTTONDOWN:
pop_menu = [x for x in self.popup_menu if x.collidepoint(event.pos)]
replay_menu = [x for x in self.popup_replay if x.collidepoint(event.pos)]
# print(event.pos, pop_menu, replay_menu)
if pop_menu:
self.endgame()
return False
elif replay_menu:
# save old difficulty
diffcultly = self.difficulty
self.__init__() # reset
self.oncall(diffcultly)
pygame.display.flip()
return True
def onkeypress(self, event):
    """This function determines if mouse is pressing a valid empty circle, reset, solve button or
    if a valid number key is pressed.
    """
    if event.type == pygame.MOUSEBUTTONDOWN:
        if self._active:
            # Rects of clickable tiles under the cursor, keyed "rowcol"
            # (single digits only -- fine for a 9x9 board).
            board_button_detection = {str(row) + str(col): g for row, d in enumerate(self.board) for col, g in enumerate(d) if g != 0 and g.collidepoint(event.pos)}
            reset_button = [x for x in self.reset_button if x.collidepoint(event.pos)]
            solve_button = [x for x in self.insta_solve_button if x.collidepoint(event.pos)]
            if reset_button:  # button to reset the board
                self.unfinished_board = copy.deepcopy(self.unfinished_boardcopy)
                self.drawlayout()
                pygame.display.flip()
            if solve_button and not self.insta_solve:  # clicking the solve button
                self.unfinished_board = self.solved_board
                self.insta_solve = True
                self.drawlayout()
                return
            if board_button_detection:  # clicked board tile
                bbd_key = [*board_button_detection][0]
                row, col = int(bbd_key[0]), int(bbd_key[-1])
                if self.board[row][col] != 0:
                    # Center of the clicked circle.
                    cir_posx = board_button_detection[bbd_key].x + board_button_detection[bbd_key].width/2
                    cir_posy = board_button_detection[bbd_key].y + board_button_detection[bbd_key].height/2
                    if self.selected_circle:
                        sc = self.selected_circle[0]
                        select_row = self.selected_circle[1]
                        select_col = self.selected_circle[2]
                        if sc.x == board_button_detection[bbd_key].x and sc.y == board_button_detection[bbd_key].y:
                            # Clicked the already-selected circle: deselect.
                            self.highlightbutton(row, col, cir_posx, cir_posy, False)
                            self.selected_circle = []
                        elif sc.x != board_button_detection[bbd_key].x or sc.y != board_button_detection[bbd_key].y:
                            # Clicked a different circle: move the highlight.
                            old_posx = sc.x + sc.width/2
                            old_posy = sc.y + sc.height/2
                            self.highlightbutton(select_row, select_col, old_posx, old_posy, False)
                            self.highlightbutton(row, col, cir_posx, cir_posy, True)
                            self.setnumber(row, col)
                        else:
                            # NOTE(review): unreachable -- the elif above is
                            # the exact negation of the if condition.  Also
                            # note it passes the OLD selection to setnumber,
                            # unlike the branch above.
                            self.highlightbutton(row, col, cir_posx, cir_posy, True)
                            self.setnumber(select_row, select_col)
                    else:
                        # No previous selection: select this circle.
                        # NOTE(review): relies on highlightbutton(...) having
                        # populated self.selected_circle -- confirm.
                        self.highlightbutton(row, col, cir_posx, cir_posy, True)
                        self.setnumber(self.selected_circle[1], self.selected_circle[2])
            pygame.display.flip()
    if event.type == pygame.KEYDOWN:
        if self._active and self.selected_circle:
            # set_keys maps digits -> accepted pygame key codes; find which
            # digit (if any) the pressed key corresponds to.
            sk = [*self.set_keys.values()]
            csk = [sk.index(k) for k in sk if event.key in k]
            if csk:
                # Backspace clears the tile (0).
                set_key = int(csk[0]) if event.key != pygame.K_BACKSPACE else 0
                sr = self.selected_circle[0]
                select_row = self.selected_circle[1]
                select_col = self.selected_circle[2]
                self.highlightbutton(select_row, select_col, sr.x+sr.width/2, sr.y + sr.height/2, True)
                self.setnumber(select_row, select_col, set_key)
                # Remember which tile received user input.
                self.input_num += [int(str(select_row) + str(select_col))]
    return
class ColorTheme:
    """Color-theme picker: a round toggle button in the top-right corner
    that expands into a full-screen selector of base/accent color pairs.

    Reads and (on selection) rewrites the module-level globals
    ``safety_base_color`` / ``safety_secondary_color`` and their inverses;
    also draws onto the global ``surface`` and uses ``width``/``height``,
    ``menu`` and ``board`` for redrawing when the selector closes.
    """

    def __init__(self):
        self.color = None
        self.theme_logo = pygame.image.load(resource_path('img/theme.png')).convert_alpha()
        self.theme_button = None        # rect of the toggle button, set by themebutton()
        self.bar_position = False       # True while the selector overlay is open
        self.theme_clicked = False
        self.theme_font = pygame.font.Font(None, 32)
        self.themelist = []             # clickable rects of the theme swatches
        # theme name -> (background color, accent color) hex pair
        self.theme_colors = {'dark_blue': ("#0D151B", "#4CC2F0"), 'dark_red': ("#1B1B1B", "#FF2470"),
                             'dark_green': ("#151014", "#379534"), 'dark_yellow': ("#181818", "#9C8F5D"),
                             'light_blue': ("#FFFFFF", "#66A0BF"), 'light_red': ("#FFFFFF", "#C16469"),
                             'grey_green': ("#383B35", "#AEC99E")
                             }

    def themebutton(self):
        """Draw the round theme toggle button with its logo and update
        only those two screen areas."""
        self.theme_button = pygame.draw.circle(surface, safety_secondary_color, (width-25, 25), 20)
        surlogo = surface.blit(self.theme_logo, self.theme_logo.get_rect(center=self.theme_button.center))
        pygame.display.update([surlogo, self.theme_button])

    def themeselector(self):
        """Function draws possible color combos and displays them in two columns"""
        text_surface = self.theme_font.render("Pick a Theme!", True, (255-safety_base_color.r, 255-safety_base_color.g, 255-safety_base_color.b))
        theme_text = pygame.draw.rect(surface, safety_base_color, (width / 2 - 100, 50, 200, 50))
        surface.blit(text_surface, (theme_text.x + (theme_text.width / 4 - 25), theme_text.y + (theme_text.height / 4)))
        ck = [*self.theme_colors]
        ck_co = 0
        # Grow each swatch circle frame by frame (i is the radius) for a
        # small "pop in" animation; swatches are laid out on a 2x5 grid.
        for y in range(1, 6):
            for x in range(1, 3):
                for i in range(25):
                    if ck_co > len(ck) - 1:
                        return  # all themes drawn
                    time.sleep(0.001)
                    if i == 24:
                        # Final radius: keep the rect for click hit-testing.
                        self.themelist += [pygame.draw.circle(surface, self.theme_colors[ck[ck_co]][0], (width * (x / 3), height * (y / 5)), 0 + i)]
                    else:
                        pygame.draw.circle(surface, self.theme_colors[ck[ck_co]][0], (width * (x / 3), height * (y / 5)), 0 + i)
                    if 25 >= i >= 10:
                        # Inner circle previews the accent color.
                        pygame.draw.circle(surface, self.theme_colors[ck[ck_co]][1], (width * (x / 3), height * (y / 5)), -10 + i)
                    pygame.display.flip()
                ck_co += 1

    def expandocircle(self, slider_pos=None):
        """A function to animate the expanding theme selector
        :param slider_pos: To either close or open the theme selector
        :return:
        """
        if slider_pos is None:
            return
        if slider_pos:  # open theme selector
            # Grow concentric circles from the button until the overlay
            # covers the screen, then draw the swatch grid on top.
            for i in range(height):
                if height-i == 50:
                    continue
                circle_pos = (width-25, 25)
                pygame.draw.circle(surface, safety_secondary_color, circle_pos, 20+i+3)
                pygame.draw.circle(surface, safety_base_color, circle_pos, 20+i+1)
                self.themebutton()
                pygame.display.update()
            self.themeselector()
        else:  # close theme selector
            # Shrink the overlay, restoring whatever screen (board or
            # menu) is underneath on every frame.
            for i in range(height):
                if height-i == 20:
                    return False
                if menu.startgame:
                    board.drawlayout()
                else:
                    main_menu()
                circle_pos = (width - 25, 25)
                pygame.draw.circle(surface, safety_secondary_color, circle_pos, 800-i-1)
                pygame.draw.circle(surface, safety_base_color, circle_pos, 800-i-3)
                self.themebutton()
                pygame.display.update()

    def themeevent(self, event) -> bool:
        """Function to switch between the different themes on mouse press
        :return: Boolean if theme selector is open
        """
        if event.type == pygame.MOUSEBUTTONDOWN:
            if self.theme_button.collidepoint(event.pos) and not self.bar_position:
                self.expandocircle(True)
                self.bar_position = True
            elif self.theme_button.collidepoint(event.pos) and self.bar_position:
                self.expandocircle(False)
                self.bar_position = False
            if self.bar_position:
                for i, bg in enumerate(self.themelist):
                    if bg.collidepoint(event.pos) and self.bar_position:
                        global safety_base_color, safety_secondary_color, safety_base_inverse, safety_secondary_inverse
                        safety_base_color = pygame.Color(list(self.theme_colors.values())[i][0])
                        safety_secondary_color = pygame.Color(list(self.theme_colors.values())[i][1])
                        # BUGFIX: pygame.Color takes (r, g, b).  The original
                        # passed 255-b as green and 255-g as blue, swapping
                        # the two channels -- unlike the correct r,g,b
                        # inverse used for the themeselector title text.
                        safety_base_inverse = pygame.Color(
                            255-safety_base_color.r,
                            255-safety_base_color.g,
                            255-safety_base_color.b)
                        safety_secondary_inverse = pygame.Color(
                            255-safety_secondary_color.r,
                            255-safety_secondary_color.g,
                            255-safety_secondary_color.b)
                        self.expandocircle(True)
                        break
        return self.bar_position
class MainMenu:
    """Main menu screen: logo, "New Game" button and an animated
    horizontal difficulty selector.  Draws onto the module-level
    ``surface`` using the global theme colors."""

    def __init__(self):
        self.newgame_button = []     # rects making up the "New Game" pill
        self.startgame = False       # flips True once a game is started
        self.left_arr = None         # rect of the left difficulty arrow
        self.right_arr = None        # rect of the right difficulty arrow
        self.render_diff = None      # rendered surface of the current difficulty
        self.base_font = pygame.font.Font(None, 32)
        self.diff_txt = pygame.font.Font(None, 24)
        self.logo = pygame.image.load(resource_path('img/main_logo.png')).convert_alpha()
        self.logo = pygame.transform.scale(self.logo, (100, 100))
        self.logo = pygame.transform.rotate(self.logo, 45)
        self.difficulty = ['Beginner', 'Easy', 'Medium', 'Hard', 'Extreme']
        self.diff_iter = 0           # index into self.difficulty

    def diff(self):
        # Currently selected difficulty name.
        return self.difficulty[self.diff_iter]

    def fade(self, w, h, slide, side):
        """Function to animate the difficulty selector horizontal scroll when arrow is pressed"""
        fade = pygame.Surface((w, h))
        fade.fill(safety_base_color)
        # Alpha depends on scroll direction and on whether the text is
        # sliding out (positive) or back in (negative).
        if side == "right":
            if slide < 0:
                fade.set_alpha(abs(slide)*4)
            elif slide >= 0:
                fade.set_alpha(slide)
        elif side == "left":
            if slide < 0:
                fade.set_alpha(255-slide*2)
            elif slide >= 0:
                fade.set_alpha(slide*4)
        surface.blit(fade, (width/3+41+slide, height/2+30, 75, 25))
        pygame.display.flip()
        pygame.time.delay(5)

    def draw_static_menu(self):
        """Function to load the main menu"""
        surface.fill(safety_base_color)
        cir = pygame.draw.circle(surface, safety_secondary_color, (width/2, height/3), 75)
        surface.blit(self.logo, self.logo.get_rect(center=cir.center))
        text_surface = self.base_font.render("New Game", True, safety_base_color)
        # Chevron arrows flanking the difficulty text.
        self.left_arr = pygame.draw.lines(surface, safety_base_inverse, False, [(width/4, height/2+50), (width/4-8, height/2+50-8), (width/4, height/2+50-16)], 5)
        self.right_arr = pygame.draw.lines(surface, safety_base_inverse, False, [(width-(width/4), height/2+50), (width-(width/4)+8, (height/2)+50-8), (width-(width/4), (height/2)+50-16)], 5)
        # Pill-shaped button: two end circles plus a rectangle.
        self.newgame_button = [pygame.draw.circle(surface, safety_secondary_color, (width/4, height/2+100), 25),
                               pygame.draw.circle(surface, safety_secondary_color, (width-(width/4), height / 2 + 100), 25),
                               pygame.draw.rect(surface, safety_secondary_color, (width/4, height/2+100-25, 245, 50))]
        surface.blit(text_surface, (self.newgame_button[-1].x + (self.newgame_button[-1].width/4+5), self.newgame_button[-1].y + (self.newgame_button[-1].height/4+2)))

    def draw_dynamic_menu(self, i=0):
        """Draw scrolling difficulty animation box"""
        # i is the horizontal scroll offset of the text during animation.
        pygame.draw.rect(surface, safety_base_color, (width/3+41+i, height/2+30, 75, 25))
        self.render_diff = self.diff_txt.render(self.difficulty[self.diff_iter], True, safety_base_inverse)
        txwid = self.render_diff.get_width()
        surface.blit(self.render_diff, ((width/2)-(txwid/2)+i, height*2/4+36))

    def mouse_press(self, event):
        """Function to detect mouse press for main menu difficulty scroll animation"""
        # Some event types lack pos/key; inject neutral defaults so the
        # checks below never raise AttributeError.
        event.pos = (0, 0) if not hasattr(event, 'pos') else event.pos
        event.key = 0 if not hasattr(event, 'key') else event.key
        if event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN:
            if self.left_arr.collidepoint(event.pos) or event.key == pygame.K_LEFT:
                # move difficulty selector to the left:
                # slide the old label out, switch difficulty, slide back in.
                for slide in range(80):
                    self.draw_dynamic_menu(slide)
                    if slide >= 50:
                        self.fade(75, 25, slide, "left")
                if self.diff_iter != 0:
                    self.diff_iter -= 1
                for slide in reversed(range(80)):
                    self.draw_dynamic_menu(-slide)
                    if slide <= 20:
                        self.fade(75, 25, -slide, "left")
            elif self.right_arr.collidepoint(event.pos) or event.key == pygame.K_RIGHT:
                # move difficulty selector to the right (mirror of above)
                for slide in range(80):
                    self.draw_dynamic_menu(-slide)
                    if slide >= 50:
                        self.fade(75, 25, -slide, "right")
                if self.diff_iter != len(self.difficulty) - 1:
                    self.diff_iter += 1
                for slide in reversed(range(80)):
                    self.draw_dynamic_menu(slide)
                    if slide <= 20:
                        self.fade(75, 25, slide, "right")
            pygame.display.flip()

    def startplaying(self, event):
        """Function to start the game if new game button is pressed"""
        if event.type == pygame.MOUSEBUTTONDOWN:
            newgame_button = [x for x in self.newgame_button if x.collidepoint(event.pos)]
            if not self.startgame and newgame_button:
                self.startgame = True
                return self.startgame
        return False
def cleanup(event):
    """Shut pygame down and exit the process on a window-close event."""
    if event.type != pygame.QUIT:
        return
    pygame.quit()
    exit()
def main_menu():
    """Redraw the complete main menu: static widgets, the difficulty
    selector and the theme toggle button (uses globals menu/theme)."""
    menu.draw_static_menu()
    menu.draw_dynamic_menu(0)
    theme.themebutton()
def resource_path(relative_path):
    """Get absolute path to a bundled resource.

    Works both in a dev checkout and in a PyInstaller bundle, where the
    unpack directory is exposed as ``sys._MEIPASS``.
    """
    base_path = getattr(sys, '_MEIPASS', None)
    if base_path is None:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
def main():
global board, menu, theme
board = GameBoard()
menu = MainMenu()
theme = ColorTheme()
main_menu()
pygame.display.flip()
difficulty = "beginner"
theme_stat = False
set_scene = -1 # 0 menu, 1 board, 2 end popup
current_scene = 0
while True:
if set_scene == 0: # menu
main_menu()
pygame.display.flip()
elif set_scene == 1: # board
board.oncall(difficulty)
pygame.display.flip()
elif set_scene == 2: # popup
board.end_game_popup()
set_scene = -1
for event in pygame.event.get():
cleanup(event)
if current_scene != 2:
theme_stat = theme.themeevent(event) # checking if theme selector is active
if current_scene == 1 and board.solved():
set_scene = current_scene = 2
continue
if current_scene == 0 and not theme_stat:
menu.mouse_press(event)
if menu.startplaying(event):
difficulty = menu.diff()
set_scene = current_scene = 1
elif current_scene == 1 and not theme_stat:
board.onkeypress(event)
if | |
<reponame>balena-io/support-shift-scheduler<filename>algo-core/ortools_solver.py
"""
Copyright 2020 Balena Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import datetime
import math
import colorama
import onboarding
import scheduler_utils
import pandas as pd
from ortools.sat.python import cp_model
# The availabilities we allow, by default 1 and 2, but 3 can be added as well if no schedule is feasible without
allowed_availabilities = [1, 2]
# Cost weight assigned to various soft constraints:
coeff_non_preferred = 80       # slot marked as possible-but-not-preferred
coeff_shorter_than_pref = 30   # shift shorter than the agent's ideal length
coeff_longer_than_pref = 70    # shift longer than the agent's ideal length
coeff_total_week_slots = 3     # weekly-load (squared) balancing term
coeff_agent = 30               # per-agent shift activation cost
coeff_handover = 30            # per-handover cost within a track/day
# Other constants:
# One slot = half an hour (weekAverageHours is multiplied by 2 when read),
# so 80 slots correspond to a 40-hour working week.
max_avg_per_week = 80
week_working_slots = 80
date_format = "%Y-%m-%d"
def setup_dataframes():
    """Set up dataframes for agents (df_a) and night shift info (df_n)."""
    # Tracks the smallest weekly-history value seen across agents;
    # module-level so other builders can use it as a baseline.
    global min_week_average_slots
    # Baseline for agent history:
    min_week_average_slots = 200
    # Initialize dataframes:
    df_a = pd.DataFrame(
        data=None,
        columns=[
            "handle",
            "email",
            "avg_slots_per_week",
            "pref_ideal_length",
            "slots",
            "slot_ranges",
        ],
    )
    i_tuple = []
    # Index for night-shifts (starting at 19hs => slot 38, ending 02hs => slot 52)
    for t, track in enumerate(tracks):
        if 38 in range(track["start_slot"], track["end_slot"]):
            for d in range(track["start_day"], track["end_day"] + 1):
                i_tuple.append((t, d))
    df_n_indices = pd.MultiIndex.from_tuples(i_tuple, names=("track", "day"))
    # One column per night slot (38..51); each cell holds the handle of
    # the covering agent, or "" when uncovered.
    df_n = pd.DataFrame(
        data="", columns=list(range(38, 52)), index=df_n_indices
    )
    # Fill dataframes per agent:
    agents = input_json["agents"]
    agents_with_fix_hours = scheduler_options["specialAgentConditions"]["agentsWithFixHours"]
    for agent in agents:
        # Hours -> half-hour slots (hence the * 2).
        week_average_slots = math.trunc(float(agent["weekAverageHours"]*2))
        min_week_average_slots = min(
            min_week_average_slots, week_average_slots
        )
        week_slots = agent["availableHours"]
        for (d, _) in enumerate(week_slots):
            # Set availability to 0 outside balena support hours:
            for i in range(start_slot):
                week_slots[d][i] = 0
            for i in range(end_slot, slots_in_day):
                week_slots[d][i] = 0
            # Fill df_n dataframe with night shifts:
            # (Night shifts encoded as 4 in Team Model)
            indices_4 = [i for i, x in enumerate(week_slots[d]) if x == 4]
            # If agent has a night shift today, check into which track
            # it can slot, and fill df_n accordingly:
            if len(indices_4) > 0:
                track_found = False
                # TODO should be improved: It is first come first serve without considering
                # length of track and length of night shift
                for t, track in enumerate(tracks):
                    if d in range(track["start_day"], track["end_day"] + 1) and not track_found:
                        if set(indices_4).issubset(set(range(track["start_slot"], track["end_slot"]))):
                            for s in indices_4:
                                df_n.loc[(t, d), s] = agent["handle"]
                            track_found = True
                if not track_found:
                    print(
                        f"{colorama.Fore.RED}\nWARNING! The night shift "
                        f"for {agent['handle']} could not be fitted in."
                        f"{colorama.Style.RESET_ALL}"
                    )
                # For Agents with fix hours, remove all availability except night shifts:
                # NOTE(review): as nested here this only runs on days the
                # agent actually has a night shift -- confirm intent.
                if agent['handle'] in agents_with_fix_hours:
                    for h in range(0, slots_in_day):
                        if week_slots[d][h] == 1 or week_slots[d][h] == 2:
                            week_slots[d][h] = 0
                # Reset all 1s and 4s to 2s in night shift agent's preferences:
                # This will also disincentivise algorithm from giving night
                # shift volunteers other shifts during the week as well.
                for h in range(0, slots_in_day):
                    if week_slots[d][h] == 1 or week_slots[d][h] == 4:
                        week_slots[d][h] = 2
                # Give agent a break until 15:00 the next day if he/she was
                # on night shift:
                if d != 4:
                    week_slots[d + 1][0:28] = [0 for i in range(28)]
        slot_ranges = scheduler_utils.slots_to_range(week_slots, end_slot, allowed_availabilities)
        df_a.loc[len(df_a)] = {
            "handle": agent["handle"],
            "email": agent["email"],
            "avg_slots_per_week": week_average_slots,
            "pref_ideal_length": agent["idealShiftLength"] * 2,
            "slots": week_slots,
            "slot_ranges": slot_ranges,
        }
    # slots: list of 5 lists, each of which has 24 items that mark the
    # availability of each slot (e.g.
    # [ [0,0,0,0,...,1,2,0,0], [0,0,0,0,...,1,2,0,0], [...], [...], [...] ])
    # slot_ranges: list of 5 lists, each of the 5 lists has a number
    # of nested lists that mark the ranges that an agent is available to do
    # support (e.g. [ [[8,12], [16, 24]], [], [...], [...], [...])
    # NB: e.g. [8,12] indicates agent is available 8-12, NOT 8-13.
    df_a.set_index("handle", inplace=True)
    return [df_a, df_n]
def get_unavailable_agents(day):
    """Return the set of agent handles with no availability on `day`.

    Reads the module-level `df_agents` dataframe; `day.weekday()` indexes
    the per-day `slot_ranges` lists.
    """
    day_number = day.weekday()
    # An empty slot-range list for the day means no availability at all.
    unavailable = {
        handle
        for handle in df_agents.index
        if len(df_agents.loc[handle, "slot_ranges"][day_number]) == 0
    }
    print(f"\nUnavailable employees on {day}")
    # Plain loop instead of the original list-comprehension-for-side-effect.
    for handle in unavailable:
        print(handle)
    return unavailable
def remove_agents_not_available_this_week():
    """Drop agents who are unavailable every day of the week.

    Mutates the module-level `df_agents` in place (and returns it);
    reads the module-level `unavailable_agents` per-day sets and
    `num_days`.
    """
    print("")
    # Iterate a snapshot of the index since we drop rows while looping.
    for handle in df_agents.index.tolist():
        # all(...) replaces the original manual and-fold over the days.
        if all(handle in unavailable_agents[d] for d in range(num_days)):
            df_agents.drop(index=handle, inplace=True)
            print(handle, "was removed for this week.")
    return df_agents
def flatten(lists):
    """Recursively flatten arbitrarily nested iterables, yielding leaves.

    Strings and bytes are treated as leaves, not iterated char-by-char.
    """
    for el in lists:
        # collections.abc.Iterable: the bare `collections.Iterable` alias
        # was deprecated since 3.3 and removed in Python 3.10.
        if isinstance(el, collections.abc.Iterable) and not isinstance(
            el, (str, bytes)
        ):
            yield from flatten(el)
        else:
            yield el
def setup_var_dataframes_veterans():
    """Create dataframes that will contain model variables for veterans."""
    # Module-level so the constraint/cost builders can reach them without
    # threading them through every call.
    global v_h, v_td, v_tdh, v_tdsh
    # h - veterans: one row per veteran handle, weekly-load variables.
    v_h = pd.DataFrame(
        data=None,
        index=agents_vet,
        columns=[
            "total_week_slots",
            "total_week_slots_squared",
            "total_week_slots_cost",
        ],
    )
    # td - veterans: one row per (track, day) within each track's span.
    td_tuple = []
    for t, track in enumerate(tracks):
        for d in range(track["start_day"], track["end_day"] + 1):
            td_tuple.append((t, d))
    td_multi_index = pd.MultiIndex.from_tuples(
        td_tuple,
        names=("track", "day"),
    )
    v_td = pd.DataFrame(
        data=None, index=td_multi_index, columns=["handover_cost"]
    )
    # tdh - veterans: per (track, day, handle) shift-level variables.
    tdh_tuple = []
    for t, track in enumerate(tracks):
        for d in range(track["start_day"], track["end_day"] + 1):
            for h in agents_vet:
                tdh_tuple.append((t, d, h))
    tdh_multi_index = pd.MultiIndex.from_tuples(
        tdh_tuple,
        names=("track", "day", "handle"),
    )
    v_tdh = pd.DataFrame(
        data=None,
        index=tdh_multi_index,
        columns=[
            "shift_start",
            "shift_end",
            "shift_duration",
            "interval",
            "is_agent_on",
            "agent_cost",
            "is_duration_shorter_than_ideal",
            "duration_cost",
            "is_in_pref_range",
        ],
    )
    # tdsh - veterans: per (track, day, slot, handle) slot-level variables.
    tdsh_tuple = []
    for t, track in enumerate(tracks):
        for d in range(track["start_day"], track["end_day"] + 1):
            for s in range(track["start_slot"], track["end_slot"]):
                for h in agents_vet:
                    tdsh_tuple.append((t, d, s, h))
    tdsh_multi_index = pd.MultiIndex.from_tuples(tdsh_tuple, names=("track", "day", "slot", "handle"))
    v_tdsh = pd.DataFrame(
        data=None,
        index=tdsh_multi_index,
        columns=[
            "is_start_smaller_equal_slot",
            "is_end_greater_than_slot",
            "is_slot_cost",
            "slot_cost",
        ],
    )
def fill_var_dataframes_veterans():
"""Fill veteran variable dataframes with OR-Tools model variables."""
# h - veterans:
for h in v_h.index:
v_h.loc[h, "total_week_slots"] = model.NewIntVar(
0, week_working_slots, f"total_week_slots_{h}"
)
v_h.loc[h, "total_week_slots_squared"] = model.NewIntVarFromDomain(
cp_model.Domain.FromValues(
[x ** 2 for x in range(0, week_working_slots + 2)]
),
f"total_week_slots_squared_{h}",
)
v_h.loc[h, "total_week_slots_cost"] = model.NewIntVarFromDomain(
cp_model.Domain.FromValues(
[
coeff_total_week_slots * x ** 2
for x in range(0, week_working_slots + 2)
]
),
f"total_week_slots_cost_{h}",
)
# td - veterans:
for t, track in enumerate(tracks):
for d in range(track["start_day"], track["end_day"] + 1):
v_td.loc[(t, d), "handover_cost"] = model.NewIntVarFromDomain(
cp_model.Domain.FromValues(
[
coeff_handover * x
for x in range(0, max_daily_handovers + 1)
]
),
f"handover_cost_{t}_{d}",
)
# tdh - veterans:
print("")
for t, track in enumerate(tracks):
for d in range(track["start_day"], track["end_day"] + 1):
for h in agents_vet:
if h in unavailable_agents[d] or d_prefs.loc[(d, h)].Min() > track["end_slot"]:
v_tdh.loc[(t, d, h), "shift_start"] = model.NewIntVar(
8, 8, f"shift_start_{t}_{d}_{h}"
)
v_tdh.loc[(t, d, h), "shift_end"] = model.NewIntVar(
8, 8, f"shift_end_{t}_{d}_{h}"
)
v_tdh.loc[(t, d, h), "shift_duration"] = model.NewIntVar(
0, 0, f"shift_duration_{t}_{d}_{h}"
)
else:
when_on_night_shift = []
seven_pm_slot = 38
if seven_pm_slot in range(track["start_slot"], track["end_slot"]):
when_on_night_shift = [
seven_pm_slot + i
for i, x in enumerate(
df_nights.loc[(t, d)].to_list()
)
if x == h
]
if len(when_on_night_shift) > 0:
start = when_on_night_shift[0]
end = when_on_night_shift[-1] + 1
duration = end - start
v_tdh.loc[
(t, d, h), "shift_start"
] = model.NewIntVar(
start, start, f"shift_start_{t}_{d}_{h}"
)
v_tdh.loc[
(t, d, h), "shift_end"
] = model.NewIntVar(
end, end, f"shift_end_{t}_{d}_{h}"
)
v_tdh.loc[
(t, d, h), "shift_duration"
] = model.NewIntVar(
duration,
duration,
f"shift_duration_{t}_{d}_{h}",
)
print(f"{h} on duty on night of {days[d]}")
else:
v_tdh.loc[
(t, d, h), "shift_start"
] = model.NewIntVarFromDomain(
d_prefs.loc[(d, h)], f"shift_start_{t}_{d}_{h}"
)
v_tdh.loc[
(t, d, h), "shift_end"
] = model.NewIntVarFromDomain(
d_prefs.loc[(d, h)], f"shift_end_{t}_{d}_{h}"
)
v_tdh.loc[
(t, d, h), "shift_duration"
] = model.NewIntVarFromDomain(
d_duration, f"shift_duration_{t}_{d}_{h}"
)
v_tdh.loc[(t, d, h), "interval"] = model.NewIntervalVar(
v_tdh.loc[(t, d, h), "shift_start"],
v_tdh.loc[(t, d, h), "shift_duration"],
v_tdh.loc[(t, d, h), "shift_end"],
f"interval_{t}_{d}_{h}",
)
v_tdh.loc[(t, d, h), "is_agent_on"] = model.NewBoolVar(
f"is_agent_on_{t}_{d}_{h}"
)
v_tdh.loc[(t, d, h), "agent_cost"] = model.NewIntVarFromDomain(
cp_model.Domain.FromValues(
[coeff_agent * x for x in range(0, max_avg_per_week)]
),
f"agent_cost_{t}_{d}_{h}",
)
v_tdh.loc[
(t, d, h), "is_duration_shorter_than_ideal"
] = model.NewBoolVar(
f"is_duration_shorter_than_ideal_{t}_{d}_{h}"
)
duration_cost_list = set(
[
coeff_shorter_than_pref * x
for x in range(0, max_duration - min_duration)
]
)
duration_cost_list = list(
duration_cost_list.union(
set(
[
coeff_longer_than_pref * x
for x in range(0, max_duration - min_duration)
]
)
)
)
duration_cost_list.sort()
v_tdh.loc[
(t, | |
list of sprites
# (currently set to ignore duplictes), make it list on return
sprites.add(thing.sprite)
return thingsList, list(sprites)
# Some other functions used in for drawing
############################################
def getImageSizeOffset(vertexes, linedefs, sidedefs, sectors, options):
    '''Calculate out file's size and offset to use for WAD's coordinates.

    Returns (width, height, offsetX, offsetY): image dimensions including
    margins, plus the offsets that convert map XY into image XY.
    '''
    margins = options["margins"]
    hCoefX = options["coefX"]
    hCoefY = options["coefY"]
    minX = minY = 100000
    maxX = maxY = -100000
    # Walk every linedef, its two vertexes, and the floor/ceiling heights
    # of the sectors attached to it, growing the bounding box as we go.
    for linedef in linedefs:
        for sidedef in (linedef.front, linedef.back):
            if sidedef == 65535 or sidedef >= len(sidedefs):
                continue  # 65535 = "no sidedef on this side"
            sector = sectors[sidedefs[sidedef].sector]
            for height in (sector.floorHeight, sector.ceilingHeight):
                for vertexN in (linedef.beg, linedef.end):
                    if vertexN >= len(vertexes):
                        continue
                    vx = vertexes[vertexN].x
                    vy = vertexes[vertexN].y
                    # Height-shifted position (isometric projection).
                    shiftedX = int(vx - height * hCoefX)
                    shiftedY = int(vy - height * hCoefY)
                    minX = min(minX, vx, shiftedX)
                    maxX = max(maxX, vx, shiftedX)
                    minY = min(minY, vy, shiftedY)
                    maxY = max(maxY, vy, shiftedY)
    # Margin counted twice for the size; (margin - min) converts a map
    # coordinate into an image coordinate.
    return (maxX - minX + 2 * margins, maxY - minY + 2 * margins,
            -minX + margins, -minY + margins)
def findFloodPoint(linedef, vertexes, right=True):
    ''' Given a linedef, find coordinates of a point to start floodfill from
    it is 1 pixel sideways from linedef's center.
    "right" determines if sideways means right or left; the right side is
    as seen looking from the linedef's Beginning towards its End.
    Returns (-1000000, -1000000) for unusable linedefs (bad vertex index,
    or too short to pick a side safely).
    '''
    begN, endN = linedef.beg, linedef.end
    if begN >= len(vertexes) or endN >= len(vertexes):
        return -1000000, -1000000
    x1, y1 = vertexes[begN].x, vertexes[begN].y
    x2, y2 = vertexes[endN].x, vertexes[endN].y
    # Too short a linedef: skip it to be safe.
    if abs(x2 - x1) <= 2 and abs(y2 - y1) <= 2:
        return -1000000, -1000000
    # Midpoint of the linedef.
    x = (x1 + x2) // 2
    y = (y1 + y2) // 2
    # Sideways distance; d=1 works best.  Negating d flips the side, which
    # collapses the original two mirrored branches into one.
    d = 1 if right else -1
    if x2 > x1:
        y += d
    elif x2 < x1:
        y -= d
    if y2 > y1:
        x -= d
    elif y2 < y1:
        x += d
    return x, y
def getLinePixels(beg, end):
    ''' This will be used in the peculiar way we draw walls.
    Given two XY coordinates, return the list of integer XY points of a
    rasterized line connecting them (inclusive of both ends).

    Whenever a step would move diagonally, an extra in-between pixel is
    inserted so the line stays 4-connected -- without it, walls drawn
    from these points have holes in them.
    '''
    if beg == end:
        return [beg]
    (x1, y1), (x2, y2) = beg, end
    dx, dy = x2 - x1, y2 - y1
    pixels = []
    if abs(dx) > abs(dy):
        # X is the driving axis.
        step = 1 if dx > 0 else -1
        for x in range(x1, x2 + step, step):
            y = int(y1 + dy * ((x - x1) / dx))
            if pixels and x != pixels[-1][0] and y != pixels[-1][1]:
                # Diagonal step: bridge it to keep the line connected.
                pixels.append((pixels[-1][0], y))
            pixels.append((x, y))
    else:
        # Y is the driving axis.
        step = 1 if dy > 0 else -1
        for y in range(y1, y2 + step, step):
            x = int(x1 + dx * ((y - y1) / dy))
            if pixels and x != pixels[-1][0] and y != pixels[-1][1]:
                pixels.append((x, pixels[-1][1]))
            pixels.append((x, y))
    return pixels
def lightImage(im, light, colorConversion):
    ''' Make a lighting conversion for im image (in place) and return it.
    Each pixel's RGB is mapped through colorConversion[light]; the alpha
    channel is preserved unchanged.  Assumes an RGBA image.
    '''
    px = im.load()
    cols, rows = im.size
    mapping = colorConversion[light]
    for i in range(cols):
        for j in range(rows):
            r, g, b, opacity = px[i, j]
            lit = mapping[(r, g, b)]
            px[i, j] = (lit[0], lit[1], lit[2], opacity)
    return im
def gammaCorrection(im, gamma):
    ''' Apply gamma correction to an image
    (in place, so does not return anything)
    gamma < 1 will lighten the image
    gamma > 1 will darken the image
    by default 0.7 gamma applied to the final image
    (as it usually a bit dark)
    '''
    px = im.load()
    cols, rows = im.size
    for i in range(cols):
        for j in range(rows):
            channels = px[i, j][:3]
            if channels == (0, 0, 0):
                continue  # keep pure black untouched
            px[i, j] = tuple(
                int((value / 255) ** gamma * 255) for value in channels)
# Functions that are used in actual drawing of the final picture
################################################################
def getWallImage(wall, textures, colorConversion, scaleY, shrink):
    ''' Given the Wall object, return wall image
    That is, texture applied to a rectangle of wall's size
    Lighting, offsets and "unpegged-ness" are applied here too
    Returns False when there is nothing to draw.
    '''
    # Just unpacking data for convenience
    ceiling, floor, sx, sy, ex, ey, texture,\
        xOff, yOff, fromTop, position, light = \
        wall.ceiling, wall.floor, wall.sx, wall.sy, wall.ex, wall.ey, \
        wall.texture.upper(), wall.xOffset, wall.yOffset, wall.fromTop, \
        wall.position, wall.light
    # This means no texture ("-" NUL-padded to the 8-byte WAD name).
    if texture == "-\x00\x00\x00\x00\x00\x00\x00":
        return False
    # This means either there is a missing texture
    # or I screwd up somewhere
    if texture not in textures:
        return False
    # Wall's "physical" size
    height = ceiling - floor
    # "/ scaleY" to compensate for distortion of isometric projection,
    # if we squeeze Y axis, wall "physical size should remain the same
    width = int(math.sqrt((sx - ex) ** 2 + ((sy - ey) / scaleY) ** 2))
    # Negative width is impossible, but negative height
    # is an error that I saw a few times
    if height <= 0 or width <= 0:
        return False
    textim = textures[texture]
    # Transparent canvas of the wall's size; texture tiles are pasted on.
    im = Image.new("RGBA", (width, height), color=(0, 0, 0, 0))
    # Correction of excessive xOffset
    while xOff > textim.size[0]:
        xOff -= textim.size[0]
    while xOff < -textim.size[0]:
        xOff += textim.size[0]
    # Here we paste texture to the canvas
    # TODO: Calculate i and j more elegantly
    # I did budget 1 extra texture width to the left and 3 to the right
    # but it will not be enough with some wild offest values
    for i in range(-1, im.size[0] // textim.size[0] + 3):
        for j in range(-1, im.size[1] // textim.size[1] + 3):
            # special rule for midtextures:
            # only repreat once vertically
            if position == "mid" and j != 1:
                continue
            # Two different ways of pasting textures:
            # FromTop (align top of the wall /top of the texture)
            # Used for regular middles, regular bottom and unpegged tops
            if fromTop:
                im.paste(textim, (i * textim.size[0] - xOff,
                                  j * textim.size[1] - yOff), textim)
            else:
                if position in ("top", "mid"):
                    # regular tops and mid-textures (draw from bottom)
                    im.paste(textim, (i * textim.size[0] - xOff,
                                      im.size[1] - j * textim.size[1] - yOff - 1), textim)
                else:
                    # upegged bottoms: extra shift keeps the texture
                    # anchored to the (shrunk) 128-unit grid.
                    im.paste(textim, (i*textim.size[0]-xOff,
                                      im.size[1] - j * textim.size[1] -
                                      yOff - 1 - (floor % (128 // shrink))), textim)
                    #yOff - (floor % 128)), textim)
    # Map WAD light (0-255) onto the 32 light levels (0 = brightest).
    lightLevel = 31 - light // 8
    im = lightImage(im, lightLevel, colorConversion)
    return im
def pasteWall(bgpx, coords, wall, textures, zBuffer, offsetX, offsetY,
colorConversion, options):
''' Draw a wall on a final picture
Among other things this function is given "coords":
"coords" is 4-point polygon that this wall should fill
(all calculations already been done at this point)
'''
hCoefX, hCoefY, scaleY, shrink = \
options["coefX"], options["coefY"], \
options["scaleY"], options["shrink"]
# get the wall image
fgim = getWallImage(wall, textures, colorConversion, scaleY, shrink)
if not fgim:
return
# unpack polygone coordinates
x1, y1, x2, y2, x3, y3, x4, y4 = coords
# Now the weird stuff:
# The way I draw that polygon is I draw two lines:
# along the floor (bottom) and along the ceiling (top) of the wall
# and then series of lines between each point of floor | |
<filename>tests/test_lin_rg.py
import unittest
import os
from numpy.testing import assert_allclose
from numpy import ones, zeros, float64, array, append, genfromtxt
from ml_algorithms.lin_rg import (normal_eqn, cost_func,
reg_cost_func, grad,
reg_grad, predict, h)
from ml_algorithms.utils import numerical_grad
TESTDATA1 = os.path.join(os.path.dirname(__file__), 'data1.csv')
TESTDATA2 = os.path.join(os.path.dirname(__file__), 'data2.csv')
class TestLinearRegression(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Load both CSV fixtures once per class and set the step size
        used by the numerical-gradient checks."""
        cls.data1 = genfromtxt(TESTDATA1, delimiter=',')
        cls.data2 = genfromtxt(TESTDATA2, delimiter=',')
        # Finite-difference step for numerical_grad comparisons.
        cls.err = 1e-4
# NORMAL EQUATION
def test_normal_eqn_data1(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
assert_allclose([[-3.896], [1.193]],
normal_eqn(X, y),
rtol=0, atol=0.001)
def test_normal_eqn_data2(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=int)
X = append(intercept, X, axis=1)
assert_allclose([[89597.909], [139.210], [-8738.019]],
normal_eqn(X, y),
rtol=0, atol=0.001)
# COST FUNCTION
def test_cost_func_data1_1(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = zeros((n + 1, 1), dtype=float64)
assert_allclose([[32.073]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data1_2(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
assert_allclose([[10.266]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data1_3(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-1], [2]])
assert_allclose([[54.242]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data2_1(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = zeros((n + 1, 1), dtype=float64)
assert_allclose([[65591548106.457]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data2_2(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
assert_allclose([[64828197300.798]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
def test_cost_func_data2_3(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-25.3], [32], [7.8]])
assert_allclose([[43502644952.311]],
cost_func(X, y, theta),
rtol=0, atol=0.001)
# REGULARIZED COST FUNCTION
def test_reg_cost_func_data1_1(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 0
assert_allclose([[10.266]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data1_2(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 100
assert_allclose([[10.781984]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data1_3(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-1], [2]])
_lambda = 750
assert_allclose([[69.706373]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data2_1(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 0
assert_allclose([[64828197300.798]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data2_2(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 1000000
assert_allclose([[64828218577.393623]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_cost_func_data2_3(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-25.3], [32], [7.8]])
_lambda = 1000000
assert_allclose([[43514185803.375198]],
reg_cost_func(X, y, theta, _lambda),
rtol=0, atol=0.001)
# GRADIENT
def test_grad_data1_1(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = zeros((n + 1, 1), dtype=float64)
assert_allclose([[-5.839], [-65.329]],
grad(X, y, theta),
rtol=0, atol=0.001)
def test_grad_data1_2(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
assert_allclose([[3.321], [24.235]],
grad(X, y, theta),
rtol=0, atol=0.001)
def test_grad_data1_3(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-1], [2]])
assert_allclose([[9.480], [89.319]],
grad(X, y, theta),
rtol=0, atol=0.001)
def test_grad_data1_4(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = (1 / 3) * ones((n + 1, 1), dtype=float64)
def J(theta):
return cost_func(X, y, theta)
assert_allclose(grad(X, y, theta),
numerical_grad(J, theta, self.err),
rtol=0, atol=0.001)
def test_grad_data1_5(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = - 7.43 * ones((n + 1, 1), dtype=float64)
def J(theta):
return cost_func(X, y, theta)
assert_allclose(grad(X, y, theta),
numerical_grad(J, theta, self.err),
rtol=0, atol=0.001)
def test_grad_data1_6(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[3.46], [-2.76]])
def J(theta):
return cost_func(X, y, theta)
assert_allclose(grad(X, y, theta),
numerical_grad(J, theta, self.err),
rtol=0, atol=0.001)
def test_grad_data2_1(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = zeros((n + 1, 1), dtype=float64)
assert_allclose([[-340412.659], [-764209128.191], [-1120367.702]],
grad(X, y, theta),
rtol=0, atol=0.001)
def test_grad_data2_2(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
assert_allclose([[-338407.808], [-759579615.064], [-1113679.894]],
grad(X, y, theta),
rtol=0, atol=0.001)
def test_grad_data2_3(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-25.3], [32], [7.8]])
assert_allclose([[-276391.445], [-616340858.434], [-906796.414]],
grad(X, y, theta),
rtol=0, atol=0.001)
# REGULARIZED GRADIENT
def test_reg_grad_data1_1(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 0
assert_allclose([[3.321], [24.235]],
reg_grad(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_grad_data1_2(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 100
assert_allclose([[3.320665], [25.265821]],
reg_grad(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_grad_data1_3(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-1], [2]])
_lambda = 750
assert_allclose([[9.480465], [104.783153]],
reg_grad(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_grad_data1_4(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = -8.4 * ones((n + 1, 1), dtype=float64)
_lambda = 0.762
def J(theta):
return reg_cost_func(X, y, theta, _lambda)
assert_allclose(reg_grad(X, y, theta, _lambda),
numerical_grad(J, theta, self.err),
rtol=0, atol=0.001)
def test_reg_grad_data1_5(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = 3.2 * ones((n + 1, 1), dtype=float64)
_lambda = 154
def J(theta):
return reg_cost_func(X, y, theta, _lambda)
assert_allclose(reg_grad(X, y, theta, _lambda),
numerical_grad(J, theta, self.err),
rtol=0, atol=0.001)
def test_reg_grad_data1_6(self):
y = self.data1[:, -1:]
X = self.data1[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = array([[-12.4], [23.56]])
_lambda = 943
def J(theta):
return reg_cost_func(X, y, theta, _lambda)
assert_allclose(reg_grad(X, y, theta, _lambda),
numerical_grad(J, theta, self.err),
rtol=0, atol=0.001)
def test_reg_grad_data2_1(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, X, axis=1)
theta = ones((n + 1, 1), dtype=float64)
_lambda = 0
assert_allclose([[-338407.808], [-759579615.064], [-1113679.894]],
reg_grad(X, y, theta, _lambda),
rtol=0, atol=0.001)
def test_reg_grad_data2_2(self):
y = self.data2[:, -1:]
X = self.data2[:, :-1]
m, n = X.shape
intercept = ones((m, 1), dtype=float64)
X = append(intercept, | |
'uliana':
uliana.send_chat_action(id, 'typing')
time.sleep(4)
uliana.send_message(-1001351496983,
'Как здорово! Спасибо за помощь, [' + x['pionername'] + '](tg://user?id=' + str(
x['id']) + ')!' + \
'', parse_mode='markdown')
users.update_one({'id': x['id']}, {'$inc': {'Uliana_respect': random.randint(4, 5)}})
if pioner == 'miku':
miku.send_chat_action(id, 'typing')
time.sleep(4)
miku.send_message(-1001351496983,
'Спасибо, [' + x['pionername'] + '](tg://user?id=' + str(
x['id']) + ')! Сама бы я в жизни не донесла их... А теперь пойдем в музыкальный клуб, я же обещала чай!', parse_mode='markdown')
# Users currently taking part in the card game.
cardplayers = []
# Per-character state dicts: a few RPG-style stats plus runtime fields.
# 'controller' - user currently controlling the character, 'bot' - the
# telebot instance speaking for it, 'whohelps' - id of the user that was
# asked for help, 'timer' - the pending help-timeout threading.Timer.
# NOTE(review): 'strenght' is a typo for 'strength', but the key is read
# under this spelling elsewhere, so it must not be renamed here alone.
alisastats = {
    'strenght': 1,
    'agility': 2,
    'intelligence': 3,
    'controller': None,
    'bot': alisa,
    'whohelps': None
}
lenastats = {
    'strenght': 2,
    'agility': 2,
    'intelligence': 2,
    'whohelps': None,
    'timer': None,
    'controller': None,
    'bot': lena
}
mikustats = {
    'strenght': 2,
    'agility': 2,
    'intelligence': 2,
    'controller': None,
    'bot': miku,
    'whohelps': None,
    'timer':None
}
ulianastats = {
    'strenght': 1,
    'agility': 4,
    'intelligence': 1,
    'controller': None,
    'bot': uliana,
    'whohelps': None,
    'timer': None
}
slavyastats = {
    'strenght': 1,
    'agility': 1,
    'whohelps': None,
    'timer': None,
    'intelligence': 4,
    'controller': None,
    'bot': slavya
}
# Electronic additionally tracks the state of the card game he hosts.
electronicstats = {
    'strenght': 3,
    'agility': 1,
    'intelligence': 4,
    'waitingplayers': 0,
    'playingcards': 0,
    'cardsturn': 0,
    'controller': None,
    'bot': electronic
}
zhenyastats = {
    'strenght': 2,
    'agility': 1,
    'intelligence': 3,
    'controller': None,
    'bot': zhenya
}
tolikstats = {
    'strenght': 2,
    'agility': 2,
    'intelligence': 2,
    'controller': None,
    'bot': tolik
}
shurikstats = {
    'strenght': 2,
    'agility': 1,
    'intelligence': 4,
    'controller': None,
    'bot': shurik
}
# Camp leader: 'lineyka' holds the morning line-up queue.
odstats = {
    'lineyka': [],
    'waitforlineyka': 0,
    'controller': None,
    'bot': bot
}
semenstats = {
    'controller': None,
    'bot': semen
}
pioneerstats = {
    'controller': None,
    'bot': pioneer
}
# Characters that can be taken over by users.
# NOTE(review): semenstats and pioneerstats are not registered here -
# confirm whether that is intended.
ctrls = []
ctrls.append(odstats)
ctrls.append(electronicstats)
ctrls.append(slavyastats)
ctrls.append(zhenyastats)
ctrls.append(ulianastats)
ctrls.append(mikustats)
ctrls.append(lenastats)
ctrls.append(alisastats)
ctrls.append(shurikstats)
ctrls.append(tolikstats)
# Canteen schedule: breakfast / lunch / dinner ("HH:MM" strings).
zavtrak = '9:00'
obed = '14:00'
uzhin = '21:00'
def findindex(x, worklist=None):
    """Return the index of the (last) entry in the work list whose
    'name' equals *x*.

    Keeps the original semantics of returning the index of the last
    matching entry when duplicates exist, but raises a clear ValueError
    instead of an UnboundLocalError when no entry matches.

    :param x: name to look up
    :param worklist: sequence of dicts with a 'name' key; defaults to
        the module-level ``works`` list (backward compatible)
    :raises ValueError: if no entry is named *x*
    """
    if worklist is None:
        worklist = works
    index = None
    for i, entry in enumerate(worklist):
        if entry['name'] == x:
            index = i
    if index is None:
        raise ValueError("no work entry named %r" % (x,))
    return index
def randomhelp():
    """Every 7-17 minutes pick a random registered player and have a
    random pioneer publicly ask them for help (daytime only)."""
    # Re-arm the timer first so one failed round does not stop the cycle.
    timer = threading.Timer(random.randint(420, 1000), randomhelp)
    timer.start()
    global rds
    if rds == True:
        pioneer_names = ['lena', 'alisa', 'slavya', 'uliana', 'miku']
        # Only users that picked a pioneer name are eligible.
        registered = [doc for doc in users.find({}) if doc['pionername'] != None]
        if len(registered) > 0:
            hour = gettime('h')
            # Do not bother people at night.
            if 7 <= hour <= 23:
                chosen_player = random.choice(registered)
                chosen_pioneer = random.choice(pioneer_names)
                helpto(chosen_player, chosen_pioneer)
def helpto(pioner, x):
    """Have pioneer-bot *x* publicly ask the player *pioner* for help.

    Sends a request to the camp group chat, remembers who was asked in
    the pioneer's stats dict and arms a 5-minute timer that calls
    ``helpcancel`` if the player does not react.

    Fixes: the 'lena' branch used to swallow all errors silently
    (``except: pass``); it now reports the traceback to the admin like
    every other branch, and all bare ``except:`` clauses were narrowed
    to ``except Exception:`` so Ctrl-C / SystemExit still propagate.

    :param pioner: user document from the ``users`` collection (needs
        'gender', 'pionername', 'id' and the respect counters)
    :param x: pioneer key - 'lena', 'alisa', 'slavya', 'uliana' or 'miku'
    """
    # Russian past-tense gender suffix ('' for male, 'ла' for female).
    if pioner['gender'] == 'male':
        g = ''
    else:
        g = 'ла'
    if x == 'lena':
        try:
            # High-respect players get a friendlier, familiar wording.
            if pioner['Lena_respect'] >= 85:
                text = '[' + pioner['pionername'] + '](tg://user?id=' + str(pioner[
                    'id']) + '), привет! Ты мне часто помогаешь, поэтому хотелось бы попросить тебя о помощи еще раз... Не откажешь?'
            else:
                text = '[' + pioner['pionername'] + '](tg://user?id=' + str(
                    pioner['id']) + '), привет. Не мог' + g + ' бы ты мне помочь?'
            lena.send_chat_action(-1001351496983, 'typing')
            time.sleep(4)
            m = lena.send_message(-1001351496983, text, parse_mode='markdown')
            lenastats['whohelps'] = pioner['id']
            t = threading.Timer(300, helpcancel, args=['lena', m, pioner['id']])
            t.start()
            lenastats['timer'] = t
            sendstick(lena, 'CAADAgADaQADgi0zD9ZBO-mNcLuBAg')
        except Exception:
            # Was a silent 'pass'; report to the admin like the siblings.
            bot.send_message(441399484, traceback.format_exc())
    if x == 'alisa':
        try:
            if pioner['Alisa_respect'] >= 85:
                text = '[' + pioner['pionername'] + '](tg://user?id=' + str(
                    pioner['id']) + '), привет, я же знаю, что ты любишь повеселиться! Готов на этот раз?'
            else:
                text = '[' + pioner['pionername'] + '](tg://user?id=' + str(pioner[
                    'id']) + '), смотри, куда идёшь! Должен будешь, и долг отработаешь прямо сейчас. Мне тут помощь нужна в одном деле...'
            alisa.send_chat_action(-1001351496983, 'typing')
            time.sleep(4)
            m = alisa.send_message(-1001351496983, text, parse_mode='markdown')
            alisastats['whohelps'] = pioner['id']
            t = threading.Timer(300, helpcancel, args=['alisa', m, pioner['id']])
            t.start()
            alisastats['timer'] = t
            sendstick(alisa, 'CAADAgADOQADgi0zDztSbkeWq3BEAg')
        except Exception:
            bot.send_message(441399484, traceback.format_exc())
    if x == 'slavya':
        try:
            if pioner['Slavya_respect'] >= 85:
                text = 'Привет, ' + '[' + pioner['pionername'] + '](tg://user?id=' + str(pioner[
                    'id']) + ')! Ты не раз выручал меня, поэтому я знаю, что тебе можно довериться. Поможешь мне с одним важным заданием?'
            else:
                text = 'Привет, [' + pioner['pionername'] + '](tg://user?id=' + str(
                    pioner['id']) + ')! Поможешь мне с одним важным заданием?'
            slavya.send_chat_action(-1001351496983, 'typing')
            time.sleep(4)
            m = slavya.send_message(-1001351496983, text, parse_mode='markdown')
            slavyastats['whohelps'] = pioner['id']
            t = threading.Timer(300, helpcancel, args=['slavya', m, pioner['id']])
            t.start()
            slavyastats['timer'] = t
            sendstick(slavya, 'CAADAgADTAADgi0zD6PLpc722Bz3Ag')
        except Exception:
            bot.send_message(441399484, traceback.format_exc())
    if x == 'uliana':
        try:
            if pioner['Uliana_respect'] >= 85:
                text = 'Привет, ' + '[' + pioner['pionername'] + '](tg://user?id=' + str(
                    pioner['id']) + ')! Мне не помешала бы помощь в одном деле... Я знаю, что ты согласишься!'
            else:
                text = 'Эй, [' + pioner['pionername'] + '](tg://user?id=' + str(
                    pioner['id']) + ')! Поможешь мне с одним делом?'
            uliana.send_chat_action(-1001351496983, 'typing')
            time.sleep(4)
            m = uliana.send_message(-1001351496983, text, parse_mode='markdown')
            ulianastats['whohelps'] = pioner['id']
            t = threading.Timer(300, helpcancel, args=['uliana', m, pioner['id']])
            t.start()
            ulianastats['timer'] = t
            sendstick(uliana, 'CAADAgADLwADgi0zD7_x8Aph94DmAg')
        except Exception:
            bot.send_message(441399484, traceback.format_exc())
    if x == 'miku':
        try:
            # Miku has a single wording regardless of respect level.
            text = 'Привет, [' + pioner['pionername'] + '](tg://user?id=' + str(
                pioner['id']) + ')! Ты, случайно, не занят? У меня тут такая ситуация, надо колонки из музыкального кружка на сцену перетащить, я ' + \
                'кибернетиков просила, но они чем-то очень сильно заняты в своем клубе... Поможешь? А я тебя потом чаем угощу!'
            miku.send_chat_action(-1001351496983, 'typing')
            time.sleep(4)
            m = miku.send_message(-1001351496983, text, parse_mode='markdown')
            mikustats['whohelps'] = pioner['id']
            t = threading.Timer(300, helpcancel, args=['miku', m, pioner['id']])
            t.start()
            mikustats['timer'] = t
            sendstick(miku, 'CAACAgIAAxkBAAIm3mJGInusdARgWct95yz14Q9Vm4lPAAJ7AAOCLTMPQuso0_ttgJcjBA')
        except Exception:
            bot.send_message(441399484, traceback.format_exc())
def helpcancel(pioner, m, userid):
    """Timeout handler: the player ignored *pioner*'s help request.

    Clears the pending request, posts a disappointed reply to the
    original message and (except for Miku) decrements the player's
    respect with that pioneer.

    :param pioner: pioneer key ('lena', 'alisa', 'slavya', 'uliana', 'miku')
    :param m: Message object of the original help request (replied to)
    :param userid: telegram id of the player that was asked
    """
    # assumes the user document still exists - TODO confirm find_one
    # cannot return None here.
    user = users.find_one({'id': userid})
    if pioner == 'lena':
        lenastats['whohelps'] = None
        lena.send_chat_action(-1001351496983, 'typing')
        time.sleep(4)
        lena.send_message(-1001351496983, 'Ты, наверное, сейчас занят... Прости, что побеспокоила.',
                          reply_to_message_id=m.message_id)
        if user['Lena_respect'] > 0:
            users.update_one({'id': user['id']}, {'$inc': {'Lena_respect': -1}})
    if pioner == 'alisa':
        alisastats['whohelps'] = None
        alisa.send_chat_action(-1001351496983, 'typing')
        time.sleep(4)
        # Wording depends on how much the player is liked.
        if user['Alisa_respect'] < 85:
            alisa.send_message(-1001351496983, 'Ну и пожалуйста!', reply_to_message_id=m.message_id)
        else:
            alisa.send_message(-1001351496983, 'Ну как хочешь! Сама справлюсь.', reply_to_message_id=m.message_id)
        if user['Alisa_respect'] > 0:
            users.update_one({'id': user['id']}, {'$inc': {'Alisa_respect': -1}})
    if pioner == 'slavya':
        slavyastats['whohelps'] = None
        slavya.send_chat_action(-1001351496983, 'typing')
        time.sleep(4)
        if user['Slavya_respect'] < 85:
            slavya.send_message(-1001351496983, 'Ладно, спрошу кого-нибудь другого.', reply_to_message_id=m.message_id)
        else:
            slavya.send_message(-1001351496983, 'Ладно, ничего страшного - спрошу кого-нибудь другого.',
                                reply_to_message_id=m.message_id)
        if user['Slavya_respect'] > 0:
            users.update_one({'id': user['id']}, {'$inc': {'Slavya_respect': -1}})
    if pioner == 'uliana':
        ulianastats['whohelps'] = None
        uliana.send_chat_action(-1001351496983, 'typing')
        time.sleep(4)
        if user['Uliana_respect'] < 85:
            uliana.send_message(-1001351496983, 'Ой, ну и ладно! Найду того, кому интересно!',
                                reply_to_message_id=m.message_id)
        else:
            uliana.send_message(-1001351496983, 'Ладно, как хочешь. Но если появится желание - говори!',
                                reply_to_message_id=m.message_id)
        if user['Uliana_respect'] > 0:
            users.update_one({'id': user['id']}, {'$inc': {'Uliana_respect': -1}})
    if pioner == 'miku':
        mikustats['whohelps'] = None
        miku.send_chat_action(-1001351496983, 'typing')
        time.sleep(4)
        # NOTE(review): unlike the other pioneers, Miku does not
        # decrement respect here - confirm this is intended.
        miku.send_message(-1001351496983, 'Видимо, у тебя дела... Но если вдруг освободишься - обязательно скажи мне!',
                          reply_to_message_id=m.message_id)
def randomact():
    """Every ~1.4-5 hours play one of several scripted public dialogues
    between pioneer bots in the camp group chat (only while rds is on)."""
    # Re-arm the timer first so the loop keeps running.
    t = threading.Timer(random.randint(4900, 18000), randomact)
    t.start()
    global rds
    if rds == True:
        lisst = ['talk_uliana+olgadmitrievna', 'talk_uliana+alisa', 'talk_el+shurik', 'talk_miku+slavya']
        x = random.choice(lisst)
        # Scene: the camp leader catches Uliana with stolen sweets.
        if x == 'talk_uliana+olgadmitrievna':
            bot.send_chat_action(-1001351496983, 'typing')
            time.sleep(4)
            bot.send_message(-1001351496983, nametopioner('uliana') + ', а ну стой! Ты эти конфеты где взяла?',
                             parse_mode='markdown')
            sendstick(bot, 'CAADAgADtwADgi0zD-9trZ_s35yQAg')
            time.sleep(1)
            uliana.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            uliana.send_message(-1001351496983, 'Какие конфеты?')
            sendstick(uliana, 'CAADAgADHQADgi0zD1aFI93sTseZAg')
            time.sleep(2)
            bot.send_chat_action(-1001351496983, 'typing')
            time.sleep(3)
            bot.send_message(-1001351496983, 'Те, что ты за спиной держишь! Быстро верни их в столовую!')
            time.sleep(1)
            uliana.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            uliana.send_message(-1001351496983, 'Хорошо, <NAME>...')
            sendstick(uliana, 'CAADAgADJQADgi0zD1PW7dDuU5hCAg')
        # Scene: Alisa teases Uliana about the stolen sweets.
        if x == 'talk_uliana+alisa':
            alisa.send_chat_action(-1001351496983, 'typing')
            time.sleep(3)
            alisa.send_message(-1001351496983, nametopioner('uliana') + ', не боишься, что <NAME>итриевна спалит?',
                               parse_mode='markdown')
            time.sleep(1)
            uliana.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            uliana.send_message(-1001351496983, 'Ты о чём?')
            time.sleep(2)
            alisa.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            alisa.send_message(-1001351496983, 'О конфетах, которые ты украла!')
            sendstick(alisa, 'CAADAgADOwADgi0zDzD8ZNZXu5LHAg')
            time.sleep(1)
            uliana.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            uliana.send_message(-1001351496983, 'Да не, не спалит! Я так уже много раз делала!')
            sendstick(uliana, 'CAADAgADKQADgi0zD_inNy0pZyh0Ag')
            time.sleep(2)
            alisa.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            alisa.send_message(-1001351496983, 'Тогда делись!')
            time.sleep(1)
            uliana.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            uliana.send_message(-1001351496983, 'Тогда пошли в домик!')
        # Scene: Electronic and Shurik argue about time travel.
        if x == 'talk_el+shurik':
            electronic.send_chat_action(-1001351496983, 'typing')
            time.sleep(3)
            electronic.send_message(-1001351496983,
                                    nametopioner('shurik') + ', как думаешь, возможно ли перемещение во времени?',
                                    parse_mode='markdown')
            sendstick(electronic, 'CAADAgAD0wADgi0zD1LBx9yoFTBiAg')
            time.sleep(1)
            shurik.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            shurik.send_message(-1001351496983, 'В теории... Хотя нет, это антинаучно.')
            sendstick(shurik, 'CAADAgAD5QADgi0zDwyDLbq7ZQ4vAg')
            time.sleep(2)
            electronic.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            electronic.send_message(-1001351496983,
                                    'А мне вот кажется, что когда-нибудь прогресс дойдёт и до такого...')
        # Scene: Miku chats with Slavya about the borrowed guitar.
        if x == 'talk_miku+slavya':
            miku.send_chat_action(-1001351496983, 'typing')
            time.sleep(3)
            miku.send_message(-1001351496983,
                              'О, Славя, доброе утро!',
                              parse_mode='markdown')
            sendstick(miku, 'CAACAgIAAxkBAAIm2GJGHHEtq_wMxq9tAtbNfuer8ANsAAJ9AAOCLTMPfRt-eLWAJRkjBA')
            time.sleep(1)
            slavya.send_chat_action(-1001351496983, 'typing')
            time.sleep(2)
            slavya.send_message(-1001351496983, 'Доброе!')
            sendstick(slavya, 'CAACAgIAAxkB<KEY>')
            time.sleep(2)
            miku.send_chat_action(-1001351496983, 'typing')
            time.sleep(3)
            miku.send_message(-1001351496983,
                              'А ты случайно не видела Алису? А то она гитару обещала мне сегодня одолжить, а то у второй гитары в музыкальном кружке '+
                              'струна порвалась... Но сейчас же только утро, может, она спит еще? А она точно не забыла?')
            time.sleep(2)
            slavya.send_chat_action(-1001351496983, 'typing')
            time.sleep(1)
            slavya.send_message(-1001351496983, 'Нет, не видела...')
            sendstick(slavya, 'CA<KEY>')
            time.sleep(1)
            miku.send_chat_action(-1001351496983, 'typing')
            time.sleep(3)
            miku.send_message(-1001351496983,
                              'Но ты если увидишь её то передай, что я её очень жду! Я именно сегодня хотела к концерту подготовиться, новую мелодию '+
                              'разучить... А ты случайно не хочешь тоже что-то сыграть? Я могу тебя научить играть на гитаре, флейте, аккордеоне или... Славя, ты куда?')
            sendstick(miku, 'CAACAgIAAxkBAAIm22JGIM8lYl16Aqh9wALRr6BWoK9lAAKBAAOCLTMPVapTRGHE3q8jBA')
# Start the scheduled-events clock and arm the first help-request
# round two minutes after startup.
checktime()
t = threading.Timer(120, randomhelp)
t.start()
def polling(pollingbot):
    """Blocking long-poll loop for one bot; keeps retrying on errors
    (none_stop=True) with a 10-minute network timeout."""
    pollingbot.polling(none_stop=True, timeout=600)
# Arm the first scripted-dialogue round two minutes after startup.
t = threading.Timer(120, randomact)
t.start()
if True:
print('7777')
users.update_many({}, {'$set': {'working': 0}})
users.update_many({}, {'$set': | |
numbers (Z2, Beckman Coulter, Villepinte, France).",
{"entities": []}),
("The result was not statistically significant (p > .05)",
{"entities": []}),
("One-Way ANOVA revealed F(2,32)=1.203, p > .05",
{"entities": []}),
("There was a large effect of stimulation of the nucleus of darkschewitz (p < .01, **)",
{"entities": [(47, 70, LABEL)]}),
("Logistic regression analysis showed no difference between the two lines (p=.2384)",
{"entities": []}),
("In the object recognition test, animals of both groups detected novelty, with longer exploration durations of the novel object vs the familiar ones (vehicle: t11 = −2.",
{"entities": []}),
("For details of procedures, please refer to Material and Methods S1.",
{"entities": []}),
("05, ## p<.05",
{"entities": []}),
("Accordingly, blood flow measurements with PET have been used to investigate color discrimination tasks in rhesus monkeys [36].",
{"entities": []}),
("Custom-made chromatoscope consisting of the following parts: (a) an optical construction with a connection site for an optical fibre and a white colored reflector shield, (b) a tripod for precise and reproducible positioning above the animal eyes, (c) a filter holder with groove to insert Wratten Kodak filters, Blue (i) and Yellow (ii) as well as an impermeable film for monocular stimulation, (d) a 150 W light source, which produces luminosity with a color temperature of about 3200 K and 20 lumens/watt, (e) an optical fibre, which is guiding the light to the connector at the optical construction (a).",
{"entities": []}),
("However, using the uncorrected data and increasing the threshold cluster size in SPM (showing only areas where at least 10 or 20 adjacent voxels are active) did not change the significant results shown in Fig 8 indicating that the observed activations are real.",
{"entities": []}),
("Very recently, real-time imaging of brain activity under visual stimulation in freely moving rats using functional ultrasound has been performed [49].",
{"entities": []}),
("However, its adaptability and application for our present study design using a steady stimulation may not be feasible.",
{"entities": []}),
("Furthermore, in the evolutional trend genetic changes refined the downstream neural circuitry that more efficiently extract color from other sensory information over many generations.",
{"entities": []}),
("Table C: Original data (Standardized Uptake Values).",
{"entities": []}),
("Five minutes later, isoflurane application was completely switched off.",
{"entities": []}),
("No further processing filter was needed because the minor artifacts of the imaging system were small in comparison with the recorded field potential.",
{"entities": []}),
("8 mm, FOV 37 mm × 37 mm, matrix 256 × 256, RARE factor 8, and number of averages four.",
{"entities": []}),
###TRAINED TO HERE
("Genetic ablation and optogenetics revealed that the DP/DTT→DMH pathway drives thermogenic, hyperthermic, and cardiovascular sympathetic responses to psychosocial stress without contributing to basal homeostasis.",
{"entities": [(52, 54, LABEL), (55, 58, LABEL), (59, 62, LABEL), (71, 89, FUNC), (91, 103, FUNC), (109, 145, FUNC), (149, 168, FUNC)]}),
("AAAS is a partner of HINARI, AGORA, OARE, CHORUS, CLOCKSS, CrossRef and COUNTER.",
{"entities": []}),
("Although the corticolimbic circuits that process stress and emotions are undetermined, the PVT and MD thalamic nuclei, which provide stress inputs to the DP/DTT, constitute a fear stress circuit involving the amygdala (30, 31).",
{"entities": [(13, 35, LABEL), (41, 68, FUNC), (91, 94, LABEL), (99, 117, LABEL), (125, 146, FUNC), (154, 156, LABEL), (157, 160, LABEL), (175, 186, FUNC), (209, 217, LABEL)]}),
("In panic disorder, glutamatergic inputs to the DMH to develop the panic-prone state (32) may be provided from the DP/DTT.",
{"entities": [(19, 32, NT), (47, 50, LABEL), (66, 83, FUNC), (114, 116, LABEL), (117, 120, LABEL)]}),
("Imagine you are standing in your office and all of a sudden a man walks in and attacks you with a knife.",
{"entities": []}),
("Police officers are trained to deal with acute threat and to inhibit their automatic action tendencies in order to optimize adequate response capacity.",
{"entities": []}),
("When a stimulus or a situation is perceived to be threatening, the brain activates many neuronal circuits to adapt to the demand, the most well-known being the autonomic nervous system (ANS).",
{"entities": [(67, 72, LABEL), (73, 105, FUNC), (160, 184, LABEL), (186, 189, LABEL)]}),
("Before addressing these questions, I first describe the phenomenology of freezing and fight-or-flight reactions as well as the psychophysiological and neural mechanisms associated with these threat-related defensive states.",
{"entities": []}),
("Freezing, a state of parasympathetic dominance.",
{"entities": []}),
("For instance, negatively valenced and highly arousing pictures elicit sympathetic changes such as galvanic skin responses [9] and pupil dilation [76].",
{"entities": []}),
("In a visual discrimination, paradigm subjects had to indicate whether the target was tilted to the left or to the right with respect to the upright position.",
{"entities": []}),
("After a variable time interval, the target pulled a phone or a gun (cue), upon which the participant had to respond as fast as possible by shooting (go) or withholding (no-go), respectively.",
{"entities": []}),
("Finally, building on animal models, research in human developmental and clinical samples has provided starting points for investigating the role of freezing in the development of psychopathology.",
{"entities": []}),
("Step-down reaction periods and error times were higher, and step-down latency was lower, in the model, NC, and NRSF shRNA groups than in the normal group at all time points (all P < 0.003",
{"entities": []}),
("Note: NC, negative control; NRSF, neuron-restrictive silencer factor; d, day; *P < 0.05",
{"entities": []}),
("A laser Doppler flow meter (PeriFlux 5000, Perimed, Stockholm, Sweden) was used to measure regional CBF in the cortex.",
{"entities": []}),
("After anesthesia was administered (chloral hydrate 350 mg/kg), a midline incision was made on the neck.",
{"entities": []}),
("Subsequently, a portion of the tissue was removed and stored at −80°C for use in quantitative real-time polymerase chain reaction (qRT-PCR) and western blotting.",
{"entities": []}),
("Sections were then incubated with diaminobenzidine (DBA) for 1~2 mins, rinsed again 3 times with PBS (2 mins/rinse), re-dyed for 1 min with hematoxylin, dehydrated, mounted, and sealed.",
{"entities": []}),
("Apoptotic cells were quantified in rat brain tissues according to the instructions of the DeadEndTM fluorescence labeling TUNEL detection kit (Promega Corp., Madison, WI, USA).",
{"entities": []}),
("In all cases, rats were experimentally naive at the start of experiments, and were habituated to housing conditions and experimenter handling for ⩾1 week before the start of experimental manipulations.",
{"entities": []}),
("The time that rats spent in the two compartments was recorded as the pre-conditioning baseline, and rats were assigned supersac solution in one compartment and water solution in the other compartment.",
{"entities": []}),
("Elevated plus-maze (Experiment 3): The elevated plus-maze (EPM) test was used to test anxiety-like behavior in conditions previously described.",
{"entities": []}),
("These lighting conditions produce % open arm times of 15–20% in our lab, consistent with the literature for EPM results observed in genetically heterogeneous rats (in results reported below, mean % open arm time is approximately equal to 14%).",
{"entities": []}),
("Samples of protein (15 μg) were subjected to SDS-polyacrylamide gel electrophoresis on 10% acrylamide gels by using a Tris/Glycine/SDS buffer system (Bio-Rad), followed by electrophoretic transfer to polyvinylidene difluoride membranes (GE Healthcare, Piscataway, NJ, USA).",
{"entities": []}),
("Rats (n=48) were trained to self-administer 10% w/v ethanol versus water in a two-lever operant situation during 30-min self-administration sessions, as described above, for a period of 18 days.",
{"entities": []}),
("Four days following the EPM test, rats were allowed to explore an apparatus with three chambers that differ in the visual (wall pattern) and tactile (floor composition) cues.",
{"entities": []}),
("Injection of KOP receptor agonists stimulates the release of CRF and glucocorticoids",
{"entities": [(13, 25, PHYS), (35, 64, FUNC), (69, 84, FUNC)]}),
("Lipopolysaccharide | |
0, 1)
self.copy_from = [self.parent_w.current_obj.name] # Preselect the current selected MEEG
self.copy_tos = list()
self.listw1 = CheckList(self.all_files, self.copy_from, ui_buttons=False, one_check=True)
self.listw2 = CheckList(self.all_files, self.copy_tos)
layout.addWidget(self.listw1, 1, 0)
layout.addWidget(self.listw2, 1, 1)
copy_bt = QPushButton('Copy')
copy_bt.clicked.connect(self.copy_bads)
layout.addWidget(copy_bt, 2, 0)
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt, 2, 1)
self.setLayout(layout)
def copy_bads(self):
# Check, that at least one item is selected in each list and that the copy_from-item is in meeg_bad_channels
if len(self.copy_from) * len(self.copy_tos) > 0 and self.copy_from[0] in self.bad_channels_dict:
for copy_to in self.copy_tos:
copy_bad_chs = self.bad_channels_dict[self.copy_from[0]].copy()
copy_to_info = MEEG(copy_to, self.parent_w.mw.ct).load_info()
# Make sure, that only channels which exist too in copy_to are copied
for rm_ch in [r for r in copy_bad_chs if r not in copy_to_info['ch_names']]:
copy_bad_chs.remove(rm_ch)
self.bad_channels_dict[copy_to] = copy_bad_chs
class SubBadsWidget(QWidget):
    """A widget to assign bad channels to the files of a project.

    Left: a list of files; right: a scrollable grid with one checkbox per
    channel of the selected file.  Bad channels can alternatively be taken
    over from an interactive raw-plot (selection there is unidirectional
    Plot -> GUI).
    """

    def __init__(self, main_win):
        """
        :param main_win: The parent-window for the dialog
        """
        super().__init__(main_win)
        self.mw = main_win
        self.ct = main_win.ct
        self.pr = main_win.ct.pr
        self.setWindowTitle('Assign bad_channels for your files')
        self.bad_chkbts = dict()   # channel-name -> QCheckBox
        self.info_dict = dict()    # cache: file-name -> measurement-info
        self.current_obj = None    # currently selected MEEG-object
        self.raw = None            # loaded raw of the current object
        self.raw_fig = None        # open matplotlib raw-plot (or None)
        self.init_ui()

    def init_ui(self):
        """Build the file-list, the checkbox scroll-area and the button-row."""
        self.layout = QGridLayout()
        file_list = self.pr.all_meeg + self.pr.all_erm
        self.files_widget = CheckDictList(file_list, self.pr.meeg_bad_channels, title='Files')
        self.files_widget.currentChanged.connect(self.bad_dict_selected)
        self.files_widget.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Preferred)
        self.layout.addWidget(self.files_widget, 0, 0)
        self.bt_scroll = QScrollArea()
        self.bt_scroll.setWidgetResizable(True)
        self.layout.addWidget(self.bt_scroll, 0, 1)
        # Add Buttons
        self.bt_layout = QHBoxLayout()
        plot_bt = QPushButton('Plot Raw')
        plot_bt.clicked.connect(self.plot_raw_bad)
        self.bt_layout.addWidget(plot_bt)
        copy_bt = QPushButton('Copy Bads')
        copy_bt.clicked.connect(partial(CopyBadsDialog, self))
        self.bt_layout.addWidget(copy_bt)
        self.save_raw_annot = QCheckBox('Save Annotations')
        self.bt_layout.addWidget(self.save_raw_annot)
        self.layout.addLayout(self.bt_layout, 1, 0, 1, 2)
        self.setLayout(self.layout)

    def update_selection(self):
        """Sync the checkboxes with the bad channels of the current object."""
        # Clear all entries first.
        for chkbt in self.bad_chkbts.values():
            chkbt.setChecked(False)
        # Check the existing bads.  Iterate over a COPY of the list: entries
        # without a matching checkbox (and thus not in ch_names) are removed
        # from the list while looping, and removing from the list being
        # iterated would silently skip the following element.
        for bad in list(self.current_obj.bad_channels):
            if bad in self.bad_chkbts:
                self.bad_chkbts[bad].setChecked(True)
            else:
                # Remove bad channel if not existing in bad_chkbts (and thus not in ch_names)
                self.current_obj.bad_channels.remove(bad)

    def _make_bad_chbxs(self, info):
        """Populate the scroll-area with one checkbox per channel in *info*."""
        # NOTE(review): this delay predates this revision; presumably it lets
        # the loading-dialog close before the grid is rebuilt - confirm
        # before removing.
        time.sleep(1)
        # Store info in dictionary so it is not loaded twice from file.
        self.info_dict[self.current_obj.name] = info
        chbx_w = QWidget()
        chbx_w.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
        self.chbx_layout = QGridLayout()
        row = 0
        column = 0
        h_size = 0
        # Currently, you have to fine-tune the max_h_size,
        # because it doesn't seem to reflect exactly the actual width
        max_h_size = int(self.bt_scroll.geometry().width() * 0.85)
        self.bad_chkbts = dict()
        # Make Checkboxes for channels in info, wrapping to a new grid-row
        # once the accumulated width exceeds max_h_size.
        for ch_name in info['ch_names']:
            chkbt = QCheckBox(ch_name)
            chkbt.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
            chkbt.clicked.connect(self.bad_ckbx_assigned)
            self.bad_chkbts[ch_name] = chkbt
            h_size += chkbt.sizeHint().width()
            if h_size > max_h_size:
                column = 0
                row += 1
                h_size = chkbt.sizeHint().width()
            self.chbx_layout.addWidget(chkbt, row, column)
            column += 1
        chbx_w.setLayout(self.chbx_layout)
        # Remove previous buttons if existing
        if self.bt_scroll.widget():
            self.bt_scroll.takeWidget()
        self.bt_scroll.setWidget(chbx_w)
        self.update_selection()

    def make_bad_chbxs(self):
        """Create the channel-checkboxes, loading the info in a worker if needed."""
        if self.current_obj:
            # Don't load info twice from file
            if self.current_obj.name in self.info_dict:
                self._make_bad_chbxs(self.info_dict[self.current_obj.name])
            else:
                worker_dlg = WorkerDialog(self, self.current_obj.load_info, title='Loading Channels...')
                worker_dlg.thread_finished.connect(self._make_bad_chbxs)

    def bad_dict_selected(self, current, _):
        """Slot: a new file was selected in the file-list."""
        self.current_obj = MEEG(current, self.ct)
        # Close current Plot-Window
        if self.raw_fig:
            plt.close(self.raw_fig)
        self.make_bad_chbxs()

    def _assign_bad_channels(self, bad_channels):
        """Write *bad_channels* into the project and the current object."""
        # Directly replace value in bad_channels_dict (needed for first-time assignment)
        self.current_obj.pr.meeg_bad_channels[self.current_obj.name] = bad_channels
        # Restore/Establish reference to direct object-attribute
        self.current_obj.bad_channels = bad_channels
        self.files_widget.content_changed()

    def bad_ckbx_assigned(self):
        """Slot: a checkbox was toggled - collect all currently checked channels."""
        bad_channels = [ch for ch, chkbt in self.bad_chkbts.items() if chkbt.isChecked()]
        self._assign_bad_channels(bad_channels)

    def set_chkbx_enable(self, enable):
        """Enable/disable all channel-checkboxes."""
        for chkbt in self.bad_chkbts.values():
            chkbt.setEnabled(enable)

    def get_selected_bads(self, _):
        """Slot: the raw-plot was closed - take over the bads selected there."""
        bad_channels = self.raw.info['bads']
        self._assign_bad_channels(bad_channels)
        self.update_selection()
        self.set_chkbx_enable(True)
        if self.save_raw_annot.isChecked():
            WorkerDialog(self, self.current_obj.save_raw, raw=self.raw, show_console=True,
                         title='Saving Raw with Annotations')

    def plot_raw_bad(self):
        """Open an interactive raw-plot for bad-channel selection."""
        # Disable CheckBoxes to avoid confusion (Bad-Selection only goes unidirectional from Plot>GUI)
        self.set_chkbx_enable(False)
        plot_dialog = QDialog(self)
        plot_dialog.setWindowTitle('Opening Raw-Plot...')
        plot_dialog.open()
        self.raw = self.current_obj.load_raw()
        try:
            events = self.current_obj.load_events()
        except FileNotFoundError:
            # Events are optional for the plot.
            events = None
        self.raw_fig = self.raw.plot(events=events, n_channels=30, bad_color='red', title=self.current_obj.name)
        # Connect Closing of Matplotlib-Figure to assignment of bad-channels
        self.raw_fig.canvas.mpl_connect('close_event', self.get_selected_bads)
        plot_dialog.close()

    def resizeEvent(self, event):
        """Rebuild the checkbox-grid so the row-wrapping fits the new width."""
        if self.current_obj:
            self.make_bad_chbxs()
            self.update_selection()
        event.accept()

    def closeEvent(self, event):
        """Close an open raw-plot together with the widget."""
        # Both branches of the previous if/else accepted the event; the
        # duplicate else-branch was collapsed.
        if self.raw_fig:
            plt.close(self.raw_fig)
        event.accept()
class SubBadsDialog(QDialog):
    """Standalone dialog wrapping SubBadsWidget with an extra Close-button."""

    def __init__(self, main_win):
        super().__init__(main_win)
        widget = SubBadsWidget(main_win)
        dlg_layout = QVBoxLayout()
        dlg_layout.addWidget(widget)
        # The Close-button lives inside the widget's own button-row.
        close_button = QPushButton('Close', self)
        close_button.clicked.connect(self.close)
        widget.bt_layout.addWidget(close_button)
        self.setLayout(dlg_layout)
        set_ratio_geometry(0.8, self)
        self.show()
class SubBadsWizPage(QWizardPage):
    """Wizard page embedding the bad-channel assignment widget."""

    def __init__(self, main_win, title):
        super().__init__()
        self.setTitle(title)
        self.sub_bad_w = SubBadsWidget(main_win)
        page_layout = QVBoxLayout()
        page_layout.addWidget(self.sub_bad_w)
        self.setLayout(page_layout)
class SubjectWizard(QWizard):
    """Wizard guiding through import and preparation of subjects."""

    def __init__(self, main_win):
        super().__init__(main_win)
        self.mw = main_win
        self.setWindowTitle('Subject-Wizard')
        self.setWizardStyle(QWizard.ModernStyle)
        self.setOption(QWizard.HaveHelpButton, False)
        set_ratio_geometry(0.6, self)
        center(self)
        self.add_pages()
        self.open()

    def _wrap_in_page(self, title, widget):
        # Small helper: put a single widget into a titled QWizardPage.
        page = QWizardPage()
        page.setTitle(title)
        page_layout = QVBoxLayout()
        page_layout.addWidget(widget)
        page.setLayout(page_layout)
        return page

    def add_pages(self):
        """Create all wizard pages and register them in order."""
        self.add_files_page = self._wrap_in_page('Import .fif-Files', AddFilesWidget(self.mw))
        self.add_mri_page = self._wrap_in_page('Import MRI-Files', AddMRIWidget(self.mw))
        self.assign_mri_page = FileDictWizardPage(self.mw, 'mri', 'Assign File --> MRI')
        self.assign_erm_page = FileDictWizardPage(self.mw, 'erm', 'Assign File --> ERM')
        self.assign_bad_channels_page = SubBadsWizPage(self.mw, 'Assign Bad-Channels')
        for page in (self.add_files_page, self.add_mri_page, self.assign_mri_page,
                     self.assign_erm_page, self.assign_bad_channels_page):
            self.addPage(page)
class EventIDGui(QDialog):
    """Dialog to assign trial-descriptors to event-ids per file.

    For each file an event-id dictionary (descriptor -> integer id) can be
    edited and the descriptors to include in the analysis can be checked.
    The results are stored in the project (meeg_event_id / sel_event_id).
    """

    def __init__(self, main_win):
        super().__init__(main_win)
        self.mw = main_win
        self.ct = main_win.ct
        self.pr = main_win.ct.pr
        self.name = None                # name of the currently selected file
        self.event_id = dict()          # descriptor -> event-id of the current file
        self.labels = list()            # selectable trial-descriptors
        self.checked_labels = list()    # descriptors selected for analysis
        self.layout = QVBoxLayout()
        self.init_ui()
        self.open()

    def init_ui(self):
        """Build the file-list, the event-id editor and the trial-selection list."""
        list_layout = QHBoxLayout()
        self.files = CheckDictList(self.pr.all_meeg, self.pr.meeg_event_id, title='Files')
        self.files.currentChanged.connect(self.file_selected)
        list_layout.addWidget(self.files)
        event_id_layout = QVBoxLayout()
        self.event_id_widget = EditDict(self.event_id, ui_buttons=True, title='Event-ID')
        # Connect editing of Event-ID-Table to update of Check-List
        self.event_id_widget.dataChanged.connect(self.update_check_list)
        self.event_id_widget.setToolTip('Add a Trial-Descriptor (as key) for each Event-ID (as value) '
                                        'you want to include it in you analysis.\n'
                                        'You can assign multiple descriptors per ID by '
                                        'separating them by "/"')
        event_id_layout.addWidget(self.event_id_widget)
        self.event_id_label = QLabel()
        event_id_layout.addWidget(self.event_id_label)
        list_layout.addLayout(event_id_layout)
        self.check_widget = CheckList(title='Select IDs')
        list_layout.addWidget(self.check_widget)
        self.layout.addLayout(list_layout)
        bt_layout = QHBoxLayout()
        apply_bt = QPushButton('Apply to')
        apply_bt.clicked.connect(partial(EvIDApply, self))
        bt_layout.addWidget(apply_bt)
        show_events = QPushButton('Show Events')
        show_events.clicked.connect(self.show_events)
        bt_layout.addWidget(show_events)
        close_bt = QPushButton('Close')
        close_bt.clicked.connect(self.close)
        bt_layout.addWidget(close_bt)
        self.layout.addLayout(bt_layout)
        self.setLayout(self.layout)

    def get_event_id(self):
        """Load the event-id of the current file and display the unique event-ids found."""
        if self.name in self.pr.meeg_event_id:
            self.event_id = self.pr.meeg_event_id[self.name]
        else:
            self.event_id = dict()
        self.event_id_widget.replace_data(self.event_id)
        try:
            # Load Events from File
            meeg = MEEG(self.name, self.ct, suppress_warnings=True)
            events = meeg.load_events()
        except FileNotFoundError:
            self.event_id_label.setText(f'No events found for {self.name}')
        else:
            # Third events-column holds the event-ids.
            ids = np.unique(events[:, 2])
            self.event_id_label.setText(f'Events found: {ids}')

    def save_event_id(self):
        """Store event-id and selected trials of the current file in the project."""
        if self.name:
            if len(self.event_id) > 0:
                # Write Event-ID to Project
                self.pr.meeg_event_id[self.name] = self.event_id
                # Get selected Trials and write them to meeg.pr
                self.pr.sel_event_id[self.name] = self.checked_labels

    def file_selected(self, current, _):
        """Called when File from file_widget is selected."""
        # Save event_id for previous file
        self.save_event_id()
        # Get event-id for selected file and update widget
        self.name = current
        self.get_event_id()
        # Load checked trials
        if self.name in self.pr.sel_event_id:
            self.checked_labels = self.pr.sel_event_id[self.name]
        else:
            self.checked_labels = list()
        self.update_check_list()

    def update_check_list(self):
        """Rebuild the selectable trial-labels from the current event-id keys."""
        # Split multi-descriptor keys ("a/b") into single labels and flatten.
        flat_labels = [lbl for key in self.event_id.keys() if key != ''
                       for lbl in key.split('/')]
        if len(flat_labels) > 0:
            # Make sure that only unique labels exist.
            self.labels = list(set(flat_labels))
            # Drop checked labels which no longer occur in any event-id key.
            # Iterate over a COPY: removing from the list being iterated
            # would skip the following element (bug in the previous version).
            for chk_label in list(self.checked_labels):
                if not any(chk_label in key for key in self.event_id):
                    self.checked_labels.remove(chk_label)
        else:
            self.labels = list()
        self.check_widget.replace_data(self.labels)
        self.check_widget.replace_checked(self.checked_labels)

    def show_events(self):
        """Plot the events of the current file (warn if no events exist)."""
        try:
            meeg = MEEG(self.name, self.ct, suppress_warnings=True)
            events = meeg.load_events()
            mne.viz.plot_events(events, event_id=self.event_id or None, show=True)
        except FileNotFoundError:
            QMessageBox.warning(self, 'No events!',
                                f'No events found for {self.name}')

    def closeEvent(self, event):
        """Save the state of the last selected file before closing."""
        # Save event_id for last selected file
        self.save_event_id()
        event.accept()
class EvIDApply(QDialog):
    """Dialog to apply the current file's event-id and trial-selection to other files."""

    def __init__(self, parent):
        super().__init__(parent)
        self.p = parent
        self.apply_to = list()
        self.layout = QVBoxLayout()
        self.init_ui()
        self.open()

    def init_ui(self):
        self.layout.addWidget(QLabel(f'Apply {self.p.name} to:'))
        self.check_listw = CheckList(self.p.pr.all_meeg, self.apply_to)
        self.layout.addWidget(self.check_listw)
        button_row = QHBoxLayout()
        # Create the two buttons in one go to keep the layout code compact.
        for caption, slot in (('Apply', self.apply_evid), ('Close', self.close)):
            button = QPushButton(caption)
            button.clicked.connect(slot)
            button_row.addWidget(button)
        self.layout.addLayout(button_row)
        self.setLayout(self.layout)

    def apply_evid(self):
        # Copy, so the checked files don't share one dict/list instance with
        # the source file (the CheckList-Model would otherwise change the
        # selection for all of them).
        for target in self.apply_to:
            self.p.pr.meeg_event_id[target] = self.p.event_id.copy()
            self.p.pr.sel_event_id[target] = self.p.checked_labels.copy()
class CopyTrans(QDialog):
def __init__(self, main_win):
super().__init__(main_win)
self.mw = main_win
self.ct = main_win.ct
self.pr = main_win.ct.pr
# Get MEEGs, where a trans-file is already existing
self.from_meegs = list()
for meeg_name in self.pr.all_meeg:
meeg = MEEG(meeg_name, self.ct)
if isfile(meeg.trans_path):
self.from_meegs.append(meeg_name)
# Get the other MEEGs (wihtout trans-file)
self.to_meegs = [meeg for meeg in self.pr.all_meeg if meeg not in self.from_meegs]
self.current_meeg = None
self.copy_tos = list()
self.init_ui()
self.open()
def init_ui(self):
layout = QGridLayout()
from_list = SimpleList(self.from_meegs, title='From:')
from_list.currentChanged.connect(self.from_selected)
layout.addWidget(from_list, 0, 0)
self.to_list = CheckList(self.to_meegs, self.copy_tos, ui_button_pos='bottom', title='To:')
layout.addWidget(self.to_list, 0, 1)
copy_bt = QPushButton('Copy')
copy_bt.clicked.connect(self.copy_trans)
layout.addWidget(copy_bt, 1, 0)
close_bt = QPushButton('Close')
close_bt.clicked.connect(self.close)
layout.addWidget(close_bt, 1, 1)
self.setLayout(layout)
def _compare_digs(self, worker_signals):
self.copy_tos.clear()
# Get Digitization points
current_dig = self.current_meeg.load_info()['dig']
# Add all meeg, which have the exact same digitization points
# (assuming, that they can use the same trans-file)
worker_signals.pgbar_max.emit(len(self.to_meegs))
for n, to_meeg in enumerate(self.to_meegs):
worker_signals.pgbar_text.emit(f'Comparing: {to_meeg}')
if MEEG(to_meeg, self.ct).load_info()['dig'] == current_dig:
self.copy_tos.append(to_meeg)
worker_signals.pgbar_n.emit(n + 1)
self.to_list.content_changed()
def from_selected(self, current_meeg):
self.current_meeg = MEEG(current_meeg, self.ct)
WorkerDialog(self, | |
<gh_stars>10-100
import bisect
from collections import deque
import logging
import os
import pprint
import random
import warnings
from functools import partial
from typing import Union, Tuple
import h5py
import numpy as np
from omegaconf import DictConfig
import pandas as pd
import torch
from opencv_transforms import transforms
from torch.utils import data
from vidio import VideoReader
# from deepethogram.dataloaders import log
from deepethogram import projects
from deepethogram.data.augs import get_cpu_transforms
from deepethogram.data.utils import purge_unlabeled_elements_from_records, get_video_metadata, extract_metadata, \
find_labelfile, read_all_labels, get_split_from_records, remove_invalid_records_from_split_dictionary, \
make_loss_weight
from deepethogram.data.keypoint_utils import load_dlcfile, interpolate_bad_values, stack_features_in_time, \
expand_features_sturman
from deepethogram.file_io import read_labels
log = logging.getLogger(__name__)
# https://pytorch.org/docs/stable/data.html
class VideoIterable(data.IterableDataset):
    """Highly optimized Dataset for running inference on videos.

    Features:
        - Data is only read sequentially
        - Each frame is only read once
        - The input video is divided into NUM_WORKERS segments. Each worker reads its segment in parallel
        - Each clip is read with stride = 1. If sequence_length==3, the first clips would be frames [0, 1, 2],
          [1, 2, 3], [2, 3, 4], ... etc
    """

    def __init__(self,
                 videofile: Union[str, os.PathLike],
                 transform,
                 sequence_length: int = 11,
                 num_workers: int = 0,
                 mean_by_channels: Union[list, np.ndarray] = (0, 0, 0)):
        """Constructor for video iterable

        Parameters
        ----------
        videofile : Union[str, os.PathLike]
            Path to video file
        transform : callable
            CPU transforms (cropping, resizing)
        sequence_length : int, optional
            Number of images in one clip, by default 11
        num_workers : int, optional
            Number of DataLoader workers that will read this dataset, by default 0
        mean_by_channels : Union[list, np.ndarray], optional
            Per-channel fill value for the zero-padding frames, by default
            (0, 0, 0).  The default is a tuple (not a list) to avoid the
            shared-mutable-default pitfall; indexing behavior is identical.
        """
        super().__init__()
        assert os.path.isfile(videofile) or os.path.isdir(videofile)
        # One reader slot per worker; 0 is a placeholder until __iter__ opens it.
        self.readers = {i: 0 for i in range(num_workers)}
        self.videofile = videofile
        self.transform = transform
        self.start = 0
        self.sequence_length = sequence_length
        with VideoReader(self.videofile) as reader:
            self.N = len(reader)
        # Number of padding frames before frame 0 so every frame gets a
        # centered clip.
        self.blank_start_frames = self.sequence_length // 2
        self.cnt = 0
        self.mean_by_channels = self.parse_mean_by_channels(mean_by_channels)
        # NOTE: not great practice, but I want each dataset to know when to stop
        self.num_workers = num_workers
        self.buffer = deque([], maxlen=self.sequence_length)
        self.reset_counter = self.num_workers
        self._zeros_image = None
        self._image_shape = None
        self.get_image_shape()

    def __len__(self):
        return self.N

    def get_image_shape(self):
        """Read one frame to record the post-transform image shape."""
        with VideoReader(self.videofile) as reader:
            im = reader[0]
        im = self.transform(im)
        self._image_shape = im.shape

    def get_zeros_image(self):
        """Return a copy of the channel-mean-filled padding frame."""
        if self._zeros_image is None:
            if self._image_shape is None:
                raise ValueError('must set shape before getting zeros image')
            # ALWAYS ASSUME OUTPUT IS TRANSPOSED (channels first)
            self._zeros_image = np.zeros(self._image_shape, dtype=np.uint8)
            for i in range(3):
                self._zeros_image[i, ...] = self.mean_by_channels[i]
        return self._zeros_image.copy()

    def parse_mean_by_channels(self, mean_by_channels):
        """Convert a float (0-1) or int (0-255) channel mean to uint8 values."""
        if isinstance(mean_by_channels[0], (float, np.floating)):
            return np.clip(np.array(mean_by_channels) * 255, 0, 255).astype(np.uint8)
        elif isinstance(mean_by_channels[0], (int, np.integer)):
            assert np.array_equal(np.clip(mean_by_channels, 0, 255), np.array(mean_by_channels))
            return np.array(mean_by_channels).astype(np.uint8)
        else:
            raise ValueError('unexpected type for input channel mean: {}'.format(mean_by_channels))

    def my_iter_func(self, start, end):
        """Yield one clip (stacked buffer) per frame index in [start, end)."""
        for i in range(start, end):
            self.buffer.append(self.get_current_item())
            yield {'images': np.stack(self.buffer, axis=1), 'framenum': self.cnt - 1 - self.sequence_length // 2}

    def get_current_item(self):
        """Read and transform the frame at ``self.cnt`` (padding outside [0, N))."""
        worker_info = data.get_worker_info()
        worker_id = worker_info.id if worker_info is not None else 0
        if self.cnt < 0 or self.cnt >= self.N:
            # Out-of-range indices yield the channel-mean padding frame,
            # which is already in the post-transform shape.
            im = self.get_zeros_image()
        else:
            try:
                im = self.readers[worker_id][self.cnt]
            except Exception:
                print(f'problem reading frame {self.cnt}')
                raise
            im = self.transform(im)
        self.cnt += 1
        return im

    def fill_buffer_init(self, iter_start):
        """Pre-fill the clip buffer with sequence_length - 1 frames."""
        self.cnt = iter_start
        # hack for the first one: don't quite fill it up
        for _ in range(iter_start, iter_start + self.sequence_length - 1):
            self.buffer.append(self.get_current_item())

    def __iter__(self):
        worker_info = data.get_worker_info()
        iter_end = self.N - self.sequence_length // 2
        if worker_info is None:
            # Single-process loading: one reader covers the whole video.
            iter_start = -self.blank_start_frames
            self.readers[0] = VideoReader(self.videofile)
        else:
            # Split the video into num_workers roughly equal segments.
            per_worker = self.N // self.num_workers
            # BUGFIX: the leftover frames must be N % num_workers
            # (was: self.N % per_worker), which mis-balanced the segments
            # and raised ZeroDivisionError when N < num_workers.
            remaining = self.N % self.num_workers
            nums = [per_worker for _ in range(self.num_workers)]
            # Give the first `remaining` workers one extra frame each.
            nums = [nums[i] + 1 if i < remaining else nums[i] for i in range(self.num_workers)]
            nums.insert(0, 0)
            starts = np.cumsum(nums[:-1])
            starts = starts.tolist()
            ends = starts[1:] + [iter_end]
            starts[0] = -self.blank_start_frames
            iter_start = starts[worker_info.id]
            iter_end = min(ends[worker_info.id], self.N)
            self.readers[worker_info.id] = VideoReader(self.videofile)
        # FILL THE BUFFER TO START
        self.fill_buffer_init(iter_start)
        return self.my_iter_func(iter_start, iter_end)

    def close(self):
        """Close all video readers that were actually opened."""
        for worker_id, reader in self.readers.items():
            if isinstance(reader, int):
                # Placeholder: this worker never opened its reader.
                continue
            try:
                reader.close()
            except Exception:
                print(f'error destroying reader {worker_id}')
            else:
                print(f'destroyed {worker_id}')

    def __exit__(self, *args):
        self.close()

    def __del__(self):
        self.close()
class SingleVideoDataset(data.Dataset):
"""PyTorch Dataset for loading a set of sequential frames and one-hot labels for Action Detection.
Features:
- Loads a set of sequential frames and sequential one-hot labels
- Adds zero frames at beginning or end so that every label has a corresponding clip
- Applies the same augmentations to every frame in the clip
- Automatically finds label files with similar names to the list of movies
- Stacks all channels together for input into a CNN
Example:
dataset = VideoDataset(['movie1.avi', 'movie2.avi'], frames_per_clip=11, reduce=False)
images, labels = dataset(np.random.randint(low=0, high=len(dataset))
print(images.shape)
# 33 x 256 x 256
print(labels.shape)
# assuming there are 5 classes in dataset
# ~5 x 11
"""
def __init__(self,
videofile: Union[str, os.PathLike],
labelfile: Union[str, os.PathLike] = None,
mean_by_channels: Union[list, np.ndarray] = [0, 0, 0],
frames_per_clip: int = 1,
transform=None,
reduce: bool = True,
conv_mode: str = '2d',
keep_reader_open: bool = False):
"""Initializes a VideoDataset object.
Args:
video_list: a list of strings or paths to movies
frames per clip: how many sequential images to load
transform: either None or a TorchVision.transforms object or opencv_transforms object
supervised: whether or not to return a label. False: for self-supervision
reduce: whether or not to change a set of one-hot labels to integers denoting the class that equals one.
Applicable for multiclass, not multi-label cases, using a softmax activation and NLLloss
conv_mode: if 2d, returns a tensor of shape C, H, W. Multiple frames are stacked in C dimension. if 3d,
returns a tensor of shape C, T, H, W
Returns:
VideoDataset object
"""
self.videofile = videofile
self.labelfile = labelfile
self.mean_by_channels = self.parse_mean_by_channels(mean_by_channels)
self.frames_per_clip = frames_per_clip
self.transform = transform
self.reduce = reduce
self.conv_mode = conv_mode
self.keep_reader_open = keep_reader_open
self.supervised = self.labelfile is not None
assert os.path.isfile(videofile) or os.path.isdir(videofile)
assert self.conv_mode in ['2d', '3d']
# find labels given the filename of a video, load, save as an attribute for fast reading
if self.supervised:
assert os.path.isfile(labelfile)
# self.video_list, self.label_list = purge_unlabeled_videos(self.video_list, self.label_list)
labels, class_counts, num_labels, num_pos, num_neg = read_all_labels([self.labelfile])
self.labels = labels
self.class_counts = class_counts
self.num_labels = num_labels
self.num_pos = num_pos
self.num_neg = num_neg
log.debug('label shape: {}'.format(self.labels.shape))
metadata = {}
ret, width, height, framecount = get_video_metadata(self.videofile)
if ret:
metadata['name'] = videofile
metadata['width'] = width
metadata['height'] = height
metadata['framecount'] = framecount
else:
raise ValueError('error loading video: {}'.format(videofile))
self.metadata = metadata
self.N = self.metadata['framecount']
self._zeros_image = None
def get_zeros_image(self, c, h, w, channel_first: bool = True):
if self._zeros_image is None:
# ALWAYS ASSUME OUTPUT IS TRANSPOSED
self._zeros_image = np.zeros((c, h, w), dtype=np.uint8)
for i in range(3):
self._zeros_image[i, ...] = self.mean_by_channels[i]
return self._zeros_image
def parse_mean_by_channels(self, mean_by_channels):
if isinstance(mean_by_channels[0], (float, np.floating)):
return np.clip(np.array(mean_by_channels) * 255, 0, 255).astype(np.uint8)
elif isinstance(mean_by_channels[0], (int, np.integer)):
assert np.array_equal(np.clip(mean_by_channels, 0, 255), np.array(mean_by_channels))
return np.array(mean_by_channels).astype(np.uint8)
else:
raise ValueError('unexpected type for input channel mean: {}'.format(mean_by_channels))
def __len__(self):
return self.N
def prepend_with_zeros(self, stack, blank_start_frames):
if blank_start_frames == 0:
return stack
for i in range(blank_start_frames):
stack.insert(0, self.get_zeros_image(*stack[0].shape))
return stack
def append_with_zeros(self, stack, blank_end_frames):
if blank_end_frames == 0:
return stack
for i in range(blank_end_frames):
stack.append(self.get_zeros_image(*stack[0].shape))
return stack
def __getitem__(self, index: int):
"""Used for reading frames and possibly labels from disk.
Args:
index: integer from 0 to number of total clips in dataset
Returns:
np.ndarray of shape (H,W,C), where C is 3* frames_per_clip
Could also be torch.Tensor of shape (C,H,W), depending on the augmentation applied
"""
images = []
# if frames per clip is 11, dataset[0] would have 5 blank frames preceding, with the 6th-11th being real frames
blank_start_frames = max(self.frames_per_clip // 2 - index, 0)
framecount = self.metadata['framecount']
# cap = cv2.VideoCapture(self.movies[style][movie_index])
start_frame = index - self.frames_per_clip // 2 + blank_start_frames
blank_end_frames = max(index - framecount + self.frames_per_clip // 2 + 1, 0)
real_frames = self.frames_per_clip - blank_start_frames - blank_end_frames
seed = np.random.randint(2147483647)
with VideoReader(self.videofile, assume_writer_style=True) as reader:
for i in range(real_frames):
try:
image = reader[i + start_frame]
except Exception as e:
image = self._zeros_image.copy().transpose(1, 2, 0)
log.warning('Error {} on frame {} of video {}. Is the video | |
<filename>color pattern with threading.py
import time
import random
from multiprocessing import pool
from playsound import playsound
from threading import Thread
i = -1
l = 0
count = 0
class loops:
    """Renders block letters on the console, one row per method call.

    Each letter method prints the single row selected by the module-level
    global ``i`` (the driver code below increments ``i`` once per row and
    calls the method for every character of the input text), then prints the
    inter-letter gap via ``loop`` and returns.  NOTE(review): the methods
    call the module-level instance ``x`` created after the class, so the
    class is not usable on its own.
    """

    def loop(self):
        """Print the gap that separates two letters."""
        print("  ", end="")

    def A(self):
        """Print the current row (global ``i``) of the letter 'A'."""
        global i
        global l
        global i
        for j in range(i, 5):
            for k in range(4, i, -1):
                print(" ", end="")
            print("*", end="")
            if i != 0:
                l = 1
            for q in range(0, l):
                if (i == 3):
                    print(" *" * 3, end="")
                else:
                    print(" " * (i + (i - 1)), end="*")
            for k in range(4, i, -1):
                print(" ", end="")
            x.loop()
            return

    def B(self):
        """Print the current row of 'B'."""
        global i
        for j in range(i, 6):
            print("*", end="")
            if (i == 0 or i == 2 or i == 4):
                print(" *" * 3, end=" ")
            else:
                print(" " * 6, end="*")
            x.loop()
            return

    def C(self):
        """Print the current row of 'C'."""
        global i
        for i in range(i, 5):
            if (i == 0 or i == 4):
                print(" " * 2, end=" *" * 3)
            elif (i == 1 or i == 3):
                print(" " * 1, end="*")
                print(" " * 5, end=" ")
            else:
                print("*", end=" " *7)
            x.loop()
            return

    def D(self):
        """Print the current row of 'D'."""
        global i
        for i in range(i, 5):
            print("*", end=" ")
            if (i == 0 or i == 4):
                print("* " * 2, end=" " * 1)
            elif (i == 1 or i == 3):
                print(" " * 4, end="*")
            else:
                print(" " * 3, end=" *")
            x.loop()
            return

    def E(self):
        """Print the current row of 'E'."""
        global i
        for i in range(i, 5):
            if (i == 0 or i == 2 or i == 4):
                print("* " * 3, end="*")
            else:
                print("* ", end=" " * 5)
            x.loop()
            return

    def F(self):
        """Print the current row of 'F'."""
        global i
        for i in range(i, 5):
            if (i == 0):
                print("* " * 3, end="*")
            elif (i == 2):
                print("* " * 3, end=" ")
            else:
                print("* ", end=" " * 5)
            x.loop()
            return

    def G(self):
        """Print the current row of 'G'."""
        global i
        for i in range(i, 5):
            if (i == 0):
                print(" " * 2, end=" *" * 3)
                print(" ", end="")
            elif (i == 4):
                print(" " * 2, end=" * " * 2)
                print(" ", end="")
            elif (i == 1):
                print(" " * 1, end="*")
                print(" " * 7, end="")
            elif (i == 3):
                print(" " * 1, end="*")
                print(" " * 5, end=" *")
            else:
                print("*", end=" " * 2)
                print(" *" * 3, end="")
            x.loop()
            return

    def H(self):
        """Print the current row of 'H'."""
        global i
        for i in range(i, 5):
            if (i == 2):
                print("* " * 3, end="*")
            else:
                print("*", end=" " * 5)
                print("*", end="")
            x.loop()
            return

    def I(self):
        """Print the current row of 'I'."""
        global i
        for i in range(i, 5):
            if (i == 0 or i == 4):
                print("* " * 3, end="*")
            else:
                print(" " * 3, end="*")
                print(" " * 3, end="")
            x.loop()
            return

    def J(self):
        """Print the current row of 'J'."""
        global i
        for i in range(i, 5):
            if (i == 0):
                print("* " * 3, end="*")
            elif (i == 3 or i == 2):
                print("* ", end=" *")
                print(" " * 3, end="")
            elif (i == 4):
                print(" ", end="*")
                print(" " * 2, end="")
            else:
                print(" " * 3, end="*")
                print(" " * 3, end="")
            x.loop()
            return

    def K(self):
        """Print the current row of 'K'."""
        global i
        for i in range(i, 5):
            if i == 0 or i == 4:
                print("*", end=" " * 3)
                print("*", end="")
            elif i == 1 or i == 3:
                print("*", end=" " * 2)
                print("* ", end=" ")
            else:
                print("* ", end=" *")
                print(" ", end=" ")
            x.loop()
            return

    def L(self):
        """Print the current row of 'L'."""
        global i
        for i in range(i,5):
            if(i==4):
                print("* "*3,end="*")
            else:
                print("* ",end=" "*5)
            x.loop()
            return

    def M(self):
        """Print the current row of 'M'."""
        global i
        for i in range(i,5):
            print("* ",end="")
            if(i==1):
                print("* ",end=" * ")
            elif(i==2):
                print(" "*2,end="* ")
            else:
                print(" "*3,end="")
            print("*",end="")
            x.loop()
            return

    def N(self):
        """Print the current row of 'N'."""
        global i
        for i in range(i,5):
            print("*",end="")
            if(i==0 ):
                print(" "*3,end="")
            else:
                print(" "*i,end="*")
                print(" "*(5-i),end="")
            print("*",end="")
            x.loop()
            return

    def O(self):
        """Print the current row of 'O'."""
        global i
        for i in range(i,5):
            if(i==0 or i==4):
                print(" "*4,end="*")
                print(" "*3,end=" ")
            elif(i==2):
                print("*",end=" "*7)
                print("*",end="")
            else:
                print(" ",end="*")
                print(" ",end="* ")
            x.loop()
            return

    def P(self):
        """Print the current row of 'P'."""
        global i
        for i in range(i,5):
            print("*",end="")
            if(i==0 or i==2):
                print(" *"*3,end=" ")
            elif(i==1):
                print(" "*6,end="*")
            else:
                print(" "*7,end="")
            x.loop()
            return

    def Q(self):
        """Print the current row of 'Q'."""
        global i
        for i in range(i,5):
            if(i==0):
                print(" "*4,end="*")
                print(" "*3,end=" ")
            elif(i==4):
                print(" "*4,end="*")
                print(" "*3,end="*")
            elif(i==2):
                print("*",end=" "*7)
                print("*",end="")
            elif(i==3):
                print(" ",end="*")
                print(" "*3,end="* * ")
            else:
                print(" ",end="*")
                print(" ",end="* ")
            x.loop()
            return

    def R(self):
        """Print the current row of 'R'."""
        global i
        for i in range(i,5):
            print("*",end="")
            if(i==0 or i==2):
                print(" *"*3,end=" ")
            elif(i==1):
                print(" "*6,end="*")
            else:
                print(" "*i,end=" *")
                print(" ",end=" "*(4-i))
            x.loop()
            return

    def S(self):
        """Print the current row of 'S'."""
        global i
        for i in range(i, 5):
            if (i == 0):
                print(" " * 2, end="* " * 3)
                print("", end="")
            elif (i == 4):
                print(" ", end="* " * 3)
                print("", end="")
            elif (i == 1):
                print("*", end=" " * 7)
            elif (i == 2):
                print(" ", end="*")
                print(" " * 4, end="")
            else:
                print("*", end=" " * 6)
                print("*", end="")
            x.loop()
            return

    def T(self):
        """Print the current row of 'T'."""
        global i
        for i in range(i, 5):
            if (i == 0):
                print("* " * 3, end="*")
            else:
                print(" " * 2, end=" *")
                print(" " * 2, end=" ")
            x.loop()
            return

    def U(self):
        """Print the current row of 'U'."""
        global i
        for i in range(i, 5):
            if (i == 4):
                print(" " * 2, end="* " * 2)
                print(" " * 2, end="")
            elif (i == 3):
                print(" ", end="*")
                print(" " * 4, end="*")
                print(" ", end="")
            else:
                print("* ", end=" " * 5)
                print("*", end="")
            x.loop()
            return

    def V(self):
        """Print the current row of 'V'."""
        global i
        for i in range(i, 5):
            if (i == 0):
                print("*", end=" " * 7)
                print("*", end="")
            elif (i == 1):
                print(" *", end=" " * 5)
                print("*", end=" ")
            elif (i == 2):
                print(" *", end=" " * 3)
                print("*", end=" ")
            elif (i == 3):
                print(" *", end=" ")
                print("*", end=" ")
            else:
                print(" " * 4, end="*")
                print(" " * 4, end="")
            x.loop()
            return

    def W(self):
        """Print the current row of 'W'."""
        global i
        for i in range(i, 5):
            if (i == 0):
                print("*", end=" " * 11)
                print("*", end="")
            elif i == 1:
                print(" *", end=" " * 9)
                print("", end="* ")
            elif (i == 2):
                print(" * ", end=" *")
                print(" ", end=" ")
            elif (i == 3):
                print(" " * 3, end="*")
                print(" * * ", end=" " * 2)
            else:
                print(" " * 3, end=" *")
                print(" *", end=" " * 4)
            x.loop()
            return

    def X(self):
        """Print the current row of 'X'."""
        global i
        for i in range(i, 5):
            if (i == 0 or i == 4):
                print("*", end=" " * 5)
                print("*", end="")
            elif (i == 1 or i == 3):
                print(" *", end=" " * 3)
                print("* ", end="")
            else:
                print(" " * 3, end="*")
                print(" " * 3, end="")
            x.loop()
            return

    def Y(self):
        """Print the current row of 'Y'."""
        global i
        for i in range(i, 5):
            if (i == 0):
                print("*", end=" " * 5)
                print("*", end="")
            elif (i == 1):
                print(" *", end=" " * 3)
                print("* ", end="")
            else:
                print(" " * 3, end="*")
                print(" " * 3, end="")
            x.loop()
            return

    def Z(self):
        """Print the current row of 'Z'."""
        global i
        for i in range(i, 5):
            if (i == 0 or i == 4):
                print("* " * 3, end="*")
            elif (i == 1):
                print(" " * 5, end="*")
                print(" ", end="")
            elif (i == 2):
                print(" " * 3, end="*")
                print(" " * 2, end=" ")
            else:
                print(" " * 1, end="*")
                print(" " * 3, end=" ")
            x.loop()
            return
print()
def play(sound_dir="C:\\Users\\chetan\\Desktop\\language\\playsound\\"):
    """Ask the user for a sound name and play ``<sound_dir><name>.mp3``.

    Runs as a background thread target (see the driver code) so the letter
    animation can print while the song plays.

    Parameters
    ----------
    sound_dir : str, optional
        Folder containing the .mp3 files.  Generalized from the previously
        hard-coded path so other machines can pass their own location.
    """
    soun = input("ENTER SOUND")
    # Short pause before clearing the screen with blank lines.
    time.sleep(1.8)
    print("\n" * 30)
    playsound(sound_dir + soun + ".mp3")
    time.sleep(1.1)
x = loops()
# DRIVER CODE
n = input("ENTER YOUR TEXT")
print("type any song name from here ...")
lis=["birth",'rider','standard','teri mitti me','chitrakaar']
print(lis)
#WE CAN ADD birthday and rider SONG HERE
thread=Thread(target=play)
thread.start()
time.sleep(7)
k = len(n)
aa,bb,cc,dd,ee,ff,gg,hh,ii,jj,kk,ll,mm,nn,oo,pp,qq,rr,ss,tt,uu,vv,ww,xx,yy,zz=0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
s=0.5
list=[30,31,32,33,34,35,36,37]
color=0
for o in range(5):
i = i + 1
for f in range(k):
if (n[f] == "A" or n[f] == "a"):
if(aa==0):
aa=random.choice(list)
aa=aa+1
print("\033[1;{}m".format(aa),end="")
time.sleep(s)
x.A()
elif (n[f] == "B" or n[f] == "b"):
| |
import hashlib
import json
import sys
import time
import types
import warnings
# Import the URL/HTTP machinery from the Python 3 locations first, falling back
# to the Python 2 modules; define 2/3 compatibility type aliases either way.
try:
    from urllib.request import build_opener, HTTPRedirectHandler
    from urllib.parse import urlencode
    from urllib.error import URLError, HTTPError
    # Python 3 aliases
    string_types = str,
    integer_types = int,
    numeric_types = (int, float)
    text_type = str
    binary_type = bytes
except ImportError as e:
    from urllib2 import build_opener, HTTPRedirectHandler, URLError, HTTPError
    from urllib import urlencode
    # Python 2 aliases (basestring/long/unicode only exist on Python 2)
    string_types = basestring,
    integer_types = (int, long)
    numeric_types = (int, long, float)
    text_type = unicode
    binary_type = str
class DontRedirect(HTTPRedirectHandler):
    """Opener handler that refuses to follow redirects.

    A 301/302/303/307 response is surfaced to the caller as an HTTPError
    instead of being transparently followed; any other status falls through.
    """

    def redirect_response(self, req, fp, code, msg, headers, newurl):
        if code not in (301, 302, 303, 307):
            return None
        raise HTTPError(req.get_full_url(), code, msg, headers, fp)
class Error(Exception):
    """Base class for all exceptions raised by this bitly client."""
    pass
class BitlyError(Error):
    """Error returned by the bitly API, carrying the API status code."""

    def __init__(self, code, message):
        # Route through the MRO instead of calling Error.__init__ directly;
        # behaviour is identical (Error adds nothing over Exception).
        super(BitlyError, self).__init__(message)
        self.code = code
def _utf8(s):
    """Return *s* as UTF-8 encoded bytes; bytes input passes through unchanged."""
    encoded = s.encode('utf-8') if isinstance(s, text_type) else s
    assert isinstance(encoded, binary_type)
    return encoded
def _utf8_params(params):
    """encode a dictionary of URL parameters (including iterables) as utf-8"""
    assert isinstance(params, dict)

    def _encode_value(value):
        # Numbers are stringified first, then everything is byte-encoded;
        # lists/tuples are encoded element-wise.
        if isinstance(value, numeric_types):
            value = str(value)
        if isinstance(value, (list, tuple)):
            return [_utf8(item) for item in value]
        return _utf8(value)

    # None values are dropped entirely, exactly as the original loop did.
    return {key: _encode_value(value)
            for key, value in params.items() if value is not None}
class Connection(object):
"""
This is a python library for accessing the bitly api
http://github.com/bitly/bitly-api-python
Usage:
import bitly_api
c = bitly_api.Connection('bitlyapidemo','R_{{apikey}}')
# or to use oauth2 endpoints
c = bitly_api.Connection(access_token='...')
c.shorten('http://www.google.com/')
"""
def __init__(self, login=None, api_key=None, access_token=None,
secret=None):
self.host = 'api.bit.ly'
self.ssl_host = 'api-ssl.bit.ly'
self.login = login
self.api_key = api_key
self.access_token = access_token
self.secret = secret
(major, minor, micro, releaselevel, serial) = sys.version_info
parts = (major, minor, micro, '?')
self.user_agent = "Python/%d.%d.%d bitly_api/%s" % parts
def shorten(self, uri, x_login=None, x_apiKey=None, preferred_domain=None):
""" creates a bitly link for a given long url
@parameter uri: long url to shorten
@parameter x_login: login of a user to shorten on behalf of
@parameter x_apiKey: apiKey of a user to shorten on behalf of
@parameter preferred_domain: bit.ly[default], bitly.com, or j.mp
"""
params = dict(uri=uri)
if preferred_domain:
params['domain'] = preferred_domain
if x_login:
params.update({
'x_login': x_login,
'x_apiKey': x_apiKey})
data = self._call(self.host, 'v3/shorten', params, self.secret)
return data['data']
def expand(self, hash=None, shortUrl=None, link=None):
""" given a bitly url or hash, decode it and return the target url
@parameter hash: one or more bitly hashes
@parameter shortUrl: one or more bitly short urls
@parameter link: one or more bitly short urls (preferred vocabulary)
"""
if link and not shortUrl:
shortUrl = link
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/expand', params, self.secret)
return data['data']['expand']
def clicks(self, hash=None, shortUrl=None):
"""
given a bitly url or hash, get statistics about the clicks on that link
"""
warnings.warn("/v3/clicks is depricated in favor of /v3/link/clicks",
DeprecationWarning)
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/clicks', params, self.secret)
return data['data']['clicks']
def referrers(self, hash=None, shortUrl=None):
"""
given a bitly url or hash, get statistics about the referrers of that
link
"""
warnings.warn("/v3/referrers is depricated in favor of "
"/v3/link/referrers", DeprecationWarning)
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/referrers', params, self.secret)
return data['data']['referrers']
def clicks_by_day(self, hash=None, shortUrl=None):
""" given a bitly url or hash, get a time series of clicks
per day for the last 30 days in reverse chronological order
(most recent to least recent) """
warnings.warn("/v3/clicks_by_day is depricated in favor of "
"/v3/link/clicks?unit=day", DeprecationWarning)
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/clicks_by_day', params, self.secret)
return data['data']['clicks_by_day']
def clicks_by_minute(self, hash=None, shortUrl=None):
""" given a bitly url or hash, get a time series of clicks
per minute for the last 30 minutes in reverse chronological
order (most recent to least recent)"""
warnings.warn("/v3/clicks_by_minute is depricated in favor of "
"/v3/link/clicks?unit=minute", DeprecationWarning)
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/clicks_by_minute', params,
self.secret)
return data['data']['clicks_by_minute']
def link_clicks(self, link, **kwargs):
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/clicks", params, **kwargs)
return data["link_clicks"]
def link_encoders(self, link, **kwargs):
"""return the bitly encoders who have saved this link"""
params = dict(link=link)
data = self._call(self.host, 'v3/link/encoders', params, **kwargs)
return data['data']
def link_encoders_count(self, link, **kwargs):
"""return the count of bitly encoders who have saved this link"""
params = dict(link=link)
data = self._call(self.host, 'v3/link/encoders_count', params,
**kwargs)
return data['data']
def link_referring_domains(self, link, **kwargs):
"""
returns the domains that are referring traffic to a single bitly link
"""
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/referring_domains", params,
**kwargs)
return data["referring_domains"]
def link_referrers_by_domain(self, link, **kwargs):
"""
returns the pages that are referring traffic to a single bitly link,
grouped by domain
"""
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/referrers_by_domain", params,
**kwargs)
return data["referrers"]
def link_referrers(self, link, **kwargs):
"""
returns the pages are are referring traffic to a single bitly link
"""
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/referrers", params, **kwargs)
return data["referrers"]
def link_shares(self, link, **kwargs):
"""return number of shares of a bitly link"""
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/shares", params, **kwargs)
return data
def link_countries(self, link, **kwargs):
params = dict(link=link)
data = self._call_oauth2_metrics("v3/link/countries", params, **kwargs)
return data["countries"]
def user_clicks(self, **kwargs):
"""aggregate number of clicks on all of this user's bitly links"""
data = self._call_oauth2_metrics('v3/user/clicks', dict(), **kwargs)
return data
def user_countries(self, **kwargs):
"""
aggregate metrics about countries from which people are clicking on all
of a user's bitly links
"""
data = self._call_oauth2_metrics('v3/user/countries', dict(), **kwargs)
return data["countries"]
def user_popular_links(self, **kwargs):
data = self._call_oauth2_metrics("v3/user/popular_links", dict(),
**kwargs)
return data["popular_links"]
def user_referrers(self, **kwargs):
"""
aggregate metrics about the referrers for all of the authed user's
bitly links
"""
data = self._call_oauth2_metrics("v3/user/referrers", dict(), **kwargs)
return data["referrers"]
def user_referring_domains(self, **kwargs):
"""
aggregate metrics about the domains referring traffic to all of the
authed user's bitly links
"""
data = self._call_oauth2_metrics("v3/user/referring_domains", dict(),
**kwargs)
return data["referring_domains"]
def user_share_counts(self, **kwargs):
"""number of shares by authed user in given time period"""
data = self._call_oauth2_metrics("v3/user/share_counts", dict(),
**kwargs)
return data["share_counts"]
def user_share_counts_by_share_type(self, **kwargs):
"""
number of shares by authed user broken down by type (facebook, twitter,
email) in a give time period
"""
data = self._call_oauth2_metrics("v3/user/share_counts_by_share_type",
dict(), **kwargs)
return data["share_counts_by_share_type"]
def user_shorten_counts(self, **kwargs):
data = self._call_oauth2_metrics("v3/user/shorten_counts", dict(),
**kwargs)
return data["user_shorten_counts"]
def user_tracking_domain_list(self):
data = self._call_oauth2("v3/user/tracking_domain_list", dict())
return data["tracking_domains"]
def user_tracking_domain_clicks(self, domain, **kwargs):
params = dict(domain=domain)
data = self._call_oauth2_metrics("v3/user/tracking_domain_clicks",
params, **kwargs)
return data["tracking_domain_clicks"]
def user_tracking_domain_shorten_counts(self, domain, **kwargs):
params = dict(domain=domain)
data = self._call_oauth2_metrics(
"v3/user/tracking_domain_shorten_counts", params, **kwargs)
return data["tracking_domain_shorten_counts"]
def user_info(self, **kwargs):
"""return or update info about a user"""
data = self._call_oauth2("v3/user/info", kwargs)
return data
def user_link_history(self, created_before=None, created_after=None,
archived=None, limit=None, offset=None,
private=None):
params = dict()
if created_before is not None:
assert isinstance(limit, integer_types)
params["created_before"] = created_before
if created_after is not None:
assert isinstance(limit, integer_types)
params["created_after"] = created_after
if archived is not None:
assert isinstance(archived, string_types)
archived = archived.lower()
assert archived is "on" or "off" or "both"
params["archived"] = archived
if private is not None:
assert isinstance(private, string_types)
private = private.lower()
assert private is "on" or "off" or "both"
params["private"] = private
if limit is not None:
assert isinstance(limit, integer_types)
params["limit"] = str(limit)
if offset is not None:
assert isinstance(offset, integer_types)
params["offset"] = str(offset)
data = self._call_oauth2("v3/user/link_history", params)
return data["link_history"]
def user_network_history(self, offset=None, expand_client_id=False,
limit=None, expand_user=False):
params = dict()
if expand_client_id is True:
params["expand_client_id"] = "true"
if expand_user is True:
params["expand_user"] = "true"
if offset is not None:
assert isinstance(offset, integer_types)
params["offset"] = str(offset)
if limit is not None:
assert isinstance(limit, integer_types)
params["limit"] = str(limit)
data = self._call_oauth2("v3/user/network_history", params)
return data
def info(self, hash=None, shortUrl=None, link=None):
""" return the page title for a given bitly link """
if link and not shortUrl:
shortUrl = link
if not hash and not shortUrl:
raise BitlyError(500, 'MISSING_ARG_SHORTURL')
params = dict()
if hash:
params['hash'] = hash
if shortUrl:
params['shortUrl'] = shortUrl
data = self._call(self.host, 'v3/info', params, self.secret)
return data['data']['info']
| |
The array for direction xi has shape (n_xi, p+1, der + 1).
global_spans : List of ndarray
List of 1D arrays, one per direction, containing the index of the last non-vanishing
basis function in each cell. The array for direction xi has shape (n_xi,).
cell_indexes : list of ndarray
List of 1D arrays, one per direction, containing the index of the cell in which
the corresponding point in grid is.
local_shape : List of tuple
Shape of what is local to this instance.
"""
# Check the grid
assert len(grid) == self.ldim
# Get the local domain
v = self.vector_space
starts, ends = self.local_domain
# Add the overlap if we are in parallel
if v.parallel:
starts = tuple(s - overlap if s!=0 else s for s in starts)
ends = tuple(e + overlap for e in ends)
# Compute the basis functions and spans and cell indexes.
global_basis = []
global_spans = []
cell_indexes = []
local_shape = []
for i in range(self.ldim):
# Check the that the grid is sorted.
grid_i = grid[i]
assert all(grid_i[j] <= grid_i[j + 1] for j in range(len(grid_i) - 1))
# Get the cell indexes
cell_index_i = cell_index(self.breaks[i], grid_i)
min_idx = np.searchsorted(cell_index_i, starts[i], side='left')
max_idx = np.searchsorted(cell_index_i, ends[i], side='right')
# We only care about the local cells.
cell_index_i = cell_index_i[min_idx:max_idx]
grid_local_i = grid_i[min_idx:max_idx]
# basis functions and spans
global_basis_i = basis_ders_on_irregular_grid(self.knots[i], self.degree[i], grid_local_i, cell_index_i, der, self.spaces[i].basis)
global_spans_i = elements_spans(self.knots[i], self.degree[i])[slice(starts[i], ends[i] + 1)] - v.starts[i] + v.shifts[i] * v.pads[i]
local_shape.append(len(grid_local_i))
global_basis.append(global_basis_i)
global_spans.append(global_spans_i)
# starts[i] is cell 0 of the local domain
cell_indexes.append(cell_index_i - starts[i])
return self.degree, global_basis, global_spans, cell_indexes, local_shape
# ...
    def eval_fields(self, grid, *fields, weights=None, npts_per_cell=None, overlap=0):
        """Evaluate one or several fields at the given location(s) grid.
        Parameters
        -----------
        grid : List of ndarray
            Grid on which to evaluate the fields
        *fields : tuple of psydac.fem.basic.FemField
            Fields to evaluate
        weights : psydac.fem.basic.FemField or None, optional
            Weights field.
        npts_per_cell: int or tuple of int or None, optional
            number of evaluation points in each cell.
            If an integer is given, then assume that it is the same in every direction.
        overlap : int
            How much to overlap. Only used in the distributed context.
        Returns
        -------
        List of ndarray of floats
            List of the evaluated fields.
        """
        # All fields must live in this space, with ghost regions up to date.
        assert all(f.space is self for f in fields)
        for f in fields:
            # Necessary if vector coeffs is distributed across processes
            if not f.coeffs.ghost_regions_in_sync:
                f.coeffs.update_ghost_regions()
        if weights is not None:
            # The weights field must share this space and coefficient layout.
            assert weights.space is self
            assert all(f.coeffs.space is weights.coeffs.space for f in fields)
            if not weights.coeffs.ghost_regions_in_sync:
                weights.coeffs.update_ghost_regions()
        assert len(grid) == self.ldim
        grid = [np.asarray(grid[i]) for i in range(self.ldim)]
        # Every direction must use the same kind of grid description.
        assert all(grid[i].ndim == grid[i + 1].ndim for i in range(self.ldim - 1))
        # --------------------------
        # Case 1. Scalar coordinates
        if (grid[0].size == 1) or grid[0].ndim == 0:
            if weights is not None:
                return [self.eval_field(f, *grid, weights=weights.coeffs) for f in fields]
            else:
                return [self.eval_field(f, *grid) for f in fields]
        # Case 2. 1D array of coordinates and no npts_per_cell is given
        # -> grid is tensor-product, but npts_per_cell is not the same in each cell
        elif grid[0].ndim == 1 and npts_per_cell is None:
            out_fields = self.eval_fields_irregular_tensor_grid(grid, *fields, weights=weights, overlap=overlap)
            return [np.ascontiguousarray(out_fields[..., i]) for i in range(len(fields))]
        # Case 3. 1D arrays of coordinates and npts_per_cell is a tuple or an integer
        # -> grid is tensor-product, and each cell has the same number of evaluation points
        elif grid[0].ndim == 1 and npts_per_cell is not None:
            if isinstance(npts_per_cell, int):
                npts_per_cell = (npts_per_cell,) * self.ldim
            # Reshape each 1D point array to (ncells_i, npts_per_cell_i);
            # assumes the flat array is exactly ncells_i * npts_per_cell_i long.
            for i in range(self.ldim):
                ncells_i = len(self.breaks[i]) - 1
                grid[i] = np.reshape(grid[i], newshape=(ncells_i, npts_per_cell[i]))
            out_fields = self.eval_fields_regular_tensor_grid(grid, *fields, weights=weights, overlap=overlap)
            # return a list
            return [np.ascontiguousarray(out_fields[..., i]) for i in range(len(fields))]
        # Case 4. (self.ldim)D arrays of coordinates and no npts_per_cell
        # -> unstructured grid
        elif grid[0].ndim == self.ldim and npts_per_cell is None:
            raise NotImplementedError("Unstructured grids are not supported yet.")
        # Case 5. Nonsensical input
        else:
            raise ValueError("This combination of argument isn't understood. The 4 cases understood are :\n"
                             "Case 1. Scalar coordinates\n"
                             "Case 2. 1D array of coordinates and no npts_per_cell is given\n"
                             "Case 3. 1D arrays of coordinates and npts_per_cell is a tuple or an integer\n"
                             "Case 4. {0}D arrays of coordinates and no npts_per_cell".format(self.ldim))
# ...
    def eval_fields_regular_tensor_grid(self, grid, *fields, weights=None, overlap=0):
        """Evaluate fields on a regular tensor grid
        Parameters
        ----------
        grid : List of ndarray
            List of 2D arrays representing each direction of the grid.
            Each of these arrays should have shape (ne_xi, nv_xi) where ne is the
            number of cells in the domain in the direction xi and nv_xi is the number of
            evaluation points in the same direction.
        *fields : tuple of psydac.fem.basic.FemField
            Fields to evaluate on `grid`.
        weights : psydac.fem.basic.FemField or None, optional
            Weights to apply to our fields.
        overlap : int
            How much to overlap. Only used in the distributed context.
        Returns
        -------
        List of ndarray of float
            Values of the fields on the regular tensor grid
        """
        # Per-direction degrees, basis values, spans and the local (ncells, npts) shape.
        degree, global_basis, global_spans, local_shape = self.preprocess_regular_tensor_grid(grid, der=0, overlap=overlap)
        ncells = [local_shape[i][0] for i in range(self.ldim)]
        n_eval_points = [local_shape[i][1] for i in range(self.ldim)]
        # Output holds all fields stacked along the last axis.
        out_fields = np.zeros((*(tuple(ncells[i] * n_eval_points[i] for i in range(self.ldim))), len(fields)))
        # Stack every field's coefficient array the same way so the kernels
        # can evaluate all fields in a single pass.
        glob_arr_coeffs = np.zeros(shape=(*fields[0].coeffs._data.shape, len(fields)))
        for i in range(len(fields)):
            glob_arr_coeffs[..., i] = fields[i].coeffs._data
        # Dispatch to the compiled kernel matching dimension and weighting.
        if self.ldim == 2:
            if weights is None:
                eval_fields_2d_no_weights(ncells[0], ncells[1], degree[0], degree[1],
                                          n_eval_points[0], n_eval_points[1], global_basis[0], global_basis[1],
                                          global_spans[0], global_spans[1], glob_arr_coeffs, out_fields)
            else:
                global_weight_coeff = weights.coeffs._data
                eval_fields_2d_weighted(ncells[0], ncells[1], degree[0], degree[1],
                                        n_eval_points[0], n_eval_points[1], global_basis[0], global_basis[1],
                                        global_spans[0], global_spans[1], glob_arr_coeffs, global_weight_coeff,
                                        out_fields)
        elif self.ldim == 3:
            if weights is None:
                eval_fields_3d_no_weights(ncells[0], ncells[1], ncells[2], degree[0],
                                          degree[1], degree[2], n_eval_points[0], n_eval_points[1],
                                          n_eval_points[2], global_basis[0], global_basis[1], global_basis[2],
                                          global_spans[0], global_spans[1], global_spans[2], glob_arr_coeffs,
                                          out_fields)
            else:
                global_weight_coeff = weights.coeffs._data
                eval_fields_3d_weighted(ncells[0], ncells[1], ncells[2], degree[0],
                                        degree[1], degree[2], n_eval_points[0], n_eval_points[1], n_eval_points[2],
                                        global_basis[0], global_basis[1], global_basis[2], global_spans[0],
                                        global_spans[1], global_spans[2], glob_arr_coeffs, global_weight_coeff,
                                        out_fields)
        else:
            raise NotImplementedError("1D not Implemented")
        return out_fields
# ...
    def eval_fields_irregular_tensor_grid(self, grid, *fields, weights=None, overlap=0):
        """Evaluate fields on an irregular tensor grid, i.e. a tensor grid
        where the number of evaluation points may differ from cell to cell.

        Parameters
        ----------
        grid : List of ndarray
            List of 1D arrays, one per direction, containing the (sorted)
            coordinates of the evaluation points in that direction.
        *fields : tuple of psydac.fem.basic.FemField
            Fields to evaluate on `grid`.
        weights : psydac.fem.basic.FemField or None, optional
            Weights to apply to our fields.
        overlap : int
            How much to overlap. Only used in the distributed context.
        Returns
        -------
        List of ndarray of float
            Values of the fields on the irregular tensor grid
        """
        degree, global_basis, global_spans, cell_indexes, local_shape = \
            self.preprocess_irregular_tensor_grid(grid, overlap=overlap)
        # Output: one value per grid point per field (fields along the last axis).
        out_fields = np.zeros(tuple(local_shape) + (len(fields),))
        # Stack all fields' coefficients so the kernels evaluate them in one pass.
        glob_arr_coeffs = np.zeros(shape=(*fields[0].coeffs._data.shape, len(fields)))
        npoints = local_shape
        for i in range(len(fields)):
            glob_arr_coeffs[..., i] = fields[i].coeffs._data
        # Dispatch to the compiled kernel matching dimension and weighting.
        if self.ldim == 2:
            if weights is None:
                eval_fields_2d_irregular_no_weights(*npoints, *degree, *cell_indexes, *global_basis,
                                                    *global_spans, glob_arr_coeffs, out_fields)
            else:
                global_weight_coeff = weights.coeffs._data
                eval_fields_2d_irregular_weighted(*npoints, *degree, *cell_indexes, *global_basis,
                                                  *global_spans, glob_arr_coeffs, global_weight_coeff, out_fields)
        elif self.ldim == 3:
            if weights is None:
                eval_fields_3d_irregular_no_weights(*npoints, *degree, *cell_indexes, *global_basis,
                                                    *global_spans, glob_arr_coeffs, out_fields)
            else:
                global_weight_coeff = weights.coeffs._data
                eval_fields_3d_irregular_weighted(*npoints, *degree, *cell_indexes, *global_basis,
                                                  *global_spans, glob_arr_coeffs, global_weight_coeff, out_fields)
        else:
            raise NotImplementedError("1D not Implemented")
        return out_fields
# ...
def eval_field_gradient( self, field, *eta , weights=None):
assert isinstance( field, FemField )
assert field.space is self
assert len( eta ) == self.ldim
bases_0 = []
bases_1 = []
index = []
for (x, xlim, space) in zip( eta, self.eta_lims, self.spaces ):
knots = space.knots
degree = space.degree
span = find_span( knots, degree, x )
#-------------------------------------------------#
# Fix span for boundaries between subdomains #
#-------------------------------------------------#
# TODO: Use local knot sequence instead of global #
# one to get correct span in all situations #
#-------------------------------------------------#
if x == xlim[1] and x != knots[-1-degree]:
span -= 1
#-------------------------------------------------#
basis_0 = basis_funs(knots, degree, x, span)
basis_1 = basis_funs_1st_der(knots, degree, x, span)
# If needed, rescale B-splines to get M-splines
if space.basis == 'M':
| |
1465
SAI_PORT_STAT_ETHER_STATS_PKTS_128_TO_255_OCTETS = (SAI_PORT_STAT_ETHER_STATS_PKTS_65_TO_127_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_PKTS_256_TO_511_OCTETS = (SAI_PORT_STAT_ETHER_STATS_PKTS_128_TO_255_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_PKTS_512_TO_1023_OCTETS = (SAI_PORT_STAT_ETHER_STATS_PKTS_256_TO_511_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_PKTS_1024_TO_1518_OCTETS = (SAI_PORT_STAT_ETHER_STATS_PKTS_512_TO_1023_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_PKTS_1519_TO_2047_OCTETS = (SAI_PORT_STAT_ETHER_STATS_PKTS_1024_TO_1518_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_PKTS_2048_TO_4095_OCTETS = (SAI_PORT_STAT_ETHER_STATS_PKTS_1519_TO_2047_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_PKTS_4096_TO_9216_OCTETS = (SAI_PORT_STAT_ETHER_STATS_PKTS_2048_TO_4095_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_PKTS_9217_TO_16383_OCTETS = (SAI_PORT_STAT_ETHER_STATS_PKTS_4096_TO_9216_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_OVERSIZE_PKTS = (SAI_PORT_STAT_ETHER_STATS_PKTS_9217_TO_16383_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS = (SAI_PORT_STAT_ETHER_STATS_OVERSIZE_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS = (SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_JABBERS = (SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_OCTETS = (SAI_PORT_STAT_ETHER_STATS_JABBERS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_PKTS = (SAI_PORT_STAT_ETHER_STATS_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_COLLISIONS = (SAI_PORT_STAT_ETHER_STATS_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_CRC_ALIGN_ERRORS = (SAI_PORT_STAT_ETHER_STATS_COLLISIONS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_TX_NO_ERRORS = (SAI_PORT_STAT_ETHER_STATS_CRC_ALIGN_ERRORS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_STATS_RX_NO_ERRORS = (SAI_PORT_STAT_ETHER_STATS_TX_NO_ERRORS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IP_IN_RECEIVES = (SAI_PORT_STAT_ETHER_STATS_RX_NO_ERRORS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IP_IN_OCTETS = (SAI_PORT_STAT_IP_IN_RECEIVES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IP_IN_UCAST_PKTS = (SAI_PORT_STAT_IP_IN_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IP_IN_NON_UCAST_PKTS = (SAI_PORT_STAT_IP_IN_UCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IP_IN_DISCARDS = (SAI_PORT_STAT_IP_IN_NON_UCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IP_OUT_OCTETS = (SAI_PORT_STAT_IP_IN_DISCARDS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IP_OUT_UCAST_PKTS = (SAI_PORT_STAT_IP_OUT_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IP_OUT_NON_UCAST_PKTS = (SAI_PORT_STAT_IP_OUT_UCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IP_OUT_DISCARDS = (SAI_PORT_STAT_IP_OUT_NON_UCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_IN_RECEIVES = (SAI_PORT_STAT_IP_OUT_DISCARDS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_IN_OCTETS = (SAI_PORT_STAT_IPV6_IN_RECEIVES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_IN_UCAST_PKTS = (SAI_PORT_STAT_IPV6_IN_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_IN_NON_UCAST_PKTS = (SAI_PORT_STAT_IPV6_IN_UCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_IN_MCAST_PKTS = (SAI_PORT_STAT_IPV6_IN_NON_UCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_IN_DISCARDS = (SAI_PORT_STAT_IPV6_IN_MCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_OUT_OCTETS = (SAI_PORT_STAT_IPV6_IN_DISCARDS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_OUT_UCAST_PKTS = (SAI_PORT_STAT_IPV6_OUT_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_OUT_NON_UCAST_PKTS = (SAI_PORT_STAT_IPV6_OUT_UCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_OUT_MCAST_PKTS = (SAI_PORT_STAT_IPV6_OUT_NON_UCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IPV6_OUT_DISCARDS = (SAI_PORT_STAT_IPV6_OUT_MCAST_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_GREEN_DISCARD_DROPPED_PACKETS = (SAI_PORT_STAT_IPV6_OUT_DISCARDS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_GREEN_DISCARD_DROPPED_BYTES = (SAI_PORT_STAT_GREEN_DISCARD_DROPPED_PACKETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_YELLOW_DISCARD_DROPPED_PACKETS = (SAI_PORT_STAT_GREEN_DISCARD_DROPPED_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_YELLOW_DISCARD_DROPPED_BYTES = (SAI_PORT_STAT_YELLOW_DISCARD_DROPPED_PACKETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_RED_DISCARD_DROPPED_PACKETS = (SAI_PORT_STAT_YELLOW_DISCARD_DROPPED_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_RED_DISCARD_DROPPED_BYTES = (SAI_PORT_STAT_RED_DISCARD_DROPPED_PACKETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_DISCARD_DROPPED_PACKETS = (SAI_PORT_STAT_RED_DISCARD_DROPPED_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_DISCARD_DROPPED_BYTES = (SAI_PORT_STAT_DISCARD_DROPPED_PACKETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ECN_MARKED_PACKETS = (SAI_PORT_STAT_DISCARD_DROPPED_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS = (SAI_PORT_STAT_ECN_MARKED_PACKETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS = (SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS = (SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS = (SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS = (SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
# --- Auto-generated SAI (Switch Abstraction Interface) bindings (continued) ---
# ctypesgen-style output derived from the SAI C headers (see the trailing
# path comments). Each constant mirrors a member of the C enum
# sai_port_stat_t; values are defined as "previous + 1" to reproduce the
# implicit C enum ordering. Do not edit by hand — regenerate from the headers.
SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS = (SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS = (SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS = (SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS = (SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS = (SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS = (SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IN_CURR_OCCUPANCY_BYTES = (SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IN_WATERMARK_BYTES = (SAI_PORT_STAT_IN_CURR_OCCUPANCY_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IN_SHARED_CURR_OCCUPANCY_BYTES = (SAI_PORT_STAT_IN_WATERMARK_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IN_SHARED_WATERMARK_BYTES = (SAI_PORT_STAT_IN_SHARED_CURR_OCCUPANCY_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_OUT_CURR_OCCUPANCY_BYTES = (SAI_PORT_STAT_IN_SHARED_WATERMARK_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_OUT_WATERMARK_BYTES = (SAI_PORT_STAT_OUT_CURR_OCCUPANCY_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_OUT_SHARED_CURR_OCCUPANCY_BYTES = (SAI_PORT_STAT_OUT_WATERMARK_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_OUT_SHARED_WATERMARK_BYTES = (SAI_PORT_STAT_OUT_SHARED_CURR_OCCUPANCY_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_IN_DROPPED_PKTS = (SAI_PORT_STAT_OUT_SHARED_WATERMARK_BYTES + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_OUT_DROPPED_PKTS = (SAI_PORT_STAT_IN_DROPPED_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PAUSE_RX_PKTS = (SAI_PORT_STAT_OUT_DROPPED_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PAUSE_TX_PKTS = (SAI_PORT_STAT_PAUSE_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
# Per-priority PFC (priority flow control) RX/TX packet counters, 0..7.
SAI_PORT_STAT_PFC_0_RX_PKTS = (SAI_PORT_STAT_PAUSE_TX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_0_TX_PKTS = (SAI_PORT_STAT_PFC_0_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_1_RX_PKTS = (SAI_PORT_STAT_PFC_0_TX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_1_TX_PKTS = (SAI_PORT_STAT_PFC_1_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_2_RX_PKTS = (SAI_PORT_STAT_PFC_1_TX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_2_TX_PKTS = (SAI_PORT_STAT_PFC_2_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_3_RX_PKTS = (SAI_PORT_STAT_PFC_2_TX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_3_TX_PKTS = (SAI_PORT_STAT_PFC_3_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_4_RX_PKTS = (SAI_PORT_STAT_PFC_3_TX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_4_TX_PKTS = (SAI_PORT_STAT_PFC_4_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_5_RX_PKTS = (SAI_PORT_STAT_PFC_4_TX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_5_TX_PKTS = (SAI_PORT_STAT_PFC_5_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_6_RX_PKTS = (SAI_PORT_STAT_PFC_5_TX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_6_TX_PKTS = (SAI_PORT_STAT_PFC_6_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_7_RX_PKTS = (SAI_PORT_STAT_PFC_6_TX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_7_TX_PKTS = (SAI_PORT_STAT_PFC_7_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_0_ON2OFF_RX_PKTS = (SAI_PORT_STAT_PFC_7_TX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_1_ON2OFF_RX_PKTS = (SAI_PORT_STAT_PFC_0_ON2OFF_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_2_ON2OFF_RX_PKTS = (SAI_PORT_STAT_PFC_1_ON2OFF_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_3_ON2OFF_RX_PKTS = (SAI_PORT_STAT_PFC_2_ON2OFF_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_4_ON2OFF_RX_PKTS = (SAI_PORT_STAT_PFC_3_ON2OFF_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_5_ON2OFF_RX_PKTS = (SAI_PORT_STAT_PFC_4_ON2OFF_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_6_ON2OFF_RX_PKTS = (SAI_PORT_STAT_PFC_5_ON2OFF_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_PFC_7_ON2OFF_RX_PKTS = (SAI_PORT_STAT_PFC_6_ON2OFF_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_EEE_TX_EVENT_COUNT = (SAI_PORT_STAT_PFC_7_ON2OFF_RX_PKTS + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_EEE_RX_EVENT_COUNT = (SAI_PORT_STAT_EEE_TX_EVENT_COUNT + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_EEE_TX_DURATION = (SAI_PORT_STAT_EEE_RX_EVENT_COUNT + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
SAI_PORT_STAT_EEE_RX_DURATION = (SAI_PORT_STAT_EEE_TX_DURATION + 1) # /home/omer/P4/SAI/inc/saiport.h: 1465
# Python-level alias for the C enum type.
sai_port_stat_t = enum__sai_port_stat_t # /home/omer/P4/SAI/inc/saiport.h: 1465
# ctypes function-pointer prototypes for the SAI port API entry points.
sai_create_port_fn = CFUNCTYPE(UNCHECKED(sai_status_t), POINTER(sai_object_id_t), sai_object_id_t, c_uint32, POINTER(sai_attribute_t)) # /home/omer/P4/SAI/inc/saiport.h: 1477
sai_remove_port_fn = CFUNCTYPE(UNCHECKED(sai_status_t), sai_object_id_t) # /home/omer/P4/SAI/inc/saiport.h: 1489
sai_set_port_attribute_fn = CFUNCTYPE(UNCHECKED(sai_status_t), sai_object_id_t, POINTER(sai_attribute_t)) # /home/omer/P4/SAI/inc/saiport.h: 1500
sai_get_port_attribute_fn = CFUNCTYPE(UNCHECKED(sai_status_t), sai_object_id_t, c_uint32, POINTER(sai_attribute_t)) # /home/omer/P4/SAI/inc/saiport.h: 1513
sai_get_port_stats_fn = CFUNCTYPE(UNCHECKED(sai_status_t), sai_object_id_t, POINTER(sai_port_stat_t), c_uint32, POINTER(c_uint64)) # /home/omer/P4/SAI/inc/saiport.h: 1528
sai_clear_port_stats_fn = CFUNCTYPE(UNCHECKED(sai_status_t), sai_object_id_t, POINTER(sai_port_stat_t), c_uint32) # /home/omer/P4/SAI/inc/saiport.h: 1543
sai_clear_port_all_stats_fn = CFUNCTYPE(UNCHECKED(sai_status_t), sai_object_id_t) # /home/omer/P4/SAI/inc/saiport.h: 1555
sai_port_state_change_notification_fn = CFUNCTYPE(UNCHECKED(None), c_uint32, POINTER(sai_port_oper_status_notification_t)) # /home/omer/P4/SAI/inc/saiport.h: 1566
# /home/omer/P4/SAI/inc/saiport.h: 1583
# ctypes mirror of the C struct sai_port_api_t: a vtable of function
# pointers for the SAI port API. Declared empty first, then populated via
# __slots__/_fields_ (the two-step pattern ctypesgen uses for structs that
# may be self- or forward-referenced).
class struct__sai_port_api_t(Structure):
    pass
struct__sai_port_api_t.__slots__ = [
    'create_port',
    'remove_port',
    'set_port_attribute',
    'get_port_attribute',
    'get_port_stats',
    'clear_port_stats',
    'clear_port_all_stats',
]
struct__sai_port_api_t._fields_ = [
    ('create_port', sai_create_port_fn),
    ('remove_port', sai_remove_port_fn),
    ('set_port_attribute', sai_set_port_attribute_fn),
    ('get_port_attribute', sai_get_port_attribute_fn),
    ('get_port_stats', sai_get_port_stats_fn),
    ('clear_port_stats', sai_clear_port_stats_fn),
    ('clear_port_all_stats', sai_clear_port_all_stats_fn),
]
# Python-level alias matching the C typedef name.
sai_port_api_t = struct__sai_port_api_t # /home/omer/P4/SAI/inc/saiport.h: 1583
# --- Generated bindings for the SAI QoS-map API (saiqosmap.h) ---
# C enum sai_qos_map_type_t: which mapping a QoS map object implements.
enum__sai_qos_map_type_t = c_int # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_DOT1P_TO_TC = 0 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_DOT1P_TO_COLOR = 1 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_DSCP_TO_TC = 2 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_DSCP_TO_COLOR = 3 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_TC_TO_QUEUE = 4 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DSCP = 5 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DOT1P = 6 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_TC_TO_PRIORITY_GROUP = 7 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_PFC_PRIORITY_TO_PRIORITY_GROUP = 8 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_PFC_PRIORITY_TO_QUEUE = 9 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
SAI_QOS_MAP_TYPE_CUSTOM_RANGE_BASE = 268435456 # /home/omer/P4/SAI/inc/saiqosmap.h: 74
sai_qos_map_type_t = enum__sai_qos_map_type_t # /home/omer/P4/SAI/inc/saiqosmap.h: 74
# C enum sai_qos_map_attr_t: attribute ids for QoS map objects
# (268435456 == 0x10000000, the vendor/custom attribute range base).
enum__sai_qos_map_attr_t = c_int # /home/omer/P4/SAI/inc/saiqosmap.h: 118
SAI_QOS_MAP_ATTR_START = 0 # /home/omer/P4/SAI/inc/saiqosmap.h: 118
SAI_QOS_MAP_ATTR_TYPE = SAI_QOS_MAP_ATTR_START # /home/omer/P4/SAI/inc/saiqosmap.h: 118
SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST = 1 # /home/omer/P4/SAI/inc/saiqosmap.h: 118
SAI_QOS_MAP_ATTR_END = (SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST + 1) # /home/omer/P4/SAI/inc/saiqosmap.h: 118
SAI_QOS_MAP_ATTR_CUSTOM_RANGE_START = 268435456 # /home/omer/P4/SAI/inc/saiqosmap.h: 118
SAI_QOS_MAP_ATTR_CUSTOM_RANGE_END = (SAI_QOS_MAP_ATTR_CUSTOM_RANGE_START + 1) # /home/omer/P4/SAI/inc/saiqosmap.h: 118
sai_qos_map_attr_t = enum__sai_qos_map_attr_t # /home/omer/P4/SAI/inc/saiqosmap.h: 118
# ctypes function-pointer prototypes for the QoS-map API entry points.
sai_create_qos_map_fn = CFUNCTYPE(UNCHECKED(sai_status_t), POINTER(sai_object_id_t), sai_object_id_t, c_uint32, POINTER(sai_attribute_t)) # /home/omer/P4/SAI/inc/saiqosmap.h: 130
sai_remove_qos_map_fn = CFUNCTYPE(UNCHECKED(sai_status_t), sai_object_id_t) # /home/omer/P4/SAI/inc/saiqosmap.h: 143
sai_set_qos_map_attribute_fn = CFUNCTYPE(UNCHECKED(sai_status_t), sai_object_id_t, POINTER(sai_attribute_t)) # /home/omer/P4/SAI/inc/saiqosmap.h: 154
sai_get_qos_map_attribute_fn = CFUNCTYPE(UNCHECKED(sai_status_t), sai_object_id_t, c_uint32, POINTER(sai_attribute_t)) # /home/omer/P4/SAI/inc/saiqosmap.h: 167
# /home/omer/P4/SAI/inc/saiqosmap.h: 182
# ctypes mirror of the C struct sai_qos_map_api_t: vtable of function
# pointers for the SAI QoS-map API (same two-step declaration pattern as
# the other generated API structs).
class struct__sai_qos_map_api_t(Structure):
    pass
struct__sai_qos_map_api_t.__slots__ = [
    'create_qos_map',
    'remove_qos_map',
    'set_qos_map_attribute',
    'get_qos_map_attribute',
]
struct__sai_qos_map_api_t._fields_ = [
    ('create_qos_map', sai_create_qos_map_fn),
    ('remove_qos_map', sai_remove_qos_map_fn),
    ('set_qos_map_attribute', sai_set_qos_map_attribute_fn),
    ('get_qos_map_attribute', sai_get_qos_map_attribute_fn),
]
# Python-level alias matching the C typedef name.
sai_qos_map_api_t = struct__sai_qos_map_api_t # /home/omer/P4/SAI/inc/saiqosmap.h: 182
# --- Generated bindings for the SAI queue API (saiqueue.h) ---
# C enum sai_queue_type_t.
enum__sai_queue_type_t = c_int # /home/omer/P4/SAI/inc/saiqueue.h: 53
SAI_QUEUE_TYPE_ALL = 0 # /home/omer/P4/SAI/inc/saiqueue.h: 53
SAI_QUEUE_TYPE_UNICAST = 1 # /home/omer/P4/SAI/inc/saiqueue.h: 53
SAI_QUEUE_TYPE_MULTICAST = 2 # /home/omer/P4/SAI/inc/saiqueue.h: 53
SAI_QUEUE_TYPE_CUSTOM_RANGE_BASE = 268435456 # /home/omer/P4/SAI/inc/saiqueue.h: 53
sai_queue_type_t = enum__sai_queue_type_t # /home/omer/P4/SAI/inc/saiqueue.h: 53
# C enum sai_queue_attr_t: attribute ids for queue objects.
enum__sai_queue_attr_t = c_int # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_START = 0 # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_TYPE = SAI_QUEUE_ATTR_START # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_PORT = 1 # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_INDEX = 2 # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_PARENT_SCHEDULER_NODE = 3 # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_WRED_PROFILE_ID = 4 # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_BUFFER_PROFILE_ID = 5 # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_SCHEDULER_PROFILE_ID = 6 # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_PAUSE_STATUS = 7 # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_END = (SAI_QUEUE_ATTR_PAUSE_STATUS + 1) # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_CUSTOM_RANGE_START = 268435456 # /home/omer/P4/SAI/inc/saiqueue.h: 164
SAI_QUEUE_ATTR_CUSTOM_RANGE_END = (SAI_QUEUE_ATTR_CUSTOM_RANGE_START + 1) # /home/omer/P4/SAI/inc/saiqueue.h: 164
sai_queue_attr_t = enum__sai_queue_attr_t # /home/omer/P4/SAI/inc/saiqueue.h: 164
# C enum sai_queue_stat_t: per-queue statistics counter ids.
enum__sai_queue_stat_t = c_int # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_PACKETS = 0 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_BYTES = 1 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_DROPPED_PACKETS = 2 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_DROPPED_BYTES = 3 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_GREEN_PACKETS = 4 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_GREEN_BYTES = 5 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_GREEN_DROPPED_PACKETS = 6 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_GREEN_DROPPED_BYTES = 7 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_YELLOW_PACKETS = 8 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_YELLOW_BYTES = 9 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_YELLOW_DROPPED_PACKETS = 10 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_YELLOW_DROPPED_BYTES = 11 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_RED_PACKETS = 12 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_RED_BYTES = 13 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_RED_DROPPED_PACKETS = 14 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_RED_DROPPED_BYTES = 15 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_GREEN_DISCARD_DROPPED_PACKETS = 16 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_GREEN_DISCARD_DROPPED_BYTES = 17 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_YELLOW_DISCARD_DROPPED_PACKETS = 18 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_YELLOW_DISCARD_DROPPED_BYTES = 19 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_RED_DISCARD_DROPPED_PACKETS = 20 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_RED_DISCARD_DROPPED_BYTES = 21 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_DISCARD_DROPPED_PACKETS = 22 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_DISCARD_DROPPED_BYTES = 23 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES = 24 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_WATERMARK_BYTES = 25 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_SHARED_CURR_OCCUPANCY_BYTES = 26 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES = 27 # /home/omer/P4/SAI/inc/saiqueue.h: 258
SAI_QUEUE_STAT_CUSTOM_RANGE_BASE = 268435456 # /home/omer/P4/SAI/inc/saiqueue.h: 258
sai_queue_stat_t = enum__sai_queue_stat_t # /home/omer/P4/SAI/inc/saiqueue.h: 258
sai_create_queue_fn = CFUNCTYPE(UNCHECKED(sai_status_t), POINTER(sai_object_id_t), sai_object_id_t, c_uint32, POINTER(sai_attribute_t)) # /home/omer/P4/SAI/inc/saiqueue.h: | |
'forbes island': 0.0,
'foreign cinema': 0.0,
'frances': 0.0,
'franchino': 0.0,
'franciscan crab restaurant': 0.0,
'frascati': 0.0,
'fresca': 0.0,
'fringale': 0.0,
'fujiyama ya japanese restaurant': 0.0,
'gajalee': 0.0,
'gamine': 0.0,
'garcon restaurant': 0.0,
'g<NAME>': 0.0,
'gitane': 0.0,
'golden era restaurant': 0.0,
'gracias madre': 0.0,
'great eastern restaurant': 1.0,
'hakka restaurant': 0.0,
'hakkasan': 0.0,
'han second kwan': 0.0,
'heirloom cafe': 0.0,
'helmand palace': 0.0,
'hi dive': 0.0,
'hillside supper club': 0.0,
'hillstone': 0.0,
'hong kong clay pot restaurant': 0.0,
'house of nanking': 0.0,
'house of prime rib': 0.0,
'hunan homes restaurant': 0.0,
'incanto': 0.0,
'isa': 0.0,
'jannah': 0.0,
'jasmine garden': 0.0,
'jitlada thai cuisine': 0.0,
'kappa japanese restaurant': 0.0,
'kim thanh restaurant': 0.0,
'kirin chinese restaurant': 0.0,
'kiss seafood': 0.0,
'kokkari estiatorio': 0.0,
'la briciola': 0.0,
'la ciccia': 0.0,
'la folie': 0.0,
'la mediterranee': 0.0,
'la traviata': 0.0,
'lahore karahi': 0.0,
'lavash': 0.0,
'le charm': 0.0,
'le colonial': 0.0,
'le soleil': 0.0,
'lime tree southeast asian kitchen': 0.0,
'little delhi': 0.0,
'little nepal': 0.0,
'luce': 0.0,
'lucky creation restaurant': 0.0,
'luella': 0.0,
'lupa': 0.0,
'm y china': 0.0,
'maki restaurant': 0.0,
'mangia tutti ristorante': 0.0,
'manna': 0.0,
'marlowe': 0.0,
'marnee thai': 0.0,
'maverick': 0.0,
'mela tandoori kitchen': 0.0,
'mescolanza': 0.0,
'mezes': 0.0,
'michael mina restaurant': 0.0,
'millennium': 0.0,
'minako organic japanese restaurant': 0.0,
'minami restaurant': 0.0,
'mission chinese food': 0.0,
'mochica': 0.0,
'modern thai': 0.0,
'mona lisa restaurant': 0.0,
'mozzeria': 0.0,
'muguboka restaurant': 0.0,
'my tofu house': 0.0,
'nicaragua restaurant': 0.0,
'nob hill cafe': 0.0,
'nopa': 0.0,
'old jerusalem restaurant': 0.0,
'old skool cafe': 0.0,
'one market restaurant': 0.0,
'orexi': 0.0,
'original us restaurant': 0.0,
'osha thai': 0.0,
'oyaji restaurant': 0.0,
'ozumo': 0.0,
'pad thai restaurant': 0.0,
'panta rei restaurant': 0.0,
'park tavern': 0.0,
'pera': 0.0,
'piperade': 0.0,
'ploy 2': 0.0,
'poc chuc': 0.0,
'poesia': 0.0,
'prospect': 0.0,
'quince': 0.0,
'radius san francisco': 0.0,
'range': 0.0,
'red door cafe': 0.0,
'restaurant ducroix': 0.0,
'ristorante bacco': 0.0,
'ristorante ideale': 0.0,
'ristorante milano': 0.0,
'ristorante parma': 0.0,
'rn74': 0.0,
'rue lepic': 0.0,
'saha': 0.0,
'sai jai thai restaurant': 0.0,
'salt house': 0.0,
'san tung chinese restaurant': 0.0,
'san wang restaurant': 0.0,
'sanjalisco': 0.0,
'sanraku': 0.0,
'seasons': 0.0,
'seoul garden': 0.0,
'seven hills': 0.0,
'shangri la vegetarian restaurant': 0.0,
'singapore malaysian restaurant': 0.0,
'skool': 0.0,
'so': 0.0,
'sotto mare': 0.0,
'source': 0.0,
'specchio ristorante': 0.0,
'spruce': 0.0,
'straits restaurant': 0.0,
'stroganoff restaurant': 0.0,
'sunflower potrero hill': 0.0,
'sushi bistro': 0.0,
'taiwan restaurant': 0.0,
'tanuki restaurant': 0.0,
'tataki': 0.0,
'tekka japanese restaurant': 0.0,
'thai cottage restaurant': 0.0,
'thai house express': 0.0,
'thai idea vegetarian': 0.0,
'thai time restaurant': 0.0,
'thanh long': 0.0,
'the big 4 restaurant': 0.0,
'the blue plate': 0.0,
'the house': 0.0,
'the richmond': 0.0,
'the slanted door': 0.0,
'the stinking rose': 0.0,
'thep phanom thai restaurant': 0.0,
'tommys joynt': 0.0,
'toraya japanese restaurant': 0.0,
'town hall': 0.0,
'trattoria contadina': 0.0,
'tu lan': 0.0,
'tuba restaurant': 0.0,
'u lee restaurant': 0.0,
'udupi palace': 0.0,
'venticello ristorante': 0.0,
'vicoletto': 0.0,
'yank sing': 0.0,
'yummy yummy': 0.0,
'z and y restaurant': 0.0,
'zadin': 0.0,
'zare at fly trap': 0.0,
'zarzuela': 0.0,
'zen yai thai restaurant': 0.0,
'zuni cafe': 0.0,
'zushi puzzle': 0.0},
'near': {'**NONE**': 0.0,
'bayview hunters point': 0.0,
'dontcare': 1.0,
'haight': 0.0,
'japantown': 0.0,
'marina cow hollow': 0.0,
'mission': 0.0,
'nopa': 0.0,
'north beach telegraph hill': 0.0,
'soma': 0.0,
'union square': 0.0},
'price': {'**NONE**': 1.0,
'10 dollar': 0.0,
'10 euro': 0.0,
'11 euro': 0.0,
'15 euro': 0.0,
'18 euro': 0.0,
'20 euro': 0.0,
'22 euro': 0.0,
'25 euro': 0.0,
'26 euro': 0.0,
'29 euro': 0.0,
'37 euro': 0.0,
'6': 0.0,
'7': 0.0,
'9': 0.0,
'between 0 and 15 euro': 0.0,
'between 10 and 13 euro': 0.0,
'between 10 and 15 euro': 0.0,
'between 10 and 18 euro': 0.0,
'between 10 and 20 euro': 0.0,
'between 10 and 23 euro': 0.0,
'between 10 and 30 euro': 0.0,
'between 11 and 15 euro': 0.0,
'between 11 and 18 euro': 0.0,
'between 11 and 22 euro': 0.0,
'between 11 and 25 euro': 0.0,
'between 11 and 29 euro': 0.0,
'between 11 and 35 euro': 0.0,
'between 13 and 15 euro': 0.0,
'between 13 and 18 euro': 0.0,
'between 13 and 24 euro': 0.0,
'between 15 and 18 euro': 0.0,
'between 15 and 22 euro': 0.0,
'between 15 and 26 euro': 0.0,
'between 15 and 29 euro': 0.0,
'between 15 and 33 euro': 0.0,
'between 15 and 44 euro': 0.0,
'between 15 and 58 euro': 0.0,
'between 18 and 26 euro': 0.0,
'between 18 and 29 euro': 0.0,
'between 18 and 44 euro': 0.0,
'between 18 and 55 euro': 0.0,
'between 18 and 58 euro': 0.0,
'between 18 and 73 euro': 0.0,
'between 18 and 78 euro': 0.0,
'between 2 and 15 euro': 0.0,
'between 20 and 30 euro': 0.0,
'between 21 and 23 euro': 0.0,
'between 22 and 29 euro': 0.0,
'between 22 and 30 dollar': 0.0,
'between 22 and 37 euro': 0.0,
'between 22 and 58 euro': 0.0,
'between 22 and 73 euro': 0.0,
'between 23 and 29': 0.0,
'between 23 and 29 euro': 0.0,
'between 23 and 37 euro': 0.0,
'between 23 and 58': 0.0,
'between 23 and 58 euro': 0.0,
'between 26 and 33 euro': 0.0,
'between 26 and 34 euro': 0.0,
'between 26 and 37 euro': 0.0,
'between 29 and 37 euro': 0.0,
'between 29 and 44 euro': 0.0,
'between 29 and 58 euro': 0.0,
'between 29 and 73 euro': 0.0,
'between 30 and 58': 0.0,
'between 30 and 58 euro': 0.0,
'between 31 and 50 euro': 0.0,
'between 37 and 110 euro': 0.0,
'between 37 and 44 euro': 0.0,
'between 37 and 58 euro': 0.0,
'between 4 and 22 euro': 0.0,
'between 4 and 58 euro': 0.0,
'between 5 an 30 euro': 0.0,
'between 5 and 10 euro': 0.0,
'between 5 and 11 euro': 0.0,
'between 5 and 15 dollar': 0.0,
'between 5 and 20 euro': 0.0,
'between 5 and 25 euro': 0.0,
'between 6 and 10 euro': 0.0,
'between 6 and 11 euro': 0.0,
'between 6 and 15 euro': 0.0,
'between 6 and 29 euro': 0.0,
'between 7 and 11 euro': 0.0,
'between 7 and 13 euro': 0.0,
'between 7 and 15 euro': 0.0,
'between 7 and 37 euro': 0.0,
'between 8 and 22 euro': 0.0,
'between 9 and 13 dolllar': 0.0,
'between 9 and 15 euro': 0.0,
'between 9 and 58 euro': 0.0,
'bteween 11 and 15 euro': 0.0,
'bteween 15 and 22 euro': 0.0,
'bteween 22 and 37': 0.0,
'bteween 30 and 58 euro': 0.0,
'bteween 51 and 73 euro': 0.0,
'netween 20 and 30 euro': 0.0},
'pricerange': {'**NONE**': 1.0,
'cheap': 0.0,
'dontcare': 0.0,
'expensive': 0.0,
'moderate': 0.0},
'requested': {'addr': 1.0,
'allowedforkids': 0.0,
'area': 0.0,
'food': 0.0,
'goodformeal': 0.0,
'name': 0.0,
'near': 0.0,
'phone': 1,
'postcode': 0.0,
'price': 0.0,
'pricerange': 0.0}},
'features': {'inform_info': [False,
False,
True,
False,
True,
False,
False,
True,
False,
True,
False,
False,
True,
False,
True,
False,
False,
True,
False,
True,
False,
False,
True,
False,
True],
'informedVenueSinceNone': ['great eastern restaurant',
'great eastern restaurant'],
'lastActionInformNone': False,
'lastInformedVenue': 'great eastern restaurant',
'offerHappened': False},
'userActs': [('request(name="<NAME>",phone)', 1.0)]}
b2 = {'beliefs': {'allowedforkids': {'**NONE**': 0.014367834316388661,
'0': 0.009175995595522114,
'1': 0.9579333306577846,
'dontcare': 0.01852283943030468},
'area': {'**NONE**': 0.9753165718480455,
'alamo square': 0.0,
'amanico ergina village': 0.0,
'anza vista': 0.0,
'ashbury heights': 0.0,
'balboa terrace': 0.0,
'bayview district': 0.0,
'bayview heights': 0.0,
'bernal heights': 0.0,
'bernal heights north': 0.0,
'bernal heights south': 0.0,
'buena vista park': 0.0,
'castro': 0.0,
'cathedral hill': 0.0,
'cayuga terrace': 0.0,
'central richmond': 0.0,
'central sunset': 0.0,
'central waterfront': 0.0,
'chinatown': 0.0,
'civic center': 0.0,
'clarendon heights': 0.0,
'cole valley': 0.0,
'corona heights': 0.0,
'cow hollow': 0.0,
'crocker amazon': 0.0,
'diamond heights': 0.0,
'doelger city': 0.0,
'dogpatch': 0.0,
'dolores heights': 0.0,
'dontcare': 0.0,
'downtown': 0.0,
'duboce triangle': 0.0,
'embarcadero': 0.0,
'eureka valley': 0.0,
'eureka valley dolores heights': 0.0,
'excelsior': 0.0,
'financial district': 0.0,
'financial district | |
# coding: utf-8
# Copyright (C) 2016 UKP lab
#
# Author: <NAME> (ukp.tu-darmstadt.de/ukp-home/)
#
import os
import ast, json
import numpy as np
np.random.seed(1)
from keras import layers, models, optimizers
from keras import backend as K
from keras import regularizers
import tqdm
from core import embeddings
from graph import graph_utils
# Shared resource directory (blacklists, vocabularies, ...) relative to the package.
RESOURCES_FOLDER = "../resources/"
# Resolve resource paths relative to this module's directory, not the CWD.
module_location = os.path.abspath(__file__)
module_location = os.path.dirname(module_location)
# Hyper-parameters and file names for the models defined below.
with open(os.path.join(module_location, "../model_params.json")) as f:
    model_params = json.load(f)
# Relations (properties) that should be excluded from training/indexing.
property_blacklist = embeddings.load_blacklist(os.path.join(module_location, "../../resources/property_blacklist.txt"))
property2idx = {}
# Mapping from property id -> class index, stored as a Python literal on disk.
with open(os.path.join(module_location, "../../resources/", model_params["property2idx"])) as f:
    property2idx = ast.literal_eval(f.read())
# Reverse mapping for decoding predictions back to property ids.
idx2property = {v: k for k, v in property2idx.items()}
# Position vocabulary covering relative offsets in [-max_sent_len, max_sent_len).
_, position2idx = embeddings.init_random(np.arange(-model_params['max_sent_len'], model_params['max_sent_len']),
                                         1, add_all_zeroes=True)
p0_index = 1
# Maximum number of relation edges modelled per sentence graph.
MAX_EDGES_PER_GRAPH = 7
# How entities are marked in the token sequence; "mark-bi" uses 5 marker
# symbols unless the legacy graph encoding is active.
POSITION_EMBEDDING_MODE = "mark-bi"
POSITION_VOCAB_SIZE = 5 if POSITION_EMBEDDING_MODE == "mark-bi" and not graph_utils.LEGACY_MODE else 4
def model_LSTMbaseline(p, embedding_matrix, max_sent_len, n_out):
    """Build the baseline (bi)LSTM relation classifier.

    Inputs are token indices plus entity-position markers; the output is a
    softmax over ``n_out`` relation classes.
    """
    print("Parameters:", p)
    # Token indices -> frozen pretrained word embeddings (mask_zero handles padding).
    sentence_input = layers.Input(shape=(max_sent_len,), dtype='int32', name='sentence_input')
    word_embeddings = layers.Embedding(output_dim=embedding_matrix.shape[1],
                                       input_dim=embedding_matrix.shape[0],
                                       input_length=max_sent_len,
                                       weights=[embedding_matrix],
                                       mask_zero=True,
                                       trainable=False)(sentence_input)
    word_embeddings = layers.Dropout(p['dropout1'])(word_embeddings)
    # Entity markers -> trainable, L2-regularized position embeddings.
    entity_markers = layers.Input(shape=(max_sent_len,), dtype='int8', name='entity_markers')
    pos_embeddings = layers.Embedding(output_dim=p['position_emb'],
                                      input_dim=POSITION_VOCAB_SIZE,
                                      input_length=max_sent_len,
                                      mask_zero=True,
                                      embeddings_regularizer=regularizers.l2(),
                                      trainable=True)(entity_markers)

    def _maybe_bidi(rnn):
        # Wrap an LSTM in Bidirectional when the hyper-parameters ask for it.
        return layers.Bidirectional(rnn) if p['bidirectional'] else rnn

    # Stack the configured number of RNN layers; all but the last return
    # full sequences so they can be chained.
    x = layers.concatenate([word_embeddings, pos_embeddings])
    for _ in range(p["rnn1_layers"] - 1):
        x = _maybe_bidi(layers.LSTM(p['units1'], return_sequences=True))(x)
    sentence_vector = _maybe_bidi(layers.LSTM(p['units1'], return_sequences=False))(x)
    # Classification head.
    sentence_vector = layers.Dropout(p['dropout1'])(sentence_vector)
    main_output = layers.Dense(n_out, activation="softmax", name='main_output')(sentence_vector)
    model = models.Model(inputs=[sentence_input, entity_markers], outputs=[main_output])
    model.compile(optimizer=p['optimizer'], loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def model_CNN(p, embedding_matrix, max_sent_len, n_out):
    """Build the CNN relation classifier.

    Takes token indices plus two per-entity position-marker sequences and
    predicts a softmax over ``n_out`` relation classes.
    """
    print("Parameters:", p)
    # Token indices -> frozen pretrained word embeddings (mask_zero handles padding).
    sentence_input = layers.Input(shape=(max_sent_len,), dtype='int32', name='sentence_input')
    word_embeddings = layers.Embedding(output_dim=embedding_matrix.shape[1],
                                       input_dim=embedding_matrix.shape[0],
                                       input_length=max_sent_len, weights=[embedding_matrix],
                                       mask_zero=True, trainable=False)(sentence_input)
    word_embeddings = layers.Dropout(p['dropout1'])(word_embeddings)
    # Two marker sequences (one per entity) -> position embeddings, then
    # rearranged to (timesteps, 2 * position_emb) so they can be concatenated
    # with the word embeddings along the feature axis.
    entity_markers = layers.Input(shape=(2, max_sent_len,), dtype='int8', name='entity_markers')
    pos_embeddings = layers.wrappers.TimeDistributed(
        layers.Embedding(output_dim=p['position_emb'], input_dim=(max_sent_len*2)+1, input_length=max_sent_len,
                         mask_zero=False, embeddings_regularizer=regularizers.l2(), trainable=True),
        name='pos_embedding')(entity_markers)
    pos_embeddings = layers.Permute((2, 1, 3))(pos_embeddings)
    pos_embeddings = layers.Reshape((max_sent_len, p['position_emb']*2))(pos_embeddings)
    # Convolution over the merged features, then global max pooling over time.
    # NOTE(review): nb_filter/filter_length/border_mode are Keras 1 argument
    # names forwarded through MaskedConvolution1D — confirm the installed
    # Keras version still accepts them.
    x = layers.concatenate([word_embeddings, pos_embeddings])
    x = MaskedConvolution1D(nb_filter=p['units1'], filter_length=p['window_size'], border_mode='same')(x)
    sentence_vector = MaskedGlobalMaxPooling1D()(x)
    sentence_vector = layers.Lambda(lambda l: K.tanh(l))(sentence_vector)
    # Classification head.
    sentence_vector = layers.Dropout(p['dropout1'])(sentence_vector)
    main_output = layers.Dense(n_out, activation="softmax", name='main_output')(sentence_vector)
    # FIX: use the Keras 2 keyword names (inputs/outputs), consistent with the
    # other model builders in this module; `input=`/`output=` is the Keras 1 API.
    model = models.Model(inputs=[sentence_input, entity_markers], outputs=[main_output])
    model.compile(optimizer=p['optimizer'], loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def masked_categorical_crossentropy(y_true, y_pred):
    """Categorical cross-entropy that ignores padded targets.

    A target whose first class has probability 1 is treated as padding and
    contributes zero to the loss (used for the unused edge slots in the
    fixed-size edge dimension of the graph models).
    """
    is_padding = K.equal(y_true[..., 0], K.variable(1))
    keep = 1 - K.cast(is_padding, K.floatx())
    return K.categorical_crossentropy(y_true, y_pred) * keep
def model_ContextSum(p, embedding_matrix, max_sent_len, n_out):
    """Build the ContextSum model.

    Classifies every edge of a sentence graph; each edge is represented by
    its own sentence vector concatenated with the (unweighted) sum of the
    other edges' sentence vectors. Mirrors model_ContextWeighted but with
    plain summation instead of attention.
    """
    print("Parameters:", p)
    # Take sentence encoded as indices and convert it to embeddings
    sentence_input = layers.Input(shape=(max_sent_len,), dtype='int32', name='sentence_input')
    # Repeat the input N times for each edge
    x = layers.RepeatVector(MAX_EDGES_PER_GRAPH)(sentence_input)
    word_embeddings = layers.wrappers.TimeDistributed(
        layers.Embedding(output_dim=embedding_matrix.shape[1], input_dim=embedding_matrix.shape[0],
                         input_length=max_sent_len,
                         # FIX: was weights=[embeddings] — that is the imported
                         # `embeddings` *module*, not the pretrained matrix argument.
                         weights=[embedding_matrix],
                         mask_zero=True, trainable=False))(x)
    word_embeddings = layers.Dropout(p['dropout1'])(word_embeddings)
    # Take token markers that identify entity positions, convert to position embeddings
    entity_markers = layers.Input(shape=(MAX_EDGES_PER_GRAPH, max_sent_len,), dtype='int8', name='entity_markers')
    pos_embeddings = layers.wrappers.TimeDistributed(
        layers.Embedding(output_dim=p['position_emb'],
                         input_dim=POSITION_VOCAB_SIZE, input_length=max_sent_len,
                         mask_zero=True, embeddings_regularizer=regularizers.l2(),
                         trainable=True))(entity_markers)
    # FIX: merge word and position embeddings before the RNN. This line was
    # missing, so the LSTM stack consumed the raw repeated index tensor and
    # both embedding branches were dead ends — cf. model_ContextWeighted.
    x = layers.concatenate([word_embeddings, pos_embeddings])
    for i in range(p["rnn1_layers"]-1):
        lstm_layer = layers.LSTM(p['units1'], return_sequences=True)
        if p['bidirectional']:
            lstm_layer = layers.Bidirectional(lstm_layer)
        x = layers.wrappers.TimeDistributed(lstm_layer)(x)
    lstm_layer = layers.LSTM(p['units1'], return_sequences=False)
    if p['bidirectional']:
        lstm_layer = layers.Bidirectional(lstm_layer)
    sentence_matrix = layers.wrappers.TimeDistributed(lstm_layer)(x)
    # Take the vector of the sentences with the target entity pair
    layers_to_concat = []
    num_units = p['units1'] * (2 if p['bidirectional'] else 1)
    for i in range(MAX_EDGES_PER_GRAPH):
        # Vector for the target edge, plus all remaining ("context") edge vectors.
        sentence_vector = layers.Lambda(lambda l: l[:, i], output_shape=(num_units,))(sentence_matrix)
        if i == 0:
            context_vectors = layers.Lambda(lambda l: l[:, i+1:], output_shape=(MAX_EDGES_PER_GRAPH-1, num_units))(sentence_matrix)
        elif i == MAX_EDGES_PER_GRAPH - 1:
            context_vectors = layers.Lambda(lambda l: l[:, :i], output_shape=(MAX_EDGES_PER_GRAPH-1, num_units))(sentence_matrix)
        else:
            context_vectors = layers.Lambda(lambda l: K.concatenate([l[:, :i], l[:, i+1:]], axis=1), output_shape=(MAX_EDGES_PER_GRAPH-1, num_units))(sentence_matrix)
        # Context is the plain sum of the other edges' vectors.
        context_vector = GlobalSumPooling1D()(context_vectors)
        edge_vector = layers.concatenate([sentence_vector, context_vector])
        edge_vector = layers.Reshape((1, num_units * 2))(edge_vector)
        layers_to_concat.append(edge_vector)
    edge_vectors = layers.Concatenate(1)(layers_to_concat)
    # Per-edge softmax; padded edge slots are excluded by the masked loss.
    edge_vectors = layers.Dropout(p['dropout1'])(edge_vectors)
    main_output = layers.wrappers.TimeDistributed(layers.Dense(n_out, activation="softmax", name='main_output'))(edge_vectors)
    model = models.Model(inputs=[sentence_input, entity_markers], outputs=[main_output])
    model.compile(optimizer=p['optimizer'], loss=masked_categorical_crossentropy, metrics=['accuracy'])
    return model
def model_ContextWeighted(p, embedding_matrix, max_sent_len, n_out):
    """Build the ContextWeighted model.

    Classifies every edge of a sentence graph; each edge is represented by
    its own sentence vector concatenated with an attention-weighted sum of
    the other edges' sentence vectors ("ghosts").

    Parameters: p — hyper-parameter dict (dropout1, position_emb, units1,
    rnn1_layers, bidirectional); embedding_matrix — pretrained word
    embedding matrix (vocab x dim); max_sent_len — tokens per sentence;
    n_out — number of relation classes. Returns a compiled Keras model.
    """
    print("Parameters:", p)
    # Take sentence encoded as indices and convert it to embeddings
    sentence_input = layers.Input(shape=(max_sent_len,), dtype='int32', name='sentence_input')
    # Repeat the input N times for each edge
    x = layers.RepeatVector(MAX_EDGES_PER_GRAPH)(sentence_input)
    word_embeddings = layers.wrappers.TimeDistributed(layers.Embedding(output_dim=embedding_matrix.shape[1], input_dim=embedding_matrix.shape[0],
                                                                      input_length=max_sent_len, weights=[embedding_matrix],
                                                                      mask_zero=True, trainable=False))(x)
    word_embeddings = layers.Dropout(p['dropout1'])(word_embeddings)
    # Take token markers that identify entity positions, convert to position embeddings
    entity_markers = layers.Input(shape=(MAX_EDGES_PER_GRAPH, max_sent_len,), dtype='int8', name='entity_markers')
    pos_embeddings = layers.wrappers.TimeDistributed(layers.Embedding(output_dim=p['position_emb'],
                                                                      input_dim=POSITION_VOCAB_SIZE, input_length=max_sent_len,
                                                                      mask_zero=True, embeddings_regularizer = regularizers.l2(),
                                                                      trainable=True))(entity_markers)
    # Merge word and position embeddings and apply the specified amount of RNN layers
    x = layers.concatenate([word_embeddings, pos_embeddings])
    for i in range(p["rnn1_layers"]-1):
        lstm_layer = layers.LSTM(p['units1'], return_sequences=True)
        if p['bidirectional']:
            lstm_layer = layers.Bidirectional(lstm_layer)
        x = layers.wrappers.TimeDistributed(lstm_layer)(x)
    # Final RNN layer collapses each edge's token sequence into one vector.
    lstm_layer = layers.LSTM(p['units1'], return_sequences=False)
    if p['bidirectional']:
        lstm_layer = layers.Bidirectional(lstm_layer)
    sentence_matrix = layers.wrappers.TimeDistributed(lstm_layer)(x)
    ### Attention over ghosts ###
    layers_to_concat = []
    num_units = p['units1'] * (2 if p['bidirectional'] else 1)
    for i in range(MAX_EDGES_PER_GRAPH):
        # Compute a memory vector for the target entity pair
        sentence_vector = layers.Lambda(lambda l: l[:, i], output_shape=(num_units,))(sentence_matrix)
        target_sentence_memory = layers.Dense(num_units,
                                              activation="linear", use_bias=False)(sentence_vector)
        # Slice out every edge vector except the target's (three cases keep
        # the slicing expressible without boolean masks).
        if i == 0:
            context_vectors = layers.Lambda(lambda l: l[:, i+1:],
                                            output_shape=(MAX_EDGES_PER_GRAPH-1, num_units))(sentence_matrix)
        elif i == MAX_EDGES_PER_GRAPH - 1:
            context_vectors = layers.Lambda(lambda l: l[:, :i],
                                            output_shape=(MAX_EDGES_PER_GRAPH-1, num_units))(sentence_matrix)
        else:
            context_vectors = layers.Lambda(lambda l: K.concatenate([l[:, :i], l[:, i+1:]], axis=1),
                                            output_shape=(MAX_EDGES_PER_GRAPH-1, num_units))(sentence_matrix)
        # Compute the score between each memory and the memory of the target entity pair
        sentence_scores = layers.Lambda(lambda inputs: K.batch_dot(inputs[0],
                                                                   inputs[1], axes=(1, 2)),
                                        output_shape=(MAX_EDGES_PER_GRAPH,))([target_sentence_memory, context_vectors])
        sentence_scores = layers.Activation('softmax')(sentence_scores)
        # Compute the final vector by taking the weighted sum of context vectors and the target entity vector
        context_vector = layers.Lambda(lambda inputs: K.batch_dot(inputs[0], inputs[1], axes=(1, 1)),
                                       output_shape=(num_units,))([context_vectors, sentence_scores])
        edge_vector = layers.concatenate([sentence_vector, context_vector])
        edge_vector = layers.Reshape((1, num_units * 2))(edge_vector)
        layers_to_concat.append(edge_vector)
    edge_vectors = layers.concatenate(layers_to_concat, axis=1)
    # Apply softmax
    edge_vectors = layers.Dropout(p['dropout1'])(edge_vectors)
    main_output = layers.wrappers.TimeDistributed(layers.Dense(n_out, activation="softmax", name='main_output'))(edge_vectors)
    model = models.Model(inputs=[sentence_input, entity_markers], outputs=[main_output])
    optimizer = optimizers.Adam(lr=0.001)
    # Padded edge slots are excluded from training via the masked loss.
    model.compile(optimizer=optimizer, loss=masked_categorical_crossentropy, metrics=['accuracy'])
    return model
class GlobalSumPooling1D(layers.Layer):
    """Global pooling layer that sums a 3D input over its temporal axis."""

    def __init__(self, **kwargs):
        super(GlobalSumPooling1D, self).__init__(**kwargs)
        # Expect (batch, timesteps, features) inputs.
        self.input_spec = [layers.InputSpec(ndim=3)]

    def compute_output_shape(self, input_shape):
        # (batch, timesteps, features) -> (batch, features)
        return (input_shape[0], input_shape[2])

    def call(self, x, mask=None):
        return K.sum(x, axis=1)
class MaskedConvolution1D(layers.Convolution1D):
    """Convolution1D variant that accepts an input mask and forwards it."""

    def __init__(self, **kwargs):
        # Declare masking support before the base constructor runs.
        self.supports_masking = True
        super(MaskedConvolution1D, self).__init__(**kwargs)

    def compute_mask(self, x, mask=None):
        # Pass the incoming mask through untouched.
        return mask
class MaskedGlobalMaxPooling1D(layers.pooling._GlobalPooling1D):
    """Global max pooling over the time axis that ignores masked positions.

    Masked (padded) timesteps are replaced with ``-inf`` before the max so
    they can never be selected.
    """

    def __init__(self, **kwargs):
        self.supports_masking = True
        super(MaskedGlobalMaxPooling1D, self).__init__(**kwargs)

    def call(self, x, mask=None):
        if mask is None:
            return K.max(x, axis=1)
        # Broadcast the (batch, time) mask over the feature axis and fill
        # masked entries with -inf so the max only sees real timesteps.
        if K.backend() == 'tensorflow':
            import tensorflow as tf
            return K.max(tf.where(mask[:, :, np.newaxis], x, -np.inf), axis=1)
        # Theano backend: K.switch provides the same select semantics.
        # (Removed stray debug print that logged "theano" on every call.)
        return K.max(K.switch(mask[:, :, np.newaxis], x, -np.inf), axis=1)

    def compute_mask(self, x, mask=None):
        # Pooling collapses the time axis, so no mask propagates downstream.
        return None
def to_indices(graphs, word2idx):
    """Convert graphs to (sentence, entity-marker, label) matrices, one row
    per non-blacklisted edge.

    Rows are zero-padded to ``model_params['max_sent_len']``.  Note that
    ``num_edges`` counts all edges, so blacklisted edges leave trailing
    all-zero rows (preserved from the original behavior).
    """
    max_sent_len = model_params['max_sent_len']
    num_edges = sum(1 for g in graphs for e in g['edgeSet'])
    sentences_matrix = np.zeros((num_edges, max_sent_len), dtype="int32")
    entity_matrix = np.zeros((num_edges, max_sent_len), dtype="int8")
    y_matrix = np.zeros(num_edges, dtype="int16")
    index = 0
    for g in tqdm.tqdm(graphs, ascii=True):
        token_sent_ids = embeddings.get_idx_sequence(g["tokens"], word2idx)
        if len(token_sent_ids) > max_sent_len:
            token_sent_ids = token_sent_ids[:max_sent_len]
        for edge in g["edgeSet"]:
            if edge['kbID'] not in property_blacklist:
                left_border, right_border = graph_utils.get_sentence_boundaries(g["tokens"], edge)
                entity_markers = [m for _, m in graph_utils.get_entity_indexed_vector(
                    g["tokens"], edge, mode=POSITION_EMBEDDING_MODE)][left_border:right_border]
                # BUG FIX: slice into a fresh local instead of reassigning
                # token_sent_ids, which previously shrank the token ids for
                # every subsequent edge of the same graph.
                edge_token_ids = token_sent_ids[left_border:right_border]
                sentences_matrix[index, :len(edge_token_ids)] = edge_token_ids
                entity_matrix[index, :len(edge_token_ids)] = entity_markers[:len(edge_token_ids)]
                _, property_kbid, _ = graph_utils.edge_to_kb_ids(edge, g)
                property_kbid = property2idx.get(property_kbid, property2idx[embeddings.all_zeroes])
                y_matrix[index] = property_kbid
                index += 1
    return [sentences_matrix, entity_matrix, y_matrix]
def to_indices_with_extracted_entities(graphs, word2idx):
    """Encode each (split) graph as one sentence row plus per-edge entity
    markers and relation labels stacked along a MAX_EDGES_PER_GRAPH axis."""
    max_sent_len = model_params['max_sent_len']
    graphs = split_graphs(graphs)
    num_graphs = len(graphs)
    sentences_matrix = np.zeros((num_graphs, max_sent_len), dtype="int32")
    entity_matrix = np.zeros(
        (num_graphs, MAX_EDGES_PER_GRAPH, max_sent_len), dtype="int8")
    y_matrix = np.zeros((num_graphs, MAX_EDGES_PER_GRAPH), dtype="int16")
    for row, g in enumerate(tqdm.tqdm(graphs, ascii=True)):
        # Truncate the token-id sequence to the model's maximum length.
        token_ids = embeddings.get_idx_sequence(g["tokens"], word2idx)[:max_sent_len]
        sentences_matrix[row, :len(token_ids)] = token_ids
        for slot, edge in enumerate(g["edgeSet"][:MAX_EDGES_PER_GRAPH]):
            markers = [m for _, m in graph_utils.get_entity_indexed_vector(
                g["tokens"], edge, mode=POSITION_EMBEDDING_MODE)]
            entity_matrix[row, slot, :len(token_ids)] = markers[:len(token_ids)]
            _, property_kbid, _ = graph_utils.edge_to_kb_ids(edge, g)
            y_matrix[row, slot] = property2idx.get(
                property_kbid, property2idx[embeddings.all_zeroes])
    return sentences_matrix, entity_matrix, y_matrix
def split_graphs(graphs):
    """Drop edge-less graphs and split any graph whose edgeSet exceeds
    MAX_EDGES_PER_GRAPH into copies holding at most that many edges each.

    Graphs that already fit are kept as the same objects (no copy)."""
    result = []
    for g in graphs:
        edges = g['edgeSet']
        if not edges:
            continue
        if len(edges) <= MAX_EDGES_PER_GRAPH:
            result.append(g)
        else:
            for start in range(0, len(edges), MAX_EDGES_PER_GRAPH):
                result.append(
                    {**g, "edgeSet": edges[start:start + MAX_EDGES_PER_GRAPH]})
    return result
def to_indices_with_relative_positions(graphs, word2idx):
max_sent_len = model_params['max_sent_len']
num_edges = len([e for g in graphs for e in g['edgeSet']])
sentences_matrix = np.zeros((num_edges, max_sent_len), dtype="int32")
entity_matrix = np.zeros((num_edges, 2, max_sent_len), dtype="int8")
y_matrix = np.zeros(num_edges, dtype="int16")
index = 0
max_entity_index = max_sent_len - 1
for g in tqdm.tqdm(graphs, ascii=True):
token_ids = embeddings.get_idx_sequence(g["tokens"], word2idx)
if len(token_ids) > max_sent_len:
token_ids = token_ids[:max_sent_len]
for edge in g["edgeSet"]:
sentences_matrix[index, :len(token_ids)] = token_ids
_, property_kbid, _ = graph_utils.edge_to_kb_ids(edge, g)
property_kbid = property2idx[property_kbid]
entity_vector = graph_utils.get_entity_indexed_vector(token_ids, edge, mode="position")
entity_vector = [(-max_entity_index if m1 < -max_entity_index | |
m.x47)*m.x449) + m.x448) == 0)
# NOTE(review): machine-generated Pyomo constraint block (c248-c304).  Each
# constraint m.cN couples one state variable to its predecessor through
# bilinear terms scaled by 0.0025 — presumably a discretized dynamic model
# emitted by a converter; do not hand-edit individual constraints.
m.c248 = Constraint(expr=m.x450 - (0.0025*(m.x47*(m.x248 - 10*m.x449) - (1 - m.x47)*m.x449 + m.x48*(m.x249 - 10*m.x450)
    - (1 - m.x48)*m.x450) + m.x449) == 0)
m.c249 = Constraint(expr=m.x451 - (0.0025*(m.x48*(m.x249 - 10*m.x450) - (1 - m.x48)*m.x450 + m.x49*(m.x250 - 10*m.x451)
    - (1 - m.x49)*m.x451) + m.x450) == 0)
m.c250 = Constraint(expr=m.x452 - (0.0025*(m.x49*(m.x250 - 10*m.x451) - (1 - m.x49)*m.x451 + m.x50*(m.x251 - 10*m.x452)
    - (1 - m.x50)*m.x452) + m.x451) == 0)
m.c251 = Constraint(expr=m.x453 - (0.0025*(m.x50*(m.x251 - 10*m.x452) - (1 - m.x50)*m.x452 + m.x51*(m.x252 - 10*m.x453)
    - (1 - m.x51)*m.x453) + m.x452) == 0)
m.c252 = Constraint(expr=m.x454 - (0.0025*(m.x51*(m.x252 - 10*m.x453) - (1 - m.x51)*m.x453 + m.x52*(m.x253 - 10*m.x454)
    - (1 - m.x52)*m.x454) + m.x453) == 0)
m.c253 = Constraint(expr=m.x455 - (0.0025*(m.x52*(m.x253 - 10*m.x454) - (1 - m.x52)*m.x454 + m.x53*(m.x254 - 10*m.x455)
    - (1 - m.x53)*m.x455) + m.x454) == 0)
m.c254 = Constraint(expr=m.x456 - (0.0025*(m.x53*(m.x254 - 10*m.x455) - (1 - m.x53)*m.x455 + m.x54*(m.x255 - 10*m.x456)
    - (1 - m.x54)*m.x456) + m.x455) == 0)
m.c255 = Constraint(expr=m.x457 - (0.0025*(m.x54*(m.x255 - 10*m.x456) - (1 - m.x54)*m.x456 + m.x55*(m.x256 - 10*m.x457)
    - (1 - m.x55)*m.x457) + m.x456) == 0)
m.c256 = Constraint(expr=m.x458 - (0.0025*(m.x55*(m.x256 - 10*m.x457) - (1 - m.x55)*m.x457 + m.x56*(m.x257 - 10*m.x458)
    - (1 - m.x56)*m.x458) + m.x457) == 0)
m.c257 = Constraint(expr=m.x459 - (0.0025*(m.x56*(m.x257 - 10*m.x458) - (1 - m.x56)*m.x458 + m.x57*(m.x258 - 10*m.x459)
    - (1 - m.x57)*m.x459) + m.x458) == 0)
m.c258 = Constraint(expr=m.x460 - (0.0025*(m.x57*(m.x258 - 10*m.x459) - (1 - m.x57)*m.x459 + m.x58*(m.x259 - 10*m.x460)
    - (1 - m.x58)*m.x460) + m.x459) == 0)
m.c259 = Constraint(expr=m.x461 - (0.0025*(m.x58*(m.x259 - 10*m.x460) - (1 - m.x58)*m.x460 + m.x59*(m.x260 - 10*m.x461)
    - (1 - m.x59)*m.x461) + m.x460) == 0)
m.c260 = Constraint(expr=m.x462 - (0.0025*(m.x59*(m.x260 - 10*m.x461) - (1 - m.x59)*m.x461 + m.x60*(m.x261 - 10*m.x462)
    - (1 - m.x60)*m.x462) + m.x461) == 0)
m.c261 = Constraint(expr=m.x463 - (0.0025*(m.x60*(m.x261 - 10*m.x462) - (1 - m.x60)*m.x462 + m.x61*(m.x262 - 10*m.x463)
    - (1 - m.x61)*m.x463) + m.x462) == 0)
m.c262 = Constraint(expr=m.x464 - (0.0025*(m.x61*(m.x262 - 10*m.x463) - (1 - m.x61)*m.x463 + m.x62*(m.x263 - 10*m.x464)
    - (1 - m.x62)*m.x464) + m.x463) == 0)
m.c263 = Constraint(expr=m.x465 - (0.0025*(m.x62*(m.x263 - 10*m.x464) - (1 - m.x62)*m.x464 + m.x63*(m.x264 - 10*m.x465)
    - (1 - m.x63)*m.x465) + m.x464) == 0)
m.c264 = Constraint(expr=m.x466 - (0.0025*(m.x63*(m.x264 - 10*m.x465) - (1 - m.x63)*m.x465 + m.x64*(m.x265 - 10*m.x466)
    - (1 - m.x64)*m.x466) + m.x465) == 0)
m.c265 = Constraint(expr=m.x467 - (0.0025*(m.x64*(m.x265 - 10*m.x466) - (1 - m.x64)*m.x466 + m.x65*(m.x266 - 10*m.x467)
    - (1 - m.x65)*m.x467) + m.x466) == 0)
m.c266 = Constraint(expr=m.x468 - (0.0025*(m.x65*(m.x266 - 10*m.x467) - (1 - m.x65)*m.x467 + m.x66*(m.x267 - 10*m.x468)
    - (1 - m.x66)*m.x468) + m.x467) == 0)
m.c267 = Constraint(expr=m.x469 - (0.0025*(m.x66*(m.x267 - 10*m.x468) - (1 - m.x66)*m.x468 + m.x67*(m.x268 - 10*m.x469)
    - (1 - m.x67)*m.x469) + m.x468) == 0)
m.c268 = Constraint(expr=m.x470 - (0.0025*(m.x67*(m.x268 - 10*m.x469) - (1 - m.x67)*m.x469 + m.x68*(m.x269 - 10*m.x470)
    - (1 - m.x68)*m.x470) + m.x469) == 0)
m.c269 = Constraint(expr=m.x471 - (0.0025*(m.x68*(m.x269 - 10*m.x470) - (1 - m.x68)*m.x470 + m.x69*(m.x270 - 10*m.x471)
    - (1 - m.x69)*m.x471) + m.x470) == 0)
m.c270 = Constraint(expr=m.x472 - (0.0025*(m.x69*(m.x270 - 10*m.x471) - (1 - m.x69)*m.x471 + m.x70*(m.x271 - 10*m.x472)
    - (1 - m.x70)*m.x472) + m.x471) == 0)
m.c271 = Constraint(expr=m.x473 - (0.0025*(m.x70*(m.x271 - 10*m.x472) - (1 - m.x70)*m.x472 + m.x71*(m.x272 - 10*m.x473)
    - (1 - m.x71)*m.x473) + m.x472) == 0)
m.c272 = Constraint(expr=m.x474 - (0.0025*(m.x71*(m.x272 - 10*m.x473) - (1 - m.x71)*m.x473 + m.x72*(m.x273 - 10*m.x474)
    - (1 - m.x72)*m.x474) + m.x473) == 0)
m.c273 = Constraint(expr=m.x475 - (0.0025*(m.x72*(m.x273 - 10*m.x474) - (1 - m.x72)*m.x474 + m.x73*(m.x274 - 10*m.x475)
    - (1 - m.x73)*m.x475) + m.x474) == 0)
m.c274 = Constraint(expr=m.x476 - (0.0025*(m.x73*(m.x274 - 10*m.x475) - (1 - m.x73)*m.x475 + m.x74*(m.x275 - 10*m.x476)
    - (1 - m.x74)*m.x476) + m.x475) == 0)
m.c275 = Constraint(expr=m.x477 - (0.0025*(m.x74*(m.x275 - 10*m.x476) - (1 - m.x74)*m.x476 + m.x75*(m.x276 - 10*m.x477)
    - (1 - m.x75)*m.x477) + m.x476) == 0)
m.c276 = Constraint(expr=m.x478 - (0.0025*(m.x75*(m.x276 - 10*m.x477) - (1 - m.x75)*m.x477 + m.x76*(m.x277 - 10*m.x478)
    - (1 - m.x76)*m.x478) + m.x477) == 0)
m.c277 = Constraint(expr=m.x479 - (0.0025*(m.x76*(m.x277 - 10*m.x478) - (1 - m.x76)*m.x478 + m.x77*(m.x278 - 10*m.x479)
    - (1 - m.x77)*m.x479) + m.x478) == 0)
m.c278 = Constraint(expr=m.x480 - (0.0025*(m.x77*(m.x278 - 10*m.x479) - (1 - m.x77)*m.x479 + m.x78*(m.x279 - 10*m.x480)
    - (1 - m.x78)*m.x480) + m.x479) == 0)
m.c279 = Constraint(expr=m.x481 - (0.0025*(m.x78*(m.x279 - 10*m.x480) - (1 - m.x78)*m.x480 + m.x79*(m.x280 - 10*m.x481)
    - (1 - m.x79)*m.x481) + m.x480) == 0)
m.c280 = Constraint(expr=m.x482 - (0.0025*(m.x79*(m.x280 - 10*m.x481) - (1 - m.x79)*m.x481 + m.x80*(m.x281 - 10*m.x482)
    - (1 - m.x80)*m.x482) + m.x481) == 0)
m.c281 = Constraint(expr=m.x483 - (0.0025*(m.x80*(m.x281 - 10*m.x482) - (1 - m.x80)*m.x482 + m.x81*(m.x282 - 10*m.x483)
    - (1 - m.x81)*m.x483) + m.x482) == 0)
m.c282 = Constraint(expr=m.x484 - (0.0025*(m.x81*(m.x282 - 10*m.x483) - (1 - m.x81)*m.x483 + m.x82*(m.x283 - 10*m.x484)
    - (1 - m.x82)*m.x484) + m.x483) == 0)
m.c283 = Constraint(expr=m.x485 - (0.0025*(m.x82*(m.x283 - 10*m.x484) - (1 - m.x82)*m.x484 + m.x83*(m.x284 - 10*m.x485)
    - (1 - m.x83)*m.x485) + m.x484) == 0)
m.c284 = Constraint(expr=m.x486 - (0.0025*(m.x83*(m.x284 - 10*m.x485) - (1 - m.x83)*m.x485 + m.x84*(m.x285 - 10*m.x486)
    - (1 - m.x84)*m.x486) + m.x485) == 0)
m.c285 = Constraint(expr=m.x487 - (0.0025*(m.x84*(m.x285 - 10*m.x486) - (1 - m.x84)*m.x486 + m.x85*(m.x286 - 10*m.x487)
    - (1 - m.x85)*m.x487) + m.x486) == 0)
m.c286 = Constraint(expr=m.x488 - (0.0025*(m.x85*(m.x286 - 10*m.x487) - (1 - m.x85)*m.x487 + m.x86*(m.x287 - 10*m.x488)
    - (1 - m.x86)*m.x488) + m.x487) == 0)
m.c287 = Constraint(expr=m.x489 - (0.0025*(m.x86*(m.x287 - 10*m.x488) - (1 - m.x86)*m.x488 + m.x87*(m.x288 - 10*m.x489)
    - (1 - m.x87)*m.x489) + m.x488) == 0)
m.c288 = Constraint(expr=m.x490 - (0.0025*(m.x87*(m.x288 - 10*m.x489) - (1 - m.x87)*m.x489 + m.x88*(m.x289 - 10*m.x490)
    - (1 - m.x88)*m.x490) + m.x489) == 0)
m.c289 = Constraint(expr=m.x491 - (0.0025*(m.x88*(m.x289 - 10*m.x490) - (1 - m.x88)*m.x490 + m.x89*(m.x290 - 10*m.x491)
    - (1 - m.x89)*m.x491) + m.x490) == 0)
m.c290 = Constraint(expr=m.x492 - (0.0025*(m.x89*(m.x290 - 10*m.x491) - (1 - m.x89)*m.x491 + m.x90*(m.x291 - 10*m.x492)
    - (1 - m.x90)*m.x492) + m.x491) == 0)
m.c291 = Constraint(expr=m.x493 - (0.0025*(m.x90*(m.x291 - 10*m.x492) - (1 - m.x90)*m.x492 + m.x91*(m.x292 - 10*m.x493)
    - (1 - m.x91)*m.x493) + m.x492) == 0)
m.c292 = Constraint(expr=m.x494 - (0.0025*(m.x91*(m.x292 - 10*m.x493) - (1 - m.x91)*m.x493 + m.x92*(m.x293 - 10*m.x494)
    - (1 - m.x92)*m.x494) + m.x493) == 0)
m.c293 = Constraint(expr=m.x495 - (0.0025*(m.x92*(m.x293 - 10*m.x494) - (1 - m.x92)*m.x494 + m.x93*(m.x294 - 10*m.x495)
    - (1 - m.x93)*m.x495) + m.x494) == 0)
m.c294 = Constraint(expr=m.x496 - (0.0025*(m.x93*(m.x294 - 10*m.x495) - (1 - m.x93)*m.x495 + m.x94*(m.x295 - 10*m.x496)
    - (1 - m.x94)*m.x496) + m.x495) == 0)
m.c295 = Constraint(expr=m.x497 - (0.0025*(m.x94*(m.x295 - 10*m.x496) - (1 - m.x94)*m.x496 + m.x95*(m.x296 - 10*m.x497)
    - (1 - m.x95)*m.x497) + m.x496) == 0)
m.c296 = Constraint(expr=m.x498 - (0.0025*(m.x95*(m.x296 - 10*m.x497) - (1 - m.x95)*m.x497 + m.x96*(m.x297 - 10*m.x498)
    - (1 - m.x96)*m.x498) + m.x497) == 0)
m.c297 = Constraint(expr=m.x499 - (0.0025*(m.x96*(m.x297 - 10*m.x498) - (1 - m.x96)*m.x498 + m.x97*(m.x298 - 10*m.x499)
    - (1 - m.x97)*m.x499) + m.x498) == 0)
m.c298 = Constraint(expr=m.x500 - (0.0025*(m.x97*(m.x298 - 10*m.x499) - (1 - m.x97)*m.x499 + m.x98*(m.x299 - 10*m.x500)
    - (1 - m.x98)*m.x500) + m.x499) == 0)
m.c299 = Constraint(expr=m.x501 - (0.0025*(m.x98*(m.x299 - 10*m.x500) - (1 - m.x98)*m.x500 + m.x99*(m.x300 - 10*m.x501)
    - (1 - m.x99)*m.x501) + m.x500) == 0)
m.c300 = Constraint(expr=m.x502 - (0.0025*(m.x99*(m.x300 - 10*m.x501) - (1 - m.x99)*m.x501 + m.x100*(m.x301 - 10*m.x502)
    - (1 - m.x100)*m.x502) + m.x501) == 0)
m.c301 = Constraint(expr=m.x503 - (0.0025*(m.x100*(m.x301 - 10*m.x502) - (1 - m.x100)*m.x502 + m.x101*(m.x302 - 10*
    m.x503) - (1 - m.x101)*m.x503) + m.x502) == 0)
m.c302 = Constraint(expr=m.x504 - (0.0025*(m.x101*(m.x302 - 10*m.x503) - (1 - m.x101)*m.x503 + m.x102*(m.x303 - 10*
    m.x504) - (1 - m.x102)*m.x504) + m.x503) == 0)
m.c303 = Constraint(expr=m.x505 - (0.0025*(m.x102*(m.x303 - 10*m.x504) - (1 - m.x102)*m.x504 + m.x103*(m.x304 - 10*
    m.x505) - (1 - m.x103)*m.x505) + m.x504) == 0)
m.c304 = Constraint(expr=m.x506 - (0.0025*(m.x103*(m.x304 - 10*m.x505) - (1 - m.x103)*m.x505 + m.x104*(m.x305 - 10*
    m.x506) - (1 - m.x104)*m.x506) + m.x505) == 0)
m.c305 = Constraint(expr=m.x507 - (0.0025*(m.x104*(m.x305 - 10*m.x506) - (1 - m.x104)*m.x506 + m.x105*(m.x306 - 10*
m.x507) - (1 | |
from numpy import prod
import cupy
from cupy.cuda import cufft
from cupy.fft import config
from cupy.fft._fft import (_convert_fft_type, _default_fft_func, _fft,
_get_cufft_plan_nd, _get_fftn_out_size,
_output_dtype)
from cupy.fft._cache import get_plan_cache
def get_fft_plan(a, shape=None, axes=None, value_type='C2C'):
    """ Generate a CUDA FFT plan for transforming up to three axes.
    Args:
        a (cupy.ndarray): Array to be transform, assumed to be either C- or
            F- contiguous.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output. If ``shape`` is not given, the lengths of the input along
            the axes specified by ``axes`` are used.
        axes (None or int or tuple of int): The axes of the array to
            transform. If `None`, it is assumed that all axes are transformed.
            Currently, for performing N-D transform these must be a set of up
            to three adjacent axes, and must include either the first or the
            last axis of the array.
        value_type (str): The FFT type to perform. Acceptable values are:
            * 'C2C': complex-to-complex transform (default)
            * 'R2C': real-to-complex transform
            * 'C2R': complex-to-real transform
    Returns:
        a cuFFT plan for either 1D transform (``cupy.cuda.cufft.Plan1d``) or
        N-D transform (``cupy.cuda.cufft.PlanNd``).
    .. note::
        The returned plan can not only be passed as one of the arguments of
        the functions in ``cupyx.scipy.fftpack``, but also be used as a
        context manager for both ``cupy.fft`` and ``cupyx.scipy.fftpack``
        functions:
    .. code-block:: python
        x = cupy.random.random(16).reshape(4, 4).astype(cupy.complex)
        plan = cupyx.scipy.fftpack.get_fft_plan(x)
        with plan:
            y = cupy.fft.fftn(x)
            # alternatively:
            y = cupyx.scipy.fftpack.fftn(x)  # no explicit plan is given!
        # alternatively:
        y = cupyx.scipy.fftpack.fftn(x, plan=plan)  # pass plan explicitly
        In the first case, no cuFFT plan will be generated automatically,
        even if ``cupy.fft.config.enable_nd_planning = True`` is set.
    .. warning::
        This API is a deviation from SciPy's, is currently experimental, and
        may be changed in the future version.
    """
    # check input array
    if a.flags.c_contiguous:
        order = 'C'
    elif a.flags.f_contiguous:
        order = 'F'
    else:
        raise ValueError('Input array a must be contiguous')
    # Normalize scalar shape/axes arguments to 1-tuples so the code below
    # can uniformly treat both as tuples.
    if isinstance(shape, int):
        shape = (shape,)
    if isinstance(axes, int):
        axes = (axes,)
    if (shape is not None) and (axes is not None) and len(shape) != len(axes):
        raise ValueError('Shape and axes have different lengths.')
    # check axes
    # n=1: 1d (need axis1D); n>1: Nd
    if axes is None:
        # Default: transform the last n axes (all axes when shape is None).
        n = a.ndim if shape is None else len(shape)
        axes = tuple(i for i in range(-n, 0))
        if n == 1:
            axis1D = 0
    else:  # axes is a tuple
        n = len(axes)
        if n == 1:
            axis1D = axes[0]
            if axis1D >= a.ndim or axis1D < -a.ndim:
                err = 'The chosen axis ({0}) exceeds the number of '\
                      'dimensions of a ({1})'.format(axis1D, a.ndim)
                raise ValueError(err)
        elif n > 3:
            raise ValueError('Only up to three axes is supported')
    # Note that "shape" here refers to the shape along transformed axes, not
    # the shape of the output array, and we need to convert it to the latter.
    # The result is as if "a=_cook_shape(a); return a.shape" is called.
    # Because of this, we need to use (possibly unsorted) axes.
    transformed_shape = shape
    shape = list(a.shape)
    if transformed_shape is not None:
        for s, axis in zip(transformed_shape, axes):
            if s is not None:
                if axis == axes[-1] and value_type == 'C2R':
                    # C2R: the complex input along the last transformed axis
                    # holds only s // 2 + 1 non-redundant elements.
                    s = s // 2 + 1
                shape[axis] = s
    shape = tuple(shape)
    # check value_type
    out_dtype = _output_dtype(a.dtype, value_type)
    fft_type = _convert_fft_type(out_dtype, value_type)
    # TODO(leofang): figure out if we really have to skip F-order?
    if n > 1 and value_type != 'C2C' and a.flags.f_contiguous:
        raise ValueError('C2R/R2C PlanNd for F-order arrays is not supported')
    # generate plan
    # (load from cache if it exists, otherwise create one but don't cache it)
    if n > 1:  # ND transform
        if cupy.cuda.runtime.is_hip and value_type == 'C2R':
            raise RuntimeError("hipFFT's C2R PlanNd is buggy and unsupported")
        out_size = _get_fftn_out_size(
            shape, transformed_shape, axes[-1], value_type)
        # _get_cufft_plan_nd handles the interaction with plan cache
        plan = _get_cufft_plan_nd(
            shape, fft_type, axes=axes, order=order, out_size=out_size,
            to_cache=False)
    else:  # 1D transform
        # prepare plan arguments
        if value_type != 'C2R':
            out_size = shape[axis1D]
        else:
            out_size = _get_fftn_out_size(
                shape, transformed_shape, axis1D, value_type)
        batch = prod(shape) // shape[axis1D]
        devices = None if not config.use_multi_gpus else config._devices
        # 1D plans are memoized in the plan cache, keyed on transform size,
        # FFT type, batch count, and (multi-GPU case) the device list.
        keys = (out_size, fft_type, batch, devices)
        cache = get_plan_cache()
        cached_plan = cache.get(keys)
        if cached_plan is not None:
            plan = cached_plan
        else:
            plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
    return plan
def fft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """One-dimensional discrete Fourier transform.

    Args:
        x (cupy.ndarray): Input array.
        n (None or int): Output length along the transformed axis; defaults
            to the input length along ``axis``.
        axis (int): Axis over which the FFT is computed.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): Pre-built cuFFT
            plan obtained via ``cupyx.scipy.fftpack.get_fft_plan(x, axis)``;
            with ``None``, CuPy generates a plan automatically.

    Returns:
        cupy.ndarray: The transformed array, complex-typed, with length
        ``n`` along ``axis``.

    .. note::
        The ``plan`` argument is currently experimental and its interface
        may change in a future version.

    .. seealso:: :func:`scipy.fftpack.fft`
    """
    direction = cufft.CUFFT_FORWARD
    return _fft(x, (n,), (axis,), None, direction,
                overwrite_x=overwrite_x, plan=plan)
def ifft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """One-dimensional inverse discrete Fourier transform.

    Args:
        x (cupy.ndarray): Input array.
        n (None or int): Output length along the transformed axis; defaults
            to the input length along ``axis``.
        axis (int): Axis over which the inverse FFT is computed.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): Pre-built cuFFT
            plan obtained via ``cupyx.scipy.fftpack.get_fft_plan(x, axis)``;
            with ``None``, CuPy generates a plan automatically.

    Returns:
        cupy.ndarray: The transformed array, complex-typed, with length
        ``n`` along ``axis``.

    .. note::
        The ``plan`` argument is currently experimental and its interface
        may change in a future version.

    .. seealso:: :func:`scipy.fftpack.ifft`
    """
    direction = cufft.CUFFT_INVERSE
    return _fft(x, (n,), (axis,), None, direction,
                overwrite_x=overwrite_x, plan=plan)
def fft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None):
    """Two-dimensional discrete Fourier transform.

    Args:
        x (cupy.ndarray): Input array.
        shape (None or tuple of ints): Lengths of the transformed axes of
            the output; defaults to the input lengths along ``axes``.
        axes (tuple of ints): Axes over which the FFT is computed.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): Pre-built cuFFT
            plan obtained via ``cupyx.scipy.fftpack.get_fft_plan(x, axes)``.
            With ``None``, CuPy auto-generates a plan behind the scene when
            ``cupy.fft.config.enable_nd_planning`` is True, and uses no
            cuFFT plan otherwise.

    Returns:
        cupy.ndarray: The transformed array, complex-typed, shaped per
        ``shape``.

    .. note::
        The ``plan`` argument is currently experimental and its interface
        may change in a future version.

    .. seealso:: :func:`scipy.fftpack.fft2`
    """
    # Pick the 1D- or ND-transform implementation suited to axes/plan.
    fft_func = _default_fft_func(x, shape, axes, plan)
    return fft_func(x, shape, axes, None, cufft.CUFFT_FORWARD,
                    overwrite_x=overwrite_x, plan=plan)
def ifft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None):
"""Compute the two-dimensional inverse FFT.
Args:
x (cupy.ndarray): Array to be transformed.
shape (None or tuple of ints): Shape of the transformed axes of the
output. If ``shape`` is not given, the lengths of the input along
the axes specified by ``axes`` are used.
axes (tuple of ints): Axes over which to compute the FFT.
overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
transforming ``x`` over ``axes``, | |
# tests/integration/test_image_filters.py
# encoding: utf-8
# ------------------------------------------------------------------------
# Copyright 2022 All Histolab Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
import operator
import numpy as np
import pytest
from PIL import ImageChops
import histolab.filters.image_filters_functional as imf
from ..fixtures import GS, NPY, RGB, RGBA
from ..util import load_expectation
def _create_rag_mask(pil_image):
mask = np.ones((pil_image.size[1], pil_image.size[0]))
mask[:100, :] = 0
mask[:, :100] = 0
mask[-100:, :] = 0
mask[:, -100:] = 0
return mask
def test_invert_filter_with_rgb_image():
    """invert() on an RGB image matches the stored expectation exactly."""
    expected = load_expectation(
        "pil-images-rgb/diagnostic-slide-thumb-rgb-inverted", type_="png"
    )

    inverted = imf.invert(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB)

    np.testing.assert_array_almost_equal(np.array(inverted), np.array(expected))
    diff = np.array(ImageChops.difference(inverted, expected))
    assert np.unique(diff)[0] == 0


def test_invert_filter_with_rgba_image():
    """invert() on an RGBA image matches the stored expectation exactly."""
    expected = load_expectation(
        "pil-images-rgba/diagnostic-slide-thumb-inverted", type_="png"
    )

    inverted = imf.invert(RGBA.DIAGNOSTIC_SLIDE_THUMB)

    np.testing.assert_array_almost_equal(np.array(inverted), np.array(expected))
    diff = np.array(ImageChops.difference(inverted, expected))
    assert np.unique(diff)[0] == 0


def test_invert_filter_with_gs_image():
    """invert() on a grayscale image matches the stored expectation exactly."""
    expected = load_expectation(
        "pil-images-gs/diagnostic-slide-thumb-gs-inverted", type_="png"
    )

    inverted = imf.invert(GS.DIAGNOSTIC_SLIDE_THUMB_GS)

    np.testing.assert_array_almost_equal(np.array(inverted), np.array(expected))
    diff = np.array(ImageChops.difference(inverted, expected))
    assert np.unique(diff)[0] == 0
def test_rgb_to_hed_filter_with_rgb_image():
    """rgb_to_hed() on an RGB image matches the stored array expectation."""
    expected = load_expectation(
        "arrays/diagnostic-slide-thumb-rgb-to-hed", type_="npy"
    )

    hed = imf.rgb_to_hed(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB)

    np.testing.assert_array_almost_equal(hed, expected)


def test_rgb_to_hed_filter_with_rgba_image():
    """rgb_to_hed() on an RGBA image warns, converts to RGB, and matches."""
    expected = load_expectation(
        "arrays/diagnostic-slide-thumb-rgb-to-hed", type_="npy"
    )
    warning_regex = (
        r"Input image must be RGB. NOTE: the image will be converted to RGB before"
        r" HED conversion."
    )

    with pytest.warns(UserWarning, match=warning_regex):
        hed = imf.rgb_to_hed(RGBA.DIAGNOSTIC_SLIDE_THUMB)

    np.testing.assert_array_almost_equal(hed, expected)


def test_rgb_to_hed_raises_exception_on_gs_image():
    """rgb_to_hed() rejects grayscale input with an explanatory message."""
    with pytest.raises(Exception) as err:
        imf.rgb_to_hed(GS.DIAGNOSTIC_SLIDE_THUMB_GS)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "Input image must be RGB."
def test_hematoxylin_channel_filter_with_rgb_image():
    """hematoxylin_channel() on an RGB image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-rgb/tcga-lung-rgb-hematoxylin-channel", type_="png"
    )

    h_channel = imf.hematoxylin_channel(RGB.TCGA_LUNG_RGB)

    np.testing.assert_array_almost_equal(np.array(h_channel), np.array(expected))
    diff = np.array(ImageChops.difference(h_channel, expected))
    assert np.unique(diff)[0] == 0


def test_hematoxylin_channel_filter_with_rgba_image():
    """hematoxylin_channel() on an RGBA image warns, converts, and matches."""
    expected = load_expectation(
        "pil-images-rgba/tcga-lung-hematoxylin-channel", type_="png"
    )
    warning_regex = (
        r"Input image must be RGB. NOTE: the image will be converted to RGB before"
        r" HED conversion."
    )

    with pytest.warns(UserWarning, match=warning_regex):
        h_channel = imf.hematoxylin_channel(RGBA.TCGA_LUNG)

    np.testing.assert_array_almost_equal(np.array(h_channel), np.array(expected))
    diff = np.array(ImageChops.difference(h_channel, expected))
    assert np.unique(diff)[0] == 0


def test_hematoxylin_channel_raises_exception_on_gs_image():
    """hematoxylin_channel() rejects grayscale input."""
    with pytest.raises(ValueError) as err:
        imf.hematoxylin_channel(GS.DIAGNOSTIC_SLIDE_THUMB_GS)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "Input image must be RGB/RGBA."
def test_eosin_channel_filter_with_rgb_image():
    """eosin_channel() on an RGB image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-rgb/tcga-lung-rgb-eosin-channel", type_="png"
    )

    e_channel = imf.eosin_channel(RGB.TCGA_LUNG_RGB)

    np.testing.assert_array_almost_equal(np.array(e_channel), np.array(expected))
    diff = np.array(ImageChops.difference(e_channel, expected))
    assert np.unique(diff)[0] == 0


def test_eosin_channel_filter_with_rgba_image():
    """eosin_channel() on an RGBA image warns, converts, and matches."""
    expected = load_expectation(
        "pil-images-rgba/tcga-lung-eosin-channel", type_="png"
    )
    warning_regex = (
        r"Input image must be RGB. NOTE: the image will be converted to RGB before"
        r" HED conversion."
    )

    with pytest.warns(UserWarning, match=warning_regex):
        e_channel = imf.eosin_channel(RGBA.TCGA_LUNG)

    np.testing.assert_array_almost_equal(np.array(e_channel), np.array(expected))
    diff = np.array(ImageChops.difference(e_channel, expected))
    assert np.unique(diff)[0] == 0


def test_eosin_channel_raises_exception_on_gs_image():
    """eosin_channel() rejects grayscale input."""
    with pytest.raises(ValueError) as err:
        imf.eosin_channel(GS.DIAGNOSTIC_SLIDE_THUMB_GS)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "Input image must be RGB/RGBA."
def test_rgb_to_hsv_filter_with_rgb_image():
    """rgb_to_hsv() on an RGB image matches the stored array expectation."""
    expected = load_expectation(
        "arrays/diagnostic-slide-thumb-rgb-to-hsv", type_="npy"
    )

    hsv = imf.rgb_to_hsv(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB)

    np.testing.assert_array_almost_equal(hsv, expected)


def test_rgb_to_hsv_raises_exception_on_rgba_image():
    """rgb_to_hsv() rejects RGBA input."""
    with pytest.raises(Exception) as err:
        imf.rgb_to_hsv(RGBA.DIAGNOSTIC_SLIDE_THUMB)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "Input image must be RGB"


def test_rgb_to_hsv_raises_exception_on_gs_image():
    """rgb_to_hsv() rejects grayscale input."""
    with pytest.raises(Exception) as err:
        imf.rgb_to_hsv(GS.DIAGNOSTIC_SLIDE_THUMB_GS)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "Input image must be RGB"
def test_stretch_contrast_filter_on_rgba_image():
    """stretch_contrast() on an RGBA image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-rgba/diagnostic-slide-thumb-stretch-contrast", type_="png"
    )

    stretched = imf.stretch_contrast(RGBA.DIAGNOSTIC_SLIDE_THUMB, 40, 60)

    np.testing.assert_array_almost_equal(np.array(stretched), np.array(expected))
    diff = np.array(ImageChops.difference(stretched, expected))
    assert np.unique(diff)[0] == 0


def test_stretch_contrast_filter_on_rgb_image():
    """stretch_contrast() with default bounds on an RGB image matches."""
    expected = load_expectation(
        "pil-images-rgb/diagnostic-slide-thumb-rgb-stretch-contrast", type_="png"
    )

    stretched = imf.stretch_contrast(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB)

    np.testing.assert_array_almost_equal(np.array(stretched), np.array(expected))
    diff = np.array(ImageChops.difference(stretched, expected))
    assert np.unique(diff)[0] == 0


def test_stretch_contrast_filter_on_gs_image():
    """stretch_contrast() on a grayscale image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-gs/diagnostic-slide-thumb-gs-stretch-contrast", type_="png"
    )

    stretched = imf.stretch_contrast(GS.DIAGNOSTIC_SLIDE_THUMB_GS, 40, 60)

    np.testing.assert_array_almost_equal(np.array(stretched), np.array(expected))
    diff = np.array(ImageChops.difference(stretched, expected))
    assert np.unique(diff)[0] == 0


@pytest.mark.parametrize(
    "low, high",
    (
        (300, 40),
        (300, 600),
        (40, 500),
        (-10, 340),
        (-200, 300),
        (-40, -60),
        (None, 50),
        (50, None),
        (None, None),
    ),
)
def test_stretch_contrast_raises_exception_on_ranges(low, high):
    """stretch_contrast() rejects bounds outside [0, 255]."""
    with pytest.raises(Exception) as err:
        imf.stretch_contrast(RGBA.DIAGNOSTIC_SLIDE_THUMB, low, high)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "low and high values must be in range [0, 255]"
def test_histogram_equalization_filter_on_rgba_image():
    """histogram_equalization() on an RGBA image matches the expectation."""
    expected = load_expectation(
        "pil-images-rgba/diagnostic-slide-thumb-histogram-equalization", type_="png"
    )

    equalized = imf.histogram_equalization(RGBA.DIAGNOSTIC_SLIDE_THUMB, 200)

    np.testing.assert_array_almost_equal(np.array(equalized), np.array(expected))
    diff = np.array(ImageChops.difference(equalized, expected))
    assert np.unique(diff)[0] == 0


def test_histogram_equalization_filter_on_rgb_image():
    """histogram_equalization() on an RGB image matches the expectation."""
    expected = load_expectation(
        "pil-images-rgb/diagnostic-slide-thumb-rgb-histogram-equalization", type_="png"
    )

    equalized = imf.histogram_equalization(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB, 200)

    np.testing.assert_array_almost_equal(np.array(equalized), np.array(expected))
    diff = np.array(ImageChops.difference(equalized, expected))
    assert np.unique(diff)[0] == 0


def test_histogram_equalization_filter_on_gs_image():
    """histogram_equalization() on a grayscale image matches the expectation."""
    expected = load_expectation(
        "pil-images-gs/diagnostic-slide-thumb-gs-histogram-equalization", type_="png"
    )

    equalized = imf.histogram_equalization(GS.DIAGNOSTIC_SLIDE_THUMB_GS, 200)

    np.testing.assert_array_almost_equal(np.array(equalized), np.array(expected))
    diff = np.array(ImageChops.difference(equalized, expected))
    assert np.unique(diff)[0] == 0
def test_adaptive_equalization_filter_on_rgba_image():
    """Adaptive (CLAHE) equalization of an RGBA image matches the expectation."""
    expected = load_expectation(
        "pil-images-rgba/diagnostic-slide-thumb-adaptive-equalization", type_="png"
    )

    result = imf.adaptive_equalization(RGBA.DIAGNOSTIC_SLIDE_THUMB, 200, 0.2)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
def test_adaptive_equalization_filter_on_rgb_image():
    """Adaptive (CLAHE) equalization of an RGB image matches the expectation."""
    expected = load_expectation(
        "pil-images-rgb/diagnostic-slide-thumb-rgb-adaptive-equalization", type_="png"
    )

    result = imf.adaptive_equalization(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB, 200, 0.2)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
def test_adaptive_equalization_filter_on_gs_image():
    """Adaptive (CLAHE) equalization of a grayscale image matches the expectation."""
    expected = load_expectation(
        "pil-images-gs/diagnostic-slide-thumb-gs-adaptive-equalization", type_="png"
    )

    result = imf.adaptive_equalization(GS.DIAGNOSTIC_SLIDE_THUMB_GS, 200, 0.2)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
@pytest.mark.parametrize(
    "nbins, clip_limit", ((-10, 340), (-40, -60), (None, 50), (None, None))
)
def test_adaptive_equalization_raises_exception_on_params(nbins, clip_limit):
    """Non-positive or missing nbins must raise with a clear message."""
    with pytest.raises(Exception) as err:
        imf.adaptive_equalization(RGBA.DIAGNOSTIC_SLIDE_THUMB, nbins, clip_limit)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "Number of histogram bins must be a positive integer"
def test_local_equalization_filter_on_gs_image():
    """Local equalization of a grayscale image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-gs/diagnostic-slide-thumb-gs-local-equalization", type_="png"
    )

    result = imf.local_equalization(GS.DIAGNOSTIC_SLIDE_THUMB_GS, 80)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
@pytest.mark.parametrize(
    "pil_rgb_image",
    (
        RGB.DIAGNOSTIC_SLIDE_THUMB_RGB,
        RGBA.DIAGNOSTIC_SLIDE_THUMB,
    ),
)
def test_local_equalization_raises_exception_on_rgb_images(pil_rgb_image):
    """Local equalization only accepts 2D (grayscale) input; RGB/RGBA must raise."""
    with pytest.raises(Exception) as err:
        imf.local_equalization(pil_rgb_image, 80)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "Input must be 2D."
def test_kmeans_segmentation_raises_value_error_on_rgba_images():
    """K-means segmentation must reject RGBA input with a ValueError."""
    with pytest.raises(ValueError) as err:
        imf.kmeans_segmentation(RGBA.DIAGNOSTIC_SLIDE_THUMB)

    assert isinstance(err.value, ValueError)
    assert str(err.value) == "Input image cannot be RGBA"
def test_kmeans_segmentation_filter_on_rgb_image():
    """K-means segmentation of an RGB image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-rgb/diagnostic-slide-thumb-rgb-kmeans-segmentation", type_="png"
    )

    result = imf.kmeans_segmentation(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB, 800, 10)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
def test_kmeans_segmentation_filter_on_gs_image():
    """K-means segmentation of a grayscale image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-rgb/diagnostic-slide-thumb-gs-kmeans-segmentation", type_="png"
    )

    result = imf.kmeans_segmentation(GS.DIAGNOSTIC_SLIDE_THUMB_GS, 800, 10)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
def test_rag_threshold_filter_on_rgb_image():
    """RAG thresholding of an RGB image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-rgb/diagnostic-slide-thumb-rgb-rag-threshold", type_="png"
    )

    result = imf.rag_threshold(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB, 650, 20.6, 9)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
@pytest.mark.parametrize(
    "pil_image, mask, expected_image",
    (
        (
            RGB.DIAGNOSTIC_SLIDE_THUMB_RGB,
            None,
            "mask-arrays/diagnostic-slide-thumb-rgb-rag-threshold-labels",
        ),
        (
            RGB.DIAGNOSTIC_SLIDE_THUMB_RGB,
            _create_rag_mask(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB),
            "mask-arrays/diagnostic-slide-thumb-rgb-rag-threshold-maskedlabels",
        ),
    ),
)
def test_rag_threshold_filter_return_labels(pil_image, mask, expected_image):
    """With return_labels=True, rag_threshold yields the expected label array."""
    expected = load_expectation(expected_image, type_="npy")

    labels = imf.rag_threshold(
        pil_image, 650, 20.6, 9, return_labels=True, mask=mask
    )

    np.testing.assert_array_almost_equal(labels, expected)
def test_rag_threshold_filter_on_gs_image():
    """RAG thresholding of a grayscale image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-rgb/diagnostic-slide-thumb-gs-rag-threshold", type_="png"
    )

    result = imf.rag_threshold(GS.DIAGNOSTIC_SLIDE_THUMB_GS, 650, 20.6, 15)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
def test_rag_threshold_raises_exception_on_rgba_images():
    """RAG thresholding must reject RGBA input with a clear message."""
    with pytest.raises(Exception) as err:
        imf.rag_threshold(RGBA.DIAGNOSTIC_SLIDE_THUMB, 20, 50, 3.5)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "Input image cannot be RGBA"
def test_hysteresis_threshold_filter_on_rgba_image():
    """Hysteresis thresholding of an RGBA image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-rgba/diagnostic-slide-thumb-hysteresis-threshold", type_="png"
    )

    result = imf.hysteresis_threshold(RGBA.DIAGNOSTIC_SLIDE_THUMB, 10.6, 200)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
def test_hysteresis_threshold_filter_on_rgb_image():
    """Hysteresis thresholding of an RGB image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-rgb/diagnostic-slide-thumb-rgb-hysteresis-threshold", type_="png"
    )

    result = imf.hysteresis_threshold(RGB.DIAGNOSTIC_SLIDE_THUMB_RGB, 10.6, 200)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
def test_hysteresis_threshold_filter_on_gs_image():
    """Hysteresis thresholding of a grayscale image matches the stored expectation."""
    expected = load_expectation(
        "pil-images-gs/diagnostic-slide-thumb-gs-hysteresis-threshold", type_="png"
    )

    result = imf.hysteresis_threshold(GS.DIAGNOSTIC_SLIDE_THUMB_GS, 10.6, 200)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
@pytest.mark.parametrize("low, high", ((None, 50), (-250, None), (None, None)))
def test_hysteresis_threshold_raises_exception_on_thresholds(low, high):
    """Missing thresholds must raise with a clear message."""
    with pytest.raises(Exception) as err:
        imf.hysteresis_threshold(RGBA.DIAGNOSTIC_SLIDE_THUMB, low, high)

    assert isinstance(err.value, Exception)
    assert str(err.value) == "thresholds cannot be None"
@pytest.mark.parametrize(
    "pil_image, expected_image, disk_size",
    (
        (
            GS.DIAGNOSTIC_SLIDE_THUMB_GS,
            "pil-images-gs/diagnostic-slide-thumb-gs1-local-otsu",
            10,
        ),
        (
            GS.DIAGNOSTIC_SLIDE_THUMB_GS,
            "pil-images-gs/diagnostic-slide-thumb-gs2-local-otsu",
            3.8,
        ),
        (
            GS.DIAGNOSTIC_SLIDE_THUMB_GS,
            "pil-images-gs/diagnostic-slide-thumb-gs3-local-otsu",
            0,
        ),
        (
            GS.DIAGNOSTIC_SLIDE_THUMB_GS,
            "pil-images-gs/diagnostic-slide-thumb-gs4-local-otsu",
            np.sqrt(2),
        ),
    ),
)
def test_local_otsu_threshold_filter_on_gs_image(pil_image, expected_image, disk_size):
    """Local Otsu thresholding at several disk sizes matches stored expectations."""
    expected = load_expectation(expected_image, type_="png")

    result = imf.local_otsu_threshold(pil_image, disk_size)

    np.testing.assert_array_almost_equal(np.asarray(result), np.asarray(expected))
    diff_values = np.unique(np.asarray(ImageChops.difference(result, expected)))
    assert diff_values[0] == 0
@pytest.mark.parametrize(
    "pil_image, disk_size, expected_exception, expected_message",
    (
        (RGBA.DIAGNOSTIC_SLIDE_THUMB, 6, ValueError, "Input must be 2D."),
        (RGBA.DIAGNOSTIC_SLIDE_THUMB, -10, ValueError, "Input must be 2D."),
        (RGB.DIAGNOSTIC_SLIDE_THUMB_RGB, 10, ValueError, "Input must be 2D."),
        (
            GS.DIAGNOSTIC_SLIDE_THUMB_GS,
            -10,
            ValueError,
            "Disk size must be a positive number.",
        ),
        (
            GS.DIAGNOSTIC_SLIDE_THUMB_GS,
            None,
            ValueError,
            "Disk size must be a positive number.",
        ),
        (
            GS.DIAGNOSTIC_SLIDE_THUMB_GS,
            np.inf,
            ValueError,
            "Disk size must be a positive number.",
        ),
    ),
)
def test_local_otsu_threshold_raises_right_exceptions(
    pil_image, disk_size, expected_exception, expected_message
):
    """Non-2D images and invalid disk sizes raise the expected error and message."""
    with pytest.raises(expected_exception) as err:
        imf.local_otsu_threshold(pil_image, disk_size)

    assert isinstance(err.value, expected_exception)
    assert str(err.value) == expected_message
# -------- Branching function --------
def test_hysteresis_threshold_mask_filter_on_rgba_image():
rgba_img = RGBA.DIAGNOSTIC_SLIDE_THUMB
expected_value = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.