input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
rejected the transfer """
pass
def set_date(self, date):
    """Advance the account to *date* (called by the simulation driver).

    Emits a warning when the new date is not exactly one day after the
    current one, or when it lies before the account's start date.
    """
    day_gap = (date - self._current_date).days
    if day_gap != 1:
        warnings.warn('Difference between current date and next date is %i and not 1' % day_gap)
    if date < self._date_start:
        warnings.warn('Date is before start date of account.')
    self._current_date = date
def start_of_day(self):
    """Hook executed at the start of each simulated day, before any money
    transfer happens; the base implementation does nothing."""
    pass
def end_of_day(self):
    """Hook executed at the end of each simulated day, after all money
    transfers have been accomplished; the base implementation does nothing."""
    pass
class DummyAccount(Account):
    """Catch-all counterpart account, used when a Transfer names its from- or
    to-account with a plain string. It agrees to everything, which makes it
    handy for loan payments or outgoing costs."""

    def __init__(self, name):
        """Create the dummy account under a validated name."""
        self._name = validate.valid_name(name)
# now the implementation of the real, usable classes begins. In contrast to the account class,
# in these classes, report gets some semantic information about how to handle different
# properties of the class
class Bank_Account(Account):
    """A normal bank account to manage income and outgoings of a household.

    Balances are kept internally in integer cents (``self._caccount``);
    report entries convert them to currency units by dividing by 100.
    """

    def __init__(self, amount, interest, date=None, name=None, meta=None):
        """Create a bank account.

        amount   : opening balance
        interest : yearly interest rate
        date     : account start date (handled by `Account`)
        name     : account name
        meta     : optional dict with extra metadata for reports
        """
        # BUG FIX: `meta` used a shared mutable default ({}); use a None
        # sentinel and substitute a fresh dict per call instead.
        meta = {} if meta is None else meta
        # call inherited method __init__
        super().__init__(
            amount=amount, interest=interest, date=date, name=name, meta=meta)
        self._report_input = 0
        self._report_output = 0
        # declare how each report column is to be interpreted
        self._report.add_semantics('account', 'saving_abs')
        self._report.add_semantics('interest', 'win_cum')
        self._report.add_semantics('input', 'input_cum')
        self._report.add_semantics('output', 'output_cum')
        self._report.add_semantics('foreign_account', 'none')
        self._report.add_semantics('kind', 'none')
        self._report.add_semantics('description', 'none')
        # yearly interest is booked on this day
        self._interest_paydate = {'month': 12, 'day': 31}
        # reporting functionality
        self._report_interest = 0
        self.make_report()

    # overwriting function
    def make_report(self, interest=0, input=0, output=0,
                    foreign_account='', kind='', description='',
                    meta=None):
        """Create a report entry; cent amounts are converted to currency units.

        NOTE: the parameter name `input` shadows the builtin, but it is part
        of the public interface (callers pass an ``input=...`` keyword).
        """
        meta = {} if meta is None else meta  # avoid shared mutable default
        self._report.append(date=self._current_date,
                            account=self._caccount / 100,
                            interest=float('%.2f' % (interest / 100)),
                            input=input / 100,
                            output=output / 100,
                            foreign_account=foreign_account,
                            kind=kind,
                            description=description,
                            meta=meta
                            )

    def exec_interest_time(self):
        """Book the interest summed over the year onto the account."""
        self._caccount = int(round(self._caccount + self._sum_interest))
        self.make_report(
            interest=self._sum_interest,
            kind='yearly interest'
        )
        self._sum_interest = 0

    def as_df(self):
        """Return the report as a DataFrame restricted to the account columns."""
        df = self.report.as_df()
        df = df[['foreign_account', 'description', 'input', 'output', 'interest', 'account']]
        return df

    def get_table_json(self, report):
        """Create a JSON-ready table (header + rows) for a given report."""
        rows = []
        # BUG FIX: the original compared strings with `is`, which relies on
        # CPython string interning (and is a SyntaxWarning on modern Python).
        if report.precision == 'daily':
            header = ['date', 'from', 'description', 'input', 'output', 'interest', 'account']
            for status in report._statuses:
                item = [status.strdate, status._status['foreign_account'],
                        status._status['description'],
                        '%.02f EUR' % status._status['input'],
                        '%.02f EUR' % status._status['output'],
                        '%.02f EUR' % status._status['interest'],
                        '%.02f EUR' % status._status['account']]
                rows.append(item)
        else:
            header = ['date', 'input', 'output', 'interest', 'account']
            for status in report._statuses:
                item = [status.strdate,
                        '%.02f EUR' % status._status['input'],
                        '%.02f EUR' % status._status['output'],
                        '%.02f EUR' % status._status['interest'],
                        '%.02f EUR' % status._status['account']]
                rows.append(item)
        return {'header': header, 'rows': rows}

    def interest_time(self):
        """Check whether it is time to book the interests to the account."""
        return ((self._current_date.day == self._interest_paydate['day']) and
                (self._current_date.month == self._interest_paydate['month']))

    def payment_input(self, account_str, payment, kind, description, meta):
        """Receiver side of a transfer; hook for input-specific checks."""
        return self.payment_move(account_str, payment, kind, description, meta)

    def payment_output(self, account_str, payment, kind, description, meta):
        """Sender side of a transfer; hook for output-specific checks."""
        return self.payment_move(account_str, payment, kind, description, meta)

    def payment_move(self, account_str, payment, kind, description, meta):
        """Book a payment; the sign of *payment* selects the report direction.

        account_str : the opposite account, sender or receiver
        payment     : amount in cents (negative means outgoing)
        kind        : whether this is a regular payment or a unique one
        description : description of the payment (usually its name)
        """
        move_type = 'input'
        if payment < 0:
            move_type = 'output'
        self._caccount = int(self._caccount + payment)
        report = {'foreign_account': account_str,
                  move_type: payment,
                  'kind': kind,
                  'description': description,
                  'meta': meta}
        self.make_report(**report)
        return TransferMessage(C_transfer_OK, money=payment)

    def return_money(self, money):
        """Hard return of transfer money after the receiving side rejected it."""
        self._caccount = int(self._caccount + money)
        report = {
            'input': money,
            'kind': 'storno',
            'description': 'transfer did not succeeded'}
        self.make_report(**report)

    def start_of_day(self):
        """Things that should happen at the start of the day, before any money
        transfer happens; nothing to do for a plain bank account."""
        pass

    def end_of_day(self):
        """Accrue one day of interest; book the yearly sum on the paydate."""
        # TODO: needs to be replaced by a mechanism that checks not every day
        days_per_year = get_days_per_year(self._current_date.year)
        # calculate interest for this day
        interest = self._caccount * (self._interest / days_per_year)
        # store interest for later calculations
        self._sum_interest += interest
        # if paydate is there, add the summed interest to the account
        if self.interest_time():
            self.exec_interest_time()
class Loan(Account):
"""
This is the default account class that should capture the essential
functionalities of account models
"""
def __init__(self, amount, interest, date = None, name = None, meta = {}):
    """Create the data for a basic loan; the balance is stored negated."""
    # a loan is tracked as negative money on the account
    super().__init__(
        amount = -amount, interest = interest, date = date, name = name, meta = meta)
    # reporting functionality
    self._report_payment = 0
    for column, semantic in (('account', 'debt_abs'),
                             ('interest', 'cost_cum'),
                             ('payment', 'debtpayment_cum'),
                             ('foreign_account', 'none'),
                             ('kind', 'none'),
                             ('description', 'none')):
        self._report.add_semantics(column, semantic)
    # yearly interest is booked at the end of the year
    self._interest_paydate = {'month': 12, 'day': 31}
    self.make_report()
def as_df(self):
    """Return the report as a DataFrame restricted to the loan columns."""
    wanted = ['foreign_account', 'description', 'payment', 'interest', 'account']
    return self.report.as_df()[wanted]
def get_table_json(self, report):
    """Build a JSON-ready table (header + rows) for *report*.

    Daily reports include the sender and description columns; coarser
    precisions only contain the numeric columns.
    """
    rows = []
    # BUG FIX: comparing strings with `is` relies on CPython interning
    # (and is a SyntaxWarning on modern Python); use `==` instead.
    if report.precision == 'daily':
        header = ['date', 'from', 'description', 'payment', 'interest', 'account']
        for status in report._statuses:
            rows.append([status.strdate,
                         status._status['foreign_account'],
                         status._status['description'],
                         '%.02f EUR' % status._status['payment'],
                         '%.02f EUR' % status._status['interest'],
                         '%.02f EUR' % status._status['account']])
    else:
        header = ['date', 'payment', 'interest', 'account']
        for status in report._statuses:
            rows.append([status.strdate,
                         '%.02f EUR' % status._status['payment'],
                         '%.02f EUR' % status._status['interest'],
                         '%.02f EUR' % status._status['account']])
    return {'header': header, 'rows': rows}
def is_finished(self):
    """True once the loan, including interest accrued this year, is paid back."""
    remaining = self._caccount + self._sum_interest
    return remaining >= 0.
def make_report(self, payment = 0, interest = 0,
                foreign_account = '', kind = '', description = '',
                meta = {}):
    """Append one report entry; cent values are converted to currency units."""
    entry = {
        'date': self._current_date,
        'account': self._caccount / 100,
        'payment': payment / 100,
        # interest is rounded to whole cents for the report
        'interest': float('%.2f' % (interest / 100)),
        'foreign_account': foreign_account,
        'kind': kind,
        'description': description,
        'meta': meta,
    }
    self._report.append(**entry)
@property
def account(self):
    """Current balance in currency units, including interest accrued this year."""
    return (self._caccount + self._sum_interest) / 100

def get_account(self):
    # legacy method-style getter kept for callers that use the call form
    return self.account
def exec_interest_time(self):
    """Book the interest collected over the year onto the account."""
    booked = self._sum_interest
    self._caccount = int(round(self._caccount + booked))
    self.make_report(
        interest = booked,
        kind = 'yearly interest'
    )
    # the yearly accumulator starts over after booking
    self._sum_interest = 0
def interest_time(self):
    """True on the yearly interest paydate, or as soon as the loan flips positive."""
    paydate_reached = (self._current_date.day == self._interest_paydate['day']
                       and self._current_date.month == self._interest_paydate['month'])
    return paydate_reached or self._caccount > 0
def payment_input(self, account_str, payment, kind, description, meta):
""" Input function for payments. This account is the receiver
of a transfer. This function, if derived from,
can account for special checks for input operations """
if ((self._caccount + self._sum_interest) >= 0):
return TransferMessage(C_transfer_NA, money = 0, message = "No credit to pay for")
payed = min(-(self._caccount + self._sum_interest), payment)
if payed == payment:
self._caccount = int(self._caccount + payed)
report = {'payment': payed,
'foreign_account': account_str,
'kind': kind,
'description': description,
'meta': meta}
self.make_report(**report)
else:
self._caccount = int(self._caccount + self._sum_interest | |
<filename>database_creator.py
# -*- coding: utf-8 -*-
"""
Created on Mon May 18 14:56:53 2020
@author: Dainean
"""
#Prepare the python system
import pandas as pd
import numpy as np
import fnmatch #For filtering
import os #move around in our OS
from astropy.io import fits #Working with fits
from astropy.cosmology import WMAP9 as cosmo #Cosmology calculators
import itertools as it #iteration / combination trick used
import seaborn as sb
import matplotlib.pyplot as plt
# Working directory control: hop to the right data directory depending on
# which machine the script runs on (kapteyn cluster or the home machine).
# BUG FIX: the duplicated `cwd = os.getcwd()` pair was removed and the
# Windows paths are raw strings so backslashes are not escape sequences.
cwd = os.getcwd()
print("Initial working directory is:", cwd)
if '/Users/users/verdult/Thesis/thesis' in cwd:
    print("Working at kapteyn, changing to data directory")
    os.chdir('/net/virgo01/data/users/verdult/Thesis')  # This is for kapteyn
if 'data' in cwd:
    print("Working in kapteyn data folder")
if 'Dropbox' in cwd:
    print("Working at home, changing to onedrive folder")
    os.chdir(r'D:\Onedrive\Thesis')
if 'Dainean' in cwd:
    print("Working at home, changing to onedrive folder")
    os.chdir(r'D:\Onedrive\Thesis')
if 'Onedrive' in cwd:
    print("Working in onedrive folder")
cwd = os.getcwd()
print("Current working directory is:", cwd)
#%%
def pandafy3(filename):
    """Return the cached support dataframe, rebuilding it from *filename*
    (a FITS table) when the cache is missing or the module-level ``remake``
    flag requests a rebuild.

    BUG FIX: the original rebuilt the frame *after* the retry loop from
    variables that only exist on the rebuild path, so the cached path died
    with a NameError and never returned the cached frame.
    """
    try:
        # `remake` is a module-level flag; if it is not defined yet the
        # NameError is caught below and simply triggers a rebuild.
        if remake:
            print("New file requested")
            raise NameError('remake')
        df = pd.read_hdf('support/SupportDB.h5', 'initial_db')
        print("file found")
    except (FileNotFoundError, KeyError, NameError):
        print("creating new file")
        with fits.open(filename) as hdul:
            data = hdul[1].data
            columns = pd.Series(hdul[1].columns.names)  # searchable column names
            # stack every column into one 2-D array in a single pass
            stacked = np.column_stack([data.field(name) for name in columns])
            index = data.field(0)
        df = pd.DataFrame(stacked, index=index, columns=columns.values)
        df.to_hdf('support/tester.h5', 'test_db')  # 195,492 KB
    return df
#%%
# Main data source: open the combined FITS catalogue once and pull out the
# pieces the rest of the script keeps referring to (rows, header, columns).
filename_1 = 'fits/combined/DS-Sersic-SA-kCorr_m4.fits' #which file? #222,617 KB
#we are at version 4 right now.
simple = fits.open(filename_1) #open it
data = simple[1].data #table rows of the first extension
hdr = simple[1].header #FITS header (kept around, unused below)
cols = simple[1].columns #The columns from .fits file as an object
coln = cols.names #Names of the columns
colnpd = pd.Series(coln) #Convert to a pandas series (so we can search the strings)
simple.close()
# NOTE(review): `data` is still read after close(); this appears to rely on
# astropy keeping the memmap alive — a `with` block would be safer.
def pandafy(data, columns):
    """Convert selected *columns* of a FITS record array into a DataFrame.

    data    : FITS record array (anything that supports ``.field``)
    columns : pandas Series with the column names to extract
    Returns a float32 DataFrame indexed by the table's fifth field.

    FIX: the unused ``columns.index`` lookup (flagged "Does this do
    anything?" — it did not) was removed, and the dummy zero column that was
    stacked and deleted again is replaced by a single column_stack call.
    """
    stacked = np.column_stack([data.field(name) for name in columns])
    df = pd.DataFrame(stacked, index=data.field(4), columns=columns.values, dtype='float32')
    return df
def pandafy2(filename):
    """Load a whole FITS table as a DataFrame.

    All columns are read; the first field of the table becomes the index.

    FIX: removed the no-op ``filename = filename`` assignment and unused
    locals, and read the data inside a ``with`` block instead of after
    ``close()`` (the original relied on the memmap staying alive).
    """
    with fits.open(filename) as hdul:
        data = hdul[1].data
        columns = pd.Series(hdul[1].columns.names)  # searchable column names
        # stack every column into one 2-D array in a single pass
        stacked = np.column_stack([data.field(name) for name in columns])
        index = data.field(0)
    df = pd.DataFrame(stacked, index=index, columns=columns.values)
    return df
def fittify(df, filename='ThesisDB_selected.fits'):
    """Write *df* to a FITS binary table, one double-precision ('D') column
    per dataframe column."""
    fits_columns = [fits.Column(name=col, format='D', array=df.iloc[:, pos])
                    for pos, col in enumerate(df.columns.values)]
    hdu = fits.BinTableHDU.from_columns(fits.ColDefs(fits_columns))
    hdu.writeto(filename, overwrite=True)
#%%
# Check for the initial dataframe: load it from the HDF5 cache if present,
# otherwise build it from the FITS data and cache it (plus a FITS export).
remake = False  # remake the dataframe even if it exists?
#remake = True
while True:
    try:
        if remake:
            print("New file requested")
            # BUG FIX: the original raised a bare Exception here, which is
            # NOT in the except tuple below, so remake=True crashed the
            # script instead of triggering the rebuild branch.
            raise NameError('remake')
        dfm = pd.read_hdf('support/InitialDB.h5', 'initial_db')  # Read the initial dataframe
        print("file found")
        break
    except (FileNotFoundError, KeyError, NameError):
        print("creating new file")
        # turn the whole dataset into a dataframe, keeping out string columns
        dfm = pandafy(data, colnpd[2:430])
        dfm.to_hdf('support/InitialDB.h5', 'initial_db')  # 195,492 KB
        fittify(dfm, "thesis_gama.fits")
        break
def pandafy3(filename, remake=False):
    """Load the FITS table at *filename* as a float32 DataFrame.

    Also warms the HDF5 cache: when the cached frame is missing (or *remake*
    is set) it is rebuilt via ``pandafy2`` and written to InitialDB.h5.

    BUG FIX: the original contained a syntax error (``dfm = pandafy2])``)
    that made the whole module unimportable; the call is restored to
    ``pandafy2(filename)``.
    """
    try:
        if remake:
            print("New file requested")
            raise NameError('remake')
        dfm = pd.read_hdf('support/SupportDB.h5', 'initial_db')  # cached frame
        print("file found")
    except (FileNotFoundError, KeyError, NameError):
        print("creating new file")
        dfm = pandafy2(filename)  # rebuild from the FITS file
        dfm.to_hdf('support/InitialDB.h5', 'initial_db')  # 195,492 KB
    # NOTE(review): independent of the cache handling above, the frame is
    # always rebuilt from the FITS file below and returned as float32 —
    # confirm whether the cached `dfm` should be returned instead.
    with fits.open(filename) as hdul:
        data = hdul[1].data
        print(hdul[1].header)
        columns = pd.Series(hdul[1].columns.names)
        stacked = np.column_stack([data.field(name) for name in columns])
        index = data.field(0)
    df = pd.DataFrame(stacked, index=index, columns=columns.values, dtype='float32')
    return df
# Helper dataframes (galactic extinction, SDSS and UKIDSS Sersic catalogues):
# load each from the SupportDB cache, rebuilding from its FITS file on a miss.
# BUG FIX: the bare ``except:`` clauses also swallowed KeyboardInterrupt and
# SystemExit; narrowed to ``except Exception`` (the deliberate
# ``raise Exception()`` used for remake is still caught).
remake = False
#remake = True
while True:
    try:
        if remake:
            print("New file requested")
            raise Exception()
        extinc = pd.read_hdf('support/SupportDB.h5', 'extinction')
        print("file found")
        break
    except Exception:
        print("creating new file")
        extinc = pandafy2('fits/GalacticExtinction.fits')
        extinc.to_hdf('support/SupportDB.h5', 'extinction')
        break
# SDSS Dataframe
remake = False
#remake = True
while True:
    try:
        if remake:
            print("New file requested")
            raise Exception()
        SDSS = pd.read_hdf('support/SupportDB.h5', 'SersicSDSS')
        print("file found")
        break
    except Exception:
        print("creating new file")
        SDSS = pandafy2('fits/SersicCatSDSS.fits')
        SDSS.to_hdf('support/SupportDB.h5', 'SersicSDSS')
        break
# UKID dataframe
remake = False
while True:
    try:
        if remake:
            print("New file requested")
            raise Exception()
        UKID = pd.read_hdf('support/SupportDB.h5', 'SersicUKIDSS')
        print("file found")
        break
    except Exception:
        print("creating new file")
        UKID = pandafy2('fits/SersicCatUKIDSS.fits')
        UKID.to_hdf('support/SupportDB.h5', 'SersicUKIDSS')
        break
#%%
# NOTE(review): this cell is an exact duplicate of the earlier "main
# variables and functions" cell — presumably a notebook-style re-run cell;
# consider removing one copy.
filename_1 = 'fits/combined/DS-Sersic-SA-kCorr_m4.fits' #which file? #222,617 KB
#we are at version 4 right now.
simple = fits.open(filename_1) #open it
data = simple[1].data #data bit
hdr = simple[1].header #header bit
cols = simple[1].columns #The columns from .fits file as an object
coln = cols.names #Names of the columns
colnpd = pd.Series(coln) #Convert to a pandas series (so we can search the strings)
simple.close()
def pandafy(data, columns):
    """Convert selected *columns* of a FITS record array into a DataFrame.

    (Redefinition of the earlier ``pandafy``; this later definition wins at
    runtime.) Returns a float32 DataFrame indexed by the table's fifth field.

    FIX: removed the unused ``columns.index`` lookup and the dummy zero
    column that was stacked and deleted again.
    """
    stacked = np.column_stack([data.field(name) for name in columns])
    df = pd.DataFrame(stacked, index=data.field(4), columns=columns.values, dtype='float32')
    return df
def pandafy2(filename):
    """Load a whole FITS table as a float32 DataFrame.

    (Redefinition of the earlier ``pandafy2``; this later definition wins at
    runtime and additionally casts to float32.) The first field of the table
    becomes the index.

    FIX: removed the no-op ``filename = filename`` assignment and unused
    locals; data is now read inside a ``with`` block instead of after close().
    """
    with fits.open(filename) as hdul:
        data = hdul[1].data
        columns = pd.Series(hdul[1].columns.names)  # searchable column names
        stacked = np.column_stack([data.field(name) for name in columns])
        index = data.field(0)
    df = pd.DataFrame(stacked, index=index, columns=columns.values, dtype='float32')
    return df
def fittify(df, filename='ThesisDB_selected.fits'):
    """Turn *df* into a FITS binary table on disk; every dataframe column
    becomes a double-precision ('D') FITS column."""
    columns = [fits.Column(name=name, format='D', array=df.iloc[:, i])
               for i, name in enumerate(df.columns.values)]
    table = fits.BinTableHDU.from_columns(fits.ColDefs(columns))
    table.writeto(filename, overwrite=True)
#%%
# Updated database creation: select the magnitude columns, mask sentinel
# values, and start assembling the reduced dataframe.
dfm = pd.read_hdf('support/InitialDB.h5', 'initial_db') #Read the initial dataframe
#-------------------------------------------------------------
GALMAG = dfm[dfm.columns[dfm.columns.str.contains("GALMAG_")]] #grab all the magnitude columns
GALMAG = GALMAG[GALMAG > -9999] #new dataframe where the -9999 sentinel values become NaN
dis = cosmo.comoving_distance(dfm['Z']) #comoving distances from redshift Z (WMAP9 cosmology)
dfm2 = GALMAG #needless renaming, but hassle to rewrite (same object, not a copy)
#Starting out with 6 columns,
#iloc[:,0:6]
dfm2['CATAID'] = dfm['CATAID'] #catalogue ID
dfm2['RA'] = dfm['RA'] #right ascension
dfm2['DEC'] = dfm['DEC'] #declination
dfm2['NQ'] = dfm['NQ'] #Add Redshift | |
#!/usr/bin/env python3
from . import command_codes as cc
import asyncio
from collections import namedtuple
import logging
import re
from typing import List, Callable, Union, Sequence, Any
from types import coroutine
class LoggerMetaClass(type):
    """Metaclass that equips every class with a ``bot.<ClassName>`` logger."""

    def __new__(mcs, name, bases, namespace):
        cls = super().__new__(mcs, name, bases, namespace)
        cls._log = logging.getLogger("bot.{}".format(name))
        cls._log.debug("Attached logger to {}".format(name))
        return cls
# Type of a compiled regular expression (re.Pattern on modern Python);
# used for duck-typed checks on filter arguments below.
RegEx = type(re.compile(""))
class User(metaclass=LoggerMetaClass):
    """A single IRC user, addressable for private messages."""

    def __init__(self, nick: str, client: 'Client', hostmask: str=None):
        self.name = nick
        self.hostmask = hostmask
        self.client = client
        self._log.debug("Created {}".format(self))

    def __repr__(self):
        return "<User {self.name}!{self.hostmask}>".format(self=self)

    def __eq__(self, other: 'User') -> bool:
        # users are identified by nick alone
        return self.name == other.name

    def __hash__(self):
        return hash(self.name)

    async def message(self, text: str, notice: bool=False) -> None:
        """Send *text* to this user through the owning client."""
        await self.client.message(self.name, text, notice=notice)
class Channel(metaclass=LoggerMetaClass):
    """A joined IRC channel: its name, owning client and current user set."""

    def __init__(self, name: str, client: 'Client'):
        self.name = name
        self.client = client
        self.users = set()  # kept up to date by the client's JOIN/PART/QUIT handlers
        self._log.debug("Created {}".format(self))

    def on_message(self, *args, accept_query=False, matcher=None, **kwargs):
        """
        Convenience wrapper of `Client.on_message` pre-bound with `channel=self.name`.

        :param accept_query: additionally accept private queries (sender is a
            `User`), not only messages addressed to this channel
        :param matcher: extra test function, combined with the channel check
        """
        if accept_query:
            def new_matcher(msg: Message):
                # run the caller's matcher first and propagate its rejection
                ret = True
                if matcher:
                    ret = matcher(msg)
                if ret is None or ret is False:
                    return ret
                # accept when addressed to this very channel object, or when
                # it is a query (sender is a User).
                # NOTE(review): identity test — assumes the client reuses one
                # Channel instance per channel name; confirm against the
                # client's channel cache.
                if msg.recipient is not self and not isinstance(msg.sender, User):
                    return False
                return ret
        else:
            kwargs.setdefault("channel", self.name)
            new_matcher = matcher
        return self.client.on_message(*args, matcher=new_matcher, **kwargs)

    async def message(self, text: str, notice: bool=False) -> None:
        """Send *text* to the channel via the owning client."""
        await self.client.message(self.name, text, notice=notice)

    async def part(self, reason: str=None, block: bool=False) -> None:
        """Leave the channel via the owning client."""
        await self.client.part(self.name, reason=reason, block=block)

    def __contains__(self, other: User) -> bool:
        return other in self.users

    def __eq__(self, other: 'Channel') -> bool:
        # channels are identified by name alone
        return self.name == other.name

    def __hash__(self):
        return hash(self.name)

    def __repr__(self):
        return "<Channel {self.name} users={num_users}>" \
            .format(self=self, num_users=len(self.users))
class Message(metaclass=LoggerMetaClass):
    """A received PRIVMSG/NOTICE with its sender, recipient and text."""

    def __init__(self, sender: Union[User, Channel],
                 recipient: Union[User, Channel],
                 text: str, notice: bool=False):
        self.sender = sender
        self.recipient = recipient
        self.text = text
        self.notice = notice

    async def reply(self, text: str, notice: bool=None) -> None:
        """Answer in the channel the message came from; queries go back to the sender."""
        if notice is None:
            notice = self.notice
        if isinstance(self.recipient, Channel):
            target = self.recipient
        else:
            target = self.sender
        await target.message(text, notice=notice)

    def __repr__(self):
        return "<Message sender={self.sender} recipient={self.recipient}>".format(self=self)
class Client(metaclass=LoggerMetaClass):
def __init__(self, host: str, port: int, nick: str="TheBot", user: str="bot",
             realname: str="The Bot", secure: bool=False, encoding: str="utf-8",
             password: str=None):
    """Set up client state; no connection is opened here."""
    self.host = host
    self.port = port
    self.secure = secure
    self.nick = nick
    self.user = user
    self.realname = realname
    self.encoding = encoding
    self.password = password
    # handler registries
    self._on_connected_handlers = []
    self._on_message_handlers = []
    self._on_command_handlers = []
    self._on_join_handlers = []
    # user/channel caches keyed by name
    self._users = {}
    self._channels = {}
    # default chan types, can be overridden by `cc.RPL_ISUPPORT` CHANTYPES
    self._channel_types = "#&"
    # default user mode prefixes, can be overridden by `cc.RPL_ISUPPORT` PREFIX
    self._prefix_map = {"@": "o", "+": "v"}
    self._connected = False
    self._modules = []
    # Register JOIN, QUIT, PART, NICK handlers
    for code, handler in ((cc.JOIN, self._on_join), (cc.QUIT, self._on_quit),
                          (cc.PART, self._on_part), (cc.NICK, self._on_nick)):
        self.on_command(code)(handler)
def on_connected(self) -> Callable[[Callable], Callable]:
    """Decorator registering a coroutine to run once the connection is up."""
    def register(fn: Callable[[], None]):
        self._on_connected_handlers.append(fn)
        return fn
    return register
# (matcher, handler) pair stored per registered message handler
MessageHandler = namedtuple("MessageHandler", ("matcher", "handler"))

def on_message(self, message: Union[str, RegEx]=None, channel: Union[str, RegEx]=None,
               sender: Union[str, RegEx]=None, matcher: Callable[[Message], None]=None,
               notice: bool=None) -> Callable[[Callable], Callable]:
    """
    Register a handler that's called after a message is received (PRIVMSG, NOTICE).
    The handler is called with the `Message` as argument, must be a coroutine
    and is run non-blocking. All filters must match for a message to be accepted.

    :param message: message filter, string (exact match) or compiled regex object
    :param channel: channel filter, string (exact match) or compiled regex object
    :param sender: sender filter, string (exact match) or compiled regex object
    :param matcher: test function, return true to accept the message.
        Gets the `Message` as parameter
    """
    matchers = []
    if notice is not None:
        def notice_matcher(msg: Message) -> bool:
            return msg.notice == notice
        matchers.append(notice_matcher)
    if matcher:
        matchers.append(matcher)
    # message filter — each branch appends its closure immediately after
    # defining it, so the repeated reuse of the name `matcher` is safe
    # (no late-binding problem)
    if message is None:
        pass
    elif isinstance(message, str):
        def matcher(msg: Message) -> bool:
            return msg.text == message
        matchers.append(matcher)
    elif hasattr(message, "search"):
        # regex or so
        def matcher(msg: Message) -> bool:
            m = message.search(msg.text)
            if m is not None:
                # named regex groups become keyword arguments of the handler
                return m.groupdict()
        matchers.append(matcher)
    else:
        raise ValueError("Don't know what to do with message={}".format(message))
    # sender filter
    if sender is None:
        pass
    elif isinstance(sender, User):
        def matcher(msg: Message) -> bool:
            return msg.sender == sender
        matchers.append(matcher)
    elif isinstance(sender, str):
        def matcher(msg: Message) -> bool:
            return msg.sender.name == sender
        matchers.append(matcher)
    elif hasattr(sender, "search"):
        # regex or so
        def matcher(msg: Message) -> bool:
            m = sender.search(msg.sender.name)
            if m is not None:
                return m.groupdict()
        matchers.append(matcher)
    else:
        raise ValueError("Don't know what to do with sender={}".format(sender))
    # channel filter (only meaningful for messages addressed to a Channel)
    if channel is None:
        pass
    elif isinstance(channel, Channel):
        def matcher(msg: Message) -> bool:
            return isinstance(msg.recipient, Channel) \
                and msg.recipient == channel
        matchers.append(matcher)
    elif isinstance(channel, str):
        def matcher(msg: Message) -> bool:
            return isinstance(msg.recipient, Channel) \
                and msg.recipient.name == channel
        matchers.append(matcher)
    elif hasattr(channel, "search"):
        # regex or so
        def matcher(msg: Message) -> bool:
            if not isinstance(msg.recipient, Channel):
                return
            m = channel.search(msg.recipient.name)
            if m is not None:
                return m.groupdict()
        matchers.append(matcher)
    else:
        raise ValueError("Don't know what to do with channel={}".format(channel))

    def message_matcher(msg: Message) -> bool:
        # AND-combine all filters; dict results contribute handler kwargs
        fn_kwargs = {}
        for m in matchers:
            ret = m(msg)
            # Internal matchers may return False or None to fail
            if ret is None or ret is False:
                return
            # If one returns a dict the values in it will be passed to the handler
            if isinstance(ret, dict):
                fn_kwargs.update(ret)
        return fn_kwargs

    def decorator(fn: Callable[[Message], None]) -> Callable[[Message], None]:
        mh = self.MessageHandler(message_matcher, fn)
        self._on_message_handlers.append(mh)
        self._log.debug("Added message handler {} with matchers {}".format(mh, matchers))
        return fn
    return decorator
def remove_message_handler(self, handler: 'Callable[[Message], None]') -> None:
    """Remove every registered message handler wrapping *handler*.

    BUG FIX: the original removed elements from the list while iterating
    over it, which skips the element after each removal, so duplicate
    registrations survived. Iterate over a snapshot instead.
    (The annotation is quoted to avoid eager evaluation of `Message`.)
    """
    for mh in list(self._on_message_handlers):
        if mh.handler == handler:
            self._log.debug("Removing message handler {}".format(mh))
            self._on_message_handlers.remove(mh)
def await_message(self, *args, **kwargs) -> 'asyncio.Future[Message]':
    """
    Block until a message matches. See `on_message`
    """
    fut = asyncio.Future()

    async def resolver(message):
        fut.set_result(message)

    handler = self.on_message(*args, **kwargs)(resolver)
    # drop the temporary handler once the future resolves or is cancelled
    fut.add_done_callback(lambda _: self.remove_message_handler(handler))
    return fut
# parsed raw IRC line / (channel, handler) pair for join callbacks
IrcMessage = namedtuple("IrcMessage", ("prefix", "args"))
JoinHandler = namedtuple("JoinHandler", ("channel", "handler"))

def on_join(self, channel: str=None) -> Callable[[Callable], Callable]:
    """
    Register a handler that's called after a channel is joined.
    The handler is called with the `Channel` as argument, must be a coroutine
    and is run non-blocking.

    :param channel: channel to look out for or `None` for all channels
    """
    def register(fn: Callable[[self.IrcMessage], None]):
        entry = self.JoinHandler(channel, fn)
        self._on_join_handlers.append(entry)
        self._log.debug("Added join handler {}".format(entry))
        return fn
    return register
def remove_join_handler(self, handler: 'Callable[[Channel], None]') -> None:
    """Remove every registered join handler wrapping *handler*.

    BUG FIX: iterate over a snapshot — removing from the list while
    iterating it skips the next element, leaving duplicates behind.
    (The annotation is quoted to avoid eager evaluation of `Channel`.)
    """
    for jh in list(self._on_join_handlers):
        if jh.handler == handler:
            self._log.debug("Removing join handler {}".format(jh))
            self._on_join_handlers.remove(jh)
# (args, handler) pair stored per registered command handler
CommandHandler = namedtuple("CommandHandler", ("args", "handler"))

def on_command(self, *args: Sequence[str]) -> Callable[[Callable], Callable]:
    """
    Register a handler that's called when (the beginning of) a `IrcMessage` matches.
    The handler is called with the `IrcMessage` as argument, must be a coroutine
    and is run blocking, i.e. you cannot use `await_command` in it!

    :param args: commands args that must match (the actual command is the first arg)
    """
    def register(fn: Callable[[self.IrcMessage], None]):
        entry = self.CommandHandler(args, fn)
        self._on_command_handlers.append(entry)
        self._log.debug("Added command handler {}".format(entry))
        return fn
    return register
def remove_command_handler(self, handler: 'Callable[[IrcMessage], None]') -> None:
    """Remove every registered command handler wrapping *handler*.

    BUG FIX: iterate over a snapshot — removing from the list while
    iterating it skips the next element, leaving duplicates behind.
    (The annotation is quoted to avoid eager evaluation of `IrcMessage`.)
    """
    for ch in list(self._on_command_handlers):
        if ch.handler == handler:
            self._log.debug("Removing command handler {}".format(ch))
            self._on_command_handlers.remove(ch)
def await_command(self, *args, **kwargs) -> 'asyncio.Future[IrcMessage]':
    """
    Block until a command matches. See `on_command`
    """
    fut = asyncio.Future()

    async def resolver(msg):
        fut.set_result(msg)

    handler = self.on_command(*args, **kwargs)(resolver)
    # remove handler when done or cancelled
    fut.add_done_callback(lambda _: self.remove_command_handler(handler))
    return fut
def _parsemsg(self, msg: str) -> IrcMessage:
# adopted from twisted/words/protocols/irc.py
if not msg:
return
prefix = None
if msg[0] == ":":
prefix, msg = msg[1:].split(" ", 1)
if " :" in msg:
msg, rest = msg.split(" :", 1)
args = msg.split() + [rest]
else:
args = msg.split()
return self.IrcMessage(prefix, tuple(args))
def _buildmsg(self, *args: List[str], prefix: str=None) -> str:
msg = ""
if prefix:
msg += ":{} ".format(prefix)
def fmtarg(i, arg):
arg = str(arg)
if i == len(args) - 1 and (" " in arg or arg.startswith(":")):
return ":" + arg
elif i != len(args) - 1 and (" " in arg or arg.startswith(":")):
raise ValueError(f"non-final argument contains space or begins with colon: {args}")
else:
return arg
msg += " ".join((fmtarg(i, arg) for i, arg in enumerate(args)))
return msg
async def _send(self, *args: List[Any], prefix: str=None) -> None:
    """Serialize one IRC command and queue it on the transport writer."""
    msg = self._buildmsg(*args, prefix=prefix)
    self._log.debug("<- {}".format(msg))
    # writer.write is non-blocking; lines are CRLF-terminated per the protocol
    self._writer.write(msg.encode(self.encoding) + b"\r\n")
async def message(self, recipient: str, text: str, notice: bool=False) -> None:
"""
Lower level messaging function used by User and | |
database dump to SQL file.
@param progress callback(name, count) to report progress,
returning false if export should cancel
"""
result = False
tables, namespace, cursors = db.schema["table"], {}, []
def gen(func, *a, **kw):
cursor = func(*a, **kw)
cursors.append(cursor)
for x in cursor: yield x
try:
with open(filename, "wb") as f:
db.lock(None, None, filename, label="database dump")
namespace = {
"db": db,
"sql": db.get_sql(),
"data": [{"name": t, "columns": opts["columns"],
"rows": gen(db.execute, "SELECT * FROM %s" % grammar.quote(t))}
for t, opts in tables.items()],
"pragma": db.get_pragma_values(dump=True),
"progress": progress,
"buffer": f,
}
template = step.Template(templates.DUMP_SQL, strip=False)
template.stream(f, namespace, unbuffered=True)
result = progress() if progress else True
except Exception as e:
logger.exception("Error exporting database dump from %s to %s.",
db, filename)
if progress: progress(error=util.format_exc(e), done=True)
result = False
finally:
db.unlock(None, None, filename)
for x in cursors: util.try_until(x.close)
if not result: util.try_until(lambda: os.unlink(filename))
return result
def export_to_db(db, filename, schema, renames=None, data=False, selects=None, progress=None):
    """
    Exports selected tables and views to another database, structure only or
    structure plus data, auto-creating table and view indexes and triggers.

    @param   db        source database instance
    @param   filename  database filename to export to
    @param   schema    {category: [name, ]} to export
    @param   renames   {category: {name1: name2}}, may be None
    @param   data      whether to export table data
    @param   selects   {table name: SELECT SQL if not using default}
    @param   progress  callback(?name, ?error) to report export progress,
                       returning false if export should cancel
    @return            whether any export SQL was executed
    """
    result = True
    CATEGORIES = "table", "view"
    sqls0, sqls1, actionsqls = [], [], []  # setup SQLs, teardown SQLs, export SQLs
    requireds, processeds, exporteds = {}, set(), set()
    is_samefile = util.lceq(db.filename, filename)
    file_existed = is_samefile or os.path.isfile(filename)
    insert_sql = "INSERT INTO %s.%s SELECT * FROM main.%s;"
    for category, name in ((c, n) for c, nn in schema.items() for n in nn):
        items = [db.schema[category][name]]
        items.extend(db.get_related(category, name, own=True).get("trigger", {}).values())
        for item in items:
            # Foreign tables and tables/views used in triggers for table,
            # tables/views used in view body and view triggers for view.
            for name2 in util.get(item, "meta", "__tables__"):
                if util.lceq(name, name2): continue # for name2
                requireds.setdefault(name, []).append(name2)
    finalargs = {"done": True}
    # BUG FIX: names referenced in the finally-block must exist even if setup
    # fails before their assignment, else cleanup raises NameError and masks
    # the original error.
    fks_on, schema2 = False, "main"
    db.lock(None, None, filename, label="database export")
    try:
        if not is_samefile:
            # BUG FIX: dict views are not indexable in Python 3; take the
            # schema name (second column of PRAGMA database_list) via list()
            schemas = [list(x.values())[1] for x in
                       db.execute("PRAGMA database_list").fetchall()]
            schema2 = util.make_unique("main", schemas, suffix="%s")
            db.execute("ATTACH DATABASE ? AS %s;" % schema2, [filename])
            sqls0.append("ATTACH DATABASE ? AS %s;" % schema2)
        myrenames = dict(renames or {}, schema=schema2)
        allnames2 = util.CaselessDict({x["name"]: x["type"] for x in db.execute(
            "SELECT name, type FROM %s.sqlite_master" % schema2
        ).fetchall()})
        fks_on = db.execute("PRAGMA foreign_keys").fetchone()["foreign_keys"]
        if fks_on:
            # disable enforcement while copying structure and data
            db.execute("PRAGMA foreign_keys = off;")
            sqls0.append("PRAGMA foreign_keys = off;")
        for category, name in ((c, x) for c in CATEGORIES for x in schema.get(c, ())):
            # BUG FIX: renames may be None (its default); guard before .get()
            name2 = (renames or {}).get(category, {}).get(name, name)
            processeds.add(name)
            if requireds.get(name) \
            and any(x in processeds and x not in exporteds for x in requireds[name]):
                # Skip item if it requires something that failed to export
                reqs = {}
                for name0 in requireds[name]:
                    if name0 in processeds and name0 not in exporteds:
                        category0 = "table" if name0 in db.schema.get("table", {}) else "view"
                        reqs.setdefault(category0, set()).add(name0)
                err = "Requires %s" % " and ".join(
                    "%s %s" % (util.plural(c, vv, numbers=False),
                               ", ".join(grammar.quote(v, force=True)
                                         for v in sorted(vv, key=lambda x: x.lower())))
                    for c, vv in sorted(reqs.items())
                )
                if progress and not progress(name=name, error=err):
                    result = False
                    break # for category, name
                else: continue # for category, name
            try:
                # Create table or view structure
                label = "%s %s" % (category, grammar.quote(name, force=True))
                if name != name2: label += " as %s" % grammar.quote(name2, force=True)
                if name2 in allnames2:
                    # Same-named item exists in target: drop it first
                    logger.info("Dropping %s %s in %s.", allnames2[name2], grammar.quote(name2, force=True), filename)
                    sql = "DROP %s %s.%s;" % (allnames2[name2].upper(), schema2, grammar.quote(name2))
                    db.execute(sql)
                    actionsqls.append(sql)
                logger.info("Creating %s in %s.", label, filename)
                sql, err = grammar.transform(db.schema[category][name]["sql"], renames=myrenames)
                if err:
                    if progress and not progress(name=name, error=err):
                        result = False
                        break # for category, name
                    else: continue # for category, name
                db.execute(sql)
                actionsqls.append(sql)
                if not data or "table" != category: exporteds.add(name)
                allnames2[name2] = category
                # Copy table data
                if data and "table" == category:
                    if selects and name in selects:
                        sql = "INSERT INTO %s.%s %s;" % (
                            schema2, grammar.quote(name2), selects[name])
                    else:
                        sql = insert_sql % (schema2, grammar.quote(name2),
                                            grammar.quote(name))
                    logger.info("Copying data to %s in %s.", label, filename)
                    db.execute(sql)
                    actionsqls.append(sql)
                    exporteds.add(name)
                # Create indexes and triggers for tables, triggers for views
                relateds = db.get_related(category, name, own=True)
                for subcategory, subitemmap in relateds.items():
                    for subname, subitem in subitemmap.items():
                        subname2 = subname
                        if name != name2:
                            # Derive sub-item name from the renamed parent
                            subname2 = re.sub(re.escape(name), re.sub(r"\W", "", name2),
                                              subname2, count=1, flags=re.I | re.U)
                        subname2 = util.make_unique(subname2, allnames2)
                        allnames2[subname2] = subcategory
                        sublabel = "%s %s" % (subcategory, grammar.quote(subname, force=True))
                        if subname != subname2: sublabel += " as %s" % grammar.quote(subname2, force=True)
                        logger.info("Creating %s for %s in %s.", sublabel, label, filename)
                        subrenames = dict(myrenames, **{subcategory: {subname: subname2}}
                                                     if subname != subname2 else {})
                        sql, err = grammar.transform(subitem["sql"], renames=subrenames)
                        if sql:
                            db.execute(sql)
                            actionsqls.append(sql)
            except Exception as e:
                logger.exception("Error exporting %s %s from %s to %s.",
                                 category, grammar.quote(name, force=True),
                                 db, filename)
                if progress and not progress(name=name, error=util.format_exc(e)):
                    result = False
                    break # for category, name
            else:
                if progress and not progress(name=name):
                    result = False
                    break # for category, name
    except Exception as e:
        logger.exception("Error exporting from %s to %s.", db, filename)
        finalargs["error"] = util.format_exc(e)
    finally:
        if fks_on:
            # restore foreign key enforcement
            try:
                db.execute("PRAGMA foreign_keys = on;")
                sqls1.append("PRAGMA foreign_keys = on;")
            except Exception: pass
        try:
            # no-op when is_samefile (schema2 stays "main"); error swallowed
            db.execute("DETACH DATABASE %s;" % schema2)
            sqls1.append("DETACH DATABASE %s;" % schema2)
        except Exception: pass
        if not file_existed and (not actionsqls or not result):
            # remove target file if this export created it and did nothing useful
            util.try_until(lambda: os.unlink(filename))
        db.unlock(None, None, filename)
    result = bool(actionsqls)
    if result: db.log_query("EXPORT TO DB", sqls0 + actionsqls + sqls1,
                            params=None if is_samefile else filename)
    if progress: progress(**finalargs)
    return result
def get_import_file_data(filename):
"""
Returns import file metadata, as {
"name": file name and path}.
"size": file size in bytes,
"format": "xlsx", "xlsx", "csv" or "json",
"sheets": [
"name": sheet name or None if CSV or JSON,
"rows": count or -1 if file too large,
"columns": [first row cell value, ],
]}.
"""
logger.info("Getting import data from %s.", filename)
sheets, size = [], os.path.getsize(filename)
if not size: raise ValueError("File is empty.")
extname = os.path.splitext(filename)[-1][1:].lower()
is_csv, is_json, is_xls, is_xlsx = \
(extname == x for x in ("csv", "json", "xls", "xlsx"))
if is_csv:
with open(filename, "rbU") as f:
firstline = next(f, "")
if firstline.startswith("\xFF\xFE"): # Unicode little endian header
try:
firstline = firstline.decode("utf-16") # GMail CSVs can be in UTF-16
except UnicodeDecodeError:
firstline = firstline[2:].replace("\x00", "")
else: # CSV has trouble with Unicode: turn back to str
firstline = firstline.encode("latin1", errors="xmlcharrefreplace")
iterable = itertools.chain([firstline], f)
csvfile = csv.reader(iterable, csv.Sniffer().sniff(firstline, ",;\t"))
rows, columns = -1, next(csvfile)
if 0 < size <= MAX_IMPORT_FILESIZE_FOR_COUNT:
rows = sum((1 for _ in csvfile), 1 if firstline else 0)
sheets.append({"rows": rows, "columns": columns, "name": "<no name>"})
elif is_json:
rows, columns, buffer, started = 0, {}, "", False
decoder = json.JSONDecoder(object_pairs_hook=collections.OrderedDict)
with open(filename, "rbU") as f:
for chunk in iter(functools.partial(f.read, 2**16), ""):
buffer += chunk
if not started: # Strip line comments and list start from beginning
buffer = re.sub("^//[^\n]*$", "", buffer.lstrip(), flags=re.M).lstrip()
if buffer[:1] == "[": buffer, started = buffer[1:].lstrip(), True
while started and buffer:
# Strip whitespace and interleaving commas from between dicts
buffer = re.sub(r"^\s*[,]?\s*", "", buffer)
try:
data, index = decoder.raw_decode(buffer)
buffer = buffer[index:]
if isinstance(data, collections.OrderedDict):
columns, rows = columns or data, rows + 1
except ValueError: # Not enough data to decode, read more
break # while started and buffer
if columns and any(x > MAX_IMPORT_FILESIZE_FOR_COUNT for x in (size, f.tell())):
break # for chunk
if rows and f.tell() < size: rows = -1
sheets.append({"rows": rows, "columns": columns, "name": "<JSON data>"})
elif is_xls:
with xlrd.open_workbook(filename, on_demand=True) as wb:
for sheet in wb.sheets():
rows = -1 if size > MAX_IMPORT_FILESIZE_FOR_COUNT else sheet.nrows
columns = [x.value for x in next(sheet.get_rows(), [])]
while columns and columns[-1] is None: columns.pop(-1)
sheets.append({"rows": rows, "columns": columns, "name": sheet.name})
elif is_xlsx:
wb = None
try:
wb = openpyxl.load_workbook(filename, data_only=True, read_only=True)
for sheet in wb.worksheets:
rows = -1 if size > MAX_IMPORT_FILESIZE_FOR_COUNT \
else sum(1 for | |
(`pulumi.Input[float]`) - The volume size, in gibibytes (GiB).
* `type` (`pulumi.Input[str]`) - The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
* `volumesPerInstance` (`pulumi.Input[float]`) - The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
* `id` (`pulumi.Input[str]`) - The ID of the EMR Cluster
* `instance_count` (`pulumi.Input[float]`) - Target number of instances for the instance group. Must be 1 or 3. Defaults to 1. Launching with multiple master nodes is only supported in EMR version 5.23.0+, and requires this resource's `core_instance_group` to be configured. Public (Internet accessible) instances must be created in VPC subnets that have `map public IP on launch` enabled. Termination protection is automatically enabled when launched with multiple master nodes and this provider must have the `termination_protection = false` configuration applied before destroying this resource.
* `instance_type` (`pulumi.Input[str]`) - EC2 instance type for all instances in the instance group.
* `name` (`pulumi.Input[str]`) - The name of the step.
The **ec2_attributes** object supports the following:
* `additionalMasterSecurityGroups` (`pulumi.Input[str]`) - String containing a comma separated list of additional Amazon EC2 security group IDs for the master node
* `additionalSlaveSecurityGroups` (`pulumi.Input[str]`) - String containing a comma separated list of additional Amazon EC2 security group IDs for the slave nodes as a comma separated string
* `emrManagedMasterSecurityGroup` (`pulumi.Input[str]`) - Identifier of the Amazon EC2 EMR-Managed security group for the master node
* `emrManagedSlaveSecurityGroup` (`pulumi.Input[str]`) - Identifier of the Amazon EC2 EMR-Managed security group for the slave nodes
* `instanceProfile` (`pulumi.Input[str]`) - Instance Profile for EC2 instances of the cluster assume this role
* `key_name` (`pulumi.Input[str]`) - Amazon EC2 key pair that can be used to ssh to the master node as the user called `hadoop`
* `serviceAccessSecurityGroup` (`pulumi.Input[str]`) - Identifier of the Amazon EC2 service-access security group - required when the cluster runs on a private subnet
* `subnet_id` (`pulumi.Input[str]`) - VPC subnet id where you want the job flow to launch. Cannot specify the `cc1.4xlarge` instance type for nodes of a job flow launched in a Amazon VPC
The **instance_groups** object supports the following:
* `autoscaling_policy` (`pulumi.Input[str]`) - The autoscaling policy document. This is a JSON formatted string. See [EMR Auto Scaling](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-automatic-scaling.html)
* `bid_price` (`pulumi.Input[str]`) - Bid price for each EC2 instance in the instance group, expressed in USD. By setting this attribute, the instance group is being declared as a Spot Instance, and will implicitly create a Spot request. Leave this blank to use On-Demand Instances.
* `ebs_configs` (`pulumi.Input[list]`) - Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
* `iops` (`pulumi.Input[float]`) - The number of I/O operations per second (IOPS) that the volume supports
* `size` (`pulumi.Input[float]`) - The volume size, in gibibytes (GiB).
* `type` (`pulumi.Input[str]`) - The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
* `volumesPerInstance` (`pulumi.Input[float]`) - The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
* `id` (`pulumi.Input[str]`) - The ID of the EMR Cluster
* `instance_count` (`pulumi.Input[float]`) - Target number of instances for the instance group. Must be 1 or 3. Defaults to 1. Launching with multiple master nodes is only supported in EMR version 5.23.0+, and requires this resource's `core_instance_group` to be configured. Public (Internet accessible) instances must be created in VPC subnets that have `map public IP on launch` enabled. Termination protection is automatically enabled when launched with multiple master nodes and this provider must have the `termination_protection = false` configuration applied before destroying this resource.
* `instanceRole` (`pulumi.Input[str]`) - The role of the instance group in the cluster. Valid values are: `MASTER`, `CORE`, and `TASK`.
* `instance_type` (`pulumi.Input[str]`) - EC2 instance type for all instances in the instance group.
* `name` (`pulumi.Input[str]`) - The name of the step.
The **kerberos_attributes** object supports the following:
* `adDomainJoinPassword` (`pulumi.Input[str]`) - The Active Directory password for `ad_domain_join_user`. This provider cannot perform drift detection of this configuration.
* `adDomainJoinUser` (`pulumi.Input[str]`) - Required only when establishing a cross-realm trust with an Active Directory domain. A user with sufficient privileges to join resources to the domain. This provider cannot perform drift detection of this configuration.
* `crossRealmTrustPrincipalPassword` (`pulumi.Input[str]`) - Required only when establishing a cross-realm trust with a KDC in a different realm. The cross-realm principal password, which must be identical across realms. This provider cannot perform drift detection of this configuration.
* `kdcAdminPassword` (`pulumi.Input[str]`) - The password used within the cluster for the kadmin service on the cluster-dedicated KDC, which maintains Kerberos principals, password policies, and keytabs for the cluster. This provider cannot perform drift detection of this configuration.
* `realm` (`pulumi.Input[str]`) - The name of the Kerberos realm to which all nodes in a cluster belong. For example, `EC2.INTERNAL`
The **master_instance_group** object supports the following:
* `bid_price` (`pulumi.Input[str]`) - Bid price for each EC2 instance in the instance group, expressed in USD. By setting this attribute, the instance group is being declared as a Spot Instance, and will implicitly create a Spot request. Leave this blank to use On-Demand Instances.
* `ebs_configs` (`pulumi.Input[list]`) - Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
* `iops` (`pulumi.Input[float]`) - The number of I/O operations per second (IOPS) that the volume supports
* `size` (`pulumi.Input[float]`) - The volume size, in gibibytes (GiB).
* `type` (`pulumi.Input[str]`) - The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
* `volumesPerInstance` (`pulumi.Input[float]`) - The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
* `id` (`pulumi.Input[str]`) - The ID of the EMR Cluster
* `instance_count` (`pulumi.Input[float]`) - Target number of instances for the instance group. Must be 1 or 3. Defaults to 1. Launching with multiple master nodes is only supported in EMR version 5.23.0+, and requires this resource's `core_instance_group` to be configured. Public (Internet accessible) instances must be created in VPC subnets that have `map public IP on launch` enabled. Termination protection is automatically enabled when launched with multiple master nodes and this provider must have the `termination_protection = false` configuration applied before destroying this resource.
* `instance_type` (`pulumi.Input[str]`) - EC2 instance type for all instances in the instance group.
* `name` (`pulumi.Input[str]`) - The name of the step.
The **steps** object supports the following:
* `actionOnFailure` (`pulumi.Input[str]`) - The action to take if the step fails. Valid values: `TERMINATE_JOB_FLOW`, `TERMINATE_CLUSTER`, `CANCEL_AND_WAIT`, and `CONTINUE`
* `hadoopJarStep` (`pulumi.Input[dict]`) - The JAR file used for the step. Defined below.
* `args` (`pulumi.Input[list]`) - List of command line arguments passed to the JAR file's main function when executed.
* `jar` (`pulumi.Input[str]`) - Path to a JAR file run during the step.
* `mainClass` (`pulumi.Input[str]`) - Name of the main class in the specified Java file. If not specified, the JAR file should specify a Main-Class in its manifest file.
* `properties` (`pulumi.Input[dict]`) - Key-Value map of Java properties that are set when the step runs. You can use these properties to pass key value pairs to your main function.
* `name` (`pulumi.Input[str]`) - The name of the step.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ | |
save_process.start()
save_process.join()
save_process.terminate()
# empty the dictionaries to release the memory because they are not needed anymore
self.init_algorithm_attributes()
# if this is not the last chunk, set up the next chunk of SNPs
if not self.is_last_chunk():
self.setup_next_chunk()
else:
# if this is the last chunk, generate the manhattan plot first, and then, tell clients to download the results
self.manhattan_plot()
self.set_step(HyFedProjectStep.RESULT)
except Exception as contingency_table_exception:
logger.error(f'Project {self.project_id}: {contingency_table_exception}')
self.project_failed()
# ##### linear regression beta-step related functions
def beta_linear_step(self):
    """Compute linear regression global beta values using the aggregated XTX and XTY matrices for the chunk.

    Aggregates the clients' X'X matrices and X'Y vectors, computes
    beta = (X'X)^-1 X'Y per SNP in worker processes (results returned
    through queues drained by reader threads), discards SNPs whose X'X
    matrix is singular (marked "NA"), shares the remaining beta values
    with the clients, and moves to the linear std-error step.
    On any error the project is marked failed.
    """
    try:
        # aggregate X'X matrices and X'Y vectors from the clients
        xt_x_matrices = self.compute_aggregated_parameter(SplinkLocalParameter.XT_X_MATRIX,
                                                          DataType.LIST_NUMPY_ARRAY_FLOAT)
        xt_y_vectors = self.compute_aggregated_parameter(SplinkLocalParameter.XT_Y_VECTOR,
                                                         DataType.LIST_NUMPY_ARRAY_FLOAT)
        # convert lists to dictionaries; aggregated lists are ordered by ascending SNP index
        self.xt_x_matrices = dict()
        self.xt_y_vectors = dict()
        snp_counter = -1
        for snp_index in sorted(self.considered_snp_indices.copy()):
            snp_counter += 1
            self.xt_x_matrices[snp_index] = xt_x_matrices[snp_counter]
            self.xt_y_vectors[snp_index] = xt_y_vectors[snp_counter]
        # initialize beta values and xt_x_inverse_matrices as empty dictionaries
        self.beta_values = dict()
        self.xt_x_inverse_matrices = dict()
        # queues
        queue_beta = multiprocessing.Queue()
        queue_xt_x_inverse = multiprocessing.Queue()
        # threads to read from the queue
        beta_read_thread = threading.Thread(target=self.read_queue_beta_linear, args=(queue_beta,))
        beta_read_thread.daemon = True
        beta_read_thread.start()
        xt_x_inverse_read_thread = threading.Thread(target=self.read_queue_xt_x_inverse, args=(queue_xt_x_inverse,))
        xt_x_inverse_read_thread.daemon = True
        xt_x_inverse_read_thread.start()
        # processes to compute the beta values and xt_x_inverse matrices for the sub-chunks
        sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
        process_list = list()
        for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices, sub_chunk_end_indices):
            process = multiprocessing.Process(target=self.calculate_beta_linear_sub_chunk,
                                              args=(start_index_sub_chunk, end_index_sub_chunk,
                                                    queue_beta, queue_xt_x_inverse,))
            process_list.append(process)
            process.daemon = True
            process.start()
        # wait for read threads to be done
        beta_read_thread.join()
        xt_x_inverse_read_thread.join()
        # close queues
        queue_beta.close()
        queue_xt_x_inverse.close()
        # terminate the processes
        for proc in process_list:
            proc.terminate()
        # update considered index set; "NA" marks SNPs whose X'X matrix was singular.
        # BUG FIX: iterate over a snapshot — discarding from the very set being
        # iterated raises "RuntimeError: Set changed size during iteration"
        for snp_index in self.considered_snp_indices.copy():
            if self.beta_values[snp_index][0] == "NA":
                self.considered_snp_indices.discard(snp_index)
                self.std_error_values[snp_index] = self.beta_values[snp_index]
                self.t_stat_values[snp_index] = self.beta_values[snp_index]
                self.p_values[snp_index] = self.beta_values[snp_index]
                continue
        # only share beta values for considered SNPs with clients to compute sum square error values
        beta_values = {snp_index: self.beta_values[snp_index] for snp_index in self.considered_snp_indices}
        self.global_parameters[SplinkGlobalParameter.BETA] = beta_values
        # tell clients to go to std-error step
        self.set_step(SplinkProjectStep.STD_ERROR_LINEAR)
    except Exception as beta_linear_exception:
        logger.error(f'Project {self.project_id}: {beta_linear_exception}')
        self.project_failed()
def calculate_beta_linear_sub_chunk(self, start_index, end_index, queue_beta, queue_xt_x_inverse):
    """Compute linear regression beta values for a sub-chunk.

    Runs as a worker process target: results must reach the parent
    through the queues, since attribute writes only mutate this
    worker's copy of the object.

    @param start_index: first SNP index of the sub-chunk (inclusive)
    @param end_index: last SNP index of the sub-chunk (exclusive)
    @param queue_beta: receives {snp_index: beta vector} dicts
    @param queue_xt_x_inverse: receives {snp_index: (X'X)^-1 matrix} dicts
    """
    beta_values = dict()
    xt_x_inverse_matrices = dict()
    for snp_index in np.arange(start_index, end_index):
        if snp_index not in self.considered_snp_indices:
            continue
        # put results in the queue whenever computation is done for 1000 SNPs
        if snp_index % 1001 == 1000:
            queue_beta.put(beta_values)
            queue_xt_x_inverse.put(xt_x_inverse_matrices)
            beta_values = dict()
            xt_x_inverse_matrices = dict()
        if np.linalg.det(self.xt_x_matrices[snp_index]) == 0:
            # BUG FIX: store the "NA" marker for singular matrices in the
            # LOCAL dict so it reaches the parent via the queue (mirrors
            # calculate_beta_logistic_sub_chunk).  Writing to
            # self.beta_values here only updated this worker process's
            # copy, so the parent's reader thread waited forever for the
            # missing entries.
            beta_values[snp_index] = np.array(["NA" for _ in range(len(self.covariates) + 2)])
            continue
        xt_x_inverse_matrices[snp_index] = np.linalg.inv(self.xt_x_matrices[snp_index])
        beta_values[snp_index] = np.dot(xt_x_inverse_matrices[snp_index], self.xt_y_vectors[snp_index]).flatten()
    queue_beta.put(beta_values)
    queue_xt_x_inverse.put(xt_x_inverse_matrices)
def read_queue_xt_x_inverse(self, queue_xt_x_inverse):
    """Reader-thread target: merge (X'X)^-1 result dicts coming from the
    worker processes into self.xt_x_inverse_matrices, returning once an
    entry exists for every considered SNP."""
    while len(self.xt_x_inverse_matrices) < len(self.considered_snp_indices):
        self.xt_x_inverse_matrices.update(queue_xt_x_inverse.get())
def read_queue_beta_linear(self, queue_beta_linear):
    """Reader-thread target: merge beta-value result dicts coming from
    the worker processes into self.beta_values, returning once an entry
    exists for every considered SNP."""
    while len(self.beta_values) < len(self.considered_snp_indices):
        self.beta_values.update(queue_beta_linear.get())
# ##### linear regression std-error step related functions
def std_error_linear_step(self):
    """Compute linear regression standard error values using the aggregated SSE values.

    Re-keys the clients' aggregated SSE list by SNP index, fans the
    per-SNP std-error computation out to worker processes (results are
    delivered through a queue drained by a reader thread), computes
    t-stats/p-values for the chunk, saves results, and then either sets
    up the next chunk or, on the last chunk, generates the Manhattan
    plot and moves the project to the RESULT step.
    On any error the project is marked failed.
    """
    try:
        # aggregate SSE values from the clients
        sse_values = self.compute_aggregated_parameter(SplinkLocalParameter.SSE, DataType.NUMPY_ARRAY_FLOAT)
        # convert sse list to dictionary; the aggregated list is ordered by ascending SNP index
        self.sse_values = dict()
        snp_counter = -1
        for snp_index in sorted(self.considered_snp_indices):
            snp_counter += 1
            self.sse_values[snp_index] = sse_values[snp_counter]
        # initialize std_error_values as an empty dictionary
        self.std_error_values = dict()
        # queue through which worker processes deliver their results
        queue_std_error = multiprocessing.Queue()
        # thread to read from the queue (exits once all considered SNPs have results)
        std_error_read_thread = threading.Thread(target=self.read_queue_std_error, args=(queue_std_error,))
        std_error_read_thread.daemon = True
        std_error_read_thread.start()
        # processes to compute the std error values for the sub-chunks
        sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
        process_list = list()
        for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices, sub_chunk_end_indices):
            process = multiprocessing.Process(target=self.calculate_std_error_linear_sub_chunk,
                                              args=(start_index_sub_chunk, end_index_sub_chunk, queue_std_error,))
            process_list.append(process)
            process.daemon = True
            process.start()
        # wait for read thread to be done
        std_error_read_thread.join()
        # close queues
        queue_std_error.close()
        # terminate the processes
        for proc in process_list:
            proc.terminate()
        # compute results (i.e. t-stats and p-values) for the chunk
        self.compute_results_regression()
        # add chromosome number, base pair distance, and p-value of the current chunk to results for all chunks
        self.append_to_results_all_chunks()
        # save results (run in a separate process)
        save_process = multiprocessing.Process(target=self.save_results_regression)
        save_process.daemon = True
        save_process.start()
        save_process.join()
        save_process.terminate()
        # empty the dictionaries to release the memory because they are not needed anymore
        self.init_algorithm_attributes()
        # if this is not the last chunk, set up the next chunk of SNPs
        if not self.is_last_chunk():
            self.setup_next_chunk()
        else:
            # if this is the last chunk, generate the manhattan plot first, and then, tell clients to download the results
            self.manhattan_plot()
            self.set_step(HyFedProjectStep.RESULT)
    except Exception as std_error_linear_exception:
        logger.error(f'Project {self.project_id}: {std_error_linear_exception}')
        self.project_failed()
def calculate_std_error_linear_sub_chunk(self, start_index, end_index, queue_std_error):
    """Worker-process target: compute linear regression standard errors
    for the considered SNPs in [start_index, end_index) and push them
    through the queue as {snp_index: std-error vector} dicts."""
    pending = dict()
    for snp_index in np.arange(start_index, end_index):
        if snp_index not in self.considered_snp_indices:
            continue
        # flush accumulated results after every 1000 processed SNPs
        if snp_index % 1001 == 1000:
            queue_std_error.put(pending)
            pending = dict()
        dof = self.non_missing_sample_counts[snp_index] - len(self.covariates) - 2
        sigma_squared = self.sse_values[snp_index] / dof
        pending[snp_index] = np.sqrt((sigma_squared * self.xt_x_inverse_matrices[snp_index]).diagonal())
    queue_std_error.put(pending)
# used in std-error step of linear/logistic regression
def read_queue_std_error(self, queue_std_error):
    """Reader-thread target, shared by the std-error step of linear and
    logistic regression: merge std-error result dicts coming from the
    worker processes into self.std_error_values, returning once an
    entry exists for every considered SNP."""
    while len(self.std_error_values) < len(self.considered_snp_indices):
        self.std_error_values.update(queue_std_error.get())
# ##### logistic regression beta step related functions
def beta_logistic_step(self):
    """Compute logistic regression global beta values using the aggregated gradient and Hessian matrices for the chunk.

    One round of the iterative beta update: aggregates gradients,
    Hessians and log-likelihoods from the clients, updates beta values
    in worker processes, excludes SNPs with a singular Hessian
    (marked "NA"), marks converged SNPs as done, and then either stays
    in this step for another iteration or — once all SNPs converged or
    max_iterations is reached — moves to the logistic std-error step.
    On any error the project is marked failed.
    """
    try:
        # aggregate gradient, Hessian, and log likelihood values from the clients
        gradient_vectors = self.compute_aggregated_parameter(SplinkLocalParameter.GRADIENT, DataType.LIST_NUMPY_ARRAY_FLOAT)
        hessian_matrices = self.compute_aggregated_parameter( SplinkLocalParameter.HESSIAN, DataType.LIST_NUMPY_ARRAY_FLOAT)
        log_likelihood_values = self.compute_aggregated_parameter(SplinkLocalParameter.LOG_LIKELIHOOD, DataType.NUMPY_ARRAY_FLOAT)
        # convert lists to dictionaries; aggregated lists are ordered by ascending SNP index
        self.gradient_vectors = dict()
        self.hessian_matrices = dict()
        self.new_log_likelihood_values = dict()
        snp_counter = -1
        for snp_index in sorted(self.considered_in_process_snp_indices):
            snp_counter += 1
            self.gradient_vectors[snp_index] = gradient_vectors[snp_counter]
            self.hessian_matrices[snp_index] = hessian_matrices[snp_counter]
            self.new_log_likelihood_values[snp_index] = log_likelihood_values[snp_counter]
        # initialize new beta values as an empty dictionary
        self.new_beta_values = dict()
        # queue through which worker processes deliver their results
        queue_beta_values = multiprocessing.Queue()
        # thread to read from the queue
        beta_value_read_thread = threading.Thread(target=self.read_queue_beta_logistic, args=(queue_beta_values,))
        beta_value_read_thread.daemon = True
        beta_value_read_thread.start()
        # processes to compute the new beta values for the sub-chunks
        sub_chunk_start_indices, sub_chunk_end_indices = self.get_start_end_indices(cpu_cores=8)
        process_list = list()
        for start_index_sub_chunk, end_index_sub_chunk in zip(sub_chunk_start_indices, sub_chunk_end_indices):
            process = multiprocessing.Process(target=self.calculate_beta_logistic_sub_chunk,
                                              args=(start_index_sub_chunk, end_index_sub_chunk, queue_beta_values,))
            process_list.append(process)
            process.daemon = True
            process.start()
        # wait for read thread to be done
        beta_value_read_thread.join()
        # close queues
        queue_beta_values.close()
        # terminate the processes
        for proc in process_list:
            proc.terminate()
        # update beta values
        for snp_index in self.new_beta_values.keys():
            self.beta_values[snp_index] = self.new_beta_values[snp_index]
        # update considered index set; "NA" marks SNPs whose Hessian was singular.
        # (discarding while iterating is safe here: considered_in_process_snp_indices
        # and considered_snp_indices are distinct sets)
        for snp_index in self.considered_in_process_snp_indices:
            if self.beta_values[snp_index][0] == "NA":
                self.considered_snp_indices.discard(snp_index)
                self.std_error_values[snp_index] = self.beta_values[snp_index]
                self.t_stat_values[snp_index] = self.beta_values[snp_index]
                self.p_values[snp_index] = self.beta_values[snp_index]
                continue
        # check whether beta values for the SNP converged. If so, remove the SNP index from the in_process indices
        for snp_index in self.considered_in_process_snp_indices:
            old_log_likelihood = self.log_likelihood_values[snp_index]
            new_log_likelihood = self.new_log_likelihood_values[snp_index]
            if self.has_converged(old_log_likelihood, new_log_likelihood):
                self.in_process_snp_indices.discard(snp_index)
        # update log likelihood values
        for snp_index in self.new_log_likelihood_values.keys():
            self.log_likelihood_values[snp_index] = self.new_log_likelihood_values[snp_index]
        # if there are still SNPs whose beta values not converged and max iterations not reached yet,
        # share updated global beta values (excluding those ignored or converged) with the clients and stay in beta_logistic step
        self.considered_in_process_snp_indices = self.considered_snp_indices.intersection(self.in_process_snp_indices)
        if self.current_beta_iteration != self.max_iterations and len(self.considered_in_process_snp_indices) != 0:
            self.current_beta_iteration += 1
            beta_values = {snp_index: self.beta_values[snp_index] for snp_index in self.considered_in_process_snp_indices}
            self.global_parameters[SplinkGlobalParameter.BETA] = beta_values
            self.global_parameters[SplinkGlobalParameter.CURRENT_BETA_ITERATION] = self.current_beta_iteration
            logger.debug(f'Project {self.project_id}: Beta iteration # {self.current_beta_iteration} done!')
        # if beta max iterations reached or all beta values converged, share updated beta values (excluding ignored SNPs)
        # with clients and go to the std_error_logistic step
        else:
            beta_values = {snp_index: self.beta_values[snp_index] for snp_index in self.considered_snp_indices}
            self.global_parameters[SplinkGlobalParameter.BETA] = beta_values
            self.set_step(SplinkProjectStep.STD_ERROR_LOGISTIC)
    except Exception as beta_logistic_exception:
        logger.error(f'Project {self.project_id}: {beta_logistic_exception}')
        self.project_failed()
def calculate_beta_logistic_sub_chunk(self, start_index, end_index, queue_beta_values):
    """Run one Newton-Raphson beta update for every in-process SNP in
    [start_index, end_index) and push the results through the queue.

    Results are flushed to ``queue_beta_values`` in periodic batches plus one
    final flush at the end, so the reader can start consuming early.  SNPs
    whose Hessian is singular get a vector of "NA" strings (one entry per
    coefficient: intercept + SNP + covariates) instead of updated betas.
    """
    batch = dict()
    for idx in np.arange(start_index, end_index):
        # skip SNPs that were ignored or whose betas already converged
        if idx not in self.considered_in_process_snp_indices:
            continue
        # periodic flush (roughly every thousand SNP indices)
        if idx % 1001 == 1000:
            queue_beta_values.put(batch)
            batch = dict()
        hessian = self.hessian_matrices[idx]
        if np.linalg.det(hessian) == 0:
            # singular Hessian: no Newton step possible, mark all coefficients NA
            batch[idx] = np.array(["NA"] * (len(self.covariates) + 2))
            continue
        newton_step = np.dot(np.linalg.inv(hessian), self.gradient_vectors[idx])
        updated_beta = self.beta_values[idx].reshape(-1, 1) + newton_step
        batch[idx] = updated_beta.flatten()
    queue_beta_values.put(batch)
def read_queue_beta_logistic(self, queue_beta_values):
    """Drain beta-update batches from the queue until one result has been
    collected for every SNP index still in process.

    Blocks on ``queue_beta_values.get()``; assumes the producer processes
    eventually supply an entry for each index in
    ``considered_in_process_snp_indices``.
    """
    expected = len(self.considered_in_process_snp_indices)
    while len(self.new_beta_values) < expected:
        self.new_beta_values.update(queue_beta_values.get())
# ##### logistic regression std-error step related functions
def std_error_logistic_step(self):
""" Compute logistic regression standard error values using the aggregated Hessian matrices for the chunk """
try:
# aggregate Hessian matrices from the clients
hessian_matrices = self.compute_aggregated_parameter(SplinkLocalParameter.HESSIAN, DataType.LIST_NUMPY_ARRAY_FLOAT)
# convert list to dictionary
self.hessian_matrices = dict()
snp_counter = -1
| |
not found on disk' % uri)
a_dataset = gdal.Open(a_uri)
b_dataset = gdal.Open(b_uri)
self.assertEqual(a_dataset.RasterXSize, b_dataset.RasterXSize,
"x dimensions are different a=%s, second=%s" %
(a_dataset.RasterXSize, b_dataset.RasterXSize))
self.assertEqual(a_dataset.RasterYSize, b_dataset.RasterYSize,
"y dimensions are different a=%s, second=%s" %
(a_dataset.RasterYSize, b_dataset.RasterYSize))
self.assertEqual(a_dataset.RasterCount, b_dataset.RasterCount,
"different number of rasters a=%s, b=%s" % (
(a_dataset.RasterCount, b_dataset.RasterCount)))
for band_number in range(1, a_dataset.RasterCount + 1):
band_a = a_dataset.GetRasterBand(band_number)
band_b = b_dataset.GetRasterBand(band_number)
a_array = band_a.ReadAsArray(0, 0, band_a.XSize, band_a.YSize)
b_array = band_b.ReadAsArray(0, 0, band_b.XSize, band_b.YSize)
try:
numpy.testing.assert_array_almost_equal(a_array, b_array)
except AssertionError:
for row_index in xrange(band_a.YSize):
for pixel_a, pixel_b in zip(a_array[row_index], b_array[row_index]):
self.assertAlmostEqual(pixel_a, pixel_b,
msg='%s != %s ... Failed at row %s' %
(pixel_a, pixel_b, row_index))
def assertVectorsEqual(self, aUri, bUri):
    """
    Tests if vector datasources are equal to each other.

    This assertion method asserts the equality of these vector
    characteristics:
        + Number of layers in the vector
        + Number of features in each layer
        + Feature geometry type
        + Number of fields in each feature
        + Name of each field
        + Field values for each feature

    NOTE(review): features are compared pairwise in GetNextFeature() order,
    so both datasources are assumed to enumerate their features in the same
    order — confirm for datasources where ordering is not guaranteed.

    Args:
        aUri (string): a URI to an OGR vector
        bUri (string): a URI to an OGR vector

    Raises:
        IOError: Raised if one of the input files is not found on disk.
        AssertionError: Raised if the vectors are not found to be equal to\
        one another.

    Returns
        Nothing.
    """
    # fail early with a clear message if either input file is missing
    for uri in [aUri, bUri]:
        if not os.path.exists(uri):
            raise IOError('File "%s" not found on disk' % uri)
    shape = ogr.Open(aUri)
    shape_regression = ogr.Open(bUri)
    # Check that the shapefiles have the same number of layers
    layer_count = shape.GetLayerCount()
    layer_count_regression = shape_regression.GetLayerCount()
    self.assertEqual(layer_count, layer_count_regression,
        'The shapes DO NOT have the same number of layers')
    for layer_num in range(layer_count):
        # Get the current layer
        layer = shape.GetLayer(layer_num)
        layer_regression = shape_regression.GetLayer(layer_num)
        # Check that each layer has the same number of features
        feat_count = layer.GetFeatureCount()
        feat_count_regression = layer_regression.GetFeatureCount()
        self.assertEqual(feat_count, feat_count_regression,
            'The layers DO NOT have the same number of features')
        self.assertEqual(layer.GetGeomType(), layer_regression.GetGeomType(),
            'The layers do not have the same geometry type')
        # Get the first features of the layers and loop through all the features
        feat = layer.GetNextFeature()
        feat_regression = layer_regression.GetNextFeature()
        while feat is not None:
            # Check that the field counts for the features are the same
            layer_def = layer.GetLayerDefn()
            layer_def_regression = layer_regression.GetLayerDefn()
            field_count = layer_def.GetFieldCount()
            field_count_regression = layer_def_regression.GetFieldCount()
            self.assertEqual(field_count, field_count_regression,
                'The shapes DO NOT have the same number of fields')
            for fld_index in range(field_count):
                # Check that the features have the same field values
                field = feat.GetField(fld_index)
                field_regression = feat_regression.GetField(fld_index)
                self.assertEqual(field, field_regression,
                    'The field values DO NOT match')
                # Check that the features have the same field name
                field_ref = feat.GetFieldDefnRef(fld_index)
                field_ref_regression = \
                    feat_regression.GetFieldDefnRef(fld_index)
                field_name = field_ref.GetNameRef()
                field_name_regression = field_ref_regression.GetNameRef()
                self.assertEqual(field_name, field_name_regression,
                    'The fields DO NOT have the same name')
            # Check that the features have the same geometry
            geom = feat.GetGeometryRef()
            geom_regression = feat_regression.GetGeometryRef()
            self.assertTrue(geom.Equals(geom_regression))
            if layer.GetGeomType() != ogr.wkbPoint:
                # Check that the features have the same area,
                # but only if the shapefile's geometry is not a point, since
                # points don't have area to check.
                self.assertEqual(geom.Area(), geom_regression.Area())
            # release the OGR feature references before advancing
            feat = None
            feat_regression = None
            feat = layer.GetNextFeature()
            feat_regression = layer_regression.GetNextFeature()
    # drop the datasource references so OGR closes the underlying files
    shape = None
    shape_regression = None
def assertCSVEqual(self, aUri, bUri):
    """Tests if csv files a and b are 'almost equal' to each other on a per
    cell basis. Numeric cells are asserted to be equal out to 7 decimal
    places. Other cell types are asserted to be equal.

    NOTE(review): rows are paired with zip(), so extra trailing rows in the
    longer file are not checked (pre-existing behavior, kept as-is).

    Args:
        aUri (string): a URI to a csv file
        bUri (string): a URI to a csv file

    Raises:
        AssertionError: Raised when the two CSV files are found to be\
        different.

    Returns:
        Nothing.
    """
    # Open both files with context managers so the handles are always
    # closed, even when an assertion failure propagates out mid-comparison
    # (the previous version leaked both file objects).
    with open(aUri) as a, open(bUri) as b:
        reader_a = csv.reader(a)
        reader_b = csv.reader(b)
        for index, (a_row, b_row) in enumerate(zip(reader_a, reader_b)):
            try:
                # fast path: the whole rows compare equal as strings
                self.assertEqual(a_row, b_row,
                    'Rows differ at row %s: a=%s b=%s' % (index, a_row, b_row))
            except AssertionError:
                # slow path: compare cell by cell, numerically when possible
                for col_index, (a_element, b_element) in enumerate(zip(a_row, b_row)):
                    try:
                        a_element = float(a_element)
                        b_element = float(b_element)
                        self.assertAlmostEqual(a_element, b_element,
                            msg=('Values are significantly different at row %s col %s:'
                            ' a=%s b=%s' % (index, col_index, a_element,
                            b_element)))
                    except ValueError:
                        # we know for sure they are not floats, so compare as
                        # non-floats.
                        self.assertEqual(a_element, b_element,
                            msg=('Elements differ at row %s col%s: a=%s b=%s' %
                            (index, col_index, a_element, b_element)))
def assertMD5(self, uri, regression_hash):
    """Assert that the MD5sum of the file at `uri` matches `regression_hash`.

    Convenience wrapper around ``invest_natcap.testing.get_hash()``;
    functionally equivalent to::

        self.assertEqual(get_hash(uri), '<some md5sum>')

    Regression MD5sums can be computed with ``get_hash()`` or any
    system-level md5sum program.

    Args:
        uri (string): a string URI to the file to be tested.
        regression_hash (string): a string md5sum to be tested against.

    Raises:
        AssertionError: Raised when the MD5sum of the file at `uri`
            differs from the provided regression md5sum hash.

    Returns:
        Nothing.
    """
    current_hash = get_hash(uri)
    self.assertEqual(current_hash, regression_hash, "MD5 Hashes differ.")
def assertMatrixes(self, matrix_a, matrix_b, decimal=6):
    """Assert that two numpy matrices agree up to `decimal` decimal places.

    Thin convenience wrapper around ``numpy.testing``.

    Args:
        matrix_a (numpy.ndarray): a numpy matrix
        matrix_b (numpy.ndarray): a numpy matrix
        decimal (int): an integer of the desired precision.

    Raises:
        AssertionError: Raised when the two matrices are determined to be
            different.

    Returns:
        Nothing.
    """
    numpy.testing.assert_array_almost_equal(matrix_a, matrix_b, decimal)
def assertArchives(self, archive_1_uri, archive_2_uri):
    """
    Compare the contents of two archived workspaces against each other.

    Both archives are expected to come from ``build_regression_archives()``.
    Each archive is extracted into its own temporary folder and the two
    resulting folders are then compared with ``self.assertWorkspace()``.

    Args:
        archive_1_uri (string): a URI to a .tar.gz workspace archive
        archive_2_uri (string): a URI to a .tar.gz workspace archive

    Raises:
        AssertionError: Raised when the two workspaces are found to be
            different.

    Returns:
        Nothing.
    """
    extracted_folders = []
    for archive_uri in (archive_1_uri, archive_2_uri):
        target_folder = pygeoprocessing.geoprocessing.temporary_folder()
        data_storage.extract_archive(target_folder, archive_uri)
        extracted_folders.append(target_folder)
    self.assertWorkspace(extracted_folders[0], extracted_folders[1])
def assertWorkspace(self, archive_1_folder, archive_2_folder,
        glob_exclude=''):
    """
    Check the contents of two folders against each other.

    This method iterates through the contents of each workspace folder and
    verifies that all files exist in both folders.  If this passes, then
    each file is compared against each other using
    ``GISTest.assertFiles()``.

    If one of these workspaces includes files that are known to be
    different between model runs (such as logs, or other files that include
    timestamps), you may wish to specify a glob pattern matching those
    filenames and passing it to `glob_exclude`.

    Args:
        archive_1_folder (string): a uri to a folder on disk
        archive_2_folder (string): a uri to a folder on disk
        glob_exclude (string): a string in glob format representing files to ignore

    Raises:
        AssertionError: Raised when the two folders are found to have\
        different contents.

    Returns:
        Nothing.
    """
    import fnmatch

    def _relative_files(workspace):
        # Collect all file paths relative to `workspace`, skipping any
        # filename that matches the glob_exclude pattern.  BUGFIX: the
        # previous code used glob.glob(glob_exclude), which matches against
        # the process's current working directory rather than the
        # workspace, so the exclusion pattern silently never applied;
        # fnmatch tests the pattern against the filenames actually found.
        found = []
        for root, dirs, files in os.walk(workspace):
            root = root.replace(workspace + os.sep, '')
            for filename in files:
                if glob_exclude and fnmatch.fnmatch(filename, glob_exclude):
                    continue
                found.append(os.path.join(root, filename))
        return sorted(found)

    archive_1_files = _relative_files(archive_1_folder)
    archive_2_files = _relative_files(archive_2_folder)
    if len(archive_1_files) != len(archive_2_files):
        # report the files missing from each side (paths are relative)
        missing_from_archive_1 = list(set(archive_2_files) -
            set(archive_1_files))
        missing_from_archive_2 = list(set(archive_1_files) -
            set(archive_2_files))
        raise AssertionError('Elements missing from A:%s, from B:%s' %
            (missing_from_archive_1, missing_from_archive_2))
    else:
        # archives have the same number of files that we care about;
        # compare each pair (the lists are sorted identically)
        for file_1, file_2 in zip(archive_1_files, archive_2_files):
            file_1_uri = os.path.join(archive_1_folder, file_1)
            file_2_uri = os.path.join(archive_2_folder, file_2)
            LOGGER.debug('Checking %s, %s', file_1, file_2)
            self.assertFiles(file_1_uri, file_2_uri)
def assertJSON(self, json_1_uri, json_2_uri):
"""Assert two JSON files against each other.
The two JSON files provided will be opened, read, and their
contents will be asserted to be equal. If the two are found to be
different, the diff of the two files will be printed.
Args:
json_1_uri (string): a uri to a JSON file.
json_2_uri (string): a uri to a JSON file.
Raises:
AssertionError: Raised when the two JSON objects differ.
Returns:
Nothing.
"""
dict_1 = | |
f.write(s + "\n")
# Benchmark the (safe) Smets combination on the large frame ("size 10000"
# = bigElements): combine 2..5 random mass functions with `number` focal
# elements each, timing nb_iterations runs per case.
for number in numberOfElements:
    if number <= len(bigElements):
        m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        s = format_time("size 10000, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_smets, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 10000, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_smets, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 10000, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_smets, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 10000, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_smets, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
# visual separator between benchmark sections (console and results file)
s = "- " * 40
print(s)
f.write(s + "\n")
########################################################################################################################################################################################################
########################################################################################################################################################################################################
########################################################################################################################################################################################################
# Benchmark the UNSAFE Smets combination (no input validation) over the
# three frame sizes: small ("size 3"), medium ("size 10"), big ("size 10000").
s = "Smets (unsafe):"
print(s)
f.write(s + "\n")
# small frame (smallElements)
for number in numberOfElements:
    if number <= len(smallElements):
        m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        s = format_time("size 3, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 3, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 3, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 3, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
# medium frame (mediumElements)
for number in numberOfElements:
    if number <= len(mediumElements):
        m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
        m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
        m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
        m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
        m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
        s = format_time("size 10, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 10, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 10, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 10, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
# big frame (bigElements)
for number in numberOfElements:
    if number <= len(bigElements):
        m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
        s = format_time("size 10000, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 10000, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 10000, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 10000, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_smets_unsafe, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
########################################################################################################################################################################################################
########################################################################################################################################################################################################
########################################################################################################################################################################################################
"""
s = "- " * 40
print(s)
f.write(s + "\n")
s = "Dempster:"
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(smallElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
s = format_time("size 3, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_dempster, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_dempster, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_dempster, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 3, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_dempster, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(mediumElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(mediumElements, number)])
s = format_time("size 10, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_dempster, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_dempster, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_dempster, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_dempster, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
for number in numberOfElements:
if number <= len(bigElements):
m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(bigElements, number)])
s = format_time("size 10000, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_dempster, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_dempster, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_dempster, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
s = format_time("size 10000, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_dempster, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
print(s)
f.write(s + "\n")
"""
########################################################################################################################################################################################################
########################################################################################################################################################################################################
########################################################################################################################################################################################################
# Benchmark the UNSAFE Dempster combination, starting with the small frame.
s = "- " * 40
print(s)
f.write(s + "\n")
s = "Dempster (unsafe):"
print(s)
f.write(s + "\n")
for number in numberOfElements:
    if number <= len(smallElements):
        m1 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        m2 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        m3 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        m4 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        m5 = MassFunction(*[(x, 1.0/number) for x in random.sample(smallElements, number)])
        s = format_time("size 3, focals " + str(number) + ", 2 bbas", time_function(nb_iterations, m1.combination_dempster_unsafe, m2, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 3, focals " + str(number) + ", 3 bbas", time_function(nb_iterations, m1.combination_dempster_unsafe, m2, m3, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 3, focals " + str(number) + ", 4 bbas", time_function(nb_iterations, m1.combination_dempster_unsafe, m2, m3, m4, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
        s = format_time("size 3, focals " + str(number) + ", 5 bbas", time_function(nb_iterations, m1.combination_dempster_unsafe, m2, m3, m4, m5, timeout=timeout, verbose=False), nb_iterations, timeout)
        print(s)
        f.write(s + "\n")
| |
# repository: RUAN-ZX/smileToLife_backend
import os
import random
from time import time
from django.db.models import Q
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
import uuid,hashlib
from .models import message, user,comment,like,Dislike
import json
# Seed data: classical Chinese quotations used to pre-populate example
# comment content in the app.
commentExampleList = [
    "一年之计,莫如树谷;十年之计,莫如树木;百年之计,莫如树人。",
    "你站在阴影里,却说太阳对你不公平。",
    "古之立大事者,不惟有超世之才,亦必有坚忍不拔之志。",
    "骐骥一跃,不能十步;驽马十驾,功在不舍。",
    "这个世界上,取得成功的人是那些努力寻找他们想要机会的人,如果找不到机会,他们就去创造机会。",
    "荷叶生时春恨生,荷叶枯时秋恨成。深知身在情长在,怅望江头江水声。",
    "冬天把旧叶子揉掉了,你要听新故事吗?静静的乌江睁着眼睛,笑着对你说:总有回家的人,总有离岸的船。",
    "我们很难讨论存活得失之间的相对价值,却也始终为活着的那份存在甚至于永恒的精神而感动终身。",
    "如果,蓝天是一本无字天书,云必是无字的注脚,而你的踏雪乌骓则翻译云的语言于驰道。",
    "你想,这乌江之所以雄伟,在于她以海域般的雅量回合每一支氏族颠沛流离的故事合撰成一部大传奇,你从中阅读别人带血的篇章,也看到你项氏祖先所佔、染血的那一行。",
    "或许行年渐晚,深知在这个你曾经称雄的世间,能完整实践理想中的美,愈来愈不可得,触目所见多是无法拼凑完全的碎片,和着垓下的楚歌——故乡的歌。",
]
# Seed data: example message texts (motivational/patriotic quotes).
messageExampleList = [
    "路漫漫其修远兮,吾将上下而求索。",
    "你所谓的岁月静好,是因为有人在负重前行",
    "今生不悔入华夏,来世还愿种花家。",
    "那年乱世如麻,愿你们来世拥有锦绣年华",
    "亲们 ,大国梦的实现 ,不纯靠嘴。",
    "知不足,然后能自反也;知困,然后能自强也。",
    "为人性僻耽佳句,语不惊人死不休",
    "博观而约取,厚积而薄发。",
    "不战而屈人之兵,善之善者也。",
    "以铜为镜,可以正衣冠;以古为镜,可以知兴替;以人为镜,可以明得失。",
    "革命尚未成功,同志仍须努力。",
]
def get_unique_str():
    """Return a random 32-character hex token (MD5 digest of a fresh UUID4)."""
    digest = hashlib.md5(str(uuid.uuid4()).encode('utf-8'))
    return digest.hexdigest()
def ValidationPk(modelI, id):
    """Return 1 if a row with primary key `id` exists on model `modelI`, else 0.

    Uses ``.filter()`` rather than ``.get()`` on purpose: a missing row
    yields an empty (falsy) queryset instead of raising DoesNotExist.
    """
    # careful with .get() here! (original note) — it raises on a missing pk
    return 1 if modelI.objects.filter(pk=id) else 0
# Also need to fetch the comments, likes, etc.
# def getCommentDict(messageId):
# commentDict = {}
# try:
# commentI = comment.objects.filter(cmMsId=messageId)
# if not commentI:
# return 0
# for i in commentI:
# commentDict[str(i.cmUserId)]=str(i.cmText)
# return commentDict
# except Exception as e:
# return 0
def transform(a):
    """Map a tri-state value to a numeric code: ==True -> 3, ==False -> 2, else -> 1.

    Comparison is deliberately by equality (not identity), so 1 maps to 3
    and 0 maps to 2 — this matches the original truth table exactly.
    """
    return 3 if a == True else (2 if a == False else 1)
def getMessage(request):
    """POST endpoint with two modes; GET renders a form template.

    Mode 1 (POST with 'mId'): returns the full detail of one message —
    voice/text content, like/dislike/comment counters, timestamp, and the
    list of its comments, each enriched with the commenting user's avatar
    and alias.

    Mode 2 (POST with 'cx'/'cy'): reached when mode 1 raises (e.g. 'mId'
    missing or invalid); returns a JSON list of summaries for all messages
    whose (msCx, msCy) coordinates lie within +/-50 of the given point.

    NOTE(review): the fallback is exception-driven, so ANY unexpected error
    in mode 1 silently degrades into a coordinate query — the ordering of
    these try blocks is load-bearing.
    """
    if request.method == "POST":
        try:
            mId = request.POST.get('mId')
            mId = int(mId)
            messageI = message.objects.get(msId=mId)
            Voice = messageI.msVoice
            Text = messageI.msText
            # (original note) consider whether a periodic refresh of these
            # counters would simplify things
            LikeCount = messageI.msLikeCount
            DisLikeCount = messageI.msDisLikeCount
            # collect the comment list for this message
            dataList = []
            try:
                for cm in comment.objects.filter(cmMsId=mId).all():
                    text = cm.cmText
                    time1 = str(cm.cmTime)
                    likeCount = cm.cmLikeCount
                    disLikeCount = cm.cmDisLikeCount
                    cmId = cm.cmId
                    cmUserID = cm.cmUserId
                    # look up the commenting user to attach avatar/alias
                    UserI = user.objects.filter(userId=int(cmUserID))[0]
                    #print(cmUserID,str(UserI))
                    cmUserAvatar = UserI.userAvatar
                    cmUserAlias = UserI.userAlias
                    data1 = {
                        'cmUserId':cmUserID,
                        'cmUserAvatar':cmUserAvatar,
                        'cmUserAlias':cmUserAlias,
                        'cmId': cmId,
                        'text': text,
                        'time': time1,
                        'likeCount': likeCount,
                        'disLikeCount': disLikeCount,
                    }
                    dataList.append(data1)
            except Exception as e:
                # comment enrichment is best-effort: log and return what we have
                print('11'+str(e))
            commentCount = messageI.msCmCount
            mstime = messageI.msTime
            msUserId = messageI.msUserID
            data1 = {
                'commentList': dataList,
                'msUserId':msUserId,
                'Voice':Voice,
                'Text':Text,
                'likeCount':LikeCount,
                'DislikeCount':DisLikeCount,
                'commentCount':commentCount,
                'time':str(mstime),
            }
            return JsonResponse(data1)
        except Exception as e:
            # fall through to the coordinate-query mode below
            print(str(e)+'fffff')
        try:
            #print('p22')
            Cx = request.POST.get('cx')
            print(type(Cx))
            Cx = int(Cx)
            Cy = request.POST.get('cy')
            #print(type(Cy))
            Cy = int(Cy)
            # half-width of the square search window around (Cx, Cy)
            offset = 50
            data1 = {}
            conditionQ = Q(msCx__lt=Cx + offset) & Q(msCx__gt=Cx - offset) & Q(msCy__lt=Cy + offset) & Q(msCy__gt=Cy - offset)
            msList = message.objects.filter(conditionQ).order_by('msCx').order_by('msCy')
            mslist = []
            for ms in msList:
                # mstype: 3 = voice+text, 2 = voice only, 1 = text only
                if bool(ms.msVoice)&bool(ms.msText):
                    mstype = 3
                elif ms.msVoice:
                    mstype = 2
                else:
                    mstype = 1
                uid = ms.msUserID
                avatar = user.objects.filter(userId=uid)[0].userAvatar
                LikeCount = ms.msLikeCount
                DisLikeCount = ms.msDisLikeCount
                commentCount = ms.msCmCount
                mstime = ms.msTime
                data1 = {
                    'msId':str(ms.msId),
                    'userId': uid,
                    'likeCount':LikeCount,
                    'DislikeCount':DisLikeCount,
                    'commentCount': commentCount,
                    'mstype':mstype,
                    'time':str(mstime),
                    'avatar':avatar,
                }
                mslist.append(data1)
            return HttpResponse(json.dumps(mslist, ensure_ascii=False))
        except Exception as e:
            # neither a message id nor coordinates were usable
            print('坐标和messageID都没有!' + str(e))
            return render(request, '404.html')
    else:
        return render(request, 'artalk_getMessage.html')
def storing(t,request,cmd):
    """Save an uploaded file from the request into media/Avatar or media/Voice.

    Args:
        t: timestamp appended to the stored filename to avoid collisions.
        request: Django request carrying the upload in request.FILES.
        cmd: 1 -> the 'Avatar' field/folder; anything else -> 'Voice'.

    Returns:
        The stored filename on success, or -1 when the target file cannot
        be opened.

    NOTE(review): when no file was uploaded, `fileI` is None and
    `fileI.name` raises AttributeError *before* the `if not fileI` guard
    runs; callers (createUser/createMessage) deliberately catch that
    AttributeError as their "no file" path — the ordering here is
    load-bearing, do not reorder without updating the callers.
    NOTE(review): the output file is not closed if a write raises; a
    `with` block would fix that, but only as a behavior-reviewed change.
    """
    if cmd==1:
        cmdstr = 'Avatar'
    else:
        cmdstr = 'Voice'
    #print('p1')
    fileI = request.FILES.get(cmdstr, None)
    #print('p2')
    fileNameI = fileI.name.split('.')
    if not fileI:
        return -1
    # <basename><timestamp>.<extension>
    filename = fileNameI[0] + str(t) + '.' + fileNameI[1]
    #print('p3')
    try:
        # note: `storing` here shadows the function name with the file object
        storing = open(os.path.join('media/',cmdstr,filename), 'wb+')
    except Exception as e:
        print(e)
        storing=''
        return -1
    #print(os.path.join('media/',cmdstr,filename))
    for chunk in fileI.chunks():
        storing.write(chunk)
    storing.close()
    print(filename + 'storing oK')
    return filename
# ok
def createUser(request):
    """POST endpoint: register a new user from alias/gender/phone/password
    fields plus an uploaded 'Avatar' file.

    JSON error codes: -1 alias already taken, -2 phone already registered,
    -3 no avatar uploaded (AttributeError raised inside storing()),
    -4 any other error.  On success renders artalk_uploadUserResult.html
    with the new user; GET renders the upload form.
    """
    t = int(time())
    dictJson = {}
    userI = user()
    if request.method == "POST":
        try:
            userI.userAlias = request.POST.get('alias')
            userI.userGender = request.POST.get('gender')
            userI.userPhone = request.POST.get('phone')
            userI.userPwd = request.POST.get('password')
            # uniqueness checks on alias and phone
            if user.objects.filter(userAlias=userI.userAlias):
                dictJson['code'] = -1
                return JsonResponse(dictJson)
            if user.objects.filter(userPhone=userI.userPhone):
                dictJson['code'] = -2
                return JsonResponse(dictJson)
            print('33')
            # raises AttributeError when no 'Avatar' file was uploaded,
            # which is caught below as code -3
            avatar = storing(t, request, 1)
            # if not avatar:
            #     dictJson['code'] = -3
            #     return JsonResponse(dictJson)
            userI.userAvatar = avatar
            userI.save()
            return render(request, 'artalk_uploadUserResult.html', context={"user": userI})
        except AttributeError:
            dictJson['code'] = -3
            return JsonResponse(dictJson)
        except Exception as e:
            print(e)
            dictJson['code'] = -4
            return JsonResponse(dictJson)
    else:
        return render(request, 'artalk_uploadUser.html')
# ok
def createMessage(request):
    """POST endpoint: create a message at coordinates (cx, cy) with text and
    an optional 'Voice' upload.

    JSON responses: {'msId': <new id>} on success; error codes:
    -1 unknown user, -2 neither voice nor text supplied, -3 bad/missing
    POST fields, -4 any other error.  GET renders the upload form.

    NOTE(review): the "no voice uploaded" path relies on storing() raising
    AttributeError — see the note on storing().
    """
    t = int(time())
    dictJson = {}
    messageI = message()
    if request.method == "POST":
        try:
            Text = str(request.POST.get('text'))
            cx = int(request.POST.get('cx'))
            cy = int(request.POST.get('cy'))
            userId = int(request.POST.get('userId'))
            if not ValidationPk(user, userId):
                dictJson['code'] = -1
                return JsonResponse(dictJson)
        except Exception as e:
            print(e)
            dictJson['code'] = -3
            return JsonResponse(dictJson)
        #
        try:
            messageI.msUserID = userId
            messageI.msText = Text
            messageI.msCx = float(cx)
            messageI.msCy = float(cy)
            # raises AttributeError when no 'Voice' file was uploaded
            voice = storing(t, request, 2)
            messageI.msVoice = voice
            messageI.save()
            msId = str(message.objects.last().msId)
            data1 = {
                'msId':msId,
            }
            return JsonResponse(data1)
        # when there is no audio file
        except AttributeError:
            # text-only messages are still allowed, but require text
            if not request.POST.get('text'):
                dictJson['code'] = -2
                return JsonResponse(dictJson)
            messageI.msVoice = ''
            messageI.save()
            msId = str(message.objects.last().msId)
            data2 = {
                'msId': msId,
            }
            return JsonResponse(data2)
        except Exception as e:
            print(e)
            dictJson['code'] = -4
            return JsonResponse(dictJson)
    else:
        return render(request, 'artalk_uploadMessage.html')
# ok
def createComment(request):
    """POST endpoint: attach a comment to a message.

    Expects POST fields 'userId', 'messageId', 'text'.  JSON responses:
        {'cmId': <new comment id>} on success
        {'code': -1} unknown user
        {'code': -2} unknown message
        {'code': -3} unexpected server-side error
        {'code': -4} both user and message unknown
    GET requests render the artalk_setComment.html form.
    """
    returnDict = {}
    if request.method == 'POST':
        try:
            uId = request.POST.get('userId')
            mId = request.POST.get('messageId')
            text = request.POST.get('text')
            uV = ValidationPk(user,uId)
            mV = ValidationPk(message,mId)
            # BUGFIX: the original test `uV==0&mV==0` parsed as the chained
            # comparison `uV == (0 & mV) == 0`, i.e. just `uV == 0`, so an
            # unknown user always produced -4 even when the message existed.
            if uV == 0 and mV == 0:
                returnDict['code'] = -4
                return JsonResponse(returnDict)
            elif not uV:
                returnDict['code'] = -1
                return JsonResponse(returnDict)
            elif not mV:
                returnDict['code'] = -2
                return JsonResponse(returnDict)
            else:
                commentI = comment()
                commentI.cmMsId = str(mId)
                commentI.cmUserId = str(uId)
                commentI.cmText = str(text)
                commentI.save()
                # keep the denormalized comment counter on the parent message in sync
                cmCountPlus = message.objects.get(msId=int(mId))
                cmCountPlus.msCmCount = cmCountPlus.msCmCount+1
                cmCountPlus.save()
                returnDict['cmId'] = comment.objects.last().cmId
                return JsonResponse(returnDict)
        except Exception as e:
            print(e)
            returnDict['code'] = -3
            return JsonResponse(returnDict)
    # (original note) "this point can't be reached — revisit later"
    else:
        return render(request,'artalk_setComment.html')
# Check against past likes for duplicates!!
# A user must not like the same comment twice!
# For the comments themselves it does not matter!
# ok
# ok
def getRandomDisLikeUser(code, id):
    """Pick a random user id that has neither disliked nor liked the target.

    code truthy  -> the target is a message (id is a msId);
    code falsy   -> the target is a comment (id is a cmId).
    Draws again (recursively) until an unused user is found.

    BUGFIX: the original combined querysets of two different models with `&`
    (Dislike.objects.filter(...) & like.objects.filter(...)), which Django
    rejects ("Cannot combine queries on two different base models"). Replaced
    with separate .exists() checks, matching getRandomLikeUser().
    """
    # NOTE(review): assumes at least 100 users exist -- randrange(0, 100)
    userI = user.objects.all()[random.randrange(0, 100)].userId
    if code:
        taken = (Dislike.objects.filter(Q(disLikeUserId=userI) & Q(disLikeMsId=id)).exists()
                 or like.objects.filter(Q(likeUserId=userI) & Q(likeMsId=id)).exists())
    else:
        taken = (Dislike.objects.filter(Q(disLikeCmId=id) & Q(disLikeUserId=userI)).exists()
                 or like.objects.filter(Q(likeUserId=userI) & Q(likeCmId=id)).exists())
    if not taken:
        return userI
    # this user already reacted to the target: draw another one
    return getRandomDisLikeUser(code, id)
def getRandomLikeUser(code, id):
    """Pick a random user id that has neither liked nor disliked the target.

    code truthy  -> the target is a message (id is a msId);
    code falsy   -> the target is a comment (id is a cmId).
    Draws again (recursively) until an unused user is found.

    BUGFIX: the original condition `not bool(liked) & bool(disliked)` parsed
    as "not (liked and disliked)" (& binds tighter than not) and therefore
    accepted a user who had already liked the target. The surrounding
    comments state the intent is "no duplicate reactions", i.e. "neither".
    Also removed a stray debug print.
    """
    # NOTE(review): assumes at least 100 users exist -- randrange(0, 100)
    userI = user.objects.all()[random.randrange(0, 100)].userId
    if code:
        taken = (like.objects.filter(Q(likeMsId=id) & Q(likeUserId=userI)).exists()
                 or Dislike.objects.filter(Q(disLikeMsId=id) & Q(disLikeUserId=userI)).exists())
    else:
        taken = (like.objects.filter(Q(likeCmId=id) & Q(likeUserId=userI)).exists()
                 or Dislike.objects.filter(Q(disLikeCmId=id) & Q(disLikeUserId=userI)).exists())
    if not taken:
        return userI
    # this user already reacted to the target: draw another one
    return getRandomLikeUser(code, id)
def createUM(request):
    """Create a demo user plus one message, then seed three random reactions.

    On POST: stores the uploaded avatar and voice files, creates a user and a
    message with example text/coordinates derived from the current timestamp,
    and seeds 3 likes (even timestamp) or 3 dislikes (odd timestamp) from
    random users. Renders a result page with the new user and message.
    On GET, renders the upload form instead.
    """
    t = int(time())
    if request.method == "POST":
        avatar = storing(t, request, 1)
        if not avatar:
            return HttpResponse('No file!!!')
        userI = user().create(t % 100, avatar)
        userI.save()
        voice = storing(t, request, 2)
        messageI = message()
        messageI.msVoice = voice
        messageI.msCx = t % 50
        messageI.msCy = 100 - t % 50
        messageI.msText = messageExampleList[t % 10]
        messageI.msUserID = str(user.objects.last().userId)
        messageI.save()
        # Seed reactions: one message gets 3 likes or 3 dislikes, each from a
        # distinct random user (best effort -- failures are only logged).
        try:
            flag = int(time())
            msId1 = message.objects.last().msId
            for _ in range(3):
                if flag % 2 == 0:
                    likeI = like()
                    likeI.likeMsId = str(msId1)
                    likeI.likeUserId = getRandomLikeUser(1, str(msId1))
                    likeI.save()
                else:
                    DislikeI = Dislike()
                    DislikeI.disLikeMsId = str(msId1)
                    DislikeI.disLikeUserId = getRandomDisLikeUser(1, str(msId1))
                    DislikeI.save()
            # BUGFIX: the original assigned msLikeCount/msDisLikeCount on a
            # freshly fetched instance without calling save(), so the
            # counters were never persisted.
            msgObj = message.objects.get(msId=msId1)
            if flag % 2 == 0:
                msgObj.msLikeCount = 3
            else:
                msgObj.msDisLikeCount = 3
            msgObj.save()
        except Exception as e:
            print(e)
        return render(
            request,
            'artalk_uploadUMResult.html',
            context={
                "user": userI,
                "message": messageI,
            })
    else:
        return render(request, 'artalk_uploadUM.html')
def toggleMsLike(request):
    """Toggle the current user's "like" on a message.

    Expected POST fields: userId, msId.
    JSON codes: 1 like added, 3 like removed, 2 a dislike exists,
    -1 cannot like while the user's dislike is active, -2 unknown user,
    -3 unknown message, -4 missing parameter / unexpected error,
    -5 both ids unknown.
    On GET, renders the like form instead.
    """
    returnDict = {}
    if request.method == 'POST':
        try:
            uId = request.POST.get('userId')
            mId = request.POST.get('msId')
            uV = ValidationPk(user, uId)
            mV = ValidationPk(message, mId)
            # existing reactions of this user on this message
            umV = like.objects.filter(Q(likeMsId=mId) & Q(likeUserId=uId))
            DislikeI = Dislike.objects.filter(Q(disLikeMsId=mId) & Q(disLikeUserId=uId))
            if not (uV or mV):
                returnDict['code'] = -5
                return JsonResponse(returnDict)
            if not uV:
                returnDict['code'] = -2
                return JsonResponse(returnDict)
            if not mV:
                returnDict['code'] = -3
                return JsonResponse(returnDict)
            if umV:
                # like already present: remove it and decrement the counter
                umV.delete()
                msCountPlus = message.objects.get(msId=int(mId))
                msCountPlus.msLikeCount = msCountPlus.msLikeCount - 1
                msCountPlus.save()
            else:
                if DislikeI:
                    # refuse to like while the user's dislike is active
                    returnDict['code'] = -1
                    return JsonResponse(returnDict)
                LikeI = like()
                LikeI.likeMsId = str(mId)
                LikeI.likeUserId = str(uId)
                LikeI.save()
                msCountPlus = message.objects.get(msId=int(mId))
                msCountPlus.msLikeCount = msCountPlus.msLikeCount + 1
                msCountPlus.save()
            # re-query to report the final state to the client
            LikeI = like.objects.filter(Q(likeMsId=mId) & Q(likeUserId=uId))
            if DislikeI:
                returnDict['code'] = 2
                return JsonResponse(returnDict)
            if LikeI:
                returnDict['code'] = 1
                return JsonResponse(returnDict)
            # BUGFIX: was `not bool(DislikeI) & bool(LikeI)` ("not both",
            # since & binds tighter than not); the sibling toggleMsDisLike
            # uses the intended "neither exists" check.
            if not (bool(DislikeI) | bool(LikeI)):
                returnDict['code'] = 3
                return JsonResponse(returnDict)
        except Exception as e:
            # missing parameter or database failure
            returnDict['code'] = -4
            return JsonResponse(returnDict)
    else:
        return render(request, 'artalk_createMsLike.html')
## ok
def toggleMsDisLike(request):
    """Toggle the current user's "dislike" on a message.

    Expected POST fields: userId, msId.
    JSON codes: 2 dislike added, 3 dislike removed,
    -1 cannot dislike while the user's like is active, -2 unknown user,
    -3 unknown message, -4 missing parameter / unexpected error,
    -5 both ids unknown.
    On GET, renders the dislike form instead.
    """
    returnDict = {}
    if request.method == 'POST':
        try:
            uId = request.POST.get('userId')
            mId = request.POST.get('msId')
            uV = ValidationPk(user, uId)
            mV = ValidationPk(message, mId)
            # existing reactions of this user on this message
            umV = Dislike.objects.filter(Q(disLikeMsId=mId) & Q(disLikeUserId=uId))
            LikeI = like.objects.filter(Q(likeMsId=mId) & Q(likeUserId=uId))
            if not uV | mV:
                returnDict['code'] = -5
                return JsonResponse(returnDict)
            if not uV:
                returnDict['code'] = -2
                return JsonResponse(returnDict)
            if not mV:
                returnDict['code'] = -3
                return JsonResponse(returnDict)
            # dislike already present: remove it and decrement the counter
            if umV:
                umV.delete()
                msCountPlus = message.objects.get(msId=int(mId))
                msCountPlus.msDisLikeCount = msCountPlus.msDisLikeCount -1
                msCountPlus.save()
            # not present yet: add it (unless the user currently likes it)
            else:
                if LikeI:
                    returnDict['code'] = -1
                    return JsonResponse(returnDict)
                else:
                    # NOTE(review): LikeI is re-bound here to the new Dislike
                    # instance, so the `if LikeI` branch below reflects this
                    # object rather than an actual like -- verify intent.
                    LikeI = Dislike()
                    LikeI.disLikeMsId = str(mId)
                    LikeI.disLikeUserId = str(uId)
                    LikeI.save()
                    msCountPlus = message.objects.get(msId=int(mId))
                    msCountPlus.msDisLikeCount = msCountPlus.msDisLikeCount + 1
                    msCountPlus.save()
            # re-query to report the final state to the client
            DislikeI = Dislike.objects.filter(Q(disLikeMsId=mId) & Q(disLikeUserId=uId))
            if DislikeI:
                returnDict['code'] = 2
                return JsonResponse(returnDict)
            if LikeI:
                returnDict['code'] = 1
                return JsonResponse(returnDict)
            if not (bool(DislikeI) | bool(LikeI)):
                returnDict['code'] = 3
                return JsonResponse(returnDict)
        except Exception as e:
            # missing parameter or database failure
            returnDict['code'] = -4
            return JsonResponse(returnDict)
    else:
        return render(request, 'artalk_createMsDisLike.html')
# ok
def toggleCmDisLike(request):
returnDict = {}
if request.method == 'POST':
try:
uId = request.POST.get('userId')
mId = request.POST.get('cmId')
uV = ValidationPk(user, uId)
mV = ValidationPk(comment, | |
'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852679':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852680':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852681':{'en': 'China Unicom', 'zh': u('\u4e2d\u56fd\u8054\u901a'), 'zh_Hant': u('\u4e2d\u570b\u806f\u901a')},
'852682':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852684':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852685':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852687':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852688':{'en': 'China Unicom', 'zh': u('\u4e2d\u56fd\u8054\u901a'), 'zh_Hant': u('\u4e2d\u570b\u806f\u901a')},
'852689':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852690':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852691':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852692':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852693':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852694':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852695':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852697':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852699':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'85270':{'zh_Hant': u('\u6578\u78bc\u901a')},
'852707':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a')},
'852708':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf')},
'8528480':{'en': 'Handy', 'zh': 'Handy', 'zh_Hant': 'Handy'},
'8528481':{'en': 'Sun Mobile', 'zh': u('\u65b0\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u65b0\u79fb\u52d5\u901a\u8a0a')},
'8528485':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a')},
'8528486':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a')},
'8528487':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a')},
'8528488':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a')},
'8528489':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a')},
'852849':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8')},
'852901':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852902':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852903':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852904':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'8529057':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852906':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852907':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852908':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852909':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852910':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852912':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852913':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852914':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852915':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852916':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852917':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852918':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852919':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852920':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852921':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852922':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852923':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852924':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852925':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852926':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852927':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852928':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852929':{'en': 'China Unicom', 'zh': u('\u4e2d\u56fd\u8054\u901a'), 'zh_Hant': u('\u4e2d\u570b\u806f\u901a')},
'8529290':{'en': 'Multibyte', 'zh': 'Multibyte', 'zh_Hant': 'Multibyte'},
'852930':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852931':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852932':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852933':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852934':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852935':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852936':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852937':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852938':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852940':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852941':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852942':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852943':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852944':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852945':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852946':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852947':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852948':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852949':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852950':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852951':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852952':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852953':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852954':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852955':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852956':{'en': 'China Unicom', 'zh': u('\u4e2d\u56fd\u8054\u901a'), 'zh_Hant': u('\u4e2d\u570b\u806f\u901a')},
'852957':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852958':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852960':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852961':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852962':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852963':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852964':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852965':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852966':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852967':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852968':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852969':{'en': 'China-Hongkong Telecom', 'zh': u('\u4e2d\u6e2f\u901a'), 'zh_Hant': u('\u4e2d\u6e2f\u901a')},
'852970':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852971':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852972':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852973':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852974':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852975':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852976':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852977':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852978':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852979':{'en': 'China Mobile', 'zh': u('\u4e2d\u56fd\u79fb\u52a8'), 'zh_Hant': u('\u4e2d\u570b\u79fb\u52d5')},
'852980':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852981':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852982':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852983':{'en': '3', 'zh': '3', 'zh_Hant': '3'},
'852984':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852985':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852986':{'en': 'SmarTone', 'zh': u('\u6570\u7801\u901a'), 'zh_Hant': u('\u6578\u78bc\u901a')},
'852987':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'852988':{'en': 'HKT', 'zh': u('\u9999\u6e2f\u79fb\u52a8\u901a\u8baf'), 'zh_Hant': u('\u9999\u6e2f\u79fb\u52d5\u901a\u8a0a')},
'853620':{'en': 'CTM'},
'8536200':{'en': 'SmarTone'},
'853621':{'en': 'China Telecom'},
'853622':{'en': '3'},
'853623':{'en': 'CTM'},
'853624':{'en': '3'},
'8536242':{'en': 'CTM'},
'8536243':{'en': 'CTM'},
'8536244':{'en': 'SmarTone'},
'8536245':{'en': 'SmarTone'},
'853625':{'en': 'SmarTone'},
'8536250':{'en': 'CTM'},
'8536251':{'en': 'CTM'},
'8536252':{'en': 'CTM'},
'8536253':{'en': 'CTM'},
'853626':{'en': 'SmarTone'},
'8536264':{'en': 'CTM'},
'8536265':{'en': 'CTM'},
'8536266':{'en': 'CTM'},
'8536267':{'en': 'CTM'},
'8536270':{'en': 'SmarTone'},
'8536271':{'en': 'SmarTone'},
'8536272':{'en': 'CTM'},
'8536273':{'en': 'CTM'},
'8536274':{'en': 'CTM'},
'8536275':{'en': 'CTM'},
'8536276':{'en': '3'},
'8536277':{'en': '3'},
'8536278':{'en': '3'},
'8536279':{'en': '3'},
'853628':{'en': 'CTM'},
'853629':{'en': 'CTM'},
'8536292':{'en': '3'},
'8536293':{'en': '3'},
'8536294':{'en': '3'},
'8536295':{'en': '3'},
'853630':{'en': '3'},
'8536300':{'en': 'CTM'},
'8536301':{'en': 'CTM'},
'8536302':{'en': 'CTM'},
'8536309':{'en': 'CTM'},
'853631':{'en': '3'},
'853632':{'en': 'CTM'},
'8536320':{'en': '3'},
'8536321':{'en': '3'},
'8536322':{'en': 'China Telecom'},
'8536323':{'en': 'China Telecom'},
'853633':{'en': 'CTM'},
'8536336':{'en': '3'},
'8536337':{'en': '3'},
'8536338':{'en': '3'},
'8536339':{'en': '3'},
'8536340':{'en': 'China Telecom'},
'8536341':{'en': 'China Telecom'},
'8536342':{'en': 'China Telecom'},
'8536343':{'en': 'China Telecom'},
'8536344':{'en': '3'},
'8536345':{'en': 'CTM'},
'8536346':{'en': 'CTM'},
'8536347':{'en': 'CTM'},
'8536348':{'en': 'CTM'},
'8536349':{'en': 'CTM'},
'853635':{'en': 'China Telecom'},
'853636':{'en': 'SmarTone'},
'853637':{'en': 'China Telecom'},
'8536378':{'en': '3'},
'8536379':{'en': '3'},
'853638':{'en': '3'},
'8536386':{'en': 'China Telecom'},
'8536387':{'en': 'China Telecom'},
'8536388':{'en': 'China Telecom'},
'8536389':{'en': 'China Telecom'},
'853639':{'en': 'CTM'},
'8536390':{'en': 'China Telecom'},
'8536391':{'en': 'China Telecom'},
'8536398':{'en': '3'},
'8536399':{'en': '3'},
'8536500':{'en': '3'},
'8536501':{'en': '3'},
'8536502':{'en': '3'},
'8536503':{'en': '3'},
'8536504':{'en': '3'},
'8536515':{'en': 'CTM'},
'8536516':{'en': 'CTM'},
'8536517':{'en': 'CTM'},
'8536518':{'en': 'CTM'},
'8536519':{'en': 'CTM'},
'853652':{'en': 'CTM'},
'8536520':{'en': 'China Telecom'},
'8536521':{'en': 'China Telecom'},
'8536522':{'en': 'China Telecom'},
'8536523':{'en': 'China Telecom'},
'853653':{'en': 'CTM'},
'8536532':{'en': '3'},
'8536533':{'en': '3'},
'8536534':{'en': '3'},
'8536535':{'en': '3'},
'8536540':{'en': '3'},
'8536541':{'en': '3'},
'85365420':{'en': '3'},
'85365421':{'en': '3'},
'85365422':{'en': '3'},
'85365423':{'en': '3'},
'85365424':{'en': '3'},
'85365425':{'en': 'China Telecom'},
'85365426':{'en': 'China Telecom'},
'85365427':{'en': 'China Telecom'},
'85365428':{'en': 'China Telecom'},
'85365429':{'en': 'China Telecom'},
'8536543':{'en': 'China Telecom'},
'8536544':{'en': 'China Telecom'},
'8536545':{'en': 'CTM'},
'8536546':{'en': 'CTM'},
'85365470':{'en': 'CTM'},
'85365471':{'en': 'CTM'},
'85365472':{'en': 'CTM'},
'85365473':{'en': 'CTM'},
'85365474':{'en': 'CTM'},
'85365475':{'en': 'SmarTone'},
'85365476':{'en': 'SmarTone'},
'85365477':{'en': 'SmarTone'},
'85365478':{'en': 'SmarTone'},
'85365479':{'en': 'SmarTone'},
'8536548':{'en': 'SmarTone'},
'8536549':{'en': 'SmarTone'},
'853655':{'en': 'CTM'},
'8536556':{'en': 'China Telecom'},
'8536557':{'en': 'China Telecom'},
'8536558':{'en': 'China Telecom'},
'8536559':{'en': 'China Telecom'},
'853656':{'en': 'China Telecom'},
'853657':{'en': '3'},
'8536570':{'en': 'China Telecom'},
'8536571':{'en': 'China Telecom'},
'8536572':{'en': 'China Telecom'},
'8536573':{'en': 'China Telecom'},
'853658':{'en': 'China Telecom'},
'8536586':{'en': 'CTM'},
'8536587':{'en': 'CTM'},
'8536588':{'en': 'CTM'},
'8536589':{'en': 'CTM'},
'853659':{'en': 'CTM'},
'8536598':{'en': 'China Telecom'},
'8536599':{'en': 'China Telecom'},
'85366001':{'en': 'CTM'},
'8536601':{'en': 'CTM'},
'8536602':{'en': 'SmarTone'},
'8536603':{'en': '3'},
'8536604':{'en': 'SmarTone'},
'8536605':{'en': 'China Telecom'},
'8536610':{'en': '3'},
'8536611':{'en': '3'},
'8536612':{'en': 'CTM'},
'8536613':{'en': 'CTM'},
'8536614':{'en': 'SmarTone'},
'8536615':{'en': 'SmarTone'},
'8536616':{'en': '3'},
'8536617':{'en': '3'},
'8536618':{'en': 'CTM'},
'8536619':{'en': 'CTM'},
'853662':{'en': 'SmarTone'},
'853663':{'en': '3'},
'853664':{'en': '3'},
'8536640':{'en': 'SmarTone'},
'8536641':{'en': 'SmarTone'},
'8536647':{'en': 'CTM'},
'8536649':{'en': 'China Telecom'},
'853665':{'en': 'CTM'},
'8536656':{'en': '3'},
'8536657':{'en': '3'},
'853666':{'en': 'CTM'},
'8536670':{'en': 'China Telecom'},
'8536671':{'en': 'China Telecom'},
'8536672':{'en': 'CTM'},
'8536673':{'en': 'SmarTone'},
'8536674':{'en': '3'},
'8536675':{'en': 'CTM'},
'8536676':{'en': '3'},
'8536677':{'en': 'CTM'},
'8536678':{'en': 'SmarTone'},
'8536679':{'en': 'CTM'},
'853668':{'en': 'CTM'},
'8536690':{'en': 'Guangxing Communication Co'},
'8536691':{'en': 'Guangxing Communication Co'},
'8536692':{'en': 'CTM'},
'8536693':{'en': 'CTM'},
'8536694':{'en': '3'},
'8536695':{'en': '3'},
'8536696':{'en': 'CTM'},
'8536697':{'en': '3'},
'8536698':{'en': 'CTM'},
'8536699':{'en': 'China Telecom'},
'853680':{'en': '3'},
'8536810':{'en': 'CTM'},
'8536811':{'en': 'CTM'},
'8536812':{'en': 'CTM'},
'8536813':{'en': 'CTM'},
'8536814':{'en': 'CTM'},
'8536815':{'en': 'SmarTone'},
'8536816':{'en': 'SmarTone'},
'8536817':{'en': 'SmarTone'},
'8536818':{'en': 'SmarTone'},
'8536819':{'en': 'SmarTone'},
'853682':{'en': 'China Telecom'},
'853683':{'en': 'SmarTone'},
'8536840':{'en': '3'},
'8536841':{'en': '3'},
'8536842':{'en': '3'},
'8536843':{'en': '3'},
'8536844':{'en': '3'},
'8536845':{'en': 'CTM'},
'8536846':{'en': 'CTM'},
'8536847':{'en': 'CTM'},
'8536848':{'en': 'CTM'},
'8536849':{'en': 'CTM'},
'853685':{'en': '3'},
'853686':{'en': 'China Telecom'},
'8536870':{'en': 'SmarTone'},
'8536871':{'en': 'SmarTone'},
'8536872':{'en': 'SmarTone'},
'8536873':{'en': 'SmarTone'},
'8536874':{'en': 'SmarTone'},
'8536875':{'en': '3'},
'8536876':{'en': '3'},
'8536877':{'en': '3'},
'8536878':{'en': '3'},
'8536879':{'en': '3'},
'8536880':{'en': 'CTM'},
'8536881':{'en': 'CTM'},
'8536882':{'en': 'CTM'},
'8536883':{'en': 'CTM'},
'8536884':{'en': 'CTM'},
'8536885':{'en': 'China Telecom'},
'8536886':{'en': 'China Telecom'},
'8536887':{'en': 'China Telecom'},
'8536888':{'en': 'China Telecom'},
'8536889':{'en': 'China Telecom'},
'85510':{'en': 'Smart'},
'85511':{'en': 'Cellcard'},
'85512':{'en': 'Cellcard'},
'85513':{'en': 'qbmore/Cadcomms'},
'85514':{'en': 'Cellcard'},
'85515':{'en': 'Smart'},
'85516':{'en': 'Smart'},
'85517':{'en': 'Cellcard'},
'85518':{'en': 'Seatel'},
'8553248':{'en': 'Telecom Cambodia'},
'8553348':{'en': 'Telecom Cambodia'},
'8553448':{'en': 'Telecom Cambodia'},
'8553548':{'en': 'Telecom Cambodia'},
'8553648':{'en': 'Telecom Cambodia'},
'8554248':{'en': 'Telecom Cambodia'},
'8554348':{'en': 'Telecom Cambodia'},
'8554448':{'en': 'Telecom Cambodia'},
'8555248':{'en': 'Telecom Cambodia'},
'8555348':{'en': 'Telecom Cambodia'},
'8555448':{'en': 'Telecom Cambodia'},
'8555548':{'en': 'Telecom Cambodia'},
'85560':{'en': 'Metfone'},
'8556248':{'en': 'Telecom Cambodia'},
| |
<filename>align_images/cc2d.py
#!/usr/bin/env python3
''' A set of methods for computing the cross-correlation of 2D images.
Currently, 3 methods are provided for computing the cross-correlation (CC),
each implementing different boundary conditions:
- explicit: multiplication in the real space.
- dft: multiplication in the real Fourier space.
- scipy: a wrapper around scipy.signal.correlate2d
While explicit(boundary='drop') is far less sensitive to edge effects than
dft(), it is also much slower. If a full CC map is not required,
explicit_minimize() can instead be used to locate the CC maximum within a
reasonable computation time.
For any method, let img1 and img2 the entry images. We first subtract their
respective averages:
I = img1 - avg(img1) and J = img2 - avg(img2)
Then compute the normalisation factor, which is the product of the standard
deviations of I and J:
norm = σI × σJ = sqrt(sum(I²) × sum(J²))
The cross-correlation returned by the method is:
cc = I ⋆ J / norm
'''
import functools
import itertools
import multiprocessing as mp
import numpy as np
import scipy.signal as ss
import scipy.optimize as sio
import scipy.interpolate as si
from . import tools
def _prep_for_cc(img1, img2, inplace=False):
''' Prepare img1 and img2 for cross correlation computation:
- set average to 0
- fill masked values with 0 (if masked array)
- compute the normalisation value
Parameters
==========
img1, img2 : ndarray or masked array
The 2D arrays to prepare
inplace : bool (default: False)
If True, don't copy the arrays before removing the average.
This saves time.
Returns
=======
img1, img2 : ndarray or masked array
The 2D arrays prepared
norm : float
The normalisation for these arrays
'''
if not inplace:
a1 = img1.copy()
a2 = img2.copy()
else:
a1 = img1
a2 = img2
if np.issubdtype(a1.dtype, np.integer):
a1 = a1.astype(float)
if np.issubdtype(a2.dtype, np.integer):
a2 = a2.astype(float)
a1 -= a1.mean()
a2 -= a2.mean()
try:
a1 = a1.filled(0) # = fill with average
a2 = a2.filled(0)
except AttributeError:
# not a masked array
pass
norm = np.sqrt(np.sum(a1**2) * np.sum(a2**2))
return a1, a2, norm
def _get_padding_slice(img):
''' Get the slice for padding imag in `dft(... boundary='fill')`.
Parameters
==========
img : ndarray
The 2D image.
Returns
=======
s : slice
The slice of the new array where the old data should be inserted and
retrieved.
N : tuple
The size of the new array.
'''
n = np.array(img.shape)
N = 2**np.ceil(np.log2(n * 2))
N = N.astype(int)
im = np.zeros(N[0])
nmin = N//2 - n//2 - n%2
nmax = N//2 + n//2
s = (slice(nmin[0], nmax[0]), slice(nmin[1], nmax[1]))
return s, N
def _pad_array(arr, s, N, pad_value=0):
''' Insert arr in a larger array of shape N at the position defined by s, a
slice in the larger array. The area of the new array that don't contain
data of arr are filled with pad_value.
Parameters
==========
arr : ndarray
The array to insert in a larger array
s : slice
The slice of the larger array where the data from arr are to be
inserted. This slice must have the same shape as arr.
N : tuple
The shape of the new larger array.
pad_value : float
The value used to fill the areas of the larger array that are outside
of slice s.
Return
======
a : ndarray
A larger array containing the values of arr at the positions defined by
slice s.
'''
a = np.zeros(N) + pad_value
a[s] = arr
return a
def _unpad_array(arr, s, roll=False):
''' Reverse the operation performed by `_pad_array`.
Parameters
==========
arr : ndarray
The larger array containing the padded data.
s : slice
The slice of the larger array where the data from arr are to be
inserted. This slice must have the same shape as arr.
roll : bool (default: False)
A roll of half the size of the array is required before using the data.
If True, roll, retrieve the data, and roll back.
'''
if roll:
arr = tools.roll_2d(tools.roll_2d(arr)[s])
else:
arr = arr[s]
return arr
def explicit_step(a1, a2, i, j, norm=None):
    ''' Compute the explicit cross-correlation between two arrays for a given
    integer shift.

    Parameters
    ==========
    a1, a2 : ndarray, 2D
        Data values.
    i, j : int
        The shift between a1 and a2 for which to compute the
        cross-correlation.
    norm : float or None (default: None)
        The value by which to normalize the result.
        If None, the overlapping parts of a1 and a2 are re-centred and a
        local norm sqrt(sum(I^2) * sum(J^2)) is computed from them. This
        implements boundary='drop' when computing an explicit CC map.

    Returns
    =======
    cc : float
        The cross-correlation of a1 with a2 for shift (i, j).
    '''
    ni, nj = a1.shape
    # overlapping windows of a1 and a2 for shift (i, j)
    win1 = (
        slice(max(i, 0), min(ni+i-1, ni-1) + 1),
        slice(max(j, 0), min(nj+j-1, nj-1) + 1)
    )
    win2 = (
        slice(max(-i, 0), min(ni-i-1, ni-1) + 1),
        slice(max(-j, 0), min(nj-j-1, nj-1) + 1)
    )
    sub1 = a1[win1]
    sub2 = a2[win2]
    # combine the masks (getmaskarray yields all-False for plain ndarrays)
    joint_mask = np.ma.getmaskarray(sub1) | np.ma.getmaskarray(sub2)
    sub1 = np.ma.array(sub1, mask=joint_mask)
    sub2 = np.ma.array(sub2, mask=joint_mask)
    if norm is None:
        sub1, sub2, norm = _prep_for_cc(sub1, sub2)
    return np.sum(sub1 * sub2) / norm
def explicit_step_float(a1, a2, i, j, norm=None):
    ''' Compute the explicit cross-correlation between two arrays for an
    arbitrary float shift.

    This is done by evaluating explicit_step() at the four surrounding
    integer shifts, and bilinearly interpolating the results.

    FIX: the original used scipy.interpolate.interp2d, which was deprecated
    in SciPy 1.10 and removed in SciPy 1.14. The bilinear interpolation of
    the four corner values is written out explicitly instead.

    Parameters
    ==========
    a1, a2 : ndarray, 2D
        Data values.
    i, j : float
        The shift between a1 and a2 for which to compute the
        cross-correlation.
    norm : passed to explicit_step()

    Returns
    =======
    cc : ndarray, shape (1,)
        The cross-correlation of a1 with a2 for shift (i, j), wrapped in a
        length-1 array to match the shape interp2d used to return.
    '''
    i0 = int(np.floor(i))
    j0 = int(np.floor(j))
    # CC at the four integer shifts surrounding (i, j)
    c00 = explicit_step(a1, a2, i0, j0, norm=norm)
    c01 = explicit_step(a1, a2, i0, j0 + 1, norm=norm)
    c10 = explicit_step(a1, a2, i0 + 1, j0, norm=norm)
    c11 = explicit_step(a1, a2, i0 + 1, j0 + 1, norm=norm)
    # bilinear weights from the fractional parts of the shift
    di = i - i0
    dj = j - j0
    cc = ((1 - di) * (1 - dj) * c00 + (1 - di) * dj * c01
          + di * (1 - dj) * c10 + di * dj * c11)
    return np.array([cc])
def explicit(img1, img2, simax=None, sjmax=None, boundary='fill', cores=None):
    ''' Compute the cross-correlation of img1 and img2 using explicit
    multiplication in the real space.

    Parameters
    ==========
    img1, img2 : ndarray
        The 2D images to correlate (same shape).
    simax, sjmax : int or None (default: None)
        The maximum shift on the 0 and 1 axes resp. for which to compute the
        cross-correlation.
        If None, return a cross-correlation map with the same size of the
        input images.
    boundary : 'fill' or 'drop' (default: 'fill')
        How to handle boundary conditions. 'fill' is equivalent to padding
        the images with zeros. With 'drop' the cross-correlation is computed
        using only the overlapping part of the images.
    cores : int or None (default: None)
        If not None, use multiprocessing to compute the steps using the
        specified number of processes.

    Returns
    =======
    cc : ndarray
        The 2D cross-correlation map, re-centred with tools.roll_2d.

    Raises
    ======
    ValueError
        If boundary is neither 'fill' nor 'drop'.
    '''
    ni, nj = img1.shape
    if simax is None:
        simin = - ni // 2
        simax = + ni // 2
    else:
        simin = - simax
    if sjmax is None:
        sjmin = - nj // 2
        sjmax = + nj // 2
    else:
        sjmin = - sjmax
    if boundary == 'fill':
        # re-centre / zero-fill once and use a single global norm
        img1, img2, norm = _prep_for_cc(img1, img2)
    elif boundary == 'drop':
        # norm=None makes explicit_step compute a local norm for each shift
        norm = None
    else:
        msg = "unexpected value for 'boundary': {}".format(boundary)
        raise ValueError(msg)
    worker = functools.partial(explicit_step, img1, img2, norm=norm)
    i_range = range(simin, simax)
    j_range = range(sjmin, sjmax)
    # NOTE: ni, nj are re-bound here to the CC-map size, which may differ
    # from the image shape when simax/sjmax are given
    ni = len(i_range)
    nj = len(j_range)
    iterable = itertools.product(i_range, j_range)
    if cores is None:
        cc = itertools.starmap(worker, iterable)
        cc = list(cc)
    else:
        p = mp.Pool(cores)
        try:
            # aim for one chunk per worker process; p._pool is a private
            # attribute of mp.Pool and may change between Python versions
            n_iter = ni * nj
            chunksize, extra = divmod(n_iter, len(p._pool))
            if extra:
                chunksize += 1
            cc = p.starmap(worker, iterable, chunksize=chunksize)
        finally:
            p.terminate()
    cc = np.array(cc)
    cc = cc.reshape(ni, nj)
    # shift so that the zero-lag term sits at the centre of the map
    cc = tools.roll_2d(cc)
    return cc
def explicit_minimize(img1, img2, x0=(0, 0), norm=None, **kwargs):
''' Find the position of the cross-correlation maximum between two images.
The maximum is found using scipy.optimize.minimize to minimize the opposite
of the cross-correlation of img1 with img2, computed through
explicit_step_float(). Refer to the `scipy.optimize.minimize` documentation
for the available optimisation methods.
Parameters
==========
img1, img2 : ndarray
Data | |
if kargs.get('min',False):
minStyle = kargs.get('minStyle', kargs.get('minmaxStyle', '--'))
minColor = kargs.get('minColor', kargs.get('minmaxColor', col))
minMarker = kargs.get('minMarker', kargs.get('minmaxMarker', ''))
ax.plot(xvalues, np.min(p, axis=1), color=minColor, linewidth=kargs.get('linewidth',1),linestyle=minStyle, marker=minMarker, label=lab+' (min)')
if kargs.get('max', False):
maxStyle = kargs.get('maxStyle',kargs.get('minmaxStyle','--'))
maxColor = kargs.get('maxColor',kargs.get('minmaxColor',col))
maxMarker = kargs.get('maxMarker',kargs.get('minmaxMarker',''))
ax.plot(xvalues, np.max(p, axis=1), color=maxColor, linestyle=maxStyle, linewidth=kargs.get('linewidth',1), marker=maxMarker, label=lab+' (max)')
ax.set_xlabel("Distance [{1}{0}]".format(unit, u))
if zunit is not None:
ax.set_ylabel("{1} [{0}]".format(zunit, self.channel))
else:
ax.set_ylabel("{1} [{0}]".format(self.zscale, self.channel))
return {'plot': Plot, 'l': xvalues, 'z': profile}
def get_bin_threshold(self, percent, high=True, adaptive=False, binary=True, img=False):
    """
    Threshold the image into binary values.

    Parameters
    ----------
    percent : float
        The percentage where the thresholding is made
    high : bool
        If high a value of 1 is returned for values > percent
    adaptive : bool
        If True, performs an adaptive thresholding (see skimage.filters.threshold_adaptive)
    binary : bool
        If True return bool data (True/False) otherwise numeric (0/1)
    img : bool
        If True return a SPM_image otherwise a numpy array
    """
    if adaptive:
        local = threshold_local(self.pixels, percent)
        return self.pixels > local if binary else local
    # Normalise the data to [0, 1] before comparing against `percent`.
    lowest = np.min(self.pixels)
    scaled = (self.pixels - lowest) / (np.max(self.pixels) - lowest)
    selection = scaled > percent if high else scaled < percent
    result = selection if binary else np.ones(self.pixels.shape) * selection
    if not img:
        return result
    out = copy.deepcopy(self)
    out.channel = "Threshold from " + out.channel
    out.pixels = result
    return out
def spline_offset(self, X, Y, Z=None, inline=True, ax=None, output='img', **kargs):
    """
    Subtract a spline interpolated from point coordinates.

    If Z is None, the image values at (X, Y) will be used (default).

    Parameters
    ----------
    X, Y : sequences of coordinates of the control points
        (assumed to be integer pixel indices when Z is None — TODO confirm)
    Z : sequence of values at the control points, or None
    inline : bool
        if True subtract from self.pixels and return the spline surface
    ax : matplotlib axis or None
        if given, plot the control points on it
    output : 'img' or 'spline'
        what to return when inline is False
    """
    if ax is not None:
        # Optionally annotate each control point with its index.
        if 'num' in kargs and kargs['num']:
            text_color = 'k'
            if 'text_color' in kargs:
                text_color = kargs['text_color']
                # Remove consumed keys so they are not passed to ax.plot below.
                del kargs['text_color']
            for i in range(len(X)):
                # l is True when the point is near the right border; the label
                # is then drawn on the left side of the marker.
                l = self.pixels.shape[1]-X[i] < 20
                ax.annotate(str(i), (X[i], Y[i]), ([
                            5, -5][l], 0), textcoords='offset pixels', va="center", ha=["left", "right"][l], color=text_color)
            del kargs['num']
        ax.plot(X, Y, 'o', **kargs)
    import scipy.interpolate
    # Work on a vertically flipped, zero-based copy of the image
    # (presumably to match the plot orientation — confirm against callers).
    T = np.flipud(self.pixels) - np.min(self.pixels)
    if Z is None:
        Z = [T[Y[i], X[i]] for i in range(len(X))]
    x = np.arange(self.pixels.shape[1])
    y = np.arange(self.pixels.shape[0])
    xx, yy = np.meshgrid(x, y)
    # Smooth 2D spline through the control points, evaluated on the full grid.
    I = scipy.interpolate.SmoothBivariateSpline(X, Y, Z)
    z = I.ev(xx, yy)
    if inline:
        self.pixels -= z
        return z
    else:
        if output == 'img':
            New = copy.deepcopy(self)
            New.pixels -= z
            return New
        elif output == 'spline':
            return z
        else:
            raise ValueError(
                "The output parameter should be either 'img' or 'spline'")
def get_shadow_mask(self, angle, BIN=None, prog=False):
    """
    If an image is recorded with a beam incident with a certain angle, the topography will shadow the data.
    This function generates the shadow mask for a given topography and a given incident angle.

    Parameters
    ----------
    angle : float
        The incidence angle in degrees
    BIN : numpy array
        Data. If given will move the recorded pixels at the correct x,y positions
    prog : bool
        display a progressbar ?

    Note
    ----
    This function is old, might not be optimized or working properly
    """
    if BIN is not None:
        # Force a float copy so integer input is not mutated in place.
        BIN = BIN*1.0
    slope = np.tan(np.radians(angle))
    neg = False
    if slope < 0:
        # Negative incidence: mirror the data, ray-cast with a positive
        # slope, then mirror the results back at the end.
        neg = True
        slope = -slope
        topo = np.fliplr(self.pixels)
        if BIN is not None:
            BIN = np.fliplr(BIN)
    else:
        topo = self.pixels
    # Physical x coordinate of every column, converted to metres.
    x = np.linspace(0, self.size['real']['x'], self.pixels.shape[1])
    if self.size['real']['unit'] == 'um':
        x *= 1e-6
    elif self.size['real']['unit'] == 'nm':
        x *= 1e-9
    mask = np.zeros(self.pixels.shape)
    AFM_bin_shadow = np.zeros(self.pixels.shape)
    Y = range(self.pixels.shape[0])
    if prog:
        # PB is the progressbar wrapper used elsewhere in this module.
        Y = PB(Y)
    for yi in Y:
        for xi in range(self.pixels.shape[1]):
            # Cast a ray from (xi, topo[yi, xi]) towards +x and find the
            # right-most column `cut` where the ray dips below the surface.
            cut = self.pixels.shape[1]-2
            y_ray = slope*(x-x[xi]) + topo[yi, xi]
            while cut > xi and y_ray[cut] > topo[yi, cut]:
                cut -= 1
            if xi == cut:
                # Ray never intersects the surface: the pixel is lit.
                if BIN is not None:
                    AFM_bin_shadow[yi, xi] = BIN[yi, xi]
                continue
            # Cut has been found
            if BIN is not None:
                # Interpolate where the ray crosses the surface segment
                # between columns `cut` and `cut+1`.
                x1 = x[cut]
                x2 = x[cut+1]
                y1 = topo[yi, cut]
                y2 = topo[yi, cut+1]
                x0 = x[xi]
                y0 = topo[yi, xi]
                if y2 == y1:
                    # Horizontal segment: direct intersection with the ray.
                    x_cut = (y1+slope*x0-y0)/slope
                    y_cut = y1
                else:
                    numerator = x1/(x2-x1)+(y0-slope*x0-y1)/(y2-y1)
                    denominator = 1/(x2-x1)-slope/(y2-y1)
                    x_cut = numerator / denominator
                    y_cut = slope*(x_cut-x0)+y0
                if x_cut >= x1 and x_cut <= x2:
                    # Linear interpolation of the recorded data at x_cut.
                    y1 = BIN[yi, cut]
                    y2 = BIN[yi, cut+1]
                    yint = (((y2-y1)/(x2-x1))*(x_cut-x1))+y1
                else:
                    # Intersection fell outside the segment: keep the raw value.
                    yint = BIN[yi, xi]
                AFM_bin_shadow[yi, xi] = yint
            mask[yi, xi] = 1
    if neg:
        # Undo the mirroring applied for negative slopes.
        mask = np.fliplr(mask)
        AFM_bin_shadow = np.fliplr(AFM_bin_shadow)
    if BIN is not None:
        return (mask, AFM_bin_shadow)
    return mask
def adjust_position(self, fixed):
    """
    Shift the current pixels to match a fixed image.
    The shift is determined by the position where the cross-correlation is maximized.
    """
    shifted = copy.deepcopy(self)
    # Cross-correlate via the Fourier domain: ifft2(conj(F(fixed)) * F(self)).
    spectrum = np.conj(np.fft.fft2(fixed.pixels)) * np.fft.fft2(self.pixels)
    corr = np.abs(np.fft.ifft2(spectrum)) / fixed.pixels.size
    ypeak, xpeak = np.unravel_index(np.argmax(corr), corr.shape)
    dy, dx = 1 - ypeak, 1 - xpeak
    shifted.pixels = np.roll(np.roll(self.pixels, dy, axis=0), dx, axis=1)
    return shifted
def align(self, tform, cut=True):
    """
    Apply an Affine transform on the data

    Parameters
    ----------
    tform : skimage.transform
        the affine transform to perform
    cut : bool
        If True cut the data

    Returns
    -------
    the warped SPM_image, plus the cut rectangle when cut=True
    """
    New = copy.deepcopy(self)
    New.pixels = tf.warp(self.pixels, tform, preserve_range=True)
    if not cut:
        return New
    # Build the crop rectangle [llx, lly, urx, ury] from the translation.
    # NOTE(review): axis 0 adjusts indices 0/2 with -= while axis 1 adjusts
    # indices 1/3 with += — the asymmetry looks suspicious; confirm against
    # the coordinate convention expected by self.cut().
    cut = [0, 0] + list(self.pixels.shape)
    if tform.translation[0] >= 0:
        cut[2] -= tform.translation[0]
    elif tform.translation[0] < 0:
        cut[0] -= tform.translation[0]
    if tform.translation[1] >= 0:
        cut[1] += tform.translation[1]
    elif tform.translation[1] < 0:
        cut[3] += tform.translation[1]
    cut = [int(x) for x in cut]
    New.cut(cut, inplace=True)
    return New, cut
def get_fft(self):
    """
    Return the 2D FFT of the image, with the zero frequency shifted to the centre.
    """
    spectrum = np.fft.fft2(self.pixels)
    return np.fft.fftshift(spectrum)
def corr_fit2d(self, nx=2, ny=1, poly=False, inline=True, mask=None):
    """
    Subtract a fitted 2D polynomial of order nx / ny from the data.

    Parameters
    ----------
    nx : int
        the polynomial order for the x-axis
    ny : int
        the polynomial order for the y-axis
    poly : bool
        if True the fitted polynomial is also returned
    inline : bool
        if True correct self, otherwise work on a copy
    mask : 2D numpy array
        mask where the fitting should be performed
    """
    _, fitted = fit2d(self.pixels, nx, ny, mask=mask)
    if not inline:
        corrected = copy.deepcopy(self)
        corrected.pixels -= fitted
        return (corrected, fitted) if poly else corrected
    self.pixels -= fitted
    return fitted if poly else self
def zero_min(self, inline=True):
    """
    Shift the values so that the minimum becomes zero.
    """
    target = self if inline else copy.deepcopy(self)
    target.pixels -= np.min(target.pixels)
    return target
def filter_lowpass(self, p, inline=True):
    """
    Apply a low-pass filter to the data in the Fourier domain.

    Parameters
    ----------
    p : float
        cutoff: frequencies whose R-mask value is >= p are suppressed
    inline : bool
        if True filter self in place, otherwise return a filtered copy

    Returns
    -------
    self when inline is True (consistency fix: the sibling inline filters
    zero_min/corr_fit2d return self, this one previously returned None),
    otherwise a new filtered instance.
    """
    F = self.get_fft()
    mask = self.getRmask() < p
    if inline:
        self.pixels = np.real(np.fft.ifft2(np.fft.fftshift(F*mask)))
        return self
    C = copy.deepcopy(self)
    C.pixels = np.real(np.fft.ifft2(np.fft.fftshift(F*mask)))
    return C
def _resize_infos(self):
"""
Internal to recalculate the real size when the image is cropped or cut
"""
self.size['real']['x'] *= self.pixels.shape[1]/self.size['pixels']['x']
self.size['real']['y'] *= self.pixels.shape[0]/self.size['pixels']['y']
self.size['pixels']['x'] = int(self.pixels.shape[1])
self.size['pixels']['y'] = int(self.pixels.shape[0])
if 'recorded' in self.size:
self.size['recorded']['real']['x'] \
*= (self.pixels.shape[1]/self.size['pixels']['x'])
self.size['recorded']['real']['y'] \
*= (self.pixels.shape[0]/self.size['pixels']['y'])
self.size['recorded']['pixels']['x'] = int(self.pixels.shape[1])
self.size['recorded']['pixels']['y'] = int(self.pixels.shape[0])
def filter_scars_removal(self, thresh=.5, inline=True):
    """
    Filter function to remove scars from images.

    Parameters
    ----------
    thresh : float
        sensitivity: a row pixel is replaced when its neighbours above and
        below agree within thresh times the local contrast
    inline : bool
        if True correct self in place, otherwise return a corrected copy
    """
    if not inline:
        C = copy.deepcopy(self)
    else:
        C = self
    for y in range(1, self.pixels.shape[0]-1):
        # b/c/a: row above, current row, row below — always read from
        # self.pixels, while corrections are written to C.pixels.
        # NOTE(review): when inline=True, C aliases self, so later rows see
        # already-corrected data; when inline=False detection runs on the
        # untouched original — confirm this asymmetry is intended.
        b = self.pixels[y-1, :]
        c = self.pixels[y, :]
        a = self.pixels[y+1, :]
        # Scar criterion: the two neighbours agree with each other much
        # better than the current row agrees with them; replace the scarred
        # pixels with the row above.
        mask = np.abs(b-a) < thresh*(np.abs(c-a))
        C.pixels[y, mask] = b[mask]
    if not inline:
        return C
    return self
def cut(self, c, inline=False, pixels=True, **kargs):
    """
    Clip/Crop the image

    Parameters
    ----------
    c : list [llx,lly,urx,ury]
        list of the lower-left (ll) and upper-right (ur) coordinates
    inline : bool
        perform the transformation inline or produce a new SPM_image?
    pixels : bool
        Are the coordinates given in pixels?

    Returns
    -------
    self if inline, the clipped SPM_image otherwise
    """
    # 'inplace' is accepted as a legacy alias for 'inline'.
    if 'inplace' in kargs:
        inline = kargs['inplace']
    if kargs.get('debug', False):
        print("cut) Input coordinates:", c)
    if not pixels:
        # Convert real-world coordinates to pixel indices, interleaving the
        # converted x and y values back into [llx, lly, urx, ury] order.
        c = [z for s in zip(*self.real2pixels(c[0::2], c[1::2])) for z in s]
        if kargs.get('debug', False):
            print("cut) pixel coordinates:", c)
    if not inline:
        new = copy.deepcopy(self)
        # `cut` here is the module-level helper, not this method.
        new.pixels = cut(self.pixels, c, **kargs)
        new._resize_infos()
        return new
    else:
        self.pixels = cut(self.pixels, c, **kargs)
        self._resize_infos()
        return self
def zoom(self, zoom_factor, inplace=False, order=3):
"""
Resize the image to a new pixel size (but keep the real size) by pixel interpolation.
Parameters
----------
zoom_factor : float
> 1: up sampling
< 1: down sampling
order : int
The spline interpolation order to use. (default: 3). Use 0 for binary or very sharp images.
inplace | |
of calling loop() if you
wish to call select() or equivalent on.
Do not use if you are using the threaded interface loop_start()."""
if self._sock == None and self._ssl == None:
return MOSQ_ERR_NO_CONN
now = time.time()
self._check_keepalive()
if self._last_retry_check+1 < now:
# Only check once a second at most
self._message_retry_check()
self._last_retry_check = now
if self._ping_t > 0 and now - self._ping_t >= self._keepalive:
# mosq->ping_t != 0 means we are waiting for a pingresp.
# This hasn't happened in the keepalive time so we should disconnect.
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
self._sock.close()
self._sock = None
self._callback_mutex.acquire()
if self._state == mosq_cs_disconnecting:
rc = MOSQ_ERR_SUCCESS
else:
rc = 1
if self.on_disconnect:
self._in_callback = True
self.on_disconnect(self, self._userdata, rc)
self._in_callback = False
self._callback_mutex.release()
return MOSQ_ERR_CONN_LOST
return MOSQ_ERR_SUCCESS
def message_retry_set(self, retry):
    """Set the timeout in seconds before a message with QoS>0 is
    retried (20 seconds by default)."""
    if not (retry < 0):
        self._message_retry = retry
    else:
        raise ValueError('Invalid retry.')
def user_data_set(self, userdata):
    """Store *userdata*, which is passed to every callback. Any type is accepted."""
    self._userdata = userdata
def will_set(self, topic, payload=None, qos=0, retain=False):
    """Set a Will to be sent by the broker in case the client disconnects unexpectedly.

    This must be called before connect() to have any effect.

    topic: The topic that the will message should be published on.
    payload: The message to send as a will. If not given, or set to None a
    zero length message will be used as the will. Passing an int or float
    will result in the payload being converted to a string representing
    that number. If you wish to send a true int/float, use struct.pack() to
    create the payload you require.
    qos: The quality of service level to use for the will.
    retain: If set to true, the will message will be set as the "last known
    good"/retained message for the topic.

    Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
    zero string length.
    """
    # Validate all inputs before touching any state, so a failed call
    # leaves the previously configured will untouched.
    if topic is None or len(topic) == 0:
        raise ValueError('Invalid topic.')
    if qos < 0 or qos > 2:
        raise ValueError('Invalid QoS level.')
    # Idiom fixes: `is None` instead of `== None`, and direct isinstance
    # checks with type tuples instead of `isinstance(...) == True` chains.
    if isinstance(payload, (str, bytearray)):
        self._will_payload = payload
    elif isinstance(payload, (int, float)):
        # Numbers are sent as their string representation.
        self._will_payload = str(payload)
    elif payload is None:
        self._will_payload = None
    else:
        raise TypeError('payload must be a string, bytearray, int, float or None.')
    self._will = True
    self._will_topic = topic
    self._will_qos = qos
    self._will_retain = retain
def will_clear(self):
    """Remove a will previously configured with will_set().
    Must be called before connect() to have any effect."""
    self._will = False
    self._will_topic, self._will_payload = "", None
    self._will_qos, self._will_retain = 0, False
def socket(self):
    """Return the ssl object when TLS is in use, otherwise the plain socket."""
    return self._ssl if self._ssl else self._sock
def loop_forever(self, timeout=1.0, max_packets=1):
    """Run loop() for you in an infinite blocking loop.

    Useful when the MQTT client loop is the only thing the program needs to
    run. Handles reconnecting automatically; returns after disconnect() is
    called from a callback."""
    if self._state == mosq_cs_connect_async:
        self.reconnect()
    while True:
        # Pump the network loop until an error or disconnect occurs.
        rc = MOSQ_ERR_SUCCESS
        while rc == MOSQ_ERR_SUCCESS:
            rc = self.loop(timeout, max_packets)
        if self._state == mosq_cs_disconnecting:
            return rc
        # Connection dropped unexpectedly: back off briefly and reconnect.
        time.sleep(1)
        self.reconnect()
def loop_start(self):
    """Part of the threaded client interface: start a daemon thread that
    processes network traffic, as an alternative to calling loop() yourself.
    Returns MOSQ_ERR_INVAL when the thread is already running."""
    if self._thread is not None:
        return MOSQ_ERR_INVAL
    worker = threading.Thread(target=self._thread_main)
    worker.daemon = True
    self._thread = worker
    worker.start()
def loop_stop(self, force=False):
    """Part of the threaded client interface: stop the network thread
    started with loop_start() and block until it finishes.
    The force parameter is currently ignored."""
    if self._thread is None:
        return MOSQ_ERR_INVAL
    # Signal the worker, wait for it, then forget it.
    self._thread_terminate = True
    self._thread.join()
    self._thread = None
# ============================================================
# Private functions
# ============================================================
def _loop_rc_handle(self, rc):
    # Handle a non-zero return code from the network loop: tear down the
    # active transport and fire on_disconnect. Returns rc, possibly rewritten
    # to MOSQ_ERR_SUCCESS when the disconnect was requested by the user.
    if rc:
        # Close whichever transport is active (TLS takes precedence).
        if self._ssl:
            self._ssl.close()
            self._ssl = None
        elif self._sock:
            self._sock.close()
            self._sock = None
        self._state_mutex.acquire()
        if self._state == mosq_cs_disconnecting:
            # Deliberate disconnect: report success to the callback.
            rc = MOSQ_ERR_SUCCESS
        self._state_mutex.release()
        self._callback_mutex.acquire()
        if self.on_disconnect:
            # _in_callback guards against re-entrant network calls made from
            # inside the user callback.
            self._in_callback = True
            self.on_disconnect(self, self._userdata, rc)
            self._in_callback = False
        self._callback_mutex.release()
    return rc
def _packet_read(self):
    # This gets called if pselect() indicates that there is network data
    # available - ie. at least one byte. What we do depends on what data we
    # already have.
    # If we've not got a command, attempt to read one and save it. This should
    # always work because it's only a single byte.
    # Then try to read the remaining length. This may fail because it is may
    # be more than one byte - will need to save data pending next read if it
    # does fail.
    # Then try to read the remaining payload, where 'payload' here means the
    # combined variable header and actual payload. This is the most likely to
    # fail due to longer length, so save current data and current position.
    # After all data is read, send to _mosquitto_handle_packet() to deal with.
    # Finally, free the memory and reset everything to starting conditions.
    if self._in_packet.command == 0:
        try:
            if self._ssl:
                command = self._ssl.read(1)
            else:
                command = self._sock.recv(1)
        except socket.error as err:
            (msg) = err
            # Non-blocking transport has no data yet: try again later.
            if self._ssl and (msg.errno == ssl.SSL_ERROR_WANT_READ or msg.errno == ssl.SSL_ERROR_WANT_WRITE):
                return MOSQ_ERR_AGAIN
            if msg.errno == errno.EAGAIN:
                return MOSQ_ERR_AGAIN
            raise
        else:
            if len(command) == 0:
                # Zero-length read: the peer closed the connection.
                return 1
            command = struct.unpack("!B", command)
            self._in_packet.command = command[0]
    if self._in_packet.have_remaining == 0:
        # Read remaining
        # Algorithm for decoding taken from pseudo code at
        # http://publib.boulder.ibm.com/infocenter/wmbhelp/v6r0m0/topic/com.ibm.etools.mft.doc/ac10870_.htm
        while True:
            try:
                if self._ssl:
                    byte = self._ssl.read(1)
                else:
                    byte = self._sock.recv(1)
            except socket.error as err:
                (msg) = err
                if self._ssl and (msg.errno == ssl.SSL_ERROR_WANT_READ or msg.errno == ssl.SSL_ERROR_WANT_WRITE):
                    return MOSQ_ERR_AGAIN
                if msg.errno == errno.EAGAIN:
                    return MOSQ_ERR_AGAIN
                raise
            else:
                byte = struct.unpack("!B", byte)
                byte = byte[0]
                self._in_packet.remaining_count.append(byte)
                # Max 4 bytes length for remaining length as defined by protocol.
                # Anything more likely means a broken/malicious client.
                if len(self._in_packet.remaining_count) > 4:
                    return MOSQ_ERR_PROTOCOL
                # Variable-length encoding: 7 payload bits per byte,
                # least-significant group first; bit 7 is the continuation flag.
                self._in_packet.remaining_length = self._in_packet.remaining_length + (byte & 127)*self._in_packet.remaining_mult
                self._in_packet.remaining_mult = self._in_packet.remaining_mult * 128
            if (byte & 128) == 0:
                break
        self._in_packet.have_remaining = 1
        self._in_packet.to_process = self._in_packet.remaining_length
    # Read the variable header + payload; partial reads resume here on the
    # next call thanks to the persistent _in_packet state.
    while self._in_packet.to_process > 0:
        try:
            if self._ssl:
                data = self._ssl.read(self._in_packet.to_process)
            else:
                data = self._sock.recv(self._in_packet.to_process)
        except socket.error as err:
            (msg) = err
            if self._ssl and (msg.errno == ssl.SSL_ERROR_WANT_READ or msg.errno == ssl.SSL_ERROR_WANT_WRITE):
                return MOSQ_ERR_AGAIN
            if msg.errno == errno.EAGAIN:
                return MOSQ_ERR_AGAIN
            raise
        else:
            self._in_packet.to_process = self._in_packet.to_process - len(data)
            self._in_packet.packet = self._in_packet.packet + data
    # All data for this packet is read.
    self._in_packet.pos = 0
    rc = self._packet_handle()
    # Free data and reset values
    self._in_packet.cleanup()
    self._msgtime_mutex.acquire()
    self._last_msg_in = time.time()
    self._msgtime_mutex.release()
    return rc
def _packet_write(self):
    # Flush as much of the outgoing packet queue as the transport accepts.
    # Returns MOSQ_ERR_SUCCESS, or MOSQ_ERR_AGAIN when the socket would block.
    self._current_out_packet_mutex.acquire()
    while self._current_out_packet:
        packet = self._current_out_packet
        try:
            if self._ssl:
                write_length = self._ssl.write(packet.packet[packet.pos:])
            else:
                write_length = self._sock.send(packet.packet[packet.pos:])
        except AttributeError:
            # The socket/ssl object was set to None under us (disconnect);
            # nothing left to write.
            self._current_out_packet_mutex.release()
            return MOSQ_ERR_SUCCESS
        except socket.error as err:
            self._current_out_packet_mutex.release()
            (msg) = err
            if self._ssl and (msg.errno == ssl.SSL_ERROR_WANT_READ or msg.errno == ssl.SSL_ERROR_WANT_WRITE):
                return MOSQ_ERR_AGAIN
            if msg.errno == errno.EAGAIN:
                return MOSQ_ERR_AGAIN
            raise
        if write_length > 0:
            # Advance the cursor; partial writes resume on the next call.
            packet.to_process = packet.to_process - write_length
            packet.pos = packet.pos + write_length
            if packet.to_process == 0:
                # Whole packet sent. A QoS 0 PUBLISH completes here, so the
                # on_publish callback fires now.
                if (packet.command & 0xF0) == PUBLISH and packet.qos == 0:
                    self._callback_mutex.acquire()
                    if self.on_publish:
                        self._in_callback = True
                        self.on_publish(self, self._userdata, packet.mid)
                        self._in_callback = False
                    self._callback_mutex.release()
                # Promote the next queued packet, if any.
                self._out_packet_mutex.acquire()
                if len(self._out_packet) > 0:
                    self._current_out_packet = self._out_packet.pop(0)
                else:
                    self._current_out_packet = None
                self._out_packet_mutex.release()
        else:
            pass # FIXME
    self._current_out_packet_mutex.release()
    self._msgtime_mutex.acquire()
    self._last_msg_out = time.time()
    self._msgtime_mutex.release()
    return MOSQ_ERR_SUCCESS
def _easy_log(self, level, buf):
if self.on_log:
self.on_log(self, self._userdata, level, buf)
def _check_keepalive(self):
now = time.time()
self._msgtime_mutex.acquire()
last_msg_out = self._last_msg_out
last_msg_in = self._last_msg_in
self._msgtime_mutex.release()
if (self._sock != None or self._ssl != None) and (now - last_msg_out >= self._keepalive or now - last_msg_in >= self._keepalive):
if self._state == mosq_cs_connected and self._ping_t == 0:
self._send_pingreq()
self._msgtime_mutex.acquire()
self._last_msg_out = now
self._last_msg_in = now
self._msgtime_mutex.release()
else:
if self._ssl:
self._ssl.close()
self._ssl = None
elif self._sock:
| |
import logging
from protocols.protocol_7_2_1.reports import Assembly, Program, InterpretationRequestRD, CancerInterpretationRequest, \
InterpretedGenome as InterpretedGenomeGelModel
from protocols.protocol_7_7.participant import Referral as ReferralGelModel
class PreviousData(Exception):
    # NOTE(review): raised by callers outside this view — presumably when
    # already-processed data is encountered; confirm against usage.
    pass
class WorkspacePermissions(object):
    """Permission metadata for a workspace, built from keyword arguments.

    Every missing keyword defaults to None.
    """

    _FIELDS = ('short_name', 'long_name', 'gmc_name', 'groups')

    def __init__(self, **kwargs):
        for field in self._FIELDS:
            setattr(self, field, kwargs.get(field))
class InterpretedGenome(object):
    """One interpreted-genome record returned by the CIP API.

    Missing keyword arguments default to None. Instances order
    chronologically by ``created_at``.
    """
    def __init__(self, **kwargs):
        # Fix: status/gel_qc_outcome/created_at were each assigned twice in
        # the previous version; the redundant duplicates have been removed.
        self.status = kwargs.get('status')
        self.gel_qc_outcome = kwargs.get('gel_qc_outcome')
        self.created_at = kwargs.get('created_at')
        self.cip_version = kwargs.get('cip_version')
        self.interpreted_genome_data = kwargs.get('interpreted_genome_data')
        self.cva_variants_status = kwargs.get('cva_variants_status')
        self.cva_variants_transaction_id = kwargs.get('cva_variants_transaction_id')

    def __lt__(self, other):
        """Chronological ordering by creation time.

        :type other: InterpretedGenome
        """
        return self.created_at < other.created_at

    @property
    def interpretation_request_payload(self):
        # Deserialise the raw payload into the Gel model; None when absent.
        if self.interpreted_genome_data:
            return InterpretedGenomeGelModel.fromJsonDict(self.interpreted_genome_data)
class ReferralTest(object):
    """A single referral-test entry from the CIP API payload."""

    # Attribute names mirror the API payload keys exactly (including the
    # camelCase testTechnologyDescription).
    _FIELDS = (
        "referral_test_id",
        "clinical_indication_test_type_id",
        "clinical_indication_test_code",
        "clinical_indication_test_name",
        "test_technology_id",
        "testTechnologyDescription",
        "ordering_date",
        "interpretation_request",
        "create_at",
        "last_modified",
        "interpreter_organisation_id",
        "interpreter_organisation_code",
        "interpreter_organisation_name",
        "interpreter_organisation_national_grouping_id",
        "interpreter_organisation_national_grouping_name",
        "interpretation_request_id",
        "interpretation_request_version",
    )

    def __init__(self, **kwargs):
        for field in self._FIELDS:
            setattr(self, field, kwargs.get(field))

    def get_interpretation_request_ids(self):
        """Return the (id, version) pair of the linked interpretation request."""
        return self.interpretation_request_id, self.interpretation_request_version

    def get_interpretation_request(self, cip_api_client, **params):
        """
        :type cip_api_client: CipApiClient
        :rtype : CipApiCase
        """
        ir_id, ir_version = self.get_interpretation_request_ids()
        return cip_api_client.get_case(ir_id, ir_version, **params)
class Referral(object):
    """A referral returned by the CIP API together with its tests.

    Missing keyword arguments default to None; a missing 'referral_test'
    yields an empty test list.
    """

    def __init__(self, **kwargs):
        self.referral_id = kwargs.get("referral_id")
        self.referral_uid = kwargs.get("referral_uid")
        self.ordering_date = kwargs.get("ordering_date")
        self.analysis_scope = kwargs.get("analysis_scope")
        self._referral_payload_json = kwargs.get("referral_data")
        self.last_modified = kwargs.get("last_modified")
        self.create_at = kwargs.get("create_at")
        self.requester_organisation_id = kwargs.get("requester_organisation_id")
        self.requester_organisation_code = kwargs.get("requester_organisation_code")
        self.requester_organisation_name = kwargs.get("requester_organisation_name")
        self.requester_organisation_national_grouping_id = kwargs.get("requester_organisation_national_grouping_id")
        self.requester_organisation_national_grouping_name = kwargs.get("requester_organisation_national_grouping_name")
        # Robustness fix: a missing/None 'referral_test' used to crash with
        # TypeError (iterating None); default to an empty list instead.
        self.referral_test = list(self.process_referral_tests(kwargs.get('referral_test') or []))

    @property
    def referral_data(self):
        """
        :rtype: ReferralGelModel
        """
        if not ReferralGelModel.validate(self._referral_payload_json):
            logging.warning('The referral payload is not valid according to the version of GelModels you are using, '
                            'it may raise errors during the serialisation')
        referral_payload = ReferralGelModel.fromJsonDict(self._referral_payload_json)
        return referral_payload

    def process_referral_tests(self, referral_test_data):
        # Wrap each raw payload dict in a ReferralTest model.
        for referral_test in referral_test_data:
            yield ReferralTest(**referral_test)

    def get_interpretation_requests_ids(self):
        # Yield the (id, version) pair of every test of this referral.
        for rt in self.referral_test:
            yield rt.get_interpretation_request_ids()

    def get_interpretation_requests(self, cip_api_client, **params):
        # Yield the CipApiCase of every test of this referral.
        for rt in self.referral_test:
            yield rt.get_interpretation_request(cip_api_client, **params)
class ClinicalReport(object):
    """A clinical report attached to a case, with optional exit questionnaire."""

    def __init__(self, **kwargs):
        for field in ('clinical_report_data', 'created_at', 'clinical_report_version',
                      'valid', 'cva_variants_status', 'cva_variants_transaction_id',
                      'timestamp'):
            setattr(self, field, kwargs.get(field))
        eq_data = kwargs.get('exit_questionnaire')
        # Wrap the raw questionnaire dict in its model class when present.
        self.exit_questionnaire = ExitQuestionnaire(**eq_data) if eq_data else None
class VariantInterpretationLog(object):
    """A variant-interpretation log entry.

    The API nests the text under payload['log_entry']['log_entry'].
    """
    def __init__(self, **kwargs):
        self.case_updated = kwargs.get('case_updated')
        # Robustness fix: a missing 'log_entry' key used to raise
        # AttributeError (None.get); fall back to None instead.
        self.log_entry = (kwargs.get('log_entry') or {}).get('log_entry')
class ExitQuestionnaire(object):
    """An exit questionnaire record from the CIP API."""

    _FIELDS = ('created_at', 'exit_questionnaire_data', 'user', 'cva_status', 'cva_transaction_id')

    def __init__(self, **kwargs):
        for field in self._FIELDS:
            setattr(self, field, kwargs.get(field))
class Participant:
    """A participant attached to an interpretation request."""

    def __init__(self, **kwargs):
        for field in ('participant_id', 'participant_uid', 'family_id', 'sample_ids',
                      'interpretation_request', 'category', 'participant_clinical_report',
                      'primary_findings_analysis', 'additional_findings_analysis',
                      'year_of_birth', 'assembly', 'sex', 'clinical_indication',
                      'created_at', 'updated_at', 'sites', 'sample_type',
                      'additional_findings_status'):
            setattr(self, field, kwargs.get(field))
        # Wrap nested payloads in their model classes.
        self.participant_interpreted_genome = [
            ParticipantInterpretedGenome(**ig)
            for ig in kwargs.get('participant_interpreted_genome', [])
        ]
        consent = kwargs.get('participant_consent')
        self.participant_consent = ParticipantConsent(**consent) if consent else None
class ParticipantExitQuestionnaire(object):
    """An exit questionnaire for a single participant."""

    _FIELDS = ('created_at', 'exit_questionnaire_data', 'user', 'draft')

    def __init__(self, **kwargs):
        for field in self._FIELDS:
            setattr(self, field, kwargs.get(field))
class ParticipantConsent(object):
    """Consent flags recorded for a participant."""

    _FIELDS = ('created_at', 'updated_at', 'primary_finding_consent',
               'carrier_status_consent', 'programme_consent',
               'secondary_finding_consent', 'child_consent_form')

    def __init__(self, **kwargs):
        for field in self._FIELDS:
            setattr(self, field, kwargs.get(field))
class ParticipantInterpretedGenome(object):
    """An interpreted genome produced for a single participant."""

    _FIELDS = ('created_at', 'version', 'interpreted_genome_data',
               'interpretation_service_name', 'result_summary')

    def __init__(self, **kwargs):
        for field in self._FIELDS:
            setattr(self, field, kwargs.get(field))
class ParticipantClinicalReport(object):
    """A clinical report for a single participant, with optional questionnaire."""

    def __init__(self, **kwargs):
        for field in ('clinical_report_version', 'clinical_report_data',
                      'created_at', 'draft', 'timestamp'):
            setattr(self, field, kwargs.get(field))
        eq_data = kwargs.get('exit_questionnaire')
        self.exit_questionnaire = ParticipantExitQuestionnaire(**eq_data) if eq_data else None
class RequestStatus(object):
    """A single status transition of an interpretation request."""

    def __init__(self, **kwargs):
        for field in ('created_at', 'user', 'status'):
            setattr(self, field, kwargs.get(field))

    def is_blocked(self):
        """True when this status entry marks the case as blocked."""
        return self.status == 'blocked'
class InterpretationFlag(object):
    """A named flag attached to an interpretation request.

    Equality and hashing are based on the flag name only, so flags with the
    same name but different descriptions compare equal.
    """

    def __init__(self, **kwargs):
        flag = kwargs.get('flag')
        self.name = flag.get('name')
        self.description = flag.get('description')

    def __eq__(self, other):
        return self.name == other.name

    def __hash__(self):
        return hash(self.name)
class CipApiCase(object):
    """An interpretation-request case retrieved from the CIP API."""

    # Maps the API's sample_type string onto the Gel Program enum.
    _map_sample_type2program = {
        'raredisease': Program.rare_disease,
        'cancer': Program.cancer
    }

    def __init__(self, **kwargs):
        # All construction logic lives in _load_data so a case can be
        # re-populated from a fresh payload.
        self._load_data(**kwargs)
def _load_data(self, **kwargs):
    """Populate the case attributes from the raw API payload dict."""
    for field in ('last_status', 'created_at', 'last_modified', 'cip', 'group_id',
                  'cohort_id', 'sample_type', 'interpretation_request_id', 'version',
                  'gel_tiering_qc_outcome', 'labkey_links', 'case_priority', 'tags',
                  'paid', 'family_id', 'assembly', 'case_id', 'number_of_samples',
                  'proband', 'files', 'interpretation_request_data', 'workspaces'):
        setattr(self, field, kwargs.get(field))
    # NOTE(review): attribute name differs from the payload key here —
    # confirm 'cancer_participant' is the intended source key.
    self.cancer_participant_id = kwargs.get('cancer_participant')
    referral_data = kwargs.get('referral')
    self.referral = Referral(**referral_data) if referral_data else None
    flags = kwargs.get('interpretation_flag')
    self.interpretation_flags = [InterpretationFlag(**flag) for flag in flags] if flags else []
    self.status = [RequestStatus(**s) for s in kwargs.get('status', [])]
    self.interpreted_genome = [InterpretedGenome(**ig) for ig in kwargs.get('interpreted_genome', [])]
    self.clinical_report = [ClinicalReport(**cr) for cr in kwargs.get('clinical_report', [])]
@property
def interpretation_request_payload(self):
    """Deserialised interpretation request (RD or cancer model), or None."""
    if self.interpretation_request_data:
        model = {'raredisease': InterpretationRequestRD,
                 'cancer': CancerInterpretationRequest}.get(self.sample_type)
        if model is not None:
            return model.fromJsonDict(self.interpretation_request_data['json_request'])
@property
def pedigree(self):
    """The rare-disease pedigree, or None when unavailable."""
    is_rd = self.sample_type == 'raredisease'
    if not (self.interpretation_request_data and is_rd):
        return None
    return self.interpretation_request_payload.pedigree
@property
def cancer_participant(self):
    """The payload's cancerParticipant for cancer cases, or None."""
    if self.sample_type == 'cancer' and self.interpretation_request_data:
        return self.interpretation_request_payload.cancerParticipant
    @property
    def members(self):
        # Rare disease: participant IDs of pedigree members that have samples.
        # Cancer: falls back to the proband identifier.
        # NOTE(review): returns a list in one branch and the raw `proband`
        # value in the other; callers must handle both shapes. Implicitly
        # returns None when no interpretation request data is attached.
        if self.interpretation_request_data and self.sample_type == 'raredisease':
            return [participant.participantId for participant in self.pedigree.members if participant.samples and participant.participantId]
        elif self.interpretation_request_data and self.sample_type == 'cancer':
            return self.proband
@property
def all_members(self):
if self.interpretation_request_data and self.sample_type == 'raredisease':
return [participant.participantId for participant in self.pedigree.members if participant.participantId]
elif self.interpretation_request_data and self.sample_type == 'cancer':
return self.proband
@property
def samples(self):
if self.interpretation_request_data and self.sample_type == 'raredisease':
return [sample.sampleId for member in self.pedigree.members for sample in member.samples if member.samples]
elif self.interpretation_request_data and self.sample_type == 'cancer':
samples = []
for m in self.interpretation_request_data.cancerParticipant.matchedSamples:
samples.append(m.germlineSampleId)
samples.append(m.tumourSampleId)
return samples
return None
@property
def is_blocked(self):
"""
:rtype: bool
"""
if self.last_status == 'blocked':
return True
return False
@property
def has_been_ever_blocked(self):
"""
:rtype: bool
"""
if True in [s.is_blocked() for s in self.status]:
return True
return False
@property
def has_been_interpreted(self):
"""
:rtype: bool
"""
if self.interpreted_genome:
return True
return False
@property
def has_clinical_reports(self):
"""
:rtype: bool
"""
if self.clinical_report:
return True
return False
    @property
    def program(self):
        # Map the textual sample_type onto the Program enum; dict.get returns
        # None for unknown sample types.
        return self._map_sample_type2program.get(self.sample_type)
    @property
    def number_of_clinical_reports(self):
        # Count of clinical reports attached to this case.
        return len(self.clinical_report)
@property
def has_been_closed(self):
"""
:rtype: bool
"""
if True in [s.status == 'report_generated' for s in self.status] or \
True in [s.status == 'report_sent' for s in self.status]:
return True
else:
return False
@property
def has_been_dispatch(self):
"""
:rtype: bool
"""
if True in [s.status == 'dispatched' for s in self.status]:
return True
else:
return False
@property
def is_closed(self):
"""
:rtype: bool
"""
if self.last_status == 'report_sent':
return True
    @property
    def is_rare_disease(self):
        """
        True if this case belongs to the rare disease program.

        :rtype: bool
        """
        return self.program == Program.rare_disease
    @property
    def is_cancer(self):
        """
        True if this case belongs to the cancer program.

        :rtype: bool
        """
        return self.program == Program.cancer
    @property
    def is_assembly_38(self):
        """
        True if the case was aligned against genome assembly GRCh38.

        :rtype: bool
        """
        return self.assembly == Assembly.GRCh38
    @property
    def is_assembly_37(self):
        """
        True if the case was aligned against genome assembly GRCh37.

        :rtype: bool
        """
        return self.assembly == Assembly.GRCh37
def __lt__(self, other):
"""
:type other: CipApiOverview
"""
if self.interpretation_request_id < other.interpretation_request_id:
return True
elif self.interpretation_request_id == other.interpretation_request_id:
if self.version < other.version:
return True
return False
    def dispatch(self, cip_api_client, **params):
        """Dispatch this case through the CIP API and refresh local state
        from the returned payload.

        :type cip_api_client: CipApiClient
        """
        self._load_data(**cip_api_client.dispatch_raw(self.interpretation_request_id, self.version, **params))
def submit_interpretation_request(self, cip_api_client, payload, extra_fields, force=False, **params):
"""
:type cip_api_client: CipApiClient
"""
if not self.interpretation_request_data or force:
self._load_data(**cip_api_client.submit_interpretation_request_raw(
case_id=self.interpretation_request_id, case_version=self.version, interpretation_request_dict=payload,
extra_fields=extra_fields, **params
))
else:
raise PreviousData('This case has already an interpretation request associate, if you still want to upload'
'a new one use `force=True`')
    def patch_case(self, cip_api_client, payload, **params):
        """Patch this case through the CIP API and refresh local state from
        the response.

        :type cip_api_client: CipApiClient
        """
        self._load_data(**cip_api_client.patch_case_raw(case_id=self.interpretation_request_id,
                                                        case_version=self.version,
                                                        payload=payload, **params
                                                        ))
    def submit_interpreted_genome(self, cip_api_client, payload, partner_id, analysis_type, report_id, **params):
        """Submit an interpreted genome through the CIP API and append the
        created InterpretedGenome to this case.

        :type cip_api_client: CipApiClient
        """
        self.interpreted_genome.append(InterpretedGenome(**cip_api_client.submit_interpreted_genome_raw(
            payload=payload, partner_id=partner_id, analysis_type=analysis_type, report_id=report_id, **params
        )))
def submit_interpretation_flags(self, cip_api_client, payload, **params):
"""
:type cip_api_client: CipApiClient
"""
flags = [flag for flag in cip_api_client.submit_interpretation_flags(
payload, self.interpretation_request_id, self.version, **params)]
set_of_flags = set(self.interpretation_flags)
set_of_flags.update(set(flags))
self.interpretation_flags = list(set_of_flags)
    def submit_clinical_report(self, cip_api_client, payload, partner_id, analysis_type, report_id, **params):
        """Submit a clinical report through the CIP API and append the created
        ClinicalReport to this case.

        :type cip_api_client: CipApiClient
        """
        self.clinical_report.append(ClinicalReport(**cip_api_client.submit_clinical_report_raw(
            payload=payload, partner_id=partner_id, analysis_type=analysis_type, report_id=report_id, **params
        )))
def get_exit_questionnaire(self):
if self.has_clinical_reports:
list_of_cr = self.clinical_report
list_of_cr.reverse()
for cr in list_of_cr:
if cr.exit_questionnaire:
return cr.exit_questionnaire
return None
def get_exit_questionnaires(self):
if self.has_clinical_reports:
for cr in self.clinical_report:
if cr.exit_questionnaire:
yield cr.exit_questionnaire
class CipApiOverview(object):
    def __init__(self, **kwargs):
        """Build the overview from a raw CIP-API overview payload (kwargs)."""
        self._load_data(**kwargs)
def _load_data(self, **kwargs):
self.interpretation_request_id = int(kwargs.get('interpretation_request_id', '.-.').split('-')[0])
self.version = kwargs.get('interpretation_request_id', '.-.').split('-')[1]
self.cip = kwargs.get('cip')
self.cohort_id = kwargs.get('cohort_id')
self.sample_type = kwargs.get('sample_type')
self.last_status = kwargs.get('last_status')
self.family_id = kwargs.get('family_id')
self.cancer_participant_id = kwargs.get('cancer_participant')
self.proband = kwargs.get('proband')
self.number_of_samples = kwargs.get('number_of_samples')
self.last_update = kwargs.get('last_update')
self.sites = kwargs.get('sites')
self.case_priority = kwargs.get('case_priority')
self.tags = kwargs.get('tags')
self.assembly = kwargs.get('assembly')
self.last_modified = kwargs.get('last_modified')
self.clinical_reports = kwargs.get('clinical_reports')
self.interpreted_genomes = kwargs.get('interpreted_genomes')
self.files = kwargs.get('files')
self.workflow_status = kwargs.get('workflow_status')
self.cva_variants_status = kwargs.get('cva_variants_status')
self.cva_variants_transaction_id = kwargs.get('cva_variants_transaction_id')
self.case_id = kwargs.get('case_id')
self.status = [RequestStatus(**s) for s in kwargs.get('status', [])]
self.referral = Referral(**kwargs.get('referral')) | |
# additional functions
def compareVal(a, b):
    """Three-way comparison: 1 if a > b, -1 if a < b, 0 otherwise
    (a Python 3 replacement for the removed builtin cmp)."""
    if a > b:
        return 1
    if a < b:
        return -1
    return 0
def arrayCopy(fromArray, fromIndex, toArray, toIndex, length):
    """Copy `length` items from fromArray[fromIndex:] into toArray[toIndex:]
    (System.arraycopy style). Slice assignment materialises the source slice
    first, so overlapping copies within one list stay correct.
    Readability credit: Bee Sort."""
    srcEnd = fromIndex + length
    dstEnd = toIndex + length
    toArray[toIndex:dstEnd] = fromArray[fromIndex:srcEnd]
#
# MIT License
#
# Copyright (c) 2013 <NAME>
# Copyright (c) 2020 The Holy Grail Sort Project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# The Holy Grail Sort Project
# Project Manager: Summer Dragonfly
# Project Contributors: 666666t
# Anonymous0726
# aphitorite
# Control
# dani_dlg
# DeveloperSort
# EilrahcF
# Enver
# Gaming32
# lovebuny
# Morwenn
# MP
# phoenixbound
# Spex_guy
# thatsOven
# _fluffyy
#
#
#
# Special thanks to "The Studio" Discord community!
# REWRITTEN GRAILSORT FOR PYTHON - A heavily refactored C/C++-to-Python version of
# <NAME>'s GrailSort.h, aiming to be as
# readable and intuitive as possible.
#
# ** Written and maintained by The Holy Grail Sort Project
#
# Primary author: thatsOven
#
# Current status: Working (Passing all tests) + Stable
class Subarray:
    """Tags identifying which side of a merge a block originated from."""
    LEFT = 0
    RIGHT = 1
class GrailSort:
    # Size (in elements) used for the optional static external buffer.
    GRAIL_STATIC_EXT_BUF_LEN = 512
    # Optional out-of-place scratch buffer shared by the static methods;
    # None means the sort runs fully in place.
    extBuffer = None
    extBufferLen = 0
@staticmethod
def grailSwap(array, a, b):
array[a], array[b] = array[b], array[a]
@staticmethod
def grailBlockSwap(array, a, b, blockLen):
for i in range(0, blockLen):
GrailSort.grailSwap(array, a + i, b + i)
    @staticmethod
    def grailRotate(array, start, leftLen, rightLen):
        # Rotate the two adjacent runs array[start : start+leftLen] and
        # array[start+leftLen : start+leftLen+rightLen] around their boundary,
        # using only block swaps of the shorter side.
        while leftLen > 0 and rightLen > 0:
            if leftLen <= rightLen:
                # Left run is the shorter: swap it forward, shrinking the
                # remaining right part.
                GrailSort.grailBlockSwap(array, start, start + leftLen, leftLen)
                start += leftLen
                rightLen -= leftLen
            else:
                # Right run is the shorter: swap it into place from the back,
                # shrinking the remaining left part.
                GrailSort.grailBlockSwap(array, start + leftLen - rightLen, start + leftLen, rightLen)
                leftLen -= rightLen
@staticmethod
def grailInsertSort(array, start, length):
for item in range(1, length):
left = start + item - 1
right = start + item
while left >= start and array[left] > array[right]:
GrailSort.grailSwap(array, left, right)
left -= 1
right -= 1
@staticmethod
def grailBinarySearchLeft(array, start, length, target):
left = 0
right = length
while left < right:
middle = left + ((right - left) // 2)
if array[start + middle] < target:
left = middle + 1
else:
right = middle
return left
@staticmethod
def grailBinarySearchRight(array, start, length, target):
left = 0
right = length
while left < right:
middle = left + ((right - left) // 2)
if array[start + middle] > target:
right = middle
else:
left = middle + 1
return right
    @staticmethod
    def grailCollectKeys(array, start, length, idealKeys):
        # Gather up to `idealKeys` distinct elements ("keys") at the front of
        # array[start:start+length] using rotations, keeping the remaining
        # elements in their relative order. Returns the number of distinct
        # keys actually found (may be fewer than idealKeys).
        keysFound = 1
        firstKey = 0   # offset of the current key block within the range
        currKey = 1    # offset of the candidate element being examined
        while currKey < length and keysFound < idealKeys:
            # Where would the candidate land inside the sorted key block?
            insertPos = GrailSort.grailBinarySearchLeft(array, start + firstKey, keysFound, array[start + currKey])
            # Accept only candidates not equal to an already-collected key.
            if insertPos == keysFound or array[start + currKey] != array[start + firstKey + insertPos]:
                # Move the key block right up to the candidate...
                GrailSort.grailRotate(array, start + firstKey, keysFound, currKey - (firstKey + keysFound))
                firstKey = currKey - keysFound
                # ...then rotate the candidate into its sorted position.
                GrailSort.grailRotate(array, start + firstKey + insertPos, keysFound - insertPos, 1)
                keysFound += 1
            currKey += 1
        # Finally move the collected key block back to the start of the range.
        GrailSort.grailRotate(array, start, firstKey, keysFound)
        return keysFound
    @staticmethod
    def grailPairwiseSwaps(array, start, length):
        # Sort consecutive pairs while swapping each pair two slots to the
        # left, into the buffer area just before `start`; the two displaced
        # buffer elements end up past the right end of the processed range.
        index = 1
        while index < length:
            left = start + index - 1
            right = start + index
            if array[left] > array[right]:
                # Out of order: place the pair swapped.
                GrailSort.grailSwap(array, left - 2, right)
                GrailSort.grailSwap(array, right - 2, left)
            else:
                GrailSort.grailSwap(array, left - 2, left)
                GrailSort.grailSwap(array, right - 2, right)
            index += 2
        # Odd length: shift the final unpaired element left as well.
        left = start + index - 1
        if left < start + length:
            GrailSort.grailSwap(array, left - 2, left)
    @staticmethod
    def grailPairwiseWrites(array, start, length):
        # Same pass as grailPairwiseSwaps, but overwrites the two slots before
        # each pair instead of swapping — used when those slots belong to a
        # disposable (externally saved) buffer.
        index = 1
        while index < length:
            left = start + index - 1
            right = start + index
            if array[left] > array[right]:
                # Out of order: write the pair swapped.
                array[left - 2], array[right - 2] = array[right], array[left]
            else:
                array[left - 2], array[right - 2] = array[left], array[right]
            index += 2
        # Odd length: move the final unpaired element left as well.
        left = start + index - 1
        if left < start + length:
            array[left - 2] = array[left]
    @staticmethod
    def grailMergeForwards(array, start, leftLen, rightLen, bufferOffset):
        # Merge the adjacent sorted runs [start, start+leftLen) and
        # [start+leftLen, start+leftLen+rightLen), swapping merged output into
        # the buffer located `bufferOffset` slots before `start`.
        left = start
        middle = start + leftLen
        right = middle
        end = middle + rightLen
        buffer = start - bufferOffset
        while right < end:
            # Strict `>` keeps the merge stable: ties are taken from the left run.
            if left == middle or array[left] > array[right]:
                GrailSort.grailSwap(array, buffer, right)
                right += 1
            else:
                GrailSort.grailSwap(array, buffer, left)
                left += 1
            buffer += 1
        # If the left run is not exhausted, block-swap its remainder after the
        # merged output (unless the buffer caught up with it already).
        if buffer != left:
            GrailSort.grailBlockSwap(array, buffer, left, middle-left)
    @staticmethod
    def grailMergeBackwards(array, start, leftLen, rightLen, bufferOffset):
        # Merge the two adjacent sorted runs right-to-left, swapping merged
        # output into the buffer located `bufferOffset` slots after the right
        # run's end.
        end = start - 1                 # sentinel one slot before the left run
        left = end + leftLen            # last index of the left run
        middle = left                   # boundary between the runs
        right = middle + rightLen       # last index of the right run
        buffer = right + bufferOffset
        while left > end:
            # Strict `>` keeps the merge stable when taking from the back.
            if right == middle or array[left] > array[right]:
                GrailSort.grailSwap(array, buffer, left)
                left -= 1
            else:
                GrailSort.grailSwap(array, buffer, right)
                right -= 1
            buffer -= 1
        # Flush any remainder of the right run.
        if right != buffer:
            while right > middle:
                GrailSort.grailSwap(array, buffer, right)
                buffer -= 1
                right -= 1
    @staticmethod
    def grailMergeOutOfPlace(array, start, leftLen, rightLen, bufferOffset):
        # Forward merge like grailMergeForwards, but the buffer contents are
        # disposable (saved externally), so plain writes replace the swaps.
        left = start
        middle = start + leftLen
        right = middle
        end = middle + rightLen
        buffer = start - bufferOffset
        while right < end:
            # Strict `>` keeps the merge stable: ties come from the left run.
            if left == middle or array[left] > array[right]:
                array[buffer] = array[right]
                right += 1
            else:
                array[buffer] = array[left]
                left += 1
            buffer += 1
        # Move any remainder of the left run after the merged output.
        if buffer != left:
            while left < middle:
                array[buffer] = array[left]
                buffer += 1
                left += 1
    @staticmethod
    def grailBuildInPlace(array, start, length, currentLen, bufferLen):
        # Double the sorted-run length from `currentLen` up to `bufferLen` by
        # repeated forward merges; each pass migrates the internal buffer
        # `mergeLen` slots to the left (hence `start -= mergeLen`).
        mergeLen = currentLen
        while mergeLen < bufferLen:
            fullMerge = 2 * mergeLen
            mergeEnd = start + length - fullMerge
            bufferOffset = mergeLen
            mergeIndex = start
            while mergeIndex <= mergeEnd:
                GrailSort.grailMergeForwards(array, mergeIndex, mergeLen, mergeLen, bufferOffset)
                mergeIndex += fullMerge
            # Handle the tail shorter than a full double-merge.
            leftOver = length - (mergeIndex - start)
            if leftOver > mergeLen:
                GrailSort.grailMergeForwards(array, mergeIndex, mergeLen, leftOver - mergeLen, bufferOffset)
            else:
                # Tail is a single already-sorted run: shift it left past the buffer.
                GrailSort.grailRotate(array, mergeIndex - mergeLen, mergeLen, leftOver)
            start -= mergeLen
            mergeLen *= 2
        # Final pass merges backwards, which moves the buffer to the end.
        fullMerge = 2 * bufferLen
        lastBlock = int(length % fullMerge)
        lastOffset = start + length - lastBlock
        if lastBlock <= bufferLen:
            GrailSort.grailRotate(array, lastOffset, lastBlock, bufferLen)
        else:
            GrailSort.grailMergeBackwards(array, lastOffset, bufferLen, lastBlock - bufferLen, bufferLen)
        mergeIndex = lastOffset - fullMerge
        while mergeIndex >= start:
            GrailSort.grailMergeBackwards(array, mergeIndex, bufferLen, bufferLen, bufferLen)
            mergeIndex -= fullMerge
    @staticmethod
    def grailBuildOutOfPlace(array, start, length, bufferLen, extLen):
        # Like grailBuildInPlace, but the first passes route through the
        # external buffer: the internal buffer contents are saved out first,
        # so merges can use plain writes instead of swaps.
        arrayCopy(array, start - extLen, GrailSort.extBuffer, 0, extLen)
        GrailSort.grailPairwiseWrites(array, start, length)
        start -= 2
        mergeLen = 2
        while mergeLen < extLen:
            fullMerge = 2 * mergeLen
            mergeEnd = start + length - fullMerge
            bufferOffset = mergeLen
            mergeIndex = start
            while mergeIndex <= mergeEnd:
                GrailSort.grailMergeOutOfPlace(array, mergeIndex, mergeLen, mergeLen, bufferOffset)
                mergeIndex += fullMerge
            # Tail handling mirrors grailBuildInPlace, with moves not swaps.
            leftOver = length - (mergeIndex - start)
            if leftOver > mergeLen:
                GrailSort.grailMergeOutOfPlace(array, mergeIndex, mergeLen, leftOver - mergeLen, bufferOffset)
            else:
                arrayCopy(array, mergeIndex, array, mergeIndex - mergeLen, leftOver)
            start -= mergeLen
            mergeLen *= 2
        # Restore the saved buffer elements and continue fully in place.
        arrayCopy(GrailSort.extBuffer, 0, array, start + length, extLen)
        GrailSort.grailBuildInPlace(array, start, length, mergeLen, bufferLen)
    @staticmethod
    def grailBuildBlocks(array, start, length, bufferLen):
        # Entry point for run building: use the out-of-place variant when an
        # external buffer exists, otherwise build fully in place.
        if GrailSort.extBuffer is not None:
            if bufferLen < GrailSort.extBufferLen:
                extLen = bufferLen
            else:
                # Largest power of two that fits in the external buffer.
                extLen = 1
                while (extLen*2) <= GrailSort.extBufferLen:
                    extLen *= 2
            GrailSort.grailBuildOutOfPlace(array, start, length, bufferLen, extLen)
        else:
            GrailSort.grailPairwiseSwaps(array, start, length)
            GrailSort.grailBuildInPlace(array, start - 2, length, 2, bufferLen)
@staticmethod
def grailBlockSelectSort(array, firstKey, start, medianKey, blockCount, blockLen):
| |
import copy
import json
import pickle
import os
import glob
from collections import defaultdict
from traceback import print_exc
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import sklearn
from modnet.models import MODNetModel, EnsembleMODNetModel
from modnet.matbench.benchmark import matbench_kfold_splits
from pymatgen.core import Composition
# Global plotting configuration (module import side effects).
DARK2_COLOURS = plt.cm.get_cmap("Dark2").colors
# Non-interactive backend: figures are only ever written to PDF.
matplotlib.use("pdf")
HEIGHT = 2.5  # base subplot height in inches
DPI = 100
matplotlib.rcParams["font.size"] = 8
# Toggle between STIX and Arial font setups for the figures.
STIX = True
if STIX:
    # matplotlib.rcParams["font.family"] = "sans-serif"
    matplotlib.rcParams["font.family"] = "STIXGeneral"
    matplotlib.rcParams["mathtext.fontset"] = "stixsans"
else:
    matplotlib.rcParams["mathtext.fontset"] = "stixsans"
    matplotlib.rcParams["font.family"] = "sans-serif"
    matplotlib.rcParams["font.sans-serif"] = "Arial"
# Require these imports for backwards-compat when unpickling
try:
from modnet.featurizers.presets import CompositionOnlyFeaturizer # noqa
from modnet.preprocessing import MODData, CompositionContainer # noqa
except ImportError:
pass
def setup_threading():
    """Pin the BLAS/OpenMP/TensorFlow thread pools to a single thread and
    hide all GPUs, so parallelism is controlled purely at the process level."""
    single_thread_vars = (
        'OPENBLAS_NUM_THREADS',
        'MKL_NUM_THREADS',
        'OMP_NUM_THREADS',
        'TF_NUM_INTRAOP_THREADS',
        'TF_NUM_INTEROP_THREADS',
    )
    for var in single_thread_vars:
        os.environ[var] = '1'
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    # import tensorflow as tf
    # tf.config.threading.set_intra_op_parallelism_threads(nprocs)
    # tf.config.threading.set_inter_op_parallelism_threads(nthreads)
def load_settings(task: str):
    """Load `<task>_options.json` from the working directory, or from the
    task's own sub-directory; return an empty dict when neither exists."""
    settings_file = task + "_options.json"
    candidates = (settings_file, task + "/" + settings_file)
    for candidate in candidates:
        if os.path.isfile(candidate):
            with open(candidate, "r") as handle:
                return json.load(handle)
    return {}
def featurize(task):
    """Featurize the matbench dataset for `task` with the DeBreuck2020
    featurizer (fast oxidation-state mode) and cache the result to
    ./precomputed/<task>_moddata.pkl.gz.

    :param task: matbench task name; the special value "matbench_elastic"
        joins the log_gvrh and log_kvrh datasets into a two-target frame.
    :return: the featurized MODData instance.
    :raises RuntimeError: when the dataset has neither a "structure" nor a
        "composition" column.
    """
    import warnings
    warnings.filterwarnings("ignore", category=RuntimeWarning)
    from modnet.preprocessing import MODData
    from modnet.featurizers.presets import DeBreuck2020Featurizer
    from matminer.datasets import load_dataset
    if task == "matbench_elastic":
        # Combined elastic task: shear (G) and bulk (K) moduli share structures.
        df_g = load_dataset("matbench_log_gvrh")
        df_k = load_dataset("matbench_log_kvrh")
        df = df_g.join(df_k.drop("structure",axis=1))
    else:
        df = load_dataset(task)
    # Sanitize column names (spaces/parentheses) so they are usable as
    # model target identifiers.
    mapping = {
        col: col.replace(" ", "_").replace("(", "").replace(")", "")
        for ind, col in enumerate(df.columns)
    }
    df.rename(columns=mapping, inplace=True)
    targets = [
        col for col in df.columns if col not in ("id", "structure", "composition")
    ]
    try:
        # Structure tasks pass structures directly; composition-only tasks
        # are converted to pymatgen Composition objects.
        materials = df["structure"] if "structure" in df.columns else df["composition"].map(Composition)
    except KeyError:
        raise RuntimeError(f"Could not find any materials data dataset for task {task!r}!")
    fast_oxid_featurizer = DeBreuck2020Featurizer(fast_oxid=True)
    data = MODData(
        materials=materials.tolist(),
        targets=df[targets].values,
        target_names=targets,
        featurizer=fast_oxid_featurizer,
    )
    data.featurize(n_jobs=32)
    data.save(f"./precomputed/{task}_moddata.pkl.gz")
    return data
def benchmark(data, settings,n_jobs=16, fast=False):
    """Run the (nested) matbench benchmark on `data` with ensemble MODNet models.

    :param data: MODData with featurized inputs and targets.
    :param settings: task options dict; only the "classification" flag is read.
    :param n_jobs: parallel workers forwarded to matbench_benchmark.
    :param fast: if True, skip nested CV (nested=0) for a quick run.
    :return: the results structure produced by matbench_benchmark.
    """
    from modnet.matbench.benchmark import matbench_benchmark
    # Sanitize target column names the same way featurize() does.
    columns = list(data.df_targets.columns)
    mapping = {
        col: col.replace(" ", "_").replace("(", "").replace(")", "")
        for ind, col in enumerate(columns)
    }
    data.df_targets.rename(columns=mapping, inplace=True)
    # Fixed starting hyperparameters for the model fits.
    best_settings = {
        "increase_bs": False,
        "lr": 0.005,
        "epochs": 50,
        "act": "elu",
        "out_act": "relu",
        "batch_size": 32,
        "loss": "mae",
        "xscale": "minmax",
    }
    #best_settings = None
    # One joint group containing every target, all weighted equally.
    names = [[[field for field in data.df_targets.columns]]]
    weights = {field: 1 for field in data.df_targets.columns}
    from modnet.models import EnsembleMODNetModel
    return matbench_benchmark(
        data,
        names,
        weights,
        best_settings,
        model_type=EnsembleMODNetModel,
        n_models=5,
        classification=settings.get("classification"),
        fast=fast,
        nested=0 if fast else 5,
        n_jobs=n_jobs,
    )
def run_predict(data, final_model, settings, save_folds=False, dknn_only=False):
    """
    Re-run the benchmark predictions from an already-trained `final_model`
    without retraining, and additionally compute the kNN distance (dkNN) of
    each test sample; results are accumulated per outer fold.
    Eventually this should be integrated inside the modnet benchmark itself.

    :param data: MODData used to generate the fold splits.
    :param final_model: EnsembleMODNetModel holding all inner-fold members.
    :param settings: task options dict; reads "task" and "classification".
    :param save_folds: if True, dump per-fold train/test feature CSVs.
    :param dknn_only: if True, only recompute dkNNs into the existing
        results pickle (no predictions).
    :return: dict of lists keyed by "predictions", "stds", "targets",
        "errors", "scores", "model" and "dknns" (one entry per fold).
    """
    task = settings["task"]
    # rebuild the EnsembleMODNetModels from the final model
    n_best_archs = 5 # change this (from 1 to 5 max) to adapt number of inner best archs chosen
    bootstrap_size = 5
    outer_fold_size = bootstrap_size * 5 * 5
    inner_fold_size = bootstrap_size * 5
    models = []
    multi_target = bool(len(data.df_targets.columns) - 1)
    # Slice the flat member list back into one ensemble per outer fold.
    for i in range(5): # outer fold
        modnet_models = []
        for j in range(5): # inner fold
            modnet_models+=(
                final_model.model[(i * outer_fold_size) + (j * inner_fold_size):
                (i * outer_fold_size) + (j * inner_fold_size) + (n_best_archs * bootstrap_size)])
        model = EnsembleMODNetModel(modnet_models=modnet_models)
        models.append(model)
    if dknn_only:
        # Reuse the previously saved results and only refresh the dkNNs.
        with open(f"results/{task}_results.pkl", "rb") as f:
            results = pickle.load(f)
        results["dknns"] = []
    else:
        results = defaultdict(list)
    for ind, (train, test) in enumerate(matbench_kfold_splits(data, classification=settings.get("classification", False))):
        train_data, test_data = data.split((train, test))
        # NOTE(review): the split train_data is immediately replaced by the
        # fold's saved MODData — confirm folds/ matches the current split.
        path = "folds/train_moddata_f{}".format(ind + 1)
        train_data = MODData.load(path)
        assert len(set(train_data.df_targets.index).intersection(set(test_data.df_targets.index))) == 0
        model = models[ind]
        # compute dkNN
        # TODO: test this quickly before submitting
        max_feat_model = np.argmax([m.n_feat for m in model.model])
        n_feat = model.model[max_feat_model].n_feat
        feature_names = model.model[max_feat_model].optimal_descriptors
        dknn = get_dknn(train_data, test_data, feature_names)
        results["dknns"].append(dknn)
        if dknn_only:
            continue
        predict_kwargs = {}
        if settings.get("classification"):
            predict_kwargs["return_prob"] = True
        if model.can_return_uncertainty:
            predict_kwargs["return_unc"] = True
        pred_results = model.predict(test_data, **predict_kwargs)
        # predict() returns (predictions, stds) when uncertainties are on.
        if isinstance(pred_results, tuple):
            predictions, stds = pred_results
        else:
            predictions = pred_results
            stds = None
        targets = test_data.df_targets
        if settings.get("classification"):
            from sklearn.metrics import roc_auc_score
            from sklearn.preprocessing import OneHotEncoder
            y_true = OneHotEncoder().fit_transform(targets.values).toarray()
            score = roc_auc_score(y_true, predictions.values)
            pred_bool = model.predict(test_data, return_prob=False)
            print(f"ROC-AUC: {score}")
            errors = targets - pred_bool
        elif multi_target:
            errors = targets - predictions
            score = np.mean(np.abs(errors.values), axis=0)
        else:
            errors = targets - predictions
            score = np.mean(np.abs(errors.values))
        if save_folds:
            # Dump the optimal-feature train/test matrices for this fold.
            opt_feat = train_data.optimal_features[:n_feat]
            df_train = train_data.df_featurized
            df_train = df_train[opt_feat]
            df_train.to_csv("folds/train_f{}.csv".format(ind + 1))
            df_test = test_data.df_featurized
            df_test = df_test[opt_feat]
            errors.columns = [x + "_error" for x in errors.columns]
            df_test = df_test.join(errors)
            df_test.to_csv("folds/test_f{}.csv".format(ind + 1))
        results["predictions"].append(predictions)
        if stds is not None:
            results["stds"].append(stds)
        results["targets"].append(targets)
        results["errors"].append(errors)
        results["scores"].append(score)
        results['model'].append(model)
    return results
def get_dknn(train_data, test_data, feature_names, k = 5):
    """Mean cosine distance of each test sample to its `k` nearest training
    samples, in the standard-scaled space of `feature_names`.

    :param train_data: MODData providing df_featurized and target_nmi.
    :param test_data: MODData providing df_featurized.
    :param feature_names: candidate features (intersected with target_nmi index).
    :param k: number of nearest neighbours to average over.
    :return: DataFrame indexed like test_data with one identical dkNN column
        per target in train_data.df_targets.
    """
    # BUGFIX: the module only does `import sklearn`, which does not guarantee
    # that the sklearn.preprocessing / sklearn.metrics submodules are
    # attached; import them explicitly here.
    from sklearn.preprocessing import StandardScaler
    from sklearn.metrics import pairwise_distances
    feature_names = list(set(feature_names).intersection(set(train_data.target_nmi.index)))
    x_train = train_data.df_featurized[feature_names].values
    x_test = test_data.df_featurized[feature_names].values
    scaler = StandardScaler()
    x_train_sc = scaler.fit_transform(x_train)
    x_test_sc = scaler.transform(x_test)
    # NOTE(review): NMI-based feature weighting was tried and left disabled.
    #w = (train_data.target_nmi[feature_names].values)**2
    #x_train_sc = x_train_sc*w
    #x_test_sc = x_test_sc*w
    dist = pairwise_distances(x_test_sc, x_train_sc, metric='cosine')
    dknn = np.sort(dist, axis=1)[:, :k].mean(axis=1)
    dknn = pd.DataFrame({t:dknn for t in train_data.df_targets.columns}, index = test_data.df_featurized.index)
    return dknn
def get_metrics(target, pred, errors, name, settings):
    """Compute, print and persist evaluation metrics for one target.

    Writes results/<name>_metrics.json as a side effect.

    :param target: ground-truth values (numpy array).
    :param pred: predictions; class probabilities when classification.
    :param errors: unused here; kept for signature compatibility with callers.
    :param name: target name, used for the output file name.
    :param settings: task options; reads "classification" and "units".
    :return: dict of float-valued metrics.
    """
    import sklearn.metrics
    # BUGFIX: `import scipy` alone does not reliably expose scipy.stats;
    # import the submodule explicitly.
    import scipy.stats
    metrics = {}
    if settings.get("classification"):
        metrics["roc_auc"] = score = sklearn.metrics.roc_auc_score(
            target.reshape(-1, 1), pred.reshape(-1, 1)
        )
        metrics["ap_score"] = ap_score = sklearn.metrics.average_precision_score(
            target.reshape(-1, 1), pred.reshape(-1, 1), average="micro"
        )
        print(f"ROC-AUC: {score:3.3f}, AP: {ap_score:3.3f}")
    else:
        mae = metrics["mae"] = sklearn.metrics.mean_absolute_error(target, pred)
        try:
            mape = metrics["mape"] = sklearn.metrics.mean_absolute_percentage_error(
                target, pred
            )
        except AttributeError:
            # mean_absolute_percentage_error needs sklearn >= 0.24; use a
            # sentinel so downstream JSON stays well-formed.
            mape = metrics["mape"] = 1e20
        mse = metrics["mse"] = sklearn.metrics.mean_squared_error(target, pred)
        med_ae = metrics["med_ae"] = sklearn.metrics.median_absolute_error(target, pred)
        max_ae = metrics["max_ae"] = sklearn.metrics.max_error(target, pred)
        fit_results = scipy.stats.linregress(
            x=target.reshape(
                -1,
            ),
            y=pred.reshape(
                -1,
            ),
        )
        slope = metrics["slope"] = fit_results.slope
        rvalue = metrics["rvalue"] = fit_results.rvalue
        print(
            f"MAE = {mae:3.3f} {settings.get('units', '')}, MedianAE = {med_ae:3.3f} {settings.get('units', '')}, MAPE = {mape:3.3f}, √MSE = {np.sqrt(mse):3.3f} {settings.get('units', '')}" # noqa
        )
        print(
            f"MaxAE = {max_ae:3.3f} {settings.get('units', '')}, slope = {slope:3.2f}, R = {rvalue:3.2f}"
        )
    # JSON cannot serialise numpy scalars; coerce everything to float.
    for k,v in metrics.items():
        metrics[k] = float(v)
    with open(f"results/{name}_metrics.json", "w") as f:
        json.dump(metrics, f)
    return metrics
def analyse_results(results, settings):
    """Flatten per-fold benchmark results into per-target arrays, compute and
    persist metrics, and render the standard plots into ./plots.

    :param results: dict of per-fold lists ("targets", "predictions", "stds",
        "errors", optionally "dknns") as produced by run_predict.
    :param settings: task options; reads "classification".
    :return: list of (name, target, pred, stds, dknns, ind, settings) tuples,
        one per target.
    """
    target_names = set(c for res in results["targets"] for c in res.columns)
    if settings.get("classification", False): # this small parts changes the prediction dataframe from (task_prob_0, task_prob_1) to single column dataframes giving the probability of class 1, with task as column name
        name = list(target_names)[0]
        results['predictions'] = [
            results['predictions'][i].drop(name + '_prob_0', axis=1).rename(columns={name + '_prob_1': name}) for i in
            range(5)]
        results['stds'] = [
            results['stds'][i].drop(name + '_prob_0', axis=1).rename(columns={name + '_prob_1': name}) for i in
            range(5)]
    all_targets = []
    all_preds = []
    all_stds = []
    all_dknns = []
    all_errors = []
    # Concatenate the five folds into one flat array per target.
    for name in target_names:
        targets = np.hstack([res[name].values for res in results["targets"]]).flatten()
        if settings.get("classification"):
            if len(target_names) > 1:
                raise RuntimeError("Cannot handle multi-target classification.")
            preds = np.hstack(
                [res[res.columns[0]].values for res in results["predictions"]]
            ).flatten()
        else:
            preds = np.hstack(
                [res[name].values for res in results["predictions"]]
            ).flatten()
        stds = np.hstack([res[name].values for res in results["stds"]]).flatten()
        # dkNNs are optional (only present after run_predict); fall back to NaN.
        if "dknns" in results.keys() and results["dknns"]:
            dknns = np.hstack([res[name].values for res in results["dknns"]]).flatten()
        else:
            dknns = np.array([float("nan") for _ in range(len(preds))])
        # Older results store errors under "<name>_error" column names.
        try:
            errors = np.hstack(
                [res[name].values for res in results["errors"]]
            ).flatten()
        except (NameError, KeyError):
            errors = np.hstack(
                [res[name + "_error"].values for res in results["errors"]]
            ).flatten()
        all_targets.append(targets)
        all_preds.append(preds)
        all_stds.append(stds)
        all_dknns.append(dknns)
        all_errors.append(errors)
    for t, p, e, name in zip(all_targets, all_preds, all_errors, target_names):
        metrics = get_metrics(t, p, e, name, settings)
        print(metrics)
    os.makedirs("./plots", exist_ok=True)
    res = []
    for ind, (target, pred, stds, dknns, error, name) in enumerate(
        zip(all_targets, all_preds, all_stds, all_dknns, all_errors, target_names)
    ):
        res.append((name,target, pred, stds, dknns, ind, settings))
        if not settings.get("classification", False):
            # if "nested_learning_curves" in results:
            #     plot_learning_curves(results["nested_learning_curves"], results["best_learning_curves"], settings)
            plot_jointplot(target, error, ind, settings)
            plot_scatter(target, pred, error, ind, settings, metrics)
            plot_uncertainty(target, pred, stds, dknns, ind, settings)
        else:
            plot_classifier_roc(target, pred, settings)
    return res
def plot_uncertainty_summary():
from uncertainty_utils import plot_ordered_mae
fig, ax = plt.subplots(2, 3, figsize=(3 * HEIGHT, 2 * HEIGHT), sharex=True)
ax = ax.flatten()
matbench_ordered = [
"steels",
"jdft2d",
"dielectric",
"expt_gap",
# "expt_is_metal",
# "glass",
"phonons",
"elastic",
]
all_results = []
for task in matbench_ordered:
with open(f"matbench_{task}/results/matbench_{task}_results.pkl", "rb") as f:
results = pickle.load(f)
settings = load_settings("matbench_" + task)
settings["task"] = "matbench_" + task
os.chdir(f"matbench_{task}")
res = analyse_results(results, settings)
os.chdir("..")
all_results+=res
for j,(name,target, pred, stds, dknns, ind, settings) in enumerate(all_results):
if j > 5:
break
yticks = False
if j % 3 == 0:
yticks = True
lines = plot_ordered_mae(pred, stds, target, dknns, ax[j], settings, ind, yticks=yticks)
labels = ["Error ranked", "Randomly ranked", "$\\sigma$ ranked", | |
# source repository: ratschlab/immunopepper
"""Contains all the output computation based on gene splicegraph"""
from collections import defaultdict
import logging
import numpy as np
from .filter import add_dict_kmer_forgrd
from .filter import add_dict_peptide
from .filter import add_set_kmer_back
from .filter import get_filtered_metadata_list
from .filter import junction_is_annotated
from .filter import junction_is_in_given_list
from .filter import junction_tuple_is_annotated
from .io_ import gz_and_normal_open
from .io_ import save_kmer_matrix
from .io_ import switch_tmp_path
from .mutations import apply_germline_mutation
from .mutations import get_exon_som_dict
from .mutations import get_mut_comb
from .mutations import get_som_expr_dict
from .namedtuples import Coord
from .namedtuples import OutputBackground
from .namedtuples import OutputJuncPeptide
from .namedtuples import OutputKmer
from .namedtuples import OutputMetadata
from .namedtuples import VertexPair
from .preprocess import search_edge_metadata_segmentgraph
from .translate import get_exhaustive_reading_frames
from .translate import get_full_peptide
from .translate import isolated_peptide_result
from .translate import get_peptide_result
from .translate import cross_peptide_result
from .utils import get_segment_expr
def collect_background_transcripts(gene=None, ref_seq_file=None, chrm=None, mutation=None):
    """Calculate the (germline-mutated) background sequence for a gene.

    Parameters
    ----------
    gene: Object, returned by SplAdder.
    ref_seq_file: Str, path to the reference sequence file of the chromosome.
    chrm: Str, chromosome identifier.
    mutation: Namedtuple Mutation, stores the mutation information of a
        specific chromosome and sample;
        has the attributes ['mode', 'maf_dict', 'vcf_dict'].

    Returns
    -------
    ref_mut_seq: Dict. (sequence_type) -> list[char].
    """
    gene.from_sparse()
    min_pos = gene.splicegraph.vertices.min()
    max_pos = gene.splicegraph.vertices.max()
    # apply germline mutation
    # when germline mutation is applied, background_seq != ref_seq
    # otherwise, background_seq = ref_seq
    ref_mut_seq = apply_germline_mutation(ref_sequence_file=ref_seq_file,
                                          chrm=chrm,
                                          pos_start=min_pos,
                                          pos_end=max_pos,
                                          mutation_sub_dict=mutation.germline_mutation_dict)
    return ref_mut_seq
def collect_vertex_pairs(gene=None, gene_info=None, ref_seq_file=None, chrm=None, idx=None, mutation=None, all_read_frames=False, disable_concat=False, kmer=None, filter_redundant=False):
    """Calculate the output peptide coordinates for every exon pair in the splicegraph.

    Parameters
    ----------
    gene: Object, returned by SplAdder.
    gene_info: Object with vertex_order, vertex_succ_list and reading_frames attributes.
    ref_seq_file: Str, path to the reference sequence file of the given chromosome.
    chrm: Str, chromosome name.
    idx: Namedtuple Idx, has attribute idx.gene and idx.sample
    mutation: Namedtuple Mutation, stores the mutation information of a specific
        chromosome and sample. Has the attributes ['mode', 'maf_dict', 'vcf_dict'].
    all_read_frames: bool, if True derive reading frames exhaustively from the graph
        instead of restricting to annotated ones.
    disable_concat: bool, flag indicating whether to disable the concatenation of vertices into triples
    kmer: iterable of int kmer lengths used when building vertex triples
        (despite the original doc calling it a bool).
    filter_redundant: flag indicating whether to remove pairs spanning the same intron

    Returns
    -------
    concat_vertex_pair_list: Dict. (kmer_type, e.g. '2-exons') -> list of VertexPair.
    ref_mut_seq: Dict. (sequence_type) -> list[char].
    exon_som_dict: Dict. (exon_id) |-> (mutation_postion)
    """
    gene.from_sparse()
    sg = gene.splicegraph
    min_pos = gene.splicegraph.vertices.min()
    max_pos = gene.splicegraph.vertices.max()
    output_id = 0
    # apply germline mutation
    # when germline mutation is applied, background_seq != ref_seq
    # otherwise, background_seq = ref_seq
    ref_mut_seq = apply_germline_mutation(ref_sequence_file=ref_seq_file,
                                          chrm=chrm,
                                          pos_start=min_pos,
                                          pos_end=max_pos,
                                          mutation_sub_dict=mutation.germline_mutation_dict)
    # apply somatic mutation
    # exon_som_dict: (exon_id) |-> (mutation_postion)
    exon_som_dict = None
    if mutation.somatic_mutation_dict is not None:
        exon_som_dict = get_exon_som_dict(gene, mutation.somatic_mutation_dict)
    vertex_pair_list = []
    if all_read_frames:
        reading_frame_dict = dict(get_exhaustive_reading_frames(sg, gene.strand, gene_info.vertex_order))
    else:  # use reading frames from annotation
        reading_frame_dict = dict(gene_info.reading_frames)
    for v_id in gene_info.vertex_order:
        n_read_frames = len(reading_frame_dict[v_id])
        if n_read_frames == 0:  # no cds start, skip the vertex
            continue
        if len(gene_info.vertex_succ_list[v_id]) == 0:  # if no successive vertex, we set it to np.nan, translate and output it
            gene_info.vertex_succ_list[v_id].append(np.nan)
        for prop_vertex in gene_info.vertex_succ_list[v_id]:
            vertex_list = [v_id, prop_vertex]
            mut_seq_comb = get_mut_comb(exon_som_dict,vertex_list)
            for read_frame_tuple in sorted(reading_frame_dict[v_id]):
                has_stop_flag = True
                for variant_comb in mut_seq_comb:  # go through each variant combination
                    logging.debug(' '.join([str(v_id), str(prop_vertex), str(variant_comb), str(read_frame_tuple.read_phase)]))
                    if prop_vertex is not np.nan:
                        # real successor vertex: translate across the junction and
                        # propagate the resulting frame into the successor
                        peptide, modi_coord, flag, next_reading_frame = cross_peptide_result(read_frame_tuple, gene.strand, variant_comb, mutation.somatic_mutation_dict, ref_mut_seq, sg.vertices[:, prop_vertex], min_pos, all_read_frames)
                        orig_coord = Coord(sg.vertices[0, v_id], sg.vertices[1, v_id], sg.vertices[0, prop_vertex], sg.vertices[1, prop_vertex])
                        if (not flag.has_stop) and (not all_read_frames):  # no propagation needed in all reading frame mode
                            reading_frame_dict[prop_vertex].add(next_reading_frame)
                    else:
                        # np.nan successor: isolated vertex, translate the single exon
                        peptide, modi_coord, flag = isolated_peptide_result(read_frame_tuple, gene.strand, variant_comb, mutation.somatic_mutation_dict, ref_mut_seq, min_pos, all_read_frames)
                        orig_coord = Coord(sg.vertices[0, v_id],sg.vertices[1, v_id], np.nan, np.nan)
                    # pair is flagged as stop-containing only if EVERY variant
                    # combination hits a stop codon (logical AND over variants)
                    has_stop_flag = has_stop_flag and flag.has_stop
                gene_outputid = str(idx.gene) + ':' + str(output_id)
                # NOTE(review): modi_coord/orig_coord/flag come from the LAST variant
                # combination of the loop above — confirm this is intended.
                vertex_pair = VertexPair(output_id=gene_outputid,
                                         read_frame=read_frame_tuple,
                                         modified_exons_coord=modi_coord,
                                         original_exons_coord=orig_coord,
                                         vertex_idxs=vertex_list,
                                         has_stop_codon=has_stop_flag,
                                         peptide_weight="{:.3f}".format(1/n_read_frames))
                vertex_pair_list.append(vertex_pair)
                output_id += 1
    if filter_redundant:
        vertex_pair_list = get_filtered_metadata_list(vertex_pair_list, gene.strand)
    concat_vertex_pair_list = defaultdict(list, {'2-exons': vertex_pair_list})
    if not disable_concat:
        # one triple list per requested kmer length
        for kmer_length in kmer:
            concat_vertex_pair_list['3-exons_{}-mer'.format(kmer_length)] = collect_vertex_triples(gene, vertex_pair_list, kmer_length)
    #vertex_pair_list += concat_vertex_pair_list
    return concat_vertex_pair_list, ref_mut_seq, exon_som_dict
def collect_vertex_triples(gene, vertex_pairs, k):
    """Chain compatible vertex pairs into three-exon VertexPair records.

    Only exons shorter than (k + 1) * 3 bases may act as the middle exon,
    since longer exons already host every k-mer on their own.

    Parameters
    ----------
    gene: Object, returned by SplAdder.
    vertex_pairs: List of VertexPair (two-exon records).
    k: Int. the length of kmers.

    Returns
    -------
    List of VertexPair spanning three vertices.
    """
    def _frames_compatible(front_coord, back_coord, strand):
        # Two pairs share a reading frame iff the distance between the shared
        # boundaries is a multiple of three.
        if strand == '+':
            return (front_coord.stop_v2 - back_coord.start_v1) % 3 == 0
        return (back_coord.stop_v1 - front_coord.start_v2) % 3 == 0

    triples = []
    exon_lengths = gene.splicegraph.vertices[1, :] - gene.splicegraph.vertices[0, :]
    # candidate middle exons: too short to contain a full (k+1)-codon window
    for middle_id in np.where(exon_lengths < (k + 1) * 3)[0]:
        fronts = [vp for vp in vertex_pairs
                  if vp.vertex_idxs[1] == middle_id and not vp.has_stop_codon]
        backs = [vp for vp in vertex_pairs
                 if vp.vertex_idxs[0] == middle_id and vp.vertex_idxs[1] is not np.nan]
        for front in fronts:
            for back in backs:
                if not _frames_compatible(front.modified_exons_coord,
                                          back.modified_exons_coord, gene.strand):
                    continue
                middle_coord = gene.splicegraph.vertices[:, front.vertex_idxs[1]]
                modi = Coord(start_v1=front.modified_exons_coord.start_v1,
                             stop_v1=front.modified_exons_coord.stop_v1,
                             start_v2=middle_coord[0],
                             stop_v2=middle_coord[1],
                             start_v3=back.modified_exons_coord.start_v2,
                             stop_v3=back.modified_exons_coord.stop_v2)
                orig = Coord(start_v1=front.original_exons_coord.start_v1,
                             stop_v1=front.original_exons_coord.stop_v1,
                             start_v2=middle_coord[0],
                             stop_v2=middle_coord[1],
                             start_v3=back.original_exons_coord.start_v2,
                             stop_v3=back.original_exons_coord.stop_v2)
                triples.append(VertexPair(
                    output_id=front.output_id + '_' + back.output_id.split('.')[-1],
                    read_frame=front.read_frame,
                    modified_exons_coord=modi,
                    original_exons_coord=orig,
                    vertex_idxs=front.vertex_idxs + [back.vertex_idxs[-1]],
                    has_stop_codon=back.has_stop_codon,
                    peptide_weight=front.peptide_weight))
    return triples
def get_and_write_peptide_and_kmer(peptide_dict=None, kmer_dict=None,
gene=None, all_vertex_pairs=None, ref_mut_seq=None, idx=None,
exon_som_dict=None, countinfo=None,
edge_idxs=None, edge_counts=None, seg_counts=None,
mutation=None,table=None,
size_factor=None, junction_list=None,
filepointer=None,
output_silence=False, kmer=None,
cross_graph_expr=None, all_read_frames=None, graph_samples=None,outbase=None, verbose_save=None):
"""
Parameters
----------
peptide_dict: Dict. (peptide sequence)|-> metadata
kmer_dict: Dict. (kmer sequence)|-> metadata
gene: Object, returned by SplAdder.
all_vertex_pairs: List of VertexPair
ref_mut_seq: Str, reference sequnce of specific chromosome
idx: Namedtuple Idx, has attribute idx.gene and idx.sample
exon_som_dict: Dict. (exon_id) |-> (mutation_postion)
countinfo: Namedtuple, contains SplAdder count information
mutation: Namedtuple Mutation, store the mutation information of specific chromosome and sample.
has the attribute ['mode', 'maf_dict', 'vcf_dict']
table: Namedtuple GeneTable, store the gene-transcript-cds mapping tables derived
from .gtf file. has attribute ['gene_to_cds_begin', 'ts_to_cds', 'gene_to_cds']
size_factor: Scalar. To adjust the expression counts based on the external file `libsize.tsv`
junction_list: List. Work as a filter to indicate some exon pair has certain
ordinary intron which can be ignored further.
filepointer: namedtuple, contains the columns and paths of each file of interest
output_silence: bool, flag indicating whether not to silence annotated peptides
kmer: bool, flag indicating whether to output kmers for this parse
outbase: str, base direactory used for temporary files
cross_graph_expr: bool, whether to generate the expression kmer matrix with all samples from graph
graph_samples: list, samples contained in the splicing graph object
"""
# check whether the junction (specific combination of vertices) also is annotated
# as a junction of a protein coding transcript
junction_flag = junction_is_annotated(gene, table.gene_to_ts, table.ts_to_cds)
som_exp_dict = get_som_expr_dict(gene, list(mutation.somatic_mutation_dict.keys()), countinfo, idx)
kmer_matrix = [[], [], [], []] # in cross sample mode, will contain unique kmers per gene (1), is_junction (2), segments expr per sample (3), junction expr per sample (4)
### iterate over all vertex pairs and translate
for kmer_type, vertex_pairs in all_vertex_pairs.items():
for ii,vertex_pair in enumerate(vertex_pairs):
modi_coord = vertex_pair.modified_exons_coord
vertex_list = vertex_pair.vertex_idxs
tran_start_pos = modi_coord.start_v1 if gene.strand == '+' else modi_coord.stop_v1
mut_seq_comb = get_mut_comb(exon_som_dict, vertex_pair.vertex_idxs)
variant_id = 0
for variant_comb in mut_seq_comb: # go through each variant combination
peptide,flag = get_peptide_result(vertex_pair, gene.strand, variant_comb, mutation.somatic_mutation_dict, ref_mut_seq, np.min(gene.splicegraph.vertices), all_read_frames)
# If cross junction peptide has a stop-codon in it, the frame
# will not be propagated because the read is truncated before it reaches the end of the exon.
# also in mutation mode, only output the case where ref is different from mutated
for pep_idx in np.arange(len(peptide.mut)):
if not peptide.mut[pep_idx] or not (peptide.mut[pep_idx] not in peptide.ref or mutation.mode == 'ref' or output_silence):
continue
new_output_id = ':'.join([gene.name, '_'.join([str(v) for v in vertex_list]), str(variant_id), str(tran_start_pos), kmer_type])
vertex_tuple_anno_flag = junction_tuple_is_annotated(junction_flag, vertex_list)
junction_is_in_given_list_flag = junction_is_in_given_list(gene.splicegraph, vertex_list, gene.strand, junction_list)
if variant_comb is not np.nan and som_exp_dict is not None: # which means there exist mutations
seg_exp_variant_comb = [int(som_exp_dict[ipos]) for ipos in variant_comb]
else:
seg_exp_variant_comb = np.nan # if no mutation or no count file, the segment expression is .
# collect expression data
if countinfo:
segment_expr, expr_list = get_segment_expr(gene, modi_coord, countinfo, idx, seg_counts, cross_graph_expr=cross_graph_expr)
else:
segment_expr, expr_list = np.nan, None
if countinfo and not flag.is_isolated and edge_counts is not None: ## Will flag is isolated overlap with edge_counts is None?
edge_expr = search_edge_metadata_segmentgraph(gene, modi_coord, countinfo, idx, edge_idxs, edge_counts, cross_graph_expr=cross_graph_expr)
else:
edge_expr = np.nan
### Peptides
add_dict_peptide(peptide_dict, [OutputMetadata(peptide=peptide.mut[pep_idx],
output_id=new_output_id,
read_frame=vertex_pair.read_frame.read_phase,
gene_name=gene.name,
gene_chr=gene.chr,
gene_strand=gene.strand,
mutation_mode=mutation.mode,
junction_annotated=vertex_tuple_anno_flag,
has_stop_codon=int(flag.has_stop),
is_in_junction_list=junction_is_in_given_list_flag,
is_isolated=int(flag.is_isolated),
variant_comb=variant_comb,
variant_seg_expr=seg_exp_variant_comb,
modified_exons_coord=modi_coord,
original_exons_coord=vertex_pair.original_exons_coord,
vertex_idx=vertex_list,
junction_expr=edge_expr,
segment_expr=segment_expr,
kmer_type=kmer_type
)], skip_expr=cross_graph_expr)
variant_id += 1
output_peptide = OutputJuncPeptide(output_id= new_output_id,
peptide=peptide.mut[pep_idx],
exons_coor=modi_coord,
junction_expr=edge_expr)
### kmers
if cross_graph_expr: #generate kmer x sample expression | |
ix, others, tmp_wrapped = \
self._prefetch_process[split].get()
ix1 = 0
ix2 = 0
video_id = self.info['videos'][ix]['video_id']
if split == 'train':
# get the video_id
# print('train: id:{}'.format(ix))
ix1 = self.train_label_start_ix[ix] # label_start_ix starts from 0
ix2 = self.train_label_end_ix[ix] - 1
elif split == 'val':
# print('val: id:{}'.format(ix))
# video_id = self.info['videos'][ix]['video_id']
ix1 = self.val_label_start_ix[ix - self.train_length]
ix2 = self.val_label_end_ix[ix - self.train_length] - 1
features = tmp_c3d
ncap = ix2 - ix1 + 1 # number of captions available for this video
lm_mask_batch = np.zeros([ncap, self.seq_length], dtype='float32')
# fetch the sequence labels of the video
if split == 'train':
lm_label_batch = self.train_label_file["labels"][ix1:ix2 + 1]
elif split == 'val':
lm_label_batch = self.val_label_file['labels'][ix1:ix2 + 1]
if tmp_wrapped:
wrapped = True
# record associated info as well
info_dict = {}
info_dict['ix'] = ix
info_dict['id'] = self.info['videos'][ix]['id']
infos.append(info_dict)
# generate lm mask
lm_label_batch = np.append(lm_label_batch, np.zeros((1, self.seq_length)), axis=0)
lm_label_batch[:, -1] = 0
if vars(self.opt).get("dropsent_mode", "nodrop") == "insert":
for _ix, row in enumerate(lm_mask_batch):
nonzeros = (lm_label_batch[_ix] != 0).sum() + 2
if nonzeros > 12:
if np.random.random() > 0.7:
crop_point = np.random.randint(12, nonzeros)
lm_label_batch[_ix, (crop_point + 1):] = lm_label_batch[_ix, crop_point:(-1)]
lm_label_batch[_ix, crop_point] = 0
row[:nonzeros + 1] = 1
elif vars(self.opt).get("dropsent_mode", "nodrop") == "truncate":
for _ix, row in enumerate(lm_mask_batch):
nonzeros = (lm_label_batch[_ix] != 0).sum() + 2
crop_point = nonzeros
if nonzeros > 12:
if np.random.random() > 0.7:
crop_point = np.random.randint(12, nonzeros)
lm_label_batch[_ix, crop_point:] = 0
row[:min(nonzeros, crop_point + 1)] = 1
else:
for _ix, row in enumerate(lm_mask_batch):
nonzeros = (lm_label_batch[_ix] != 0).sum() + 2
row[:nonzeros] = 1
lm_label_batch[:, -1] = 0
sent_len = np.array(list(map(lambda x: (x != 0).sum() + 2, lm_label_batch)), dtype='int')
# generate index selecting list
data['fc_feats'] = np.stack(features)
# data['att_feats'] = np.stack(tmp_att)
data['att_feats'] = np.array([0]).astype('float32')
data['lda_feats'] = tmp_lda.astype('float32')
# data['cg_labels'] = lm_label_batch.astype('int') # masked by lm mask
data['cg_gts'] = gts_index.astype('int') # masked by tap mask
# data['cg_masks'] = lm_mask_batch.astype('float32')
data['cg_sents_len'] = sent_len
data["tap_labels"] = tap_label.astype('float32') # masked by tap mask
data["tap_iou_scores"] = iou_scores # masked by tap mask
# data["S_tap_labels"] = others['S_tap_labels'].astype('float32') # masked by tap mask
# data['S_tap_masks_for_loss'] = others['S_tap_masks'].astype('float32')
data["tap_gts_index"] = gts_index.astype('int') # maked by tap mask
# data['tap_event_boundary'] = boundary.astype('float32')
# data['tap_event_desc_score'] = self.desc_scores[video_id].astype('float32')
data["tap_masks_for_loss"] = tap_masks # masked by tap mask
data["tap_masks_for_good_proposal"] = tap_masks_good_proposal.astype('int') # maksed by tap mask
tap_gts_for_good_proposal = (tap_masks_good_proposal * (gts_index + 1) - 1).astype('int')
data['duration'] = self.data[video_id]['duration']
data['sentences'] = self.data[video_id]['sentences']
data['gt_featstamps'] = others['gt_featstamps']
data['gt_timestamps'] = self.data[video_id]['timestamps']
data['tap_gts_for_good_proposal'] = others['tap_gts_for_good_proposal']
data['vid'] = video_id
w1 = 1 - w1 if vars(self.opt).get('reverse_w0', False) else w1
data['w1'] = w1
# data['bw1'] = np.array([self.bw1]).astype('float32')
# data['desc_w1'] = np.array([self.desc_w1]).astype('float32')
data['bounds'] = {'it_pos_now': self.iterators[split],
'it_max': len(self.split_ix[split]),
'wrapped': wrapped}
data['infos'] = infos
data["proposal_num"] = proposal_num = data['tap_masks_for_good_proposal'].sum().astype('int').tolist()
data['ix'] = ix
T, _ = features.shape
if proposal_num <= 0 or features.shape[0] <= 1:
return data
# pdb.set_trace()
if True:
featstamp = others['gt_featstamps']
data['gts_cg_select_list'] = np.array( [i for i,f in enumerate(featstamp)]).astype('int')
data['gts_ind_select_list'] = np.array([f[1] for f in featstamp]).astype('int')
data['gts_soi_select_list'] = np.array([[f[0], f[1] + 1] for f in featstamp]).astype('int')
gt_sentence_batch = []
for ind in data['gts_cg_select_list']:
gt_sentence_batch.append(data['sentences'][ind])
data['gts_sentences_batch'] = gt_sentence_batch
data['SOTA_featstamps'] = others['SOTA_featstamps']
data['SOTA_timestamps'] = others['SOTA_timestamps']
data['SOTA_Prop_score'] = others['SOTA_Prop_score']
if self.use_SOTA_tep and (others['SOTA_featstamps'] is not None):
featstamp = others['SOTA_featstamps']
for ind, (x, y) in enumerate(featstamp):
if y <= x:
assert AssertionError
assert y > x
if y - x >= (self.K + 1):
# print('SOTA_TEP: Ding')
rand = np.random.randint(0, y - x - (self.K - 1))
rand_start = x + rand
rand_end = rand_start + (self.K)
featstamp[ind] = [rand_start, rand_end]
data['SOTA_cg_select_list'] = np.array(
[tap_gts_for_good_proposal[f[1], f[1] - f[0] - 1] for f in featstamp]).astype('int')
data['SOTA_ind_select_list'] = np.array([f[1] for f in featstamp]).astype('int')
data['SOTA_soi_select_list'] = np.array([[f[0], f[1] + 1] for f in featstamp]).astype('int')
gt_sentence_batch = []
for ind in data['SOTA_cg_select_list']:
gt_sentence_batch.append(data['sentences'][ind])
data['SOTA_sentences_batch'] = gt_sentence_batch
# in training phase, random sample proposals
# if split == 'train':
if True:
if self.opt.tap_model == "sst_1stage":
data['action_label'] = (others['action_label'] >= 0).astype('int').astype('float32')
data['action_label_index'] = others['action_label'].astype('int').astype('float32')
data['ind_select_list'] = train_only['ind_select_list'] # sampled
data['ind_select_list_eval_1stage'] = train_only['ind_select_list_eval']
data['soi_select_list'] = train_only['soi_select_list']
data['soi_select_list_eval_1stage'] = train_only['soi_select_list_eval']
data['cg_select_list'] = train_only['cg_select_list']
data['cg_labels_1stage'] = lm_label_batch[data['cg_select_list']].astype('int') # sampled
data['cg_labels_eval_1stage'] = lm_label_batch.astype('int')
data['cg_masks_1stage'] = lm_mask_batch[data['cg_select_list']].astype('float32') # sampled
data['cg_masks_eval_1stage'] = lm_mask_batch.astype('float32')
for row, v in enumerate(data['cg_select_list']):
assert v >= -1
if v == -1:
data['cg_masks_1stage'][row] = np.zeros(self.seq_length)
else:
# if True:
# assert ncap == tap_gts_for_good_proposal.max() + 1
data['ind_select_list'] = train_only['ind_select_list'] # sampled
data['soi_select_list'] = train_only['soi_select_list']
data['cg_select_list'] = train_only['cg_select_list']
data['cg_labels'] = lm_label_batch.astype('int') # sampled
data['cg_masks'] = lm_mask_batch.astype('float32') # sampled
sentence_batch = []
for ind in train_only['cg_select_list']:
sentence_batch.append(data['sentences'][ind])
data['sentences_batch'] = sentence_batch
data['sampled_ids'] = train_only['sampled_ids']
if ncap != gts_index.max() + 1:
pdb.set_trace()
pass
assert ncap == gts_index.max() + 1, video_id
return data
def get_segment_indics(self, soi_select_list):
    """Compute region-index templates for every ordered pair of events.

    The relation of event1 (host, A) and event2 (customer, B) is split into
    three parts: the intersection (A∩B), the left/right difference sets of
    the host (A-B), and the left/right difference sets of the customer (B-A).

    :param soi_select_list: list of [start, end) spans, shape [batch_size, 2]
    :return: int array [bs, bs, 10]; templates of empty regions are zeroed.
    """
    # convert half-open [start, end) spans to inclusive end coordinates
    spans = np.array([(s, e - 1) for s, e in soi_select_list])  # (bs, 2)
    bs = len(spans)
    if len(spans.shape) <= 1:
        print(spans.shape, spans)
    # broadcast spans to [bs, bs, 1] host (rows) and customer (columns) views
    host_s, host_e = np.split(np.expand_dims(spans, 1).repeat(bs, 1), 2, axis=2)
    cust_s, cust_e = np.split(np.expand_dims(spans, 0).repeat(bs, 0), 2, axis=2)
    inter_lo = np.maximum(host_s, cust_s)
    inter_hi = np.minimum(host_e, cust_e)
    # five (start, end) template pairs -> [bs, bs, 10]
    templates = np.concatenate([
        inter_lo, inter_hi,                              # A ∩ B
        host_s, np.minimum(host_e, cust_s),              # left part of A - B
        np.maximum(cust_e, host_s), host_e,              # right part of A - B
        cust_s, np.minimum(cust_e, host_s),              # left part of B - A
        np.maximum(host_e, cust_s), cust_e,              # right part of B - A
    ], 2)
    region_valid = [
        inter_hi - inter_lo > 0,
        np.minimum(host_e, cust_s) - host_s > 0,
        host_e - np.maximum(cust_e, host_s) > 0,
        np.minimum(cust_e, host_s) - cust_s > 0,
        cust_e - np.maximum(host_e, cust_s) > 0,
    ]
    # each validity mask applies to both the start and the end column
    masks = np.concatenate([m for valid in region_valid for m in (valid, valid)], axis=2)
    return templates * masks  # [bs, bs, 10]
def get_shuffle_list(self, tap_gts_for_good_proposal, gt_featstamps, method='random'):
    """Build proposal index-selecting lists plus a sampled subset of proposal ids.

    tap_gts_for_good_proposal: 2-D array; entry (i, j) holds the caption index of
        the proposal anchored at time step i with offset j, or -1 for invalid ones.
    gt_featstamps: unused in this method; kept for interface compatibility.
    method: 'random' shuffles uniformly over all valid proposals; 'avg' balances
        the sampled proposals across captions.

    Returns (tap_list, lm_list, soi_list, sampled_ids):
        tap_list: np array of time-step indices.
        lm_list: np array of caption indices.
        soi_list: np array of [start, end) segment-of-interest spans.
        sampled_ids: np array of positions into the three lists, capped at
            self.prop_sample_num.

    Raises ValueError for an unknown sampling method.
    """
    if method == 'random':
        tap_ids, lm_ids, soi_ids = [], [], []
        for i, row in enumerate(tap_gts_for_good_proposal):
            for j, index in enumerate(row):
                if not index == -1:
                    tap_ids.append(i)
                    lm_ids.append(index)
                    soi_ids.append([i - j, i + 1])
        proposal_num = len(tap_ids)
        sampled_ids = np.arange(proposal_num).astype('int')
        np.random.shuffle(sampled_ids)
        sampled_ids = sampled_ids[:min(proposal_num, self.prop_sample_num)]
        return np.array(tap_ids), np.array(lm_ids), np.array(soi_ids), sampled_ids
    elif method == 'avg':
        # NOTE: the original marked this branch as untested (comment dated
        # 2018-07-26 by wangt). Bug fixes applied here: sorted()'s result was
        # discarded, zip(np.where(...)) / zip(*x)[0] were Python-2-only idioms,
        # and the branch never returned anything.
        ncap = tap_gts_for_good_proposal.max() + 1
        prop_per_cap = int(self.prop_sample_num / ncap)
        whole_props_per_cap = [[cap, (tap_gts_for_good_proposal == cap).sum()] for cap in range(ncap)]
        whole_props_per_cap.sort(key=lambda x: x[1])  # ascending by proposal count
        sampled = []
        for cap_id, prop_num in whole_props_per_cap:
            # (i, j) positions of every proposal assigned to this caption
            positions = list(zip(*np.where(tap_gts_for_good_proposal == cap_id)))
            if len(positions) > prop_per_cap:
                chosen = np.random.choice(len(positions), size=prop_per_cap, replace=False)
                positions = [positions[c] for c in chosen]
            sampled.extend(positions)
        tap_sample_list = np.array([i for i, j in sampled])
        lm_sample_list = np.array([tap_gts_for_good_proposal[i, j] for i, j in sampled])
        soi_sample_list = np.array([[i - j, i + 1] for i, j in sampled])
        # all balanced samples are kept; ids simply enumerate them
        sampled_ids = np.arange(len(sampled)).astype('int')
        return tap_sample_list, lm_sample_list, soi_sample_list, sampled_ids
    else:
        raise ValueError('sample method wrong')
# It's not coherent to make DataLoader a subclass of Dataset,
# but essentially, we only need to implement the following to functions,
# so that the torch.utils.data.DataLoader can load the data according
# the index. However, it's minimum change to switch to pytorch data loading
def __getitem__(self, index):
    """Return one sample for `index`; the tuple is later merged by collate_fn."""
    # `index` is used directly as the video index (no split_ix indirection).
    return self.get_data(index)
def __len__(self):
    """Dataset size: the total number of videos in the info table."""
    videos = self.info['videos']
    return len(videos)
def get_v_GwIHO7HpGkY(self):
    """Draw training batches until the one for video 'v_GwIHO7HpGkY' appears."""
    found = None
    while found is None:
        batch = self.get_batch('train')
        if batch['vid'] == 'v_GwIHO7HpGkY':
            found = batch
    return found
class ArraySampler(data.sampler.SubsetRandomSampler):
    """Sampler that yields the given indices in their original order.

    Subclasses SubsetRandomSampler only to reuse its constructor; iteration
    is deterministic instead of randomly permuted.
    """
    def __iter__(self):
        # deterministic pass, unlike the random parent implementation
        return iter(self.indices)
class BlobFetcher():
"""Experimental class for prefetching blobs in a separate process."""
def __init__(self, split, dataloader, if_shuffle=False):
    """Bind this prefetcher to one split of the owning dataloader.

    split: str, name of the split this fetcher serves (e.g. 'train', 'val').
    dataloader: the owning loader; provides split_ix and iterators state.
    if_shuffle: bool, whether reset() reshuffles the split's index list.
    """
    # (the original docstring described an unrelated image-captioning db
    #  and was replaced)
    self.split = split
    self.dataloader = dataloader
    self.if_shuffle = if_shuffle
# Add more in the queue
def reset(self):
"""
Two cases:
1. not hasattr(self, 'split_loader'): Resume from previous training.
Create the dataset given the saved split_ix and iterator
2. wrapped: a new epoch, the split_ix and iterator have been updated in
the get_minibatch_inds already.
"""
# batch_size is 0, the merge is done in DataLoader class
if self.if_shuffle:
random.shuffle(self.dataloader.split_ix[self.split])
sampler = ArraySampler(self.dataloader.split_ix[self.split][self.dataloader.iterators[self.split]:])
# | |
# gh_stars: 0  (scraper artifact; the bare tag was not valid Python)
"""Routes related to recipe data."""
import os
import random
import traceback
from functools import reduce
import peewee as pw
from flask import Blueprint, current_app, request, session
from recapi import utils
from recapi.models import recipemodel, storedmodel, tagmodel
from recapi.models.usermodel import User
bp = Blueprint("recipe_data", __name__)
@bp.route("/recipe_data")
def recipe_data():
    """Return all available recipe data (published recipes only)."""
    # "complete" query param (default "false") toggles loading user metadata too
    complete = request.args.get("complete", "false").lower() == "true"
    return get_recipe_data(published=True, complete_data=complete)
@bp.route("/recipe_suggestions")
@utils.gatekeeper()
def recipe_suggestions():
    """Return data for all unpublished recipes (login required via gatekeeper)."""
    return get_recipe_data(published=False)
def get_recipe_data(published=False, complete_data=False):
    """Return published or unpublished recipe data.

    published: bool, select published (True) or unpublished (False) recipes.
    complete_data: bool, if True also join in creator / last-editor user data.
    """
    try:
        # alias so the User table can be joined twice (creator vs last editor)
        Changed = User.alias()
        # base query: recipes with stored-status and a comma-concatenated tag list
        recipes = recipemodel.Recipe.select(
            recipemodel.Recipe, storedmodel.Stored,
            pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
        ).where(
            recipemodel.Recipe.published == published
        ).join(
            storedmodel.Stored, pw.JOIN.LEFT_OUTER, on=(storedmodel.Stored.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.RecipeTags, pw.JOIN.LEFT_OUTER, on=(tagmodel.RecipeTags.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.Tag, pw.JOIN.LEFT_OUTER, on=(tagmodel.Tag.id == tagmodel.RecipeTags.tagID)
        ).group_by(
            recipemodel.Recipe.id)
        if complete_data:
            # Load in User table: created_by joined under alias "a",
            # changed_by under alias "b"
            recipes = recipes.select(
                User, Changed, recipemodel.Recipe, storedmodel.Stored,
                pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
            ).switch(
                recipemodel.Recipe
            ).join(
                User, pw.JOIN.LEFT_OUTER, on=(User.id == recipemodel.Recipe.created_by).alias("a")
            ).switch(
                recipemodel.Recipe
            ).join(
                Changed, pw.JOIN.LEFT_OUTER, on=(Changed.id == recipemodel.Recipe.changed_by).alias("b"))
        data = recipemodel.get_recipes(recipes, complete_data=complete_data)
        return utils.success_response(msg="Data loaded", data=data, hits=len(data))
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to load data: {e}")
@bp.route("/preview_data", methods=['POST'])
def preview_data():
    """Generate recipe preview. Convert markdown data to html."""
    try:
        form = utils.recipe2html(request.form.to_dict())
        form = utils.deserialize(form)
        uploaded = request.files.get("image")
        if uploaded:
            # store the uploaded picture in the tmp dir under a random name
            fname = utils.make_random_filename(uploaded, file_extension=".jpg")
            tmp_dir = os.path.join(current_app.instance_path, current_app.config.get("TMP_DIR"))
            utils.save_upload_image(uploaded, fname, tmp_dir)
            form["image"] = "tmp/" + fname
        return utils.success_response(msg="Data converted", data=form)
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to convert data: {e}")
@bp.route("/view_recipe")
def view_recipe():
    """Generate view for one recipe: same as /get_recipe but markdown is
    converted to html."""
    return get_recipe_from_db(convert=True)
@bp.route("/get_recipe")
def get_recipe():
    """Get raw (markdown) data for one recipe."""
    return get_recipe_from_db()
def get_recipe_from_db(convert=False):
    """Get data for one recipe, looked up by "id" (preferred) or "title"
    query parameter. Convert markdown to html if convert=True.
    """
    recipe_id = request.args.get("id")
    title = request.args.get("title")
    try:
        # alias so the User table can be joined twice (creator "a", editor "b")
        Changed = User.alias()
        recipes = recipemodel.Recipe.select(
            recipemodel.Recipe, User, Changed, storedmodel.Stored,
            pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
        ).where(
            # id wins when both parameters are supplied
            recipemodel.Recipe.id == recipe_id if recipe_id else
            recipemodel.Recipe.title == title
        ).join(
            storedmodel.Stored, pw.JOIN.LEFT_OUTER, on=(storedmodel.Stored.recipeID == recipemodel.Recipe.id)
        ).switch(
            recipemodel.Recipe
        ).join(
            User, pw.JOIN.LEFT_OUTER, on=(User.id == recipemodel.Recipe.created_by).alias("a")
        ).switch(
            recipemodel.Recipe
        ).join(
            Changed, pw.JOIN.LEFT_OUTER, on=(Changed.id == recipemodel.Recipe.changed_by).alias("b")
        ).switch(
            recipemodel.Recipe
        ).join(
            tagmodel.RecipeTags, pw.JOIN.LEFT_OUTER, on=(tagmodel.RecipeTags.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.Tag, pw.JOIN.LEFT_OUTER, on=(tagmodel.Tag.id == tagmodel.RecipeTags.tagID)
        ).group_by(recipemodel.Recipe.id)
        # an empty result raises IndexError here, handled below
        recipe = recipemodel.get_recipe(recipes[0])
        if convert:
            recipe = utils.recipe2html(recipe)
        # NOTE(review): this falsy-check runs after conversion and is only hit
        # if get_recipe/recipe2html returns an empty value; a missing row
        # surfaces as IndexError above instead — confirm intent.
        if not recipe:
            return utils.error_response(f"Could not find recipe '{title}'."), 404
        return utils.success_response(msg="Data loaded", data=recipe)
    except IndexError:
        return utils.error_response(f"Could not find recipe with ID '{recipe_id}'"), 404
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to load recipe: {e}"), 400
@bp.route("/add_recipe", methods=['POST'])
@utils.gatekeeper()
def add_recpie():
    """Add new recipe to the data base.

    Expects multipart form data with the recipe fields and an optional
    "image" file. On failure, any partially created recipe rows are removed
    again. Returns a success response containing the recipe URL, 409 on a
    duplicate title, 400 otherwise.
    """
    recipe_id = None
    # NOTE(review): `filename` is never assigned below, so the image-cleanup
    # branch in the error handler is dead code — confirm save_image's contract
    # before wiring it up.
    filename = None
    try:
        data = request.form.to_dict()
        data = utils.deserialize(data)
        data["user"] = session.get("uid")
        # Bug fix: default must be the string "true" — the original default of
        # True crashed on True.lower() whenever "published" was missing.
        data["published"] = False if data.get("published", "true").lower() == "false" else True
        image_file = request.files.get("image")
        recipe_id = recipemodel.add_recipe(data)
        url = utils.make_url(data["title"], recipe_id)
        recipemodel.set_url(recipe_id, url)
        tagmodel.add_tags(data, recipe_id)
        storedmodel.add_recipe(recipe_id)
        save_image(data, recipe_id, image_file)
        return utils.success_response(msg="Recipe saved", url=url)
    except pw.IntegrityError:
        return utils.error_response("Recipe title already exists!"), 409
    except Exception as e:
        # Delete recipe data and image
        if recipe_id is not None:
            storedmodel.delete_recipe(recipe_id)
            recipemodel.delete_recipe(recipe_id)
        if filename is not None:
            img_path = os.path.join(current_app.instance_path, current_app.config.get("IMAGE_PATH"))
            filepath = os.path.join(img_path, filename)
            try:
                utils.remove_file(filepath)
            except Exception:
                current_app.logger.warning(f"Could not delete file: {filepath}")
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to save data: {e}"), 400
@bp.route("/edit_recipe", methods=["POST"])
@utils.gatekeeper()
def edit_recpie():
    """Edit a recipe that already exists in the data base.

    Updates the recipe row, its tags and URL; removes the stored image file
    when the client cleared the image field. Returns the (possibly new) URL,
    400 on failure.
    """
    try:
        data = request.form.to_dict()
        data = utils.deserialize(data)
        data["user"] = session.get("uid")  # Store info about which user edited last
        # Bug fix: default must be the string "true" — the original default of
        # True crashed on True.lower() whenever "published" was missing.
        data["published"] = False if data.get("published", "true").lower() == "false" else True
        url = utils.make_url(data["title"], data["id"])
        data["url"] = url
        image_file = request.files.get("image")
        if not image_file and not data["image"]:
            # Image removed by the user: delete the old file from disk.
            recipe = recipemodel.Recipe.get(recipemodel.Recipe.id == data["id"])
            if recipe.image:
                try:
                    # Bug fix: the original nested utils.remove_file() inside
                    # itself and omitted instance_path from the image path.
                    utils.remove_file(os.path.join(current_app.instance_path,
                                                   current_app.config.get("IMAGE_PATH"),
                                                   recipe.image))
                except OSError:
                    current_app.logger.warning(traceback.format_exc())
        else:
            save_image(data, data["id"], image_file)
        recipemodel.edit_recipe(data["id"], data)
        tagmodel.add_tags(data, data["id"])
        return utils.success_response(msg="Recipe saved", url=url)
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to save data: {e}"), 400
@bp.route("/suggest", methods=["POST"])
@utils.gatekeeper(allow_guest=True)
def suggest_recipe():
    """Save a recipe suggestion in the data base (published=False).

    Guests are allowed. After saving, a best-effort notification email is
    sent to the admins; email failures are logged but do not fail the request.
    """
    recipe_id = None
    # NOTE(review): `filename` is never assigned below, so the image-cleanup
    # branch in the error handler is dead code — confirm save_image's contract.
    filename = None
    try:
        data = request.form.to_dict()
        data = utils.deserialize(data)
        data["user"] = session.get("uid")
        data["published"] = False
        image_file = request.files.get("image")
        recipe_id = recipemodel.add_recipe(data)
        url = utils.make_url(data["title"], recipe_id)
        recipemodel.set_url(recipe_id, url)
        tagmodel.add_tags(data, recipe_id)
        storedmodel.add_recipe(recipe_id)
        save_image(data, recipe_id, image_file)
        # Attempt to send email to admins (message text is in Swedish)
        try:
            msg = ("Hej kalufs-admin!\n\nEtt nytt receptförslag med titel \"{}\" har lämnats in av {}.\n"
                   "Logga in på https://kalufs.lol/recept för att granska och publicera receptet.\n\n"
                   "Vänliga hälsningar,\nkalufs.lol"
                   ).format(data.get("title"), data.get("suggester"))
            utils.send_mail(current_app.config.get("EMAIL_TO"), "Nytt receptförslag!", msg)
        except Exception:
            # email is best-effort; log and continue
            current_app.logger.error(traceback.format_exc())
        return utils.success_response(msg="Recipe saved", url=url)
    except pw.IntegrityError:
        return utils.error_response("Recipe title already exists!"), 409
    except Exception as e:
        # Delete recipe data and image
        if recipe_id is not None:
            storedmodel.delete_recipe(recipe_id)
            recipemodel.delete_recipe(recipe_id)
        if filename is not None:
            img_path = os.path.join(current_app.instance_path, current_app.config.get("IMAGE_PATH"))
            filepath = os.path.join(img_path, filename)
            try:
                utils.remove_file(filepath)
            except Exception:
                current_app.logger.warning(f"Could not delete file: {filepath}")
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to save data: {e}"), 400
def save_image(data, recipe_id, image_file):
    """Save uploaded image in data base.

    Two sources are supported: a fresh file upload (``image_file``) or an
    image already uploaded to the tmp folder by the external-source parser
    (``data["image"]`` starting with "tmp"). In both cases the file ends up
    in the image folder, the recipe row is updated, and thumbnail/medium
    downscaled versions are written.
    """
    img_path = os.path.join(current_app.instance_path, current_app.config.get("IMAGE_PATH"))
    if image_file:
        # Fresh upload: generate the DB file name and save the image.
        filename = utils.make_db_filename(image_file, id=str(recipe_id), file_extension=".jpg")
        utils.save_upload_image(image_file, filename, img_path)
        _register_and_downscale(data, recipe_id, img_path, filename)
    # When recipe was parsed from external source, image is already uploaded
    elif data.get("image") and data.get("image", "").startswith("tmp"):
        filename = utils.make_db_filename(data["image"], id=str(recipe_id), file_extension=".jpg")
        # Get path to file and copy it from tmp to img folder
        src_directory = os.path.join(current_app.instance_path, current_app.config.get("TMP_DIR"))
        src = os.path.join(src_directory, os.path.split(data["image"])[1])
        utils.copy_file(src, img_path, filename)
        _register_and_downscale(data, recipe_id, img_path, filename)


def _register_and_downscale(data, recipe_id, img_path, filename):
    """Store the image file name on the recipe row and write downscaled copies.

    Shared tail of both save_image branches (was duplicated inline).
    """
    data["image"] = filename
    recipemodel.set_image(recipe_id, data)
    src = os.path.join(img_path, filename)
    thumb_destfolder = os.path.join(current_app.instance_path, current_app.config.get("THUMBNAIL_PATH"))
    medium_destfolder = os.path.join(current_app.instance_path, current_app.config.get("MEDIUM_IMAGE_PATH"))
    utils.save_downscaled(src, thumb_destfolder, thumbnail=True)
    utils.save_downscaled(src, medium_destfolder)
@bp.route("/delete_recipe")
@utils.gatekeeper()
def delete_recpie():
    """Remove recipe from data base, along with its image files."""
    try:
        recipe_id = request.args.get("id")
        recipe = recipemodel.Recipe.get(recipemodel.Recipe.id == recipe_id)
        if recipe.image:
            # Image folders in the config are relative to the instance folder
            # (see save_image); the old code joined without instance_path.
            # A missing file must not abort the data-base deletion.
            for conf_key in ("IMAGE_PATH", "THUMBNAIL_PATH", "MEDIUM_IMAGE_PATH"):
                folder = os.path.join(current_app.instance_path, current_app.config.get(conf_key))
                try:
                    utils.remove_file(os.path.join(folder, recipe.image))
                except OSError:
                    current_app.logger.warning(traceback.format_exc())
        tagmodel.delete_recipe(recipe_id)
        storedmodel.delete_recipe(recipe_id)
        recipemodel.delete_recipe(recipe_id)
        return utils.success_response(msg="Recipe removed")
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to remove recipe: {e}"), 400
@bp.route("/search")
def search():
    """Search recipe data base.

    Supports three mutually exclusive query types, checked in this order:
    'tag' (comma-separated tag names), 'user' (display name or suggester),
    and 'q' (free-text search over title, contents, ingredients, source,
    user name and tags).
    """
    try:
        tag = request.args.get("tag")
        user = request.args.get("user")
        # Default to "" so a request without any parameter cannot crash on
        # q.startswith below (the old code got None here).
        q = request.args.get("q", "")
        if tag:
            # Tag Search
            querytype = "tag"
            searchterm = tag
            tagset = set(tag.split(","))
            tagstructure = tagmodel.get_tag_structure(simple=True)
            taggroups = []
            for cat in tagstructure:
                selected_tags = list(set(cat.get("tags")).intersection(tagset))
                if selected_tags:
                    taggroups.append(selected_tags)
            # Chain tags with OR within a category and with AND between categories
            and_expressions = []
            for taggroup in taggroups:
                or_expressions = [
                    pw.fn.FIND_IN_SET(tag, pw.fn.group_concat(tagmodel.Tag.tagname))
                    for tag in taggroup
                ]
                and_expressions.append(reduce(pw.operator.or_, or_expressions))
            expr = reduce(pw.operator.and_, and_expressions)
        elif user:
            # User search
            querytype = "user"
            searchterm = user
            expr = ((User.displayname == user) | recipemodel.Recipe.suggester.contains(user))
        else:
            # String search: separate by whitespace and search in all relevant fields
            querytype = "q"
            searchterm = q
            if len(q) > 1 and q.startswith('"') and q.endswith('"'):
                # Quoted query: search for the exact phrase.
                searchitems = [q[1:-1]]
            else:
                searchitems = q.split(" ")
                searchitems = [i.rstrip(",") for i in searchitems]
            expr_list = [
                (
                    recipemodel.Recipe.title.contains(s)
                    | recipemodel.Recipe.contents.contains(s)
                    | recipemodel.Recipe.ingredients.contains(s)
                    | recipemodel.Recipe.source.contains(s)
                    | User.username.contains(s)
                    | pw.fn.FIND_IN_SET(s, pw.fn.group_concat(tagmodel.Tag.tagname))
                ) for s in searchitems
            ]
            expr = reduce(pw.operator.and_, expr_list)
        # Build query
        Changed = User.alias()
        query = recipemodel.Recipe.select(
            recipemodel.Recipe, User, Changed, storedmodel.Stored,
            pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
        ).join(
            storedmodel.Stored, pw.JOIN.LEFT_OUTER, on=(storedmodel.Stored.recipeID == recipemodel.Recipe.id)
        ).join(
            User, pw.JOIN.LEFT_OUTER, on=(User.id == recipemodel.Recipe.created_by).alias("a")
        ).switch(
            recipemodel.Recipe
        ).join(
            Changed, pw.JOIN.LEFT_OUTER, on=(Changed.id == recipemodel.Recipe.changed_by).alias("b")
        ).switch(
            recipemodel.Recipe
        ).join(
            tagmodel.RecipeTags, pw.JOIN.LEFT_OUTER, on=(tagmodel.RecipeTags.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.Tag, pw.JOIN.LEFT_OUTER, on=(tagmodel.Tag.id == tagmodel.RecipeTags.tagID)
        ).group_by(
            recipemodel.Recipe.id
        ).where(
            (recipemodel.Recipe.published == True)
        ).having(expr)
        data = recipemodel.get_recipes(query)
        # Report the term that was actually searched for (the old code always
        # interpolated q, which is empty for tag and user searches).
        message = f"Query: {querytype}={searchterm}"
        return utils.success_response(msg=message, data=data, hits=len(data))
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Query failed: {e}"), 400
@bp.route("/get_tag_categories")
def get_tag_categories():
    """Return a list of tag categories."""
    return utils.success_response(msg="", data=tagmodel.get_tag_categories())
@bp.route("/get_tag_structure")
def get_tag_structure():
    """Return the full tag structure."""
    return utils.success_response(msg="", data=tagmodel.get_tag_structure())
@bp.route("/get_tag_structure_simple")
def get_tag_structure_simple():
    """Return the simplified tag structure."""
    return utils.success_response(msg="", data=tagmodel.get_tag_structure(simple=True))
@bp.route("/random")
def get_random_recipe():
    """Return one recipe at random from randomizer categories in config.

    When no RANDOM_TAGS are configured, any published recipe may be returned
    (the old code crashed with an uncaught TypeError from reduce() on an
    empty list, outside the try block).
    """
    tags = current_app.config.get("RANDOM_TAGS", [])
    try:
        query = recipemodel.Recipe.select(
            recipemodel.Recipe, storedmodel.Stored, pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
        ).where(
            recipemodel.Recipe.published == True
        ).join(
            storedmodel.Stored, pw.JOIN.LEFT_OUTER, on=(storedmodel.Stored.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.RecipeTags, pw.JOIN.LEFT_OUTER, on=(tagmodel.RecipeTags.recipeID == recipemodel.Recipe.id)
        ).join(
            tagmodel.Tag, pw.JOIN.LEFT_OUTER, on=(tagmodel.Tag.id == tagmodel.RecipeTags.tagID)
        ).group_by(
            recipemodel.Recipe.id
        )
        if tags:
            # Restrict to recipes carrying at least one of the configured tags.
            or_expressions = reduce(pw.operator.or_, [
                pw.fn.FIND_IN_SET(tag, pw.fn.group_concat(tagmodel.Tag.tagname))
                for tag in tags
            ])
            query = query.having(or_expressions)
        recipe = [random.choice(recipemodel.get_recipes(query))]
        return utils.success_response(msg="Got random recipe", data=recipe, hits=len(recipe))
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to load data: {e}")
@bp.route("/toggle_stored", methods=["POST"])
@utils.gatekeeper()
def toggle_stored():
    """Toggle the 'stored' value of a recipe."""
    try:
        payload = request.get_json()
        is_stored = payload.get("stored", False)
        storedmodel.toggle_stored(payload["id"], is_stored)
        message = "Recipe stored" if is_stored else "Recipe unstored"
        return utils.success_response(msg=message)
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return utils.error_response(f"Failed to save data: {e}"), 400
@bp.route("/stored_recipes")
@utils.gatekeeper()
def stored_recipes():
"""Return data for all stored recipes."""
try:
recipes = recipemodel.Recipe.select(
recipemodel.Recipe, storedmodel.Stored, pw.fn.group_concat(tagmodel.Tag.tagname).alias("taglist")
).join(
storedmodel.Stored, pw.JOIN.LEFT_OUTER, on=(storedmodel.Stored.recipeID == recipemodel.Recipe.id)
).where(
| |
if self.reac_bonds != {frozenset({inst[-2], inst[-3]}), frozenset({inst[-4], inst[-5]}), frozenset({inst[0], inst[1]})}:
# # new = 0
# if new:
# self.reactions[name].append(inst)
return 0
def search_Korcek_step2_even(self, natom, atom, bond, rad):
"""
Korcek step 2 for cyclic peroxides with even number of atoms in the ring.
Still, the 4 membered ring equals a 2,2 cycloaddition and is not considered here.
Ring breaks at O-O and then at every second bond, no H shift is needed.
"""
name = 'Korcek_step2_even'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
for ringsize in range(6, 14, 2): # even number of atoms in the ring
motif = ['X' for i in range(ringsize)]
motif[-1] = 'O'
motif[0] = 'O'
korcek_chain = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
# filter clockwise and anti clockwise hits
korcek_chain_filt = []
for kch in korcek_chain:
k = copy.deepcopy(kch) # need in order to prevent changes to korcek_chain with reverse()
l = copy.deepcopy(kch)
l.reverse()
if k not in korcek_chain_filt and l not in korcek_chain_filt:
korcek_chain_filt.append(kch)
for ins in korcek_chain_filt:
if bond[ins[0]][ins[-1]] == 1: # it is a ring
rxns += [ins]
self.new_reaction(rxns, name, full=True)
# for n, inst in enumerate(rxns):
# new = 1
# #filter for the same reactions
# for instance in self.reactions[name]:
# if inst == instance:
# new = 0
# # filter for specific reaction after this
# #if self.one_reaction_fam and new:
# # if ring_var[n] == 7:
# # if (not {frozenset({inst[-2], inst[-3]}), frozenset({inst[0], inst[1]})}.issubset(self.reac_bonds)) or self.prod_bonds != {frozenset()}:
# # new = 0
# # if ring_var[n] == 8:
# # # TODO this is an incomplete check
# # if self.reac_bonds != {frozenset({inst[-2], inst[-3]}), frozenset({inst[-4], inst[-5]}), frozenset({inst[0], inst[1]})}:
# # new = 0
# if new:
# self.reactions[name].append(inst)
return 0
def search_Korcek_step2(self, natom, atom, bond, rad):
"""
Generalized Korcek step
The 4 membered ring equals a 2,2 cycloaddition and is not considered here (no H shift involved)
The 5 membered ring proceeds through a 6 membered transition state (including a 1,2 H migration):
--O--O--
| |
HO-C---C----C-R ==> RCOOH + R3CC(R)O
| / \ |
R R R R
6-membered ring: TODO
Only the forward direction is included.
"""
name = 'Korcek_step2'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
ring_var = [] # a helper variable to temporarily mark the ring size within this function
for ringsize in range(5, 6):
motif = ['X' for i in range(ringsize + 1)]
#motif[-1] = 'H' # deleted because atom types are no longer checked
korcek_chain = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for ins in korcek_chain:
if bond[ins[0]][ins[-2]] == 1:
rxns += [ins]
ring_var.append(ringsize)
self.new_reaction(rxns, name, a=0, b=-1)
# for n, inst in enumerate(rxns):
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if ring_var[n] == 7:
# if (not {frozenset({inst[-2], inst[-3]}), frozenset({inst[0], inst[1]})}.issubset(self.reac_bonds)) or self.prod_bonds != {frozenset()}:
# new = 0
# if ring_var[n] == 8:
# # TODO this is an incomplete check
# if self.reac_bonds != {frozenset({inst[-2], inst[-3]}), frozenset({inst[-4], inst[-5]}), frozenset({inst[0], inst[1]})}:
# new = 0
return 0
def search_r22_cycloaddition(self, natom, atom, bond, rad):
"""
This is an RMG class.
R R R---R
|| + || <== | |
R R R---R
N.B.: only the reverse direction is available. Also, the 3 related RMG classes are treated as one.
"""
name = 'r22_cycloaddition'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
if not any([len(ci) == 4 for ci in self.species.cycle_chain]): return
for ci in self.species.cycle_chain:
if len(ci) == 4:
# there are two ways to slice a 4-mem ring
ring1 = ci
ring2 = np.ndarray.tolist(np.roll(ring1, 1))
# FIXME only works for 1 cycle
rxns += [ring1]
rxns += [ring2]
self.new_reaction(rxns, name, a=0, b=1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# # TODO need to make sure that these are the bonds that are broken, see the reaction details
# if self.reac_bonds != {frozenset({inst[0], inst[1]}), frozenset({inst[2], inst[3]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_r12_cycloaddition(self, natom, atom, bond, rad):
"""
This is an RMG class.
R--R
R=R + R: <== \ /
R
N.B.: only the reverse direction is available.
"""
name = 'r12_cycloaddition'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
if not any([len(ci) == 3 for ci in self.species.cycle_chain]): return
for ci in self.species.cycle_chain:
if len(ci) == 3:
# there are three ways to slice a 3-mem ring
ring1 = self.species.cycle_chain
ring2 = np.ndarray.tolist(np.roll(ring1, 1))
ring3 = np.ndarray.tolist(np.roll(ring1, 2))
# FIXME only works for 1 cycle
rxns += ring1
rxns += ring2
rxns += ring3
self.new_reaction(rxns, name, a=0, b=1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# # TODO need to make sure that these are the bonds that are broken, see the reaction details
# if self.reac_bonds != {frozenset({inst[0], inst[2]}), frozenset({inst[1], inst[2]})} or self.prod_bonds != {frozenset()}:
# new = 0
return 0
def search_r12_insertion_R(self, natom, atom, bond, rad):
"""
This is an RMG class.
X
|
X-P + R-R <== R-P-R
"""
#if np.sum(rad) != 0: return
name = 'r12_insertion_R'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X','X','X']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
#if all([atom[atomi] != 'H' for atomi in instance]):
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=1, c=2)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != set({frozenset({inst[0], inst[1]}), frozenset({inst[1], inst[2]})}) or self.prod_bonds != {frozenset({inst[0], inst[2]})}:
# new = 0
return 0
def search_r13_insertion_CO2(self, natom, atom, bond, rad):
"""
This is an RMG class.
O
||
O=C=O + R-R <== R-C-O-R
"""
#if np.sum(rad) != 0: return
name = 'r13_insertion_CO2'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X','C','O','X']
instances = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
for instance in instances:
for atomi in range(natom):
if not atomi in instance:
if atom[atomi] == 'O':
if bond[atomi][instance[1]] == 2:
rxns += [instance]
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != {frozenset({inst[0], inst[1]}), frozenset({inst[2], inst[3]})} or self.prod_bonds != {frozenset({inst[0], inst[3]})}:
# new = 0
return 0
def search_r13_insertion_ROR(self, natom, atom, bond, rad):
"""
This is an RMG class.
R1-O-R2 + R=R <== R1-R-R-O-R2
"""
#if np.sum(rad) != 0: return
name = 'r13_insertion_ROR'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
motif = ['X','X','X','O']
rxns = find_motif.start_motif(motif, natom, bond, atom, -1, self.species.atom_eqv)
self.new_reaction(rxns, name, a=0, b=-1)
# # filter for specific reaction after this
# if self.one_reaction_fam and new:
# if self.reac_bonds != set({frozenset({inst[0], inst[1]}), frozenset({inst[2], inst[3]})}) or self.prod_bonds != {frozenset({inst[0], inst[3]})}:
# new = 0
return 0
def search_Diels_alder_addition(self, natom, atom, bond, rad):
"""
This is an RMG class.
R R
// / \
R R R R
| + || <== || |
R R R R
\\ \ /
R R
N.B.: only the reverse direction is available.
"""
name = 'Diels_alder_addition'
if not name in self.reactions:
self.reactions[name] = []
rxns = [] #reactions found with the current resonance isomer
if not any([len(ci) == 6 for ci in self.species.cycle_chain]): return
for ci in self.species.cycle_chain:
if len(ci) == 6:
bondsum = 0
for index, atomi in enumerate(ci):
if index < 5:
atomj = ci[index + 1]
else:
atomj = ci[0]
bondsum += bond[atomi][atomj]
if bond[atomi][atomj] == 2:
start = atomi
startindex = index
if bondsum != | |
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import matplotlib.pyplot as plt
import pyroomacoustics as pra
from pyroomacoustics.doa import circ_dist
import numpy as np
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(780, 557)
MainWindow.setFixedSize(780,557)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(0, 0, 811, 541))
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.error_dialog = QtWidgets.QMessageBox()
self.error_dialog.setIcon(QMessageBox.Warning)
self.error_dialog.setText("Error")
self.error_dialog.setWindowTitle("Error")
self.error_dialog.setInformativeText("Please fill all the parameters!")
self.groupBox = QtWidgets.QGroupBox(self.tab)
self.groupBox.setGeometry(QtCore.QRect(10, 10, 391, 191))
self.groupBox.setObjectName("groupBox")
self.Y_value_2 = QtWidgets.QLineEdit(self.groupBox)
self.Y_value_2.setGeometry(QtCore.QRect(230, 60, 41, 20))
self.Y_value_2.setObjectName("Y_value_2")
self.label = QtWidgets.QLabel(self.groupBox)
self.label.setGeometry(QtCore.QRect(30, 30, 131, 20))
self.label.setToolTip("")
self.label.setStatusTip("")
self.label.setWhatsThis("")
self.label.setObjectName("label")
self.label_3 = QtWidgets.QLabel(self.groupBox)
self.label_3.setGeometry(QtCore.QRect(30, 60, 131, 20))
self.label_3.setObjectName("label_3")
self.X_value = QtWidgets.QLineEdit(self.groupBox)
self.X_value.setGeometry(QtCore.QRect(170, 30, 41, 20))
self.X_value.setObjectName("X_value")
self.X_value_2 = QtWidgets.QLineEdit(self.groupBox)
self.X_value_2.setGeometry(QtCore.QRect(170, 60, 41, 20))
self.X_value_2.setObjectName("X_value_2")
self.Y_value = QtWidgets.QLineEdit(self.groupBox)
self.Y_value.setGeometry(QtCore.QRect(230, 30, 41, 20))
self.Y_value.setObjectName("Y_value")
self.label_4 = QtWidgets.QLabel(self.groupBox)
self.label_4.setGeometry(QtCore.QRect(220, 60, 16, 16))
self.label_4.setObjectName("label_4")
self.label_2 = QtWidgets.QLabel(self.groupBox)
self.label_2.setGeometry(QtCore.QRect(220, 30, 20, 20))
self.label_2.setObjectName("label_2")
self.Temperature = QtWidgets.QLineEdit(self.groupBox)
self.Temperature.setEnabled(False)
self.Temperature.setGeometry(QtCore.QRect(170, 90, 41, 20))
self.Temperature.setObjectName("Temperature")
self.Humidity = QtWidgets.QLineEdit(self.groupBox)
self.Humidity.setEnabled(False)
self.Humidity.setGeometry(QtCore.QRect(170, 120, 41, 20))
self.Humidity.setText("")
self.Humidity.setObjectName("Humidity")
self.Air = QtWidgets.QLineEdit(self.groupBox)
self.Air.setEnabled(False)
self.Air.setGeometry(QtCore.QRect(170, 150, 41, 20))
self.Air.setText("")
self.Air.setObjectName("Air")
self.checkBox = QtWidgets.QCheckBox(self.groupBox)
self.checkBox.setGeometry(QtCore.QRect(30, 90, 91, 17))
self.checkBox.setObjectName("checkBox")
self.checkBox_2 = QtWidgets.QCheckBox(self.groupBox)
self.checkBox_2.setGeometry(QtCore.QRect(30, 120, 70, 17))
self.checkBox_2.setObjectName("checkBox_2")
self.checkBox_3 = QtWidgets.QCheckBox(self.groupBox)
self.checkBox_3.setGeometry(QtCore.QRect(30, 150, 101, 17))
self.checkBox_3.setObjectName("checkBox_3")
self.label_10 = QtWidgets.QLabel(self.groupBox)
self.label_10.setGeometry(QtCore.QRect(280, 30, 47, 13))
self.label_10.setObjectName("label_10")
self.label_12 = QtWidgets.QLabel(self.groupBox)
self.label_12.setGeometry(QtCore.QRect(280, 60, 47, 13))
self.label_12.setObjectName("label_12")
self.groupBox_2 = QtWidgets.QGroupBox(self.tab)
self.groupBox_2.setGeometry(QtCore.QRect(10, 210, 391, 232))
self.groupBox_2.setObjectName("groupBox_2")
self.freqs = QtWidgets.QLineEdit(self.groupBox_2)
self.freqs.setGeometry(QtCore.QRect(170, 180, 101, 20))
self.freqs.setText("")
self.freqs.setObjectName("freqs")
self.label_8 = QtWidgets.QLabel(self.groupBox_2)
self.label_8.setGeometry(QtCore.QRect(30, 90, 131, 20))
self.label_8.setObjectName("label_8")
self.label_11 = QtWidgets.QLabel(self.groupBox_2)
self.label_11.setGeometry(QtCore.QRect(220, 90, 20, 20))
self.label_11.setObjectName("label_11")
self.label_5 = QtWidgets.QLabel(self.groupBox_2)
self.label_5.setGeometry(QtCore.QRect(30, 60, 131, 20))
self.label_5.setObjectName("label_5")
self.number = QtWidgets.QLineEdit(self.groupBox_2)
self.number.setGeometry(QtCore.QRect(170, 60, 41, 20))
self.number.setObjectName("number")
self.c_x = QtWidgets.QLineEdit(self.groupBox_2)
self.c_x.setGeometry(QtCore.QRect(170, 90, 41, 20))
self.c_x.setObjectName("c_x")
self.label_6 = QtWidgets.QLabel(self.groupBox_2)
self.label_6.setGeometry(QtCore.QRect(30, 120, 131, 20))
self.label_6.setObjectName("label_6")
self.circular = QtWidgets.QRadioButton(self.groupBox_2)
self.circular.setGeometry(QtCore.QRect(30, 30, 101, 17))
self.circular.setObjectName("circular")
self.angle = QtWidgets.QLineEdit(self.groupBox_2)
self.angle.setGeometry(QtCore.QRect(170, 150, 41, 20))
self.angle.setObjectName("angle")
self.inter = QtWidgets.QLineEdit(self.groupBox_2)
self.inter.setGeometry(QtCore.QRect(170, 120, 41, 20))
self.inter.setObjectName("inter")
self.c_y = QtWidgets.QLineEdit(self.groupBox_2)
self.c_y.setGeometry(QtCore.QRect(230, 90, 41, 20))
self.c_y.setObjectName("c_y")
self.Linear = QtWidgets.QRadioButton(self.groupBox_2)
self.Linear.setGeometry(QtCore.QRect(150, 30, 82, 17))
self.Linear.setChecked(True)
self.Linear.setObjectName("Linear")
self.label_7 = QtWidgets.QLabel(self.groupBox_2)
self.label_7.setGeometry(QtCore.QRect(30, 150, 131, 20))
self.label_7.setObjectName("label_7")
self.label_9 = QtWidgets.QLabel(self.groupBox_2)
self.label_9.setGeometry(QtCore.QRect(30, 180, 131, 20))
self.label_9.setObjectName("label_9")
self.label_13 = QtWidgets.QLabel(self.groupBox_2)
self.label_13.setGeometry(QtCore.QRect(280, 90, 47, 13))
self.label_13.setObjectName("label_13")
self.label_14 = QtWidgets.QLabel(self.groupBox_2)
self.label_14.setGeometry(QtCore.QRect(220, 120, 47, 13))
self.label_14.setObjectName("label_14")
self.label_15 = QtWidgets.QLabel(self.groupBox_2)
self.label_15.setGeometry(QtCore.QRect(220, 150, 47, 13))
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(self.groupBox_2)
self.label_16.setGeometry(QtCore.QRect(280, 170, 111, 31))
self.label_16.setScaledContents(False)
self.label_16.setWordWrap(True)
self.label_16.setObjectName("label_16")
self.simulate1 = QtWidgets.QPushButton(self.tab)
self.simulate1.setGeometry(QtCore.QRect(210, 490, 151, 23))
self.simulate1.setObjectName("simulate1")
self.save1 = QtWidgets.QPushButton(self.tab)
self.save1.setGeometry(QtCore.QRect(380, 490, 151, 23))
self.save1.setObjectName("save1")
self.plainTextEdit = QtWidgets.QPlainTextEdit(self.tab)
self.plainTextEdit.setGeometry(QtCore.QRect(410, 30, 351, 401))
self.plainTextEdit.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.plainTextEdit.setReadOnly(True)
self.plainTextEdit.setObjectName("plainTextEdit")
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.groupBox_3 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_3.setGeometry(QtCore.QRect(20, 20, 341, 101))
self.groupBox_3.setObjectName("groupBox_3")
self.label_17 = QtWidgets.QLabel(self.groupBox_3)
self.label_17.setGeometry(QtCore.QRect(30, 30, 51, 20))
self.label_17.setObjectName("label_17")
self.Azimuth = QtWidgets.QLineEdit(self.groupBox_3)
self.Azimuth.setGeometry(QtCore.QRect(180, 30, 41, 20))
self.Azimuth.setText("")
self.Azimuth.setObjectName("Azimuth")
self.Distance = QtWidgets.QLineEdit(self.groupBox_3)
self.Distance.setGeometry(QtCore.QRect(180, 60, 41, 20))
self.Distance.setText("")
self.Distance.setObjectName("Distance")
self.label_18 = QtWidgets.QLabel(self.groupBox_3)
self.label_18.setGeometry(QtCore.QRect(30, 60, 51, 20))
self.label_18.setObjectName("label_18")
self.label_45 = QtWidgets.QLabel(self.groupBox_3)
self.label_45.setGeometry(QtCore.QRect(230, 30, 47, 13))
self.label_45.setObjectName("label_45")
self.label_46 = QtWidgets.QLabel(self.groupBox_3)
self.label_46.setGeometry(QtCore.QRect(230, 60, 47, 13))
self.label_46.setObjectName("label_46")
self.groupBox_4 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_4.setGeometry(QtCore.QRect(20, 130, 341, 181))
self.groupBox_4.setObjectName("groupBox_4")
self.label_19 = QtWidgets.QLabel(self.groupBox_4)
self.label_19.setGeometry(QtCore.QRect(30, 30, 131, 20))
self.label_19.setObjectName("label_19")
self.SNR = QtWidgets.QLineEdit(self.groupBox_4)
self.SNR.setGeometry(QtCore.QRect(180, 30, 41, 20))
self.SNR.setText("")
self.SNR.setObjectName("SNR")
self.Sound = QtWidgets.QLineEdit(self.groupBox_4)
self.Sound.setGeometry(QtCore.QRect(180, 60, 41, 20))
self.Sound.setObjectName("Sound")
self.label_20 = QtWidgets.QLabel(self.groupBox_4)
self.label_20.setGeometry(QtCore.QRect(30, 60, 111, 20))
self.label_20.setObjectName("label_20")
self.label_21 = QtWidgets.QLabel(self.groupBox_4)
self.label_21.setGeometry(QtCore.QRect(30, 90, 111, 20))
self.label_21.setObjectName("label_21")
self.fs = QtWidgets.QLineEdit(self.groupBox_4)
self.fs.setGeometry(QtCore.QRect(180, 90, 41, 20))
self.fs.setText("")
self.fs.setObjectName("fs")
self.label_22 = QtWidgets.QLabel(self.groupBox_4)
self.label_22.setGeometry(QtCore.QRect(30, 120, 111, 20))
self.label_22.setObjectName("label_22")
self.ffts = QtWidgets.QLineEdit(self.groupBox_4)
self.ffts.setGeometry(QtCore.QRect(180, 120, 41, 20))
self.ffts.setObjectName("ffts")
self.label_23 = QtWidgets.QLabel(self.groupBox_4)
self.label_23.setGeometry(QtCore.QRect(30, 150, 111, 20))
self.label_23.setObjectName("label_23")
self.minb = QtWidgets.QLineEdit(self.groupBox_4)
self.minb.setGeometry(QtCore.QRect(180, 150, 41, 20))
self.minb.setText("")
self.minb.setObjectName("minb")
self.label_24 = QtWidgets.QLabel(self.groupBox_4)
self.label_24.setGeometry(QtCore.QRect(160, 150, 21, 20))
self.label_24.setObjectName("label_24")
self.maxb = QtWidgets.QLineEdit(self.groupBox_4)
self.maxb.setGeometry(QtCore.QRect(260, 150, 41, 20))
self.maxb.setText("")
self.maxb.setObjectName("maxb")
self.label_25 = QtWidgets.QLabel(self.groupBox_4)
self.label_25.setGeometry(QtCore.QRect(240, 150, 21, 20))
self.label_25.setObjectName("label_25")
self.label_47 = QtWidgets.QLabel(self.groupBox_4)
self.label_47.setGeometry(QtCore.QRect(230, 90, 47, 13))
self.label_47.setObjectName("label_47")
self.groupBox_5 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_5.setGeometry(QtCore.QRect(20, 320, 341, 161))
self.groupBox_5.setObjectName("groupBox_5")
self.checkBox_6 = QtWidgets.QCheckBox(self.groupBox_5)
self.checkBox_6.setGeometry(QtCore.QRect(30, 70, 91, 17))
self.checkBox_6.setObjectName("checkBox_6")
self.Air_2 = QtWidgets.QLineEdit(self.groupBox_5)
self.Air_2.setEnabled(False)
self.Air_2.setGeometry(QtCore.QRect(170, 130, 41, 20))
self.Air_2.setText("")
self.Air_2.setObjectName("Air_2")
self.checkBox_5 = QtWidgets.QCheckBox(self.groupBox_5)
self.checkBox_5.setGeometry(QtCore.QRect(30, 130, 101, 17))
self.checkBox_5.setObjectName("checkBox_5")
self.Humidity_2 = QtWidgets.QLineEdit(self.groupBox_5)
self.Humidity_2.setEnabled(False)
self.Humidity_2.setGeometry(QtCore.QRect(170, 100, 41, 20))
self.Humidity_2.setText("")
self.Humidity_2.setObjectName("Humidity_2")
self.Temperature_2 = QtWidgets.QLineEdit(self.groupBox_5)
self.Temperature_2.setEnabled(False)
self.Temperature_2.setGeometry(QtCore.QRect(170, 70, 41, 20))
self.Temperature_2.setObjectName("Temperature_2")
self.checkBox_4 = QtWidgets.QCheckBox(self.groupBox_5)
self.checkBox_4.setGeometry(QtCore.QRect(30, 100, 70, 17))
self.checkBox_4.setObjectName("checkBox_4")
self.label_35 = QtWidgets.QLabel(self.groupBox_5)
self.label_35.setGeometry(QtCore.QRect(280, 40, 47, 13))
self.label_35.setObjectName("label_35")
self.r_y = QtWidgets.QLineEdit(self.groupBox_5)
self.r_y.setGeometry(QtCore.QRect(230, 40, 41, 20))
self.r_y.setObjectName("r_y")
self.r_x = QtWidgets.QLineEdit(self.groupBox_5)
self.r_x.setGeometry(QtCore.QRect(170, 40, 41, 20))
self.r_x.setObjectName("r_x")
self.label_33 = QtWidgets.QLabel(self.groupBox_5)
self.label_33.setGeometry(QtCore.QRect(220, 40, 20, 20))
self.label_33.setObjectName("label_33")
self.label_34 = QtWidgets.QLabel(self.groupBox_5)
self.label_34.setGeometry(QtCore.QRect(30, 40, 131, 20))
self.label_34.setToolTip("")
self.label_34.setStatusTip("")
self.label_34.setWhatsThis("")
self.label_34.setObjectName("label_34")
self.groupBox_6 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_6.setGeometry(QtCore.QRect(380, 20, 391, 121))
self.groupBox_6.setObjectName("groupBox_6")
self.label_38 = QtWidgets.QLabel(self.groupBox_6)
self.label_38.setGeometry(QtCore.QRect(30, 30, 131, 20))
self.label_38.setObjectName("label_38")
self.number_2 = QtWidgets.QLineEdit(self.groupBox_6)
self.number_2.setGeometry(QtCore.QRect(170, 30, 41, 20))
self.number_2.setObjectName("number_2")
self.label_39 = QtWidgets.QLabel(self.groupBox_6)
self.label_39.setGeometry(QtCore.QRect(30, 60, 131, 20))
self.label_39.setObjectName("label_39")
self.angle_2 = QtWidgets.QLineEdit(self.groupBox_6)
self.angle_2.setGeometry(QtCore.QRect(170, 90, 41, 20))
self.angle_2.setObjectName("angle_2")
self.inter_2 = QtWidgets.QLineEdit(self.groupBox_6)
self.inter_2.setGeometry(QtCore.QRect(170, 60, 41, 20))
self.inter_2.setObjectName("inter_2")
self.label_40 = QtWidgets.QLabel(self.groupBox_6)
self.label_40.setGeometry(QtCore.QRect(30, 90, 131, 20))
self.label_40.setObjectName("label_40")
self.label_43 = QtWidgets.QLabel(self.groupBox_6)
self.label_43.setGeometry(QtCore.QRect(220, 60, 47, 13))
self.label_43.setObjectName("label_43")
self.label_44 = QtWidgets.QLabel(self.groupBox_6)
self.label_44.setGeometry(QtCore.QRect(220, 90, 47, 13))
self.label_44.setObjectName("label_44")
self.groupBox_7 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_7.setGeometry(QtCore.QRect(380, 150, 391, 251))
self.groupBox_7.setObjectName("groupBox_7")
self.music = QtWidgets.QCheckBox(self.groupBox_7)
self.music.setGeometry(QtCore.QRect(30, 30, 70, 17))
self.music.setChecked(True)
self.music.setObjectName("music")
self.srp = QtWidgets.QCheckBox(self.groupBox_7)
self.srp.setGeometry(QtCore.QRect(30, 70, 70, 17))
self.srp.setObjectName("srp")
self.cssm = QtWidgets.QCheckBox(self.groupBox_7)
self.cssm.setGeometry(QtCore.QRect(30, 110, 70, 17))
self.cssm.setObjectName("cssm")
self.waves = QtWidgets.QCheckBox(self.groupBox_7)
self.waves.setGeometry(QtCore.QRect(30, 150, 70, 17))
self.waves.setObjectName("waves")
self.tops = QtWidgets.QCheckBox(self.groupBox_7)
self.tops.setGeometry(QtCore.QRect(30, 190, 70, 17))
self.tops.setObjectName("tops")
self.label_36 = QtWidgets.QLabel(self.groupBox_7)
self.label_36.setGeometry(QtCore.QRect(110, 30, 271, 31))
self.label_36.setScaledContents(False)
self.label_36.setWordWrap(True)
self.label_36.setObjectName("label_36")
self.label_37 = QtWidgets.QLabel(self.groupBox_7)
self.label_37.setGeometry(QtCore.QRect(110, 70, 271, 31))
self.label_37.setWordWrap(True)
self.label_37.setObjectName("label_37")
self.label_41 = QtWidgets.QLabel(self.groupBox_7)
self.label_41.setGeometry(QtCore.QRect(110, 110, 271, 31))
self.label_41.setWordWrap(True)
self.label_41.setObjectName("label_41")
self.label_42 = QtWidgets.QLabel(self.groupBox_7)
self.label_42.setGeometry(QtCore.QRect(110, 150, 271, 31))
self.label_42.setWordWrap(True)
self.label_42.setObjectName("label_42")
self.label_48 = QtWidgets.QLabel(self.groupBox_7)
self.label_48.setGeometry(QtCore.QRect(110, 180, 271, 31))
self.label_48.setWordWrap(True)
self.label_48.setObjectName("label_48")
self.groupBox_8 = QtWidgets.QGroupBox(self.tab_2)
self.groupBox_8.setGeometry(QtCore.QRect(380, 410, 391, 71))
self.groupBox_8.setObjectName("groupBox_8")
self.angle_3 = QtWidgets.QLineEdit(self.groupBox_8)
self.angle_3.setGeometry(QtCore.QRect(170, 90, 41, 20))
self.angle_3.setObjectName("angle_3")
self.label_52 = QtWidgets.QLabel(self.groupBox_8)
self.label_52.setGeometry(QtCore.QRect(30, 90, 131, 20))
self.label_52.setObjectName("label_52")
self.label_54 = QtWidgets.QLabel(self.groupBox_8)
self.label_54.setGeometry(QtCore.QRect(220, 90, 47, 13))
self.label_54.setObjectName("label_54")
self.label_50 = QtWidgets.QLabel(self.groupBox_8)
self.label_50.setGeometry(QtCore.QRect(10, 30, 101, 16))
self.label_50.setObjectName("label_50")
self.label_51 = QtWidgets.QLabel(self.groupBox_8)
self.label_51.setGeometry(QtCore.QRect(10, 50, 31, 16))
self.label_51.setObjectName("label_51")
self.label_53 = QtWidgets.QLabel(self.groupBox_8)
self.label_53.setGeometry(QtCore.QRect(110, 10, 47, 13))
self.label_53.setObjectName("label_53")
self.label_55 = QtWidgets.QLabel(self.groupBox_8)
self.label_55.setGeometry(QtCore.QRect(150, 10, 47, 13))
self.label_55.setObjectName("label_55")
self.label_57 = QtWidgets.QLabel(self.groupBox_8)
self.label_57.setGeometry(QtCore.QRect(250, 10, 47, 13))
self.label_57.setObjectName("label_57")
self.label_58 = QtWidgets.QLabel(self.groupBox_8)
self.label_58.setGeometry(QtCore.QRect(300, 10, 47, 13))
self.label_58.setObjectName("label_58")
self.label_60 = QtWidgets.QLabel(self.groupBox_8)
self.label_60.setGeometry(QtCore.QRect(210, 10, 47, 13))
self.label_60.setObjectName("label_60")
self.label_56 = QtWidgets.QLabel(self.groupBox_8)
self.label_56.setGeometry(QtCore.QRect(110, 30, 31, 16))
self.label_56.setScaledContents(False)
self.label_56.setAlignment(QtCore.Qt.AlignCenter)
self.label_56.setObjectName("label_56")
self.label_61 = QtWidgets.QLabel(self.groupBox_8)
self.label_61.setGeometry(QtCore.QRect(160, 30, 31, 16))
self.label_61.setScaledContents(False)
self.label_61.setAlignment(QtCore.Qt.AlignCenter)
self.label_61.setObjectName("label_61")
self.label_62 = QtWidgets.QLabel(self.groupBox_8)
self.label_62.setGeometry(QtCore.QRect(210, 30, 31, 16))
self.label_62.setScaledContents(False)
self.label_62.setAlignment(QtCore.Qt.AlignCenter)
self.label_62.setObjectName("label_62")
self.label_63 = QtWidgets.QLabel(self.groupBox_8)
self.label_63.setGeometry(QtCore.QRect(250, 30, 31, 16))
self.label_63.setScaledContents(False)
self.label_63.setAlignment(QtCore.Qt.AlignCenter)
self.label_63.setObjectName("label_63")
self.label_64 = QtWidgets.QLabel(self.groupBox_8)
self.label_64.setGeometry(QtCore.QRect(300, 30, 31, 16))
self.label_64.setScaledContents(False)
self.label_64.setAlignment(QtCore.Qt.AlignCenter)
self.label_64.setObjectName("label_64")
self.label_66 = QtWidgets.QLabel(self.groupBox_8)
self.label_66.setGeometry(QtCore.QRect(110, 50, 31, 16))
self.label_66.setScaledContents(False)
self.label_66.setAlignment(QtCore.Qt.AlignCenter)
self.label_66.setObjectName("label_66")
self.label_67 = QtWidgets.QLabel(self.groupBox_8)
self.label_67.setGeometry(QtCore.QRect(160, 50, 31, 16))
self.label_67.setScaledContents(False)
self.label_67.setAlignment(QtCore.Qt.AlignCenter)
self.label_67.setObjectName("label_67")
self.label_68 = QtWidgets.QLabel(self.groupBox_8)
self.label_68.setGeometry(QtCore.QRect(210, 50, 31, 16))
self.label_68.setScaledContents(False)
self.label_68.setAlignment(QtCore.Qt.AlignCenter)
self.label_68.setObjectName("label_68")
self.label_69 = QtWidgets.QLabel(self.groupBox_8)
self.label_69.setGeometry(QtCore.QRect(250, 50, 31, 16))
self.label_69.setScaledContents(False)
self.label_69.setAlignment(QtCore.Qt.AlignCenter)
self.label_69.setObjectName("label_69")
self.label_70 = QtWidgets.QLabel(self.groupBox_8)
self.label_70.setGeometry(QtCore.QRect(300, 50, 31, 16))
self.label_70.setScaledContents(False)
self.label_70.setAlignment(QtCore.Qt.AlignCenter)
self.label_70.setObjectName("label_70")
self.simulate2 = QtWidgets.QPushButton(self.tab_2)
self.simulate2.setGeometry(QtCore.QRect(210, 490, 151, 23))
self.simulate2.setObjectName("simulate2")
self.save2 = QtWidgets.QPushButton(self.tab_2)
self.save2.setGeometry(QtCore.QRect(380, 490, 151, 23))
self.save2.setObjectName("save2")
self.tabWidget.addTab(self.tab_2, "")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Beamforming & DOA"))
self.groupBox.setTitle(_translate("MainWindow", "Room Parameters"))
self.label.setText(_translate("MainWindow", "Room Size: X"))
self.label_3.setText(_translate("MainWindow", "Source Position: X"))
self.label_4.setText(_translate("MainWindow", "Y"))
self.label_2.setText(_translate("MainWindow", "Y"))
self.checkBox.setText(_translate("MainWindow", "Tempurature"))
self.checkBox_2.setText(_translate("MainWindow", "Humidity"))
self.checkBox_3.setText(_translate("MainWindow", "Air Absorption"))
self.label_10.setText(_translate("MainWindow", "Meters"))
self.label_12.setText(_translate("MainWindow", "Meters"))
self.groupBox_2.setTitle(_translate("MainWindow", "Antennas Parameters"))
self.label_8.setText(_translate("MainWindow", "Center: X"))
self.label_11.setText(_translate("MainWindow", "Y"))
self.label_5.setText(_translate("MainWindow", "Number of Antenna: "))
self.label_6.setText(_translate("MainWindow", "Inter Antenna Distance:"))
self.circular.setText(_translate("MainWindow", "Circular Array"))
self.Linear.setText(_translate("MainWindow", "Linear Array"))
self.label_7.setText(_translate("MainWindow", "Angle: "))
self.label_9.setText(_translate("MainWindow", "Array of frequences (Hz):"))
self.label_13.setText(_translate("MainWindow", "Meters"))
self.label_14.setText(_translate("MainWindow", "Meters"))
self.label_15.setText(_translate("MainWindow", "Degrees"))
self.label_16.setText(_translate("MainWindow", "Separated by a comma: , "))
self.simulate1.setText(_translate("MainWindow", "Simulate"))
self.save1.setText(_translate("MainWindow", "Save"))
self.plainTextEdit.setPlainText(_translate("MainWindow", "Room size: The size of the room to simulate in \n"
"\n"
"Source Position: The position of the source in the room\n"
"\n"
"Temperature: Simulate the temperature in the room ( If not checked use a default tempurature)\n"
"\n"
"Humidity: Simulate the Humidity in the room ( If not checked use a default humidity)\n"
"\n"
"Air absorption: Simulate the Air absorption in the room ( If not checked there is no air absorption)\n"
"\n"
"Circulaire Array: Position the antennas in a circule of radius=\"Inter Mic Distance\"\n"
"\n"
"Linear Array: Position the antennas in a line distanced by \"Inter Mic Distance\"\n"
"\n"
"Center: Center of the Antennas Array\n"
"\n"
"Inter Antenna Distance: Distance between the antennas\n"
"\n"
"Angle: Antenna Angle\n"
"\n"
"Array of frequences: Simulate the beamforming for the specified frequences\n"
"\n"
""))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Beamforming"))
self.groupBox_3.setTitle(_translate("MainWindow", "Original Source Location"))
self.label_17.setText(_translate("MainWindow", "Azimuth:"))
self.label_18.setText(_translate("MainWindow", "Distance:"))
self.label_45.setText(_translate("MainWindow", "Degrees"))
self.label_46.setText(_translate("MainWindow", "Meters"))
self.groupBox_4.setTitle(_translate("MainWindow", "Algorithms Parameters"))
self.label_19.setText(_translate("MainWindow", "Signal to noise ratio (SNR):"))
self.Sound.setText(_translate("MainWindow", | |
be a str without space ' ' or underscore '_'
(automatically removed if present)
Type : None / str
Type of object (i.e.: 'Tor' or 'Lin' for a :class:`~tofu.geom.Ves`)
Deg : None / int
Degree of the b-splines constituting the :mod:`tofu.mesh` object
Exp : None / str
Flag specifying the experiment (e.g.: 'WEST', 'AUG', 'ITER', 'JET'...)
Diag : None / str
Flag indicating the diagnostic (e.g.: 'SXR', 'HXR', 'Bolo'...)
shot : None / int
A shot number from which the instance is valid (for tracking changes)
SaveName : None / str
Overrides the default file name for saving (not recommended)
SavePath : None / str
Absolute path where the instance should be saved
dUSR : None / dict
A user-defined dictionary containing information about the instance
All info considered relevant can be passed here
(e.g.: thickness of the diode, date of installation...)
lObj : None / dict / list
Either:
- list: list of other ID instances of objects on which the created object depends
(this list will then be sorted by class and formatted into a dictionary storing key attributes)
- dict: a ready-made such dictionary
"""
# Mapping from tofu sub-package flag to the file-name prefix used in SaveName
_dModes = {'geom':'TFG', 'data':'TFD'}
# Default fields included in an auto-generated SaveName (see SaveName_Conv)
_defInclude = ['Mod','Cls','Type','Exp','Deg','Diag','Name','shot']
# Per-field string prefixes used when assembling a SaveName
_dPref = {'Exp':'Exp','Diag':'Dg','shot':'sh','Deg':'Deg',
          'version':'Vers','usr':'U'}
def __init__(self, Cls=None, Name=None, Type=None, Deg=None,
             Exp=None, Diag=None, shot=None, SaveName=None,
             SavePath=None, usr=None, dUSR=None, lObj=None,
             fromdict=None, include=None, sep=None):
    """ Forward all constructor arguments to the base class as keywords.

    NOTE: locals() must be the first statement so that kwdargs contains
    exactly the arguments of this call (plus 'self', removed just after).
    """
    kwdargs = locals()
    del kwdargs['self']
    super(ID, self).__init__(**kwdargs)
def _reset(self):
self._dall = dict.fromkeys(self._get_keys_dall())
###########
# Get largs
###########
@staticmethod
def _get_largs_dall():
largs = ['Cls', 'Name', 'Type', 'Deg',
'Exp', 'Diag', 'shot', 'SaveName',
'SavePath', 'usr', 'dUSR', 'lObj', 'include']
return largs
###########
# Get check and format inputs
###########
@staticmethod
def _checkformat_inputs_dall(usr=None, Cls=None, Type=None,
                             SavePath=None, Exp=None, Diag=None,
                             shot=None, Deg=None, Name=None,
                             SaveName=None, include=None,
                             lObj=None, dUSR=None):
    """ Check and format the identity arguments

    Returns a dict mapping each argument name to its (possibly updated)
    value; 'usr' defaults to the current login name when not provided.

    Raises
    ------
    AssertionError : if a str-typed argument is neither None nor str
    Exception : if shot / Deg / Cls / include fail their checks
    """
    # Str args
    ls = [usr, Type, SavePath, Exp, Diag, SaveName]
    assert all(ss is None or type(ss) is str for ss in ls)
    if usr is None:
        # Best-effort only: leave usr as None if the login name cannot be
        # determined (narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit)
        try:
            usr = getpass.getuser()
        except Exception:
            pass
    lc = [shot is None or (type(shot) is int and shot >= 0),
          Deg is None or (type(Deg) is int and Deg >= 0),
          Cls is not None and issubclass(Cls, ToFuObject),
          include is None or isinstance(include, list)]
    if not all(lc):
        msg = ""
        if not lc[0]:
            msg += ("\nArg shot should be either:\n"
                    + "\t- None\n"
                    + "\t- int and positive\n"
                    + "  You provided: {}".format(shot))
        if not lc[1]:
            msg += ("\nArg Deg should be either:\n"
                    + "\t- None\n"
                    + "\t- int and positive\n"
                    + "  You provided: {}".format(Deg))
        if not lc[2]:
            msg += ("\nArg Cls should be a ToFuObject subclass!"
                    + "  You provided: {}".format(Cls))
        if not lc[3]:
            msg += ("\nArg include should be either:\n"
                    + "\t- None\n"
                    + "\t- list\n"
                    + "  You provided: {}".format(include))
        raise Exception(msg)
    # Return only the checked arguments; the previous locals()-based
    # version leaked the internal 'lc' helper list into the returned dict
    return {'usr': usr, 'Cls': Cls, 'Type': Type, 'SavePath': SavePath,
            'Exp': Exp, 'Diag': Diag, 'shot': shot, 'Deg': Deg,
            'Name': Name, 'SaveName': SaveName, 'include': include,
            'lObj': lObj, 'dUSR': dUSR}
###########
# Get keys of dictionnaries
###########
@staticmethod
def _get_keys_dall():
lk = ['Mod', 'Cls', 'Type', 'Name', 'SaveName',
'SavePath', 'Exp', 'Diag', 'shot', 'Deg',
'version', 'usr', 'dUSR', 'lObj', 'SaveName-usr']
return lk
###########
# _init
###########
def _init(self, usr=None, Cls=None, Type=None, SavePath=None,
          Exp=None, Diag=None, shot=None, Deg=None,
          Name=None, SaveName=None, include=None,
          lObj=None, dUSR=None, **kwdargs):
    """ Forward the relevant constructor arguments to _set_dall() """
    largs = self._get_largs_dall()
    # _extract_kwdargs() picks from locals() only the keys listed in largs
    kwd = self._extract_kwdargs(locals(), largs)
    # NOTE(review): _set_dall() returns None, so rebinding 'largs' here
    # simply discards the previous value
    largs = self._set_dall(**kwd)
###########
# set dictionaries
###########
def _set_dall(self, usr=None, Cls=None, Type=None, SavePath=None,
              Exp=None, Diag=None, shot=None, Deg=None,
              Name=None, SaveName=None, include=None,
              lObj=None, dUSR=None):
    """ Check the inputs and populate the _dall identity dictionary """
    # NOTE: locals() must remain the first executable statement so dargs
    # contains exactly the arguments of this call (plus 'self')
    dargs = locals()
    del dargs['self']
    dargs = ID._checkformat_inputs_dall(**dargs)
    self._dall['version'] = __version__
    # Arguments stored verbatim in the identity dict
    lasis = ['usr','Type','SavePath','Exp','Diag','shot','Deg']
    dasis = dict([(k,dargs[k]) for k in lasis])
    self._dall.update(dasis)
    # Set fixed attributes
    Mod, Cls = ID._extract_ModClsFrom_class(dargs['Cls'])
    self._dall['Mod'] = Mod
    self._dall['Cls'] = Cls
    # Set variable attributes
    self.set_Name(Name, SaveName=SaveName, include=include)
    self.set_lObj(lObj)
    self.set_dUSR(dUSR)
###########
# strip dictionaries
###########
def _strip_dall(self, lkeep=[]):
    # ID holds no heavy data: nothing to strip
    pass
###########
# rebuild dictionaries
###########
def _rebuild_dall(self, lkeep=[]):
    # Nothing was stripped, so nothing to rebuild
    pass
###########
# _strip and get/from dict
###########
@classmethod
def _strip_init(cls):
    """ Initialize the class-level strip configuration (level 0 only) """
    cls._dstrip['allowed'] = [0]
    nMax = max(cls._dstrip['allowed'])
    # Inject the allowed levels into the inherited strip() docstring
    doc = ""
    doc = ToFuObjectBase.strip.__doc__.format(doc,nMax)
    cls.strip.__doc__ = doc
def strip(self, strip=0):
    # Docstring is set dynamically by _strip_init()
    super(ID,self).strip(strip=strip)
def _strip(self, strip=0):
    # No strippable data on ID instances
    pass
def _to_dict(self):
dout = {'dall':{'dict':self.dall, 'lexcept':None}}
return dout
def _from_dict(self, fd):
self._dall.update(**fd['dall'])
if 'version' in fd.keys() and fd['version']!=__version__:
msg = "The object was created with a different tofu version !"
msg += "\n object: {0}".format(fd['SaveName'])
msg += "\n original : tofu {0}".fd['version']
msg += "\n current : tofu {0}".__version__
warnings.warn(msg)
###########
# Properties
###########
@property
def dall(self):
    """ The identity dictionary (read-only view) """
    return self._dall
@property
def Mod(self):
    """ The tofu module flag (e.g.: 'geom', 'data') """
    return self._dall['Mod']
@property
def Cls(self):
    """ The class name of the identified object """
    return self._dall['Cls']
@property
def Name(self):
    """ The user-provided name of the instance """
    return self._dall['Name']
@property
def NameLTX(self):
return r"$"+self.Name.replace('_','\_')+r"$"
@property
def Exp(self):
    """ The experiment flag (e.g.: 'WEST', 'ITER') """
    return self._dall['Exp']
@property
def Diag(self):
    """ The diagnostic flag (e.g.: 'SXR', 'Bolo') """
    return self._dall['Diag']
@property
def shot(self):
    """ The shot number from which the instance is valid """
    return self._dall['shot']
@property
def usr(self):
    """ The user name recorded at creation """
    return self._dall['usr']
@property
def Type(self):
    """ The object type flag (e.g.: 'Tor', 'Lin') """
    return self._dall['Type']
@property
def Deg(self):
    """ The b-spline degree (mesh objects only) """
    return self._dall['Deg']
@property
def SaveName(self):
    """ The file name used for saving """
    return self._dall['SaveName']
@property
def SavePath(self):
    """ The absolute path used for saving """
    return self._dall['SavePath']
@property
def lObj(self):
    """ The dictionary of dependency objects """
    return self._dall['lObj']
@property
def dUSR(self):
    """ The user-defined information dictionary """
    return self._dall['dUSR']
@property
def version(self):
    """ The tofu version the instance was created with """
    return self._dall['version']
###########
# semi-public methods
###########
@staticmethod
def _extract_ModClsFrom_class(Cls):
strc = str(Cls)
ind0 = strc.index('tofu.')+5
indeol = strc.index("'>")
strc = strc[ind0:indeol]
indp = strc.index('.')
Mod = strc[:indp]
strc = strc[indp+1:][::-1]
#cls = strc[:strc.index('.')][::-1]
return Mod, Cls.__name__
@staticmethod
def SaveName_Conv(Mod=None, Cls=None, Type=None, Name=None, Deg=None,
Exp=None, Diag=None, shot=None, version=None, usr=None,
include=None):
""" Return a default name for saving the object
Includes key info for fast identification of the object from file name
Used on object creation by :class:`~tofu.pathfile.ID`
It is recommended to use this default name.
"""
Modstr = ID._dModes[Mod] if Mod is not None else None
include = ID._defInclude if include is None else include
if Cls is not None and Type is not None and 'Type' in include:
Clsstr = Cls+Type
else:
Clsstr = Cls
Dict = {'Mod':Modstr, 'Cls':Clsstr, 'Name':Name}
for ii in include:
if not ii in ['Mod','Cls','Type','Name']:
Dict[ii] = None
if ii=='Deg' and Deg is not None:
Dict[ii] = ID._dPref[ii]+'{0:02.0f}'.format(Deg)
elif ii=='shot' and shot is not None:
Dict[ii] = ID._dPref[ii]+'{0:05.0f}'.format(shot)
elif not (ii in ['Mod','Cls','Type','Name'] or eval(ii+' is None')):
Dict[ii] = ID._dPref[ii]+eval(ii)
if 'Data' in Cls:
Order = ['Mod','Cls','Exp','Deg','Diag','shot',
'Name','version','usr']
else:
Order = ['Mod','Cls','Exp','Deg','Diag','Name',
'shot','version','usr']
SVN = ""
for ii in range(0,len(Order)):
if Order[ii] in include and Dict[Order[ii]] is not None:
SVN += '_' + Dict[Order[ii]]
SVN = SVN.replace('__','_')
if SVN[0]=='_':
SVN = SVN[1:]
return SVN
###########
# public methods
###########
def set_Name(self, Name, SaveName=None,
             include=None,
             ForceUpdate=False):
    """ Set the Name of the instance, automatically updating the SaveName

    The name should be a str without spaces or underscores (removed)
    When the name is changed, if SaveName (i.e. the name used for saving)
    was not user-defined, it is automatically updated

    Parameters
    ----------
    Name : str
        Name of the instance, without ' ' or '_' (automatically removed)
    SaveName : None / str
        If provided, overrides the default name for saving (not recommended)
    include: list
        Controls how the default SaveName is generated
        Each element of the list is a key str indicating whether an element
        should be present in the SaveName
    ForceUpdate : bool
        If True, a user-defined SaveName is regenerated anyway
    """
    self._dall['Name'] = Name
    self.set_SaveName(SaveName=SaveName, include=include,
                      ForceUpdate=ForceUpdate)
def set_SaveName(self,SaveName=None,
include=None,
ForceUpdate=False):
""" Set the name for saving the instance (SaveName)
SaveName can be either:
- provided by the user (no constraint) - not recommended
- automatically generated from Name and key attributes (cf. include)
Parameters
----------
SaveName : None / str
If provided, overrides the default name for saving (not recommended)
include : list
Controls how te default SaveName is generated
Each element of the list is a key str indicating whether an element
should be present in the SaveName
ForceUpdate : bool
Flag indicating the behaviour when SaveName=None:
- True : A new SaveName is generated, overriding the old one
- False : The former SaveName is preserved (default)
"""
if not 'SaveName-usr' in self.dall.keys():
self._dall['SaveName-usr'] = (SaveName is not None)
# If SaveName provided by user, override
if SaveName is not None:
self._dall['SaveName'] = SaveName
self._dall['SaveName-usr'] = True
else:
# Don't update if former is user-defined and ForceUpdate is False
# Override if previous was:
# automatic or (user-defined but ForceUpdate is True)
C0 = self._dall['SaveName-usr']
C1 = self._dall['SaveName-usr'] and ForceUpdate
if (not C0) or C1:
SN = ID.SaveName_Conv(Mod=self.Mod, Cls=self.Cls,
Type=self.Type, Name=self.Name,
Deg=self.Deg, | |
fs = float(matData["fs"][0,0])
else:
fs = 1
return {'data': data, 'fs': fs, 'wfmID': wfmID, 'wfmFormat': wfmFormat}
def sine_generator(fs=100e6, freq=0, phase=0, wfmFormat='iq', zeroLast=False):
    """
    Generates a sine wave with optional frequency offset and initial
    phase at baseband or RF.

    Args:
        fs (float): Sample rate used to create the signal.
        freq (float): Sine wave frequency.
        phase (float): Sine wave initial phase (radians).
        wfmFormat (str): Selects waveform format. ('iq', 'real')
        zeroLast (bool): Allows user to force the last sample point to 0.

    Returns:
        (NumPy array): Array containing the complex or real values of the waveform.

    Raises:
        error.WfmBuilderError: on Nyquist violation or invalid wfmFormat.
    """
    if abs(freq) > fs / 2:
        raise error.WfmBuilderError('Frequency violates Nyquist. Decrease frequency or increase sample rate')

    # Record length: 100 cycles of the tone, or a fixed 10000 samples for
    # a 0 Hz (DC) tone.
    if freq:
        time = 100 / freq
    else:
        time = 10000 / fs
    t = np.linspace(-time / 2, time / 2, int(time * fs), endpoint=False)

    if wfmFormat.lower() == 'iq':
        # BUG FIX: the initial phase must be applied inside the complex
        # exponential. Previously it was ADDED to the waveform samples
        # (np.exp(...) + phase), which offsets the signal instead of
        # rotating it and is inconsistent with the 'real' branch below.
        iq = np.exp(1j * (2 * np.pi * freq * t + phase))
        if zeroLast:
            iq[-1] = 0 + 1j*0
        return iq
    elif wfmFormat.lower() == 'real':
        real = np.cos(2 * np.pi * freq * t + phase)
        if zeroLast:
            real[-1] = 0
        return real
    else:
        raise error.WfmBuilderError('Invalid waveform wfmFormat selected. Choose "iq" or "real".')
def am_generator(fs=100e6, amDepth=50, modRate=100e3, cf=1e9, wfmFormat='iq', zeroLast=False):
    """
    Generates a sinusoidal AM signal at baseband or RF.

    Args:
        fs (float): Sample rate used to create the signal.
        amDepth (int): Depth of AM in %.
        modRate (float): AM rate in Hz.
        cf (float): Center frequency for real format waveforms.
        wfmFormat (str): Waveform format. ('iq', 'real')
        zeroLast (bool): Force the last sample point to 0.

    Returns:
        (NumPy array): Array containing the complex or real values of the waveform.
    """
    if not 0 < amDepth <= 100:
        raise error.WfmBuilderError('AM Depth out of range, must be 0 - 100.')
    if modRate > fs:
        raise error.WfmBuilderError('Modulation rate violates Nyquist. Decrease modulation rate or increase sample rate.')

    # One full modulation period, centered on t = 0.
    period = 1 / modRate
    t = np.linspace(-period / 2, period / 2, int(period * fs), endpoint=False)
    envelope = (amDepth / 100) * np.sin(2 * np.pi * modRate * t) + 1

    fmt = wfmFormat.lower()
    if fmt == 'iq':
        iq = envelope * np.exp(1j * t)
        # Scale so the peak magnitude sits at 0.707 (-3 dBFS).
        iq = iq / abs(np.amax(iq)) * 0.707
        if zeroLast:
            iq[-1] = 0 + 1j*0
        return iq
    if fmt == 'real':
        real = envelope * np.cos(2 * np.pi * cf * t)
        real = real / np.amax(real)
        if zeroLast:
            real[-1] = 0
        return real
    raise error.WfmBuilderError('Invalid waveform format selected. Choose "iq" or "real".')
def cw_pulse_generator(fs=100e6, pWidth=10e-6, pri=100e-6, freqOffset=0, cf=1e9, wfmFormat='iq', zeroLast=False):
    """
    Generates an unmodulated cw pulse at baseband or RF.

    Args:
        fs (float): Sample rate used to create the signal.
        pWidth (float): Length of the pulse in seconds.
        pri (float): Pulse repetition interval in seconds.
        freqOffset (float): Frequency offset from cf.
        cf (float): Carrier frequency of the pulse in Hz (only used if generating a 'real' waveform).
        wfmFormat (str): Waveform format. ('iq' or 'real')
        zeroLast (bool): Force the last sample point to 0.

    Returns:
        (NumPy array): Array containing the complex or real values of the waveform.
    """
    # NOTE(review): a stricter check would be abs(freqOffset) > fs / 2;
    # kept as-is to preserve accepted inputs.
    if freqOffset > fs:
        raise error.WfmBuilderError('Frequency offset violates Nyquist. Reduce freqOffset or increase sample rate.')
    # Time vector centered on 0 over the "on" portion of the pulse.
    rl = int(fs * pWidth)
    t = np.linspace(-rl / fs / 2, rl / fs / 2, rl, endpoint=False)

    if wfmFormat.lower() == 'iq':
        iq = np.exp(2 * np.pi * freqOffset * 1j * t)
        if zeroLast:
            iq[-1] = 0
        # Pad with off-time (zeros) when the PRI exceeds the pulse width.
        if pri > pWidth:
            deadTime = np.zeros(int(fs * pri - rl))
            iq = np.append(iq, deadTime)
        return iq
    elif wfmFormat.lower() == 'real':
        # BUG FIX: the pri <= pWidth branch previously used cf alone,
        # silently dropping freqOffset; both branches now use the same
        # (cf + freqOffset) carrier.
        if pri <= pWidth:
            real = np.cos(2 * np.pi * (cf + freqOffset) * t)
        else:
            deadTime = np.zeros(int(fs * pri - rl))
            real = np.append(np.cos(2 * np.pi * (cf + freqOffset) * t), deadTime)
        return real
    else:
        raise error.WfmBuilderError('Invalid waveform format selected. Choose "iq" or "real".')
def chirp_generator(fs=100e6, pWidth=10e-6, pri=100e-6, chirpBw=20e6, cf=1e9, wfmFormat='iq', zeroLast=False):
    """
    Generates a symmetrical linear chirp at baseband or RF. Chirp direction
    is determined by the sign of chirpBw (pos=up chirp, neg=down chirp).

    Args:
        fs (float): Sample rate used to create the signal.
        pWidth (float): Length of the chirp in seconds.
        pri (float): Pulse repetition interval in seconds.
        chirpBw (float): Total bandwidth of the chirp (sign sets direction).
        cf (float): Carrier frequency for real format waveforms.
        wfmFormat (str): Waveform format. ('iq', 'real')
        zeroLast (bool): Force the last sample point to 0.

    Returns:
        (NumPy array): Array containing the complex or real values of the waveform.
    """
    # BUG FIX: the docstring promises negative chirpBw = down chirp, but
    # the old checks rejected chirpBw <= 0. Validate on the magnitude and
    # only reject a zero bandwidth.
    if abs(chirpBw) > fs:
        raise error.WfmBuilderError('Chirp Bandwidth violates Nyquist.')
    if chirpBw == 0:
        raise error.WfmBuilderError('Chirp Bandwidth must be a non-zero value.')
    if pWidth <= 0 or pri <= 0:
        raise error.WfmBuilderError('Pulse width and PRI must be positive values.')

    """Define baseband iq waveform. Create a time vector that goes from
    -1/2 to 1/2 instead of 0 to 1. This ensures that the chirp will be
    symmetrical around the carrier."""
    rl = int(fs * pWidth)
    chirpRate = chirpBw / pWidth
    t = np.linspace(-rl / fs / 2, rl / fs / 2, rl, endpoint=False)

    """Direct phase manipulation was used to create the chirp modulation.
    https://en.wikipedia.org/wiki/Chirp#Linear
    phase = 2*pi*(f0*t + k/2*t^2)
    Since this is a baseband modulation scheme, there is no f0 term and the
    factors of 2 cancel out. It looks odd to have a pi multiplier rather than
    2*pi, but the math works out correctly. Just throw that into the complex
    exponential function and you're off to the races."""
    mod = np.pi * chirpRate * t**2

    if wfmFormat.lower() == 'iq':
        iq = np.exp(1j * mod)
        if zeroLast:
            iq[-1] = 0
        if pri > pWidth:
            deadTime = np.zeros(int(fs * pri - rl))
            iq = np.append(iq, deadTime)
        return iq
    elif wfmFormat.lower() == 'real':
        if pri <= pWidth:
            real = np.cos(2 * np.pi * cf * t + mod)
        else:
            deadTime = np.zeros(int(fs * pri - rl))
            real = np.append(np.cos(2 * np.pi * cf * t + mod), deadTime)
        return real
    else:
        raise error.WfmBuilderError('Invalid waveform format selected. Choose "iq" or "real".')
def barker_generator(fs=100e6, pWidth=10e-6, pri=100e-6, code='b2', cf=1e9, wfmFormat='iq', zeroLast=False):
    """
    Generates a Barker phase coded signal at baseband or RF.

    Args:
        fs (float): Sample rate used to create the signal.
        pWidth (float): Length of the pulse in seconds.
        pri (float): Pulse repetition interval in seconds.
        code (str): Barker code order. ('b2', 'b3', 'b41', 'b42', 'b5',
            'b7', 'b11', 'b13')
        cf (float): Carrier frequency for real format waveforms.
        wfmFormat (str): Waveform format. ('iq', 'real')
        zeroLast (bool): Force the last sample point to 0.

    Returns:
        (NumPy array): Array containing the complex or real values of the waveform.
    """
    if pWidth <= 0 or pri <= 0:
        raise error.WfmBuilderError('Pulse width and PRI must be positive values.')
    # Codes taken from https://en.wikipedia.org/wiki/Barker_code
    barkerCodes = {'b2': [1, -1], 'b3': [1, 1, -1],
                   'b41': [1, 1, -1, 1], 'b42': [1, 1, 1, -1],
                   'b5': [1, 1, 1, -1, 1], 'b7': [1, 1, 1, -1, -1, 1, -1],
                   'b11': [1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1],
                   'b13': [1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1]}
    # ROBUSTNESS FIX: an unknown code previously escaped as a raw
    # KeyError; raise the module's error type with a helpful message.
    if code not in barkerCodes:
        raise error.WfmBuilderError('Invalid Barker code "{}". Choose from {}.'.format(
            code, sorted(barkerCodes)))

    # Build one constant-phase segment per code chip and concatenate.
    codeSamples = int(pWidth / len(barkerCodes[code]) * fs)
    rl = codeSamples * len(barkerCodes[code])
    barker = []
    for p in barkerCodes[code]:
        temp = np.full((codeSamples,), p)
        barker = np.concatenate([barker, temp])
    # Map the +/-1 chips to +/-90 degree phase shifts.
    mod = np.pi / 2 * barker

    if wfmFormat.lower() == 'iq':
        iq = np.exp(1j * mod)
        if zeroLast:
            iq[-1] = 0 + 0j
        if pri > pWidth:
            deadTime = np.zeros(int(fs * pri - rl))
            iq = np.append(iq, deadTime)
        return iq
    elif wfmFormat.lower() == 'real':
        t = np.linspace(-rl / fs / 2, rl / fs / 2, rl, endpoint=False)
        if pri <= pWidth:
            real = np.cos(2 * np.pi * cf * t + mod)
        else:
            deadTime = np.zeros(int(fs * pri - rl))
            real = np.append(np.cos(2 * np.pi * cf * t + mod), deadTime)
        return real
    else:
        raise error.WfmBuilderError('Invalid waveform format selected. Choose "iq" or "real".')
def multitone_generator(fs=100e6, | |
import CHANGED
from persistent._compat import _b
cache = self._makeOne()
oids = []
for i in range(100):
oid = _b('oid_%04d' % i)
oids.append(oid)
state = UPTODATE if i > 0 else CHANGED
cache[oid] = self._makePersist(oid=oid, state=state)
self.assertEqual(cache.cache_non_ghost_count, 100)
cache.full_sweep()
gc.collect() # banish the ghosts who are no longer in the ring
self.assertEqual(cache.cache_non_ghost_count, 1)
self.assertTrue(cache.get(oids[0]) is not None)
for oid in oids[1:]:
self.assertTrue(cache.get(oid) is None)
def test_minimize(self):
    """minimize() must ghostify every object and empty the non-ghost ring."""
    import gc
    from persistent.interfaces import UPTODATE
    from persistent._compat import _b
    cache = self._makeOne()
    oids = []
    # Fill the cache with 100 up-to-date (non-ghost) objects.
    for i in range(100):
        oid = _b('oid_%04d' % i)
        oids.append(oid)
        cache[oid] = self._makePersist(oid=oid, state=UPTODATE)
    self.assertEqual(cache.cache_non_ghost_count, 100)
    cache.minimize()
    gc.collect() # banish the ghosts who are no longer in the ring
    # Everything was evictable, so the ring is empty and the weakly-held
    # entries have been collected.
    self.assertEqual(cache.cache_non_ghost_count, 0)
    for oid in oids:
        self.assertTrue(cache.get(oid) is None)
def test_minimize_turns_into_ghosts(self):
    """minimize() must flip a live object's state to GHOST, not just evict it."""
    import gc
    from persistent.interfaces import UPTODATE
    from persistent.interfaces import GHOST
    from persistent._compat import _b
    cache = self._makeOne()
    oid = _b('oid_%04d' % 1)
    # Keep a strong reference so we can inspect the state afterwards.
    obj = cache[oid] = self._makePersist(oid=oid, state=UPTODATE)
    self.assertEqual(cache.cache_non_ghost_count, 1)
    cache.minimize()
    gc.collect() # banish the ghosts who are no longer in the ring
    self.assertEqual(cache.cache_non_ghost_count, 0)
    self.assertEqual(obj._p_state, GHOST)
def test_new_ghost_non_persistent_object(self):
    """new_ghost() rejects objects that lack the persistent protocol."""
    from persistent._compat import _b
    cache = self._makeOne()
    # A plain object() has no _p_oid attribute to assign.
    self.assertRaises(AttributeError, cache.new_ghost, _b('123'), object())
def test_new_ghost_obj_already_has_oid(self):
    """new_ghost() rejects a candidate whose _p_oid is already set."""
    from persistent._compat import _b
    from persistent.interfaces import GHOST
    candidate = self._makePersist(oid=_b('123'), state=GHOST)
    cache = self._makeOne()
    self.assertRaises(ValueError, cache.new_ghost, _b('123'), candidate)
def test_new_ghost_obj_already_has_jar(self):
    """new_ghost() rejects a candidate already bound to a jar."""
    from persistent._compat import _b
    # NOTE(review): this Dummy class is dead code — the candidate below is
    # built with _makePersist instead.
    class Dummy(object):
        _p_oid = None
        _p_jar = object()
    cache = self._makeOne()
    candidate = self._makePersist(oid=None, jar=object())
    self.assertRaises(ValueError, cache.new_ghost, _b('123'), candidate)
def test_new_ghost_obj_already_in_cache(self):
    """new_ghost() rejects a key that is already present in the cache."""
    from persistent._compat import _b
    KEY = _b('123')
    cache = self._makeOne()
    candidate = self._makePersist(oid=KEY)
    cache[KEY] = candidate
    # Now, normally we can't get in the cache without an oid and jar
    # (the C implementation doesn't allow it), so if we try to create
    # a ghost, we get the value error
    self.assertRaises(ValueError, cache.new_ghost, KEY, candidate)
    candidate._p_oid = None
    self.assertRaises(ValueError, cache.new_ghost, KEY, candidate)
    # if we're sneaky and remove the OID and jar, then we get the duplicate
    # key error
    candidate._p_jar = None
    self.assertRaises(KeyError, cache.new_ghost, KEY, candidate)
def test_new_ghost_success_already_ghost(self):
    """new_ghost() registers an unbound ghost and wires oid/jar/state."""
    from persistent.interfaces import GHOST
    from persistent._compat import _b
    KEY = _b('123')
    cache = self._makeOne()
    candidate = self._makePersist(oid=None, jar=None)
    cache.new_ghost(KEY, candidate)
    self.assertTrue(cache.get(KEY) is candidate)
    self.assertEqual(candidate._p_oid, KEY)
    self.assertEqual(candidate._p_jar, cache.jar)
    self.assertEqual(candidate._p_state, GHOST)
def test_new_ghost_success_not_already_ghost(self):
    """new_ghost() also ghostifies a candidate that started UPTODATE."""
    from persistent.interfaces import GHOST
    from persistent.interfaces import UPTODATE
    from persistent._compat import _b
    KEY = _b('123')
    cache = self._makeOne()
    candidate = self._makePersist(oid=None, jar=None, state=UPTODATE)
    cache.new_ghost(KEY, candidate)
    self.assertTrue(cache.get(KEY) is candidate)
    self.assertEqual(candidate._p_oid, KEY)
    self.assertEqual(candidate._p_jar, cache.jar)
    self.assertEqual(candidate._p_state, GHOST)
def test_new_ghost_w_pclass_non_ghost(self):
    """new_ghost() stores a persistent class in persistent_classes."""
    from persistent._compat import _b
    KEY = _b('123')
    class Pclass(object):
        _p_oid = None
        _p_jar = None
    cache = self._makeOne()
    cache.new_ghost(KEY, Pclass)
    self.assertTrue(cache.get(KEY) is Pclass)
    self.assertTrue(cache.persistent_classes[KEY] is Pclass)
    self.assertEqual(Pclass._p_oid, KEY)
    self.assertEqual(Pclass._p_jar, cache.jar)
def test_new_ghost_w_pclass_ghost(self):
    """Same as the non-ghost pclass case.

    NOTE(review): this is an exact duplicate of
    test_new_ghost_w_pclass_non_ghost — neither variant makes the class a
    ghost before the call.
    """
    from persistent._compat import _b
    KEY = _b('123')
    class Pclass(object):
        _p_oid = None
        _p_jar = None
    cache = self._makeOne()
    cache.new_ghost(KEY, Pclass)
    self.assertTrue(cache.get(KEY) is Pclass)
    self.assertTrue(cache.persistent_classes[KEY] is Pclass)
    self.assertEqual(Pclass._p_oid, KEY)
    self.assertEqual(Pclass._p_jar, cache.jar)
def test_reify_miss_single(self):
from persistent._compat import _b
KEY = _b('123')
cache = self._makeOne()
self.assertRaises(KeyError, cache.reify, KEY)
def test_reify_miss_multiple(self):
from persistent._compat import _b
KEY = _b('123')
KEY2 = _b('456')
cache = self._makeOne()
self.assertRaises(KeyError, cache.reify, [KEY, KEY2])
def test_reify_hit_single_ghost(self):
    """reify() activates a cached ghost and moves it onto the LRU ring."""
    from persistent._compat import _b
    from persistent.interfaces import GHOST, UPTODATE
    oid = _b('123')
    cache = self._makeOne()
    obj = self._makePersist(oid=oid, jar=cache.jar, state=GHOST)
    cache[oid] = obj
    self.assertEqual(cache.ringlen(), 0)  # ghosts are kept off the ring
    cache.reify(oid)
    self.assertEqual(cache.ringlen(), 1)
    first_key, first_value = cache.lru_items()[0]
    self.assertEqual(first_key, oid)
    self.assertIs(first_value, obj)
    self.assertEqual(obj._p_state, UPTODATE)
def test_reify_hit_single_non_ghost(self):
    """reify() of an already-active object leaves state and ring unchanged."""
    from persistent._compat import _b
    from persistent.interfaces import UPTODATE
    oid = _b('123')
    cache = self._makeOne()
    obj = self._makePersist(oid=oid, jar=cache.jar, state=UPTODATE)
    cache[oid] = obj
    self.assertEqual(cache.ringlen(), 1)
    cache.reify(oid)
    self.assertEqual(cache.ringlen(), 1)
    self.assertEqual(obj._p_state, UPTODATE)
def test_reify_hit_multiple_mixed(self):
    """reify() over several oids activates ghosts and leaves others alone."""
    from persistent._compat import _b
    from persistent.interfaces import GHOST, UPTODATE
    oid1, oid2 = _b('123'), _b('456')
    cache = self._makeOne()
    ghost = self._makePersist(oid=oid1, jar=cache.jar, state=GHOST)
    cache[oid1] = ghost
    active = self._makePersist(oid=oid2, jar=cache.jar, state=UPTODATE)
    cache[oid2] = active
    self.assertEqual(cache.ringlen(), 1)  # only the non-ghost is ringed
    cache.reify([oid1, oid2])
    self.assertEqual(cache.ringlen(), 2)
    self.assertEqual(ghost._p_state, UPTODATE)
    self.assertEqual(active._p_state, UPTODATE)
def test_invalidate_miss_single(self):
    """invalidate() of an unknown oid is silently ignored."""
    from persistent._compat import _b
    cache = self._makeOne()
    cache.invalidate(_b('123'))  # must not raise
def test_invalidate_miss_multiple(self):
    """invalidate() of a list of unknown oids is silently ignored."""
    from persistent._compat import _b
    cache = self._makeOne()
    cache.invalidate([_b('123'), _b('456')])  # must not raise
def test_invalidate_hit_single_ghost(self):
    """Invalidating an already-ghost object leaves it a ghost, off the ring."""
    from persistent.interfaces import GHOST
    from persistent._compat import _b
    KEY = _b('123')
    cache = self._makeOne()
    # Fix: use KEY (bytes) for the oid rather than the native-str '123' so
    # the object's _p_oid matches its cache key, consistent with the other
    # reify/invalidate tests here (on Python 3, b'123' != '123').
    candidate = self._makePersist(oid=KEY, jar=cache.jar, state=GHOST)
    cache[KEY] = candidate
    self.assertEqual(cache.ringlen(), 0)  # ghosts never sit on the LRU ring
    cache.invalidate(KEY)
    self.assertEqual(cache.ringlen(), 0)
    self.assertEqual(candidate._p_state, GHOST)
def test_invalidate_hit_single_non_ghost(self):
    """Invalidating an active object ghostifies it and unlinks it from the ring."""
    from persistent.interfaces import GHOST
    from persistent.interfaces import UPTODATE
    from persistent._compat import _b
    KEY = _b('123')
    cache = self._makeOne()
    # Fix: use KEY (bytes) for the oid rather than the native-str '123' so
    # the object's _p_oid matches its cache key, consistent with the other
    # reify/invalidate tests here (on Python 3, b'123' != '123').
    candidate = self._makePersist(oid=KEY, jar=cache.jar, state=UPTODATE)
    cache[KEY] = candidate
    self.assertEqual(cache.ringlen(), 1)
    cache.invalidate(KEY)
    self.assertEqual(cache.ringlen(), 0)
    self.assertEqual(candidate._p_state, GHOST)
def test_invalidate_hit_multiple_mixed(self):
    """invalidate() over mixed states ghostifies everything and clears the ring."""
    from persistent._compat import _b
    from persistent.interfaces import GHOST, UPTODATE
    oid1, oid2 = _b('123'), _b('456')
    cache = self._makeOne()
    ghost = self._makePersist(oid=oid1, jar=cache.jar, state=GHOST)
    cache[oid1] = ghost
    active = self._makePersist(oid=oid2, jar=cache.jar, state=UPTODATE)
    cache[oid2] = active
    self.assertEqual(cache.ringlen(), 1)
    cache.invalidate([oid1, oid2])
    self.assertEqual(cache.ringlen(), 0)
    self.assertEqual(ghost._p_state, GHOST)
    self.assertEqual(active._p_state, GHOST)
def test_invalidate_hit_multiple_non_ghost(self):
    """invalidate() survives ring traversal in reverse insertion order."""
    from persistent._compat import _b
    from persistent.interfaces import GHOST, UPTODATE
    oid1, oid2 = _b('123'), _b('456')
    cache = self._makeOne()
    first = self._makePersist(oid=oid1, jar=cache.jar, state=UPTODATE)
    cache[oid1] = first
    second = self._makePersist(oid=oid2, jar=cache.jar, state=UPTODATE)
    cache[oid2] = second
    self.assertEqual(cache.ringlen(), 2)
    # Invalidate in the opposite order of insertion to exercise ring
    # traversal while nodes are being unlinked.
    cache.invalidate([oid2, oid1])
    self.assertEqual(cache.ringlen(), 0)
    self.assertEqual(first._p_state, GHOST)
    self.assertEqual(second._p_state, GHOST)
def test_invalidate_hit_pclass(self):
    """invalidate() removes a cached persistent class entirely."""
    from persistent._compat import _b
    oid = _b('123')
    class Pclass(object):
        _p_oid = oid
        _p_jar = None
    cache = self._makeOne()
    cache[oid] = Pclass
    self.assertIs(cache.persistent_classes[oid], Pclass)
    cache.invalidate(oid)
    self.assertNotIn(oid, cache.persistent_classes)
def test_debug_info_w_persistent_class(self):
    """debug_info() reports a cached persistent *class* as a 'type' entry."""
    import gc
    from persistent.interfaces import UPTODATE
    from persistent._compat import _b
    KEY = _b('pclass')
    class pclass(object):
        _p_oid = KEY
    cache = self._makeOne()
    pclass._p_state = UPTODATE
    cache[KEY] = pclass
    gc.collect() # pypy vs. refcounting
    info = cache.debug_info()
    self.assertEqual(len(info), 1)
    # Each entry is a 4-tuple: (oid, refc, type name, state).
    oid, refc, typ, state = info[0]
    self.assertEqual(oid, KEY)
    # refc is asserted to equal len(gc.get_referents(pclass)) -- presumably
    # what this backend reports for classes; confirm against debug_info docs.
    self.assertEqual(refc, len(gc.get_referents(pclass)))
    self.assertEqual(typ, 'type')
    self.assertEqual(state, UPTODATE)
def test_debug_info_w_normal_object(self):
    """debug_info() reports a cached instance with its class name and state."""
    import gc
    from persistent.interfaces import UPTODATE
    from persistent._compat import _b
    KEY = _b('uptodate')
    cache = self._makeOne()
    uptodate = self._makePersist(state=UPTODATE, oid=KEY)
    cache[KEY] = uptodate
    gc.collect() # pypy vs. refcounting
    info = cache.debug_info()
    self.assertEqual(len(info), 1)
    # Each entry is a 4-tuple: (oid, refc, type name, state).
    oid, refc, typ, state = info[0]
    self.assertEqual(oid, KEY)
    self.assertEqual(refc, len(gc.get_referents(uptodate)))
    self.assertEqual(typ, 'DummyPersistent')
    self.assertEqual(state, UPTODATE)
def test_debug_info_w_ghost(self):
    """debug_info() also reports ghost entries, with GHOST as their state."""
    import gc
    from persistent.interfaces import GHOST
    from persistent._compat import _b
    KEY = _b('ghost')
    cache = self._makeOne()
    ghost = self._makePersist(state=GHOST, oid=KEY)
    cache[KEY] = ghost
    gc.collect() # pypy vs. refcounting
    info = cache.debug_info()
    self.assertEqual(len(info), 1)
    # Each entry is a 4-tuple: (oid, refc, type name, state).
    oid, refc, typ, state = info[0]
    self.assertEqual(oid, KEY)
    self.assertEqual(refc, len(gc.get_referents(ghost)))
    self.assertEqual(typ, 'DummyPersistent')
    self.assertEqual(state, GHOST)
def test_init_with_cacheless_jar(self):
    """Constructing a cache tolerates jars that reject the _cache attribute."""
    # Some ZODB tests pass jar objects with no settable ``_cache``; the
    # constructor must swallow the AttributeError raised when it tries to
    # backlink itself onto the jar.
    class Jar(object):
        was_set = False
        def __setattr__(self, name, value):
            if name != '_cache':
                return  # other attribute writes are silently dropped, as before
            object.__setattr__(self, 'was_set', True)
            raise AttributeError(name)
    jar = Jar()
    self._makeOne(jar)
    self.assertTrue(jar.was_set)
def test_setting_non_persistent_item(self):
    """Storing a non-persistent value raises a descriptive TypeError."""
    cache = self._makeOne()
    with self.assertRaises(TypeError) as ctx:
        cache[None] = object()
    self.assertEqual(str(ctx.exception),
                     "Cache values must be persistent objects.")
def test_setting_without_jar(self):
    """Storing an object whose jar is None raises ValueError."""
    cache = self._makeOne()
    orphan = self._makePersist(jar=None)
    with self.assertRaises(ValueError) as ctx:
        cache[orphan._p_oid] = orphan
    self.assertEqual(str(ctx.exception), "Cached object jar missing")
def test_setting_already_cached(self):
    """An object owned by one cache may not be stored in another."""
    first_cache = self._makeOne()
    obj = self._makePersist(jar=first_cache.jar)
    first_cache[obj._p_oid] = obj
    second_cache = self._makeOne()
    with self.assertRaises(ValueError) as ctx:
        second_cache[obj._p_oid] = obj
    self.assertEqual(str(ctx.exception), "Object already in another cache")
def test_cannot_update_mru_while_already_locked(self):
    """mru() refuses to run while a ring sweep is in progress."""
    cache = self._makeOne()
    cache._is_sweeping_ring = True
    self.assertFalse(cache.mru(None))
def test_update_object_size_estimation_simple(self):
    """update_object_size_estimation() adjusts total_estimated_size."""
    cache = self._makeOne()
    p = self._makePersist(jar=cache.jar)
    cache[p._p_oid] = p
    # The cache accesses the private attribute directly to bypass
    # the bit conversion.
    # Note that the _p_estimated_size is set *after*
    # the update call is made in ZODB's serialize
    p._Persistent__size = 0
    cache.update_object_size_estimation(p._p_oid, 2)
    # An estimate of 2 bytes shows up as 64 -- presumably the cache rounds
    # estimates up to an internal 64-byte granularity; confirm against the
    # _p_estimated_size documentation.
    self.assertEqual(cache.total_estimated_size, 64)
    # A missing object does nothing
    cache.update_object_size_estimation(None, 2)
    self.assertEqual(cache.total_estimated_size, 64)
def test_cache_size(self):
    """cache_size reflects the constructor target and later assignment."""
    initial = 42
    cache = self._makeOne(target_size=initial)
    self.assertEqual(cache.cache_size, initial)
    cache.cache_size = 64
    self.assertEqual(cache.cache_size, 64)
def test_sweep_empty(self):
    """incrgc() on an empty cache evicts nothing."""
    self.assertEqual(self._makeOne().incrgc(), 0)
def test_sweep_of_non_deactivating_object(self):
    """An object whose _p_deactivate refuses to ghostify survives a sweep."""
    cache = self._makeOne()
    p = self._makePersist(jar=cache.jar)
    p._p_state = 0 # non-ghost, get in the ring
    cache[p._p_oid] = p
    def bad_deactivate():
        "Doesn't call super, for it's own reasons, so can't be ejected"
        return
    # Shadow the instance's deactivation hook so the sweep cannot turn the
    # object into a ghost.
    p._p_deactivate = bad_deactivate
    import persistent.picklecache
    # Temporarily widen the module-global _SWEEPABLE_TYPES so the sweep
    # considers our dummy object at all; restore it in the finally block.
    sweep_types = persistent.picklecache._SWEEPABLE_TYPES
    persistent.picklecache._SWEEPABLE_TYPES = DummyPersistent
    try:
        self.assertEqual(cache.full_sweep(), 0)
    finally:
        persistent.picklecache._SWEEPABLE_TYPES = sweep_types
    # With the shadowing hook removed, the object deactivates normally and
    # a second sweep evicts it.
    del p._p_deactivate
    self.assertEqual(cache.full_sweep(), 1)
if _is_jython:
def with_deterministic_gc(f):
def | |
fill_value=np.nan
)
yzoutput = np.empty((zoutput[:, :, 0].size, 2))
yzoutput[:, 0] = zoutput[:, :, 0].ravel()
yzoutput[:, 1] = youtput[:, :, 0].ravel()
averts2d = interp_func(yzoutput)
averts2d = averts2d.reshape(
(self.nlay + 1, self.nrow + 1, 1)
)
averts = averts2d * np.ones(shape_verts)
else:
# 3d interpolation
# flip y and z coordinates because RegularGridInterpolator
# requires increasing input coordinates
xyzinput = (np.flip(zcenters), np.flip(ycenters), xcenters)
a = np.flip(a, axis=[0, 1])
# interpolate
interp_func = interp.RegularGridInterpolator(
xyzinput, a, bounds_error=False, fill_value=np.nan
)
xyzoutput = np.empty((zoutput.size, 3))
xyzoutput[:, 0] = zoutput.ravel()
xyzoutput[:, 1] = youtput.ravel()
xyzoutput[:, 2] = xoutput.ravel()
averts = interp_func(xyzoutput)
averts = averts.reshape(shape_verts)
elif a.shape == shape_ext_x:
# set array to NaN where inactive on both side
if self._idomain is not None:
inactive_ext_x = np.full(shape_ext_x, True)
inactive_ext_x[:, :, :-1] = inactive
inactive_ext_x[:, :, 1:] = np.logical_and(
inactive_ext_x[:, :, 1:], inactive
)
a = np.where(inactive_ext_x, np.nan, a)
averts = np.empty(shape_verts, dtype=a.dtype)
averts_basic = np.empty(shape_verts, dtype=a.dtype)
for j in range(self.ncol + 1):
# perform basic interpolation (will be useful in all cases)
averts_basic[:, :, j] = array_at_verts_basic2d(a[:, :, j])
if self.is_regular_y and _is_regular_z and first_equal_yz:
# in this case, basic interpolation is the correct one
averts2d = averts_basic[:, :, j]
basic = True
else:
if self.nlay == 1:
# in this case we need a 1d interpolation along y
averts1d = array_at_faces_1d(a[0, :, j], self.__delc)
averts2d = averts1d.reshape((1, self.nrow + 1))
averts2d = averts2d * np.ones((2, self.nrow + 1))
elif self.nrow == 1:
# in this case we need a 1d interpolation along z
delz1d = np.abs(np.diff(self.zverts_smooth[:, 0, j]))
averts1d = array_at_faces_1d(a[:, 0, j], delz1d)
averts2d = averts1d.reshape((self.nlay + 1, 1))
averts2d = averts2d * np.ones((self.nlay + 1, 2))
else:
# 2d interpolation
# flip y and z coordinates because
# RegularGridInterpolator requires increasing input
# coordinates
yzinput = (np.flip(zcenters), np.flip(ycenters))
a2d = np.flip(a[:, :, j], axis=[0, 1])
interp_func = interp.RegularGridInterpolator(
yzinput, a2d, bounds_error=False, fill_value=np.nan
)
yzoutput = np.empty((zoutput[:, :, j].size, 2))
yzoutput[:, 0] = zoutput[:, :, j].ravel()
yzoutput[:, 1] = youtput[:, :, j].ravel()
averts2d = interp_func(yzoutput)
averts2d = averts2d.reshape(zoutput[:, :, j].shape)
averts[:, :, j] = averts2d
elif a.shape == shape_ext_y:
# set array to NaN where inactive on both side
if self._idomain is not None:
inactive_ext_y = np.full(shape_ext_y, True)
inactive_ext_y[:, :-1, :] = inactive
inactive_ext_y[:, 1:, :] = np.logical_and(
inactive_ext_y[:, 1:, :], inactive
)
a = np.where(inactive_ext_y, np.nan, a)
averts = np.empty(shape_verts, dtype=a.dtype)
averts_basic = np.empty(shape_verts, dtype=a.dtype)
for i in range(self.nrow + 1):
# perform basic interpolation (will be useful in all cases)
averts_basic[:, i, :] = array_at_verts_basic2d(a[:, i, :])
if self.is_regular_x and _is_regular_z and first_equal_xz:
# in this case, basic interpolation is the correct one
averts2d = averts_basic[:, i, :]
basic = True
else:
if self.nlay == 1:
# in this case we need a 1d interpolation along x
averts1d = array_at_faces_1d(a[0, i, :], self.__delr)
averts2d = averts1d.reshape((1, self.ncol + 1))
averts2d = averts2d * np.ones((2, self.ncol + 1))
elif self.ncol == 1:
# in this case we need a 1d interpolation along z
delz1d = np.abs(np.diff(self.zverts_smooth[:, i, 0]))
averts1d = array_at_faces_1d(a[:, i, 0], delz1d)
averts2d = averts1d.reshape((self.nlay + 1, 1))
averts2d = averts2d * np.ones((self.nlay + 1, 2))
else:
# 2d interpolation
# flip z coordinates because RegularGridInterpolator
# requires increasing input coordinates
xzinput = (np.flip(zcenters), xcenters)
a2d = np.flip(a[:, i, :], axis=[0])
interp_func = interp.RegularGridInterpolator(
xzinput, a2d, bounds_error=False, fill_value=np.nan
)
xzoutput = np.empty((zoutput[:, i, :].size, 2))
xzoutput[:, 0] = zoutput[:, i, :].ravel()
xzoutput[:, 1] = xoutput[:, i, :].ravel()
averts2d = interp_func(xzoutput)
averts2d = averts2d.reshape(zoutput[:, i, :].shape)
averts[:, i, :] = averts2d
elif a.shape == shape_ext_z:
# set array to NaN where inactive on both side
if self._idomain is not None:
inactive_ext_z = np.full(shape_ext_z, True)
inactive_ext_z[:-1, :, :] = inactive
inactive_ext_z[1:, :, :] = np.logical_and(
inactive_ext_z[1:, :, :], inactive
)
a = np.where(inactive_ext_z, np.nan, a)
averts = np.empty(shape_verts, dtype=a.dtype)
averts_basic = np.empty(shape_verts, dtype=a.dtype)
for k in range(self.nlay + 1):
# perform basic interpolation (will be useful in all cases)
averts_basic[k, :, :] = array_at_verts_basic2d(a[k, :, :])
if self.is_regular_xy:
# in this case, basic interpolation is the correct one
averts2d = averts_basic[k, :, :]
basic = True
else:
if self.nrow == 1:
# in this case we need a 1d interpolation along x
averts1d = array_at_faces_1d(a[k, 0, :], self.__delr)
averts2d = averts1d.reshape((1, self.ncol + 1))
averts2d = averts2d * np.ones((2, self.ncol + 1))
elif self.ncol == 1:
# in this case we need a 1d interpolation along y
averts1d = array_at_faces_1d(a[k, :, 0], self.__delc)
averts2d = averts1d.reshape((self.nrow + 1, 1))
averts2d = averts2d * np.ones((self.nrow + 1, 2))
else:
# 2d interpolation
# flip y coordinates because RegularGridInterpolator
# requires increasing input coordinates
xyinput = (np.flip(ycenters), xcenters)
a2d = np.flip(a[k, :, :], axis=[0])
interp_func = interp.RegularGridInterpolator(
xyinput, a2d, bounds_error=False, fill_value=np.nan
)
xyoutput = np.empty((youtput[k, :, :].size, 2))
xyoutput[:, 0] = youtput[k, :, :].ravel()
xyoutput[:, 1] = xoutput[k, :, :].ravel()
averts2d = interp_func(xyoutput)
averts2d = averts2d.reshape(youtput[k, :, :].shape)
averts[k, :, :] = averts2d
if not basic:
# use basic interpolation for remaining NaNs at boundaries
where_nan = np.isnan(averts)
averts[where_nan] = averts_basic[where_nan]
return averts
def array_at_faces(self, a, direction, withnan=True):
"""
Computes values at the center of cell faces using linear interpolation.
Parameters
----------
a : ndarray
Values at cell centers, shape (nlay, row, ncol).
direction : str, possible values are 'x', 'y' and 'z'
Direction in which values will be interpolated at cell faces.
withnan : bool
If True (default), the result value will be set to NaN where the
cell face sits between inactive cells. If False, not.
Returns
-------
afaces : ndarray
Array values interpolated at cell vertices, shape as input extended
by 1 along the specified direction.
"""
# get the dimension that corresponds to the direction
dir_to_dim = {"x": 2, "y": 1, "z": 0}
dim = dir_to_dim[direction]
# extended array with ghost cells on both sides having zero values
ghost_shape = list(a.shape)
ghost_shape[dim] += 2
a_ghost = np.zeros(ghost_shape, dtype=a.dtype)
# extended delta with ghost cells on both sides having zero values
delta_ghost = np.zeros(ghost_shape, dtype=a.dtype)
# inactive bool array
if withnan and self._idomain is not None:
inactive = self._idomain == 0
if dim == 0:
# fill array with ghost cells
a_ghost[1:-1, :, :] = a
a_ghost[0, :, :] = a[0, :, :]
a_ghost[-1, :, :] = a[-1, :, :]
# calculate weights
delta_ghost[1:-1, :, :] = self.delz
weight2 = delta_ghost[:-1, :, :] / (
delta_ghost[:-1, :, :] + delta_ghost[1:, :, :]
)
weight1 = 1.0 - weight2
# interpolate
afaces = a_ghost[:-1, :, :] * weight1 + a_ghost[1:, :, :] * weight2
# assign NaN where idomain==0 on both sides
if withnan and self._idomain is not None:
inactive_faces = np.full(afaces.shape, True)
inactive_faces[:-1, :, :] = np.logical_and(
inactive_faces[:-1, :, :], inactive
)
inactive_faces[1:, :, :] = np.logical_and(
inactive_faces[1:, :, :], inactive
)
afaces[inactive_faces] = np.nan
elif dim == 1:
# fill array with ghost cells
a_ghost[:, 1:-1, :] = a
a_ghost[:, 0, :] = a[:, 0, :]
a_ghost[:, -1, :] = a[:, -1, :]
# calculate weights
delc = np.reshape(self.delc, (1, self.nrow, 1))
delc_3D = delc * np.ones(a.shape)
delta_ghost[:, 1:-1, :] = delc_3D
weight2 = delta_ghost[:, :-1, :] / (
delta_ghost[:, :-1, :] + delta_ghost[:, 1:, :]
)
weight1 = 1.0 - weight2
# interpolate
afaces = a_ghost[:, :-1, :] * weight1 + a_ghost[:, 1:, :] * weight2
# assign NaN where idomain==0 on both sides
if withnan and self._idomain is not None:
inactive_faces = np.full(afaces.shape, True)
inactive_faces[:, :-1, :] = np.logical_and(
inactive_faces[:, :-1, :], inactive
)
inactive_faces[:, 1:, :] = np.logical_and(
inactive_faces[:, 1:, :], inactive
)
afaces[inactive_faces] = np.nan
elif dim == 2:
# fill array with ghost cells
a_ghost[:, :, 1:-1] = a
a_ghost[:, :, 0] = a[:, :, 0]
a_ghost[:, :, -1] = a[:, :, -1]
# calculate weights
delr = np.reshape(self.delr, (1, 1, self.ncol))
delr_3D = delr * np.ones(a.shape)
| |
"""Generated message classes for gkebackup version v1.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'gkebackup'
class AuditConfig(_messages.Message):
  r"""Specifies the audit configuration for a service. The configuration
  determines which permission types are logged, and what identities, if any,
  are exempted from logging. An AuditConfig must have one or more
  AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
  specific service, the union of the two AuditConfigs is used for that
  service: the log_types specified in each AuditConfig are enabled, and the
  exempted_members in each AuditLogConfig are exempted. Example Policy with
  multiple AuditConfigs: { "audit_configs": [ { "service": "allServices",
  "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
  "user:<EMAIL>" ] }, { "log_type": "DATA_WRITE" }, { "log_type":
  "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com",
  "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type":
  "DATA_WRITE", "exempted_members": [ "user:<EMAIL>" ] } ] } ] } For
  sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
  logging. It also exempts <EMAIL> from DATA_READ logging, and
  <EMAIL> from DATA_WRITE logging.
  Fields:
    auditLogConfigs: The configuration for logging of each type of permission.
    service: Specifies a service that will be enabled for audit logging. For
      example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
      `allServices` is a special value that covers all services.
  """
  # NOTE: autogenerated message class (see module header).  The integer
  # field numbers are part of the serialized protocol -- do not renumber
  # or edit by hand.
  auditLogConfigs = _messages.MessageField('AuditLogConfig', 1, repeated=True)
  service = _messages.StringField(2)
class AuditLogConfig(_messages.Message):
  r"""Provides the configuration for logging a type of permissions. Example: {
  "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
  "user:<EMAIL>" ] }, { "log_type": "DATA_WRITE" } ] } This enables
  'DATA_READ' and 'DATA_WRITE' logging, while exempting <EMAIL> from
  DATA_READ logging.
  Enums:
    LogTypeValueValuesEnum: The log type that this config enables.
  Fields:
    exemptedMembers: Specifies the identities that do not cause logging for
      this type of permission. Follows the same format of Binding.members.
    logType: The log type that this config enables.
  """
  # NOTE: autogenerated message class (see module header).  Field and enum
  # numbers are part of the serialized protocol -- do not renumber or edit
  # by hand.
  class LogTypeValueValuesEnum(_messages.Enum):
    r"""The log type that this config enables.
    Values:
      LOG_TYPE_UNSPECIFIED: Default case. Should never be this.
      ADMIN_READ: Admin reads. Example: CloudIAM getIamPolicy
      DATA_WRITE: Data writes. Example: CloudSQL Users create
      DATA_READ: Data reads. Example: CloudSQL Users list
    """
    LOG_TYPE_UNSPECIFIED = 0
    ADMIN_READ = 1
    DATA_WRITE = 2
    DATA_READ = 3
  exemptedMembers = _messages.StringField(1, repeated=True)
  logType = _messages.EnumField('LogTypeValueValuesEnum', 2)
class Backup(_messages.Message):
r"""Represents a request to perform a single point-in-time capture of some
portion of the state of a GKE cluster, the record of the backup operation
itself, and an anchor for the underlying artifacts that comprise the Backup
(the config backup and VolumeBackups). Next id: 28
Enums:
StateValueValuesEnum: Output only. Current state of the Backup
Messages:
LabelsValue: A set of custom labels supplied by user.
Fields:
allNamespaces: Output only. If True, all namespaces were included in the
Backup.
clusterMetadata: Output only. Information about the GKE cluster from which
this Backup was created.
completeTime: Output only. Completion time of the Backup
configBackupSizeBytes: Output only. cluster config backup size in bytes.
containsSecrets: Output only. Whether or not the Backup contains
Kubernetes Secrets. Inherited from the parent BackupPlan's
backup_config.include_secrets.
containsVolumeData: Output only. Whether or not the Backup contains volume
data. Inherited from the parent BackupPlan's
backup_config.include_volume_data.
createTime: Output only. [Output Only] The timestamp when this Backup
resource was created - can be converted to and from
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt)
deleteLockDays: Minimum age for this Backup (in days). If this field is
set to a non-zero value, the Backup will be "locked" against deletion
(either manual or automatic deletion) for the number of days provided
(measured from the creation time of the Backup). This value is inherited
from the parent BackupPlan's retention_policy.backup_delete_lock_days
value and may only be increased (either at creation time or in a
subsequent update). This field MUST be an integer value between 0-90
(inclusive). Default: inherited from BackupPlan.
deleteLockExpireTime: Output only. The time at which an existing delete
lock will expire for this backup (calculated from create_time +
delete_lock_days).
description: User specified descriptive string for this Backup.
encryptionKey: Output only. The customer managed encryption key that was
used to encrypt the Backup's artifacts. Inherited from the parent
BackupPlan's backup_config.encryption_key.
etag: Output only. `etag` is used for optimistic concurrency control as a
way to help prevent simultaneous updates of a backup from overwriting
each other. It is strongly suggested that systems make use of the `etag`
in the read-modify-write cycle to perform backup updates in order to
avoid race conditions: An `etag` is returned in the response to
`GetBackup`, and systems are expected to put that etag in the request to
`UpdateBackup` to ensure that their change will be applied to the same
version.
labels: A set of custom labels supplied by user.
manual: Output only. This flag indicates whether this Backup resource was
created manually by a user or via a schedule in the BackupPlan. A value
of True means that the Backup was created manually.
name: Output only. The fully qualified name of the Backup.
projects/*/locations/*/backupPlans/*/backups/*
podCount: Output only. The total number of Kubernetes Pods contained in
the Backup.
resourceCount: Output only. The total number of Kubernetes resources
included in the Backup.
retainDays: The age (in days) after which this Backup will be
automatically deleted. If not specified at Backup creation time, this
value is inherited from the parent BackupPlan's
retention_policy.backup_retain_days value. Once a Backup is created,
this value may only be increased. This must be an integer value >= 0. If
0, no automatic deletion will occur for this Backup. If not 0, this must
be >= delete_lock_days. Default: inherited from BackupPlan.
retainExpireTime: Output only. The time at which this Backup will be
automatically deleted (calculated from create_time + retain_days).
selectedApplications: Output only. If set, the list of
ProtectedApplications whose resources were included in the Backup.
selectedNamespaces: Output only. If set, the list of namespaces that were
included in the Backup.
sizeBytes: Output only. The total size of the Backup in bytes = config
backup size + sum(volume backup sizes)
state: Output only. Current state of the Backup
stateReason: Output only. Human-readable description of why the backup is
in the current `state`.
uid: Output only. [Output Only] Server generated global unique identifier
of [UUID4](https://en.wikipedia.org/wiki/Universally_unique_identifier)
updateTime: Output only. [Output Only] The timestamp when this Backup
resource was last updated - can be converted to and from
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt)
volumeCount: Output only. The total number of volume backups contained in
the Backup.
"""
class StateValueValuesEnum(_messages.Enum):
r"""Output only. Current state of the Backup
Values:
STATE_UNSPECIFIED: The Backup resource is in the process of being
created.
CREATING: The Backup resource has been created and the associated
BackupJob Kubernetes resource has been injected into the source
cluster.
IN_PROGRESS: The gkebackup agent in the cluster has begun executing the
backup operation.
SUCCEEDED: The backup operation has completed successfully.
FAILED: The backup operation has failed.
DELETING: This Backup resource (and its associated artifacts) is in the
process of being deleted.
"""
STATE_UNSPECIFIED = 0
CREATING = 1
IN_PROGRESS = 2
SUCCEEDED = 3
FAILED = 4
DELETING = 5
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""A set of custom labels supplied by user.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
allNamespaces = _messages.BooleanField(1)
clusterMetadata = _messages.MessageField('ClusterMetadata', 2)
completeTime = _messages.StringField(3)
configBackupSizeBytes = _messages.IntegerField(4)
containsSecrets = _messages.BooleanField(5)
containsVolumeData = _messages.BooleanField(6)
createTime = _messages.StringField(7)
deleteLockDays = _messages.IntegerField(8, variant=_messages.Variant.INT32)
deleteLockExpireTime = _messages.StringField(9)
description = _messages.StringField(10)
encryptionKey = _messages.MessageField('EncryptionKey', 11)
etag = _messages.StringField(12)
labels = _messages.MessageField('LabelsValue', 13)
manual = _messages.BooleanField(14)
name = _messages.StringField(15)
podCount = _messages.IntegerField(16, variant=_messages.Variant.INT32)
resourceCount = _messages.IntegerField(17, variant=_messages.Variant.INT32)
retainDays = _messages.IntegerField(18, variant=_messages.Variant.INT32)
retainExpireTime = _messages.StringField(19)
selectedApplications = _messages.MessageField('NamespacedNames', 20)
selectedNamespaces = _messages.MessageField('Namespaces', 21)
sizeBytes = _messages.IntegerField(22)
state = _messages.EnumField('StateValueValuesEnum', 23)
stateReason = _messages.StringField(24)
uid = _messages.StringField(25)
updateTime = _messages.StringField(26)
volumeCount | |
== "PUT":
return self.put_vci_configuration()
elif flask.request.method == "PATCH":
return self.patch_vci_configuration(vnfId, flask.request.values.get("vnfConfigModifications"))
elif flask.request.method == "DELETE":
return self.delete_vci_configuration()
def vnf_operation(self, vnfId, operationId):
    """Dispatch a VNF operation request to the handler for the HTTP verb."""
    # Authenticate the caller first, unless authentication is disabled.
    if self.__oaAa.getRunningAuthenticator() != "None":
        if self.__authenticateRequest("VS", vnfId) != True:
            return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
    verb = flask.request.method
    if verb == "GET":
        return self.get_vnf_operation()
    if verb == "POST":
        return self.post_vnf_operation(
            vnfId, operationId, flask.request.values.get("operationArguments"))
    if verb == "PUT":
        return self.put_vnf_operation()
    if verb == "PATCH":
        return self.patch_vnf_operation()
    if verb == "DELETE":
        return self.delete_vnf_operation()
def aa_authenticate(self, authentication):
    """Dispatch authentication requests by HTTP verb (no AA guard here)."""
    handlers = {
        "GET": lambda: self.get_aa_authenticate(authentication),
        "POST": self.post_aa_authenticate,
        "PUT": self.put_aa_authenticate,
        "PATCH": self.patch_aa_authenticate,
        "DELETE": self.delete_aa_authenticate,
    }
    handler = handlers.get(flask.request.method)
    if handler is not None:
        return handler()
def im_vib_users(self):
    """Dispatch VIB users collection requests by HTTP verb."""
    # Authenticate the caller first, unless authentication is disabled.
    if self.__oaAa.getRunningAuthenticator() != "None":
        if self.__authenticateRequest("VIB", None) != True:
            return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
    verb = flask.request.method
    if verb == "GET":
        return self.get_vib_users()
    if verb == "POST":
        return self.post_vib_users(flask.request.values.get("vibUserInstance"))
    if verb == "PUT":
        return self.put_vib_users()
    if verb == "PATCH":
        return self.patch_vib_users()
    if verb == "DELETE":
        return self.delete_vib_users()
def im_vib_u_userId(self, userId):
    """Dispatch single VIB user requests by HTTP verb."""
    # Authenticate the caller first, unless authentication is disabled.
    if self.__oaAa.getRunningAuthenticator() != "None":
        if self.__authenticateRequest("VIB", None) != True:
            return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
    handlers = {
        "GET": lambda: self.get_vib_u_userId(userId),
        "POST": self.post_vib_u_userId,
        "PUT": self.put_vib_u_userId,
        "PATCH": lambda: self.patch_vib_u_userId(
            userId, flask.request.values.get("vibUserInstance")),
        "DELETE": lambda: self.delete_vib_u_userId(userId),
    }
    handler = handlers.get(flask.request.method)
    if handler is not None:
        return handler()
def im_vib_credentials(self):
    """Dispatch VIB credentials collection requests by HTTP verb."""
    # Authenticate the caller first, unless authentication is disabled.
    if self.__oaAa.getRunningAuthenticator() != "None":
        if self.__authenticateRequest("VIB", None) != True:
            return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
    verb = flask.request.method
    if verb == "GET":
        return self.get_vib_credentials()
    if verb == "POST":
        return self.post_vib_credentials(
            flask.request.values.get("vibCredentialInstance"))
    if verb == "PUT":
        return self.put_vib_credentials()
    if verb == "PATCH":
        return self.patch_vib_credentials()
    if verb == "DELETE":
        return self.delete_vib_credentials()
def im_vib_c_credentialId(self, userId, vnfId):
    """Dispatch single VIB credential (user, vnf) requests by HTTP verb."""
    # Authenticate the caller first, unless authentication is disabled.
    if self.__oaAa.getRunningAuthenticator() != "None":
        if self.__authenticateRequest("VIB", None) != True:
            return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
    handlers = {
        "GET": lambda: self.get_vib_c_credentialId(userId, vnfId),
        "POST": self.post_vib_c_credentialId,
        "PUT": self.put_vib_c_credentialId,
        "PATCH": self.patch_vib_c_credentialId,
        "DELETE": lambda: self.delete_vib_c_credentialId(userId, vnfId),
    }
    handler = handlers.get(flask.request.method)
    if handler is not None:
        return handler()
def im_vib_c_userId(self, userId):
    """Dispatch per-user VIB credential requests by HTTP verb."""
    # Authenticate the caller first, unless authentication is disabled.
    if self.__oaAa.getRunningAuthenticator() != "None":
        if self.__authenticateRequest("VIB", None) != True:
            return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
    verb = flask.request.method
    if verb == "GET":
        return self.get_vib_c_userId(userId)
    if verb == "POST":
        return self.post_vib_c_userId()
    if verb == "PUT":
        return self.put_vib_c_userId()
    if verb == "PATCH":
        return self.patch_vib_c_userId()
    if verb == "DELETE":
        return self.delete_vib_c_userId()
def im_vib_c_vnfId(self, vnfId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_c_vnfId(vnfId)
elif flask.request.method == "POST":
return self.post_vib_c_vnfId()
elif flask.request.method == "PUT":
return self.put_vib_c_vnfId()
elif flask.request.method == "PATCH":
return self.patch_vib_c_vnfId()
elif flask.request.method == "DELETE":
return self.delete_vib_c_vnfId()
def im_vib_subscriptions(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_subscriptions()
elif flask.request.method == "POST":
return self.post_vib_subscriptions(flask.request.values.get("vibSubscriptionInstance"))
elif flask.request.method == "PUT":
return self.put_vib_subscriptions()
elif flask.request.method == "PATCH":
return self.patch_vib_subscriptions()
elif flask.request.method == "DELETE":
return self.delete_vib_subscriptions()
def im_vib_s_subscriptionId(self, subscriptionId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_s_subscriptionId(subscriptionId)
elif flask.request.method == "POST":
return self.post_vib_s_subscriptionId()
elif flask.request.method == "PUT":
return self.put_vib_s_subscriptionId()
elif flask.request.method == "PATCH":
return self.patch_vib_s_subscriptionId(subscriptionId, flask.request.values.get("vibSubscriptionInstance"))
elif flask.request.method == "DELETE":
return self.delete_vib_s_subscriptionId(subscriptionId)
def im_vib_management_agents(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_management_agents()
elif flask.request.method == "POST":
return self.post_vib_management_agents(flask.request.values.get("vibMaInstance"))
elif flask.request.method == "PUT":
return self.put_vib_management_agents()
elif flask.request.method == "PATCH":
return self.patch_vib_management_agents()
elif flask.request.method == "DELETE":
return self.delete_vib_management_agents()
def im_vib_ma_agentId(self, agentId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_ma_agentId(agentId)
elif flask.request.method == "POST":
return self.post_vib_ma_agentId()
elif flask.request.method == "PUT":
return self.put_vib_ma_agentId()
elif flask.request.method == "PATCH":
return self.patch_vib_ma_agentId(agentId, flask.request.values.get("vibMaInstance"))
elif flask.request.method == "DELETE":
return self.delete_vib_ma_agentId(agentId)
def im_vib_vnf_instances(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_vnf_instances()
elif flask.request.method == "POST":
return self.post_vib_vnf_instances(flask.request.values.get("vibVnfInstance"))
elif flask.request.method == "PUT":
return self.put_vib_vnf_instances()
elif flask.request.method == "PATCH":
return self.patch_vib_vnf_instances()
elif flask.request.method == "DELETE":
return self.delete_vib_vnf_instances()
def im_vib_vnfi_vnfId(self, vnfId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_vnfi_vnfId(vnfId)
elif flask.request.method == "POST":
return self.post_vib_vnfi_vnfId()
elif flask.request.method == "PUT":
return self.put_vib_vnfi_vnfId()
elif flask.request.method == "PATCH":
return self.patch_vib_vnfi_vnfId(vnfId, flask.request.values.get("vibVnfInstance"))
elif flask.request.method == "DELETE":
return self.delete_vib_vnfi_vnfId(vnfId)
def im_vib_platforms(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_platforms()
elif flask.request.method == "POST":
return self.post_vib_platforms(flask.request.values.get("vibPlatformInstance"))
elif flask.request.method == "PUT":
return self.put_vib_platforms()
elif flask.request.method == "PATCH":
return self.patch_vib_platforms()
elif flask.request.method == "DELETE":
return self.delete_vib_platforms()
def im_vib_p_platformId(self, platformId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_p_platformId(platformId)
elif flask.request.method == "POST":
return self.post_vib_p_platformId()
elif flask.request.method == "PUT":
return self.put_vib_p_platformId()
elif flask.request.method == "PATCH":
return self.patch_vib_p_platformId(platformId, flask.request.values.get("vibPlatformInstance"))
elif flask.request.method == "DELETE":
return self.delete_vib_p_platformId(platformId)
def im_vib_vnf_managers(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_vnf_managers()
elif flask.request.method == "POST":
return self.post_vib_vnf_managers(flask.request.values.get("vibVnfmInstance"))
elif flask.request.method == "PUT":
return self.put_vib_vnf_managers()
elif flask.request.method == "PATCH":
return self.patch_vib_vnf_managers()
elif flask.request.method == "DELETE":
return self.delete_vib_vnf_managers()
def im_vib_vnfm_managerId(self, managerId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_vnfm_managerId(managerId)
elif flask.request.method == "POST":
return self.post_vib_vnfm_managerId()
elif flask.request.method == "PUT":
return self.put_vib_vnfm_managerId()
elif flask.request.method == "PATCH":
return self.patch_vib_vnfm_managerId(managerId, flask.request.values.get("vibVnfmInstance"))
elif flask.request.method == "DELETE":
return self.delete_vib_vnfm_managerId(managerId)
def im_vib_vnf_managers_drivers(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_vnf_manager_drivers()
elif flask.request.method == "POST":
return self.post_vib_vnf_manager_drivers(flask.request.values.get("vibVnfmDriverInstance"))
elif flask.request.method == "PUT":
return self.put_vib_vnf_manager_drivers()
elif flask.request.method == "PATCH":
return self.patch_vib_vnf_manager_drivers()
elif flask.request.method == "DELETE":
return self.delete_vib_vnf_manager_drivers()
def im_vib_vnfmd_vnfmId(self, vnfmId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("VIB", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_vib_vnfmd_vnfmId(vnfmId)
elif flask.request.method == "POST":
return self.post_vib_vnfmd_vnfmId()
elif flask.request.method == "PUT":
return self.put_vib_vnfmd_vnfmId()
elif flask.request.method == "PATCH":
return self.patch_vib_vnfmd_vnfmId(vnfmId, flask.request.values.get("vibVnfmDriverInstance"))
elif flask.request.method == "DELETE":
return self.delete_vib_vnfmd_vnfmId(vnfmId)
def im_ms_running_subscription(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("MS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_ms_running_subscription()
elif flask.request.method == "POST":
return self.post_ms_running_subscription()
elif flask.request.method == "PUT":
return self.put_ms_running_subscription()
elif flask.request.method == "PATCH":
return self.patch_ms_running_subscription()
elif flask.request.method == "DELETE":
return self.delete_ms_running_subscription()
def im_msrs_subscriptionId(self, subscriptionId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("MS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_msrs_subscriptionId(subscriptionId)
elif flask.request.method == "POST":
return self.post_msrs_subscriptionId(subscriptionId)
elif flask.request.method == "PUT":
return self.put_msrs_subscriptionId()
elif flask.request.method == "PATCH":
if "agentArguments" in flask.request.values:
return self.patch_msrs_subscriptionId(subscriptionId, flask.request.values.get("agentArguments"))
else:
return self.patch_msrs_subscriptionId(subscriptionId, None)
elif flask.request.method == "DELETE":
return self.delete_msrs_subscriptionId(subscriptionId)
def im_ms_subscription(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("MS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_ms_subscription()
elif flask.request.method == "POST":
return self.post_ms_subscription(flask.request.values.get("vnfIndicatorSubscriptionRequest"))
elif flask.request.method == "PUT":
return self.put_ms_subscription()
elif flask.request.method == "PATCH":
return self.patch_ms_subscription()
elif flask.request.method == "DELETE":
return self.delete_ms_subscription()
def im_mss_subscriptionId(self, subscriptionId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("MS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_mss_subscriptionId(subscriptionId)
elif flask.request.method == "POST":
return self.post_mss_subscriptionId()
elif flask.request.method == "PUT":
return self.put_mss_subscriptionId()
elif flask.request.method == "PATCH":
return self.patch_mss_subscriptionId(subscriptionId, flask.request.values.get("vnfIndicatorSubscription"))
elif flask.request.method == "DELETE":
return self.delete_mss_subscriptionId(subscriptionId)
def im_ms_agent(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("MS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_ms_agent()
elif flask.request.method == "POST":
return self.post_ms_agent(flask.request.values.get("vibMaInstance"))
elif flask.request.method == "PUT":
return self.put_ms_agent()
elif flask.request.method == "PATCH":
return self.patch_ms_agent()
elif flask.request.method == "DELETE":
return self.delete_ms_agent()
def im_msa_agentId(self, agentId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("MS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_msa_agentId(agentId)
elif flask.request.method == "POST":
return self.post_msa_agentId()
elif flask.request.method == "PUT":
return self.put_msa_agentId()
elif flask.request.method == "PATCH":
return self.patch_msa_agentId(agentId, flask.request.values.get("vibMaInstance"))
elif flask.request.method == "DELETE":
return self.delete_msa_agentId(agentId)
def im_as_authenticator(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("AS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_as_authenticator()
elif flask.request.method == "POST":
return self.post_as_authenticator()
elif flask.request.method == "PUT":
return self.put_as_authenticator()
elif flask.request.method == "PATCH":
return self.patch_as_authenticator()
elif flask.request.method == "DELETE":
return self.delete_as_authenticator()
def im_as_a_authenticatorId(self, authenticatorId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("AS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_as_a_authenticatorId(authenticatorId)
elif flask.request.method == "POST":
return self.post_as_a_authenticatorId()
elif flask.request.method == "PUT":
return self.put_as_a_authenticatorId()
elif flask.request.method == "PATCH":
return self.patch_as_a_authenticatorId()
elif flask.request.method == "DELETE":
return self.delete_as_a_authenticatorId()
def im_as_running_authenticator(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("AS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_as_running_authenticator()
elif flask.request.method == "POST":
return self.post_as_running_authenticator()
elif flask.request.method == "PUT":
return self.put_as_running_authenticator()
elif flask.request.method == "PATCH":
return self.patch_as_running_authenticator()
elif flask.request.method == "DELETE":
return self.delete_as_running_authenticator()
def im_as_ra_authenticatorId(self, authenticatorId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("AS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_as_ra_authenticatorId(authenticatorId)
elif flask.request.method == "POST":
return self.post_as_ra_authenticatorId(authenticatorId)
elif flask.request.method == "PUT":
return self.put_as_ra_authenticatorId()
elif flask.request.method == "PATCH":
return self.patch_as_ra_authenticatorId()
elif flask.request.method == "DELETE":
return self.delete_as_ra_authenticatorId()
def im_as_user(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("AS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_as_user()
elif flask.request.method == "POST":
return self.post_as_user(flask.request.values.get("vibUserInstance"))
elif flask.request.method == "PUT":
return self.put_as_user()
elif flask.request.method == "PATCH":
return self.patch_as_user()
elif flask.request.method == "DELETE":
return self.delete_as_user()
def im_as_u_userId(self, userId):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("AS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_as_u_userId(userId)
elif flask.request.method == "POST":
return self.post_as_u_userId()
elif flask.request.method == "PUT":
return self.put_as_u_userId()
elif flask.request.method == "PATCH":
return self.patch_as_u_userId(userId, flask.request.values.get("vibUserInstance"))
elif flask.request.method == "DELETE":
return self.delete_as_u_userId(userId)
def im_as_credential(self):
if self.__oaAa.getRunningAuthenticator() != "None":
if self.__authenticateRequest("AS", None) != True:
return "ERROR CODE #4 (AA): REQUEST COULD NOT BE AUTHENTICATED", 400
if flask.request.method == "GET":
return self.get_as_credential()
elif flask.request.method == "POST":
return self.post_as_credential(flask.request.values.get("vibCredentialInstance"))
elif flask.request.method == "PUT":
return | |
import warnings
from scipy.stats.stats import pearsonr
from geosoup.common import Handler, Opt, Sublist, np
__all__ = ['Samples']
class Samples:
"""
Class to read and arrange sample data.
Stores label and label names in y and y_names
Stores feature and feature names in x and x_names.
Currently the user has to provide sample csv files with one column as label (output)
and the rest of the columns as feature attributes. There should be no index number column.
All columns should be data only.
"""
    def __init__(self,
                 csv_file=None,
                 label_colname=None,
                 x=None,
                 y=None,
                 x_name=None,
                 y_name=None,
                 weights=None,
                 weights_colname=None,
                 use_band_dict=None,
                 max_allow_x=1e13,
                 max_allow_y=1e13,
                 line_limit=None,
                 remove_null=True,
                 **kwargs):
        """
        Build a Samples container either from a csv file or from in-memory arrays.

        :param csv_file: csv file that contains the features (training or validation samples)
        :param label_colname: column in csv file that contains the feature label (output value)
        :param x: 2d array containing features (samples) without the label
        :param y: 1d array of feature labels (same order as x)
        :param x_name: 1d array of feature names (bands).
                       Can be used to select which columns to read from csv file.
        :param y_name: name of label
        :param weights: 1d array of sample weights (same order as x)
        :param weights_colname: csv column to pull sample weights from (removed from x)
        :param use_band_dict: list of attribute (band) names
        :param max_allow_x: Maximum allowed values of x
        :param max_allow_y: Maximum allowed value of y
        :param line_limit: maximum number of csv lines to read (None reads all)
        :param remove_null: if True, drop any sample containing a null-like value
        :param kwargs: optional 'columns' (column indices to use) and 'ids' (sample IDs)
        """
        self.csv_file = csv_file
        self.label_colname = label_colname
        # accept any iterable for x/y but store as ndarray (None passes through)
        if type(x).__name__ in ('ndarray', 'NoneType'):
            self.x = x
        else:
            self.x = np.array(list(x))
        self.x_name = x_name
        if type(y).__name__ in ('ndarray', 'NoneType'):
            self.y = y
        else:
            self.y = np.array(list(y))
        self.y_name = y_name
        self.weights = weights
        self.weights_colname = weights_colname
        self.use_band_dict = use_band_dict
        # summary/statistics attributes, filled in further below where possible
        self.index = None
        self.nfeat = None
        self.xmin = None
        self.xmax = None
        self.ymin = None
        self.ymax = None
        self.y_hist = None
        self.y_bin_edges = None
        self.x_hist = None
        self.x_bin_edges = None
        self.max_allow_x = max_allow_x
        self.max_allow_y = max_allow_y
        # label name or csv file are provided
        if (label_colname is not None) and (csv_file is not None):
            temp = Handler(filename=csv_file).read_from_csv(return_dicts=True,
                                                            line_limit=line_limit)
            header = list(temp[0])
            # locate the label column; fail loudly if it is absent
            if label_colname in header:
                loc = header.index(label_colname)
            else:
                raise ValueError("Label name mismatch.\nAvailable names: " + ', '.join(header))
            feat_names = header.copy()
            _ = feat_names.pop(loc)
            # read from data dictionary; x_name (if given as list/tuple) acts as a filter
            if self.x_name is not None and type(self.x_name) in (list, tuple):
                self.x_name = [elem for elem in feat_names if elem in self.x_name]
            else:
                self.x_name = feat_names
            clean_list = []
            if remove_null:
                for elem_dict in temp:
                    # NOTE(review): 'elem in (int, float)' compares the value against
                    # the *type objects* int and float, so the np.isnan() check can
                    # never run; presumably isinstance(elem, (int, float)) was intended
                    val_chk = list((elem in (None, '', ' ', 'null', 'NULL', '<null>', '<NULL>')) or
                                   (elem in (int, float) and np.isnan(elem))
                                   for elem in elem_dict.values())
                    if any(val_chk):
                        continue
                    else:
                        clean_list.append(elem_dict)
            else:
                clean_list = temp
            # NOTE(review): columns come from feat_names (all non-label columns),
            # not the possibly filtered self.x_name used by the elif branch below
            # -- confirm whether the filter should apply here as well
            self.x = np.array(list(list(samp_dict[feat_name] for feat_name in feat_names)
                                   for samp_dict in clean_list))
            self.y = np.array(list(samp_dict[label_colname] for samp_dict in clean_list))
            self.y_name = label_colname
            # if band name dictionary is provided
            # NOTE(review): y_name is a string here, so this iterates its characters;
            # looks like it was meant to remap x_name -- verify before relying on it
            if use_band_dict is not None:
                self.y_name = [use_band_dict[b] for b in self.y_name]
        elif (label_colname is None) and (csv_file is not None):
            temp = Handler(filename=csv_file).read_from_csv(return_dicts=True,
                                                            line_limit=line_limit)
            clean_list = []
            if remove_null:
                for elem_dict in temp:
                    # same null screen as above (with the same dead NaN check)
                    val_chk = list((elem in (None, '', ' ', 'null', 'NULL', '<null>', '<NULL>')) or
                                   (elem in (int, float) and np.isnan(elem))
                                   for elem in elem_dict.values())
                    if any(val_chk):
                        continue
                    else:
                        clean_list.append(elem_dict)
            else:
                clean_list = temp
            # read from data dictionary
            feat_names = list(clean_list[0].keys())
            if self.x_name is not None and type(self.x_name) in (list, tuple):
                self.x_name = [elem for elem in feat_names if elem in self.x_name]
            else:
                self.x_name = feat_names
            self.x = np.array(list(list(samp_dict[feat_name] for feat_name in self.x_name)
                                   for samp_dict in clean_list))
        else:
            warnings.warn("Samples class initiated without data file and/or label",
                          category=RuntimeWarning, stacklevel=1)
        # fill in default names when arrays were passed directly
        if self.x is not None and self.y is not None:
            if self.y_name is None:
                self.y_name = 'y'
            if (self.x_name is None) or \
                    (type(self.x_name) not in (list, tuple)) or \
                    (len(self.x_name) != self.x.shape[1]):
                self.x_name = list('x{}'.format(str(i+1)) for i in range(self.x.shape[1]))
        if weights is None:
            if weights_colname is not None:
                if csv_file is not None:
                    # NOTE(review): substring membership test ('in n') but exact
                    # .index() lookup below -- a partial match would raise ValueError
                    # at .index(); confirm whether exact matching was intended
                    if any(weights_colname in n for n in self.x_name):
                        loc = self.x_name.index(weights_colname)
                    else:
                        raise ValueError("Weight column name mismatch")
                    # pull the weights column out of the feature matrix
                    self.weights = self.x[:, loc]
                    self.x = np.delete(self.x, loc, 1)
                else:
                    raise ValueError("No csv_file specified for weights")
        # if keywords are supplied
        # (kwargs is always a dict for **kwargs, so the else branch below is dead)
        if kwargs is not None:
            # columns containing data
            if 'columns' in kwargs:
                if type(kwargs['columns']).__name__ == 'list':
                    self.columns = np.array(kwargs['columns'])
                elif type(kwargs['columns']).__name__ in ('ndarray', 'NoneType'):
                    self.columns = kwargs['columns']
                else:
                    self.columns = np.array(list(kwargs['columns']))
            else:
                self.columns = None
            # IDs of samples
            if 'ids' in kwargs:
                self.ids = kwargs['ids']
            else:
                self.ids = None
        else:
            self.columns = None
            self.ids = None
        if self.x is not None:
            if self.columns is None:
                self.columns = np.arange(0, self.x.shape[1])
            self.nsamp = self.x.shape[0]
            self.nvar = self.x.shape[1]
            self.nfeat = self.x.shape[1]
            if np.issubdtype(self.x.dtype, np.number):
                # NOTE(review): 'initial' is an extra candidate value for min()/max();
                # max(0, initial=max_allow_y) pins xmax at the sentinel (and mixes up
                # the x/y limits) -- compare merge() and confirm the intended signs
                self.xmin = self.x.min(0, initial=max_allow_x)
                self.xmax = self.x.max(0, initial=max_allow_y)
            self.index = np.arange(0, self.x.shape[0])
        else:
            self.nsamp = 0
            self.nvar = 0
        if self.y is not None:
            if np.issubdtype(self.y.dtype, np.number):
                # NOTE(review): same 'initial' concern -- these pin ymin/ymax to
                # -/+max_allow_y regardless of the data
                self.ymin = self.y.min(initial=-max_allow_y)
                self.ymax = self.y.max(initial=max_allow_y)
        if self.y is not None:
            # printable preview of the first 10 samples
            # NOTE(review): raises IndexError when there are fewer than 10 samples
            self.head = '\n'.join(list(str(elem) for elem in
                                       [' '.join(list(self.x_name) + [self.y_name])] +
                                       list(' '.join(list(str(elem_) for elem_ in self.x[i, :].tolist() + [self.y[i]]))
                                            for i in range(10))))
        else:
            self.head = '<empty>'
def __repr__(self):
"""
Representation of the Samples object
:return: Samples class representation
"""
if self.csv_file is not None:
return "<Samples object from {cf} with {v} variables, label: {l}, {n} samples>".format(cf=Handler(
self.csv_file).basename,
l=self.y_name,
n=self.x.shape[0],
v=self.x.shape[1])
elif self.csv_file is None and self.x is not None:
return "<Samples object with {v} variables, {n} samples>".format(n=self.x.shape[0],
v=self.x.shape[1])
else:
return "<Samples object: EMPTY>"
def subsample(self,
index_locations):
"""
Method to get index locations as a sample object
:param index_locations: list, tuple, numpy array or integer of index locations
:returns: Sample object
"""
if isinstance(index_locations, list) or \
isinstance(index_locations, tuple) or \
isinstance(index_locations, np.ndarray) or \
isinstance(index_locations, int):
warnings.simplefilter('ignore')
outsamp = Samples()
warnings.simplefilter('default')
outsamp.x_name = self.x_name
outsamp.y_name = self.y_name
if isinstance(index_locations, int):
loc = np.array([index_locations])
else:
loc = np.array(index_locations)
outsamp.x = self.x[np.array(loc), :]
outsamp.y = self.y[np.array(loc)]
outsamp.nsamp = outsamp.x.shape[0]
outsamp.index = np.arange(0, outsamp.nsamp)
outsamp.nfeat = outsamp.x.shape[1]
return outsamp
else:
raise TypeError("subsample() method works for list, tuple, numpy array or integer data types only")
def format_data(self):
"""
Method to format the samples to the RF model fit method
:param self
:return: dictionary of features and labels
"""
if self.columns is not None:
column_list = []
column_list += self.columns.tolist()
out_x = self.x[:, self.columns]
out_x_name = list(self.x_name[i] for i in column_list)
else:
out_x = self.x
out_x_name = self.x_name
return {
'features': out_x.copy(),
'labels': self.y.copy(),
'label_name': Opt.__copy__(self.y_name),
'feature_names': Opt.__copy__(out_x_name),
}
def select_features(self,
name_list=None):
"""
Method to return a Samples instance using a selection of feature names
:param name_list: List of feature names to make a new Samples() instance from
:returns: Samples instance
"""
indx_list = []
for name in name_list:
indx_list.append(self.x_name.index(name))
samp = Samples(label_colname=self.y_name,
x=self.x[:, np.array(indx_list)],
y=self.y,
x_name=name_list,
y_name=self.y_name,
weights=self.weights,
weights_colname=self.weights_colname,
use_band_dict=self.use_band_dict,
max_allow_x=self.max_allow_x,
max_allow_y=self.max_allow_y)
samp.csv_file = self.csv_file
return samp
def correlation_matrix(self,
verbose=False):
"""
Method to return a dictionary with correlation data
rows = columns = variables (or dimensions)
:param verbose: Should the elements of correlation matrix
be displayed while being calculated? (default: True)
:return: Dictionary
"""
if np.issubdtype(self.x.dtype, np.number):
# get data from samples
data_mat = self.x
nsamp, nvar = data_mat.shape
print(nsamp, nvar)
# get names of variables
var_names = list()
for i, name in enumerate(self.x_name):
print(str(i)+' '+name)
var_names.append(name.upper())
# initialize correlation matrix
corr = np.zeros([nvar, nvar], dtype=np.float32)
pval = np.zeros([nvar, nvar], dtype=np.float32)
# calculate correlation matrix
for i in range(0, nvar):
for j in range(0, nvar):
corr[i, j] = pearsonr(data_mat[:, i], data_mat[:, j])[0]
pval[i, j] = pearsonr(data_mat[:, i], data_mat[:, j])[1]
if verbose:
str1 = '{row} <---> {col} = '.format(row=var_names[i], col=var_names[j])
str2 = '{:{w}.{p}f}'.format(corr[i, j], w=3, p=2)
print(str1 + str2)
return {'corr': corr, 'pval': pval, 'names': var_names}
else:
raise TypeError("Flexible data type for X - Cannot compute histograms")
def merge(self,
samp):
"""
Merge two sample sets together
column and label names and orders should be the same in the two datasets
:param self, samp:
"""
self.x = np.vstack((self.x, samp.x))
self.y = np.hstack((self.y, samp.y))
self.nsamp = self.x.shape[0]
self.index = np.arange(0, self.nsamp)
if np.issubdtype(self.x.dtype, np.number):
self.xmin = self.x.min(0, initial=-self.max_allow_x)
self.xmax = self.x.max(0, initial=self.max_allow_x)
if np.issubdtype(self.y.dtype, np.number):
self.ymin = self.y.min(initial=-self.max_allow_y)
self.ymax = self.y.max(initial=self.max_allow_y)
def delete_column(self,
column_id=None,
column_name=None):
| |
# prev_ob_models/exclude/GilraBhalla2015/analysis/fit_odor_morphs_withFULLlin.py (repo: fameshpatel/olfactorybulb)
# -*- coding: utf-8 -*-
########## THIS FITTING PROGRAM IS MEANT TO BE A CLONE OF MUKUND'S AND ADIL'S MATLAB ONE
## USAGE: python2.6 fit_odor_morphs.py ../results/odor_morphs/2011-01-13_odormorph_SINGLES_JOINTS_PGS.pickle [CHISQ_HIST] [SAVEFIG]
from scipy import optimize
from scipy.special import * # has error function erf() and inverse erfinv()
from pylab import *
import pickle
import sys
import math
sys.path.extend(["..","../networks","../generators","../simulations"])
from stimuliConstants import * # has SETTLETIME, inputList and pulseList, GLOMS_ODOR, GLOMS_NIL
from networkConstants import * # has central_glom
from sim_utils import * # has rebin() to alter binsize
from analysis_utils import * # has read_morphfile() and NUM_REBINS, etc.
## use error function(x) for x>=0 (zero for x<0),
## OR use sigmoid(x) (non-zero for -ve x)
USE_ERF = False#True
## global counter incremented on every chisqfunc() evaluation (progress display)
iterationnum = 1
## I don't use the NUMBINS in simset_odor.py, rather I rebin() with below NUM_REBINS
## Adil used 17 bins for a 1s rat respiration cycle.
## I'm using 9 bins to get the same binwidth, else there are oscillations ~ 35 Hz gamma?
NUM_REBINS = 9#17
## number of odor-mixture stimuli (inputList comes from stimuliConstants)
NUMMIX = len(inputList)
## remove the two pure odors and one pure air weights
NUMWTS = NUMMIX-3
## True forces fresh initial parameters instead of loading previously saved fits
firstrun = False#True
### numbers of mitral to be fitted.
fitted_mitral_list = [2*central_glom+0, 2*central_glom+1]
## Fit type: 'lin' : linear or 'arb' : monotonic arbitrary
## if arbitrary fit_type, weights are also free params,
## if linear fit_type, weights are not free params.
## This param is passed to fit_morphs()
fit_type = 'arb'
## scale factor for the logistic used in outputsigmoid()/inversesigmoid()
log81 = math.log(81)
def constrain0to1(x):
    """Logistic squashing of x into (0,1), computed in a numerically stable way.

    The previous implementation computed exp(x)/(1+exp(x)) directly and relied
    on catching OverflowError for large x; numpy's exp (imported via pylab)
    does not raise on overflow -- it returns inf with a warning, so the old
    code yielded nan (inf/inf) instead of 1.0. Evaluating the branch whose
    exp() argument is <= 0 makes overflow impossible.
    """
    if x >= 0:
        return 1.0/(1.0 + exp(-x))
    expx = exp(x)
    return expx/(1.0 + expx)
# define sigmoid which runs from (-0.5,0.1) to (+0.5,0.9)
# Ideally the fitted sigmoid should be shifted by 0.5 i.e.
# exp((x-0.5)*log81)/(1+exp((x-0.5)*log81))
# This will overlap most of the linear part.
# But for fitting it doesn't matter,
# the fit routine will shift the parameters as required.
# But while plotting the internal response parameters,
# shift by 0.5 and plot -- see below
def outputsigmoid(x):
    """Forward squashing nonlinearity for the fitted mixture response.

    If USE_ERF: error function for x>=0, hard zero for x<0.
    Otherwise: logistic on x*log81 (see the module comment above outputsigmoid
    about the 0.5 shift used only for plotting), computed stably.

    The previous direct exp(z)/(1+exp(z)) relied on catching OverflowError,
    but numpy's exp (from pylab) returns inf instead of raising, which made
    large inputs produce nan (inf/inf) rather than 1.0; branching on the sign
    of the exponent avoids overflow entirely.
    """
    if USE_ERF:
        if x < 0:
            return 0
        return erf(x)
    z = x*log81
    if z >= 0:
        return 1.0/(1.0 + exp(-z))
    expz = exp(z)
    return expz/(1.0 + expz)
def inversesigmoid(x):
    """Inverse of the squashing nonlinearity, used to seed initial fit params.

    If USE_ERF: identity for negative x, inverse error function otherwise.
    Otherwise: the logit log(x/(1-x)), with tiny x clamped to a large
    negative value (exact magnitude is unimportant near zero).
    """
    if USE_ERF:
        return x if x < 0 else erfinv(x)
    if x > 1e-200:
        return math.log(x/(1-x))
    return -5e2
def rectifier(x):
    """Clamp negative entries of array x to zero, in place, and return x.

    Uses a boolean mask, which is element-wise correct for any array rank;
    the previous x[where(x<0)[0]] = 0 used only the first index array, which
    would zero entire rows of a 2-D input (harmless for the 1-D response
    vectors used here, but wrong in general).
    """
    x[x < 0] = 0
    return x
def chisqfunc(params, ydata, errdata, fit_type):
    """Residual vector for the least-squares fit: (data - model)/error.

    params layout: [RA (NUM_REBINS), RB (NUM_REBINS), Rair (NUM_REBINS)] and,
    for fit_type 'arb', additionally NUMWTS mixture weights for odor A,
    NUMWTS for odor B, and one scalar controlling sigmoidmax.
    ydata/errdata: per-mixture binned firing rates and their errors.
    Returns chi values (not squared), normalized by sqrt(number of dof).
    """
    # internal response vectors for odor A, odor B and plain air
    RA = params[0:NUM_REBINS]
    RB = params[NUM_REBINS:2*NUM_REBINS]
    Rair = params[2*NUM_REBINS:3*NUM_REBINS]
    if fit_type == 'arb':
        #### for the weights also, we use exactly what is done by Mukund and Adil in matlab:
        #### constrain weights to be between 0 and 1
        #### sort the weights to ensure monotonicity
        inputsA = [ constrain0to1(x) for x in params[3*NUM_REBINS:(3*NUM_REBINS+NUMWTS)] ]
        ## important to put these in else along with sort(),
        ## weights saturate at 0.9 or so rather than at 1.0
        inputsA.extend([0.0,1.0]) # for pure odors
        inputsA.sort() # in place sort
        inputsA.append(0.0) # for air - keep this after sort!
        inputsB = [ constrain0to1(x) for x in params[(3*NUM_REBINS+NUMWTS):(3*NUM_REBINS+2*NUMWTS)] ]
        ## important to put these in else along with sort(),
        ## weights saturate at 0.9 or so rather than at 1.0
        inputsB.extend([0.0,1.0]) # for pure odors
        inputsB.sort(reverse=True) # weights of odor B need to be used in reverse
        inputsB.append(0.0) # for air - keep this after sort!
        #### Mukund and Adil constrained sigmoidmax > ydatamax (note exp(x)>0.)
        sigmoidmax = ydata.max() + exp(params[3*NUM_REBINS+2*NUMWTS])
    else:
        ## *-operator unpacks the list which become args of zip()
        ## zip collects the i-th elements together of all the args.
        inputsA,inputsB = zip(*inputList) # keep the last (0,0) air input
    global iterationnum
    if iterationnum%1000==0: print 'iteration number =',iterationnum
    #if iterationnum%100==0: print inputsA, inputsB
    # seed the residual list with a constant zero entry
    chisqarray = [0.0]
    # NOTE: the (inputA,inputB) loop variables are unused for 'arb' fits; only
    # the index i is used, to pick the fitted weights from inputsA/inputsB
    for i,(inputA,inputB) in enumerate(inputList):
        CA = inputsA[i]
        CB = inputsB[i]
        if fit_type == 'arb':
            # sigmoid model: per-bin squashed sum of weighted responses
            for bin in range(NUM_REBINS):
                Rmix = sigmoidmax*outputsigmoid( Rair[bin] + CA*RA[bin] + CB*RB[bin] )
                chisqarray.append( (ydata[i][bin] - Rmix)/errdata[i][bin] ) # divide by error to do chi-square fit
        else:
            # linear model: rectified weighted sum, vectorized over all bins
            Rmix = rectifier(Rair + CA*RA + CB*RB)
            chisqarray.extend( (ydata[i] - Rmix)/errdata[i] )
    ## not yet squared, so normalize 'chi' to sqrt of number of dof
    chisqarray = array(chisqarray) / sqrt(ydata.size-params.size)
    iterationnum += 1
    return chisqarray # misnomer -- actually individual chi array
def fit_morphs(filename, fitted_mitral, fit_type='arb', refit=True):
    """Fit the odor-morph mixture model for one mitral cell.

    filename      -- data file read by read_morphfile().
    fitted_mitral -- index of the mitral cell being fitted (also used in the
                     pickled-parameters filename).
    fit_type      -- 'arb': sigmoidal output nonlinearity with free mixture
                     weights; anything else: fully linear model with a
                     rectifier output and weights fixed to inputList.
    refit         -- if False, use previously fitted parameters (either just
                     built on this first run, or unpickled) without calling
                     the optimizer again.

    Parameter vector layout (length depends on fit_type):
      params[0:N]    -- odor A response kernel        (N = NUM_REBINS)
      params[N:2N]   -- odor B response kernel
      params[2N:3N]  -- air response kernel
      'arb' only: 2*NUMWTS mixture-weight params, then one final param whose
      exp() sets sigmoidmax above the data maximum.

    Returns (params, chisq, inputsA, inputsB, fitted_responses, numavgs,
             firingbinsmeanList, firingbinserrList).

    NOTE(review): relies on module globals NUM_REBINS, NUMWTS, inputList,
    firstrun, log81 and helpers inversesigmoid/constrain0to1/outputsigmoid/
    rectifier/chisqfunc -- confirm they are defined before calling.
    """
    ## The model predicts the individual response not the mean.
    ## Hence below fitting uses standard deviation, not standard error of the mean.
    numavgs,firingbinsmeanList,firingbinserrList = read_morphfile(filename,fitted_mitral,NUM_REBINS)
    ########################## Initial values for the parameters
    # Fitted parameters are cached in a pickle file keyed by fit type and cell.
    if fit_type=='arb':
        params_filename = filename+'_params'+str(fitted_mitral)
    else:
        params_filename = filename+'_paramsFULLlin'+str(fitted_mitral)
    if firstrun or refit:
        params0 = []
        spikesmax = firingbinsmeanList.max()
        # Pure-odor and air responses taken straight from the data as seeds.
        RA = firingbinsmeanList[-2] # odor A is last but one
        RB = firingbinsmeanList[0] # odor B is first
        Rair = firingbinsmeanList[-1] # air response is last
        # The initial parameters are for odor A followed by odor B
        # extend(): Don't add the list as an element but add the elements of the list
        if fit_type == 'arb':
            # Seed kernels by inverting the output sigmoid at the observed
            # (normalized) rates, relative to the air response.
            # the small value 0.001 should be put, else divide by zero errors in chi-sq!
            params0.extend([ ( inversesigmoid(0.998*RA[i]/spikesmax+0.001) - \
                inversesigmoid(0.998*Rair[i]/spikesmax+0.001) )/log81 for i in range(NUM_REBINS) ])
            params0.extend([ ( inversesigmoid(0.998*RB[i]/spikesmax+0.001) - \
                inversesigmoid(0.998*Rair[i]/spikesmax+0.001) )/log81 for i in range(NUM_REBINS) ])
            # initial params for the air vector # air is last
            params0.extend([ inversesigmoid(0.998*Rair[i]/spikesmax+0.001)/log81 for i in range(NUM_REBINS) ])
            params0.extend([0.0]*2*NUMWTS) # weights of mixtures
            # argument for the exp in sigmoidmax as per Mukund and Adil.
            # -1 gives match for generated data, -3 went into local minimum.
            params0.append(-1)
        else:
            # Linear model: kernels are plain air-subtracted responses.
            params0.extend(RA - Rair)
            params0.extend(RB - Rair)
            params0.extend(Rair)
        ##### pure odor concentrations are not parameters.
        ##### They are set to (CA=1,CB=0) and (CA=0,CB=1) and act as normalization.
        ## if arbitrary fit_type, weights are also free params,
        ## if linear fit_type, weights are not free params.
        if fit_type == 'arb':
            ## take only the mixture values, not the start and end-1 points which are pure odors,
            ## nor end point which is pure air
            for i,(inputA,inputB) in enumerate(inputList[1:-2]):
                # to constrain weights between 0 and 1, sigmoid is used,
                # so use inversesigmoid to set initial value for weights
                params0[3*NUM_REBINS+i] = inversesigmoid(inputA)
                params0[3*NUM_REBINS+NUMWTS+i] = inversesigmoid(inputB)
    else:
        # Not the first run and no refit requested: reuse pickled parameters.
        f = open(params_filename,'r')
        params0,chisq = pickle.load(f)
        f.close()
    ###################################### Fitting
    if not refit:
        params = array(params0) ## only use params, do not fit again
    else:
        ## args is a tuple! if only one element write (elem, )
        params = optimize.leastsq( chisqfunc, params0,
            args=(firingbinsmeanList, firingbinserrList, fit_type), full_output=1, maxfev=50000)
        params = params[0] # leastsq returns a whole tuple of stuff - errmsg etc.
    ## Calculate sum of squares of the chisqarray
    chisqarraysq = [i**2 for i in chisqfunc(params, firingbinsmeanList, firingbinserrList, fit_type)]
    chisq = reduce(lambda x, y: x+y, chisqarraysq)
    if refit:
        # Cache the freshly fitted parameters for later no-refit runs.
        paramsfile = open(params_filename,'w')
        pickle.dump((params,chisq), paramsfile)
        paramsfile.close()
    ############################## Calculate fitted responses and return them
    if fit_type == 'arb':
        #### for the weights also, we use exactly what is done by Mukund and Adil in matlab:
        #### constrain weights to be between 0 and 1
        #### sort the weights to ensure monotonicity
        inputsA = [ constrain0to1(x) for x in params[3*NUM_REBINS:(3*NUM_REBINS+NUMWTS)] ]
        inputsA.extend([0.0,1.0])
        inputsA.sort() # in place sort
        inputsB = [ constrain0to1(x) for x in params[(3*NUM_REBINS+NUMWTS):(3*NUM_REBINS+2*NUMWTS)] ]
        inputsB.extend([0.0,1.0])
        inputsB.sort(reverse=True) # weights of odor B need to be used in reverse
        #### Mukund and Adil constrained sigmoidmax > ydatamax (note exp(x)>0.)
        sigmoidmax = firingbinsmeanList.max() + math.exp(params[3*NUM_REBINS+2*NUMWTS])
    else:
        ## *-operator unpacks the list which become args of zip()
        ## zip collects the i-th elements together of all the args.
        inputsA,inputsB = zip(*(inputList[:-1])) # leave out the last (0,0) air input
    fitted_responses = []
    Rair = params[2*NUM_REBINS:3*NUM_REBINS]
    # One fitted response per (non-air) morph point; air appended separately below.
    for inpnum,(inputA,inputB) in enumerate(inputList[:-1]):
        if fit_type == 'arb':
            fitted_responses.append(\
                [ sigmoidmax*outputsigmoid( \
                inputsA[inpnum]*params[i] + inputsB[inpnum]*params[NUM_REBINS+i] + Rair[i]\
                ) for i in range(NUM_REBINS) ] )
        else:
            fitted_responses.append( rectifier( \
                inputsA[inpnum]*params[:NUM_REBINS] + \
                inputsB[inpnum]*params[NUM_REBINS:2*NUM_REBINS] + Rair ) )
    # Pure-air prediction: both concentrations zero, only the air kernel remains.
    if fit_type == 'arb':
        fitted_responses.append([ sigmoidmax*outputsigmoid( Rair[i] ) \
            for i in range(NUM_REBINS) ] )
    else:
        fitted_responses.append( rectifier(Rair) )
    return (params,chisq,inputsA,inputsB,fitted_responses,numavgs,firingbinsmeanList,firingbinserrList)
def plot_example_onemit(ax1,ax2,fitted_mitral,mit_fit_params):
bindt = RESPIRATION/float(NUM_REBINS)
respiration2time = arange(RESPIRATION,2*RESPIRATION,bindt) + bindt/2.0
params,chisq,inputsA,inputsB,fitted_responses,numavgs,firingbinsmeanList,firingbinserrList =\
mit_fit_params
print "Mit",fitted_mitral,"normalized chisq =",chisq
brightness = 0.2
num_morphs = len(inputList)-1
for i,(inputA,inputB) in enumerate(inputList):
## The inputA acts to morph odor response from red | |
#!/usr/bin/env python
# encoding: utf-8
# General utility methods.
#
# https://github.com/stefanvanberkum/CD-ABSC
#
# Adapted from Trusca, Wassenberg, Frasincar and Dekker (2020).
# https://github.com/mtrusca/HAABSA_PLUS_PLUS
#
# <NAME>., <NAME>., <NAME>., <NAME>. (2020) A Hybrid Approach for Aspect-Based Sentiment Analysis Using
# Deep Contextual Word Embeddings and Hierarchical Attention. In: <NAME>., <NAME>., <NAME>. (eds) Web
# Engineering. ICWE 2020. Lecture Notes in Computer Science, vol 12128. Springer, Cham.
# https://doi.org/10.1007/978-3-030-50578-3_25
import numpy as np
from config import *
def batch_index(length, batch_size, n_iter=100, is_shuffle=True):
    """
    Method obtained from Trusca et al. (2020), no original docstring provided.

    Generator over mini-batch index lists: performs n_iter passes over the
    sample indices 0..length-1, optionally reshuffling before each pass, and
    yields one list of at most batch_size indices per batch (the final batch
    of a pass may be shorter).

    :param length: total number of samples
    :param batch_size: maximum indices per yielded batch
    :param n_iter: number of passes (epochs) over the data
    :param is_shuffle: reshuffle the index order before every pass
    :return: yields lists of sample indices
    """
    indices = list(range(length))
    # Number of batches per pass, rounding up for a partial final batch.
    n_batches = int(length / batch_size) + (1 if length % batch_size else 0)
    for _ in range(n_iter):
        if is_shuffle:
            np.random.shuffle(indices)
        for start in range(0, n_batches * batch_size, batch_size):
            yield indices[start:start + batch_size]
def load_word_id_mapping(word_id_file, encoding='utf8'):
    """
    Method adapted from Trusca et al. (2020), original docstring below.

    Reads a whitespace-separated "word id" file into a lower-cased dict.

    :param word_id_file: word-id mapping file path
    :param encoding: file's encoding, for changing to unicode
    :return: word-id mapping, like hello=5
    """
    word_to_id = dict()
    # Decode at open time: the original called str.decode() on an
    # already-decoded line, which raises AttributeError on Python 3.
    with open(word_id_file, encoding=encoding, errors='ignore') as fp:
        for line in fp:
            parts = line.lower().split()
            if len(parts) < 2:
                # Skip blank/malformed lines instead of raising IndexError.
                continue
            word_to_id[parts[0]] = int(parts[1])
    print('\nload word-id mapping done!\n')
    return word_to_id
def load_w2v(w2v_file, embedding_dim, is_skip=False):
    """
    Method adapted from Trusca et al. (2020), no original docstring provided.

    Loads a text word-embedding file ("word v1 ... vd" per line) into a
    word->row-index dict and a float32 matrix. Row 0 is all zeros (absent
    words); the final row is the mean of the summed rows, registered under
    the '$t$' target placeholder.

    :param w2v_file: path to the embedding file
    :param embedding_dim: expected number of values per embedding
    :param is_skip: True if the first line is a header to skip
    :return: (word_dict, w2v) as described above
    """
    w2v = []
    word_dict = dict()
    # [0,0,...,0] represent absent words.
    w2v.append([0.] * embedding_dim)
    cnt = 0
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(w2v_file) as fp:
        if is_skip:
            fp.readline()
        for line in fp:
            line = line.split()
            if len(line) != embedding_dim + 1:
                if line:
                    print('a bad word embedding: {}'.format(line[0]))
                continue
            # Count only lines that contribute a row: the original bumped cnt
            # before the validity check, so one bad line desynchronized the
            # word_dict indices from the w2v rows that follow it.
            cnt += 1
            w2v.append([float(v) for v in line[1:]])
            word_dict[line[0]] = cnt
    w2v = np.asarray(w2v, dtype=np.float32)
    w2v_sum = np.sum(w2v, axis=0, dtype=np.float32)
    div = np.divide(w2v_sum, cnt, dtype=np.float32)
    # np.vstack replaces np.row_stack (deprecated alias, removed in NumPy 2).
    w2v = np.vstack((w2v, div))
    word_dict['$t$'] = (cnt + 1)
    return word_dict, w2v
def change_y_to_onehot(y, pos_neu_neg=True):
    """
    Method adapted from Trusca et al. (2020), no original docstring provided.

    Converts polarity labels to one-hot vectors and returns the label ->
    column-index mapping used, so subsequent splits can reuse it.

    :param y: vector of polarities (strings '1', '0', '-1')
    :param pos_neu_neg: True if three possible polarities (positive, neutral and negative)
    :return: (one-hot int32 matrix, label -> column-index mapping)
    """
    from collections import Counter
    count = Counter(y)
    if FLAGS.writable == 1:
        with open(FLAGS.results_file, "a") as results:
            results.write("Positive: " + str(count['1']) + ", Neutral: " + str(
                count['0']) + ", Negative: " + str(count['-1']) + ", Total: " + str(sum(count.values())) + "\n")
    print("Polarity count:", count)
    # Build the mapping from a deterministically ordered sequence: iterating
    # a set of strings is subject to hash randomization, so the original
    # could assign labels to different columns on different runs.
    if pos_neu_neg:
        classes = ['1', '0', '-1']
    else:
        classes = sorted(set(y))
    n_class = len(classes)
    y_onehot_mapping = dict(zip(classes, range(n_class)))
    print("Polarity mapping:", y_onehot_mapping)
    onehot = []
    for label in y:
        tmp = [0] * n_class
        tmp[y_onehot_mapping[label]] = 1
        onehot.append(tmp)
    return np.asarray(onehot, dtype=np.int32), y_onehot_mapping
def change_y_to_onehot_keep(y, y_onehot_mapping, pos_neu_neg=True):
    """
    Method adapted from Trusca et al. (2020), no original docstring provided.

    Converts polarity labels to one-hot vectors while reusing a previously
    computed label -> column-index mapping, so train/test encodings line up.

    :param y: vector of polarities (strings '1', '0', '-1')
    :param y_onehot_mapping: one-hot mapping to keep
    :param pos_neu_neg: retained for interface compatibility; the class set
        is fully determined by y_onehot_mapping
    :return: (one-hot int32 matrix, the unchanged mapping)
    """
    from collections import Counter
    count = Counter(y)
    if FLAGS.writable == 1:
        with open(FLAGS.results_file, "a") as results:
            results.write("Positive: " + str(count['1']) + ", Neutral: " + str(
                count['0']) + ", Negative: " + str(count['-1']) + ", Total: " + str(sum(count.values())) + "\n")
    print("Polarity count:", count)
    # Size the vectors by the kept mapping, not by the classes present in y:
    # the original recomputed n_class from this split, which produced
    # wrong-width vectors (or IndexError) when a training class was absent.
    n_class = len(y_onehot_mapping)
    print("Polarity mapping:", y_onehot_mapping)
    onehot = []
    for label in y:
        tmp = [0] * n_class
        tmp[y_onehot_mapping[label]] = 1
        onehot.append(tmp)
    return np.asarray(onehot, dtype=np.int32), y_onehot_mapping
def load_inputs_twitter(input_file, word_id_file, sentence_len, type_='', is_r=True, target_len=10, encoding='utf8',
                        pos_neu_neg=True):
    """
    Method adapted from Trusca et al. (2020), no original docstring provided.

    Reads an ABSA data file laid out as three lines per sample (sentence with
    a '$t$' target placeholder, target phrase, polarity label) and converts it
    to zero-padded word-id arrays.

    :param input_file: path to the three-lines-per-sample data file
    :param word_id_file: word-to-id dict, or path to a mapping file
    :param sentence_len: padded context length in tokens
    :param type_: 'TD', 'TC', 'IAN' or '' -- selects which arrays are built/returned
    :param is_r: True to reverse the right context in place (TD/TC only)
    :param target_len: padded target length in tokens
    :param encoding: unused in this body -- presumably for the mapping file; TODO confirm
    :param pos_neu_neg: True if three possible polarities (positive, neutral and negative)
    :return: arrays whose composition depends on type_ (see the return branches)
    """
    if type(word_id_file) is str:
        word_to_id = load_word_id_mapping(word_id_file)
    else:
        word_to_id = word_id_file
    print('Load word-to-id done!')
    x, y, sen_len = [], [], []
    x_r, sen_len_r = [], []
    target_words = []
    tar_len = []
    all_target, all_sent, all_y = [], [], []
    lines = open(input_file).readlines()
    # Each sample spans three consecutive lines: sentence, target, label.
    for i in range(0, len(lines), 3):
        # Targets.
        words = lines[i + 1].lower().split()
        target = words
        target_word = []
        for w in words:
            # Words missing from the vocabulary are silently dropped.
            if w in word_to_id:
                target_word.append(word_to_id[w])
        length = min(len(target_word), target_len)
        tar_len.append(length)
        target_words.append(target_word[:length] + [0] * (target_len - length))
        # Sentiment.
        y.append(lines[i + 2].strip().split()[0])
        # Left and right context.
        words = lines[i].lower().split()
        sent = words
        words_l, words_r = [], []
        flag = True
        # '$t$' marks the target position: words before it go left, after it right.
        for word in words:
            if word == '$t$':
                flag = False
                continue
            if flag:
                if word in word_to_id:
                    words_l.append(word_to_id[word])
            else:
                if word in word_to_id:
                    words_r.append(word_to_id[word])
        if type_ == 'TD' or type_ == 'TC':
            words_l = words_l[:sentence_len]
            words_r = words_r[:sentence_len]
            sen_len.append(len(words_l))
            x.append(words_l + [0] * (sentence_len - len(words_l)))
            # tmp aliases words_r, so reverse() mutates words_r in place.
            tmp = words_r
            if is_r:
                tmp.reverse()
            sen_len_r.append(len(tmp))
            x_r.append(tmp + [0] * (sentence_len - len(tmp)))
            all_sent.append(sent)
            all_target.append(target)
        else:
            # Full-sentence variants splice the target ids back into the context.
            words = words_l + target_word + words_r
            words = words[:sentence_len]
            sen_len.append(len(words))
            x.append(words + [0] * (sentence_len - len(words)))
    all_y = y
    y, y_onehot_mapping = change_y_to_onehot(y, pos_neu_neg=pos_neu_neg)
    if type_ == 'TD':
        return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), \
               np.asarray(sen_len_r), np.asarray(y)
    elif type_ == 'TC':
        return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), np.asarray(sen_len_r), \
               np.asarray(y), np.asarray(target_words), np.asarray(tar_len), np.asarray(all_sent), np.asarray(
            all_target), np.asarray(all_y), y_onehot_mapping
    elif type_ == 'IAN':
        return np.asarray(x), np.asarray(sen_len), np.asarray(target_words), \
               np.asarray(tar_len), np.asarray(y)
    else:
        return np.asarray(x), np.asarray(sen_len), np.asarray(y)
def load_inputs_twitter_keep(input_file, y_onehot_mapping, word_id_file, sentence_len, type_='', is_r=True,
                             target_len=10, encoding='utf8', pos_neu_neg=True):
    """
    Method adapted from Trusca et al. (2020), no original docstring provided.

    Same parsing as load_inputs_twitter (three lines per sample: sentence
    with '$t$' placeholder, target phrase, polarity label), but reuses a
    previously computed polarity one-hot mapping so that train and test
    encodings agree.

    :param input_file: path to the three-lines-per-sample data file
    :param y_onehot_mapping: one-hot mapping to keep
    :param word_id_file: word-to-id dict, or path to a mapping file
    :param sentence_len: padded context length in tokens
    :param type_: 'TD', 'TC', 'IAN' or '' -- selects which arrays are built/returned
    :param is_r: True to reverse the right context in place (TD/TC only)
    :param target_len: padded target length in tokens
    :param encoding: unused in this body -- presumably for the mapping file; TODO confirm
    :param pos_neu_neg: True if three possible polarities (positive, neutral and negative)
    :return: arrays whose composition depends on type_ (see the return branches)
    """
    if type(word_id_file) is str:
        word_to_id = load_word_id_mapping(word_id_file)
    else:
        word_to_id = word_id_file
    print('Load word-to-id done!')
    x, y, sen_len = [], [], []
    x_r, sen_len_r = [], []
    target_words = []
    tar_len = []
    all_target, all_sent, all_y = [], [], []
    # Read in txt file.
    lines = open(input_file).readlines()
    for i in range(0, len(lines), 3):
        # Targets.
        words = lines[i + 1].lower().split()
        target = words
        target_word = []
        for w in words:
            # Words missing from the vocabulary are silently dropped.
            if w in word_to_id:
                target_word.append(word_to_id[w])
        l = min(len(target_word), target_len)
        tar_len.append(l)
        target_words.append(target_word[:l] + [0] * (target_len - l))
        # Sentiment.
        y.append(lines[i + 2].strip().split()[0])
        # Left and right context.
        words = lines[i].lower().split()
        sent = words
        words_l, words_r = [], []
        flag = True
        # '$t$' marks the target position: words before it go left, after it right.
        for word in words:
            if word == '$t$':
                flag = False
                continue
            if flag:
                if word in word_to_id:
                    words_l.append(word_to_id[word])
            else:
                if word in word_to_id:
                    words_r.append(word_to_id[word])
        if type_ == 'TD' or type_ == 'TC':
            words_l = words_l[:sentence_len]
            words_r = words_r[:sentence_len]
            sen_len.append(len(words_l))
            x.append(words_l + [0] * (sentence_len - len(words_l)))
            # tmp aliases words_r, so reverse() mutates words_r in place.
            tmp = words_r
            if is_r:
                tmp.reverse()
            sen_len_r.append(len(tmp))
            x_r.append(tmp + [0] * (sentence_len - len(tmp)))
            all_sent.append(sent)
            all_target.append(target)
        else:
            # Full-sentence variants splice the target ids back into the context.
            words = words_l + target_word + words_r
            words = words[:sentence_len]
            sen_len.append(len(words))
            x.append(words + [0] * (sentence_len - len(words)))
    all_y = y
    y, y_onehot_mapping = change_y_to_onehot_keep(y, y_onehot_mapping, pos_neu_neg=pos_neu_neg)
    if type_ == 'TD':
        return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), \
               np.asarray(sen_len_r), np.asarray(y)
    elif type_ == 'TC':
        return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), np.asarray(sen_len_r), \
               np.asarray(y), np.asarray(target_words), np.asarray(tar_len), np.asarray(all_sent), np.asarray(
            all_target), np.asarray(all_y), y_onehot_mapping
    elif type_ == 'IAN':
        return np.asarray(x), np.asarray(sen_len), np.asarray(target_words), \
               np.asarray(tar_len), np.asarray(y)
    else:
        return np.asarray(x), np.asarray(sen_len), np.asarray(y)
def load_inputs_cabasc(input_file, word_id_file, sentence_len, type_='', is_r=True, target_len=10, encoding='utf8'):
"""
Method obtained from Trusca et al. (2020), no original docstring provided.
NOTE. Not used in current adaptation.
:param input_file:
:param word_id_file:
:param sentence_len:
:param type_:
:param is_r:
:param target_len:
:param encoding:
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('load word-to-id done!')
x, y, sen_len = [], [], []
x_r, sen_len_r = [], []
sent_short_final, sent_final = [], []
target_words = []
tar_len = []
mult_mask = []
lines = open(input_file).readlines()
for i in range(0, len(lines), 3):
words = lines[i + 1].lower().split()
target_word = []
for w in words:
if w in word_to_id:
target_word.append(word_to_id[w])
l = min(len(target_word), target_len)
tar_len.append(l)
target_words.append(target_word[:l] + [0] * (target_len - l))
y.append(lines[i + 2].strip().split()[0])
words = lines[i].lower().split()
words_l, words_r, sent_short, sent = [], [], [], []
flag = True
for word in words:
if word == '$t$':
flag = False
continue
if flag:
if word in word_to_id:
words_l.append(word_to_id[word])
else:
if word in word_to_id:
words_r.append(word_to_id[word])
if type_ == 'TD' or type_ == 'TC':
mult = [1] * sentence_len
mult[len(words_l):len(words_l) + l] = | |
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
raise
def subnet_deletion_event(self):
# This is an event from CloudTrail, so the location of the IDs in the event are different:
try:
detail = self.event.get("detail")
subnet_id = detail.get("requestParameters", {}).get("subnetId")
# Get the VPC ID from the spoke account::
ec2 = self._session(self.spoke_account_id)
response = ec2.describe_subnets(subnet_id)
subnet = response[0]
vpc_id = subnet.get("VpcId")
# Get any available Transit Gateway VPC attachments for that VPC ID:
attachments = ec2.describe_transit_gateway_vpc_attachments(
tgw_id=environ.get("TGW_ID"),
vpc_id=vpc_id,
state=["available", "pending", "modifying"],
)
if len(attachments) == 0:
# No attachments found, that's fine:
return "No attachments found"
# A transit gateway cannot have more than one attachment to the same VPC.
attachment = attachments[0]
# Verify that the subnet ID in question is one of the subnets in the VPC attachment:
attachment_subnet_ids = attachment.get("SubnetIds")
if subnet_id in attachment_subnet_ids:
# Delete transit gateway attachment
attachment_id = attachment.get("TransitGatewayAttachmentId")
self.logger.info(
f"About to delete transit gateway VPC attachment {attachment_id}"
)
ec2.delete_transit_gateway_vpc_attachment(attachment_id)
return f"Deleted transit gateway VPC attachment {attachment_id}"
return (
"No subnet IDs matched active transit gateway vpc attachments."
)
except Exception as e:
try:
error_code = e.response["Error"]["Code"]
except Exception:
error_code = ""
if (
error_code == "InvalidVpcID.NotFound"
or error_code == "InvalidSubnetID.NotFound"
):
# This is fine, the subnet was already deleted
return "Subnet or VPC already deleted"
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
raise
def _update_ddb_failed(self, e):
self.event.update({"Comment": str(e)})
self.event.update({"Status": "failed"})
ddb = DynamoDb(self.event)
ddb.put_item()
def _update_subnet_id_tags(self, subnet_id, error_message):
subnet_tag_operations = [
"CreateTransitGatewayVpcAttachment",
"DeleteTransitGatewayVpcAttachment",
"ModifyTransitGatewayVpcAttachment",
]
for operation in subnet_tag_operations:
if operation in error_message:
self._create_tag(subnet_id, "Subnet-Error", error_message)
def _update_vpc_id_tags(self, vpc_id, error_message):
vpc_tag_operations = [
"CreateTransitGatewayVpcAttachment",
"DeleteTransitGatewayVpcAttachment",
"ModifyTransitGatewayVpcAttachment",
"AssociateTransitGatewayRouteTable",
"DisassociateTransitGatewayRouteTable",
"EnableTransitGatewayRouteTablePropagation",
"DisableTransitGatewayRouteTablePropagation",
"RouteTableNotFoundException",
]
for operation in vpc_tag_operations:
if operation in error_message:
self._create_tag(vpc_id, "VPC-Error", error_message)
def update_tags_if_failed(self):
# This function updates the tags on the VPCs/Subnets, so that
# the VPC owner has visibility on whether STNO failed.
if self.event.get("Status", "") == "failed":
error_message = self.event.get("Comment", "Unknown error")
subnet_id = self.event.get("SubnetId")
vpc_id = self.event.get("VpcId")
if subnet_id:
self._update_subnet_id_tags(subnet_id, error_message)
if vpc_id:
self._update_vpc_id_tags(vpc_id, error_message)
return self.event
class VPC:
"""
This class contains functions to manage VPC related resources
"""
def __init__(self, event):
self.event = event
self.logger = logging.getLogger(__name__)
self.assume_role = AssumeRole()
self.spoke_account_id = self.event.get("account")
self.spoke_region = self.event.get("region")
self.logger.info(self.__class__.__name__ + CLASS_EVENT)
self.logger.info(event)
self.org_client = Organizations()
def _session(self, account_id):
# instantiate EC2 sessions
return EC2(
credentials=self.assume_role(account_id),
)
    def _print(self, description, response):
        """Log a labeled API response at INFO level (label line, then the
        raw response object)."""
        self.logger.info(f"Printing {description}")
        self.logger.info(response)
def _create_tag(self, resource, key, message):
try:
self.logger.info(
EXECUTING
+ self.__class__.__name__
+ "/"
+ inspect.stack()[0][3]
)
ec2 = self._session(self.spoke_account_id)
ec2.create_tags(
resource, "STNOStatus-" + key, timestamp_message(message)
)
except Exception as e:
message = self._message(inspect.stack()[0][3], e)
self.logger.exception(message)
def _extract_resource_id(self):
self.logger.info(f"The event for resources is {self.event}")
resource_arn = self.event.get("resources")[0]
return resource_arn.split("/")[1]
def _check_list_length(self, array, length):
# compare the length of the list
if len(array) == length:
return None
else:
raise ValueError(
"Length of the list in the response is more than {} values.".format(
length
)
)
def _update_event_with_ou_name(self):
"""
This method updates the event with on OU name and account name for tagging.
"""
# Update the event with details on the account name and OU.
account_id = self.event.get("account")
if not account_id:
account_id = self.event.get("AWSSpokeAccountId")
if account_id:
self.event.update({"account": account_id})
account_name = self.org_client.get_account_name(account_id)
if account_name:
self.logger.debug(
f"Updating the event with account name {account_name}"
)
self.event.update({"AccountName": account_name})
account_ou_path = self.org_client.get_ou_path(account_id)
if account_ou_path:
self.logger.debug(
f"Updating the event with OU path {account_ou_path}"
)
self.event.update({"AccountOuPath": account_ou_path})
def _update_account_details(self):
account_id = self.event.get("account")
if not account_id:
account_id = self.event.get("AWSSpokeAccountId")
if account_id:
self.event.update({"account": account_id})
account_name = self.org_client.get_account_name(account_id)
if account_name:
self.event.update({"AccountName": account_name})
account_ou_path = self.org_client.get_ou_path(account_id)
if account_ou_path:
self.event.update({"AccountOuPath": account_ou_path})
self.logger.info("Updated Event with ou_name is: {}".format(self.event))
    def describe_resources(self):
        """Entry point for an inbound event: enrich it with account/OU
        details and VPC/subnet descriptions.

        Handles three event shapes: a tag-change event on a VPC, a
        tag-change event on a subnet, and an STNO management-console action
        (AdminAction set, TagEventSource pre-populated).

        :return: the enriched event dict
        :raises ResourceNotFoundException: when the VPC/subnet no longer exists
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            # Update the event with details on the account name and OU.
            # We first need the account ID:
            self._update_account_details()
            # check if the event is coming from STNO Management Console
            if self.event.get("AdminAction") is None:
                # extract subnet id from the ARN
                resource_id = self._extract_resource_id()
                # if event is from VPC tagging
                if resource_id.startswith("vpc"):
                    self.logger.info(
                        "Tag Change on VPC: {}".format(resource_id)
                    )
                    self.event.update({"VpcId": resource_id})
                    self.event.update({"TagEventSource": "vpc"})
                    # get VPC details
                    self._describe_vpc()
                # if event from Subnet tagging
                elif resource_id.startswith("subnet"):
                    self.logger.info(
                        "Tag Change on Subnet: {}".format(resource_id)
                    )
                    self.event.update({"SubnetId": resource_id})
                    self.event.update({"TagEventSource": "subnet"})
                    # get subnet details (fills VpcId), then the VPC details
                    self._describe_subnet()
                    # get VPC details
                    self._describe_vpc()
                else:
                    self.logger.info(
                        "Resource Id is neither a VPC nor a subnet."
                    )
                    raise TypeError(
                        "Application Exception: Resource Id is neither a VPC nor a subnet."
                    )
            elif self.event.get("TagEventSource") == "vpc":
                # Console-driven VPC action: copy console fields, then describe.
                self._set_event_variables()
                # get VPC details
                self._describe_vpc()
            elif self.event.get("TagEventSource") == "subnet":
                # Console-driven subnet action.
                self._set_event_variables()
                # get subnet details
                self._describe_subnet()
                # get VPC details
                self._describe_vpc()
            # Stamp a processing time if the event did not carry one.
            if self.event.get("time") is None:
                self.event.update({"time": current_time()})
            return self.event
        except Exception as e:
            try:
                error_code = e.response["Error"]["Code"]
            except Exception:
                error_code = ""
            if (
                error_code == "InvalidVpcID.NotFound"
                or error_code == "InvalidSubnetID.NotFound"
            ):
                # Deleted resources are reported with a dedicated exception
                # so the caller can treat them as non-fatal.
                raise ResourceNotFoundException(e)
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
def _set_event_variables(self):
self.logger.info(
"Event came from the management console, setting variables"
)
self.event.update({"account": self.event.get("AWSSpokeAccountId")})
self.event.update(
{
environ.get("ASSOCIATION_TAG"): self.event.get(
"AssociationRouteTable"
)
}
)
self.event.update(
{
environ.get("PROPAGATION_TAG"): self.event.get(
"PropagationRouteTables"
)
}
)
# re-initialize the class variables
self._reset()
    def _reset(self):
        """Re-run __init__ against the (possibly mutated) event so derived
        attributes such as spoke_account_id are recomputed."""
        # reset class variables
        self.__init__(self.event)
    def _describe_vpc(self):
        """Describe the event's VPC in the spoke account and update the event.

        Sets 'VpcCidr', and 'VpcTagFound' ('yes' when the VPC carries the
        STNO association or propagation tag, otherwise 'no'); for a VPC with
        the tag, also copies the relevant tag values onto the event via
        _match_keys_with_tag.

        :return: the enriched event dict
        :raises: re-raises any describe/API error after logging
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            ec2 = self._session(self.spoke_account_id)
            # describe the vpc in the spoke account
            response = ec2.describe_vpcs(self.event.get("VpcId"))
            self._print("Describe VPC", response)
            # the response should return a list with single item
            self._check_list_length(response, 1)
            # update event with subnet details
            index = 0
            vpc = response[index]
            # Cidr block associated with this VPC
            self.event.update({"VpcCidr": vpc.get("CidrBlock")})
            # Assuming VPC is not tagged
            self.event.update({"VpcTagFound": "no"})
            # Collect normalized (lower-cased, stripped) tag keys for matching.
            tag_key_list = []
            if vpc.get("Tags") is not None:
                for tag in vpc.get("Tags"):
                    tag_key_list.append(tag.get("Key").lower().strip())
                self._print("list of tag keys", tag_key_list)
            else:
                self.logger.info(
                    "No tags found for the VPC associated with the tagged Subnet."
                )
            if (
                environ.get("ASSOCIATION_TAG").lower().strip() in tag_key_list
                or environ.get("PROPAGATION_TAG").lower().strip()
                in tag_key_list
            ):
                # check if tags exist for the VPC
                self.logger.info(
                    "Found association or propagation tag for the VPC: {}".format(
                        self.event.get("VpcId")
                    )
                )
                self.event.update({"VpcTagFound": "yes"})
            # event source is subnet tag change, then obtain the Tag Event Sources from VPC tags
            if self.event.get("TagEventSource") == "subnet":
                self._update_event_with_vpc_tags(vpc.get("Tags"))
            else:
                # VPC tag-change / console events carry the tags in the event
                # detail itself.
                self._update_event_with_vpc_tags(
                    self.event.get("detail", {}).get("tags")
                )
            return self.event
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
    def _match_keys_with_tag(self, key, value):
        """Fold a single VPC tag (key, value) into the event.

        Recognizes: the association tag (value copied, lower-cased), the
        propagation tag (value split on '/' or ':' into a list of route-table
        names), the 'Name' tag (stored as 'VpcName'), and any key listed in
        the VPC_TAGS_FOR_ATTACHMENT environment variable (collected under
        'AttachmentTagsRequired' to be copied onto the TGW attachment).

        :param key: tag key as returned by describe_vpcs
        :param value: tag value
        :raises: re-raises any error after logging
        """
        try:
            self.logger.info(
                EXECUTING
                + self.__class__.__name__
                + "/"
                + inspect.stack()[0][3]
            )
            if (
                key.lower().strip()
                == environ.get("ASSOCIATION_TAG").lower().strip()
            ):
                self.event.update(
                    {environ.get("ASSOCIATION_TAG"): value.lower().strip()}
                )
                self._print("Modified Event with Association Tag", self.event)
            elif (
                key.lower().strip()
                == environ.get("PROPAGATION_TAG").lower().strip()
            ):
                # organizations tag policy does not allow comma (,) as a
                # separator. Adding slash (/) and colon (:) as separators
                self.event.update(
                    {
                        environ.get("PROPAGATION_TAG"): [
                            x.lower().strip() for x in value.replace('/', ',').replace(':', ',').split(",")
                        ]
                    }
                )
                self._print("Modified Event with Propagation Tag", self.event)
            elif key.lower().strip() == "name":
                vpc_name = value.strip()
                self.logger.debug(
                    f"Updating the event with vpc name {vpc_name}"
                )
                self.event.update({"VpcName": vpc_name})
            # Ensure the collection dict exists before any copy below.
            if "AttachmentTagsRequired" not in self.event:
                self.event.update({"AttachmentTagsRequired": {}})
            # If the VPC_TAGS_FOR_ATTACHMENT is specified, and is not empty
            # go through this comma separated list, and see if the VPC has those tags.
            # If it does, store it in the event under AttachmentTagsRequired as a dictionary of key->value pairs.
            if "VPC_TAGS_FOR_ATTACHMENT" in environ:
                tag_keys_to_copy = environ.get("VPC_TAGS_FOR_ATTACHMENT").split(
                    ","
                )
                # Do a case insensitive match, example CostCode/codecode
                tag_keys_to_copy = [x.lower().strip() for x in tag_keys_to_copy]
                if key.lower().strip() in tag_keys_to_copy:
                    self.logger.debug(
                        f"Attaching tags with key {key} and value {value}"
                    )
                    # NOTE: the original key casing is preserved in the copy.
                    self.event["AttachmentTagsRequired"][key] = value
        except Exception as e:
            message = self._message(inspect.stack()[0][3], e)
            self.logger.exception(message)
            raise
def _update_event_with_tgw_attachment_name(self):
"""
This method updates the event with a name for TGW attachment.
"""
account_name = self.event.get("AccountName")
self.logger.debug(f"account_name is: {account_name}")
if account_name:
self.event["AttachmentTagsRequired"]["account-name"] = account_name[
:255
]
account_ou_path = self.event.get("AccountOuPath")
self.logger.debug(f"account_ou_path is {account_ou_path}")
if account_ou_path:
self.event["AttachmentTagsRequired"][
"account-ou"
] = account_ou_path[:255]
vpc_name = self.event.get("VpcName")
self.logger.debug(f"VPC name is {vpc_name}")
if vpc_name:
self.event["AttachmentTagsRequired"][
"vpc-name"
] = vpc_name[:255]
        # Construct the tgw Name tag. If we have the account_name: /The/Ou/Path/account_name: vpc_name
attachment_name = ""
if account_ou_path:
attachment_name += account_ou_path
if account_name:
attachment_name += f"{account_name}"
if vpc_name:
if account_ou_path or account_name:
attachment_name = (
attachment_name + ": " + | |
409081, 409099, 409121, 409153, 409163,
409177, 409187, 409217, 409237, 409259, 409261, 409267, 409271,
409289, 409291, 409327, 409333, 409337, 409349, 409351, 409369,
409379, 409391, 409397, 409429, 409433, 409441, 409463, 409471,
409477, 409483, 409499, 409517, 409523, 409529, 409543, 409573,
409579, 409589, 409597, 409609, 409639, 409657, 409691, 409693,
409709, 409711, 409723, 409729, 409733, 409753, 409769, 409777,
409781, 409813, 409817, 409823, 409831, 409841, 409861, 409867,
409879, 409889, 409891, 409897, 409901, 409909, 409933, 409943,
409951, 409961, 409967, 409987, 409993, 409999, 410009, 410029,
410063, 410087, 410093, 410117, 410119, 410141, 410143, 410149,
410171, 410173, 410203, 410231, 410233, 410239, 410243, 410257,
410279, 410281, 410299, 410317, 410323, 410339, 410341, 410353,
410359, 410383, 410387, 410393, 410401, 410411, 410413, 410453,
410461, 410477, 410489, 410491, 410497, 410507, 410513, 410519,
410551, 410561, 410587, 410617, 410621, 410623, 410629, 410651,
410659, 410671, 410687, 410701, 410717, 410731, 410741, 410747,
410749, 410759, 410783, 410789, 410801, 410807, 410819, 410833,
410857, 410899, 410903, 410929, 410953, 410983, 410999, 411001,
411007, 411011, 411013, 411031, 411041, 411049, 411067, 411071,
411083, 411101, 411113, 411119, 411127, 411143, 411157, 411167,
411193, 411197, 411211, 411233, 411241, 411251, 411253, 411259,
411287, 411311, 411337, 411347, 411361, 411371, 411379, 411409,
411421, 411443, 411449, 411469, 411473, 411479, 411491, 411503,
411527, 411529, 411557, 411563, 411569, 411577, 411583, 411589,
411611, 411613, 411617, 411637, 411641, 411667, 411679, 411683,
411703, 411707, 411709, 411721, 411727, 411737, 411739, 411743,
411751, 411779, 411799, 411809, 411821, 411823, 411833, 411841,
411883, 411919, 411923, 411937, 411941, 411947, 411967, 411991,
412001, 412007, 412019, 412031, 412033, 412037, 412039, 412051,
412067, 412073, 412081, 412099, 412109, 412123, 412127, 412133,
412147, 412157, 412171, 412187, 412189, 412193, 412201, 412211,
412213, 412219, 412249, 412253, 412273, 412277, 412289, 412303,
412333, 412339, 412343, 412387, 412397, 412411, 412457, 412463,
412481, 412487, 412493, 412537, 412561, 412567, 412571, 412589,
412591, 412603, 412609, 412619, 412627, 412637, 412639, 412651,
412663, 412667, 412717, 412739, 412771, 412793, 412807, 412831,
412849, 412859, 412891, 412901, 412903, 412939, 412943, 412949,
412967, 412987, 413009, 413027, 413033, 413053, 413069, 413071,
413081, 413087, 413089, 413093, 413111, 413113, 413129, 413141,
413143, 413159, 413167, 413183, 413197, 413201, 413207, 413233,
413243, 413251, 413263, 413267, 413293, 413299, 413353, 413411,
413417, 413429, 413443, 413461, 413477, 413521, 413527, 413533,
413537, 413551, 413557, 413579, 413587, 413597, 413629, 413653,
413681, 413683, 413689, 413711, 413713, 413719, 413737, 413753,
413759, 413779, 413783, 413807, 413827, 413849, 413863, 413867,
413869, 413879, 413887, 413911, 413923, 413951, 413981, 414013,
414017, 414019, 414031, 414049, 414053, 414061, 414077, 414083,
414097, 414101, 414107, 414109, 414131, 414157, 414179, 414199,
414203, 414209, 414217, 414221, 414241, 414259, 414269, 414277,
414283, 414311, 414313, 414329, 414331, 414347, 414361, 414367,
414383, 414389, 414397, 414413, 414431, 414433, 414451, 414457,
414461, 414467, 414487, 414503, 414521, 414539, 414553, 414559,
414571, 414577, 414607, 414611, 414629, 414641, 414643, 414653,
414677, 414679, 414683, 414691, 414697, 414703, 414707, 414709,
414721, 414731, 414737, 414763, 414767, 414769, 414773, 414779,
414793, 414803, 414809, 414833, 414857, 414871, 414889, 414893,
414899, 414913, 414923, 414929, 414949, 414959, 414971, 414977,
414991, 415013, 415031, 415039, 415061, 415069, 415073, 415087,
415097, 415109, 415111, 415133, 415141, 415147, 415153, 415159,
415171, 415187, 415189, 415201, 415213, 415231, 415253, 415271,
415273, 415319, 415343, 415379, 415381, 415391, 415409, 415427,
415447, 415469, 415477, 415489, 415507, 415517, 415523, 415543,
415553, 415559, 415567, 415577, 415603, 415607, 415609, 415627,
415631, 415643, 415651, 415661, 415669, 415673, 415687, 415691,
415697, 415717, 415721, 415729, 415759, 415783, 415787, 415799,
415801, 415819, 415823, 415861, 415873, 415879, 415901, 415931,
415937, 415949, 415951, 415957, 415963, 415969, 415979, 415993,
415999, 416011, 416023, 416071, 416077, 416089, 416107, 416147,
416149, 416153, 416159, 416167, 416201, 416219, 416239, 416243,
416249, 416257, 416263, 416281, 416291, 416333, 416359, 416387,
416389, 416393, 416399, 416401, 416407, 416413, 416417, 416419,
416441, 416443, 416459, 416473, 416477, 416491, 416497, 416501,
416503, 416513, 416531, 416543, 416573, 416579, 416593, 416621,
416623, 416629, 416659, 416677, 416693, 416719, 416761, 416797,
416821, 416833, 416839, 416849, 416851, 416873, 416881, 416887,
416947, 416957, 416963, 416989, 417007, 417017, 417019, 417023,
417037, 417089, 417097, 417113, 417119, 417127, 417133, 417161,
417169, 417173, 417181, 417187, 417191, 417203, 417217, 417227,
417239, 417251, 417271, 417283, 417293, 417311, 417317, 417331,
417337, 417371, 417377, 417379, 417383, 417419, 417437, 417451,
417457, 417479, 417491, 417493, 417509, 417511, 417523, 417541,
417553, 417559, 417577, 417581, 417583, 417617, 417623, 417631,
417643, 417649, 417671, 417691, 417719, 417721, 417727, 417731,
417733, 417737, 417751, 417763, 417773, 417793, 417811, 417821,
417839, 417863, 417869, 417881, 417883, 417899, 417931, 417941,
417947, 417953, 417959, 417961, 417983, 417997, 418007, 418009,
418027, 418031, 418043, 418051, 418069, 418073, 418079, 418087,
418109, 418129, 418157, 418169, 418177, 418181, 418189, 418199,
418207, 418219, 418259, 418273, 418279, 418289, 418303, 418321,
418331, 418337, 418339, 418343, 418349, 418351, 418357, 418373,
418381, 418391, 418423, 418427, 418447, 418459, 418471, 418493,
418511, 418553, 418559, 418597, 418601, 418603, 418631, 418633,
418637, 418657, 418667, 418699, 418709, 418721, 418739, 418751,
418763, 418771, 418783, 418787, 418793, 418799, 418811, 418813,
418819, 418837, 418843, 418849, 418861, 418867, 418871, 418883,
418889, 418909, 418921, 418927, 418933, 418939, 418961, 418981,
418987, 418993, 418997, 419047, 419051, 419053, 419057, 419059,
419087, 419141, 419147, 419161, 419171, 419183, 419189, 419191,
419201, 419231, 419249, 419261, 419281, 419291, 419297, 419303,
419317, 419329, 419351, 419383, 419401, 419417, 419423, 419429,
419443, 419449, 419459, 419467, 419473, 419477, 419483, 419491,
419513, 419527, 419537, 419557, 419561, 419563, 419567, 419579,
419591, 419597, 419599, 419603, 419609, 419623, 419651, 419687,
419693, 419701, 419711, 419743, 419753, 419777, 419789, 419791,
419801, 419803, 419821, 419827, 419831, 419873, 419893, 419921,
419927, 419929, 419933, 419953, 419959, 419999, 420001, 420029,
420037, 420041, 420047, 420073, 420097, 420103, 420149, 420163,
420191, 420193, 420221, 420241, 420253, 420263, 420269, 420271,
420293, 420307, 420313, 420317, 420319, 420323, 420331, 420341,
420349, 420353, 420361, 420367, 420383, 420397, 420419, 420421,
420439, 420457, 420467, 420479, 420481, 420499, 420503, 420521,
420551, 420557, 420569, 420571, 420593, 420599, 420613, 420671,
420677, 420683, 420691, 420731, 420737, 420743, 420757, 420769,
420779, 420781, 420799, 420803, 420809, 420811, 420851, 420853,
420857, 420859, 420899, 420919, 420929, 420941, 420967, 420977,
420997, 421009, 421019, 421033, 421037, 421049, 421079, 421081,
421093, 421103, 421121, 421123, 421133, 421147, 421159, 421163,
421177, 421181, 421189, 421207, 421241, 421273, 421279, 421303,
421313, 421331, 421339, 421349, 421361, 421381, 421397, 421409,
421417, 421423, 421433, 421453, 421459, 421469, 421471, 421483,
421493, 421501, 421517, 421559, 421607, 421609, 421621, 421633,
421639, 421643, 421657, 421661, 421691, 421697, 421699, 421703,
421709, 421711, 421717, 421727, 421739, 421741, 421783, 421801,
421807, 421831, 421847, 421891, 421907, 421913, 421943, 421973,
421987, 421997, 422029, 422041, 422057, 422063, 422069, 422077,
422083, 422087, 422089, 422099, 422101, 422111, 422113, 422129,
422137, 422141, 422183, 422203, 422209, 422231, 422239, 422243,
422249, 422267, 422287, 422291, 422309, 422311, 422321, 422339,
422353, 422363, 422369, 422377, 422393, 422407, 422431, 422453,
422459, 422479, 422537, 422549, 422551, 422557, 422563, 422567,
422573, 422581, 422621, 422627, 422657, 422689, 422701, 422707,
422711, 422749, 422753, 422759, 422761, 422789, 422797, 422803,
422827, 422857, 422861, 422867, 422869, 422879, 422881, 422893,
422897, 422899, 422911, 422923, 422927, 422969, 422987, 423001,
423013, 423019, 423043, 423053, 423061, 423067, 423083, 423091,
423097, 423103, 423109, 423121, 423127, 423133, 423173, 423179,
423191, 423209, 423221, 423229, 423233, 423251, 423257, 423259,
423277, 423281, 423287, 423289, 423299, 423307, 423323, 423341,
423347, 423389, 423403, 423413, 423427, 423431, 423439, 423457,
423461, 423463, 423469, 423481, 423497, 423503, 423509, 423541,
423547, 423557, 423559, 423581, 423587, 423601, 423617, 423649,
423667, 423697, 423707, 423713, 423727, 423749, 423751, 423763,
423769, 423779, 423781, 423791, 423803, 423823, 423847, 423853,
423859, 423869, 423883, 423887, 423931, 423949, 423961, 423977,
423989, 423991, 424001, 424003, 424007, 424019, 424027, 424037,
424079, 424091, 424093, 424103, 424117, 424121, 424129, 424139,
424147, 424157, 424163, 424169, 424187, 424199, 424223, 424231,
424243, 424247, 424261, 424267, 424271, 424273, 424313, 424331,
424339, 424343, 424351, 424397, 424423, 424429, 424433, 424451,
424471, 424481, 424493, 424519, 424537, 424547, 424549, 424559,
424573, 424577, 424597, 424601, 424639, 424661, 424667, 424679,
424687, 424693, 424709, 424727, 424729, 424757, 424769, 424771,
424777, 424811, 424817, 424819, 424829, 424841, 424843, 424849,
424861, 424867, 424889, 424891, 424903, 424909, 424913, 424939,
424961, 424967, 424997, 425003, 425027, 425039, 425057, 425059,
425071, 425083, 425101, 425107, 425123, 425147, 425149, 425189,
425197, 425207, 425233, 425237, 425251, 425273, 425279, 425281,
425291, 425297, 425309, 425317, 425329, 425333, 425363, 425377,
425387, 425393, 425417, 425419, 425423, 425441, 425443, 425471,
425473, 425489, 425501, | |
# encoding: UTF-8
"""Library for running an EPICS-based virtual accelertor using IMPACT particle tracker."""
import cothread
import logging
import math
import numpy
import os.path
import random
import re
import shutil
import subprocess
import tempfile
import threading
import time
from collections import OrderedDict
from copy import deepcopy
from cothread import catools
from phantasy.library.lattice.impact import LatticeFactory, OUTPUT_MODE_DIAG
from phantasy.library.layout import BCMElement
from phantasy.library.layout import BLElement
from phantasy.library.layout import BLMElement
from phantasy.library.layout import BPMElement
from phantasy.library.layout import BendElement
from phantasy.library.layout import CavityElement
from phantasy.library.layout import CorElement
from phantasy.library.layout import DriftElement
from phantasy.library.layout import PMElement
from phantasy.library.layout import PortElement
from phantasy.library.layout import QuadElement
from phantasy.library.layout import SeqElement
from phantasy.library.layout import SextElement
from phantasy.library.layout import SolCorElement
from phantasy.library.layout import StripElement
from phantasy.library.layout import ValveElement
from phantasy.library.parser import Configuration
__copyright__ = "Copyright (c) 2015, Facility for Rare Isotope Beams"
__author__ = "<NAME>"
# configuration options: key names looked up on the Configuration object
CONFIG_MACHINE = "machine"
CONFIG_IMPACT_EXE_FILE = "impact_exe_file"
CONFIG_IMPACT_DATA_DIR = "impact_data_dir"
# default values
_DEFAULT_IMPACT_EXE = "impact"  # executable used when config provides none
_TEMP_DIRECTORY_SUFFIX = "_va_impact"
_DEFAULT_ERROR_VALUE = 0.0
_VA_STATUS_GOOD = "OK"
_VA_STATUS_BAD = "ERR"
# global logger instance
_LOGGER = logging.getLogger(__name__)
# global virtual accelerator, created lazily by start()
_VIRTUAL_ACCELERATOR = None
def start(layout, **kwargs):
    """Launch the module-wide virtual accelerator, building it on first use.

    Parameters
    ----------
    layout :
        Accelerator layout object.

    Keyword Arguments
    -----------------
    settings :
        Dictionary of machine settings.
    channels :
        List of channel tuples with (name, properties, tags).
    start :
        Name of accelerator element to start simulation.
    end :
        Name of accelerator element to end simulation.
    data_dir :
        Path of directory containing IMPACT data files.
    work_dir :
        Path of directory for execution of IMPACT.

    Raises
    ------
    RuntimeError
        If the virtual accelerator is already running.
    """
    global _VIRTUAL_ACCELERATOR
    va = _VIRTUAL_ACCELERATOR
    if va is None:
        va = build_virtaccel(layout, **kwargs)
        _VIRTUAL_ACCELERATOR = va
    if va.is_started():
        raise RuntimeError("Virtual Accelerator already started")
    va.start()
def stop():
    """Shut down the module-wide virtual accelerator.

    Raises
    ------
    RuntimeError
        If no virtual accelerator has been started.
    """
    global _VIRTUAL_ACCELERATOR
    va = _VIRTUAL_ACCELERATOR
    if va is None or not va.is_started():
        raise RuntimeError("Virtual Accelerator not started")
    va.stop()
def build_virtaccel(layout, **kwargs):
    """Build and return a VirtualAccelerator for the given layout.

    Thin convenience wrapper around VirtualAcceleratorFactory.

    Parameters
    ----------
    layout :
        Accelerator layout object.

    Keyword Arguments
    -----------------
    settings :
        Dictionary of machine settings.
    channels :
        List of channel tuples with (name, properties, tags).
    start :
        Name of accelerator element to start simulation.
    end :
        Name of accelerator element to end simulation.
    data_dir :
        Path of directory containing IMPACT data files.
    work_dir :
        Path of directory for execution of IMPACT.

    Returns
    -------
    ret :
        VirtualAccelerator instance.
    """
    return VirtualAcceleratorFactory(layout, **kwargs).build()
class VirtualAcceleratorFactory(object):
    """Prepare a VirtualAccelerator for execution.

    The main purpose of this class is to process the accelerator
    description and configure the VirtualAccelerator for proper
    execution.
    """

    def __init__(self, layout, **kwargs):
        self.layout = layout
        self.config = kwargs.get("config", None)
        self.settings = kwargs.get("settings", None)
        self.channels = kwargs.get("channels", None)
        self.start = kwargs.get("start", None)
        self.end = kwargs.get("end", None)
        # BUG FIX: the 'machine' property below was never backed by an
        # attribute, so reading .machine raised AttributeError. Initialize
        # it from kwargs like the other optional properties.
        self.machine = kwargs.get("machine", None)
        self.data_dir = kwargs.get("data_dir", None)
        self.work_dir = kwargs.get("work_dir", None)

    @property
    def layout(self):
        """Accelerator layout (SeqElement) describing the machine."""
        return self._layout

    @layout.setter
    def layout(self, layout):
        # typo fix in all messages below: "much be" -> "must be"
        if not isinstance(layout, SeqElement):
            raise TypeError("VirtAccelFactory: 'layout' property must be type SeqElement")
        self._layout = layout

    @property
    def start(self):
        """Name of accelerator element to start simulation, or None."""
        return self._start

    @start.setter
    def start(self, start):
        if (start is not None) and not isinstance(start, str):
            raise TypeError("VirtAccelFactory: 'start' property must be type string or None")
        self._start = start

    @property
    def end(self):
        """Name of accelerator element to end simulation, or None."""
        return self._end

    @end.setter
    def end(self, end):
        if (end is not None) and not isinstance(end, str):
            raise TypeError("VirtAccelFactory: 'end' property must be type string or None")
        self._end = end

    @property
    def config(self):
        """Configuration object providing machine defaults."""
        return self._config

    @config.setter
    def config(self, config):
        if not isinstance(config, Configuration):
            # consistency fix: this message previously said "LatticeFactory"
            raise TypeError("VirtAccelFactory: 'config' property must be type Configuration")
        self._config = config

    @property
    def settings(self):
        """Dictionary of machine settings keyed by element name."""
        return self._settings

    @settings.setter
    def settings(self, settings):
        if not isinstance(settings, dict):
            raise TypeError("VirtAccelFactory: 'settings' property must be type dict")
        self._settings = settings

    @property
    def channels(self):
        """List of channel tuples with (name, properties, tags)."""
        return self._channels

    @channels.setter
    def channels(self, channels):
        if not isinstance(channels, list):
            raise TypeError("VirtAccelFactory: 'channels' property must be type list")
        self._channels = channels

    @property
    def machine(self):
        """Machine name, or None."""
        return self._machine

    @machine.setter
    def machine(self, machine):
        if (machine is not None) and not isinstance(machine, str):
            raise TypeError("VirtAccelFactory: 'machine' property must be type string or None")
        self._machine = machine

    @property
    def data_dir(self):
        """Path of directory containing IMPACT data files, or None."""
        return self._data_dir

    @data_dir.setter
    def data_dir(self, data_dir):
        if (data_dir is not None) and not isinstance(data_dir, str):
            raise TypeError("VirtAccelFactory: 'data_dir' property must be type string or None")
        self._data_dir = data_dir

    @property
    def work_dir(self):
        """Path of directory for execution of IMPACT, or None."""
        return self._work_dir

    @work_dir.setter
    def work_dir(self, work_dir):
        if (work_dir is not None) and not isinstance(work_dir, str):
            raise TypeError("VirtAccelFactory: 'work_dir' property must be type string or None")
        self._work_dir = work_dir

    def _get_config_impact_exe(self):
        """Return the IMPACT executable path from config, or the default."""
        if self.config.has_default(CONFIG_IMPACT_EXE_FILE):
            return self.config.getabspath_default(CONFIG_IMPACT_EXE_FILE, cmd=True)
        return _DEFAULT_IMPACT_EXE

    def _findChannel(self, name, field, handle):
        """Return the channel name matching element name, field and handle.

        Raises
        ------
        RuntimeError
            If no matching channel is found.
        """
        for channel, props, _ in self.channels:
            if props["elemName"] != name:
                continue
            if props["elemField"] != field:
                continue
            if props["elemHandle"] != handle:
                continue
            # IMPORTANT: Channel names originating from channel finder
            # may be of type 'unicode' instead of 'str'. The cothread
            # library does not have proper support for unicode strings.
            return str(channel)
        raise RuntimeError("VirtAccelFactory: channel not found: '{}', '{}', '{}'".format(name, field, handle))

    def build(self):
        """Process the accelerator description and configure the Virtual Accelerator.

        Returns
        -------
        ret :
            Fully configured VirtualAccelerator instance.
        """
        settings = self.settings
        data_dir = self.data_dir
        if (data_dir is None) and self.config.has_default(CONFIG_IMPACT_DATA_DIR):
            data_dir = self.config.getabspath_default(CONFIG_IMPACT_DATA_DIR)
        if data_dir is None:
            raise RuntimeError("VirtAccelFactory: No data directory provided, check the configuration")
        work_dir = self.work_dir
        impact_exe = self._get_config_impact_exe()
        latfactory = LatticeFactory(self.layout, config=self.config, settings=self.settings)
        latfactory.outputMode = OUTPUT_MODE_DIAG
        latfactory.start = self.start
        latfactory.end = self.end
        # Derive the common channel prefix from the first channel name,
        # e.g. "PREFIX:ELEM:FIELD:HANDLE" -> "PREFIX:".
        m = re.match("(.*:)?(.*):(.*):(.*)", self.channels[0][0])
        if not m:
            raise RuntimeError("VirtAccelFactory: Error determining channel prefix, check channel names")
        if m.group(1) is None:
            chanprefix = None
        else:
            # IMPORTANT: chanprefix must
            # be converted from unicode
            chanprefix = str(m.group(1))
        va = VirtualAccelerator(latfactory, settings, chanprefix, impact_exe, data_dir, work_dir)
        # Register read/write and read-only channels per element type.
        for elem in self.layout.iter(start=self.start, end=self.end):
            if isinstance(elem, CavityElement):
                # Need to normalize cavity phase settings to 0~360
                settings[elem.name][elem.fields.phase] = _normalize_phase(settings[elem.name][elem.fields.phase])
                va.append_rw(self._findChannel(elem.name, elem.fields.phase, "setpoint"),
                             self._findChannel(elem.name, elem.fields.phase, "readset"),
                             self._findChannel(elem.name, elem.fields.phase, "readback"),
                             (elem.name, elem.fields.phase), desc="Cavity Phase", egu="degree", drvh=360, drvl=0)
                va.append_rw(self._findChannel(elem.name, elem.fields.amplitude, "setpoint"),
                             self._findChannel(elem.name, elem.fields.amplitude, "readset"),
                             self._findChannel(elem.name, elem.fields.amplitude, "readback"),
                             (elem.name, elem.fields.amplitude), desc="Cavity Amplitude", egu="%")
                va.append_elem(elem)
            elif isinstance(elem, SolCorElement):
                va.append_rw(self._findChannel(elem.name, elem.fields.field, "setpoint"),
                             self._findChannel(elem.name, elem.fields.field, "readset"),
                             self._findChannel(elem.name, elem.fields.field, "readback"),
                             (elem.name, elem.fields.field), desc="Solenoid Field", egu="T")  # , drvratio=0.10)
                va.append_rw(self._findChannel(elem.h.name, elem.h.fields.angle, "setpoint"),
                             self._findChannel(elem.h.name, elem.h.fields.angle, "readset"),
                             self._findChannel(elem.h.name, elem.h.fields.angle, "readback"),
                             (elem.h.name, elem.h.fields.angle), desc="Horizontal Corrector",
                             egu="radian")  # , drvabs=0.001)
                va.append_rw(self._findChannel(elem.v.name, elem.v.fields.angle, "setpoint"),
                             self._findChannel(elem.v.name, elem.v.fields.angle, "readset"),
                             self._findChannel(elem.v.name, elem.v.fields.angle, "readback"),
                             (elem.v.name, elem.v.fields.angle), desc="Vertical Corrector",
                             egu="radian")  # , drvabs=0.001)
                va.append_elem(elem)
            elif isinstance(elem, CorElement):
                va.append_rw(self._findChannel(elem.h.name, elem.h.fields.angle, "setpoint"),
                             self._findChannel(elem.h.name, elem.h.fields.angle, "readset"),
                             self._findChannel(elem.h.name, elem.h.fields.angle, "readback"),
                             (elem.h.name, elem.h.fields.angle), desc="Horizontal Corrector",
                             egu="radian")  # , drvabs=0.001)
                va.append_rw(self._findChannel(elem.v.name, elem.v.fields.angle, "setpoint"),
                             self._findChannel(elem.v.name, elem.v.fields.angle, "readset"),
                             self._findChannel(elem.v.name, elem.v.fields.angle, "readback"),
                             (elem.v.name, elem.v.fields.angle), desc="Vertical Corrector",
                             egu="radian")  # , drvabs=0.001)
                va.append_elem(elem)
            elif isinstance(elem, BendElement):
                va.append_rw(self._findChannel(elem.name, elem.fields.field, "setpoint"),
                             self._findChannel(elem.name, elem.fields.field, "readset"),
                             self._findChannel(elem.name, elem.fields.field, "readback"),
                             (elem.name, elem.fields.field), desc="Bend Relative Field", egu="none")  # , drvratio=0.10)
                va.append_elem(elem)
            elif isinstance(elem, QuadElement):
                va.append_rw(self._findChannel(elem.name, elem.fields.gradient, "setpoint"),
                             self._findChannel(elem.name, elem.fields.gradient, "readset"),
                             self._findChannel(elem.name, elem.fields.gradient, "readback"),
                             (elem.name, elem.fields.gradient), desc="Quadrupole Gradient",
                             egu="T/m")  # , drvratio=0.10)
                va.append_elem(elem)
            elif isinstance(elem, SextElement):
                _LOGGER.warning("VirtAccelFactory: Hexapole magnet element support not implemented. Ignoring channels.")
                # va.append_rw(self._findChannel(elem.name, elem.fields.field, "setpoint"),
                #              self._findChannel(elem.name, elem.fields.field, "readset"),
                #              self._findChannel(elem.name, elem.fields.field, "readback"),
                #              (elem.name, elem.fields.field), desc="Hexapole Field", egu="T/m^2", drvrel=0.05)
                # va.append_elem(elem)
            elif isinstance(elem, BPMElement):
                va.append_ro(self._findChannel(elem.name, elem.fields.x, "readback"),
                             (elem.name, elem.fields.x), desc="Horizontal Position", egu="m")
                va.append_ro(self._findChannel(elem.name, elem.fields.y, "readback"),
                             (elem.name, elem.fields.y), desc="Vertical Position", egu="m")
                va.append_ro(self._findChannel(elem.name, elem.fields.phase, "readback"),
                             (elem.name, elem.fields.phase), desc="Beam Phase", egu="degree")
                va.append_ro(self._findChannel(elem.name, elem.fields.energy, "readback"),
                             (elem.name, elem.fields.energy), desc="Beam Energy", egu="MeV")
                va.append_elem(elem)
            elif isinstance(elem, PMElement):
                va.append_ro(self._findChannel(elem.name, elem.fields.x, "readback"),
                             (elem.name, elem.fields.x), desc="Horizontal Position", egu="m")
                va.append_ro(self._findChannel(elem.name, elem.fields.y, "readback"),
                             (elem.name, elem.fields.y), desc="Vertical Position", egu="m")
                va.append_ro(self._findChannel(elem.name, elem.fields.xrms, "readback"),
                             (elem.name, elem.fields.xrms), desc="Horizontal Size", egu="m")
                va.append_ro(self._findChannel(elem.name, elem.fields.yrms, "readback"),
                             (elem.name, elem.fields.yrms), desc="Vertical Size", egu="m")
                va.append_elem(elem)
            elif isinstance(elem, (BLMElement, BLElement, BCMElement)):
                # ignore these diagnostic elements for now
                pass
            elif isinstance(elem, (ValveElement, PortElement, StripElement)):
                # ignore these elements with no relevant channels
                pass
            elif isinstance(elem, DriftElement):
                # drift elements have no channels
                pass
            else:
                raise RuntimeError("Unsupported element type: {}".format(type(elem).__name__))
        return va
class VirtualAccelerator(object):
"""VirtualAccelerator executes and manages the
EPICS IOC process and IMPACT simulation process.
"""
    def __init__(self, latfactory, settings, chanprefix, impact_exe, data_dir, work_dir=None):
        """Initialize the virtual accelerator state.

        Parameters
        ----------
        latfactory : LatticeFactory
            Factory used to generate the IMPACT lattice.
        settings : dict
            Dictionary of machine settings.
        chanprefix :
            Common channel-name prefix, or None.
        impact_exe :
            Path of the IMPACT executable (validated by the property setter).
        data_dir :
            Path of directory containing IMPACT data files.
        work_dir :
            Path of directory for execution of IMPACT, or None.

        Raises
        ------
        TypeError
            If latfactory or settings have the wrong type.
        """
        if not isinstance(latfactory, LatticeFactory):
            raise TypeError("VirtualAccelerator: Invalid type for LatticeFactory")
        self._latfactory = latfactory
        if not isinstance(settings, dict):
            raise TypeError("VirtualAccelerator: Invalid type for accelerator Settings")
        self._settings = settings
        self._chanprefix = chanprefix
        # The following three assignments go through property setters.
        self.impact_exe = impact_exe
        self.data_dir = data_dir
        self.work_dir = work_dir
        # Channel/element bookkeeping, populated during configuration.
        self._epicsdb = []
        self._csetmap = OrderedDict()
        self._elemmap = OrderedDict()
        self._fieldmap = OrderedDict()
        self._readfieldmap = OrderedDict()
        # NOTE(review): noise level presumably applied to readbacks —
        # usage is outside this chunk, confirm before relying on it.
        self._noise = 0.001
        # Runtime state flags and process handles; managed by the
        # start/stop machinery defined elsewhere in this class.
        self._started = False
        self._continue = False
        self._rm_work_dir = False
        self._ioc_process = None
        self._ioc_logfile = None
        self._subscriptions = None
        self._lock = cothread.Event(False)
@property
def impact_exe(self):
return self._impact_exe
@impact_exe.setter
def impact_exe(self, impact_exe):
if not isinstance(impact_exe, str):
raise TypeError("VirtualAccelerator: 'impact_exe' property much be type string")
self._impact_exe = impact_exe
@property
def data_dir(self):
| |
__eq__(self, o):
"""__eq__(IntsConstDataSet2D self, IntsConstDataSet2D o) -> bool"""
return _RMF_HDF5.IntsConstDataSet2D___eq__(self, o)
    # SWIG-generated wrapper: inequality comparison delegates to the C++ binding.
    def __ne__(self, o):
        """__ne__(IntsConstDataSet2D self, IntsConstDataSet2D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet2D___ne__(self, o)
    # SWIG-generated wrapper: less-than comparison delegates to the C++ binding.
    def __lt__(self, o):
        """__lt__(IntsConstDataSet2D self, IntsConstDataSet2D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet2D___lt__(self, o)
    # SWIG-generated wrapper: greater-than comparison delegates to the C++ binding.
    def __gt__(self, o):
        """__gt__(IntsConstDataSet2D self, IntsConstDataSet2D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet2D___gt__(self, o)
    # SWIG-generated wrapper: greater-or-equal comparison delegates to the C++ binding.
    def __ge__(self, o):
        """__ge__(IntsConstDataSet2D self, IntsConstDataSet2D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet2D___ge__(self, o)
    # SWIG-generated wrapper: less-or-equal comparison delegates to the C++ binding.
    def __le__(self, o):
        """__le__(IntsConstDataSet2D self, IntsConstDataSet2D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet2D___le__(self, o)
__swig_destroy__ = _RMF_HDF5.delete_IntsConstDataSet2D
__del__ = lambda self: None
IntsConstDataSet2D_swigregister = _RMF_HDF5.IntsConstDataSet2D_swigregister
IntsConstDataSet2D_swigregister(IntsConstDataSet2D)
# NOTE: SWIG-generated proxy class — do not edit by hand; regenerate from the
# RMF HDF5 interface definition instead.
class IntsConstDataSet3D(_ConstAttributesObject):
    """Proxy of C++ RMF::HDF5::ConstDataSetD<(RMF::HDF5::IntsTraits,3)> class."""
    # SWIG bookkeeping: merge setter/getter tables from the base class so
    # attribute access routes through _swig_setattr/_swig_getattr.
    __swig_setmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntsConstDataSet3D, name, value)
    __swig_getmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, IntsConstDataSet3D, name)
    def __init__(self):
        """__init__(RMF::HDF5::ConstDataSetD<(RMF::HDF5::IntsTraits,3)> self) -> IntsConstDataSet3D"""
        # Wrap the newly created C++ object; older SWIG proxies keep a list
        # of wrapped pointers in self.this.
        this = _RMF_HDF5.new_IntsConstDataSet3D()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def get_size(self):
        """get_size(IntsConstDataSet3D self) -> DataSetIndex3D"""
        return _RMF_HDF5.IntsConstDataSet3D_get_size(self)
    def get_value(self, ijk):
        """get_value(IntsConstDataSet3D self, DataSetIndex3D ijk) -> Ints"""
        return _RMF_HDF5.IntsConstDataSet3D_get_value(self, ijk)
    def __str__(self):
        """__str__(IntsConstDataSet3D self) -> std::string"""
        return _RMF_HDF5.IntsConstDataSet3D___str__(self)
    def __repr__(self):
        """__repr__(IntsConstDataSet3D self) -> std::string"""
        return _RMF_HDF5.IntsConstDataSet3D___repr__(self)
    def get_block(self, lb, size):
        """get_block(IntsConstDataSet3D self, DataSetIndex3D lb, DataSetIndex3D size) -> IntsList"""
        return _RMF_HDF5.IntsConstDataSet3D_get_block(self, lb, size)
    # Rich comparisons all delegate to the C++ implementation.
    def __eq__(self, o):
        """__eq__(IntsConstDataSet3D self, IntsConstDataSet3D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet3D___eq__(self, o)
    def __ne__(self, o):
        """__ne__(IntsConstDataSet3D self, IntsConstDataSet3D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet3D___ne__(self, o)
    def __lt__(self, o):
        """__lt__(IntsConstDataSet3D self, IntsConstDataSet3D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet3D___lt__(self, o)
    def __gt__(self, o):
        """__gt__(IntsConstDataSet3D self, IntsConstDataSet3D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet3D___gt__(self, o)
    def __ge__(self, o):
        """__ge__(IntsConstDataSet3D self, IntsConstDataSet3D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet3D___ge__(self, o)
    def __le__(self, o):
        """__le__(IntsConstDataSet3D self, IntsConstDataSet3D o) -> bool"""
        return _RMF_HDF5.IntsConstDataSet3D___le__(self, o)
    __swig_destroy__ = _RMF_HDF5.delete_IntsConstDataSet3D
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime.
IntsConstDataSet3D_swigregister = _RMF_HDF5.IntsConstDataSet3D_swigregister
IntsConstDataSet3D_swigregister(IntsConstDataSet3D)
# NOTE: SWIG-generated proxy class — do not edit by hand; regenerate from the
# RMF HDF5 interface definition instead. Adds mutable-attribute setters on
# top of the const 1D data set proxy.
class IntsDataSetAttributes1D(IntsConstDataSet1D):
    """Proxy of C++ RMF::HDF5::MutableAttributes<(RMF::HDF5::ConstDataSetD<(RMF::HDF5::IntsTraits,1)>)> class."""
    # SWIG bookkeeping: merge setter/getter tables from the base class.
    __swig_setmethods__ = {}
    for _s in [IntsConstDataSet1D]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntsDataSetAttributes1D, name, value)
    __swig_getmethods__ = {}
    for _s in [IntsConstDataSet1D]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, IntsDataSetAttributes1D, name)
    def __init__(self, *args, **kwargs):
        # Instances are only created from the C++ side, never directly.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_int_attribute(self, nm, value):
        """set_int_attribute(IntsDataSetAttributes1D self, std::string nm, Ints value)"""
        return _RMF_HDF5.IntsDataSetAttributes1D_set_int_attribute(self, nm, value)
    def set_float_attribute(self, nm, value):
        """set_float_attribute(IntsDataSetAttributes1D self, std::string nm, Floats value)"""
        return _RMF_HDF5.IntsDataSetAttributes1D_set_float_attribute(self, nm, value)
    def set_index_attribute(self, nm, value):
        """set_index_attribute(IntsDataSetAttributes1D self, std::string nm, Ints value)"""
        return _RMF_HDF5.IntsDataSetAttributes1D_set_index_attribute(self, nm, value)
    def set_char_attribute(self, nm, value):
        """set_char_attribute(IntsDataSetAttributes1D self, std::string nm, std::string value)"""
        return _RMF_HDF5.IntsDataSetAttributes1D_set_char_attribute(self, nm, value)
    __swig_destroy__ = _RMF_HDF5.delete_IntsDataSetAttributes1D
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime.
IntsDataSetAttributes1D_swigregister = _RMF_HDF5.IntsDataSetAttributes1D_swigregister
IntsDataSetAttributes1D_swigregister(IntsDataSetAttributes1D)
# NOTE: SWIG-generated proxy class — do not edit by hand; regenerate from the
# RMF HDF5 interface definition instead. Adds mutable-attribute setters on
# top of the const 2D data set proxy.
class IntsDataSetAttributes2D(IntsConstDataSet2D):
    """Proxy of C++ RMF::HDF5::MutableAttributes<(RMF::HDF5::ConstDataSetD<(RMF::HDF5::IntsTraits,2)>)> class."""
    # SWIG bookkeeping: merge setter/getter tables from the base class.
    __swig_setmethods__ = {}
    for _s in [IntsConstDataSet2D]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntsDataSetAttributes2D, name, value)
    __swig_getmethods__ = {}
    for _s in [IntsConstDataSet2D]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, IntsDataSetAttributes2D, name)
    def __init__(self, *args, **kwargs):
        # Instances are only created from the C++ side, never directly.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_int_attribute(self, nm, value):
        """set_int_attribute(IntsDataSetAttributes2D self, std::string nm, Ints value)"""
        return _RMF_HDF5.IntsDataSetAttributes2D_set_int_attribute(self, nm, value)
    def set_float_attribute(self, nm, value):
        """set_float_attribute(IntsDataSetAttributes2D self, std::string nm, Floats value)"""
        return _RMF_HDF5.IntsDataSetAttributes2D_set_float_attribute(self, nm, value)
    def set_index_attribute(self, nm, value):
        """set_index_attribute(IntsDataSetAttributes2D self, std::string nm, Ints value)"""
        return _RMF_HDF5.IntsDataSetAttributes2D_set_index_attribute(self, nm, value)
    def set_char_attribute(self, nm, value):
        """set_char_attribute(IntsDataSetAttributes2D self, std::string nm, std::string value)"""
        return _RMF_HDF5.IntsDataSetAttributes2D_set_char_attribute(self, nm, value)
    __swig_destroy__ = _RMF_HDF5.delete_IntsDataSetAttributes2D
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime.
IntsDataSetAttributes2D_swigregister = _RMF_HDF5.IntsDataSetAttributes2D_swigregister
IntsDataSetAttributes2D_swigregister(IntsDataSetAttributes2D)
# Auto-generated SWIG proxy class; edit the SWIG interface, not this file.
class IntsDataSetAttributes3D(IntsConstDataSet3D):
    """Proxy of C++ RMF::HDF5::MutableAttributes<(RMF::HDF5::ConstDataSetD<(RMF::HDF5::IntsTraits,3)>)> class."""
    # SWIG boilerplate: merge the base proxy's get/set tables and route
    # attribute access through the SWIG helper functions.
    __swig_setmethods__ = {}
    for _s in [IntsConstDataSet3D]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntsDataSetAttributes3D, name, value)
    __swig_getmethods__ = {}
    for _s in [IntsConstDataSet3D]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, IntsDataSetAttributes3D, name)
    def __init__(self, *args, **kwargs):
        # Instances are only created from the C++ side; direct construction
        # is deliberately disabled.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def set_int_attribute(self, nm, value):
        """set_int_attribute(IntsDataSetAttributes3D self, std::string nm, Ints value)"""
        return _RMF_HDF5.IntsDataSetAttributes3D_set_int_attribute(self, nm, value)
    def set_float_attribute(self, nm, value):
        """set_float_attribute(IntsDataSetAttributes3D self, std::string nm, Floats value)"""
        return _RMF_HDF5.IntsDataSetAttributes3D_set_float_attribute(self, nm, value)
    def set_index_attribute(self, nm, value):
        """set_index_attribute(IntsDataSetAttributes3D self, std::string nm, Ints value)"""
        return _RMF_HDF5.IntsDataSetAttributes3D_set_index_attribute(self, nm, value)
    def set_char_attribute(self, nm, value):
        """set_char_attribute(IntsDataSetAttributes3D self, std::string nm, std::string value)"""
        return _RMF_HDF5.IntsDataSetAttributes3D_set_char_attribute(self, nm, value)
    __swig_destroy__ = _RMF_HDF5.delete_IntsDataSetAttributes3D
    __del__ = lambda self: None
# Register the proxy with the C extension (SWIG boilerplate).
IntsDataSetAttributes3D_swigregister = _RMF_HDF5.IntsDataSetAttributes3D_swigregister
IntsDataSetAttributes3D_swigregister(IntsDataSetAttributes3D)
# Auto-generated SWIG proxy class; edit the SWIG interface, not this file.
class FloatConstDataSet1D(_ConstAttributesObject):
    """Proxy of C++ RMF::HDF5::ConstDataSetD<(RMF::HDF5::FloatTraits,1)> class."""
    # SWIG boilerplate: merge the base proxy's get/set tables and route
    # attribute access through the SWIG helper functions.
    __swig_setmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, FloatConstDataSet1D, name, value)
    __swig_getmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, FloatConstDataSet1D, name)
    def __init__(self):
        """__init__(RMF::HDF5::ConstDataSetD<(RMF::HDF5::FloatTraits,1)> self) -> FloatConstDataSet1D"""
        this = _RMF_HDF5.new_FloatConstDataSet1D()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def get_size(self):
        """get_size(FloatConstDataSet1D self) -> DataSetIndex1D"""
        return _RMF_HDF5.FloatConstDataSet1D_get_size(self)
    def get_value(self, ijk):
        """get_value(FloatConstDataSet1D self, DataSetIndex1D ijk) -> RMF::HDF5::FloatTraits::Type"""
        return _RMF_HDF5.FloatConstDataSet1D_get_value(self, ijk)
    def __str__(self):
        """__str__(FloatConstDataSet1D self) -> std::string"""
        return _RMF_HDF5.FloatConstDataSet1D___str__(self)
    def __repr__(self):
        """__repr__(FloatConstDataSet1D self) -> std::string"""
        return _RMF_HDF5.FloatConstDataSet1D___repr__(self)
    def get_block(self, lb, size):
        """get_block(FloatConstDataSet1D self, DataSetIndex1D lb, DataSetIndex1D size) -> Floats"""
        return _RMF_HDF5.FloatConstDataSet1D_get_block(self, lb, size)
    # Comparison operators delegate to the wrapped C++ object.
    def __eq__(self, o):
        """__eq__(FloatConstDataSet1D self, FloatConstDataSet1D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet1D___eq__(self, o)
    def __ne__(self, o):
        """__ne__(FloatConstDataSet1D self, FloatConstDataSet1D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet1D___ne__(self, o)
    def __lt__(self, o):
        """__lt__(FloatConstDataSet1D self, FloatConstDataSet1D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet1D___lt__(self, o)
    def __gt__(self, o):
        """__gt__(FloatConstDataSet1D self, FloatConstDataSet1D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet1D___gt__(self, o)
    def __ge__(self, o):
        """__ge__(FloatConstDataSet1D self, FloatConstDataSet1D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet1D___ge__(self, o)
    def __le__(self, o):
        """__le__(FloatConstDataSet1D self, FloatConstDataSet1D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet1D___le__(self, o)
    __swig_destroy__ = _RMF_HDF5.delete_FloatConstDataSet1D
    __del__ = lambda self: None
# Register the proxy with the C extension (SWIG boilerplate).
FloatConstDataSet1D_swigregister = _RMF_HDF5.FloatConstDataSet1D_swigregister
FloatConstDataSet1D_swigregister(FloatConstDataSet1D)
# Auto-generated SWIG proxy class; edit the SWIG interface, not this file.
class FloatConstDataSet2D(_ConstAttributesObject):
    """Proxy of C++ RMF::HDF5::ConstDataSetD<(RMF::HDF5::FloatTraits,2)> class."""
    # SWIG boilerplate: merge the base proxy's get/set tables and route
    # attribute access through the SWIG helper functions.
    __swig_setmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, FloatConstDataSet2D, name, value)
    __swig_getmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, FloatConstDataSet2D, name)
    def __init__(self):
        """__init__(RMF::HDF5::ConstDataSetD<(RMF::HDF5::FloatTraits,2)> self) -> FloatConstDataSet2D"""
        this = _RMF_HDF5.new_FloatConstDataSet2D()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def get_size(self):
        """get_size(FloatConstDataSet2D self) -> DataSetIndex2D"""
        return _RMF_HDF5.FloatConstDataSet2D_get_size(self)
    def get_value(self, ijk):
        """get_value(FloatConstDataSet2D self, DataSetIndex2D ijk) -> RMF::HDF5::FloatTraits::Type"""
        return _RMF_HDF5.FloatConstDataSet2D_get_value(self, ijk)
    def __str__(self):
        """__str__(FloatConstDataSet2D self) -> std::string"""
        return _RMF_HDF5.FloatConstDataSet2D___str__(self)
    def __repr__(self):
        """__repr__(FloatConstDataSet2D self) -> std::string"""
        return _RMF_HDF5.FloatConstDataSet2D___repr__(self)
    def get_block(self, lb, size):
        """get_block(FloatConstDataSet2D self, DataSetIndex2D lb, DataSetIndex2D size) -> Floats"""
        return _RMF_HDF5.FloatConstDataSet2D_get_block(self, lb, size)
    # Comparison operators delegate to the wrapped C++ object.
    def __eq__(self, o):
        """__eq__(FloatConstDataSet2D self, FloatConstDataSet2D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet2D___eq__(self, o)
    def __ne__(self, o):
        """__ne__(FloatConstDataSet2D self, FloatConstDataSet2D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet2D___ne__(self, o)
    def __lt__(self, o):
        """__lt__(FloatConstDataSet2D self, FloatConstDataSet2D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet2D___lt__(self, o)
    def __gt__(self, o):
        """__gt__(FloatConstDataSet2D self, FloatConstDataSet2D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet2D___gt__(self, o)
    def __ge__(self, o):
        """__ge__(FloatConstDataSet2D self, FloatConstDataSet2D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet2D___ge__(self, o)
    def __le__(self, o):
        """__le__(FloatConstDataSet2D self, FloatConstDataSet2D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet2D___le__(self, o)
    __swig_destroy__ = _RMF_HDF5.delete_FloatConstDataSet2D
    __del__ = lambda self: None
# Register the proxy with the C extension (SWIG boilerplate).
FloatConstDataSet2D_swigregister = _RMF_HDF5.FloatConstDataSet2D_swigregister
FloatConstDataSet2D_swigregister(FloatConstDataSet2D)
# Auto-generated SWIG proxy class; edit the SWIG interface, not this file.
class FloatConstDataSet3D(_ConstAttributesObject):
    """Proxy of C++ RMF::HDF5::ConstDataSetD<(RMF::HDF5::FloatTraits,3)> class."""
    # SWIG boilerplate: merge the base proxy's get/set tables and route
    # attribute access through the SWIG helper functions.
    __swig_setmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, FloatConstDataSet3D, name, value)
    __swig_getmethods__ = {}
    for _s in [_ConstAttributesObject]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, FloatConstDataSet3D, name)
    def __init__(self):
        """__init__(RMF::HDF5::ConstDataSetD<(RMF::HDF5::FloatTraits,3)> self) -> FloatConstDataSet3D"""
        this = _RMF_HDF5.new_FloatConstDataSet3D()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    def get_size(self):
        """get_size(FloatConstDataSet3D self) -> DataSetIndex3D"""
        return _RMF_HDF5.FloatConstDataSet3D_get_size(self)
    def get_value(self, ijk):
        """get_value(FloatConstDataSet3D self, DataSetIndex3D ijk) -> RMF::HDF5::FloatTraits::Type"""
        return _RMF_HDF5.FloatConstDataSet3D_get_value(self, ijk)
    def __str__(self):
        """__str__(FloatConstDataSet3D self) -> std::string"""
        return _RMF_HDF5.FloatConstDataSet3D___str__(self)
    def __repr__(self):
        """__repr__(FloatConstDataSet3D self) -> std::string"""
        return _RMF_HDF5.FloatConstDataSet3D___repr__(self)
    def get_block(self, lb, size):
        """get_block(FloatConstDataSet3D self, DataSetIndex3D lb, DataSetIndex3D size) -> Floats"""
        return _RMF_HDF5.FloatConstDataSet3D_get_block(self, lb, size)
    # Comparison operators delegate to the wrapped C++ object.
    def __eq__(self, o):
        """__eq__(FloatConstDataSet3D self, FloatConstDataSet3D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet3D___eq__(self, o)
    def __ne__(self, o):
        """__ne__(FloatConstDataSet3D self, FloatConstDataSet3D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet3D___ne__(self, o)
    def __lt__(self, o):
        """__lt__(FloatConstDataSet3D self, FloatConstDataSet3D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet3D___lt__(self, o)
    def __gt__(self, o):
        """__gt__(FloatConstDataSet3D self, FloatConstDataSet3D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet3D___gt__(self, o)
    def __ge__(self, o):
        """__ge__(FloatConstDataSet3D self, FloatConstDataSet3D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet3D___ge__(self, o)
    def __le__(self, o):
        """__le__(FloatConstDataSet3D self, FloatConstDataSet3D o) -> bool"""
        return _RMF_HDF5.FloatConstDataSet3D___le__(self, o)
    __swig_destroy__ = _RMF_HDF5.delete_FloatConstDataSet3D
    __del__ = lambda self: None
# Register the proxy with the C extension (SWIG boilerplate).
FloatConstDataSet3D_swigregister = _RMF_HDF5.FloatConstDataSet3D_swigregister
FloatConstDataSet3D_swigregister(FloatConstDataSet3D)
class FloatDataSetAttributes1D(FloatConstDataSet1D):
"""Proxy of C++ RMF::HDF5::MutableAttributes<(RMF::HDF5::ConstDataSetD<(RMF::HDF5::FloatTraits,1)>)> class."""
__swig_setmethods__ = {}
for _s in [FloatConstDataSet1D]:
__swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
__setattr__ = lambda self, name, value: _swig_setattr(self, FloatDataSetAttributes1D, name, value)
__swig_getmethods__ = {}
for _s in [FloatConstDataSet1D]:
__swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
__getattr__ = lambda self, name: _swig_getattr(self, FloatDataSetAttributes1D, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def set_int_attribute(self, nm, value):
"""set_int_attribute(FloatDataSetAttributes1D | |
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
import EKF
import pdb
from mbest_ilp import new_m_best_sol
from multiprocessing import Pool
from functools import partial
#from mbest_ilp import m_best_sol as new_m_best_sol
INFTY_COST = 1e+5
def min_marg_matching(marginalizations, track_indices=None, max_distance=1):
    """Turn a JPDA marginalization matrix into hard track/detection matches.

    Parameters
    ----------
    marginalizations : ndarray
        Association probabilities with one row per track.  Column 0 appears
        to be the dummy "missed detection" node and columns 1..N the real
        detections, matching the layout produced by JPDA() -- TODO confirm.
    track_indices : Optional[List[int]]
        Maps rows of `marginalizations` to track ids; defaults to row order.
    max_distance : float
        Pairs with cost (1 - probability) above this value are rejected.

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Matched (track, detection) pairs, unmatched track ids, and
        unmatched detection indices.
    """
    # Convert probabilities into costs for the assignment solver.
    cost_matrix = 1 - marginalizations
    num_tracks, num_detections = cost_matrix.shape
    if track_indices is None:
        track_indices = np.arange(num_tracks)
    # Column 0 is the dummy node, so only num_detections - 1 columns
    # correspond to real detections.
    detection_indices = np.arange(num_detections-1)
    if num_tracks == 0 or num_detections == 0:
        return [], track_indices, detection_indices # Nothing to match.
    # Replicate the dummy column so every track can independently be assigned
    # "no detection" by the one-to-one linear assignment solver.
    extra_dummy_cols = np.tile(cost_matrix[:,0,np.newaxis], (1, num_tracks-1))
    expanded_cost_matrix = np.hstack((extra_dummy_cols, cost_matrix))
    indices = linear_assignment(expanded_cost_matrix)
    matches, unmatched_tracks, unmatched_detections = [], [], []
    # gather unmatched detections (new track); real detection columns start
    # at offset num_tracks in the expanded matrix.
    for col, detection_idx in enumerate(detection_indices):
        if col+num_tracks not in indices[:, 1]:
            unmatched_detections.append(detection_idx)
    # gather unmatched tracks (no detection)
    for row, track_idx in enumerate(track_indices):
        if row not in indices[:, 0]:
            unmatched_tracks.append(track_idx)
    # thresholding and matches
    for row, col in indices:
        track_idx = track_indices[row]
        detection_idx = col - num_tracks
        if detection_idx < 0:
            # Assigned to a dummy column: the track missed its detection.
            unmatched_tracks.append(track_idx)
            continue
        if expanded_cost_matrix[row, col] > max_distance:
            # apply thresholding
            unmatched_tracks.append(track_idx)
            unmatched_detections.append(detection_idx)
        else:
            # associate matches
            matches.append((track_idx, detection_idx))
    return matches, unmatched_tracks, unmatched_detections
def min_cost_matching(
        distance_metric, max_distance, tracks, detections, track_indices=None,
        detection_indices=None, compare_2d = False, detections_3d=None):
    """Match tracks to detections by solving a linear assignment problem.

    Parameters
    ----------
    distance_metric : callable
        Invoked as distance_metric(tracks, detections, track_indices,
        detection_indices, compare_2d, detections_3d); must return an
        NxM cost matrix for the N given tracks and M given detections.
    max_distance : float
        Gating threshold; assignments costing more than this are rejected.
    tracks : List[track.Track]
        Predicted tracks at the current time step.
    detections : List[detection.Detection]
        Detections at the current time step.
    track_indices : Optional[List[int]]
        Row-to-track mapping for the cost matrix; defaults to all tracks.
    detection_indices : Optional[List[int]]
        Column-to-detection mapping; defaults to all detections.
    compare_2d : bool
        Forwarded to `distance_metric`.
    detections_3d : optional
        Forwarded to `distance_metric`.

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Matched (track, detection) index pairs, unmatched track indices,
        and unmatched detection indices.
    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))
    if len(detection_indices) == 0 or len(track_indices) == 0:
        # Nothing to associate on an empty side.
        return [], track_indices, detection_indices

    cost_matrix = distance_metric(
        tracks, detections, track_indices, detection_indices, compare_2d, detections_3d)
    # Push every over-threshold entry just past the gate so the solver may
    # still pick it, after which the thresholding pass below rejects it.
    cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
    indices = linear_assignment(cost_matrix)

    assigned_rows = set(indices[:, 0])
    assigned_cols = set(indices[:, 1])
    # Detections the solver left out start new tracks; tracks it left out
    # have no detection this frame.
    unmatched_detections = [det_idx for col, det_idx in enumerate(detection_indices)
                            if col not in assigned_cols]
    unmatched_tracks = [trk_idx for row, trk_idx in enumerate(track_indices)
                        if row not in assigned_rows]

    matches = []
    for row, col in indices:
        trk_idx = track_indices[row]
        det_idx = detection_indices[col]
        if cost_matrix[row, col] > max_distance:
            # Gated assignment: treat both sides as unmatched.
            unmatched_tracks.append(trk_idx)
            unmatched_detections.append(det_idx)
        else:
            matches.append((trk_idx, det_idx))
    return matches, unmatched_tracks, unmatched_detections
# @profile
def JPDA(
        distance_metric, dummy_node_cost_app, dummy_node_cost_iou, tracks, detections, track_indices=None,
        detection_indices=None, m=1, compare_2d = False, windowing = False):
    """Compute JPDA marginal association probabilities for tracks/detections.

    Parameters
    ----------
    distance_metric : callable
        Called as distance_metric(tracks, detections, track_indices,
        detection_indices, compare_2d); must return a (num_tracks,
        num_detections, 2) cost array -- channel 0 and 1 are indexed as
        [:,:,0]/[:,:,1] below (presumably IOU and appearance; confirm
        against the metric implementation) -- plus a boolean gating mask.
    dummy_node_cost_app, dummy_node_cost_iou : float
        Costs of the "missed detection" dummy node for the appearance and
        IOU channels respectively.
    tracks, detections : list
        Current tracks and detections.
    track_indices, detection_indices : Optional[List[int]]
        Subsets to consider; default to all tracks/detections.
    m : int
        Number of best assignment hypotheses per cluster (forwarded to
        get_JPDA_output / new_m_best_sol).
    compare_2d : bool
        Forwarded to `distance_metric`.
    windowing : bool
        Unused in this implementation.

    Returns
    -------
    ndarray
        Marginalization matrix of shape (num_tracks, num_detections + 1):
        column 0 is the dummy (missed detection) node, columns 1..N the
        detections.  NOTE: unlike min_cost_matching, this does NOT return
        a (matches, unmatched, unmatched) tuple.
    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))
    if len(detection_indices) == 0 or len(track_indices) == 0:
        return np.zeros((0, len(detections) + 1)) # Nothing to match.
    cost_matrix, gate_mask = distance_metric(
        tracks, detections, track_indices, detection_indices, compare_2d)
    num_tracks, num_detections = cost_matrix.shape[0], cost_matrix.shape[1]
    # Gated (implausible) pairs receive an effectively infinite cost.
    cost_matrix[gate_mask] = INFTY_COST
    # print("\nIOU Cost Matrix:", cost_matrix[:,:,0])
    # print("App:", cost_matrix[:,:,1])
    # Partition into independent clusters of mutually conflicting
    # tracks/detections; find_clusters is defined elsewhere in the project.
    clusters = find_clusters(cost_matrix[:,:,0], INFTY_COST - 0.0001)
    # print('\n', clusters)
    jpda_output = []
    for cluster in clusters:
        jpda_output.append(get_JPDA_output(cluster, cost_matrix, dummy_node_cost_app, dummy_node_cost_iou, INFTY_COST - 0.0001, m))
    if not jpda_output:
        # No clusters at all: marginalize every track onto the dummy node.
        mc = np.zeros((num_tracks, num_detections + 1))
        mc[:, 0] = 1
        return mc
    assignments, assignment_cost = zip(*jpda_output)
    assignments = np.vstack([item for sublist in assignments for item in sublist])
    assignment_cost = np.array([item for sublist in assignment_cost for item in sublist])
    # Weight each assignment hypothesis by exp(-cost) and sum over hypotheses.
    marginalised_cost = np.sum(assignments*np.exp(-np.expand_dims(assignment_cost, 1)), axis = 0)
    marginalised_cost = np.reshape(marginalised_cost, (num_tracks, num_detections+1))
    # print('\n', marginalised_cost)
    return marginalised_cost
def calculate_entropy(matrix, idx, idy):
    """Return the mean per-row entropy of `matrix`, excluding masked cells.

    Each row is normalized over its *kept* entries (positions (idx, idy)
    are zeroed out of both the normalizer and the entropy sum), then the
    Shannon entropies of the rows are averaged.
    """
    keep = np.ones(matrix.shape)
    keep[idx, idy] = 0
    # Normalize each row by the mass of its kept entries only.
    row_mass = np.sum(keep * matrix, axis=1, keepdims=True)
    p = matrix / row_mass
    # -p*log(p) per cell, with excluded cells zeroed by the mask.
    per_cell = (-p * np.log(p)) * keep
    return np.mean(np.sum(per_cell, axis=1))
def get_JPDA_output(cluster, cost_matrix, dummy_node_cost_app, dummy_node_cost_iou, cutoff, m):
    """Compute assignment hypotheses and their costs for one JPDA cluster.

    Parameters
    ----------
    cluster : (array, array)
        cluster[0]: track row indices; cluster[1]: detection indices
        (1-based, 0 being the dummy node -- inferred from the
        `cluster[1] - 1` indexing below; confirm with find_clusters).
    cost_matrix : ndarray
        (num_tracks, num_detections, 2) array; channel 0 and channel 1 are
        the two metric channels selected between by entropy below.
    dummy_node_cost_app, dummy_node_cost_iou : float
        Missed-detection costs for the two channels.
    cutoff : float
        Gating value; entries above it are treated as infeasible.
    m : int
        Number of best hypotheses requested from new_m_best_sol.

    Returns
    -------
    (List[ndarray], ndarray)
        Flattened assignment-probability matrices (each reshaped to one
        row of length num_tracks*(num_detections+1)) and their costs.
    """
    if len(cluster[1]) == 0:
        # Cluster with no detections: all its tracks go to the dummy node.
        assignment = np.zeros((cost_matrix.shape[0], cost_matrix.shape[1]+1))
        assignment[cluster[0], 0] = 1
        assignment = assignment.reshape(1,-1)
        return [assignment], np.array([0])
    # Extract this cluster's sub-matrices for both metric channels.
    new_cost_matrix_appearance = np.reshape(cost_matrix[np.repeat(cluster[0], len(cluster[1])),
                                                        np.tile(cluster[1] - 1, len(cluster[0])),
                                                        [0]*(len(cluster[1])*len(cluster[0]))],
                                            (len(cluster[0]), len(cluster[1])))
    new_cost_matrix_iou = np.reshape(cost_matrix[np.repeat(cluster[0], len(cluster[1])), np.tile(cluster[1] - 1, len(cluster[0])), 1],
                                     (len(cluster[0]), len(cluster[1])))
    # Gated entries (above cutoff) are excluded from the entropy estimate.
    idx_x, idx_y = np.where(new_cost_matrix_appearance > cutoff)
    appearance_entropy = calculate_entropy(new_cost_matrix_appearance, idx_x, idx_y)
    iou_entropy = calculate_entropy(new_cost_matrix_iou, idx_x, idx_y)
    # Pick the more discriminative (lower-entropy) channel for this cluster.
    if appearance_entropy < iou_entropy:
        new_cost_matrix = new_cost_matrix_appearance
        new_cost_matrix = 2*np.ones(new_cost_matrix.shape)/(new_cost_matrix+1) - 1
        dummy_node_cost = -np.log(2/(dummy_node_cost_app+1) - 1)
    else:
        new_cost_matrix = new_cost_matrix_iou
        # Avoid log(0) after the 1 - x transform below.
        new_cost_matrix[new_cost_matrix==1] -= 1e-3
        new_cost_matrix = 1 - new_cost_matrix
        dummy_node_cost = -np.log(1-dummy_node_cost_iou)
    new_cost_matrix = -np.log(new_cost_matrix)
    # Restore the gate on the transformed matrix.
    new_cost_matrix[idx_x, idx_y] = cutoff
    if len(cluster[0]) == 1:
        # Single-track cluster: marginals follow directly from the softmax
        # over the dummy node plus each detection.
        new_cost_matrix = np.concatenate([np.ones((new_cost_matrix.shape[0], 1))*dummy_node_cost, new_cost_matrix], axis = 1)
        total_cost = np.sum(np.exp(-new_cost_matrix))
        new_assignment = np.zeros((cost_matrix.shape[0], cost_matrix.shape[1]+1))
        new_assignment[np.repeat(cluster[0], len(cluster[1])+1), np.tile(
            np.concatenate([np.zeros(1, dtype = np.int32), cluster[1]]), len(cluster[0]))] = np.exp(-new_cost_matrix)/total_cost
        new_assignment = new_assignment.reshape(1, -1)
        return [new_assignment], np.array([0])
    if new_cost_matrix.ndim <= 1:
        new_cost_matrix = np.expand_dims(new_cost_matrix, 1)
    # print(new_cost_matrix)
    # Enumerate the m best joint assignments (new_m_best_sol is defined in
    # mbest_ilp, outside this view).
    assignments, assignment_cost = new_m_best_sol(new_cost_matrix, m, dummy_node_cost)
    # Shift costs so the best hypothesis has cost 0 (numerical stability of
    # the exp() weighting).
    offset = np.amin(assignment_cost)
    assignment_cost -= offset
    new_assignments = []
    total_cost = np.sum(np.exp(-assignment_cost))
    for assignment in assignments:
        new_assignment = np.zeros((cost_matrix.shape[0], cost_matrix.shape[1]+1))
        new_assignment[np.repeat(cluster[0], len(cluster[1])+1), np.tile(
            np.concatenate([np.zeros(1, dtype = np.int32), cluster[1]]), len(cluster[0]))] = \
            assignment/total_cost
        new_assignments.append(new_assignment.reshape(1, -1))
    return new_assignments, assignment_cost
def matching_cascade(
distance_metric, max_distance, cascade_depth, tracks, detections,
track_indices=None, detection_indices=None, compare_2d = False, detections_3d=None):
"""Run matching cascade.
Parameters
----------
distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
The distance metric is given a list of tracks and detections as well as
a list of N track indices and M detection indices. The metric should
return the NxM dimensional cost matrix, where element (i, j) is the
association cost between the i-th track in the given track indices and
the j-th detection in the given detection indices.
max_distance : float
Gating threshold. Associations with cost larger than this value are
disregarded.
cascade_depth: int
        The cascade depth, should be set to the maximum track age.
tracks : List[track.Track]
A list of predicted tracks at the current time step.
detections : List[detection.Detection]
A list of detections at the current time step.
track_indices : Optional[List[int]]
List of track indices that maps rows in `cost_matrix` to tracks in
`tracks` (see description above). Defaults to all tracks.
detection_indices : Optional[List[int]]
List of detection indices that maps columns in `cost_matrix` to
detections in `detections` (see description above). Defaults to all
detections.
Returns
-------
(List[(int, int)], List[int], List[int])
Returns a tuple with the following three entries:
* A list of matched track and detection indices.
* A list of unmatched track indices.
* A list of unmatched detection indices.
| |
# <gh_stars>0
# Copyright (C) 2018 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NN-Quantizer for Caffe models
import sys
# Include <Caffe installation path>/python in PYTHONPATH environment variable
import os
import caffe
from caffe.proto import caffe_pb2
import numpy as np
import argparse
from google.protobuf import text_format
import pickle
class Caffe_Quantizer(object):
"""\
Quantize a trained caffe model to 8-bits
"""
def __init__(self,model_file='',weight_file='',iterations=100,
accuracy_layer='accuracy',gpu=False):
self.model_file=model_file
self.weight_file=weight_file
self.quant_weight_file=""
self.conv_layer=[]
self.ip_layer=[]
self.start_layer=[]
self.end_layer=[]
self.layer=[]
self.layer_shape={}
self.layer_wt_shape={}
self.top_blob={}
self.bottom_blob={}
self.layer_type={}
self.kernel_size={}
self.stride={}
self.pad={}
self.group={}
self.pool_type={}
self.lrn_type={}
self.lrn_size={}
self.lrn_alpha={}
self.lrn_beta={}
self.num_ops={}
self.num_wts={}
self.wt_int_bits={}
self.wt_dec_bits={}
self.bias_int_bits={}
self.bias_dec_bits={}
self.act_int_bits={}
self.act_dec_bits={}
self.bias_lshift={}
self.act_rshift={}
self.data_layer=None
self.mean_file=""
self.mean_val=[]
self.label_layer=None
self.accuracy_layer=accuracy_layer
self.iterations=iterations
self.gpu=gpu
def save_quant_params(self,model_info_file):
pickle.dump(self,open(model_info_file,'wb'))
def load_quant_params(self,model_info_file):
model_par=pickle.load(open(model_info_file,'rb'))
self.model_file=model_par.model_file
self.weight_file=model_par.weight_file
self.quant_weight_file=model_par.quant_weight_file
self.conv_layer=model_par.conv_layer
self.ip_layer=model_par.ip_layer
self.start_layer=model_par.start_layer
self.end_layer=model_par.end_layer
self.layer=model_par.layer
self.layer_shape=model_par.layer_shape
self.layer_wt_shape=model_par.layer_wt_shape
self.top_blob=model_par.top_blob
self.bottom_blob=model_par.bottom_blob
self.layer_type=model_par.layer_type
self.kernel_size=model_par.kernel_size
self.stride=model_par.stride
self.pad=model_par.pad
self.group=model_par.group
self.pool_type=model_par.pool_type
self.lrn_type=model_par.lrn_type
self.lrn_size=model_par.lrn_size
self.lrn_alpha=model_par.lrn_alpha
self.lrn_beta=model_par.lrn_beta
self.num_ops=model_par.num_ops
self.num_wts=model_par.num_wts
self.wt_int_bits=model_par.wt_int_bits
self.wt_dec_bits=model_par.wt_dec_bits
self.bias_int_bits=model_par.bias_int_bits
self.bias_dec_bits=model_par.bias_dec_bits
self.act_int_bits=model_par.act_int_bits
self.act_dec_bits=model_par.act_dec_bits
self.bias_lshift=model_par.bias_lshift
self.act_rshift=model_par.act_rshift
self.data_layer=model_par.data_layer
self.mean_file=model_par.mean_file
self.mean_val=model_par.mean_val
self.label_layer=model_par.label_layer
self.accuracy_layer=model_par.accuracy_layer
self.iterations=model_par.iterations
self.gpu=model_par.gpu
def run_full_network(self):
if self.gpu==True:
caffe.set_mode_gpu()
net = caffe.Net(self.model_file,self.weight_file,caffe.TEST)
acc = np.zeros(self.iterations)
for i in range(0,self.iterations):
out = net.forward()
acc[i] = out[self.accuracy_layer]*100
print("Full precision accuracy: %.2f%%" %(acc.mean()))
return acc.mean()
    def run_quantized_network(self):
        """Evaluate the quantized model and return its mean accuracy (%).

        First pass: run the net with quantized weights/biases only.
        Second pass: additionally quantize the activations of each network
        segment (bounded by self.start_layer/self.end_layer) to 8-bit fixed
        point between forward calls, then read the accuracy blob.
        """
        if self.gpu==True:
            caffe.set_mode_gpu()
        net = caffe.Net(self.model_file,self.quant_weight_file,caffe.TEST)
        acc = np.zeros(self.iterations)
        for i in range(0,self.iterations):
            out = net.forward()
            acc[i] = out[self.accuracy_layer]*100
        print("Accuracy with quantized weights/biases: %.2f%%" %(acc.mean()))
        for i in range(0,self.iterations):
            for layer_no in range(0,len(self.start_layer)):
                if layer_no==0:
                    net.forward(end=str(self.end_layer[layer_no]))
                else:
                    net.forward(start=str(self.start_layer[layer_no]),end=str(self.end_layer[layer_no]))
                if layer_no < len(self.start_layer)-1: # not quantizing accuracy layer
                    # Scale activations into fixed point (Qx.y), floor, then
                    # saturate to the signed 8-bit range.
                    # NOTE(review): the positive clamp uses >126 -> 127 while
                    # the negative clamp uses <-127 -> -128 -- confirm the
                    # asymmetry is intentional.
                    net.blobs[self.end_layer[layer_no]].data[:]=np.floor(net.blobs[self.end_layer[layer_no]].data*\
                        (2**self.act_dec_bits[self.end_layer[layer_no]]))
                    net.blobs[self.end_layer[layer_no]].data[net.blobs[self.end_layer[layer_no]].data>126]=127
                    net.blobs[self.end_layer[layer_no]].data[net.blobs[self.end_layer[layer_no]].data<-127]=-128
                    # Scale back to floating point for the next segment.
                    net.blobs[self.end_layer[layer_no]].data[:]=net.blobs[self.end_layer[layer_no]].data/\
                        (2**self.act_dec_bits[self.end_layer[layer_no]])
            acc[i] = net.blobs[self.accuracy_layer].data*100
        accuracy = acc.mean()
        print("Accuracy with quantized weights/biases and activations: %.2f%%" %(accuracy))
        return accuracy
    def get_layer_info(self):
        """Parse the prototxt and populate the per-layer metadata tables.

        Fills self.top_blob/bottom_blob/layer_type/layer_shape and the
        type-specific dictionaries (convolution, pooling, LRN, inner
        product) plus the TEST-phase data/label layer names.  The numeric
        string comparisons ('4', '17', ...) handle old prototxts that
        encode layer types as enum values.
        """
        net=caffe_pb2.NetParameter()
        text_format.Merge(open(self.model_file,'r').read(),net)
        # Instantiate the net once so blob/parameter shapes can be read.
        cnn = caffe.Net(self.model_file,self.weight_file,caffe.TEST)
        if len(net.layer)==0: #some prototxts use "layer", some use "layers"
            layers = net.layers
        else:
            layers = net.layer
        for layer in layers:
            layer_name=[]
            for val in layer.top:
                layer_name+=[str(val)]
            self.top_blob[str(layer.name)]=layer_name
            self.layer_shape[str(layer.name)]=cnn.blobs[self.top_blob[str(layer.name)][0]].data.shape
            layer_name=[]
            for val in layer.bottom:
                layer_name+=[str(val)]
            self.bottom_blob[str(layer.name)]=layer_name
            self.layer_type[str(layer.name)] = str(layer.type).lower()
            if str(layer.type).lower() == 'convolution' or str(layer.type)=='4':
                self.layer_wt_shape[str(layer.name)]=cnn.params[self.top_blob[str(layer.name)][0]][0].data.shape
                self.conv_layer.append(str(layer.name))
                self.kernel_size[str(layer.name)] = layer.convolution_param.kernel_size[0]
                # Caffe defaults when stride/pad are omitted from the prototxt.
                self.stride[str(layer.name)] = 1;
                self.pad[str(layer.name)] = 0;
                if(len(layer.convolution_param.stride)!=0):
                    self.stride[str(layer.name)] = layer.convolution_param.stride[0]
                if(len(layer.convolution_param.pad)!=0):
                    self.pad[str(layer.name)] = layer.convolution_param.pad[0]
                self.group[str(layer.name)] = layer.convolution_param.group
            elif str(layer.type).lower() == 'pooling' or str(layer.type)=='17':
                self.pool_type[str(layer.name)] = layer.pooling_param.pool
                self.kernel_size[str(layer.name)] = layer.pooling_param.kernel_size
                self.stride[str(layer.name)] = layer.pooling_param.stride
                self.pad[str(layer.name)] = layer.pooling_param.pad
            elif str(layer.type).lower() == 'lrn' or str(layer.type)=='15':
                self.lrn_type[str(layer.name)] = layer.lrn_param.norm_region
                self.lrn_size[str(layer.name)] = layer.lrn_param.local_size
                self.lrn_alpha[str(layer.name)] = layer.lrn_param.alpha
                self.lrn_beta[str(layer.name)] = layer.lrn_param.beta
            elif str(layer.type).lower() == 'innerproduct' or str(layer.type)=='14':
                self.layer_wt_shape[str(layer.name)]=cnn.params[self.top_blob[str(layer.name)][0]][0].data.shape
                self.ip_layer.append(str(layer.name))
            elif str(layer.type).lower() == 'data' or str(layer.type)=='5':
                # Only the TEST-phase data layer defines the evaluation input.
                included = False
                for layer_phase in layer.include:
                    included = included or layer_phase.phase == caffe.TEST
                if(included == True):
                    # NOTE(review): batch_size is read but never used.
                    batch_size = layer.data_param.batch_size
                    self.data_layer = str(layer.top[0])
                    self.label_layer = str(layer.top[1])
                    self.mean_file = layer.transform_param.mean_file
                    self.mean_val = list(layer.transform_param.mean_value)
    def get_graph_connectivity(self):
        """Derive the execution order and segment boundaries of the network.

        Walks backwards from the accuracy layer to the data layer (forward
        traversal would branch into the auxiliary accuracy heads of nets
        like GoogLeNet), then splits the ordered layer list into segments
        whose endpoints (self.end_layer) are the layers that produce
        quantizable activations.  Results go to self.layer,
        self.start_layer and self.end_layer.
        """
        # Extract network connectivity for running CNN functions in the correct order
        # Traversing back from output layer (accuracy) to input layer (data) especially because
        # googLeNet has many accuracy labels, which branch out and end at a different accuracy
        # label with forward traversal
        net=caffe_pb2.NetParameter()
        text_format.Merge(open(self.model_file,'r').read(),net)
        # Layer type names plus their old-style numeric enum equivalents.
        allowed_layer_types = ['data','convolution','innerproduct','pooling','lrn','relu',\
            'accuracy','concat','5','4','14','17','15','18','1','3']
        current_layer = self.accuracy_layer
        traversed=[]
        while current_layer != str(self.data_layer):
            traversed += [current_layer]
            num_branch = len(self.bottom_blob[current_layer])
            current_blob = self.bottom_blob[current_layer][0]
            # Prefer an un-visited ReLU producer of this blob (in-place ReLU
            # layers share the blob name with the layer they follow).
            has_unused_relu = 0
            for key, value in self.top_blob.items():
                if (current_blob in value) and (key not in traversed) and \
                    (self.layer_type[key] == 'relu' or self.layer_type[key]=='18'):
                    has_unused_relu = 1
                    break
            for key, value in self.top_blob.items():
                if(has_unused_relu == 1):
                    if (current_blob in value) and (key not in traversed) and \
                        (self.layer_type[key]=='relu' or self.layer_type[key]=='18'):
                        has_unused_relu = 0
                        current_layer = key
                        break
                else:
                    if (current_blob in value) and (key not in traversed) and \
                        (self.layer_type[key] in allowed_layer_types):
                        current_layer = key
                        break
        traversed += [current_layer]
        traversed.reverse()
        self.layer=traversed[:]
        # Sentinel so that the first segment starts from the net input.
        self.start_layer+=['']
        for layer_no in range(0,len(self.layer)):
            layer = self.layer[layer_no]
            # NOTE(review): `layer in self.accuracy_layer` is a substring
            # test on a string, not list membership -- confirm intended.
            # pool_type==1 is average pooling (its output needs quantizing).
            if layer == self.data_layer or layer in self.conv_layer or \
               layer in self.ip_layer or layer in self.accuracy_layer or\
               ((self.layer_type[layer]=='pooling' or self.layer_type[layer]=='17') \
               and self.pool_type[layer]==1):
                self.end_layer+=[layer]
                if layer_no < len(self.layer)-1:
                    self.start_layer+=[self.layer[layer_no+1]]
        print(self.start_layer)
        print(self.end_layer)
# Quantize weights to 8 bits
# Using min and max of weights as nearest power of 2, quantize to 8bits (QM.N) and check accuracy
# If accuracy is lost, try QM-1:N+1, QM-2,N+2,... with saturation to find out the best combination
# with least accuracy loss (Trading-off weights that occur infrequently for more precision)
#
# -2^(M+N) 0 2^(M+N)
# | ^ |
# | *|||* |
# <--------| *|||||* |------->
# Saturated| *|||||||* |Saturated
# | *|||||||||||* |
# | *|||||||||||||||||* |
# *| |*
# * |<-------------------------->| *
# Weight quantization and
# truncation with minimal
# loss of accuracy
#
    def quantize_wts_8bit(self,tolerance=0.001,search_range=3):
        """Quantize conv/innerproduct layer weights to 8-bit fixed point (Qm.n).

        For each layer the initial format is derived from the weights'
        min/max rounded up to the nearest power of 2 (m integer bits,
        n = 7 - m fractional bits). While the mean accuracy drops more than
        `tolerance` below the full-precision baseline, one bit is moved from
        the integer to the fractional part (saturating outliers to the int8
        range) up to `search_range` times; the format with the best accuracy
        is kept and the quantized weights are written to quant_weight_file.

        :param tolerance: acceptable accuracy loss relative to full precision
        :param search_range: max number of extra fractional bits to try
        """
        if self.gpu==True:
            caffe.set_mode_gpu()
        net = caffe.Net(self.model_file,self.weight_file,caffe.TEST)
        acc = np.zeros(self.iterations)
        # Baseline accuracy with full-precision weights.
        for i in range(0,self.iterations):
            out = net.forward()
            acc[i] = out[self.accuracy_layer]*100
        target_accuracy = acc.mean()
        print("Full precision accuracy: %.2f%%" %(target_accuracy))
        self.quant_weight_file = self.weight_file
        wfile = os.path.basename(self.weight_file)
        qwfile = 'quantized_'+wfile
        # Quantized caffemodel lives next to the original weight file,
        # prefixed with 'quantized_' (any '.h5' suffix dropped).
        self.quant_weight_file = self.weight_file.replace(wfile,qwfile)
        self.quant_weight_file = self.quant_weight_file.replace('.h5','')
        net.save(self.quant_weight_file)
        for layer_name in self.conv_layer+self.ip_layer:
            # Start with min/max of weights rounded up to the nearest power of 2.
            wt_max = net.params[layer_name][0].data.max()
            wt_min = net.params[layer_name][0].data.min()
            self.wt_int_bits[layer_name] = int(np.ceil(np.log2(max(abs(wt_min),abs(wt_max)))))
            self.wt_dec_bits[layer_name] = 7-self.wt_int_bits[layer_name]
            max_int_bits = self.wt_int_bits[layer_name]-search_range
            print('Layer: '+ layer_name + ' weights max: '+str(wt_max)+' min: '+str(wt_min)+\
                ' Format: Q'+str(self.wt_int_bits[layer_name])+'.'+str(self.wt_dec_bits[layer_name]))
            # Round weights to the current Qm.n grid (no saturation needed at
            # the initial format since it covers the full weight range).
            net.params[layer_name][0].data[:]=np.round(net.params[layer_name][0].data*\
                (2**self.wt_dec_bits[layer_name]))/(2**self.wt_dec_bits[layer_name])
            for i in range(0,self.iterations):
                out = net.forward()
                acc[i] = out[self.accuracy_layer]*100
            accuracy = acc.mean()
            print("Accuracy: %.2f%%" %(accuracy))
            best_int_bits = self.wt_int_bits[layer_name]
            best_dec_bits = self.wt_dec_bits[layer_name]
            best_accuracy = accuracy
            # Trade integer bits for fractional bits while accuracy suffers.
            while target_accuracy-accuracy>tolerance and self.wt_int_bits[layer_name]>max_int_bits:
                self.wt_int_bits[layer_name] = self.wt_int_bits[layer_name]-1
                self.wt_dec_bits[layer_name] = self.wt_dec_bits[layer_name]+1
                # Restore unmodified weights before re-quantizing.
                net.copy_from(self.quant_weight_file)
                net.params[layer_name][0].data[:]=np.round(net.params[layer_name][0].data*\
                    (2**self.wt_dec_bits[layer_name]))
                # Saturate to the signed 8-bit range.
                net.params[layer_name][0].data[net.params[layer_name][0].data>126]=127
                net.params[layer_name][0].data[net.params[layer_name][0].data<-127]=-128
                net.params[layer_name][0].data[:]=net.params[layer_name][0].data/\
                    (2**self.wt_dec_bits[layer_name])
                for i in range(0,self.iterations):
                    out = net.forward()
                    acc[i] = out[self.accuracy_layer]*100
                accuracy = acc.mean()
                print('Format Q'+str(self.wt_int_bits[layer_name])+'.'+\
                    str(self.wt_dec_bits[layer_name])+' Accuracy: %.2f%%' %(accuracy))
                if accuracy>best_accuracy:
                    best_int_bits = self.wt_int_bits[layer_name]
                    best_dec_bits = self.wt_dec_bits[layer_name]
                    best_accuracy = accuracy
            # Re-apply the best format found for this layer and persist it.
            self.wt_int_bits[layer_name] = best_int_bits
            self.wt_dec_bits[layer_name] = best_dec_bits
            net.copy_from(self.quant_weight_file)
            net.params[layer_name][0].data[:]=np.round(net.params[layer_name][0].data*\
                (2**self.wt_dec_bits[layer_name]))
            net.params[layer_name][0].data[net.params[layer_name][0].data>126]=127
            net.params[layer_name][0].data[net.params[layer_name][0].data<-127]=-128
            net.params[layer_name][0].data[:]=net.params[layer_name][0].data/\
                (2**self.wt_dec_bits[layer_name])
            print('Final '+layer_name+ ' weights format Q'+str(best_int_bits)+'.'+\
                str(best_dec_bits)+' Accuracy: %.2f%%' %(best_accuracy))
        net.save(self.quant_weight_file)
# Quantize activations (inter-layer data) to 8 bits
# Using min and max of activations as nearest power of 2, quantize to 8bits (QM.N) and check accuracy
# If accuracy is lost, try QM-1:N+1, QM-2,N+2,... with saturation to find out the best combination
# with least accuracy loss (Trading-off activations that occur infrequently for more precision)
def quantize_activations_8bit(self,tolerance=0.001,search_range=3):
if self.gpu==True:
caffe.set_mode_gpu()
net = caffe.Net(self.model_file,self.quant_weight_file,caffe.TEST)
acc = np.zeros(self.iterations)
for i in range(0,self.iterations):
out = net.forward()
acc[i] = out[self.accuracy_layer]*100
target_accuracy = acc.mean()
print("Accuracy with quantized weights: %.2f%%" %(target_accuracy))
max_val={}
min_val={}
quant_layer_flag={}
for layer in self.end_layer:
max_val[layer]=float('-inf')
min_val[layer]=float('inf')
quant_layer_flag[layer]=0
#Finding min max for output of all layers
for i in range(0,self.iterations):
for layer_no in range(0,len(self.start_layer)):
if layer_no==0:
net.forward(end=str(self.end_layer[layer_no]))
else:
net.forward(start=str(self.start_layer[layer_no]),end=str(self.end_layer[layer_no]))
layer_max = net.blobs[self.end_layer[layer_no]].data.max()
layer_min = net.blobs[self.end_layer[layer_no]].data.min()
if(layer_max>max_val[self.end_layer[layer_no]]):
max_val[self.end_layer[layer_no]]=layer_max
if(layer_min<min_val[self.end_layer[layer_no]]):
min_val[self.end_layer[layer_no]]=layer_min
#print("Running %s layer, max,min : %.2f,%.2f" %(self.end_layer[layer_no],layer_max,layer_min))
max_int_bits={}
for layer in self.end_layer:
self.act_int_bits[layer] = int(np.ceil(np.log2(max(abs(max_val[layer]),abs(min_val[layer])))))
self.act_dec_bits[layer] = 7-self.act_int_bits[layer]
max_int_bits[layer]=self.act_int_bits[layer]-search_range
print('Layer: '+layer+' max: '+ str(max_val[layer]) + ' min: '+str(min_val[layer])+ \
' Format: Q'+str(self.act_int_bits[layer])+'.'+str(self.act_dec_bits[layer]))
quant_max_val={}
quant_min_val={}
for layer in self.end_layer:
quant_max_val[layer]=float('-inf')
quant_min_val[layer]=float('inf')
for quant_layer_no in range(0,len(self.start_layer)-1): #No need to quantize accuracy layer
quant_layer=self.end_layer[quant_layer_no]
quant_layer_flag[quant_layer]=1
if((self.layer_type[quant_layer]=='pooling' or self.layer_type[quant_layer]=='17') and \
self.pool_type[quant_layer]==1):
prev_layer=self.end_layer[quant_layer_no-1]
self.act_int_bits[quant_layer]=self.act_int_bits[prev_layer]
self.act_dec_bits[quant_layer]=self.act_dec_bits[prev_layer]
continue
# quantize layer by layer
for i in range(0,self.iterations):
for layer_no in range(0,len(self.start_layer)):
if layer_no==0:
net.forward(end=str(self.end_layer[layer_no]))
else:
net.forward(start=str(self.start_layer[layer_no]),end=str(self.end_layer[layer_no]))
if quant_layer_flag[self.end_layer[layer_no]]==1: # quantize incrementally layer by layer
net.blobs[self.end_layer[layer_no]].data[:]=np.floor(net.blobs[self.end_layer[layer_no]].data*\
(2**self.act_dec_bits[self.end_layer[layer_no]]))/(2**self.act_dec_bits[self.end_layer[layer_no]])
layer_max = net.blobs[self.end_layer[layer_no]].data.max()
layer_min = net.blobs[self.end_layer[layer_no]].data.min()
if(layer_max>quant_max_val[self.end_layer[layer_no]]):
quant_max_val[self.end_layer[layer_no]]=layer_max
if(layer_min<quant_min_val[self.end_layer[layer_no]]):
quant_min_val[self.end_layer[layer_no]]=layer_min
acc[i] = net.blobs[self.accuracy_layer].data*100
accuracy=acc.mean()
print('Layer-'+quant_layer+' max: '+str(quant_max_val[quant_layer])+\
' min: '+str(quant_min_val[quant_layer])+' format: Q'+\
str(self.act_int_bits[quant_layer])+'.'+str(self.act_dec_bits[quant_layer])+\
' accuracy: %.2f%%' %(acc.mean()))
best_accuracy = accuracy
best_int_bits = self.act_int_bits[quant_layer]
best_dec_bits = self.act_dec_bits[quant_layer]
while target_accuracy-accuracy>tolerance and self.act_int_bits[quant_layer]>\
max_int_bits[quant_layer]:
for layer in self.end_layer:
quant_max_val[layer]=float('-inf')
quant_min_val[layer]=float('inf')
self.act_int_bits[quant_layer] = self.act_int_bits[quant_layer]-1
self.act_dec_bits[quant_layer] = self.act_dec_bits[quant_layer]+1
for i in range(0,self.iterations):
for layer_no in range(0,len(self.start_layer)):
if layer_no==0:
net.forward(end=str(self.end_layer[layer_no]))
else:
net.forward(start=str(self.start_layer[layer_no]),end=str(self.end_layer[layer_no]))
if quant_layer_flag[self.end_layer[layer_no]]==1:
net.blobs[self.end_layer[layer_no]].data[:]=np.floor(net.blobs[self.end_layer[layer_no]].data*\
(2**self.act_dec_bits[self.end_layer[layer_no]]))
net.blobs[self.end_layer[layer_no]].data[net.blobs[self.end_layer[layer_no]].data>126]=127
net.blobs[self.end_layer[layer_no]].data[net.blobs[self.end_layer[layer_no]].data<-127]=-128
net.blobs[self.end_layer[layer_no]].data[:]=net.blobs[self.end_layer[layer_no]].data/\
(2**self.act_dec_bits[self.end_layer[layer_no]])
layer_max = net.blobs[self.end_layer[layer_no]].data.max()
layer_min = net.blobs[self.end_layer[layer_no]].data.min()
if(layer_max>quant_max_val[self.end_layer[layer_no]]):
quant_max_val[self.end_layer[layer_no]]=layer_max
if(layer_min<quant_min_val[self.end_layer[layer_no]]):
quant_min_val[self.end_layer[layer_no]]=layer_min
acc[i] = net.blobs[self.accuracy_layer].data*100
accuracy=acc.mean()
if accuracy>best_accuracy:
best_int_bits = self.act_int_bits[quant_layer]
best_dec_bits = self.act_dec_bits[quant_layer]
best_accuracy = accuracy
print('Layer-'+quant_layer+' max: '+str(quant_max_val[quant_layer])+\
'min: '+str(quant_min_val[quant_layer])+' format: Q'+\
str(self.act_int_bits[quant_layer])+'.'+str(self.act_dec_bits[quant_layer])+\
| |
!= api.ssn:
rvol = api.find_repl_volume(item['volume']['id'],
rssn, None)
# if there is an old replication whack it.
api.delete_replication(svol, rssn, False)
if api.start_replication(
svol, rvol,
item['specs']['replicationtype'],
self._get_qos(rssn),
item['specs']['activereplay']):
# Save our replication_driver_data.
item['rdd'] += ','
item['rdd'] += backend['target_device_id']
else:
# No joy. Bail
item['status'] = 'error'
def _fixup_types(self, api, items):
# Update our replay profiles.
for item in items:
if item['status'] == 'reattached':
# Re-apply any appropriate replay profiles.
item['status'] = 'available'
rps = item['specs']['replay_profile_string']
if rps:
svol = api.get_volume(item['nvol'])
if not api.update_replay_profiles(svol, rps):
item['status'] = 'error'
def _volume_updates(self, items):
# Update our volume updates.
volume_updates = []
for item in items:
# Set our status for our replicated volumes
model_update = {'provider_id': item['nvol'],
'replication_driver_data': item['rdd']}
# These are simple. If the volume reaches available then,
# since we were replicating it, replication status must
# be good. Else error/error.
if item['status'] == 'available':
model_update['status'] = 'available'
model_update['replication_status'] = (
fields.ReplicationStatus.ENABLED)
else:
model_update['status'] = 'error'
model_update['replication_status'] = (
fields.ReplicationStatus.ERROR)
volume_updates.append({'volume_id': item['volume']['id'],
'updates': model_update})
return volume_updates
def _failback_replication(self, api, volume, qosnode):
"""Sets up the replication failback.
:param api: Dell SC API.
:param volume: Cinder Volume
:param qosnode: Dell QOS node object.
:return: replitem dict.
"""
LOG.info('failback_volumes: replicated volume')
# Get our current volume.
cvol = api.find_volume(volume['id'], volume['provider_id'])
# Original volume on the primary.
ovol = api.find_repl_volume(volume['id'], api.primaryssn,
None, True, False)
# Delete our current mappings.
api.remove_mappings(cvol)
# If there is a replication to delete do so.
api.delete_replication(ovol, api.ssn, False)
# Replicate to a common replay.
screpl = api.replicate_to_common(cvol, ovol, 'tempqos')
# We made it this far. Update our status.
screplid = None
status = ''
if screpl:
screplid = screpl['instanceId']
nvolid = screpl['destinationVolume']['instanceId']
status = 'inprogress'
else:
LOG.error('Unable to restore %s', volume['id'])
screplid = None
nvolid = None
status = 'error'
# Save some information for the next step.
# nvol is the new volume created by replicate_to_common.
# We also grab our extra specs here.
replitem = {
'volume': volume,
'specs': self._parse_extraspecs(volume),
'qosnode': qosnode,
'screpl': screplid,
'cvol': cvol['instanceId'],
'ovol': ovol['instanceId'],
'nvol': nvolid,
'rdd': six.text_type(api.ssn),
'status': status}
return replitem
def _failback_live_volume(self, api, id, provider_id):
"""failback the live volume to its original
:param api: Dell SC API
:param id: Volume ID
:param provider_id: Dell Instance ID
:return: model_update dict
"""
model_update = {}
# We do not search by name. Only failback if we have a complete
# LV object.
sclivevolume = api.get_live_volume(provider_id)
# TODO(tswanson): Check swapped state first.
if sclivevolume and api.swap_roles_live_volume(sclivevolume):
LOG.info('Success swapping sclivevolume roles %s', id)
model_update = {
'status': 'available',
'replication_status': fields.ReplicationStatus.ENABLED,
'provider_id':
sclivevolume['secondaryVolume']['instanceId']}
else:
LOG.info('Failure swapping roles %s', id)
model_update = {'status': 'error'}
return model_update
def _finish_failback(self, api, replitems):
# Wait for replication to complete.
# This will also flip replication.
self._wait_for_replication(api, replitems)
# Replications are done. Attach to any additional replication
# backends.
self._reattach_remaining_replications(api, replitems)
self._fixup_types(api, replitems)
return self._volume_updates(replitems)
    def failback_volumes(self, volumes):
        """This is a generic volume failback.

        :param volumes: List of volumes that need to be failed back.
        :return: volume_updates for the list of volumes.
        """
        LOG.info('failback_volumes')
        with self._client.open_connection() as api:
            # Get our qosnode. This is a good way to make sure the backend
            # is still setup so that we can do this.
            qosnode = self._get_qos(api.ssn)
            if not qosnode:
                raise exception.VolumeBackendAPIException(
                    message=_('Unable to failback. Backend is misconfigured.'))
            volume_updates = []
            replitems = []
            # Trundle through the volumes. Update non replicated to alive again
            # and reverse the replications for the remaining volumes.
            for volume in volumes:
                LOG.info('failback_volumes: starting volume: %s', volume)
                model_update = {}
                if volume.get('replication_driver_data'):
                    rspecs = self._get_replication_specs(
                        self._get_volume_extra_specs(volume))
                    if rspecs['live']:
                        # Live volumes fail back synchronously via role swap.
                        model_update = self._failback_live_volume(
                            api, volume['id'], volume['provider_id'])
                    else:
                        # Plain replications fail back asynchronously; their
                        # model update is produced later by _finish_failback.
                        replitem = self._failback_replication(api, volume,
                                                              qosnode)
                        # Save some information for the next step.
                        # nvol is the new volume created by
                        # replicate_to_common. We also grab our
                        # extra specs here.
                        replitems.append(replitem)
                else:
                    # Not replicated. Just set it to available.
                    model_update = {'status': 'available'}
                # Save our update
                if model_update:
                    volume_updates.append({'volume_id': volume['id'],
                                           'updates': model_update})
                # Let's do up to 5 replications at once.
                if len(replitems) == 5:
                    volume_updates += self._finish_failback(api, replitems)
                    replitems = []
            # Finish any leftover items
            if replitems:
                volume_updates += self._finish_failback(api, replitems)
            # Set us back to a happy state.
            # The only way this doesn't happen is if the primary is down.
            self._update_backend(None)
            return volume_updates
def _failover_replication(self, api, id, provider_id, destssn):
rvol = api.break_replication(id, provider_id, destssn)
model_update = {}
if rvol:
LOG.info('Success failing over volume %s', id)
model_update = {'replication_status':
fields.ReplicationStatus.FAILED_OVER,
'provider_id': rvol['instanceId']}
else:
LOG.info('Failed failing over volume %s', id)
model_update = {'status': 'error'}
return model_update
def _failover_live_volume(self, api, id, provider_id):
model_update = {}
# Search for volume by id if we have to.
sclivevolume = api.get_live_volume(provider_id, id)
if sclivevolume:
swapped = api.is_swapped(provider_id, sclivevolume)
# If we aren't swapped try it. If fail error out.
if not swapped and not api.swap_roles_live_volume(sclivevolume):
LOG.info('Failure swapping roles %s', id)
model_update = {'status': 'error'}
return model_update
LOG.info('Success swapping sclivevolume roles %s', id)
sclivevolume = api.get_live_volume(provider_id)
model_update = {
'replication_status':
fields.ReplicationStatus.FAILED_OVER,
'provider_id':
sclivevolume['primaryVolume']['instanceId']}
# Error and leave.
return model_update
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover to secondary.

        :param context: security context
        :param secondary_id: Specifies rep target to fail over to
        :param volumes: List of volumes serviced by this backend.
        :returns: destssn, volume_updates data structure

        Example volume_updates data structure:

        .. code-block:: json

            [{'volume_id': <cinder-uuid>,
              'updates': {'provider_id': 8,
                          'replication_status': 'failed-over',
                          'replication_extended_status': 'whatever',...}},]
        """
        LOG.debug('failover-host')
        LOG.debug(self.failed_over)
        LOG.debug(self.active_backend_id)
        LOG.debug(self.replication_enabled)
        # Already failed over: the only legal request now is a failback.
        if self.failed_over:
            if secondary_id == 'default':
                LOG.debug('failing back')
                return 'default', self.failback_volumes(volumes), []
            raise exception.InvalidReplicationTarget(
                reason=_('Already failed over'))
        LOG.info('Failing backend to %s', secondary_id)
        # basic check
        if self.replication_enabled:
            with self._client.open_connection() as api:
                # Look for the specified secondary.
                destssn = self._parse_secondary(api, secondary_id)
                if destssn:
                    # We roll through trying to break replications.
                    # Is failing here a complete failure of failover?
                    volume_updates = []
                    for volume in volumes:
                        model_update = {}
                        if volume.get('replication_driver_data'):
                            # Replicated volume: live volumes are swapped,
                            # plain replications are broken.
                            rspecs = self._get_replication_specs(
                                self._get_volume_extra_specs(volume))
                            if rspecs['live']:
                                model_update = self._failover_live_volume(
                                    api, volume['id'],
                                    volume.get('provider_id'))
                            else:
                                model_update = self._failover_replication(
                                    api, volume['id'],
                                    volume.get('provider_id'), destssn)
                        else:
                            # Not a replicated volume. Try to unmap it.
                            scvolume = api.find_volume(
                                volume['id'], volume.get('provider_id'))
                            api.remove_mappings(scvolume)
                            model_update = {'status': 'error'}
                        # Either we are failed over or our status is now error.
                        volume_updates.append({'volume_id': volume['id'],
                                               'updates': model_update})
                    # this is it.
                    self._update_backend(destssn)
                    LOG.debug('after update backend')
                    LOG.debug(self.failed_over)
                    LOG.debug(self.active_backend_id)
                    LOG.debug(self.replication_enabled)
                    return destssn, volume_updates, []
                else:
                    raise exception.InvalidReplicationTarget(reason=(
                        _('replication_failover failed. %s not found.') %
                        secondary_id))
        # I don't think we should ever get here.
        raise exception.VolumeBackendAPIException(message=(
            _('replication_failover failed. '
              'Backend not configured for failover')))
def _get_unmanaged_replay(self, api, volume_name, provider_id,
existing_ref):
replay_name = None
if existing_ref:
replay_name = existing_ref.get('source-name')
if not replay_name:
msg = _('_get_unmanaged_replay: Must specify source-name.')
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
# Find our volume.
scvolume = api.find_volume(volume_name, provider_id)
if not scvolume:
# Didn't find it.
msg = (_('_get_unmanaged_replay: Cannot find volume id %s')
% volume_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Find our replay.
screplay = api.find_replay(scvolume, replay_name)
if not screplay:
# Didn't find it. Reference must be invalid.
msg = (_('_get_unmanaged_replay: Cannot '
'find snapshot named %s') % replay_name)
LOG.error(msg)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
return screplay
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
snapshot structure.
There are two ways to do this:
1. Rename the backend storage object so that it matches the
snapshot['name'] which is how drivers traditionally map between a
cinder snapshot and the associated backend storage object.
2. Place some metadata on the snapshot, or somewhere in the backend,
that allows other driver requests (e.g. delete) to locate the
backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
"""
with self._client.open_connection() as api:
# | |
<filename>nanoraw/plot_commands.py
import os, sys
import h5py
import Queue
import numpy as np
import multiprocessing as mp
from time import sleep
from collections import defaultdict
from itertools import repeat, groupby
from pkg_resources import resource_string
# import nanoraw functions
import nanoraw_stats as ns
import nanoraw_helper as nh
# module-level verbosity flag
VERBOSE = False
# quantiles and especially violin plots require at least 3 values
# to be meaningful
QUANT_MIN = 3
# plotting names for strands
FWD_STRAND = 'Forward Strand'
REV_STRAND = 'Reverse Strand'
###################################
#### ggplot via rpy2 functions ####
###################################
# Error text shown when rpy2 / R / ggplot2 are missing at plot time.
GG_LOAD_ERROR=(
    '*' * 60 + '\nERROR: Must have rpy2, R and ' +
    'R package ggplot2 installed in order to plot. If these ' +
    'packages are installed, run:\n\t\t`python -c "import rpy2.robjects; ' +
    'from rpy2.robjects.packages import importr; ' +
    'importr(\'ggplot2\');"`\n\t to see installation issues.\n' + \
    '*' * 60 + '\n\n')
try:
    import rpy2.robjects as r
    from rpy2.robjects.packages import importr
except:
    # pass here and raise error when main functions are actually called
    pass
############################################
#### Kmer signal distribution functions ####
############################################
def plot_kmer_dist(files, corrected_group, basecall_subgroups,
                   read_mean, upstrm_bases, dnstrm_bases,
                   kmer_thresh, num_reads, pdf_fn, save_r_data_fn,
                   dont_plot):
    """Tabulate per-k-mer normalized signal means across reads and plot them.

    Reads event tables from FAST5 files under corrected_group /
    basecall_subgroup, collects the normalized event mean for every k-mer
    of length upstrm_bases + 1 + dnstrm_bases, and hands the data to the
    R script R_scripts/plotKmerDist.R via rpy2.

    NOTE(review): this function uses Python-2-only idioms
    (``zip(...)[0]`` indexing) — it will not run under Python 3 as-is.
    """
    if VERBOSE: sys.stderr.write(
        'Parsing files and tabulating k-mers.\n')
    kmer_len = upstrm_bases + dnstrm_bases + 1
    reads_added = 0
    # maps kmer -> list of (signal mean, read index) observations
    all_kmers = defaultdict(list)
    # randomly pick files instead of ordered from listing
    np.random.shuffle(files)
    for fn, basecall_subgroup in [(fn, bc_grp) for fn in files
                                  for bc_grp in basecall_subgroups]:
        try:
            with h5py.File(fn, 'r') as read_data:
                if ('/Analyses/' + corrected_group + '/' +
                    basecall_subgroup + '/Events') not in read_data:
                    continue
                event_data = read_data[
                    '/Analyses/' + corrected_group + '/' +
                    basecall_subgroup + '/Events'].value
                seq = event_data['base']
                means = event_data['norm_mean']
        except:
            # probably truncated file
            continue
        read_kmers = defaultdict(list)
        # Slide a k-mer window along the read; the event mean is taken at
        # the position offset so the k-mer is centered per upstrm/dnstrm.
        for kmer, event_mean in zip(
                [''.join(bs) for bs in zip(*[
                    seq[i:] for i in range(kmer_len)])],
                means[kmer_len - 1 - dnstrm_bases:]):
            read_kmers[kmer].append(event_mean)
        # if every k-mer is present (unless kmer is greater than 4) and
        # each k-mer has the requested number of occurences
        if kmer_thresh == 0 or (
                len(read_kmers) == 4 ** kmer_len and min(
                    len(x) for x in read_kmers.values()) > kmer_thresh):
            reads_added += 1
            for kmer, kmer_means in read_kmers.items():
                if read_mean:
                    # one observation per read: the read's mean for this kmer
                    all_kmers[kmer].append((
                        np.mean(kmer_means), reads_added))
                else:
                    all_kmers[kmer].extend(
                        zip(kmer_means, repeat(reads_added)))
        if reads_added >= num_reads:
            break
    if reads_added == 0:
        sys.stderr.write(
            '****** ERROR ******\n\tNo valid reads present. Check ' +
            'corrected group used in genome_resquiggle as well as ' +
            '[--num-kmer-threshold] parameter especially if requested ' +
            'k-mer length is greater than 3 or 4. Consider setting ' +
            'to 0 for k-mer lengths > 4.\n')
    if reads_added < num_reads:
        sys.stderr.write(
            '****** WARNING ******\tFewer valid reads present than ' +
            'requested. Check corrected group used in ' +
            'genome_resquiggle as well as [--num-kmer-threshold] ' +
            'parameter especially if requested k-mer length is ' +
            'greater than 3 or 4. Consider setting to 0 for k-mer ' +
            'legnths > 4.\n')
    if VERBOSE: sys.stderr.write('Preparing plot data.\n')
    # order k-mers by their mean signal level for a readable factor order
    kmer_levels = [kmer for means, kmer in sorted([
        (np.mean(zip(*means)[0]), kmer)
        for kmer, means in all_kmers.items()])]
    plot_data = [
        (kmer, kmer[upstrm_bases], sig_mean, read_i)
        for kmer in kmer_levels
        for sig_mean, read_i in all_kmers[kmer]]
    kmerDat = r.DataFrame({
        'Kmer':r.FactorVector(
            r.StrVector(zip(*plot_data)[0]),
            ordered=True, levels=r.StrVector(kmer_levels)),
        'Base':r.StrVector(zip(*plot_data)[1]),
        'Signal':r.FloatVector(zip(*plot_data)[2]),
        'Read':r.StrVector(zip(*plot_data)[3])})
    # df to plot kmers as tile of colors requires cowplot R package
    try:
        cowplot = importr("cowplot")
        baseDat = r.DataFrame({
            'Kmer':r.FactorVector(
                r.StrVector([kmer for kmer in kmer_levels
                             for _ in range(kmer_len)]),
                ordered=True, levels=r.StrVector(kmer_levels)),
            'Base':r.StrVector([kmer[i] for kmer in kmer_levels
                                for i in range(kmer_len)]),
            'Position':r.IntVector([
                i - upstrm_bases for kmer in kmer_levels
                for i in range(kmer_len)])})
    except:
        sys.stderr.write(
            '********* WARNING: Install R package `cowplot` for ' +
            'visual kmer display. Using text kmer display. ********\n')
        baseDat = r.NA_Character
    # R-side NA signals "do not save" to the plotting script
    if save_r_data_fn is None:
        save_r_data_fn = r.NA_Character
    else:
        save_r_data_fn = r.StrVector([save_r_data_fn,])
    dont_plot_r = r.BoolVector([dont_plot,])
    if VERBOSE: sys.stderr.write('Plotting.\n')
    r.r(resource_string(__name__, 'R_scripts/plotKmerDist.R'))
    if not dont_plot: r.r('pdf("' + pdf_fn + '", height=7, width=10)')
    if read_mean:
        r.globalenv['plotKmerDistWReadPath'](
            kmerDat, baseDat, save_r_data_fn, dont_plot_r)
    else:
        r.globalenv['plotKmerDist'](
            kmerDat, baseDat, save_r_data_fn, dont_plot_r)
    if not dont_plot: r.r('dev.off()')
    return
########################################
#### General data parsing functions ####
########################################
def get_read_correction_data(
        filename, reg_type, reg_width, corr_basecall_group,
        region_name=None, start_at_zero=False):
    """Extract old/new segmentation data for one read region as R DataFrames.

    Loads the raw signal plus the pre- and post-correction alignments from a
    FAST5 file and restricts them to a raw-coordinate window of reg_width
    starting at a position selected by reg_type ('start', 'end', 'random',
    or an explicit integer raw offset).

    Returns (old_dat, new_dat, sig_dat, diff_dat) rpy2 DataFrames, or four
    Nones when corr_basecall_group is absent from the file.
    """
    fast5_data = h5py.File(filename, 'r')
    # NOTE(review): .values()[0] / .value are Python-2 / old-h5py idioms.
    raw_grp = fast5_data['/Raw/Reads'].values()[0]
    if ( '/Analyses/' + corr_basecall_group) not in fast5_data:
        fast5_data.close()
        return None, None, None, None
    corr_grp = fast5_data['/Analyses/' + corr_basecall_group]
    events_grp = corr_grp['Events']
    events_data = events_grp.value
    read_id = raw_grp.attrs['read_id']
    signal_data = raw_grp['Signal'].value
    raw_offset = events_grp.attrs['read_start_rel_to_raw']
    # normalization parameters stored by genome_resquiggle
    shift, scale, lower_lim, upper_lim = [
        corr_grp.attrs[attr_name] for attr_name in (
            'shift', 'scale', 'lower_lim', 'upper_lim')]
    old_segs = corr_grp['Alignment/read_segments'].value
    old_align_vals = corr_grp['Alignment/read_alignment'].value
    new_align_vals = corr_grp['Alignment/genome_alignment'].value
    fast5_data.close()
    event_starts = events_data['start']
    # if a end or random region is requested
    events_end = events_data['start'][-1] + events_data['length'][-1]
    if reg_type == 'start':
        reg_start = 0
    elif reg_type == 'end':
        reg_start = events_end - reg_width
    elif reg_type == 'random':
        reg_start = np.random.randint(0, events_end - reg_width)
    else:
        # reg_type should be an integer which is the raw start position
        assert isinstance(reg_type, int)
        reg_start = reg_type
    norm_reg_signal, scale_values = nh.normalize_raw_signal(
        signal_data, raw_offset + reg_start, reg_width,
        shift=shift, scale=scale, lower_lim=lower_lim,
        upper_lim=upper_lim)
    # calculate running difference
    min_seg_len = 4
    sig_cs = np.cumsum(np.insert(norm_reg_signal, 0, 0))
    running_diffs = np.abs((2 * sig_cs[min_seg_len:-min_seg_len]) -
                           sig_cs[:-2*min_seg_len] -
                           sig_cs[2*min_seg_len:])
    # note that I need to check that both new and old segments are
    # in the region as the start of the same genomic position can
    # shift in raw space (i.e. only the old or new position could be
    # in the region of interest)
    old_segs_in_reg = np.where(np.logical_and(
        reg_start <= old_segs, old_segs < reg_start + reg_width))[0]
    old_reg_segs = old_segs[old_segs_in_reg]
    new_segs = np.concatenate([event_starts,
                               [events_end,]])
    new_segs_in_reg = np.where(np.logical_and(
        reg_start <= new_segs, new_segs < reg_start + reg_width))[0]
    new_reg_segs = new_segs[new_segs_in_reg]
    # pair each alignment column with its raw-space segment start; gap
    # columns ('-') consume no segment and get position -1
    i_old_segs = iter(old_segs)
    i_new_segs = iter(new_segs)
    align_vals = [((old_b, next(i_old_segs) if old_b != '-' else -1),
                   (new_b, next(i_new_segs) if new_b != '-' else -1))
                  for old_b, new_b in zip(
                      old_align_vals, new_align_vals)]
    reg_align_vals = [
        ((old_b, old_pos, old_pos in old_reg_segs),
         (new_b, new_pos, new_pos in new_reg_segs))
        for (old_b, old_pos), (new_b, new_pos) in align_vals
        if old_pos in old_reg_segs or new_pos in new_reg_segs]
    # summarize alignment for old and new segments
    old_is_del, old_is_mismatch, new_is_ins = [], [], []
    last_was_del = False
    for (old_b, old_pos, old_in_reg), (
            new_b, new_pos, new_in_reg) in reg_align_vals:
        if old_b == '-' and new_in_reg:
            new_is_ins.append(True)
        elif new_b == '-' and old_in_reg:
            old_is_del.append(True)
            old_is_mismatch.append(False)
            last_was_del = True
        else:
            if new_in_reg:
                new_is_ins.append(False)
            if old_in_reg:
                # a deletion flag carries over onto the next aligned base
                if last_was_del:
                    old_is_del.append(True)
                    last_was_del = False
                else:
                    old_is_del.append(False)
                old_is_mismatch.append(old_b != new_b)
    old_bases, old_reg_segs = zip(*[
        (b, pos) for b, pos, in_reg in zip(*reg_align_vals)[0]
        if in_reg]) if len(reg_align_vals) > 0 else ([], [])
    new_bases, new_reg_segs = zip(*[
        (b, pos) for b, pos, in_reg in zip(*reg_align_vals)[1]
        if in_reg]) if len(reg_align_vals) > 0 else ([], [])
    # bring positions to zero start if aligning multiple sequences
    sig_range = range(reg_start, reg_start + reg_width)
    if start_at_zero:
        old_reg_segs = [
            old_seg_pos - reg_start for old_seg_pos in old_reg_segs]
        new_reg_segs = [
            new_seg_pos - reg_start for new_seg_pos in new_reg_segs]
        sig_range = range(0, reg_width)
    old_dat = {
        'Position':r.FloatVector(old_reg_segs),
        'Base':r.StrVector(old_bases),
        'IsDel':r.BoolVector(old_is_del),
        'IsMismatch':r.BoolVector(old_is_mismatch),
        'Read':r.StrVector([read_id for _ in range(len(old_bases))])}
    new_dat = {
        'Position':r.FloatVector(new_reg_segs),
        'Base':r.StrVector(new_bases),
        'IsIns':r.BoolVector(new_is_ins),
        'Read':r.StrVector([read_id for _ in range(len(new_bases))])}
    sig_dat = {
        'Signal':r.FloatVector(norm_reg_signal),
        'Position':r.FloatVector(sig_range),
        'Read':r.StrVector([
            read_id for _ in range(len(norm_reg_signal))])}
    diff_dat = {
        'Signal':r.FloatVector(running_diffs),
        'Position':r.FloatVector(sig_range[
            min_seg_len:len(running_diffs) + min_seg_len]),
        'Read':r.StrVector([
            read_id for _ in range(len(running_diffs))])}
    # add region if applicable
    if region_name is not None:
        old_dat['Region'] = r.StrVector([
            region_name for _ in range(len(old_bases))])
        new_dat['Region'] = r.StrVector([
            region_name for _ in range(len(new_bases))])
        sig_dat['Region'] = r.StrVector([
            region_name for _ in range(len(norm_reg_signal))])
        diff_dat['Region'] = r.StrVector([
            region_name for _ in range(len(running_diffs))])
    old_dat = r.DataFrame(old_dat)
    new_dat = r.DataFrame(new_dat)
    sig_dat = r.DataFrame(sig_dat)
    diff_dat = r.DataFrame(diff_dat)
    return old_dat, new_dat, sig_dat, diff_dat
def get_read_reg_events(r_data, interval_start, num_bases):
    """Return this read's per-base mean signal across the plotted interval.

    Positions of the interval not covered by the read are filled with NaN,
    so the result always has length num_bases.
    """
    base_means = nh.get_read_base_means(r_data)
    interval_end = interval_start + num_bases
    if r_data.start > interval_start:
        # Read begins inside the interval: NaN-pad on the left and
        # right-align the read's leading bases.
        overlap = interval_end - r_data.start
        region_means = np.empty(num_bases)
        region_means[:] = np.NAN
        region_means[-overlap:] = base_means[:overlap]
    elif r_data.end < interval_end:
        # Read ends inside the interval: NaN-pad on the right and
        # left-align the read's trailing bases.
        overlap = r_data.end - interval_start
        region_means = np.empty(num_bases)
        region_means[:] = np.NAN
        region_means[:overlap] = base_means[-overlap:]
    else:
        # Read spans the whole interval: slice out the covered stretch.
        offset = interval_start - r_data.start
        region_means = base_means[offset:offset + num_bases]
    return region_means
def get_reg_events(reg_reads, interval_start, num_bases, strand):
reg_events = [
get_read_reg_events(r_data, interval_start, num_bases)
for r_data in reg_reads if r_data.strand == strand]
reg_events = [r_means for r_means in reg_events
| |
import numpy as np
import pandas as pd
import sys
from tqdm import tqdm
import h5py
from sklearn.metrics.pairwise import cosine_similarity
import pkg_resources
import re
import itertools
import os
import matplotlib.pyplot as plt
from sys import stdout ### GET rid of later
from .context import context_composite, context96, context1536, context78, context83, context_composite96
COMPL = {"A":"T","T":"A","G":"C","C":"G"}
# ---------------------------------
# IOUtils
# ---------------------------------
def file_loader(x):
    """Load a matrix from disk.

    ``.parquet`` files are read with pandas' parquet reader; ``.csv`` files
    are read comma-separated and anything else tab-separated, in both cases
    using the first column as the index.
    """
    if x.endswith('.parquet'):
        return pd.read_parquet(x)
    delimiter = ',' if x.endswith('.csv') else '\t'
    return pd.read_csv(x, sep=delimiter, index_col=0)
# ---------------------------------
# NMF Utils
# ---------------------------------
def split_negatives(x: pd.DataFrame, tag: str = '_n', axis: int = 0):
    """
    Split dataframe into positive and negative components.
    --------------------
    Each variable v gets a companion variable v+tag holding the magnitude of
    v's negative entries; negatives in the original are clipped to 0. This
    yields an all-non-negative matrix suitable for NMF.

    Args:
        * x: pd.DataFrame input matrix (NOTE: mutated in place — its labels
          along `axis` are coerced to str)
        * tag: string appended to the negative-component variable names
        * axis: 0 to split along the index (rows), 1 along the columns

    Returns:
        * pd.DataFrame with the positive and negative parts concatenated
          along `axis`.
    """
    # Magnitudes of the negative entries (everything else 0)
    x_neg = -1 * x.copy()
    x_neg = x_neg.where(x_neg > 0, 0)

    # Bug fix: the negative-part labels previously hard-coded '_n',
    # silently ignoring the `tag` parameter.
    if axis:
        x.columns = x.columns.astype(str)
        x_neg.columns = [c + tag for c in x.columns]
    else:
        x.index = x.index.astype(str)
        x_neg.index = [i + tag for i in x.index]

    return pd.concat([x.where(x > 0, 0), x_neg], axis=axis)
def l2fc(df: pd.DataFrame, center: str = 'median', axis: int = 1):
    """
    Log2 Fold-Change Input Dataframe
    -------------------------
    Args:
        * df: pd.DataFrame (values must be positive; log2 of 0 or negatives
          yields -inf/NaN)
        * center: center-metric to compute log-fold change over
            ** 'median'
            ** 'mean'
        * axis: int axis to compute the center and LFC across (i.e. samples)

    Returns:
        * pd.DataFrame: log2FC-transformed pandas dataframe

    Raises:
        * ValueError: if `center` is not 'median' or 'mean' (previously this
          fell through to a NameError on X_mid).
    """
    X = df.values
    if center == 'median':
        X_mid = np.median(X, axis)
    elif center == 'mean':
        X_mid = np.mean(X, axis)
    else:
        raise ValueError("center must be 'median' or 'mean', got {!r}".format(center))

    # Broadcast the center back along the reduced axis before subtracting
    if axis == 1:
        return pd.DataFrame(np.log2(X) - np.log2(X_mid)[:, np.newaxis], index=df.index, columns=df.columns)
    else:
        return pd.DataFrame(np.log2(X) - np.log2(X_mid)[np.newaxis], index=df.index, columns=df.columns)
def compute_phi(mu: float, var: float, beta: float):
    """
    Compute Phi
    ------------------------
    Dispersion parameter for a Tweedie-style mean/variance relation:
    phi = var / mu ** (2 - beta).
    """
    exponent = 2 - beta
    return var / mu ** exponent
def transfer_weights(W: pd.DataFrame, H: pd.DataFrame, active_thresh: float = 1e-2):
    """
    Transfers weights from output of NMF.
    ------------------------
    Drops inactive factors, normalizes each remaining W column to sum to 1,
    and moves the removed scale into the corresponding H rows.

    Args:
        * W: input W matrix (K x n_features)
        * H: input H matrix (n_samples x K)
        * active_thresh: active threshold to consider a factor loading significant

    Returns:
        * W_final: normalized W matrix (K x n_features)
        * H_final: normalized H matrix (n_samples x K)
        * nsig: number of signatures found
        * nonzero_idx: boolean mask of the active factors
    """
    W = W.copy()
    H = H.copy()

    # A factor is active when the product of its total H loading and total
    # W loading exceeds the threshold.
    h_totals = np.sum(H, axis=1)
    w_totals = np.sum(W, axis=0)
    nonzero_idx = (h_totals * w_totals) > active_thresh
    nsig = np.sum(nonzero_idx)

    # Keep only active factors
    W_active = W[:, nonzero_idx]
    H_active = H[nonzero_idx, :]

    # Normalize W columns to unit sum; push the scale into H
    scale = np.sum(W_active, axis=0)
    W_final = W_active / scale
    H_final = scale[:, np.newaxis] * H_active

    return W_final, H_final, nsig, nonzero_idx
def select_signatures(W: pd.DataFrame, H: pd.DataFrame):
    """
    Scales NMF output by sample and feature totals to select Signatures.
    ------------------------
    Mutates W and H in place by appending 'max', 'max_id' and 'max_norm'
    summary columns and renaming signature columns to 'S<k>'.

    Args:
        * W: input W matrix (K x n_features)
        * H: input H matrix (n_samples x K)

    Returns:
        * W: output W matrix with max_id, max, and max_norm columns
        * H: output H matrix with max_id, max, and max_norm columns
    """
    Wnorm = W.copy()
    Hnorm = H.copy()

    # Scale Matrix: for each signature j, weight its W column by the
    # signature's total attribution over samples (row-sum of H), and its
    # H row by the signature's total feature loading (column-sum of W).
    for j in range(W.shape[1]):
        Wnorm.iloc[:,j] *= H.sum(1).values[j]
        Hnorm.iloc[j,:] *= W.sum(0).values[j]

    # Normalize so each Wnorm row / Hnorm column sums to 1
    Wnorm = Wnorm.div(Wnorm.sum(1),axis=0)
    Hnorm = Hnorm.div(Hnorm.sum(0),axis=1)
    # Transpose H so rows are samples and columns are signatures
    H = H.T
    Hnorm = Hnorm.T

    # Get Max Values: per row, the strongest signature and its strength
    H_max_id = H.idxmax(axis=1, skipna=True).astype('int')
    H['max'] = H.max(axis=1, skipna=True)
    H['max_id'] = H_max_id
    Hnorm['max_norm']=Hnorm.max(axis=1, skipna=True)

    W_max_id = W.idxmax(axis=1, skipna=True).astype('int')
    W['max'] = W.max(axis=1, skipna=True)
    W['max_id'] = W_max_id
    Wnorm['max_norm'] = Wnorm.max(axis=1, skipna=True)

    H['max_norm'] = Hnorm['max_norm']
    W['max_norm'] = Wnorm['max_norm']

    # Rename signature columns with an 'S' prefix; the last three columns
    # are the max/max_id/max_norm summaries just added.
    # NOTE(review): 'S'+x assumes the signature column labels are strings;
    # idxmax(...).astype('int') assumes they are numeric-looking — confirm
    # the column labels produced upstream satisfy both.
    _rename = {x:'S'+x for x in list(H)[:-3]}
    H = H.rename(columns=_rename)
    W = W.rename(columns=_rename)

    return W,H
def select_markers(
    X: pd.DataFrame, \
    W: pd.DataFrame, \
    H: pd.DataFrame, \
    cut_norm: float = 0.5, \
    cut_diff: float = 1.0, \
    verbose: bool = False \
    ):
    """
    Marker selection from NMF.
    ------------------------
    For each signature, compares the mean value of its assigned features in
    samples assigned to the same signature ('mean_on') against all other
    samples ('mean_off') and keeps features that are both specific
    (diff > cut_diff) and strong (max_norm >= cut_norm).

    Args:
        * X: Input X matrix (n_samples x n_features)
        * W: input W matrix (K x n_features) with max_id, max, and max_norm columns
        * H: input H matrix (n_samples x K) with max_id, max, and max_norm columns
        * cut_norm: minimum normalized signature strength
        * cut_diff: minimum difference between selected signature and other signatures
        * verbose: show a per-cluster progress bar

    Returns:
        * Pandas Dataframe of NMF markers
        * Pandas Dataframe of full W matrix
    """
    markers = list()
    full = list()

    # Silence SettingWithCopyWarning for the .loc assignments on slices below
    pd.options.mode.chained_assignment = None

    for n in tqdm(np.unique(W['max_id']), desc='Clusters: ', disable=not verbose):
        # Only consider signatures that have at least one assigned sample
        if H[H['max_id']==n].shape[0] > 0:
            # Features whose strongest signature is n
            tmp = W[W['max_id']==n]
            # Mean over samples assigned to n vs. samples assigned elsewhere
            tmp.loc[:,'mean_on'] = X.loc[np.array(tmp.index), H[H['max_id']==n].index].mean(axis=1)
            tmp.loc[:,'mean_off'] = X.loc[np.array(tmp.index), H[H['max_id']!=n].index].mean(axis=1)
            tmp.loc[:,'diff'] = tmp.loc[:,'mean_on'] - tmp.loc[:,'mean_off']
            tmp.sort_values('diff', ascending=False, inplace=True)

            full.append(tmp)
            markers.append(tmp[(tmp['diff'] > cut_diff) & (tmp['max_norm'] >= cut_norm)])

    # Marker features x samples, samples ordered by their assigned signature
    nmf_markers = X.loc[pd.concat(markers).index,H.max_id.sort_values().index]
    nmf_markers.index.name = 'feat'

    return nmf_markers, pd.concat(full)
# ---------------------------------
# Mutational Signature Utils
# ---------------------------------
def load_reference_signatures(ref: str, verbose=True):
    """
    Load reference signatures.
    -------------------------
    Pre-processed Reference Mutational Signatures shipped with the package.

    Args:
        * ref: name of the reference set (e.g. 'cosmic2', 'cosmic3_ID',
          'pcawg_SBS', 'polymerase_msi96')
        * verbose: print which reference set was selected

    Returns:
        * (reference dataframe, name of its index column)

    Raises:
        * Exception: if `ref` is not a supported reference set
    """
    # Supported references: ref key -> (packaged resource path, index column)
    ref_files = {
        'cosmic2': ('ref/cosmic_v2/sa_cosmic2.tsv', 'Somatic Mutation Type'),
        'cosmic3': ('ref/cosmic_v3/sa_cosmic3_sbs.tsv', 'Somatic Mutation Type'),
        'cosmic3_exome': ('ref/cosmic_v3/sa_cosmic3_sbs_exome.tsv', 'Somatic Mutation Type'),
        'cosmic3_DBS': ('ref/cosmic_v3/sa_cosmic3_dbs.tsv', 'Somatic Mutation Type'),
        'cosmic3_ID': ('ref/cosmic_v3/sa_cosmic3_id.tsv', 'Mutation Type'),
        'pcawg_SBS': ('ref/PCAWG/sa_PCAWG_sbs.tsv', 'Somatic Mutation Type'),
        'pcawg_COMPOSITE': ('ref/PCAWG/sa_PCAWG_composite.tsv', 'Somatic Mutation Type'),
        'pcawg_COMPOSITE96': ('ref/PCAWG/sa_PCAWG_composite96.tsv', 'Somatic Mutation Type'),
        'pcawg_SBS_ID': ('ref/PCAWG/sa_PCAWG_sbs_id.tsv', 'Somatic Mutation Type'),
        'pcawg_SBS96_ID': ('ref/PCAWG/sa_PCAWG_sbs96_id.tsv', 'Somatic Mutation Type'),
        'polymerase_msi': ('ref/POLE_MSI/POLE_MSI_1536SBS_ID.tsv', 'Somatic Mutation Type'),
        'polymerase_msi96': ('ref/POLE_MSI/POLE_MSI_SBS96_ID.tsv', 'Somatic Mutation Type'),
    }

    if ref not in ref_files:
        raise Exception("Not yet implemented for {}".format(ref))

    path, reference_index = ref_files[ref]
    # dropna(axis=1): the positional form dropna(1) was deprecated and then
    # removed from pandas; the keyword form preserves the column-wise drop.
    reference = pd.read_csv(
        pkg_resources.resource_filename('signatureanalyzer', path), sep='\t'
    ).dropna(axis=1)

    if verbose:
        print(" * Using {} signatures".format(ref))

    return reference, reference_index
def compl(seq: str, reverse: bool = False):
    """
    Gets the complement of a string.

    Args:
        * seq: string (characters without a defined complement — anything
          outside A/T/G/C — are passed through unchanged)
        * reverse: set to true to reverse seq before complementing

    Returns:
        * complement of seq
    """
    ordered = reversed(seq) if reverse else seq
    return ''.join(COMPL.get(base, base) for base in ordered)
def sbs_annotation_converter(x: str) -> str:
    """
    Eithers swaps from word -> arrow format for SBS or vice versa.
    word: (REF)(ALT)(LEFT)(RIGHT)
    arrow: (LEFT)[(REF)>(ALT)](RIGHT)
    """
    if '>' in x:
        # arrow -> word: pull fields out of L[R>A]G by fixed position
        left, ref, alt, right = x[0], x[2], x[4], x[6]
        return ref + alt + left + right
    # word -> arrow
    ref, alt, left, right = x[0], x[1], x[2], x[3]
    return left + '[' + ref + '>' + alt + ']' + right
def sbs1536_annotation_converter(x: str) -> str:
    """
    Eithers swaps from word -> arrow format for 1536 SBS or vice versa.
    word: (REF)(ALT)(L-2)(L-1)(R+1)(R+2)
    arrow: (L-2)(L-1)[(REF)>(ALT)](R+1)(R+2)
    """
    if '>' in x:
        # arrow -> word: LL[R>A]RR, fields at fixed offsets
        ref, alt = x[3], x[5]
        left2, right2 = x[:2], x[7:9]
        return ref + alt + left2 + right2
    # word -> arrow
    ref, alt = x[0], x[1]
    left2, right2 = x[2:4], x[4:6]
    return left2 + '[' + ref + '>' + alt + ']' + right2
def _map_id_sigs(
    df: pd.DataFrame,
    ) -> pd.Series:
    """
    Map Insertion-Deletion Substitution Signatures.
    -----------------------
    Converts each index label of `df` into a COSMIC-style ID context string
    of the form '<DEL|INS>_<class>_<length>_<count>'.

    Args:
        * df: pandas.core.frame.DataFrame with index to be mapped
          (side effect: an unnamed index is renamed to 'index')

    Returns:
        * pandas.core.series.Series with matching indices to input cosmic
    """
    def _convert_to_cosmic(x):
        # Deletion vs insertion, detected from a 'del' substring
        i1 = 'DEL' if 'del' in x else 'INS'
        if x[0].isdigit():
            # Multi-base event: microhomology ('m') vs repeat-mediated
            i2 = 'MH' if 'm' in x else 'repeats'
            # First run of digits = event length.
            # NOTE(review): the character class '[\d+]+' also matches literal
            # '+' characters; plain r'\d+' was probably intended — confirm
            # against the expected input labels.
            i3 = re.search('[\d+]+', x).group()
        else:
            # Single-base event: class is the base itself, length 1
            i2 = x[0]
            i3 = '1'
        # Trailing digits = repeat/homopolymer count
        i4 = re.search('[\d+]+$', x).group()
        if i1 == 'DEL' and i2 != 'MH':
            # Decrement the leading digit of the count for non-MH deletions
            # (presumably converting a 1-based count to COSMIC's 0-based
            # convention — confirm against the COSMIC ID83 definitions)
            i4 = str(int(i4[0]) - 1) + i4[1:]
        return '_'.join([i1, i2, i3, i4])

    # Name an unnamed index so reset_index() exposes it as a column
    if df.index.name is None: df.index.name = 'index'
    df_idx = df.index.name

    context_s = df.reset_index()[df_idx]
    return context_s.apply(_convert_to_cosmic)
def _map_dbs_sigs(
df: pd.DataFrame,
cosmic_df: pd.DataFrame,
sub_index: str = 'Substitution Type'
) -> pd.Series:
"""
Map Doublet-Base Substitution Signatures.
-----------------------
Args:
* df: pandas.core.frame.DataFrame with index to be mapped
* cosmic_df: dataframe with Cosmic indices to map to
* sub_index: substitution index - the column to map to in the cosmic dataframe
Returns:
* pandas.core.series.Series with matching indices to input cosmic
"""
def _check_to_flip(x, ref):
if x in ref:
return x
else:
return compl(x[:2], reverse=True) + '>' + compl(x[3:], reverse=True)
if df.index.name is None: df.index.name = 'index'
df_idx = df.index.name
context_s = df.reset_index()[df_idx]
return context_s.apply(lambda x: _check_to_flip(x, set(cosmic_df[sub_index])))
def _map_sbs_sigs(
df: pd.DataFrame,
ref_df: | |
player:\n"
for upgrade in self.__upgrade_list:
res += str(upgrade) + "\n"
return res
    def level_up(self):
        # type: () -> None
        """Advance the player's level while accumulated EXP meets the
        requirement. EXP is not spent; only the requirement grows, so the
        loop terminates once the next requirement exceeds current EXP."""
        while self.exp >= self.required_exp:
            self.level += 1
            # Requirement scales by 10 ** triangular(new level) per level
            self.required_exp *= mpf("10") ** triangular(self.level)
def roll_dice(self, game):
# type: (Game) -> None
self.location += Dice().value
if self.location >= len(game.board.get_tiles()):
self.gold += game.start_bonus
self.location -= len(game.board.get_tiles())
def get_gold_per_turn(self):
# type: () -> mpf
return mpf_sum_of_list([place.gold_per_turn for place in self.__owned_list]) * \
mpf_product_of_list([upgrade.gold_gain_multiplier for upgrade in self.__upgrade_list])
def get_exp_per_turn(self):
# type: () -> mpf
return mpf_sum_of_list([place.exp_per_turn for place in self.__owned_list]) * \
mpf_product_of_list([upgrade.exp_gain_multiplier for upgrade in self.__upgrade_list])
    def get_owned_list(self):
        # type: () -> list
        """Return the places this player owns (the live list, not a copy —
        callers can mutate the player's holdings through it)."""
        return self.__owned_list
def buy_place(self, place):
# type: (Place) -> bool
if self.gold >= place.gold_cost:
self.gold -= place.gold_cost
self.__owned_list.append(place)
place.owner = self
return True
return False
def upgrade_place(self, place):
# type: (Place) -> bool
if place in self.__owned_list:
if self.gold >= place.gold_cost:
self.gold -= place.gold_cost
place.level_up()
return True
return False
return False
def acquire_place(self, place, owner):
# type: (Place, Player) -> bool
if place in owner.get_owned_list() and place not in self.get_owned_list():
if self.gold >= place.gold_cost:
self.gold -= place.gold_cost
owner.gold += place.gold_cost
place.level_up()
self.__owned_list.append(place)
owner.__owned_list.append(place)
place.owner = self
return True
return False
return False
    def get_upgrade_list(self):
        # type: () -> list
        """Return the upgrades this player has purchased (the live list,
        not a copy)."""
        return self.__upgrade_list
def buy_upgrade(self, upgrade):
# type: (Upgrade) -> bool
if self.gold >= upgrade.gold_cost:
self.gold -= upgrade.gold_cost
self.__upgrade_list.append(upgrade)
return True
return False
    def get_random_reward(self, random_reward):
        # type: (RandomReward) -> None
        """Collect a random reward's gold and EXP, then apply any level-ups
        the new EXP unlocks."""
        self.gold += random_reward.reward_gold
        self.exp += random_reward.reward_exp
        self.level_up()
def answer_quiz_question(self, quiz_question, input_answer):
# type: (QuizQuestion, str) -> bool
if input_answer == quiz_question.correct_answer:
self.gold += quiz_question.correct_answer_gold_reward
self.exp += quiz_question.correct_answer_exp_reward
self.level_up()
return True
return False
    def gain_turn_reward(self):
        # type: () -> None
        """Apply one turn's passive income (gold and EXP per turn), then
        apply any resulting level-ups."""
        self.gold += self.get_gold_per_turn()
        self.exp += self.get_exp_per_turn()
        self.level_up()
    def clone(self):
        # type: () -> Player
        """Return an independent deep copy of this player, including the
        owned-place and upgrade lists."""
        return copy.deepcopy(self)
class CPU(Player):
    """
    A computer-controlled opponent: a regular Player that is always
    named "CPU".
    """

    def __init__(self):
        # type: () -> None
        super().__init__("CPU")
class Game:
    """
    Saved-game state: the two competitors, the board, the quiz-question
    pool, and the per-game start bonus.
    """

    def __init__(self, player, cpu, board, quiz_questions):
        # type: (Player, CPU, Board, list) -> None
        self.turn: int = 0
        self.start_bonus: mpf = mpf("2e5")
        self.player: Player = player
        self.cpu: CPU = cpu
        self.board: Board = board
        self.__quiz_questions: list = quiz_questions

    def __str__(self):
        # type: () -> str
        """Human-readable summary of the start bonus and both players."""
        summary = [
            "Start Bonus: " + str(self.start_bonus),
            "Player's stats in the game: " + str(self.player),
            "CPU's stats in the game: " + str(self.cpu),
        ]
        return "\n".join(summary) + "\n"

    def get_quiz_questions(self):
        # type: () -> list
        """Return the quiz questions available in this game."""
        return self.__quiz_questions

    def clone(self):
        # type: () -> Game
        """Return an independent deep copy of the game state."""
        return copy.deepcopy(self)
# Creating main function used to run the game.
def main() -> int:
"""
This main function is used to run the game.
:return: an integer
"""
print("Welcome to 'Own The Planet - Board Game Edition' by 'DigitalCreativeApkDev'.")
print("This game is an offline board game where the player and CPU compete to be the richest in the planet.")
# Initialising function level variables to be used in the game.
# 1. List of upgrades sold in the upgrade shop
upgrades_sold: list = [
Upgrade("GOLD UPGRADE #1", "Gold upgrade level 1.", mpf("1e10"), mpf("5"), mpf("1")),
Upgrade("GOLD UPGRADE #2", "Gold upgrade level 2.", mpf("1e20"), mpf("10"), mpf("1")),
Upgrade("GOLD UPGRADE #3", "Gold upgrade level 3.", mpf("1e40"), mpf("20"), mpf("1")),
Upgrade("GOLD UPGRADE #4", "Gold upgrade level 4.", mpf("1e80"), mpf("40"), mpf("1")),
Upgrade("GOLD UPGRADE #5", "Gold upgrade level 5.", mpf("1e160"), mpf("80"), mpf("1")),
Upgrade("GOLD UPGRADE #6", "Gold upgrade level 6.", mpf("1e320"), mpf("160"), mpf("1")),
Upgrade("GOLD UPGRADE #7", "Gold upgrade level 7.", mpf("1e640"), mpf("320"), mpf("1")),
Upgrade("GOLD UPGRADE #8", "Gold upgrade level 8.", mpf("1e1280"), mpf("640"), mpf("1")),
Upgrade("GOLD UPGRADE #9", "Gold upgrade level 9.", mpf("1e2560"), mpf("1280"), mpf("1")),
Upgrade("GOLD UPGRADE #10", "Gold upgrade level 10.", mpf("1e5120"), mpf("2560"), mpf("1")),
Upgrade("EXP UPGRADE #1", "EXP upgrade level 1.", mpf("1e10"), mpf("1"), mpf("5")),
Upgrade("EXP UPGRADE #2", "EXP upgrade level 2.", mpf("1e20"), mpf("1"), mpf("10")),
Upgrade("EXP UPGRADE #3", "EXP upgrade level 3.", mpf("1e40"), mpf("1"), mpf("20")),
Upgrade("EXP UPGRADE #4", "EXP upgrade level 4.", mpf("1e80"), mpf("1"), mpf("40")),
Upgrade("EXP UPGRADE #5", "EXP upgrade level 5.", mpf("1e160"), mpf("1"), mpf("80")),
Upgrade("EXP UPGRADE #6", "EXP upgrade level 6.", mpf("1e320"), mpf("1"), mpf("160")),
Upgrade("EXP UPGRADE #7", "EXP upgrade level 7.", mpf("1e640"), mpf("1"), mpf("320")),
Upgrade("EXP UPGRADE #8", "EXP upgrade level 8.", mpf("1e1280"), mpf("1"), mpf("640")),
Upgrade("EXP UPGRADE #9", "EXP upgrade level 9.", mpf("1e2560"), mpf("1"), mpf("1280")),
Upgrade("EXP UPGRADE #10", "EXP upgrade level 10.", mpf("1e5120"), mpf("1"), mpf("2560"))
]
# 2. The board
board: Board = Board([
StartTile(),
Place("The Pygmy Wilderness", "A jungle.", mpf("1e5"), mpf("1e4"), mpf("1e3")),
EmptySpace(),
EmptySpace(),
Place("Kihahancha Paradise", "A jungle.", mpf("1e10"), mpf("1e8"), mpf("1e6")),
EmptySpace(),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
Place("The Almond Seafront", "A beach.", mpf("1e16"), mpf("1e13"), mpf("1e10")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
Place("Cineneh Enclave", "An island.", mpf("1e23"), mpf("1e19"), mpf("1e15")),
EmptySpace(),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
Place("Emerheller Shallows", "A lake.", mpf("1e31"), mpf("1e26"), mpf("1e21")),
EmptySpace(),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
Place("Chelver Waters", "A lake.", mpf("1e40"), mpf("1e34"), mpf("1e28")),
EmptySpace(),
QuizTile(),
EmptySpace(),
Place("Wreckage Bay", "A pirate cove.", mpf("1e50"), mpf("1e43"), mpf("1e36")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("Kraken Cay", "A pirate cove.", mpf("1e61"), mpf("1e53"), mpf("1e45")),
EmptySpace(),
EmptySpace(),
RandomRewardTile(),
Place("The Secret Haunt", "A dungeon.", mpf("1e73"), mpf("1e64"), mpf("1e55")),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
Place("The Nether Pits", "A dungeon.", mpf("1e86"), mpf("1e76"), mpf("1e66")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
EmptySpace(),
Place("Gourdley Citadel", "A castle.", mpf("1e100"), mpf("1e89"), mpf("1e78")),
EmptySpace(),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("Kiamgema Wilderness", "A jungle.", mpf("1e115"), mpf("1e103"), mpf("1e91")),
EmptySpace(),
QuizTile(),
EmptySpace(),
Place("Baraboni Paradise", "A jungle.", mpf("1e131"), mpf("1e118"), mpf("1e105")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
Place("Gladbour Highlands", "A mountain.", mpf("1e148"), mpf("1e134"), mpf("1e120")),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
Place("Blacklita Mountain", "A mountain.", mpf("1e166"), mpf("1e151"), mpf("1e136")),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
EmptySpace(),
Place("Infinite Bank", "A beach.", mpf("1e185"), mpf("1e169"), mpf("1e153")),
RandomRewardTile(),
EmptySpace(),
EmptySpace(),
Place("Nanstino Margin", "A beach.", mpf("1e205"), mpf("1e188"), mpf("1e171")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
Place("Barringsor Holm", "An island.", mpf("1e226"), mpf("1e208"), mpf("1e190")),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
EmptySpace(),
Place("Sudcona Key", "An island.", mpf("1e248"), mpf("1e229"), mpf("1e210")),
EmptySpace(),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
Place("Canhead Shallows", "A lake.", mpf("1e271"), mpf("1e251"), mpf("1e231")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("Shrewsry Domain", "A lake.", mpf("1e295"), mpf("1e274"), mpf("1e253")),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
Place("<NAME>", "A forest.", mpf("1e320"), mpf("1e298"), mpf("1e276")),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
Place("Timber Enclave", "A pirate cove.", mpf("1e346"), mpf("1e323"), mpf("1e300")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("Cove of Salty Sands", "A pirate cove.", mpf("1e373"), mpf("1e349"), mpf("1e325")),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
EmptySpace(),
Place("Raging Prairie", "A desert.", mpf("1e401"), mpf("1e376"), mpf("1e351")),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
EmptySpace(),
Place("Moaning Wastes", "A desert.", mpf("1e430"), mpf("1e404"), mpf("1e378")),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
EmptySpace(),
Place("Cladborough Stronghold", "A castle.", mpf("1e460"), mpf("1e433"), mpf("1e406")),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("Grimcarres Bay", "A beach.", mpf("1e491"), mpf("1e463"), mpf("1e435")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("Brookcarres Sands", "A beach.", mpf("1e523"), mpf("1e494"), mpf("1e465")),
EmptySpace(),
QuizTile(),
EmptySpace(),
Place("Neunora Sands", "A beach.", mpf("1e556"), mpf("1e526"), mpf("1e496")),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
Place("Mahanmei", "A jungle.", mpf("1e590"), mpf("1e559"), mpf("1e528")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("The Mighty Paradise", "A jungle.", mpf("1e625"), mpf("1e593"), mpf("1e561")),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
Place("Hideout of Grog", "A pirate cove.", mpf("1e661"), mpf("1e628"), mpf("1e595")),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
EmptySpace(),
Place("Dead Kraken Lagoon", "A pirate cove.", mpf("1e698"), mpf("1e664"), mpf("1e630")),
EmptySpace(),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
EmptySpace(),
Place("Shelgue Key", "An island.", mpf("1e736"), mpf("1e701"), mpf("1e666")),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
Place("The Molten Skerry", "An island.", mpf("1e775"), mpf("1e739"), mpf("1e703")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("<NAME>", "A castle.", mpf("1e815"), mpf("1e778"), mpf("1e741")),
EmptySpace(),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
EmptySpace(),
Place("The Wrinkled Domain", "A sea.", mpf("1e856"), mpf("1e818"), mpf("1e780")),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
Place("Chesterrial Waters", "A sea.", mpf("1e898"), mpf("1e859"), mpf("1e820")),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
Place("The Frothy Abyss", "A sea.", mpf("1e941"), mpf("1e901"), mpf("1e861")),
EmptySpace(),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("Calm Loch", "A lake.", mpf("1e985"), mpf("1e944"), mpf("1e903")),
EmptySpace(),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("Nipatane Expanse", "A lake.", mpf("1e1030"), mpf("1e988"), mpf("1e946")),
EmptySpace(),
EmptySpace(),
RandomRewardTile(),
EmptySpace(),
EmptySpace(),
EmptySpace(),
Place("New Gardens Wharf", "A harbor.", mpf("1e1076"), mpf("1e1033"), mpf("1e990")),
EmptySpace(),
EmptySpace(),
UpgradeShop(upgrades_sold),
EmptySpace(),
EmptySpace(),
QuizTile(),
EmptySpace(),
EmptySpace(),
Place("Nokojour Landing", "A harbor.", mpf("1e1123"), mpf("1e1079"), mpf("1e1035"))
])
# 3. New game representation
new_game: Game
# 4. Quiz questions
quiz_questions: list = [
QuizQuestion("What is the length of an Olympic Swimming Pool (in metres)?", [
"A. 100 metres",
"B. 50 metres",
"C. 25 metres",
"D. 75 metres"
], "B", mpf("1e100"), mpf("1e100")),
QuizQuestion("What is | |
# Repository: splunk-soar-connectors/sep14
# File: sep14_connector.py
#
# Copyright (c) 2017-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Standard library imports
import datetime
import json
import re
import time
# Phantom imports
import phantom.app as phantom
import requests
import xmltodict
from bs4 import BeautifulSoup
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
# Local imports
import sep14_consts as consts
# Human-readable names for the numeric command states, keyed by the state
# value as a string.
COMMAND_STATE_DESC = {
    "0": "INITIAL",
    "1": "RECEIVED",
    "2": "IN_PROGRESS",
    "3": "COMPLETED",
    "4": "REJECTED",
    "5": "CANCELED",
    "6": "ERROR"
}

# Human-readable names for command sub-states; note the keys are string
# integers and include '-1'.
COMMAND_SUB_STATE_DESC = {
    '-1': 'Unknown', '0': 'Success', '1': 'Client did not execute the command'
}

# Dictionary that maps each error code with its corresponding message
ERROR_RESPONSE_DICT = {
    consts.SEP_REST_RESP_UNAUTHORIZED: consts.SEP_REST_RESP_UNAUTHORIZED_MSG,
    consts.SEP_REST_RESP_BAD_REQUEST: consts.SEP_REST_RESP_BAD_REQUEST_MSG,
    consts.SEP_REST_RESP_NOT_FOUND: consts.SEP_REST_RESP_NOT_FOUND_MSG,
    consts.SEP_REST_RESP_ERROR_IN_PROCESSING: consts.SEP_REST_RESP_ERROR_IN_PROCESSING_MSG,
    consts.SEP_REST_RESP_FORBIDDEN: consts.SEP_REST_RESP_FORBIDDEN_MSG,
    consts.SEP_REST_RESP_GONE: consts.SEP_REST_RESP_GONE_MSG
}
class Sep14Connector(BaseConnector):
""" This is an AppConnector class that inherits the BaseConnector class. It implements various actions supported by
sep14 and helper methods required to run the actions.
"""
def __init__(self):
# Calling the BaseConnector's init function
super(Sep14Connector, self).__init__()
self._url = None
self._username = None
self._password = None
self._verify_server_cert = None
self._state = None
self._token = None
return
def initialize(self):
""" This is an optional function that can be implemented by the AppConnector derived class. Since the
configuration dictionary is already validated by the time this function is called, it's a good place to do any
extra initialization of any internal modules. This function MUST return a value of either phantom.APP_SUCCESS or
phantom.APP_ERROR. If this function returns phantom.APP_ERROR, then AppConnector::handle_action will not get
called.
"""
config = self.get_config()
self._url = config[consts.SEP_CONFIG_URL]
self._username = config[consts.SEP_CONFIG_USERNAME]
self._password = config[consts.SEP_CONFIG_PASSWORD]
self._verify_server_cert = config.get(consts.SEP_CONFIG_VERIFY_SSL, False)
self._state = self.load_state()
if self._state:
self._token = self._state.get('token')
return phantom.APP_SUCCESS
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error messages from the exception.
:param e: Exception object
:return: error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = consts.SEP_ERR_CODE_MSG
error_msg = e.args[0]
else:
error_code = consts.SEP_ERR_CODE_MSG
error_msg = consts.SEP_ERR_MSG_UNAVAILABLE
except:
error_code = consts.SEP_ERR_CODE_MSG
error_msg = consts.SEP_ERR_MSG_UNAVAILABLE
try:
if error_code in consts.SEP_ERR_CODE_MSG:
error_text = "Error Message: {0}".format(error_msg)
else:
error_text = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
except:
self.debug_print("Error occurred while parsing error message")
error_text = consts.SEP_PARSE_ERR_MSG
return error_text
def _validate_integer(self, action_result, parameter, key, allow_zero=False):
try:
if not float(parameter).is_integer():
return action_result.set_status(phantom.APP_ERROR, consts.SEP_INT_ERR_MSG.format(key=key)), None
parameter = int(parameter)
except:
return action_result.set_status(phantom.APP_ERROR, consts.SEP_INT_ERR_MSG.format(key=key)), None
if parameter < 0:
return action_result.set_status(phantom.APP_ERROR,
'Please provide a valid non-negative integer value in the "{}" parameter'.format(key)), None
if not allow_zero and parameter == 0:
return action_result.set_status(phantom.APP_ERROR, "Please provide a positive integer value in the '{}' parameter".format(key)), None
return phantom.APP_SUCCESS, parameter
    def _generate_api_token(self, action_result):
        """Generate new token based on the credentials provided.

        On success the token is cached in both self._state and self._token;
        on a failed request any previously cached token is cleared so the
        next call re-authenticates.

        :param action_result: object of ActionResult class
        :return: status phantom.APP_SUCCESS/phantom.APP_ERROR (along with appropriate message)
        """
        # NOTE(review): credentials are URL-quoted before being placed in a
        # JSON body — presumably required by the auth endpoint; confirm
        # against the SEPM API, as quoting inside JSON is unusual.
        authorization = {
            "username": requests.compat.quote(self._username),
            "password": requests.compat.quote(self._password)
        }

        response_status, response = self._make_rest_call(consts.SEP_TEST_CONNECTIVITY_ENDPOINT, action_result,
                                                         data=json.dumps(authorization), timeout=30, method="post")

        if phantom.is_fail(response_status):
            # Drop any stale token on failure
            self._state['token'] = None
            return action_result.get_status()

        token = response.get("token")
        if not token:
            self.debug_print("Failed to generate token")
            return action_result.set_status(phantom.APP_ERROR, "Failed to generate token")

        # Persist for reuse across actions
        self._state['token'] = self._token = token
        return phantom.APP_SUCCESS
    def _make_rest_call_abstract(self, endpoint, action_result, headers=None, data=None, params=None, method="get",
                                 timeout=None):
        """This method generates a new token if it is not available or if the existing token has expired
        and makes the call using _make_rest_call method.

        :param endpoint: REST endpoint
        :param action_result: object of ActionResult class
        :param headers: requests headers
        :param data: request body
        :param params: request params
        :param method: GET/POST/PUT/DELETE (Default method will be 'GET')
        :param timeout: request timeout
        :return: status phantom.APP_SUCCESS/phantom.APP_ERROR (along with appropriate message) and API response
        """

        # Use this object for _make_rest_call
        # Final status of action_result will be determined after retry, in case the token is expired
        intermediate_action_result = ActionResult()
        response_data = None

        # Generate new token if not available
        if not self._token:
            ret_code = self._generate_api_token(action_result)
            if phantom.is_fail(ret_code):
                return action_result.get_status(), response_data

        # Attach the bearer token without discarding caller-supplied headers
        if headers:
            headers.update({"Authorization": "Bearer {}".format(self._token)})
        else:
            headers = {"Authorization": "Bearer {}".format(self._token)}

        # Make call
        rest_ret_code, response_data = self._make_rest_call(endpoint, intermediate_action_result, headers=headers,
                                                            params=params, data=data, method=method, timeout=timeout)

        # Regenerating a new token if expired. Expiry is detected by looking
        # for the 401 status code inside the intermediate result's message.
        if str(consts.SEP_REST_RESP_UNAUTHORIZED) in str(intermediate_action_result.get_message()):
            ret_code = self._generate_api_token(action_result)
            if phantom.is_fail(ret_code):
                return action_result.get_status(), response_data
            headers = {"Authorization": "Bearer {}".format(self._token)}
            # NOTE(review): the retry rebuilds headers from scratch (dropping
            # any caller-supplied ones) and omits `timeout` — confirm both
            # are intentional.
            rest_ret_code, response_data = self._make_rest_call(endpoint, intermediate_action_result, headers=headers,
                                                                params=params, data=data, method=method)

        # Assigning intermediate action_result to action_result, since no further invocation required
        if phantom.is_fail(rest_ret_code):
            action_result.set_status(rest_ret_code, intermediate_action_result.get_message())
            return action_result.get_status(), response_data

        return phantom.APP_SUCCESS, response_data
    def _make_rest_call(self, endpoint, action_result, headers=None, params=None, data=None, method="get",
                        timeout=None):
        """ Function that makes the REST call to the device. It is a generic function that can be called from various
        action handlers.

        On any failure the status (with message) is recorded on action_result and
        (action_result status, response_data) is returned; on success the return is
        (phantom.APP_SUCCESS, parsed response).

        :param endpoint: REST endpoint that needs to appended to the service address
        :param action_result: object of ActionResult class
        :param headers: request headers
        :param params: request parameters if method is get
        :param data: request body
        :param method: GET/POST/PUT/DELETE ( Default method will be 'GET' )
        :param timeout: request timeout
        :return: status success/failure(along with appropriate message), response obtained by making an API call
        """
        response_data = None
        # Resolve the requests-module function (requests.get, requests.post, ...)
        # from the method name; an unsupported verb raises AttributeError.
        try:
            request_func = getattr(requests, method)
        except AttributeError:
            self.debug_print(consts.SEP_ERR_API_UNSUPPORTED_METHOD.format(method=method))
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(
                phantom.APP_ERROR, consts.SEP_ERR_API_UNSUPPORTED_METHOD.format(method=method)), response_data
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            self.debug_print(consts.SEP_EXCEPTION_OCCURRED, err)
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, consts.SEP_EXCEPTION_OCCURRED, err), response_data
        # Default the Content-Type to JSON without clobbering a caller-supplied value
        if headers:
            if not headers.get("Content-Type"):
                headers.update({"Content-Type": "application/json"})
        else:
            headers = {"Content-Type": "application/json"}
        # Make the call
        try:
            response = request_func("{}{}{}".format(self._url, consts.SEP_API_URL, endpoint), params=params,
                                    data=data, headers=headers, verify=self._verify_server_cert,
                                    timeout=timeout)
            # store the r_text in debug data, it will get dumped in the logs if an error occurs
            if hasattr(action_result, 'add_debug_data'):
                if response is not None:
                    action_result.add_debug_data({'r_status_code': response.status_code})
                    action_result.add_debug_data({'r_text': response.text})
                    action_result.add_debug_data({'r_headers': response.headers})
                else:
                    action_result.add_debug_data({'r_text': 'r is None'})
        except Exception as e:
            err = self._get_error_message_from_exception(e)
            self.debug_print(consts.SEP_ERR_SERVER_CONNECTION, err)
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, consts.SEP_ERR_SERVER_CONNECTION, err), response_data
        # Try parsing the json; fall back to HTML processing or the raw text
        # depending on the response content type
        try:
            content_type = response.headers.get('content-type', "")
            if 'json' in content_type:
                response_data = response.json()
            elif 'html' in content_type:
                response_data = self._process_html_response(response)
            else:
                response_data = response.text
        except Exception as e:
            # r.text is guaranteed to be NON None, it will be empty, but not None
            msg_string = consts.SEP_ERR_JSON_PARSE.format(raw_text=response.text)
            self.debug_print(msg_string, e)
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, msg_string, e), response_data
        # Known error status codes get a canned message...
        if response.status_code in ERROR_RESPONSE_DICT:
            message = ERROR_RESPONSE_DICT[response.status_code]
            # ...overriding message if available in response (several possible keys)
            if isinstance(response_data, dict):
                message = response_data.get("error_description", response_data.get("errorMessage", response_data.get(
                    "message", message)))
            self.debug_print(consts.SEP_ERR_FROM_SERVER.format(status=response.status_code,
                                                               detail=message))
            # set the action_result status to error, the handler function will most probably return as is
            return action_result.set_status(phantom.APP_ERROR, consts.SEP_ERR_FROM_SERVER,
                                            status=response.status_code, detail=message), response_data
        # In case of success scenario
        if response.status_code == consts.SEP_REST_RESP_SUCCESS:
            return phantom.APP_SUCCESS, response_data
        # If response code is unknown
        message = consts.SEP_REST_RESP_OTHER_ERROR_MSG
        # overriding message if available in response
        if isinstance(response_data, dict):
            message = response_data.get("error_description", response_data.get("errorMessage", response_data.get(
                "message", message)))
        # If response code is unknown
        self.debug_print(consts.SEP_ERR_FROM_SERVER.format(
            status=response.status_code, detail=message))
        # All other response codes from REST call
        # Set the action_result status to error, the handler function will most probably return as is
        return action_result.set_status(phantom.APP_ERROR, consts.SEP_ERR_FROM_SERVER,
                                        status=response.status_code,
                                        detail=message), response_data
def _fetch_items_paginated(self, url, action_result, params=None):
"""Helper function to get list of items for given url using pagination
:param url: API url to fetch items from
:param params: object of parameters to pass in API request call
:param action_result: object of ActionResult class
:return items list
"""
pagination_completed = False
items_list = []
if not params:
params = dict()
try:
limit = params.pop(consts.SEP_PARAM_LIMIT)
except KeyError:
limit = None
params['pageIndex'] = 1
params['pageSize'] = 500
while not pagination_completed:
response_status, response_data = self._make_rest_call_abstract(url,
action_result, params=params, method="get")
# | |
# Scripts/build/lib.linux-x86_64-2.7/rdpy/protocol/rfb/rfb.py
#
# Copyright (c) 2014-2015 <NAME>
#
# This file is part of rdpy.
#
# rdpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Implement Remote FrameBuffer protocol use in VNC client and server
@see: http://www.realvnc.com/docs/rfbproto.pdf
@todo: server side of protocol
@todo: more encoding rectangle
"""
from rdpy.core.layer import RawLayer, RawLayerClientFactory
from rdpy.core.type import UInt8, UInt16Be, UInt32Be, SInt32Be, String, CompositeType
from rdpy.core.error import InvalidValue, CallPureVirtualFuntion
from rdpy.security.pyDes import des
import rdpy.core.log as log
class ProtocolVersion(object):
    """
    @summary: Different protocol version
    Exact 12-byte version banners (trailing newline included) exchanged
    during the RFB handshake.
    """
    # used internally when the peer announces an unrecognized banner
    UNKNOWN = ""
    RFB003003 = "RFB 003.003\n"
    RFB003007 = "RFB 003.007\n"
    RFB003008 = "RFB 003.008\n"
class SecurityType(object):
    """
    @summary: Security type supported
    Numeric security types negotiated after the version handshake.
    """
    INVALID = 0
    # no authentication required
    NONE = 1
    # DES-based VNC password challenge
    VNC = 2
class Pointer(object):
    """
    @summary: Mouse event code (which button)
    actually in RFB specification only
    three buttons are supported
    """
    # one bit per button; values are OR-ed into the pointer event mask
    BUTTON1 = 0x1
    BUTTON2 = 0x2
    BUTTON3 = 0x4
class Encoding(object):
    """
    @summary: Encoding types of FrameBuffer update
    Only raw (uncompressed) rectangles are handled so far.
    """
    RAW = 0
class ClientToServerMessages(object):
    """
    @summary: Client to server messages types
    First byte of each message sent by the client.
    """
    PIXEL_FORMAT = 0
    ENCODING = 2
    FRAME_BUFFER_UPDATE_REQUEST = 3
    KEY_EVENT = 4
    POINTER_EVENT = 5
    CUT_TEXT = 6
class PixelFormat(CompositeType):
    """
    @summary: Pixel format structure
    Defaults describe 32 bpp / 24-bit depth little endian true color with
    8 bits per RGB channel.
    """
    def __init__(self):
        CompositeType.__init__(self)
        # NOTE(review): attribute declaration order presumably defines the
        # on-wire field order of the CompositeType - keep it unchanged
        self.BitsPerPixel = UInt8(32)
        self.Depth = UInt8(24)
        self.BigEndianFlag = UInt8(False)
        self.TrueColorFlag = UInt8(True)
        self.RedMax = UInt16Be(255)
        self.GreenMax = UInt16Be(255)
        self.BlueMax = UInt16Be(255)
        self.RedShift = UInt8(16)
        self.GreenShift = UInt8(8)
        self.BlueShift = UInt8(0)
        # three bytes of protocol padding
        self.padding = (UInt16Be(), UInt8())
class ServerInit(CompositeType):
    """
    @summary: Server init structure
    FrameBuffer configuration
    """
    def __init__(self):
        CompositeType.__init__(self)
        # framebuffer dimensions in pixels
        self.width = UInt16Be()
        self.height = UInt16Be()
        # pixel encoding advertised by the server
        self.pixelFormat = PixelFormat()
class FrameBufferUpdateRequest(CompositeType):
    """
    @summary: FrameBuffer update request send from client to server
    Incremental means that server send update with a specific
    order, and client must draw orders in same order
    """
    def __init__(self, incremental = False, x = 0, y = 0, width = 0, height = 0):
        CompositeType.__init__(self)
        # incremental is a boolean sent as a single byte
        self.incremental = UInt8(incremental)
        # requested region (top-left corner plus size)
        self.x = UInt16Be(x)
        self.y = UInt16Be(y)
        self.width = UInt16Be(width)
        self.height = UInt16Be(height)
class Rectangle(CompositeType):
    """
    @summary: Header message of update rectangle
    12 bytes: position, size and the (signed) encoding of the pixel data
    that follows.
    """
    def __init__(self):
        CompositeType.__init__(self)
        self.x = UInt16Be()
        self.y = UInt16Be()
        self.width = UInt16Be()
        self.height = UInt16Be()
        # see Encoding class; signed because pseudo-encodings are negative
        self.encoding = SInt32Be()
class KeyEvent(CompositeType):
    """
    @summary: Key event structure message
    Use to send a keyboard event
    """
    def __init__(self):
        CompositeType.__init__(self)
        # True for key press, False for release
        self.downFlag = UInt8(False)
        self.padding = UInt16Be()
        # key symbol code
        self.key = UInt32Be()
class PointerEvent(CompositeType):
    """
    @summary: Pointer event structure message
    Use to send mouse event
    """
    def __init__(self):
        CompositeType.__init__(self)
        # OR-ed Pointer.BUTTON* bits of the currently pressed buttons
        self.mask = UInt8()
        # pointer position in framebuffer coordinates
        self.x = UInt16Be()
        self.y = UInt16Be()
class ClientCutText(CompositeType):
    """
    @summary: Client cut text message message
    Use to simulate copy paste (ctrl-c ctrl-v) only for text
    """
    def __init__(self, text = ""):
        CompositeType.__init__(self)
        # three bytes of protocol padding
        self.padding = (UInt16Be(), UInt8())
        # length-prefixed text payload
        self.size = UInt32Be(len(text))
        self.message = String(text)
class ServerCutTextHeader(CompositeType):
    """
    @summary: Cut text header send from server to client
    Only the header; the text body of `size` bytes follows separately.
    """
    def __init__(self):
        CompositeType.__init__(self)
        # three bytes of protocol padding
        self.padding = (UInt16Be(), UInt8())
        self.size = UInt32Be()
class RFB(RawLayer):
"""
@summary: Implement RFB protocol
"""
def __init__(self, listener):
"""
@param listener: listener use to inform new orders
"""
RawLayer.__init__(self)
#set client listener
self._clientListener = listener
#useful for RFB protocol
self._callbackBody = None
#protocol version negotiated
self._version = String(ProtocolVersion.RFB003008)
#number security launch by server
self._securityLevel = UInt8(SecurityType.INVALID)
#shared FrameBuffer client init message
self._sharedFlag = UInt8(False)
#server init message
#which contain FrameBuffer dim and pixel format
self._serverInit = ServerInit()
#client pixel format
self._pixelFormat = PixelFormat()
#server name
self._serverName = String()
#nb rectangle
self._nbRect = 0
#current rectangle header
self._currentRect = Rectangle()
#for vnc security type
self._password = <PASSWORD>' * 8
    def expectWithHeader(self, expectedHeaderLen, callbackBody):
        """
        2nd level of waiting event
        read expectedHeaderLen that contain body size
        @param expectedHeaderLen: contains the number of bytes, which body length needs to be encoded
        @param callbackBody: next state use when expected date from expectedHeaderLen
        are received
        """
        # remember the body callback; expectedBody will arm it once the
        # length header has been read
        self._callbackBody = callbackBody
        self.expect(expectedHeaderLen, self.expectedBody)
def expectedBody(self, data):
"""
Read header and wait header value to call next state
@param data: Stream that length are to header length (1|2|4 bytes)
set next state to callBack body when length read from header
are received
"""
bodyLen = None
if data.len == 1:
bodyLen = UInt8()
elif data.len == 2:
bodyLen = UInt16Be()
elif data.len == 4:
bodyLen = UInt32Be()
else:
log.error("invalid header length")
return
data.readType(bodyLen)
self.expect(bodyLen.value, self._callbackBody)
    def connect(self):
        """
        Call when transport layer connection is made
        in Client mode -> wait protocol version
        """
        # the RFB banner ("RFB xxx.yyy\n") is always exactly 12 bytes
        self.expect(12, self.recvProtocolVersion)
def readProtocolVersion(self, data):
"""
Read protocol version
@param data: Stream may contain protocol version string (ProtocolVersion)
"""
data.readType(self._version)
if not self._version.value in [ProtocolVersion.RFB003003, ProtocolVersion.RFB003007, ProtocolVersion.RFB003008]:
self._version.value = ProtocolVersion.UNKNOWN
    def recvProtocolVersion(self, data):
        """
        Read handshake packet
        If protocol receive from client is unknown
        try best version of protocol version (ProtocolVersion.RFB003008)
        @param data: Stream
        """
        self.readProtocolVersion(data)
        if self._version.value == ProtocolVersion.UNKNOWN:
            log.info("Unknown protocol version %s send 003.008"%data.getvalue())
            #protocol version is unknown try best version we can handle
            self._version.value = ProtocolVersion.RFB003008
        #send same version of
        self.send(self._version)
        #next state read security
        if self._version.value == ProtocolVersion.RFB003003:
            # 3.3: the server imposes the security type (4-byte value)
            self.expect(4, self.recvSecurityServer)
        else:
            # 3.7/3.8: the server sends a 1-byte-prefixed list of types
            self.expectWithHeader(1, self.recvSecurityList)
    def recvSecurityServer(self, data):
        """
        Security handshake for 33 RFB version
        Server imposed security level
        @param data: well formed packet
        """
        #TODO!!! RFB 003.003 path not implemented: the server picks the
        #security type itself and the client must honor it
        pass
    def recvSecurityList(self, data):
        """
        Read security list packet send from server to client
        @param data: Stream that contains well formed packet
        """
        securityList = []
        while data.dataLen() > 0:
            securityElement = UInt8()
            data.readType(securityElement)
            securityList.append(securityElement)
        #select high security level
        #NOTE(review): the break makes the FIRST acceptable entry win,
        #not necessarily the highest one - confirm this is intended
        for s in securityList:
            if s.value in [SecurityType.NONE, SecurityType.VNC] and s > self._securityLevel:
                self._securityLevel = s
                break
        #send back security level choosen
        self.send(self._securityLevel)
        if self._securityLevel.value == SecurityType.VNC:
            # VNC auth: a 16-byte DES challenge follows
            self.expect(16, self.recvVNCChallenge)
        else:
            self.expect(4, self.recvSecurityResult)
def recvVNCChallenge(self, data):
"""
@summary: receive challenge in VNC authentication case
@param data: Stream that contain well formed packet
"""
key = (self._password + '\0' * 8)[:8]
newkey = []
for ki in range(len(key)):
bsrc = ord(key[ki])
btgt = 0
for i in range(8):
if bsrc & (1 << i):
btgt = btgt | (1 << 7-i)
newkey.append(chr(btgt))
algo = des(newkey)
self.send(String(algo.encrypt(data.getvalue())))
self.expect(4, self.recvSecurityResult)
    def recvSecurityResult(self, data):
        """
        Read security result packet
        Use by server to inform connection status of client
        @param data: Stream that contain well formed packet
        """
        result = UInt32Be()
        data.readType(result)
        # a result of 1 means the server refused the authentication
        if result == UInt32Be(1):
            log.info("Authentification failed")
            if self._version.value == ProtocolVersion.RFB003008:
                # only RFB 3.8 servers send a reason string after failure
                self.expectWithHeader(4, self.recvSecurityFailed)
        else:
            log.debug("Authentification OK")
            self.sendClientInit()
    def recvSecurityFailed(self, data):
        """
        Send by server to inform reason of why it's refused client
        @param data: Stream that contains well formed packet
        """
        # payload is a human readable reason string; logged, not recovered from
        log.info("Security failed cause to %s"%data.getvalue())
    def recvServerInit(self, data):
        """
        Read server init packet
        @param data: Stream that contains well formed packet
        """
        data.readType(self._serverInit)
        # the server name follows, prefixed by a 4-byte length
        self.expectWithHeader(4, self.recvServerName)
    def recvServerName(self, data):
        """
        @summary: Read server name
        Final handshake step: push client parameters, request a full
        (non incremental) framebuffer update and start the order loop.
        @param data: Stream that contains well formed packet
        """
        data.readType(self._serverName)
        log.info("Server name %s"%str(self._serverName))
        #end of handshake
        #send pixel format
        self.sendPixelFormat(self._pixelFormat)
        #write encoding
        self.sendSetEncoding()
        #request entire zone
        self.sendFramebufferUpdateRequest(False, 0, 0, self._serverInit.width.value, self._serverInit.height.value)
        #now i'm ready to send event
        self._clientListener.onReady()
        # wait for the 1-byte server message type
        self.expect(1, self.recvServerOrder)
    def recvServerOrder(self, data):
        """
        @summary: Read order receive from server
        Main function for bitmap update from server to client
        @param data: Stream that contains well formed packet
        """
        packetType = UInt8()
        data.readType(packetType)
        # dispatch on the message type byte:
        # 0 = framebuffer update, 2 = bell, 3 = server cut text
        if packetType.value == 0:
            self.expect(3, self.recvFrameBufferUpdateHeader)
        elif packetType.value == 2:
            self._clientListener.onBell()
        elif packetType.value == 3:
            self.expect(7, self.recvServerCutTextHeader)
        else:
            log.error("Unknown message type %s"%packetType.value)
def recvFrameBufferUpdateHeader(self, data):
"""
@summary: Read frame buffer update packet header
@param data: Stream that contains well formed packet
"""
#padding
nbRect = UInt16Be()
self._nbRect = data.readType((UInt8(), nbRect))
self._nbRect = nbRect.value
self.expect(12, self.recvRectHeader)
def recvRectHeader(self, data):
"""
@summary: Read rectangle header
@param data: Stream that contains well formed | |
#!/usr/bin/env python3
import sys, os, zlib, struct, math, argparse, time, operator
import getopt, hashlib, collections, binascii, stat, difflib
# ./.fkgit, same as ./.git
baseName = '.git'
# Data for one entry in the git index (.git/index)
''' Parse Index File.
| 0 | 4 | 8 | C |
|-------------|--------------|-------------|----------------|
0 | DIRC | Version | File count | Ctime | 0
| Nano-Sec | Mtime | Nano-Sec | Device |
2 | Inode | Mode | UID | GID | 2
| File size | Entry SHA-1 ... ... |
4 | ... ... | Flags | File Name(\0x00) | 4
| Ext-Sig | Ext-Size | Ext-Data (Ext was optional) |
6 | Checksum ... ... ... | 6
-->>
2 | Mode - 32 bit | 4 | Flags - 16 bit
|-------------------| |-------------------------|
| 16-bit unknown | | 1-bit assume-valid flag |
| 4-bit object type | | 1-bit extended flag |
| 3-bit unused | | 2-bit stage |
| 9-bit unix perm | | 12-bit name length |
'''
# IndexEntry = <class '__main__.IndexEntryType'>
# One record of the git index (.git/index): stat(2) metadata for the file
# plus the staged blob SHA-1 (20 raw bytes), flags and repo-relative path.
IndexEntry = collections.namedtuple('IndexEntryType', [
    'ctime_s', 'ctime_n', 'mtime_s', 'mtime_n', 'dev', 'ino', 'mode', 'uid',
    'gid', 'size', 'sha1', 'flags', 'path'])
def lsFiles(verbose = False):
    ''' Show staged contents' object name in the output.

    :param verbose: when True print mode, SHA-1 and stage number per entry
                    (like `git ls-files --stage`); otherwise only the paths.
    '''
    for entry in readIndex():
        if verbose:
            # The merge stage lives in bits 12-13 of the flags word (the
            # low 12 bits hold the name length); the previous code printed
            # a hard-coded 0 for every entry.
            stage = (entry.flags >> 12) & 3
            # > echo "obase = 8; ibase = 10; 33204" | bc ==> 100664
            print('{:06o} {} {}\t{}'.format(entry.mode,
                  binascii.hexlify(entry.sha1).decode('utf-8'),
                  stage, entry.path))
        else:
            print(entry.path)
def diff():
    ''' Show diff between index and working tree. '''
    # map each staged path to its index entry for SHA-1 lookup
    staged = {entry.path: entry for entry in readIndex()}
    # only the "changed" bucket of the status triple is diffed
    changed, _, _ = getStatus()
    for path in changed:
        blobSha = binascii.hexlify(staged[path].sha1).decode('utf-8')
        objType, blobData = readObject(blobSha)
        assert objType == 'blob', "Only Support blob type."
        stagedLines = blobData.decode('utf-8').splitlines()
        workingLines = readFile(path).decode('utf-8').splitlines()
        # For inputs that do not have trailing newlines, set the lineterm
        # argument to "" so that the output will be uniformly newline free
        delta = difflib.unified_diff(
            stagedLines, workingLines,
            'a/{} (index)'.format(path),
            'b/{} (working tree)'.format(path),
            lineterm = '')
        for line in delta:
            print(line)
def findObject(hashCode):
    """ Find object with given SHA-1 prefix and return path to object. Or
    exit if there are no one or more than one object with this prefix.
    """
    # require a usable abbreviation length before touching the store
    if len(hashCode) < 7:
        errMsg("Hash Prefix Must Longer than 7 Characters.")
    # objects are sharded as .git/objects/<first two hex chars>/<rest>
    objDir = os.path.join(baseName, 'objects', hashCode[:2])
    restHashCode = hashCode[2:]
    try:
        objs = [name for name in os.listdir(objDir)
                if name.startswith(restHashCode)]
    except FileNotFoundError:
        errMsg("File Not Found.")
    if not objs:
        # "Object '0fe2738082e4f75c9c6bf154af70c12d9b55af' Not Found."
        print("Object {!r} Not Found.".format(hashCode))
        sys.exit(1)
    # More than ONE match means the prefix is ambiguous; the old check used
    # `> 2` (off by one) and fell through to return an arbitrary match.
    # Exit as the docstring promises.
    if len(objs) > 1:
        print("There Are [{}] Objects with HashCode {!r}.".format(len(objs), hashCode))
        sys.exit(1)
    # .git/objects/48/0fe2738082e4f75c9c6bf154af70c12d9b55af
    return os.path.join(objDir, objs[0])
def readObject(hashCode, printRaw = False):
    ''' Read object with given SHA1 hashcode.

    :param hashCode: SHA-1 prefix of the object to load
    :param printRaw: when True also print the decompressed raw object
    Return: tuple of (type, data), or ValueError if not found.
    '''
    path = findObject(hashCode)
    # object files are zlib compressed: "<type> <size>\x00<payload>"
    fullData = zlib.decompress(readFile(path))
    if printRaw:
        print(fullData)
    # the header ends at the first NUL byte
    try:
        nullIndex = fullData.index(b'\x00')
    except ValueError:
        errMsg("Wrong Object File. Exit Now.")
    header = fullData[0:nullIndex]
    # header = b'tree 114' -> ['tree', '114']
    # renamed from `type` so the builtin is no longer shadowed
    objType, sizeStr = header.decode('utf-8').split()
    size = int(sizeStr)
    data = fullData[nullIndex + 1:]
    assert size == len(data), "Expect size {}, But Got {} bytes.".\
        format(size, len(data))
    return (objType, data)
def readTree(hashCode = None, data = None):
    ''' Read Tree object and return list of (mode, path, sha1) tuples.

    :param hashCode: SHA-1 prefix of a tree object to load from the store
    :param data: raw (decompressed, header-stripped) tree payload
    :raises ValueError: when neither hashCode nor data is supplied
    '''
    if hashCode is not None:
        objType, data = readObject(hashCode)
        # bugfix: tree objects have type 'tree'; the previous assertion
        # compared against the misspelled 'true' and always failed
        assert objType == 'tree'
    elif data is None:
        # previously this only printed a message and then crashed on
        # data.index(None); fail explicitly instead
        raise ValueError("You Should Specify 'sha1' or 'data'")
    # Each entry is "<mode> <path>\x00" followed by a raw 20-byte SHA-1.
    start = 0
    entries = []
    while True:
        try:
            index = data.index(b'\x00', start)
        except ValueError:
            break
        # split on the FIRST space only so paths containing spaces survive
        mode, path = data[start:index].decode('utf-8').split(' ', 1)
        sha1 = data[index + 1:index + 21]
        # ('100664', 'main.cpp', 'd8c1a226697b3a12f925850313e37b91e622e4ce')
        entries.append((mode, path, binascii.hexlify(sha1).decode('utf-8')))
        start = index + 21
    return entries
def catFile(mode, hashCode):
    ''' Upper function of cat-file call.

    :param mode: one of 'raw', 'size', 'type', 'pretty'
    :param hashCode: SHA-1 prefix of the object to display
    '''
    # 'raw' additionally prints the decompressed object inside readObject
    objType, data = readObject(hashCode, mode == 'raw')
    if mode == 'size':
        print(len(data))
    elif mode == 'type':
        print(objType)
    elif mode == 'pretty':
        if objType in ['blob', 'commit']:
            # write bytes directly so binary blobs survive
            sys.stdout.buffer.write(data)
        elif objType == 'tree':
            # loop variables renamed: they previously shadowed the `mode`
            # parameter and the `type` builtin
            for entryMode, path, sha1 in readTree(data = data):
                # int(x, base=8): '100664' -> 33204
                modeInt = int(entryMode, 8)
                # directories inside a tree are themselves trees
                if stat.S_ISDIR(modeInt):
                    entryType = 'tree'
                else:
                    entryType = 'blob'
                # {:06o} keeps the leading zero of directory modes (040000)
                print("{:06o} {} {}\t{}".format(modeInt, entryType, sha1, path))
def getLocalMasterHash():
    ''' Get SHA-1 of the latest commit of local master branch.

    Returns None when the ref file does not exist yet (no commits).
    '''
    # '.fkgit/refs/heads/master' holds the hex SHA-1 of the branch tip
    masterPath = os.path.join(baseName, 'refs', 'heads', 'master')
    try:
        return readFile(masterPath).decode('utf-8').strip()
    except FileNotFoundError:
        return None
def writeTree():
    ''' Write a tree object from the current index file.

    :return: hex SHA-1 of the written tree object
    '''
    # bugfix: treeEntries was appended to without ever being initialized,
    # which raised NameError on the first entry
    treeEntries = []
    for entry in readIndex():
        # entry.mode = 33277, {:o} o => octal
        # '{:o} {}'.format(entry.mode, entry.path) => '100775 demo.py'
        modePath = '{:o} {}'.format(entry.mode, entry.path).encode('utf-8')
        treeEntry = modePath + b'\x00' + entry.sha1
        treeEntries.append(treeEntry)
    # NOTE(review): debug print kept to preserve existing output
    print("treeEntries = ", treeEntries)
    # Example: b'.'.join([b'ab', b'pq', b'rs']) -> b'ab.pq.rs'.
    return(hashObject(b''.join(treeEntries), 'tree', True))
def commit(message):
    ''' Commit, using the index file and given message,
    return: sha1 of commit object. '''
    treeHash = writeTree()
    parent = getLocalMasterHash()
    # fallback identity when GIT_AUTHOR_* is not set
    author = 'Annoymous'
    email = '<EMAIL>'
    try:
        author = '{} <{}>'.format(
            os.environ['GIT_AUTHOR_NAME'], os.environ['GIT_AUTHOR_EMAIL'])
    except KeyError:
        author = '{} <{}>'.format(author, email)
    # bugfix: the author time was hard-coded to '1505732862 -0500' even
    # though the current timestamp was computed; use the real time and the
    # local UTC offset in git's '<epoch> (+|-)HHMM' format
    timeStamp = int(time.mktime(time.localtime()))
    utcOffset = -time.altzone if time.localtime().tm_isdst else -time.timezone
    authorTime = '{} {}{:02}{:02}'.format(
        timeStamp,
        '+' if utcOffset > 0 else '-',
        abs(utcOffset) // 3600,
        (abs(utcOffset) // 60) % 60)
    # commit body: tree, optional parent (absent on the first commit),
    # author/committer lines, blank line, message, trailing newline
    commitInfo = ['tree ' + treeHash]
    if parent:
        commitInfo.append('parent ' + parent)
    commitInfo.append('author {} {}'.format(author, authorTime))
    commitInfo.append('committer {} {}'.format(author, authorTime))
    commitInfo.append('')
    commitInfo.append(message)
    commitInfo.append('')
    data = '\n'.join(commitInfo).encode('utf-8')
    sha1 = hashObject(data, 'commit', True)
    # advance the master ref to the new commit
    masterPath = os.path.join(baseName, 'refs', 'heads', 'master')
    writeFile(masterPath, (sha1 + '\n').encode('utf-8'))
    # [master df34f29] second commit
    print("[master {}] {}".format(sha1, message))
    return sha1
def readIndex():
''' Read index file, return list of IndexEntry object. '''
try:
data = readFile(os.path.join(baseName, 'index'))
except FileNotFoundError:
return []
# calculate checksum leaving the last 20 bytes(checksum itself).
# data[0:20], left included, right not.
checkSum = hashlib.sha1(data[0:-20]).digest()
assert checkSum == data[-20:], "Error, Invalid Index CheckSum."
sigh, ver, fileCnt = struct.unpack('!4sLL', data[0:12])
assert sigh == b'DIRC', \
'Error, Invalid Index Signature {}'.format(sigh)
assert ver == 2, 'Error, Unknown Index Version {}'.format(ver)
# omit header | |
req_dict_with_format = dict(req_dict)
req_dict_with_format["format"] = "json" \
if out_format == "dict" else out_format
# Set the payload on the request message (Python dictionary to JSON
# payload)
MessageUtils.dict_to_json_payload(request, req_dict_with_format)
# Perform a synchronous DXL request
response = self._dxl_sync_request(request)
# If the caller requested "dict" as the output format, convert the JSON
# payload in the DXL response message to a Python dictionary and return
# it. Otherwise, just decode the raw payload per whatever other format
# was requested and return it.
return MessageUtils.json_payload_to_dict(response) \
if out_format == "dict" else \
MessageUtils.decode_payload(response)
def account_information(self, out_format="dict"):
"""
Retrieve information for the DomainTools API user account. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#account-information>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/account-information/>`__
documentation for more information.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
return self._invoke_service(req_dict, self._REQ_TOPIC_ACCOUNT_INFO,
out_format)
def brand_monitor(self, query, exclude=None, domain_status=None,
days_back=None, out_format="dict"):
"""
Retrieves information for domains which match a customer's brand or
monitored word/string. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#brand-monitor>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/brand-monitor/>`__
documentation for more information.
:param query: One or more domain search terms.
:type query: str or list(str) or tuple(str) or set(str)
:param exclude: [``optional``] : Domain names with these words will be
excluded from the result set.
:type exclude: str or list(str) or tuple(str) or set(str)
:param str domain_status: [``optional``] : Scope of the domain names to
search. By default, the API will search both new domain names and
domain names which are now on-hold (pending delete). To narrow your
search to only one of these status codes, set this parameter to
either ``new`` or ``on-hold``.
:param int days_back: [``optional``] : Use this parameter in
exceptional circumstances where you need to search domains
registered prior to the current date.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query, "|")
self._add_exclude_param(req_dict, exclude)
self._add_domain_status_param(req_dict, domain_status)
self._add_days_back_param(req_dict, days_back)
return self._invoke_service(req_dict, self._REQ_TOPIC_BRAND_MONITOR,
out_format)
def domain_profile(self, query, out_format="dict"):
"""
Retrieves a profile for the specified domain name. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#domain-profile>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/domain-profile/>`__
documentation for more information.
:param str query: Domain name for which to retrieve profile
information.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query)
return self._invoke_service(req_dict, self._REQ_TOPIC_DOMAIN_PROFILE,
out_format)
def domain_search(self, query, exclude_query=None, max_length=None,
min_length=None, has_hyphen=None, has_number=None,
active_only=None, deleted_only=None, anchor_left=None,
anchor_right=None, page=None, out_format="dict"):
"""
Retrieves information for domains which match a search string. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#domain-search>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/domain-search/>`__
documentation for more information.
:param query: One or more domain search terms.
:type query: str or list(str) or tuple(str) or set(str)
:param exclude_query: [``optional``] : Domain names with these words
will be excluded from the result set.
:type exclude_query: str or list(str) or tuple(str) or set(str)
:param int max_length: [``optional``] : Limit the maximum domain
character count.
:param int min_length: [``optional``] : Limit the minimum domain
character count.
:param bool has_hyphen: [``optional``] : Return results with hyphens
in the domain name.
:param bool has_number: [``optional``] : Return results with numbers
in the domain name.
:param bool active_only: [``optional``] : Return only domains which
are currently registered.
:param bool deleted_only: [``optional``] : Return only domains
previously registered but not currently registered.
:param bool anchor_left: [``optional``] : Return only domains that
start with the query term.
:param bool anchor_right: [``optional``] : Return only domains that
end with the query term.
:param int page: [``optional``] : Sets the page of results to retrieve
from the server.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query, " ")
self._add_exclude_query_param(req_dict, exclude_query)
self._add_page_param(req_dict, page)
DomainToolsApiClient._add_string_param_by_name(
req_dict, DomainToolsApiClient._PARAM_MAX_LENGTH, max_length)
DomainToolsApiClient._add_string_param_by_name(
req_dict, DomainToolsApiClient._PARAM_MIN_LENGTH, min_length)
DomainToolsApiClient._add_boolean_param_by_name(
req_dict, DomainToolsApiClient._PARAM_HAS_HYPHEN, has_hyphen)
DomainToolsApiClient._add_boolean_param_by_name(
req_dict, DomainToolsApiClient._PARAM_HAS_NUMBER, has_number)
DomainToolsApiClient._add_boolean_param_by_name(
req_dict, DomainToolsApiClient._PARAM_ACTIVE_ONLY, active_only)
DomainToolsApiClient._add_boolean_param_by_name(
req_dict, DomainToolsApiClient._PARAM_DELETED_ONLY, deleted_only)
DomainToolsApiClient._add_boolean_param_by_name(
req_dict, DomainToolsApiClient._PARAM_ANCHOR_LEFT, anchor_left)
DomainToolsApiClient._add_boolean_param_by_name(
req_dict, DomainToolsApiClient._PARAM_ANCHOR_RIGHT, anchor_right)
return self._invoke_service(req_dict, self._REQ_TOPIC_DOMAIN_SEARCH,
out_format)
def domain_suggestions(self, query, out_format="dict"):
"""
Retrieves list of domain names which are similar to words in the
supplied ``query`` parameter. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#domain-suggestions>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/domain-suggestions/>`__
documentation for more information.
:param str query: Domain name for which to retrieve suggestions.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query, " ")
return self._invoke_service(req_dict,
self._REQ_TOPIC_DOMAIN_SUGGESTIONS,
out_format)
def hosting_history(self, query, out_format="dict"):
"""
Retrieves a list of changes which have occurred in a domain name's
registrar, IP address, and name servers. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#hosting-history>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/hosting-history/>`__
documentation for more information.
:param str query: Domain name to retrieve hosting history for.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query)
return self._invoke_service(req_dict, self._REQ_TOPIC_HOSTING_HISTORY,
out_format)
def ip_monitor(self, query, days_back=None, page=None, out_format="dict"):
"""
Retrieves activity for monitored domains which match the ip address
supplied in the ``query`` parameter. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#ip-monitor>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/ip-monitor/>`__
documentation for more information.
:param str query: IP address to query. For example: ``192.168.3.11``.
:param int days_back: [``optional``] : Use this parameter in
exceptional circumstances where you need to search domains
registered prior to the current date.
:param int page: [``optional``] : Sets the page of results to retrieve
from the server.
:param str out_format: [``optional``] : The format in which the
response output should be rendered. Available formats include
``dict``, ``json``, and ``xml``. For ``dict``, the return type is a
Python dictionary. For the other formats, the return type is a
``unicode``.
:return: Response data.
:rtype: dict or unicode
"""
req_dict = {}
self._add_query_param(req_dict, query)
self._add_days_back_param(req_dict, days_back)
self._add_page_param(req_dict, page)
return self._invoke_service(req_dict, self._REQ_TOPIC_IP_MONITOR,
out_format)
def ip_registrant_monitor(self, query, days_back=None,
search_type=None, server=None, country=None,
org=None, page=None, include_total_count=None,
out_format="dict"):
"""
Retrieves information for IP ranges which match one of the terms in the
supplied ``query`` parameter. See
`DXL service method <https://github.com/opendxl/opendxl-domaintools-service-python/wiki/Service-Methods#ip-registrant-monitor>`__
and `DomainTools API <https://www.domaintools.com/resources/api-documentation/ip-registrant-monitor/>`__
documentation for more information.
:param query: One or more free text query terms.
:type query: str or list(str) or tuple(str) or set(str)
:param int days_back: [``optional``] : Use this parameter in
exceptional circumstances where you need to search domains
registered prior to the current date.
:param str search_type: [``optional``] : Type of changes to return.
Valid options are ``all``, ``additions``, ``removals``, and
``modifications``. Defaults to ``all``.
:param str server: [``optional``] : Limits results to ranges from a
particular Whois server.
:param str country: [``optional``] : Limits results to IP addresses
allocated to an entity with a particular country. Valid options are
ISO 3166-1 two character country codes.
:param str org: [``optional``] : Limits results to a particular
organization.
| |
the ith cut incoming edges.
# (iii) decode q_values
# (iv) q_vals[i] <- the ith cut q_values from the ith replica's
decoder_inputs = (cut_encoding, edge_index_dec, edge_attr_dec)
cut_decoding, _, _ = self.decoder_conv(decoder_inputs)
# take the decoder output only at the cut_index and estimate q values
return self.q(cut_decoding)
    def inference_v1(self, cut_encoding):
        """Sequential greedy inference: process one cut at a time in random order.

        Each iteration focuses the decoder's attention on a single cut by
        pointing all decoder edges at it, runs the decoder, and greedily marks
        the cut as selected/discarded according to the argmax of its Q values.

        :param cut_encoding: encoder output, one row per candidate cut
            (assumed shape [ncuts, emb_dim] -- TODO confirm against encoder).
        :return: torch.Tensor of shape [ncuts, 2] with the Q values estimated
            for each cut at the iteration in which it was processed.

        Side effect: stores the per-iteration (edge_index, edge_attr) pairs in
        ``self.decoder_context`` so training can replay all iterations in one
        parallel pass.
        """
        ncuts = cut_encoding.shape[0]
        # rand permutation over available cuts
        inference_order = torch.randperm(ncuts)
        edge_index_dec = torch.cat([torch.arange(ncuts).view(1, -1),
                                    torch.empty((1, ncuts), dtype=torch.long)], dim=0).to(self.device)
        # initialize the decoder with all cuts marked as not (processed, selected)
        decoder_edge_index_list = []
        decoder_edge_attr_list = []
        edge_attr_dec = torch.zeros((ncuts, 2), dtype=torch.float32).to(self.device)
        # create a tensor of all q values to return to user
        q_vals = torch.empty_like(edge_attr_dec)
        # iterate over all cuts in random order, and process one cut each time
        for cut_index in inference_order:
            # set all edges to point from all cuts to the currently processed one (focus the attention mechanism)
            edge_index_dec[1, :] = cut_index
            # store the context (edge_index_dec and edge_attr_dec) of the current iteration
            # NOTE: the snapshot is taken BEFORE the in-place updates below, so
            # each stored context reflects the decoder input of its iteration.
            decoder_edge_attr_list.append(edge_attr_dec.detach().cpu().clone())
            decoder_edge_index_list.append(edge_index_dec.detach().cpu().clone())
            # decode
            decoder_inputs = (cut_encoding, edge_index_dec, edge_attr_dec)
            cut_decoding, _, _ = self.decoder_conv(decoder_inputs)
            # take the decoder output only at the cut_index and estimate q values
            q = self.q(cut_decoding[cut_index, :])
            edge_attr_dec[cut_index, 0] = 1  # mark the current cut as processed
            edge_attr_dec[cut_index, 1] = q.argmax()  # mark the cut as selected or not, greedily according to q
            # store q in the output q_vals tensor
            q_vals[cut_index, :] = q
        # finally, stack the decoder edge_attr and edge_index tensors,
        # and make a transformer context in order to generate later a Transition for training,
        # allowing by that fast parallel backprop
        edge_attr_dec = torch.cat(decoder_edge_attr_list, dim=0)
        edge_index_dec = torch.cat(decoder_edge_index_list, dim=1)
        self.decoder_context = TransformerDecoderContext(edge_index_dec, edge_attr_dec)
        return q_vals
    def inference_v2(self, cut_encoding, edge_index_a2a):
        """Iterative greedy cut selection over the full cut-to-cut graph.

        Unlike ``inference_v1``, every iteration decodes ALL cuts at once and
        picks the global argmax over the (ncuts x 2) Q matrix. Selection stops
        when the argmax falls on a "discard" column (or after all cuts were
        selected).

        :param cut_encoding: encoder output, one row per candidate cut
            (assumed shape [ncuts, emb_dim] -- TODO confirm against encoder).
        :param edge_index_a2a: cut-to-cut edge index
            (presumably shape [2, nedges] -- verify against caller).
        :return: torch.Tensor of shape [ncuts, 2] with the last Q values
            computed for each cut.

        Side effects: stores the accumulated decoder context in
        ``self.decoder_context`` and the greedily built action mask in
        ``self.decoder_greedy_action``.
        """
        ncuts = cut_encoding.shape[0]
        # Build the action iteratively by picking the argmax across all q_values
        # of all cuts.
        # The edge_index_dec at each iteration is the same as edge_index_a2a,
        # and the edge_attr_dec is 1-dim vector indicating whether a cut has been already selected.
        # The not(edge_attr_dec) will serve as mask for finding the next argmax
        # At the end of each iteration, before updating edge_attr_dec with the newly selected cut,
        # the edges pointing to the selected cut are stored in edge_index_list,
        # together with the corresponding edge_attr_dec entries.
        # Those will serve as transformer context to train the selected cut Q value.
        # initialize the decoder with all cuts marked as (not selected)
        edge_attr_dec = torch.zeros((edge_index_a2a.shape[1], ), dtype=torch.float32).to(self.device)
        # todo assert that edge_index_a2a contains all the self loops
        edge_index_dec, edge_attr_dec = add_remaining_self_loops(edge_index_a2a, edge_weight=edge_attr_dec, fill_value=0)
        edge_attr_dec.unsqueeze_(dim=1)
        decoder_edge_index_list = []
        decoder_edge_attr_list = []
        # create a tensor of all q values to return to user
        q_vals = torch.empty(size=(ncuts, 2), dtype=torch.float32)
        selected_cuts_mask = torch.zeros(size=(ncuts,), dtype=torch.bool)
        # run loop until all cuts are selected, or the first one is discarded
        for _ in range(ncuts):
            # decode
            decoder_inputs = (cut_encoding, edge_index_dec, edge_attr_dec)
            cut_decoding, _, _ = self.decoder_conv(decoder_inputs)
            # compute q values for all cuts
            q = self.q(cut_decoding)
            # mask already selected cuts, overriding their q_values by -inf
            q[selected_cuts_mask, :] = -float('Inf')
            # force selecting at least one cut
            # by setting the "discard" q_values of all cuts to -Inf at the first iteration only
            if self.select_at_least_one_cut and not selected_cuts_mask.any():
                masked_q = q.clone()
                masked_q[:, 0] = -float('Inf')
                serial_index = masked_q.argmax()
            else:
                # find argmax [cut_index, selected] and max q_value
                serial_index = q.argmax()
            # translate the serial index to [row, col] (or in other words [cut_index, selected])
            # (relies on q being a contiguous [ncuts, 2] matrix in row-major order)
            cut_index = torch.floor(serial_index.float() / 2).long()
            # a cut is selected if the maximal value is q[cut_index, 1]
            selected = serial_index % 2
            if selected:
                # append to the context list the edges pointing to the selected cut,
                # and their corresponding attr
                cut_incoming_edges_mask = edge_index_dec[1, :] == cut_index
                incoming_edges = edge_index_dec[:, cut_incoming_edges_mask]
                incoming_attr = edge_attr_dec[cut_incoming_edges_mask]
                decoder_edge_attr_list.append(incoming_attr.detach().cpu())
                decoder_edge_index_list.append(incoming_edges.detach().cpu())
                # update the decoder context for the next iteration
                # a. update the cut outgoing edges attribute to "selected"
                cut_outgoing_edges_mask = edge_index_dec[0, :] == cut_index
                edge_attr_dec[cut_outgoing_edges_mask] = selected.float()
                # b. store the q values of the selected cut in the output q_vals
                q_vals[cut_index, :] = q[cut_index, :]
                # c. update the selected_cuts_mask
                selected_cuts_mask[cut_index] = True
                # go to the next iteration to see if there are more useful cuts
            else:
                # stop adding cuts
                # store the current context for the remaining cuts
                remaining_cuts_mask = selected_cuts_mask.logical_not()
                remaining_cuts_idxs = remaining_cuts_mask.nonzero()
                edge_attr_dec = edge_attr_dec.detach().cpu()
                edge_index_dec = edge_index_dec.detach().cpu()
                for cut_index in remaining_cuts_idxs:
                    # append to the context list the edges pointing to the cut_index,
                    # and their corresponding attr
                    cut_incoming_edges_mask = edge_index_dec[1, :] == cut_index
                    incoming_edges = edge_index_dec[:, cut_incoming_edges_mask]
                    incoming_attr = edge_attr_dec[cut_incoming_edges_mask]
                    decoder_edge_attr_list.append(incoming_attr)
                    decoder_edge_index_list.append(incoming_edges)
                # store the last q values of the remaining cuts in the output q_vals
                q_vals[remaining_cuts_mask, :] = q.detach().cpu()[remaining_cuts_mask, :]
                break
        if self.select_at_least_one_cut and ncuts > 0:
            assert selected_cuts_mask.any()
        # store the greedy action built on the fly to return to user,
        # since the q_values.argmax(1) is not necessarily equal to selected_cuts_mask
        self.decoder_greedy_action = selected_cuts_mask
        # finally, stack the decoder edge_attr and edge_index lists,
        # and make a "decoder context" for training the transformer
        edge_attr_dec = torch.cat(decoder_edge_attr_list, dim=0)
        edge_index_dec = torch.cat(decoder_edge_index_list, dim=1)
        self.decoder_context = TransformerDecoderContext(edge_index_dec, edge_attr_dec)
        return q_vals
def get_random_context(self, random_action):
ncuts = random_action.shape[0]
if self.version == 'v1':
inference_order = torch.randperm(ncuts)
elif self.version == 'v2':
selected_idxes = random_action.nonzero()
inference_order = torch.cat([selected_idxes[torch.randperm(len(selected_idxes))],
random_action.logical_not().nonzero()])
decoder_edge_attr_list = []
decoder_edge_index_list = []
edge_index_dec = torch.cat([torch.arange(ncuts).view(1, -1),
torch.empty((1, ncuts), dtype=torch.long)], dim=0)
edge_attr_dec = torch.zeros((ncuts, 2), dtype=torch.float32)
# iterate over all cuts, and assign a context to each one
for cut_index in inference_order:
# set all edges to point from all cuts to the currently processed one (focus the attention mechanism)
edge_index_dec[1, :] = cut_index
# store the context (edge_index_dec and edge_attr_dec) of the current iteration
decoder_edge_attr_list.append(edge_attr_dec.clone())
decoder_edge_index_list.append(edge_index_dec.clone())
# assign the random action of cut_index to the context of the next round
edge_attr_dec[cut_index, 0] = 1 # mark the current cut as processed
edge_attr_dec[cut_index, 1] = random_action[cut_index] # mark the cut as selected or not
# finally, stack the decoder edge_attr and edge_index tensors, and make a transformer context
random_edge_attr_dec = torch.cat(decoder_edge_attr_list, dim=0)
if self.version == 'v2':
# take only the "selected" attribute
random_edge_attr_dec = random_edge_attr_dec[:, 1].unsqueeze(dim=1)
random_edge_index_dec = torch.cat(decoder_edge_index_list, dim=1)
random_action_decoder_context = TransformerDecoderContext(random_edge_index_dec, random_edge_attr_dec)
self.decoder_context = random_action_decoder_context
return random_edge_index_dec.to(self.device), random_edge_attr_dec.to(self.device)
# feed forward Q network - no recurrence
class Qnet(torch.nn.Module):
    """Feed-forward Q-network (no recurrence).

    Encoder: a stack of LPConv layers over the LP state graph producing cut
    embeddings, followed by a stack of cut-to-cut conv layers (CutConv or
    CATConv) producing the cut encoding.
    Q head: a linear layer mapping each cut encoding to two Q-values
    (discard / apply the cut).
    """

    def __init__(self, hparams=None):
        """
        :param dict hparams: optional hyper-parameter overrides; missing keys
            fall back to the defaults used below. ``None`` (the default) is
            treated as an empty dict -- this replaces the original mutable
            default argument ``hparams={}``.
        """
        super(Qnet, self).__init__()
        if hparams is None:
            hparams = {}
        self.hparams = hparams

        # frequently used hyper-parameters, hoisted once
        emb_dim = hparams.get('emb_dim', 32)
        lp_conv_layers = hparams.get('encoder_lp_conv_layers', 1)
        cut_conv_layers = hparams.get('encoder_cut_conv_layers', 1)

        ###########
        # Encoder #
        ###########
        # stack lp conv layers todo consider skip connections
        self.lp_conv = Seq(OrderedDict([
            (f'lp_conv_{i}',
             LPConv(x_v_channels=hparams.get('state_x_v_channels', SCIP_STATE_V_DIM) if i == 0 else emb_dim,
                    x_c_channels=hparams.get('state_x_c_channels', SCIP_STATE_C_DIM) if i == 0 else emb_dim,
                    x_a_channels=hparams.get('state_x_a_channels', SCIP_STATE_A_DIM) if i == 0 else emb_dim,
                    edge_attr_dim=hparams.get('state_edge_attr_dim', 1),  # mandatory - derived from state features
                    emb_dim=emb_dim,
                    aggr=hparams.get('lp_conv_aggr', 'mean'),
                    # only the last layer restricts its output to the cut nodes
                    cuts_only=(i == lp_conv_layers - 1)))
            for i in range(lp_conv_layers)]))

        # stack cut conv layers todo consider skip connections
        # NOTE: the original code instantiated BOTH architectures in a dict and
        # kept one, doing wasted construction work (and, since these modules
        # presumably initialize parameters, perturbing the RNG stream -- fixed
        # seeds may now yield different initial weights). An unrecognized name
        # previously left self.cut_conv = None; it now fails fast.
        cut_conv_arch = hparams.get('cut_conv', 'CATConv')
        if cut_conv_arch == 'CutConv':
            self.cut_conv = Seq(OrderedDict([
                (f'cut_conv_{i}', CutConv(channels=emb_dim,
                                          edge_attr_dim=1,
                                          aggr=hparams.get('cut_conv_aggr', 'mean')))
                for i in range(cut_conv_layers)]))
        elif cut_conv_arch == 'CATConv':
            self.cut_conv = Seq(OrderedDict([
                (f'cat_conv_{i}', CATConv(in_channels=emb_dim,
                                          out_channels=emb_dim // hparams.get('attention_heads', 4),
                                          edge_attr_dim=1,
                                          edge_attr_emb=1,
                                          heads=hparams.get('attention_heads', 4)))
                for i in range(cut_conv_layers)]))
        else:
            raise ValueError(f'unsupported cut_conv architecture: {cut_conv_arch!r}')

        ###########
        # Decoder #
        ###########
        # todo add some standard sequential model, e.g. LSTM

        ##########
        # Q head #
        ##########
        self.q = Lin(emb_dim, 2)  # Q-values for adding a cut or not

    def forward(self,
                x_c,
                x_v,
                x_a,
                edge_index_c2v,
                edge_index_a2v,
                edge_attr_c2v,
                edge_attr_a2v,
                edge_index_a2a,
                edge_attr_a2a,
                **kwargs
                ):
        """Encode the LP state graph and return per-cut Q values.

        :return: torch.Tensor of shape [n, 2], where n is the number of rows
            produced by the cut encoder (presumably one per cut, since the
            last LPConv layer is built with cuts_only=True -- confirm against
            LPConv).
        """
        # encoding
        # run lp conv and generate cut embedding
        lp_conv_inputs = x_c, x_v, x_a, edge_index_c2v, edge_index_a2v, edge_attr_c2v, edge_attr_a2v
        x_a = self.lp_conv(lp_conv_inputs)
        # run cut conv and generate cut encoding
        cut_conv_inputs = x_a, edge_index_a2a, edge_attr_a2a
        cut_encoding, _, _ = self.cut_conv(cut_conv_inputs)
        # decoding
        # todo - add here the sequential decoder stuff.
        # compute q values
        return self.q(cut_encoding)
# imitation learning models - not relevant
class CutsSelector(torch.nn.Module):
def __init__(self, channels, edge_attr_dim, hparams={}):
super(CutsSelector, self).__init__()
self.channels = channels
self.edge_attr_dim = edge_attr_dim
self.factorization_arch = hparams.get('factorization_arch', | |
# Source: quest-gmulcahy/bacpypes -- tests/test_utilities/test_state_machine.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Utilities State Machine
----------------------------
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from ..state_machine import State, StateMachine, StateMachineGroup, match_pdu
from ..time_machine import reset_time_machine, run_time_machine
from ..trapped_classes import TrappedState, TrappedStateMachine
# some debugging
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
class TPDU:
    """Trivial PDU stand-in: stores arbitrary keyword attributes for tests."""

    def __init__(self, **kwargs):
        if _debug: TPDU._debug("__init__ %r", kwargs)
        self.__dict__.update(kwargs)

    def __repr__(self):
        # bug fix: the original had a trailing comma after an unparenthesized
        # generator expression in the join() call, which is a SyntaxError in
        # Python 3.7+ ("Generator expression must be parenthesized")
        return '<TPDU {}>'.format(', '.join(
            '{}={}'.format(k, v) for k, v in self.__dict__.items()
        ))
@bacpypes_debugging
class TestMatchPDU(unittest.TestCase):

    def test_match_pdu(self):
        """Exercise match_pdu() with type filters, attribute filters, and both."""
        if _debug: TestMatchPDU._debug("test_match_pdu")

        pdu = TPDU(x=1)
        OtherClass = type('Anon', (), {})
        other = OtherClass()

        # with no criteria at all, everything matches
        assert match_pdu(pdu)
        assert match_pdu(other)

        # filtering by type: a single class or a tuple of classes
        assert match_pdu(pdu, TPDU)
        assert not match_pdu(pdu, OtherClass)
        assert match_pdu(pdu, (TPDU, OtherClass))

        # filtering by attribute values
        assert match_pdu(pdu, x=1)
        assert not match_pdu(pdu, x=2)
        assert not match_pdu(pdu, y=1)
        assert not match_pdu(other, x=1)

        # type and attribute filters combined
        assert match_pdu(pdu, TPDU, x=1)
        assert not match_pdu(pdu, TPDU, x=2)
        assert not match_pdu(pdu, TPDU, y=1)
@bacpypes_debugging
class TestState(unittest.TestCase):

    def test_state_doc(self):
        """doc() sets the docstring and returns the state for chaining."""
        if _debug: TestState._debug("test_state_doc")

        state = State(None)
        chained = state.doc("test state")
        assert state.doc_string == "test state"
        assert chained is state

        if _debug: TestState._debug(" - passed")

    def test_state_success(self):
        """success() flags the state; any further verdict raises."""
        if _debug: TestState._debug("test_state_success")

        state = State(None)
        chained = state.success()
        assert state.is_success_state
        assert chained is state

        # once flagged, the verdict cannot be repeated or reversed
        with self.assertRaises(RuntimeError):
            state.success()
        with self.assertRaises(RuntimeError):
            state.fail()

        if _debug: TestState._debug(" - passed")

    def test_state_fail(self):
        """fail() flags the state; any further verdict raises."""
        if _debug: TestState._debug("test_state_fail")

        state = State(None)
        chained = state.fail()
        assert state.is_fail_state
        assert chained is state

        # once flagged, the verdict cannot be repeated or reversed
        with self.assertRaises(RuntimeError):
            state.success()
        with self.assertRaises(RuntimeError):
            state.fail()

        if _debug: TestState._debug(" - passed")

    def test_something_else(self):
        """Placeholder test; intentionally does nothing."""
        if _debug: TestState._debug("test_something_else")
        if _debug: TestState._debug(" - passed")
@bacpypes_debugging
class TestStateMachine(unittest.TestCase):
    """Tests of basic StateMachine/TrappedStateMachine behavior: running,
    success/fail terminal states, send/receive transitions, trap callbacks,
    and the transaction log."""

    def test_state_machine_run(self):
        """A machine with no transitions keeps running in its start state."""
        if _debug: TestStateMachine._debug("test_state_machine_run")
        # create a state machine
        tsm = StateMachine()
        # run the machine
        tsm.run()
        # check for still running in the start state
        assert tsm.running
        assert tsm.current_state is tsm.start_state
        if _debug: TestStateMachine._debug(" - passed")

    def test_state_machine_success(self):
        """A start state flagged success terminates the machine successfully."""
        if _debug: TestStateMachine._debug("test_state_machine_success")
        # create a trapped state machine
        tsm = TrappedStateMachine()
        assert isinstance(tsm.start_state, TrappedState)
        # make the start state a success
        tsm.start_state.success()
        # run the machine
        tsm.run()
        # check for success
        assert not tsm.running
        assert tsm.current_state.is_success_state
        if _debug: TestStateMachine._debug(" - passed")

    def test_state_machine_fail(self):
        """A start state flagged fail terminates the machine in failure."""
        if _debug: TestStateMachine._debug("test_state_machine_fail")
        # create a trapped state machine
        tsm = TrappedStateMachine()
        assert isinstance(tsm.start_state, TrappedState)
        # make the start state a fail
        tsm.start_state.fail()
        # run the machine
        tsm.run()
        # check for success
        assert not tsm.running
        assert tsm.current_state.is_fail_state
        if _debug: TestStateMachine._debug(" - passed")

    def test_state_machine_send(self):
        """A send transition fires its callbacks and logs the sent PDU."""
        if _debug: TestStateMachine._debug("test_state_machine_send")
        # create a trapped state machine
        tsm = TrappedStateMachine()
        # make pdu object
        pdu = TPDU()
        # make a send transition from start to success, run the machine
        tsm.start_state.send(pdu).success()
        tsm.run()
        # check for success
        assert not tsm.running
        assert tsm.current_state.is_success_state
        # check the callbacks
        assert tsm.start_state.before_send_pdu is pdu
        assert tsm.start_state.after_send_pdu is pdu
        assert tsm.before_send_pdu is pdu
        assert tsm.after_send_pdu is pdu
        # make sure the pdu was sent
        assert tsm.sent is pdu
        # check the transaction log
        assert len(tsm.transaction_log) == 1
        assert tsm.transaction_log[0][1] is pdu
        if _debug: TestStateMachine._debug(" - passed")

    def test_state_machine_receive(self):
        """A receive transition waits for a matching PDU, then succeeds."""
        if _debug: TestStateMachine._debug("test_state_machine_receive")
        # create a trapped state machine
        tsm = TrappedStateMachine()
        # make pdu object
        pdu = TPDU()
        # make a receive transition from start to success, run the machine
        tsm.start_state.receive(TPDU).success()
        tsm.run()
        # check for still running
        assert tsm.running
        # tell the machine it is receiving the pdu
        tsm.receive(pdu)
        # check for success
        assert not tsm.running
        assert tsm.current_state.is_success_state
        # check the callbacks
        assert tsm.start_state.before_receive_pdu is pdu
        assert tsm.start_state.after_receive_pdu is pdu
        assert tsm.before_receive_pdu is pdu
        assert tsm.after_receive_pdu is pdu
        # check the transaction log
        assert len(tsm.transaction_log) == 1
        assert tsm.transaction_log[0][1] is pdu
        if _debug: TestStateMachine._debug(" - passed")

    def test_state_machine_unexpected(self):
        """A PDU that matches no transition drives the machine to the
        unexpected-receive fail state."""
        if _debug: TestStateMachine._debug("test_state_machine_unexpected")
        # create a trapped state machine
        tsm = TrappedStateMachine()
        # make pdu object
        good_pdu = TPDU(a=1)
        bad_pdu = TPDU(b=2)
        # make a receive transition from start to success, run the machine
        tsm.start_state.receive(TPDU, a=1).success()
        tsm.run()
        # check for still running
        assert tsm.running
        # give the machine a bad pdu
        tsm.receive(bad_pdu)
        # check for fail
        assert not tsm.running
        assert tsm.current_state.is_fail_state
        assert tsm.current_state is tsm.unexpected_receive_state
        # check the callback
        assert tsm.unexpected_receive_pdu is bad_pdu
        # check the transaction log
        assert len(tsm.transaction_log) == 1
        assert tsm.transaction_log[0][1] is bad_pdu
        if _debug: TestStateMachine._debug(" - passed")

    def test_state_machine_call(self):
        """A call transition invokes the given function, then succeeds."""
        if _debug: TestStateMachine._debug("test_state_machine_call")
        # simple hook
        self._called = False
        # create a trapped state machine
        tsm = TrappedStateMachine()
        # make a send transition from start to success, run the machine
        tsm.start_state.call(setattr, self, '_called', True).success()
        tsm.run()
        # check for success
        assert not tsm.running
        # NOTE(review): other tests assert tsm.current_state.is_success_state;
        # confirm the machine itself also exposes is_success_state
        assert tsm.is_success_state
        # check for the call
        assert self._called

    def test_state_machine_call_exception(self):
        """A call transition whose function raises drives the machine to fail."""
        if _debug: TestStateMachine._debug("test_state_machine_call_exception")
        # simple hook
        self._called = False
        def fn():
            self._called = True
            raise AssertionError("error")
        # create a trapped state machine
        tsm = TrappedStateMachine()
        # make a send transition from start to success, run the machine
        tsm.start_state.call(fn).success()
        tsm.run()
        # check for failed call
        assert not tsm.running
        # NOTE(review): see note above about is_fail_state on the machine
        assert tsm.is_fail_state
        # check for the call
        assert self._called

    def test_state_machine_loop_01(self):
        """Send first, then wait to receive: callbacks and log are checked."""
        if _debug: TestStateMachine._debug("test_state_machine_loop_01")
        # create a trapped state machine
        tsm = TrappedStateMachine()
        # make pdu object
        first_pdu = TPDU(a=1)
        if _debug: TestStateMachine._debug(" - first_pdu: %r", first_pdu)
        second_pdu = TPDU(a=2)
        if _debug: TestStateMachine._debug(" - second_pdu: %r", second_pdu)
        # after sending the first pdu, wait for the second
        s0 = tsm.start_state
        s1 = s0.send(first_pdu)
        s2 = s1.receive(TPDU, a=2)
        s2.success()
        # run the machine
        tsm.run()
        # check for still running and waiting
        assert tsm.running
        assert tsm.current_state is s1
        if _debug: TestStateMachine._debug(" - still running and waiting")
        # give the machine the second pdu
        tsm.receive(second_pdu)
        # check for success
        assert not tsm.running
        assert tsm.current_state.is_success_state
        if _debug: TestStateMachine._debug(" - success")
        # check the callbacks
        assert s0.before_send_pdu is first_pdu
        assert s0.after_send_pdu is first_pdu
        assert s1.before_receive_pdu is second_pdu
        assert s1.after_receive_pdu is second_pdu
        if _debug: TestStateMachine._debug(" - callbacks passed")
        # check the transaction log
        assert len(tsm.transaction_log) == 2
        assert tsm.transaction_log[0][1] is first_pdu
        assert tsm.transaction_log[1][1] is second_pdu
        if _debug: TestStateMachine._debug(" - transaction log passed")

    def test_state_machine_loop_02(self):
        """Receive first, then send in response: callbacks and log are checked."""
        if _debug: TestStateMachine._debug("test_state_machine_loop_02")
        # create a trapped state machine
        tsm = TrappedStateMachine()
        # make pdu object
        first_pdu = TPDU(a=1)
        second_pdu = TPDU(a=2)
        # when the first pdu is received, send the second
        s0 = tsm.start_state
        s1 = s0.receive(TPDU, a=1)
        s2 = s1.send(second_pdu)
        s2.success()
        # run the machine
        tsm.run()
        # check for still running
        assert tsm.running
        if _debug: TestStateMachine._debug(" - still running")
        # give the machine the first pdu
        tsm.receive(first_pdu)
        # check for success
        assert not tsm.running
        assert tsm.current_state.is_success_state
        if _debug: TestStateMachine._debug(" - success")
        # check the callbacks
        assert s0.before_receive_pdu is first_pdu
        assert s0.after_receive_pdu is first_pdu
        assert s1.before_send_pdu is second_pdu
        assert s1.after_send_pdu is second_pdu
        if _debug: TestStateMachine._debug(" - callbacks passed")
        # check the transaction log
        assert len(tsm.transaction_log) == 2
        assert tsm.transaction_log[0][1] is first_pdu
        assert tsm.transaction_log[1][1] is second_pdu
        if _debug: TestStateMachine._debug(" - transaction log passed")
@bacpypes_debugging
class TestStateMachineTimeout1(unittest.TestCase):

    def test_state_machine_timeout_1(self):
        """A timeout transition from the start state fires and succeeds."""
        if _debug: TestStateMachineTimeout1._debug("test_state_machine_timeout_1")

        # create a trapped state machine
        tsm = TrappedStateMachine()

        # make a timeout transition from start to success
        tsm.start_state.timeout(1.0).success()

        reset_time_machine()
        if _debug: TestStateMachineTimeout1._debug(" - time machine reset")

        tsm.run()
        run_time_machine(60.0)
        if _debug: TestStateMachineTimeout1._debug(" - time machine finished")

        # check for success
        assert not tsm.running
        assert tsm.current_state.is_success_state

        # bug fix: this debug call was attributed to TestStateMachine
        if _debug: TestStateMachineTimeout1._debug(" - passed")
@bacpypes_debugging
class TestStateMachineTimeout2(unittest.TestCase):

    def test_state_machine_timeout_2(self):
        """Chained send/timeout/send/timeout transitions fire in order and
        leave both PDUs in the transaction log."""
        if _debug: TestStateMachineTimeout2._debug("test_state_machine_timeout_2")

        # make some pdu's
        first_pdu = TPDU(a=1)
        second_pdu = TPDU(a=2)

        # create a trapped state machine
        tsm = TrappedStateMachine()
        s0 = tsm.start_state

        # send something, wait, send something, wait, success
        s1 = s0.send(first_pdu)
        s2 = s1.timeout(1.0)
        s3 = s2.send(second_pdu)
        # (the final state was previously bound to an unused local `s4`)
        s3.timeout(1.0).success()

        reset_time_machine()
        if _debug: TestStateMachineTimeout2._debug(" - time machine reset")

        tsm.run()
        run_time_machine(60.0)
        if _debug: TestStateMachineTimeout2._debug(" - time machine finished")

        # check for success
        assert not tsm.running
        assert tsm.current_state.is_success_state

        # check the transaction log
        assert len(tsm.transaction_log) == 2
        assert tsm.transaction_log[0][1] is first_pdu
        assert tsm.transaction_log[1][1] is second_pdu

        # bug fix: this debug call was attributed to TestStateMachine
        if _debug: TestStateMachineTimeout2._debug(" - passed")
@bacpypes_debugging
class TestStateMachineGroup(unittest.TestCase):
def test_state_machine_group_success(self):
if _debug: TestStateMachineGroup._debug("test_state_machine_group_success")
# create a state machine group
smg = StateMachineGroup()
# create a trapped state machine, start state is success
tsm = TrappedStateMachine()
tsm.start_state.success()
# add it to the group
smg.append(tsm)
reset_time_machine()
if _debug: TestStateMachineGroup._debug(" - time machine reset")
# tell the group to run
smg.run()
run_time_machine(60.0)
if _debug: TestStateMachineGroup._debug(" - time machine finished")
# check for success
| |
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
@dataclass
class DataCollatorForWeightedLanguageModeling(DataCollator):
    """MLM data collator whose per-token masking probability is scaled by a
    per-vocabulary-id weight.

    Each position gets probability proportional to ``1 + weighted_vocab[id]``,
    renormalized per sequence so the *average* masking rate stays at
    ``mlm_probability``.
    """
    tokenizer: PreTrainedTokenizer
    mlm: bool = True
    mlm_probability: float = 0.15
    # Indexable by token id; assumed to cover the full vocabulary and to be
    # non-negative after the +1 offset -- TODO confirm against its producer.
    weighted_vocab: list = None

    def collate_batch(self, examples: List[torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Stack/pad ``examples`` and produce (input_ids, labels)."""
        batch = self._tensorize_batch(examples)
        if self.mlm:
            inputs, labels = self.mask_tokens(batch)
            return {"input_ids": inputs, "labels": labels}
        # BUG FIX: ``labels`` was previously undefined on this branch
        # (NameError at runtime). Use the batch itself as labels and ignore
        # padding positions in the loss, matching standard LM collators.
        labels = batch.clone()
        if self.tokenizer.pad_token_id is not None:
            labels[labels == self.tokenizer.pad_token_id] = -100
        return {"input_ids": batch, "labels": labels}

    def _tensorize_batch(self, examples: List[torch.Tensor]) -> torch.Tensor:
        """Stack equal-length examples; otherwise right-pad with pad_token_id."""
        length_of_first = examples[0].size(0)
        are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
        if are_tensors_same_length:
            return torch.stack(examples, dim=0)
        if self.tokenizer._pad_token is None:
            raise ValueError(
                "You are attempting to pad samples but the tokenizer you are using"
                f" ({self.tokenizer.__class__.__name__}) does not have one."
            )
        return pad_sequence(examples, batch_first=True, padding_value=self.tokenizer.pad_token_id)

    def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Weighted BERT-style masking: 80% [MASK], 10% random, 10% unchanged."""
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
            )
        labels = inputs.clone()  # [batch, seq_len]
        seq_len = labels.size(1)
        # Vectorized replacement of the original per-token Python loops:
        # weight = 1 + weighted_vocab[token_id], renormalized per row so the
        # expected masking rate equals mlm_probability.
        vocab_weights = torch.as_tensor(self.weighted_vocab, dtype=torch.float)
        token_weights = vocab_weights[labels] + 1.0  # strictly positive
        probability_matrix = (
            token_weights / token_weights.sum(dim=1, keepdim=True)
        ) * seq_len * self.mlm_probability
        # torch.bernoulli requires probabilities in [0, 1]; heavily weighted
        # tokens could previously exceed 1 and crash.
        probability_matrix.clamp_(max=1.0)
        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        labels[~masked_indices] = -100  # we only compute loss on masked tokens
        # 80% of the time, replace masked input tokens with [MASK]
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% of the time, replace with a random word (half of the remaining 20%)
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]
        # the remaining 10% keep the original token unchanged
        return inputs, labels
@dataclass
class DataCollatorForTrainGen(DataCollator):
    """Collates pre-masked examples into generator training batches.

    Each per-token label is 1 where an MLM label exists (``!= -100``, i.e. the
    position was masked) and 0 elsewhere.
    """
    tokenizer: PreTrainedTokenizer

    def collate_batch(self, examples: List) -> Dict[str, torch.Tensor]:
        """Stack input_ids and convert MLM labels to binary mask targets."""
        all_inputs = []
        all_labels = []
        # NOTE: the original computed mask_token_id on every iteration but
        # never used it; that dead tokenizer call has been removed.
        for instance in examples:
            all_inputs.append(instance["input_ids"])
            # 1 marks a position carrying an MLM label, 0 everything else.
            all_labels.append([1 if i != -100 else 0 for i in instance["labels"]])
        return {
            "input_ids": torch.stack(all_inputs, dim=0),
            "labels": torch.tensor(all_labels, dtype=torch.long)
        }
@dataclass
class DataCollatorForMaskGen(DataCollator):
    """Collator that asks a trained generator for per-token masking
    probabilities, then applies BERT-style 80/10/10 corruption."""
    tokenizer: PreTrainedTokenizer
    generator: MaskGenerator
    mlm: bool = True
    mlm_probability: float = 0.15

    def collate_batch(self, examples: List) -> Dict[str, torch.Tensor]:
        """Build a batch whose mask positions are sampled from the generator."""
        all_input_ids = torch.tensor([instance.input_ids for instance in examples], dtype=torch.long)
        all_attention_mask = torch.tensor([instance.attention_mask for instance in examples], dtype=torch.long)
        all_token_type_ids = torch.tensor([instance.token_type_ids for instance in examples], dtype=torch.long)
        generator_input = {
            "input_ids": all_input_ids,
            "attention_mask": all_attention_mask,
            "token_type_ids": all_token_type_ids
        }
        # Per-token masking probabilities predicted by the generator
        # (debug print of the raw predictions removed).
        out = self.generator.predict(generator_input).float()
        all_labels = all_input_ids.clone()
        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in all_labels.tolist()
        ]
        # BUG FIX: masked_fill() is out-of-place and its result was discarded,
        # so special tokens and padding could still be selected for masking.
        # Use the in-place masked_fill_ like every other collator here.
        out.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = all_labels.eq(self.tokenizer.pad_token_id)
            out.masked_fill_(padding_mask, value=0.0)
        masked_indices = torch.bernoulli(out).bool()
        all_labels[~masked_indices] = -100  # loss only on masked positions
        # 80% of masked positions -> [MASK]
        indices_replaced = torch.bernoulli(torch.full(out.shape, 0.8)).bool() & masked_indices
        all_input_ids[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% -> random token (half of the remaining 20%)
        indices_random = torch.bernoulli(torch.full(all_labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), all_labels.shape, dtype=torch.long)
        all_input_ids[indices_random] = random_words[indices_random]
        # remaining 10% keep the original token
        return {
            "input_ids": all_input_ids,
            "labels": all_labels
        }
@dataclass
class DataCollatorForDistillLM(DataCollator):
    """Distillation collator: draws ``mlm_sample_times`` random maskings per
    instance, lets the selector pick one, and converts that sample's mask
    positions into per-token binary labels (1 = masked)."""
    tokenizer: PreTrainedTokenizer
    selector: MaskSelector
    mlm_sample_times: int = 16
    mlm: bool = True
    mlm_probability: float = 0.15

    def collate_batch(self, examples: List) -> Dict[str, torch.Tensor]:
        """Produce sequence-labelling targets from selector-chosen maskings."""
        all_inputs = []
        all_attention_mask = []
        all_token_type_ids = []
        all_labels = []
        # Invariant across instances; avoid re-querying the tokenizer per loop.
        mask_token_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        for instance in examples:
            # Replicate every feature mlm_sample_times times -> [samples, seq].
            batch = {
                k: torch.tensor([v for _ in range(self.mlm_sample_times)], dtype=torch.long)
                for k, v in vars(instance).items()
            }
            inputs, labels = self.mask_tokens(batch["input_ids"])
            selector_input = {
                "input_ids": inputs,
                "attention_mask": batch["attention_mask"],
                "token_type_ids": batch["token_type_ids"],
            }
            out = self.selector.predict(selector_input)
            selected_instance = inputs[out]  # [seq_len], the chosen masking
            # Binary target: 1 wherever the chosen sample placed a [MASK].
            sl_labels = [1 if tok == mask_token_id else 0
                         for tok in selected_instance.tolist()]
            all_inputs.append(instance.input_ids)
            all_attention_mask.append(instance.attention_mask)
            all_token_type_ids.append(instance.token_type_ids)
            all_labels.append(sl_labels)
        return {
            "input_ids": torch.tensor(all_inputs, dtype=torch.long),
            "attention_mask": torch.tensor(all_attention_mask, dtype=torch.long),
            "token_type_ids": torch.tensor(all_token_type_ids, dtype=torch.long),
            "labels": torch.tensor(all_labels, dtype=torch.long)
        }

    def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
            )
        labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with
        # probability mlm_probability, defaults to 0.15 as in BERT/RoBERTa).
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens
        # 80% of the time, we replace masked input tokens with [MASK]
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
        # 10% of the time, we replace masked input tokens with a random word
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]
        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels

    def _tensorize_batch(self, examples: List[torch.Tensor]) -> torch.Tensor:
        """Stack equal-length examples; otherwise right-pad with pad_token_id."""
        length_of_first = examples[0].size(0)
        are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
        if are_tensors_same_length:
            return torch.stack(examples, dim=0)
        if self.tokenizer._pad_token is None:
            raise ValueError(
                "You are attempting to pad samples but the tokenizer you are using"
                f" ({self.tokenizer.__class__.__name__}) does not have one."
            )
        return pad_sequence(examples, batch_first=True, padding_value=self.tokenizer.pad_token_id)
@dataclass
class DataCollatorForSelectLM(DataCollator):
"""
Data collator used for language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
"""
tokenizer: PreTrainedTokenizer
selector: MaskSelector
mlm_sample_times: int = 1
mlm: bool = True
mlm_probability: float = 0.15
def collate_batch(self, examples: List) -> Dict[str, torch.Tensor]:
# batch = self._tensorize_batch(examples)
all_inputs = []
all_labels = []
for instance in examples:
batch = {}
for k, v in vars(instance).items():
batch[k] = torch.tensor([getattr(instance, k) for _ in range(self.mlm_sample_times)], dtype=torch.long)
inputs, labels = self.mask_tokens(batch["input_ids"])
selector_input = {
"input_ids":inputs,
"attention_mask":batch["attention_mask"],
"token_type_ids":batch["token_type_ids"],
}
out = self.selector.predict(selector_input)
selected_instance = batch["input_ids"][out]
# show examples
selected_inputs = selected_instance.detach().cpu().numpy()
selected_labels = labels[0].detach().cpu().numpy()
all_inputs.append(selected_instance)
all_labels.append(labels[0])
return {
"input_ids":torch.stack(all_inputs, dim=0),
"labels": torch.stack(all_labels, dim=0)
}
def _tensorize_batch(self, examples: List[torch.Tensor]) -> torch.Tensor:
length_of_first = examples[0].size(0)
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length:
return torch.stack(examples, dim=0)
else:
if self.tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({self.tokenizer.__class__.__name__}) does not have one."
)
return pad_sequence(examples, batch_first=True, padding_value=self.tokenizer.pad_token_id)
def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with | |
# tools/yaml-validate.py (repository: smolar/tripleo-heat-templates)
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import re
import six
import sys
import traceback
import yaml
from copy import copy
def is_string(value):
    """Return True when *value* is a text string (py2/py3 compatible via six)."""
    return isinstance(value, six.string_types)
# Only permit the template alias versions.
# The current template version should be the last element.
# As tripleo-heat-templates is a branched repository, this
# list should contain only the alias name for the current branch.
# This allows to avoid merging old templates versions aliases.
valid_heat_template_versions = [
    'rocky',
]

current_heat_template_version = valid_heat_template_versions[-1]

# Parameters every service template must accept.
required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
                   'RoleName', 'RoleParameters', 'ServiceData']

# NOTE(bnemec): The duplication in this list is intentional. For the
# transition to generated environments we have two copies of these files,
# so they need to be listed twice. Once the deprecated version can be removed
# the duplicate entries can be as well.
envs_containing_endpoint_map = ['no-tls-endpoints-public-ip.yaml',
                                'tls-endpoints-public-dns.yaml',
                                'tls-endpoints-public-ip.yaml',
                                'tls-everywhere-endpoints-dns.yaml']
# Base template that defines the canonical EndpointMap parameter.
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'

# Output sections a containerized service template may define in addition to
# the required/optional docker sections below.
OPTIONAL_SECTIONS = ['ansible_group_vars',
                     'cellv2_discovery',
                     'firewall_rules',
                     'keystone_resources']
REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
                            'config_settings']
OPTIONAL_DOCKER_SECTIONS = ['container_puppet_tasks', 'upgrade_tasks',
                            'deploy_steps_tasks',
                            'pre_upgrade_rolling_tasks',
                            'fast_forward_upgrade_tasks',
                            'fast_forward_post_upgrade_tasks',
                            'post_upgrade_tasks', 'update_tasks',
                            'post_update_tasks', 'service_config_settings',
                            'host_prep_tasks', 'metadata_settings',
                            'kolla_config', 'global_config_settings',
                            'external_deploy_tasks',
                            'external_post_deploy_tasks',
                            'container_config_scripts', 'step_config',
                            'monitoring_subscription', 'scale_tasks',
                            'external_update_tasks', 'external_upgrade_tasks']
# Per-file overrides of REQUIRED_DOCKER_SECTIONS for templates that
# intentionally omit some sections.
REQUIRED_DOCKER_SECTIONS_OVERRIDES = {
    # Runs puppet within a container
    './deployment/neutron/neutron-agents-ib-config-container-puppet.yaml': [
        'service_name',
        'docker_config',
        'config_settings'
    ],
    # Just sets hieradata
    './deployment/neutron/neutron-ovn-dpdk-config-container-puppet.yaml': [
        'service_name',
        'config_settings'
    ],
    # Does not deploy container
    './deployment/ceilometer/ceilometer-base-container-puppet.yaml': [
        'service_name',
        'config_settings'
    ],
    # Does not manage container using docker_config
    './deployment/nova/nova-libvirt-guests-container-puppet.yaml': [
        'service_name',
        'puppet_config',
        'config_settings'
    ],
    # Inherits sections
    './deployment/haproxy/haproxy-edge-container-puppet.yaml': [
        'service_name',
        'config_settings'
    ],
    './deployment/glance/glance-api-edge-container-puppet.yaml': [
        'service_name',
    ],
}
# ansible tasks cannot be an empty dict or ansible is unhappy
ANSIBLE_TASKS_SECTIONS = ['upgrade_tasks', 'pre_upgrade_rolling_tasks',
                          'fast_forward_upgrade_tasks',
                          'fast_forward_post_upgrade_tasks',
                          'post_upgrade_tasks', 'update_tasks',
                          'post_update_tasks', 'host_prep_tasks',
                          'external_deploy_tasks',
                          'external_post_deploy_tasks']
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
                                          'config_image']
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags', 'volumes']
REQUIRED_DOCKER_LOGGING_OUTPUTS = ['config_settings', 'docker_config',
                                   'volumes', 'host_prep_tasks']
# Mapping of parameter names to a list of the fields we should _not_ enforce
# consistency across files on. This should only contain parameters whose
# definition we cannot change for backwards compatibility reasons. New
# parameters to the templates should not be added to this list.
PARAMETER_DEFINITION_EXCLUSIONS = {
    'CephPools': ['description', 'type', 'default'],
    'ManagementNetCidr': ['default'],
    'ManagementAllocationPools': ['default'],
    'ExternalNetCidr': ['default'],
    'ExternalAllocationPools': ['default'],
    'StorageNetCidr': ['default'],
    'StorageAllocationPools': ['default'],
    'StorageMgmtNetCidr': ['default'],
    'StorageMgmtAllocationPools': ['default'],
    'TenantNetCidr': ['default'],
    'TenantAllocationPools': ['default'],
    'InternalApiNetCidr': ['default'],
    'InternalApiAllocationPools': ['default'],
    'UpdateIdentifier': ['description'],
    'key_name': ['default'],
    'CeilometerAgentCentralLoggingSource': ['default'],
    'CeilometerAgentIpmiLoggingSource': ['default'],
    'CeilometerAgentNotificationLoggingSource': ['default'],
    'CinderApiLoggingSource': ['default'],
    'CinderSchedulerLoggingSource': ['default'],
    'CinderVolumeLoggingSource': ['default'],
    'DesignateApiLoggingSource': ['default'],
    'DesignateCentralLoggingSource': ['default'],
    'DesignateMiniDNSLoggingSource': ['default'],
    'DesignateProducerLoggingSource': ['default'],
    'DesignateSinkLoggingSource': ['default'],
    'DesignateWorkerLoggingSource': ['default'],
    'GlanceApiLoggingSource': ['default'],
    'GnocchiApiLoggingSource': ['default'],
    'HeatApiCfnLoggingSource': ['default'],
    'HeatApiLoggingSource': ['default'],
    'HeatEngineLoggingSource': ['default'],
    'KeystoneLoggingSource': ['default'],
    'KeystoneErrorLoggingSource': ['default'],
    'KeystoneAdminAccessLoggingSource': ['default'],
    'KeystoneAdminErrorLoggingSource': ['default'],
    'KeystoneMainAcccessLoggingSource': ['default'],
    'KeystoneMainErrorLoggingSource': ['default'],
    'LibvirtVncCACert': ['description'],
    'NeutronApiLoggingSource': ['default'],
    'NeutronDhcpAgentLoggingSource': ['default'],
    'NeutronL3AgentLoggingSource': ['default'],
    'NeutronMetadataAgentLoggingSource': ['default'],
    'NeutronOpenVswitchAgentLoggingSource': ['default'],
    'NovaApiLoggingSource': ['default'],
    'NovaComputeLoggingSource': ['default'],
    'NovaConductorLoggingSource': ['default'],
    'NovaMetadataLoggingSource': ['default'],
    'NovaSchedulerLoggingSource': ['default'],
    'NovaVncproxyLoggingSource': ['default'],
    'OctaviaApiLoggingSource': ['default'],
    'OctaviaHealthManagerLoggingSource': ['default'],
    'OctaviaHousekeepingLoggingSource': ['default'],
    'OctaviaWorkerLoggingSource': ['default'],
    'OvnMetadataAgentLoggingSource': ['default'],
    'PlacementLoggingSource': ['default'],
    'SaharaApiLoggingSource': ['default'],
    'SaharaEngineLoggingSource': ['default'],
    # There's one template that defines this
    # differently, and I'm not sure if we can
    # safely change it.
    'ControlPlaneDefaultRoute': ['default'],
    # TODO(bnemec): Address these existing inconsistencies.
    'ServiceNetMap': ['description', 'default'],
    'network': ['default'],
    'ControlPlaneIP': ['default',
                       'description'],
    'ControlPlaneIp': ['default',
                       'description'],
    'NeutronBigswitchLLDPEnabled': ['default'],
    'NeutronWorkers': ['description'],
    'ServerMetadata': ['description'],
    'server': ['description'],
    'servers': ['description'],
    'ExtraConfig': ['description'],
    'DefaultPasswords': ['description',
                         'default'],
    'BondInterfaceOvsOptions': ['description',
                                'default',
                                'constraints'],
    # NOTE(anil): This is a temporary change and
    # will be removed once bug #1767070 properly
    # fixed. OVN supports only VLAN, geneve
    # and flat for NeutronNetworkType. But VLAN
    # tenant networks have a limited support
    # in OVN. Till that is fixed, we restrict
    # NeutronNetworkType to 'geneve'.
    'NeutronNetworkType': ['description', 'default', 'constraints'],
    'KeyName': ['constraints'],
    'OVNSouthboundServerPort': ['description'],
    'ExternalInterfaceDefaultRoute': ['description', 'default'],
    'ManagementInterfaceDefaultRoute': ['description', 'default'],
    'IPPool': ['description'],
    'SSLCertificate': ['description', 'default', 'hidden'],
    'NodeIndex': ['description'],
    'name': ['description', 'default'],
    'image': ['description', 'default'],
    'NeutronBigswitchAgentEnabled': ['default'],
    'EndpointMap': ['description', 'default'],
    'ContainerManilaConfigImage': ['description', 'default'],
    'replacement_policy': ['default'],
    'CloudDomain': ['description', 'default'],
    'EnableLoadBalancer': ['description'],
    'ControllerExtraConfig': ['description'],
    'NovaComputeExtraConfig': ['description'],
    'controllerExtraConfig': ['description'],
    'ContainerSwiftConfigImage': ['default'],
    'input_values': ['default'],
    'fixed_ips': ['default', 'type']
}
# Spellings that to_camel_case() must use instead of naive capitalization.
PREFERRED_CAMEL_CASE = {
    'haproxy': 'HAProxy',
    'metrics-qdr': 'MetricsQdr'
}
# Overrides for docker/puppet validation
# <filename>: True explicitly enables validation
# <filename>: False explicitly disables validation
#
# If a filename is not found in the overrides then the top level directory is
# used to determine which validation method to use.
VALIDATE_PUPPET_OVERRIDE = {
    # deployment/rabbitmq/rabbitmq-messaging*.yaml provide oslo_messaging services
    './deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml': False,
    './deployment/rabbitmq/rabbitmq-messaging-notify-container-puppet.yaml': False,
    './deployment/rabbitmq/rabbitmq-messaging-rpc-container-puppet.yaml': False,
    # docker/services/messaging/*.yaml provide oslo_messaging services
    './deployment/messaging/rpc-qdrouterd-container-puppet.yaml': False,
    # docker/services/pacemaker/*-rabbitmq.yaml provide oslo_messaging services
    './deployment/rabbitmq/rabbitmq-messaging-notify-pacemaker-puppet.yaml': False,
    './deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml': False,
    # qdr aliases rabbitmq service to provide alternative messaging backend
    './puppet/services/qdr.yaml': False,
    # puppet/services/messaging/*.yaml provide oslo_messaging services
    './puppet/services/messaging/rpc-qdrouterd.yaml': False,
}
VALIDATE_DOCKER_OVERRIDE = {
    # deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml does not
    # deploy container
    './deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml': False,
    # Does not follow the filename pattern
    './deployment/multipathd/multipathd-container.yaml': True
}
# Heat resource types the validator treats as deployments / software configs.
DEPLOYMENT_RESOURCE_TYPES = [
    'OS::Heat::SoftwareDeploymentGroup',
    'OS::Heat::StructuredDeploymentGroup',
    'OS::Heat::SoftwareDeployment',
    'OS::Heat::StructuredDeployment',
    'OS::TripleO::SoftwareDeployment'
]
CONFIG_RESOURCE_TYPES = [
    'OS::Heat::SoftwareConfig',
    'OS::Heat::StructuredConfig'
]
WORKFLOW_TASKS_EXCLUSIONS = [
    './deployment/octavia/octavia-deployment-config.yaml',
    './deployment/ceph-ansible/ceph-external.yaml',
    './deployment/ceph-ansible/ceph-osd.yaml',
    './deployment/ceph-ansible/ceph-rbdmirror.yaml',
    './deployment/ceph-ansible/ceph-client.yaml',
    './deployment/ceph-ansible/ceph-mds.yaml',
    './deployment/ceph-ansible/ceph-rgw.yaml',
    './deployment/ceph-ansible/ceph-base.yaml',
    './deployment/ceph-ansible/ceph-mon.yaml',
    './deployment/ceph-ansible/ceph-mgr.yaml',
]
HEAT_OUTPUTS_EXCLUSIONS = [
    './puppet/extraconfig/tls/ca-inject.yaml',
    './deployed-server/deployed-server.yaml',
    './extraconfig/tasks/ssh/host_public_key.yaml',
    './extraconfig/pre_network/host_config_and_reboot.yaml'
]
CONFIG_IMAGE_OVERRIDES = {
    'ContainerSwiftRingbuilderConfigImage': 'ContainerSwiftConfigImage'
}
SERVICE_NAME_OVERRIDE = {
    './deployment/rabbitmq/rabbitmq-messaging-pacemaker-puppet.yaml': 'rabbitmq',
}
def exit_usage():
    """Print the command usage line and terminate with exit status 1."""
    usage = 'Usage %s <yaml file or directory>' % sys.argv[0]
    print(usage)
    sys.exit(1)
def to_camel_case(string):
    """Convert a snake_case name to CamelCase, honoring the project's
    preferred spellings (e.g. haproxy -> HAProxy)."""
    default = ''.join(part.capitalize() or '_' for part in string.split('_'))
    return PREFERRED_CAMEL_CASE.get(string, default)
def get_base_endpoint_map(filename):
    """Return the default EndpointMap defined in *filename*, or None on error
    (the traceback is printed so validation output shows what went wrong)."""
    try:
        with open(filename, 'r') as f:
            tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
        return tpl['parameters']['EndpointMap']['default']
    except Exception:
        print(traceback.format_exc())
        return None
def get_endpoint_map_from_env(filename):
    """Return {'file': filename, 'map': <EndpointMap parameter_default>} from
    an environment file, or None on error (traceback printed)."""
    try:
        with open(filename, 'r') as f:
            tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
        return {
            'file': filename,
            'map': tpl['parameter_defaults']['EndpointMap']
        }
    except Exception:
        print(traceback.format_exc())
        return None
def validate_endpoint_map(base_map, env_map):
    """Return True when the environment map defines exactly the same endpoint
    names as the base map (values are not compared)."""
    return sorted(base_map) == sorted(env_map)
def validate_role_name(filename):
    """A role file must be named after the role it defines. Returns 0/1."""
    with open(filename, 'r') as f:
        role_data = yaml.load(f.read(), Loader=yaml.SafeLoader)[0]
    expected = os.path.basename(filename).split('.')[0]
    if role_data['name'] != expected:
        print('ERROR: role name should match file name for role : %s.'
              % filename)
        return 1
    return 0
def validate_hci_compute_services_default(env_filename, env_tpl):
    """HCI env ComputeServices minus CephOSD must equal roles/Compute.yaml.

    NOTE: mutates the parsed env template (removes the CephOSD entry).
    Returns 0 on success, 1 on mismatch.
    """
    env_services_list = env_tpl['parameter_defaults']['ComputeServices']
    env_services_list.remove('OS::TripleO::Services::CephOSD')
    roles_filename = os.path.join(os.path.dirname(env_filename),
                                  '../roles/Compute.yaml')
    with open(roles_filename, 'r') as f:
        roles_tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
    for role in roles_tpl:
        if role['name'] != 'Compute':
            continue
        if sorted(env_services_list) != sorted(role['ServicesDefault']):
            print('ERROR: ComputeServices in %s is different from '
                  'ServicesDefault in roles/Compute.yaml' % env_filename)
            return 1
    return 0
def validate_hci_computehci_role(hci_role_filename, hci_role_tpl):
    """ComputeHCI must equal roles/Compute.yaml plus the CephOSD service.

    NOTE: mutates hci_role_tpl (removes CephOSD before comparing).
    Returns 0 on success, 1 on mismatch.
    """
    compute_role_filename = os.path.join(os.path.dirname(hci_role_filename),
                                         './Compute.yaml')
    with open(compute_role_filename, 'r') as f:
        compute_role_tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
    compute_role_services = compute_role_tpl[0]['ServicesDefault']
    for role in hci_role_tpl:
        if role['name'] != 'ComputeHCI':
            continue
        hci_role_services = role['ServicesDefault']
        hci_role_services.remove('OS::TripleO::Services::CephOSD')
        if sorted(hci_role_services) != sorted(compute_role_services):
            print('ERROR: ServicesDefault in %s is different from '
                  'ServicesDefault in roles/Compute.yaml' % hci_role_filename)
            return 1
    return 0
def validate_controller_dashboard(filename, tpl):
    """ControllerStorageDashboard must match roles/Controller.yaml exactly.

    Returns 0 on success, 1 on mismatch.
    """
    control_role_filename = os.path.join(os.path.dirname(filename),
                                         './Controller.yaml')
    with open(control_role_filename, 'r') as f:
        control_role_tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
    control_role_services = control_role_tpl[0]['ServicesDefault']
    for role in tpl:
        if role['name'] != 'ControllerStorageDashboard':
            continue
        if sorted(role['ServicesDefault']) != sorted(control_role_services):
            print('ERROR: ServicesDefault in %s is different from '
                  'ServicesDefault in roles/Controller.yaml' % filename)
            return 1
    return 0
def validate_controller_storage_nfs(filename, tpl, exclude_service=()):
    """ControllerStorageNfs must match roles/Controller.yaml once the
    services in *exclude_service* are ignored.

    Returns 0 on success, 1 on mismatch.
    """
    control_role_filename = os.path.join(os.path.dirname(filename),
                                         './Controller.yaml')
    with open(control_role_filename, 'r') as f:
        control_role_tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
    control_role_services = control_role_tpl[0]['ServicesDefault']
    for role in tpl:
        if role['name'] != 'ControllerStorageNfs':
            continue
        services = [s for s in role['ServicesDefault'] if s not in exclude_service]
        if sorted(services) != sorted(control_role_services):
            print('ERROR: ServicesDefault in %s is different from '
                  'ServicesDefault in roles/Controller.yaml' % filename)
            return 1
    return 0
def validate_hci_role(hci_role_filename, hci_role_tpl):
    """Each HciCeph* role must equal roles/Compute.yaml plus its Ceph services.

    For the known HCI role files, strip the Ceph services that the role is
    expected to add and compare the remainder against Compute's defaults.
    NOTE: mutates hci_role_tpl in place (list.remove), and the final
    comparison uses whichever branch last assigned ``hci_role_services`` --
    presumably each file contains exactly one matching role; verify.
    Returns 0 on success, 1 on mismatch.
    """
    role_files = ['HciCephAll', 'HciCephFile', 'HciCephMon', 'HciCephObject']
    if hci_role_filename in ['./roles/' + x + '.yaml' for x in role_files]:
        compute_role_filename = \
            os.path.join(os.path.dirname(hci_role_filename), './Compute.yaml')
        with open(compute_role_filename, 'r') as f:
            compute_role_tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
            compute_role_services = compute_role_tpl[0]['ServicesDefault']
            for role in hci_role_tpl:
                # Remove the services this role adds on top of Compute.
                if role['name'] == 'HciCephAll':
                    hci_role_services = role['ServicesDefault']
                    hci_role_services.remove('OS::TripleO::Services::CephGrafana')
                    hci_role_services.remove('OS::TripleO::Services::CephMds')
                    hci_role_services.remove('OS::TripleO::Services::CephMgr')
                    hci_role_services.remove('OS::TripleO::Services::CephMon')
                    hci_role_services.remove('OS::TripleO::Services::CephRbdMirror')
                    hci_role_services.remove('OS::TripleO::Services::CephRgw')
                    hci_role_services.remove('OS::TripleO::Services::CephOSD')
                if role['name'] == 'HciCephFile':
                    hci_role_services = role['ServicesDefault']
                    hci_role_services.remove('OS::TripleO::Services::CephMds')
                    hci_role_services.remove('OS::TripleO::Services::CephOSD')
                if role['name'] == 'HciCephMon':
                    hci_role_services = role['ServicesDefault']
                    hci_role_services.remove('OS::TripleO::Services::CephMgr')
                    hci_role_services.remove('OS::TripleO::Services::CephMon')
                    hci_role_services.remove('OS::TripleO::Services::CephOSD')
                if role['name'] == 'HciCephObject':
                    hci_role_services = role['ServicesDefault']
                    hci_role_services.remove('OS::TripleO::Services::CephRgw')
                    hci_role_services.remove('OS::TripleO::Services::CephOSD')
                # What remains must match Compute's service list exactly.
                if sorted(hci_role_services) != sorted(compute_role_services):
                    print('ERROR: ServicesDefault in %s is different from '
                          'ServicesDefault in roles/Compute.yaml' % hci_role_filename)
                    return 1
    return 0
def validate_ceph_role(ceph_role_filename, ceph_role_tpl):
role_files = ['CephAll', 'CephFile', 'CephMon', 'CephObject']
if ceph_role_filename in ['./roles/' + x + '.yaml' for x in role_files]:
ceph_storage_role_filename = \
os.path.join(os.path.dirname(ceph_role_filename), './CephStorage.yaml')
with open(ceph_storage_role_filename, 'r') as f:
ceph_storage_role_tpl = yaml.load(f.read(), Loader=yaml.SafeLoader)
ceph_storage_role_services = ceph_storage_role_tpl[0]['ServicesDefault']
for role in ceph_role_tpl:
if role['name'] == 'CephAll':
ceph_role_services = role['ServicesDefault']
ceph_role_services.remove('OS::TripleO::Services::CephGrafana')
ceph_role_services.remove('OS::TripleO::Services::CephMds')
ceph_role_services.remove('OS::TripleO::Services::CephMgr')
ceph_role_services.remove('OS::TripleO::Services::CephMon')
ceph_role_services.remove('OS::TripleO::Services::CephRbdMirror')
ceph_role_services.remove('OS::TripleO::Services::CephRgw')
if role['name'] == 'CephFile':
ceph_role_services = role['ServicesDefault']
ceph_role_services.remove('OS::TripleO::Services::CephClient')
ceph_role_services.remove('OS::TripleO::Services::CephMds')
if | |
# From repository: ericmehl/cortex
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import imath
import IECore
import shlex
class testParameterParser( unittest.TestCase ) :
def testMultiply( self ) :
l = IECore.ClassLoader( IECore.SearchPath( os.path.join( "test", "IECore", "ops" ) ) )
a = l.load( os.path.join( "maths", "multiply" ) )()
p = a.parameters()
IECore.ParameterParser().parse( ["-a", "10", "-b", "20" ], p )
self.assertEqual( a(), IECore.IntData( 200 ) )
def testParameterTypes( self ) :
	"""Check that the parser accepts command line values for every parameter
	type declared by the "parameterTypes" test op. Multi-element parameters
	(vectors, boxes, matrices) consume several consecutive values; values
	beginning with an escaped dash ("\\-") are treated as data, not flags."""
	a = IECore.ClassLoader( IECore.SearchPath( os.path.join( "test", "IECore", "ops" ) ) ).load( "parameterTypes" )()
	IECore.ParameterParser().parse( [
		"-a", "10",
		"-b", "20.2",
		"-c", "40.5",
		"-d", "hello",
		"-e", "2", "4", "5",
		"-f", "one", "two", "three", "\\-1", "\\-dash", "\\\\-slashDash", "\\\\\\-slashSlashDash", "inline-dash",
		"-g", "2", "4",
		"-h", "1", "4", "8",
		"-i", "2", "4",
		"-compound.j", "1", "4", "8",
		"-compound.k", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16",
		"-l", "1", "0", "0",
		"-m", "1", "1", "0", "1",
		"-o", "myFile.tif",
		"-p", "test",
		"-q", "true",
		"-r", "mySequence.####.tif",
		"-s", "-1", "-2", "10", "20",
		"-t", "-1", "-2", "-3", "10", "20", "30",
		"-u", "64", "128",
		"-v", "25", "26", "27",
		"-w", "0-500x250",
		"-x", '0', '0', '0', '1', '1', '1', '0', '0', '0', '0', '0', '0', 'XYZ', '1', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0',
		"-y", '0', '0', '0', '1', '1', '1', '0', '0', '0', '0', '0', '0', 'XYZ', '1', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0',
	], a.parameters() )
	# running the op validates the parsed values
	a()
def testPresetParsing( self ) :
	"""Check that preset names given on the command line select parameter presets."""
	op = IECore.ClassLoader( IECore.SearchPath( os.path.join( "test", "IECore", "ops" ) ) ).load( "presetParsing" )()
	args = [
		"-h", "x",
		"-compound", "one",
	]
	IECore.ParameterParser().parse( args, op.parameters() )
	op()
def testReadParsing( self ) :
	"""A "read:" prefixed value should load the parameter value from a file."""
	loader = IECore.ClassLoader( IECore.SearchPath( os.path.join( "test", "IECore", "ops" ) ) )
	op = loader.load( os.path.join( "maths", "multiply" ) )()
	cobPath = os.path.join( "test", "IECore", "data", "cobFiles", "intDataTen.cob" )
	IECore.ParameterParser().parse( [ "-a", "read:" + cobPath, "-b", "30" ], op.parameters() )
	# the result implies the .cob file holds IntData( 10 ) : 10 * 30 == 300
	self.assertEqual( op(), IECore.IntData( 300 ) )
def testSerialising( self ) :
	"""A serialise/parse round trip must reproduce the parameter values,
	both with the default (validating) serialisation and with the
	alternate form that takes explicit values and skips validation."""
	a = IECore.ClassLoader( IECore.SearchPath( os.path.join( "test", "IECore", "ops" ) ) ).load( "parameterTypes" )()
	# populate every parameter type from command line style arguments
	IECore.ParameterParser().parse( [
		"-a", "10",
		"-b", "20.2",
		"-c", "40.5",
		"-d", "hello",
		"-e", "2", "4", "5",
		"-f", "one", "two", "three", "\\-1", "\\-dash", "\\\\-slashDash", "\\\\\\-slashSlashDash", "inline-dash",
		"-g", "2", "4",
		"-h", "1", "4", "8",
		"-i", "2", "4",
		"-compound.j", "1", "4", "8",
		"-compound.k", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16",
		"-l", "1", "0", "0",
		"-m", "1", "1", "0", "1",
		"-o", "myFile.tif",
		"-p", "test",
		"-q", "true",
		"-r", "mySequence.####.tif",
		"-s", "-1", "-2", "10", "20",
		"-t", "-1", "-2", "-3", "10", "20", "30",
		"-u", "64", "128",
		"-v", "25", "26", "27",
		"-w", "0-500x250",
		"-x", '0', '0', '0', '1', '1', '1', '0', '0', '0', '0', '0', '0', 'Default', '1', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0',
		"-y", '0', '0', '0', '1', '1', '1', '0', '0', '0', '0', '0', '0', 'Default', '1', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0',
	], a.parameters() )
	a()
	# remove some parameters that don't have serializing/parsing methods yet.
	for name in [ 'p1', 'p2', 'p3', 'p4' ] :
		a.parameters().removeParameter( name )
	# round trip : serialise, then parse into a freshly loaded op
	s = IECore.ParameterParser().serialise( a.parameters() )
	a = IECore.ClassLoader( IECore.SearchPath( os.path.join( "test", "IECore", "ops" ) ) ).load( "parameterTypes" )()
	IECore.ParameterParser().parse( s, a.parameters() )
	a()
	# test alternate serialisation (without validation)
	notValidated = IECore.ParameterParser().serialise( a.parameters(), values = a.parameters().getValue() )
	IECore.ParameterParser().parse( notValidated, a.parameters() )
	a()
def testSerialisingNonValidParameterValues( self ) :
	"""Serialisation must refuse values which fail validation unless the
	value is passed explicitly, and parsing such a serialisation must
	fail until the value would validate."""
	p = IECore.FileNameParameter(
		name = "f",
		description = "d",
		extensions = "cob scc fio pdc",
		check = IECore.FileNameParameter.CheckType.MustExist,
	)
	# "test" has no valid extension and need not exist, so validation fails
	p.setValue( IECore.StringData( "test" ) )
	self.assertRaises( RuntimeError, p.getValidatedValue )
	# serialise() validates by default, so it must raise too. assertRaises
	# forwards extra positional arguments to the callable, so the
	# IECore.curry wrapper used previously is unnecessary.
	self.assertRaises( RuntimeError, IECore.ParameterParser().serialise, p )
	# passing values explicitly skips validation on serialisation, but
	# parsing the result must still fail
	s = IECore.ParameterParser().serialise( p, values = p.getValue() )
	self.assertRaises( SyntaxError, IECore.ParameterParser().parse, s, p )
	realImage = IECore.StringData( os.path.join( "test", "IECore", "data", "cobFiles", "ball.cob" ) )
	s = IECore.ParameterParser().serialise( p, values = realImage )
	# the parameter still holds the invalid value at this point...
	self.assertRaises( RuntimeError, p.getValidatedValue )
	# ...but parsing the serialisation of the valid value fixes it
	IECore.ParameterParser().parse( s, p )
	self.assertEqual( p.getValidatedValue(), realImage )
def testStringParsing( self ) :
	"""Strings with spaces must survive parsing, both from a prebuilt
	argument list and from a shell-style command line split with shlex."""
	op = IECore.ClassLoader( IECore.SearchPath( os.path.join( "test", "IECore", "ops" ) ) ).load( "stringParsing" )()
	args = [
		"-emptyString", "",
		"-normalString", "hello",
		"-stringWithSpace", "hello there",
		"-stringWithManySpaces", "hello there old chap",
	]
	IECore.ParameterParser().parse( args, op.parameters() )
	op()
	# same values again, this time split from a single quoted command line
	op = IECore.ClassLoader( IECore.SearchPath( os.path.join( "test", "IECore", "ops" ) ) ).load( "stringParsing" )()
	commandLine = "-emptyString '' -normalString 'hello' -stringWithSpace 'hello there' -stringWithManySpaces 'hello there old chap'"
	IECore.ParameterParser().parse( shlex.split( commandLine ), op.parameters() )
	op()
def testFlaglessParsing( self ) :
	"""Parameters listed in ["parser"]["flagless"] userData may be given
	as positional values (in the declared order) without a "-name" flag,
	while flagged parsing keeps working and errors are still detected."""
	parameters = IECore.CompoundParameter(
		members = [
			IECore.IntParameter(
				name = "a",
				description = "",
				defaultValue = 1
			),
			IECore.StringParameter(
				name = "b",
				description = "",
				defaultValue = "2"
			),
			IECore.IntParameter(
				name = "c",
				description = "",
				defaultValue = 3
			),
		],
		userData = {
			"parser" : {
				"flagless" : IECore.StringVectorData( [ "b", "c" ] )
			}
		}
	)
	# check that normal parsing still works
	IECore.ParameterParser().parse( [
		"-a", "10",
		"-b", "hello",
		"-c", "4"
	],
		parameters
	)
	self.assertEqual( parameters["a"].getNumericValue(), 10 )
	self.assertEqual( parameters["b"].getTypedValue(), "hello" )
	self.assertEqual( parameters["c"].getNumericValue(), 4 )
	# check that flagless parsing works : "goodbye" goes to b, "20" to c
	IECore.ParameterParser().parse( [
		"-a", "15",
		"goodbye", "20"
	],
		parameters
	)
	self.assertEqual( parameters["a"].getNumericValue(), 15 )
	self.assertEqual( parameters["b"].getTypedValue(), "goodbye" )
	self.assertEqual( parameters["c"].getNumericValue(), 20 )
	# check that invalid stuff is still detected : unknown flag
	self.assertRaises( SyntaxError, IECore.ParameterParser().parse,
		[ "-iDontExist", "10" ],
		parameters,
	)
	# missing value for a flag
	self.assertRaises( SyntaxError, IECore.ParameterParser().parse,
		[ "-a" ],
		parameters,
	)
	# more flagless values than flagless parameters
	self.assertRaises( SyntaxError, IECore.ParameterParser().parse,
		[ "too", "2", "many", "flaglessValues" ],
		parameters,
	)
def testOptionalSerialisation( self ) :
	"""Parameters may opt in or out of serialisation via the
	["parser"]["serialise"] userData entry; the default is to serialise."""
	parameters = IECore.CompoundParameter(
		members = [
			IECore.IntParameter(
				name = "a",
				description = "",
				defaultValue = 1
			),
			IECore.StringParameter(
				name = "b",
				description = "",
				defaultValue = "2",
				userData = { "parser" : { "serialise" : IECore.BoolData( True ) } }
			),
			IECore.IntParameter(
				name = "c",
				description = "",
				defaultValue = 3,
				userData = { "parser" : { "serialise" : IECore.BoolData( False ) } }
			),
		]
	)
	s = IECore.ParameterParser().serialise( parameters )
	# assertIn/assertNotIn replace the original double-negative
	# assertFalse( x not in s ) form and give clearer failure messages
	self.assertIn( "-a", s )     # no userData : serialised by default
	self.assertIn( "-b", s )     # explicitly enabled
	self.assertNotIn( "-c", s )  # explicitly disabled
def testEvalParsing( self ) :
	"""A "python:" prefixed value should be evaluated as a python expression."""
	loader = IECore.ClassLoader( IECore.SearchPath( os.path.join( "test", "IECore", "ops" ) ) )
	op = loader.load( os.path.join( "maths", "multiply" ) )()
	parameters = op.parameters()
	IECore.ParameterParser().parse( [ "-a", "python:IECore.IntData( 20 )", "-b", "30" ], parameters )
	self.assertEqual( parameters["a"].getValue(), IECore.IntData( 20 ) )
	self.assertEqual( op(), IECore.IntData( 600 ) )
def testSplineParsing( self ) :
	"""Spline parameters must survive a serialise/parse round trip."""
	p = IECore.CompoundParameter(
		members = [
			IECore.SplineffParameter(
				name = "a",
				description = "d",
				defaultValue = IECore.SplineffData( IECore.Splineff( IECore.CubicBasisf.catmullRom(), ( ( 0, 0 ), ( 1, 1 ) ) ) ),
			),
		]
	)
	# serialise the default value, then clobber the parameter...
	s = IECore.ParameterParser().serialise( p )
	v = p["a"].getValue().copy()
	p["a"].setValue( IECore.SplineffData() )
	self.assertNotEqual( p["a"].getValue(), v )
	# ...and check that parsing the serialisation restores the value
	IECore.ParameterParser().parse( s, p )
	self.assertEqual( p["a"].getValue(), v )
def testTransformationMatrixParsing( self ) :
	"""A TransformationMatrixf parameter is parsed from 29 values:
	translate(3), scale(3), shear(3), rotate(3), rotation order,
	rotationOrientation quaternion(4), rotatePivot(3),
	rotatePivotTranslation(3), scalePivot(3), scalePivotTranslation(3)."""
	p = IECore.CompoundParameter(
		members = [
			IECore.TransformationMatrixfParameter(
				name = "t",
				description = "d",
				defaultValue = IECore.TransformationMatrixf(),
			),
		]
	)
	args = [ "-t", '1', '2', '3', '10', '11', '12', '4', '5', '6', '7', '8', '9', 'ZYX', '1', '21', '22', '23', '26', '27', '28', '36', '37', '38', '46', '47', '48', '56', '57', '58' ]
	IECore.ParameterParser().parse( args, p )
	t = p["t"].getTypedValue()
	# each assertion checks one component group of the argument list above
	self.assertEqual( t.translate,imath.V3f( 1,2,3 ) )
	self.assertEqual( t.scale,imath.V3f( 10,11,12 ) )
	self.assertEqual( t.shear, imath.V3f( 4,5,6 ) )
	self.assertEqual( t.rotate, imath.V3f( 7,8,9 ) )
	self.assertEqual( t.rotate.order(),imath.Eulerf.Order.ZYX )
	self.assertEqual( t.rotationOrientation, imath.Quatf( 1,21,22,23 ) )
	self.assertEqual( t.rotatePivot, imath.V3f( 26,27,28 ) )
	self.assertEqual( t.rotatePivotTranslation, imath.V3f( 36,37,38 ) )
	self.assertEqual( t.scalePivot, imath.V3f( 46,47,48 ) )
	self.assertEqual( t.scalePivotTranslation, imath.V3f( 56,57,58 ) )
def testLineSegmentParsing( self ) :
	"""LineSegment parameters parse six values (start xyz, end xyz) and
	serialise back to exactly the same arguments."""
	p = IECore.CompoundParameter(
		members = [
			IECore.LineSegment3fParameter(
				name = "f",
				description = "",
				defaultValue = IECore.LineSegment3f( imath.V3f( 1 ), imath.V3f( 2 ) )
			),
			IECore.LineSegment3dParameter(
				name = "d",
				description = "",
				defaultValue = IECore.LineSegment3d( imath.V3d( 1 ), imath.V3d( 2 ) )
			),
		]
	)
	args = "-f 1.0 2.0 3.0 4.0 5.0 6.0 -d 6.0 5.0 4.0 3.0 2.0 1.0".split()
	IECore.ParameterParser().parse( args, p )
	self.assertEqual( p["f"].getTypedValue(), IECore.LineSegment3f( imath.V3f( 1, 2, 3 ), imath.V3f( 4, 5, 6 ) ) )
	self.assertEqual( p["d"].getTypedValue(), IECore.LineSegment3d( imath.V3d( 6, 5, 4 ), imath.V3d( 3, 2, 1 ) ) )
	# round trip : serialisation reproduces the original argument list
	self.assertEqual( IECore.ParameterParser().serialise( p ), args )
def testDatetimeParsing( self ) :
import datetime
now = datetime.datetime.now()
now = now.replace( microsecond = 0 )
p = IECore.CompoundParameter(
members = [
IECore.DateTimeParameter(
name = "testName",
description = "testName description",
defaultValue = now
),
]
)
s = IECore.ParameterParser().serialise( p )
v = p["testName"].getValue().copy()
p["testName"].setValue( IECore.DateTimeData() )
self.assertNotEqual( p["testName"].getValue(), v )
IECore.ParameterParser().parse( | |
= EmailTemplate.objects.get(name=operation.email_template_contributor_name)
if operation.email_template_participant_name:
email_template_participant = EmailTemplate.objects.get(name=operation.email_template_participant_name)
template_data = dict(
semester=semester,
evaluations=applicable_evaluations,
target_state=target_state,
confirmation_message=operation.confirmation_message,
email_template=email_template,
email_template_contributor=email_template_contributor,
email_template_participant=email_template_participant,
show_email_checkbox=email_template is not None
or email_template_contributor is not None
or email_template_participant is not None,
)
return render(request, "staff_evaluation_operation.html", template_data)
@manager_required
def semester_create(request):
    """Show the semester creation form and create the semester on valid POST."""
    form = SemesterForm(request.POST or None)
    if not form.is_valid():
        return render(request, "staff_semester_form.html", dict(form=form))
    semester = form.save()
    # reviewers and grade publishers get navbar entries per semester, so
    # their cached navbars must be invalidated
    affected_users = [
        user for user in UserProfile.objects.all() if user.is_reviewer or user.is_grade_publisher
    ]
    delete_navbar_cache_for_users(affected_users)
    messages.success(request, _("Successfully created semester."))
    return redirect("staff:semester_view", semester.id)
@require_POST
@manager_required
@transaction.atomic
def semester_make_active(request):
    """Mark the POSTed semester as the single active one."""
    semester = get_object_or_404(Semester, id=request.POST.get("semester_id"))
    # clear the flag on all semesters first — presumably at most one
    # semester may be active at a time (TODO confirm against the model)
    Semester.objects.update(is_active=None)
    semester.is_active = True
    semester.save()
    return HttpResponse()
@manager_required
def semester_edit(request, semester_id):
    """Edit an existing semester."""
    semester = get_object_or_404(Semester, id=semester_id)
    form = SemesterForm(request.POST or None, instance=semester)
    if not form.is_valid():
        return render(request, "staff_semester_form.html", dict(semester=semester, form=form))
    semester = form.save()
    messages.success(request, _("Successfully updated semester."))
    return redirect("staff:semester_view", semester.id)
@require_POST
@manager_required
def semester_delete(request):
    """Delete the POSTed semester together with all of its dependent data."""
    semester_id = request.POST.get("semester_id")
    semester = get_object_or_404(Semester, id=semester_id)
    if not semester.can_be_deleted_by_manager:
        raise SuspiciousOperation("Deleting semester not allowed")
    # delete dependent objects explicitly, children before parents
    RatingAnswerCounter.objects.filter(contribution__evaluation__course__semester=semester).delete()
    TextAnswer.objects.filter(contribution__evaluation__course__semester=semester).delete()
    Contribution.objects.filter(evaluation__course__semester=semester).delete()
    Evaluation.objects.filter(course__semester=semester).delete()
    Course.objects.filter(semester=semester).delete()
    semester.delete()
    # reviewers and grade publishers have semester-dependent navbar caches
    delete_navbar_cache_for_users(
        [user for user in UserProfile.objects.all() if user.is_reviewer or user.is_grade_publisher]
    )
    return redirect("staff:index")
@manager_required
def semester_import(request, semester_id):
    """Two-step enrollment import for a semester.

    The "test" operation dry-runs the uploaded Excel file and, when it
    produces no errors, stores the file for the user. The "import"
    operation then applies the previously tested file together with the
    vote start/end dates entered in the form.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    if semester.participations_are_archived:
        raise PermissionDenied
    excel_form = ImportForm(request.POST or None, request.FILES or None)
    import_type = ImportType.SEMESTER
    errors = {}
    warnings = {}
    success_messages = []
    if request.method == "POST":
        operation = request.POST.get("operation")
        if operation not in ("test", "import"):
            raise SuspiciousOperation("Invalid POST operation")
        if operation == "test":
            delete_import_file(request.user.id, import_type)  # remove old files if still exist
            # the file is only required for the test step; the import step
            # reuses the stored file
            excel_form.fields["excel_file"].required = True
            if excel_form.is_valid():
                excel_file = excel_form.cleaned_data["excel_file"]
                file_content = excel_file.read()
                # dry run: dates are not needed, nothing is written
                success_messages, warnings, errors = EnrollmentImporter.process(
                    file_content, semester, vote_start_datetime=None, vote_end_date=None, test_run=True
                )
                if not errors:
                    # keep the file around for the subsequent "import" step
                    save_import_file(excel_file, request.user.id, import_type)
        elif operation == "import":
            file_content = get_import_file_content_or_raise(request.user.id, import_type)
            excel_form.fields["vote_start_datetime"].required = True
            excel_form.fields["vote_end_date"].required = True
            if excel_form.is_valid():
                vote_start_datetime = excel_form.cleaned_data["vote_start_datetime"]
                vote_end_date = excel_form.cleaned_data["vote_end_date"]
                success_messages, warnings, __ = EnrollmentImporter.process(
                    file_content, semester, vote_start_datetime, vote_end_date, test_run=False
                )
                forward_messages(request, success_messages, warnings)
                delete_import_file(request.user.id, import_type)
                delete_navbar_cache_for_users(UserProfile.objects.all())
                return redirect("staff:semester_view", semester_id)
    # a stored file means the last test run passed
    test_passed = import_file_exists(request.user.id, import_type)
    # casting warnings to a normal dict is necessary for the template to iterate over it.
    return render(
        request,
        "staff_semester_import.html",
        dict(
            semester=semester,
            success_messages=success_messages,
            errors=sorted_messages(errors),
            warnings=sorted_messages(warnings),
            excel_form=excel_form,
            test_passed=test_passed,
        ),
    )
@manager_required
def semester_export(request, semester_id):
    """Export semester results as an Excel file.

    The user configures one or more sheets, each defined by a selection
    of degrees and course types.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    ExportSheetFormset = formset_factory(form=ExportSheetForm, can_delete=True, extra=0, min_num=1, validate_min=True)
    formset = ExportSheetFormset(request.POST or None, form_kwargs={"semester": semester})
    if formset.is_valid():
        include_not_enough_voters = request.POST.get("include_not_enough_voters") == "on"
        include_unpublished = request.POST.get("include_unpublished") == "on"
        # one (degrees, course_types) pair per requested sheet
        selection_list = [
            (form.cleaned_data["selected_degrees"], form.cleaned_data["selected_course_types"])
            for form in formset
        ]
        filename = "Evaluation-{}-{}.xls".format(semester.name, get_language())
        response = FileResponse(filename, content_type="application/vnd.ms-excel")
        ResultsExporter().export(response, [semester], selection_list, include_not_enough_voters, include_unpublished)
        return response
    return render(request, "staff_semester_export.html", dict(semester=semester, formset=formset))
@manager_required
def semester_raw_export(_request, semester_id):
    """Export a raw CSV overview of the semester, one row per evaluation."""
    semester = get_object_or_404(Semester, id=semester_id)
    filename = "Evaluation-{}-{}_raw.csv".format(semester.name, get_language())
    response = FileResponse(filename, content_type="text/csv")
    writer = csv.writer(response, delimiter=";", lineterminator="\n")
    writer.writerow(
        [
            _("Name"),
            _("Degrees"),
            _("Type"),
            _("Single result"),
            _("State"),
            _("#Voters"),
            _("#Participants"),
            _("#Text answers"),
            _("Average grade"),
        ]
    )
    for evaluation in sorted(semester.evaluations.all(), key=lambda cr: cr.full_name):
        degrees = ", ".join([degree.name for degree in evaluation.course.degrees.all()])
        # the grade cell stays empty when staff may not see the average or
        # when no distribution can be calculated
        avg_grade = ""
        if evaluation.can_staff_see_average_grade:
            distribution = calculate_average_distribution(evaluation)
            if distribution is not None:
                avg_grade = "{:.1f}".format(distribution_to_grade(distribution))
        writer.writerow(
            [
                evaluation.full_name,
                degrees,
                evaluation.course.type.name,
                evaluation.is_single_result,
                evaluation.state_str,
                evaluation.num_voters,
                evaluation.num_participants,
                evaluation.textanswer_set.count(),
                avg_grade,
            ]
        )
    return response
@manager_required
def semester_participation_export(_request, semester_id):
    """Export per-participant voting statistics of the semester as CSV."""
    semester = get_object_or_404(Semester, id=semester_id)
    participants = (
        UserProfile.objects.filter(evaluations_participating_in__course__semester=semester).distinct().order_by("email")
    )
    filename = "Evaluation-{}-{}_participation.csv".format(semester.name, get_language())
    response = FileResponse(filename, content_type="text/csv")
    writer = csv.writer(response, delimiter=";", lineterminator="\n")
    writer.writerow(
        [
            _("Email"),
            _("Can use reward points"),
            _("#Required evaluations voted for"),
            _("#Required evaluations"),
            _("#Optional evaluations voted for"),
            _("#Optional evaluations"),
            _("Earned reward points"),
        ]
    )
    for participant in participants:
        # "required" maps to is_rewarded=True, "optional" to is_rewarded=False
        number_of_required_evaluations = semester.evaluations.filter(participants=participant, is_rewarded=True).count()
        number_of_required_evaluations_voted_for = semester.evaluations.filter(
            voters=participant, is_rewarded=True
        ).count()
        number_of_optional_evaluations = semester.evaluations.filter(
            participants=participant, is_rewarded=False
        ).count()
        number_of_optional_evaluations_voted_for = semester.evaluations.filter(
            voters=participant, is_rewarded=False
        ).count()
        query = RewardPointGranting.objects.filter(semester=semester, user_profile=participant).aggregate(Sum("value"))
        # the aggregate is None when no grantings exist
        earned_reward_points = query["value__sum"] or 0
        writer.writerow(
            [
                participant.email,
                can_reward_points_be_used_by(participant),
                number_of_required_evaluations_voted_for,
                number_of_required_evaluations,
                number_of_optional_evaluations_voted_for,
                number_of_optional_evaluations,
                earned_reward_points,
            ]
        )
    return response
@manager_required
def semester_questionnaire_assign(request, semester_id):
    """Bulk-assign questionnaires to all NEW evaluations, keyed by course type."""
    semester = get_object_or_404(Semester, id=semester_id)
    if semester.participations_are_archived:
        raise PermissionDenied
    evaluations = semester.evaluations.filter(state=Evaluation.State.NEW)
    course_types = CourseType.objects.filter(courses__evaluations__in=evaluations)
    # the form has one field per course type plus an "all-contributors" field
    form = QuestionnairesAssignForm(request.POST or None, course_types=course_types)
    if form.is_valid():
        for evaluation in evaluations:
            if form.cleaned_data[evaluation.course.type.name]:
                evaluation.general_contribution.questionnaires.set(form.cleaned_data[evaluation.course.type.name])
            # the "all-contributors" selection applies to every contribution
            # that has an actual contributor
            if form.cleaned_data["all-contributors"]:
                for contribution in evaluation.contributions.exclude(contributor=None):
                    contribution.questionnaires.set(form.cleaned_data["all-contributors"])
            evaluation.save()
        messages.success(request, _("Successfully assigned questionnaires."))
        return redirect("staff:semester_view", semester_id)
    return render(request, "staff_semester_questionnaire_assign_form.html", dict(semester=semester, form=form))
@manager_required
def semester_preparation_reminder(request, semester_id):
    """Overview of evaluations awaiting editor review; POST sends reminder mails.

    GET renders, per responsible person, their prepared/editor-approved
    evaluations together with their delegates. POST mails each responsible
    person a review reminder listing those evaluations.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    evaluations = semester.evaluations.filter(
        state__in=[Evaluation.State.PREPARED, Evaluation.State.EDITOR_APPROVED]
    ).prefetch_related("course__degrees")
    # only people responsible for a still-PREPARED evaluation get listed
    prepared_evaluations = semester.evaluations.filter(state=Evaluation.State.PREPARED)
    responsibles = list(
        set(responsible for evaluation in prepared_evaluations for responsible in evaluation.course.responsibles.all())
    )
    responsibles.sort(key=lambda responsible: (responsible.last_name, responsible.first_name))
    responsible_list = [
        (
            responsible,
            [evaluation for evaluation in evaluations if responsible in evaluation.course.responsibles.all()],
            responsible.delegates.all(),
        )
        for responsible in responsibles
    ]
    if request.method == "POST":
        template = EmailTemplate.objects.get(name=EmailTemplate.EDITOR_REVIEW_REMINDER)
        subject_params = {}
        # note: a distinct loop variable name is used here so the outer
        # `evaluations` queryset is not shadowed
        for responsible, responsible_evaluations, __ in responsible_list:
            body_params = {"user": responsible, "evaluations": responsible_evaluations}
            template.send_to_user(responsible, subject_params, body_params, use_cc=True, request=request)
        messages.success(request, _("Successfully sent reminders to everyone."))
        return HttpResponse()
    template_data = dict(semester=semester, responsible_list=responsible_list)
    return render(request, "staff_semester_preparation_reminder.html", template_data)
@manager_required
def semester_grade_reminder(request, semester_id):
    """List responsibles whose courses still lack final grade documents."""
    semester = get_object_or_404(Semester, id=semester_id)
    candidate_courses = semester.courses.filter(
        evaluations__state__gte=Evaluation.State.EVALUATED,
        evaluations__wait_for_grade_upload_before_publishing=True,
        gets_no_grade_documents=False,
    ).distinct()
    # keep only courses that are still missing their final grade documents
    courses = sorted(
        (course for course in candidate_courses if not course.final_grade_documents.exists()),
        key=lambda course: course.name,
    )
    responsibles = sorted(
        {responsible for course in courses for responsible in course.responsibles.all()},
        key=lambda responsible: (responsible.last_name.lower(), responsible.first_name.lower()),
    )
    responsible_list = [
        (responsible, [course for course in courses if responsible in course.responsibles.all()])
        for responsible in responsibles
    ]
    template_data = dict(semester=semester, responsible_list=responsible_list)
    return render(request, "staff_semester_grade_reminder.html", template_data)
@manager_required
def send_reminder(request, semester_id, responsible_id):
    """Send a preparation reminder mail to a single responsible person."""
    responsible = get_object_or_404(UserProfile, id=responsible_id)
    semester = get_object_or_404(Semester, id=semester_id)
    form = RemindResponsibleForm(request.POST or None, responsible=responsible)
    evaluations = Evaluation.objects.filter(state=Evaluation.State.PREPARED, course__responsibles__in=[responsible])
    if not form.is_valid():
        return render(
            request, "staff_semester_send_reminder.html", dict(semester=semester, responsible=responsible, form=form)
        )
    form.send(request, evaluations)
    messages.success(request, _("Successfully sent reminder to {}.").format(responsible.full_name))
    return redirect("staff:semester_preparation_reminder", semester_id)
@require_POST
@manager_required
def semester_archive_participations(request):
    """Archive the participations of the POSTed semester."""
    semester = get_object_or_404(Semester, id=request.POST.get("semester_id"))
    if not semester.participations_can_be_archived:
        raise SuspiciousOperation("Archiving participations for this semester is not allowed")
    semester.archive()
    return HttpResponse()  # 200 OK
@require_POST
@manager_required
def semester_delete_grade_documents(request):
    """Delete all grade documents of the POSTed semester."""
    semester = get_object_or_404(Semester, id=request.POST.get("semester_id"))
    if not semester.grade_documents_can_be_deleted:
        raise SuspiciousOperation("Deleting grade documents for this semester is not allowed")
    semester.delete_grade_documents()
    # navbar contents depend on grade document availability, so drop all caches
    delete_navbar_cache_for_users(UserProfile.objects.all())
    return HttpResponse()  # 200 OK
@require_POST
@manager_required
def semester_archive_results(request):
    """Archive the results of the POSTed semester."""
    semester = get_object_or_404(Semester, id=request.POST.get("semester_id"))
    if not semester.results_can_be_archived:
        raise SuspiciousOperation("Archiving results for this semester is not allowed")
    semester.archive_results()
    delete_navbar_cache_for_users(UserProfile.objects.all())
    return HttpResponse()  # 200 OK
@manager_required
def course_create(request, semester_id):
    """Create a new course in the given semester.

    Depending on the submit button ("operation") used, afterwards redirect
    to evaluation or single result creation for the new course.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    if semester.participations_are_archived:
        raise PermissionDenied
    course = Course(semester=semester)
    course_form = CourseForm(request.POST or None, instance=course)
    operation = request.POST.get("operation")
    if course_form.is_valid():
        if operation not in ("save", "save_create_evaluation", "save_create_single_result"):
            raise SuspiciousOperation("Invalid POST operation")
        course = course_form.save()
        messages.success(request, _("Successfully created course."))
        if operation == "save_create_evaluation":
            return redirect("staff:evaluation_create", semester_id, course.id)
        if operation == "save_create_single_result":
            return redirect("staff:single_result_create", semester_id, course.id)
        return redirect("staff:semester_view", semester_id)
    return render(request, "staff_course_form.html", dict(semester=semester, course_form=course_form, editable=True))
@manager_required
def course_copy(request, semester_id, course_id):
    """Copy a course via CourseCopyForm; redirects to the copy's semester view."""
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(Course, id=course_id, semester=semester)
    course_form = CourseCopyForm(request.POST or None, instance=course)
    if course_form.is_valid():
        copied_course = course_form.save()
        messages.success(request, _("Successfully copied course."))
        # copying may bring back contributors or responsibles whose user
        # accounts were deactivated in the meantime — reactivate and warn
        inactive_users = UserProfile.objects.filter(
            Q(contributions__evaluation__course=copied_course, is_active=False)
            | Q(courses_responsible_for=copied_course, is_active=False)
        ).distinct()
        if inactive_users:
            messages.warning(
                request,
                _("The accounts of the following contributors were reactivated:")
                + " {accounts}".format(accounts=", ".join(user.full_name for user in inactive_users)),
            )
            inactive_users.update(is_active=True)
        return redirect("staff:semester_view", copied_course.semester_id)
    # single results are not offered for copying
    evaluations = sorted(course.evaluations.exclude(is_single_result=True), key=lambda cr: cr.full_name)
    return render(
        request,
        "staff_course_copyform.html",
        dict(
            course=course,
            evaluations=evaluations,
            semester=semester,
            course_form=course_form,
            editable=True,
            disable_breadcrumb_course=True,
        ),
    )
@manager_required
def course_edit(request, semester_id, course_id):
    """Edit a course; optionally continue to evaluation / single result creation."""
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(Course, id=course_id, semester=semester)
    course_form = CourseForm(request.POST or None, instance=course)
    editable = course.can_be_edited_by_manager
    if request.method == "POST" and not editable:
        raise SuspiciousOperation("Modifying this course is not allowed.")
    operation = request.POST.get("operation")
    if course_form.is_valid():
        if operation not in ("save", "save_create_evaluation", "save_create_single_result"):
            raise SuspiciousOperation("Invalid POST operation")
        if course_form.has_changed():
            course = course_form.save()
            # NOTE(review): presumably published evaluations cache rendered
            # course data, so the cache is refreshed after a change
            update_template_cache_of_published_evaluations_in_course(course)
        messages.success(request, _("Successfully updated course."))
        if operation == "save_create_evaluation":
            return redirect("staff:evaluation_create", semester_id, course.id)
        if operation == "save_create_single_result":
            return redirect("staff:single_result_create", semester_id, course.id)
        return redirect("staff:semester_view", semester.id)
    template_data = dict(
        course=course,
        semester=semester,
        course_form=course_form,
        editable=editable,
        disable_breadcrumb_course=True,
    )
    return render(request, "staff_course_form.html", template_data)
@require_POST
@manager_required
def course_delete(request):
    """Delete the POSTed course if the manager is allowed to do so."""
    course = get_object_or_404(Course, id=request.POST.get("course_id"))
    if not course.can_be_deleted_by_manager:
        raise SuspiciousOperation("Deleting course not allowed")
    course.delete()
    return HttpResponse()  # 200 OK
@manager_required
def evaluation_create(request, semester_id, course_id=None):
    """Create a new evaluation (with its contributions) in the given semester.

    If course_id is given, the new evaluation is pre-bound to that course.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    if semester.participations_are_archived:
        raise PermissionDenied
    evaluation = Evaluation()
    if course_id:
        # consistent with the other staff views: an unknown or foreign course
        # id answers 404 instead of raising an unhandled Course.DoesNotExist
        evaluation.course = get_object_or_404(Course, id=course_id, semester=semester)
    InlineContributionFormset = inlineformset_factory(
        Evaluation, Contribution, formset=ContributionFormSet, form=ContributionForm, extra=1
    )
    evaluation_form = EvaluationForm(request.POST or None, instance=evaluation, semester=semester)
    formset = InlineContributionFormset(
        request.POST or None, instance=evaluation, form_kwargs={"evaluation": evaluation}
    )
    if evaluation_form.is_valid() and formset.is_valid():
        evaluation = evaluation_form.save()
        formset.save()
        update_template_cache_of_published_evaluations_in_course(evaluation.course)
        messages.success(request, _("Successfully created evaluation."))
        return redirect("staff:semester_view", semester_id)
    return render(
        request,
        "staff_evaluation_form.html",
        dict(
            semester=semester, evaluation_form=evaluation_form, formset=formset, manager=True, editable=True, state=""
        ),
    )
@manager_required
def evaluation_copy(request, semester_id, evaluation_id):
semester = get_object_or_404(Semester, id=semester_id)
evaluation = get_object_or_404(Evaluation, id=evaluation_id, course__semester=semester)
form = EvaluationCopyForm(request.POST or None, evaluation)
InlineContributionFormset = inlineformset_factory(
Evaluation, Contribution, formset=ContributionCopyFormSet, form=ContributionCopyForm, extra=1
)
formset = InlineContributionFormset(request.POST or None, instance=evaluation, new_instance=form.instance)
if form.is_valid() and formset.is_valid():
copied_evaluation = form.save()
formset.save()
update_template_cache_of_published_evaluations_in_course(copied_evaluation.course)
messages.success(request, _("Successfully created evaluation."))
return redirect("staff:semester_view", semester_id)
return render(
request,
"staff_evaluation_form.html",
dict(
semester=semester,
evaluation_form=form,
formset=formset,
| |
import attr
import datetime as dt
import geojson
import numpy as np
import shapely
from faker import Faker
from functools import partial
from random import Random
from shapely.geometry import Point, Polygon, MultiPolygon
from .base import TohuBaseGenerator, SeedGenerator
from .item_list import ItemList
from .logging import logger
from .utils import identity
# Public API of this module; some listed names are defined further down the file.
__all__ = ['Boolean', 'CharString', 'Constant', 'DigitString', 'FakerGenerator', 'Float', 'GeoJSONGeolocation',
           'HashDigest', 'Integer', 'IterateOver', 'NumpyRandomGenerator', 'SelectOnePrimitive',
           'SelectMultiplePrimitive', 'Sequential', 'Timestamp', 'as_tohu_generator']
# Marker base class: adds no behaviour of its own beyond TohuBaseGenerator.
class PrimitiveGenerator(TohuBaseGenerator):
    """
    Base class for all primitive generators
    """
class Constant(PrimitiveGenerator):
    """
    Generator which produces a constant sequence (repeating the same value indefinitely).
    """

    def __init__(self, value):
        """
        Parameters
        ----------
        value:
            The constant value produced by this generator.
        """
        super().__init__()
        self.value = value

    def reset(self, seed=None):
        # the seed is irrelevant for a constant generator; only the base
        # class bookkeeping needs resetting
        super().reset(seed)
        return self

    def __next__(self):
        return self.value

    def spawn(self):
        # an independent copy producing the same constant
        return Constant(self.value)

    def _set_random_state_from(self, other):
        # no random state to copy
        pass
class Boolean(PrimitiveGenerator):
    """
    Generator which produces random boolean values (True or False).
    """

    def __init__(self, p=0.5):
        """
        Parameters
        ----------
        p: float
            The probability that True is returned. Must be between 0.0 and 1.0.
        """
        super().__init__()
        self.p = p
        self.randgen = Random()

    def reset(self, seed):
        """Seed both the base class machinery and the internal RNG."""
        super().reset(seed)
        self.randgen.seed(seed)
        return self

    def __next__(self):
        # random() is uniform on [0, 1), so this is True with probability p
        return self.randgen.random() < self.p

    def spawn(self):
        """Create an independent clone that continues from the same RNG state."""
        clone = Boolean(self.p)
        clone._set_random_state_from(self)
        return clone

    def _set_random_state_from(self, other):
        # adopt the other generator's RNG state so both produce the same sequence
        self.randgen.setstate(other.randgen.getstate())
class Integer(PrimitiveGenerator):
    """
    Generator which produces random integers k in the range low <= k <= high.
    """

    def __init__(self, low, high):
        """
        Parameters
        ----------
        low: integer
            Lower bound (inclusive).
        high: integer
            Upper bound (inclusive).
        """
        super().__init__()
        self.low = low
        self.high = high
        self.randgen = Random()

    def reset(self, seed):
        """Seed both the base class machinery and the internal RNG."""
        super().reset(seed)
        self.randgen.seed(seed)
        return self

    def __next__(self):
        # randint() includes both end points
        return self.randgen.randint(self.low, self.high)

    def spawn(self):
        """Create an independent clone that continues from the same RNG state."""
        clone = Integer(self.low, self.high)
        clone._set_random_state_from(self)
        return clone

    def _set_random_state_from(self, other):
        # adopt the other generator's RNG state so both produce the same sequence
        self.randgen.setstate(other.randgen.getstate())
class Float(PrimitiveGenerator):
    """
    Generator which produces random floating point numbers x in the range low <= x <= high.
    """

    def __init__(self, low, high):
        """
        Parameters
        ----------
        low: float
            Lower bound (inclusive).
        high: float
            Upper bound (inclusive).
        """
        super().__init__()
        self.low = low
        self.high = high
        self.randgen = Random()

    def reset(self, seed):
        super().reset(seed)
        self.randgen.seed(seed)
        return self

    def __next__(self):
        return self.randgen.uniform(self.low, self.high)

    def spawn(self):
        clone = Float(self.low, self.high)
        clone._set_random_state_from(self)
        return clone

    def _set_random_state_from(self, other):
        self.randgen.setstate(other.randgen.getstate())
# Pre-defined character sets that `CharString` accepts by name via its
# `charset` argument (e.g. charset='<digits>').
CHARACTER_SETS = {
    '<alphanumeric>': 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
    '<alphanumeric_lowercase>': 'abcdefghijklmnopqrstuvwxyz0123456789',
    '<alphanumeric_uppercase>': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
    '<lowercase>': 'abcdefghijklmnopqrstuvwxyz',
    '<uppercase>': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
    '<digits>': '0123456789',
}
class CharString(PrimitiveGenerator):
    """
    Generator which produces random character strings of a fixed length.
    """

    def __init__(self, *, length, charset='<alphanumeric>'):
        """
        Parameters
        ----------
        length: integer
            Length of the character strings produced by this generator.
        charset: iterable
            Character set to draw from when generating strings, or string
            with the name of a pre-defined character set.
            Default: <alphanumeric> (both lowercase and uppercase letters).
        """
        super().__init__()
        self.length = length
        if charset in CHARACTER_SETS:
            # Known shorthand such as '<digits>' expands to the full character set.
            logger.debug(f"Using pre-defined character set: '{charset}'")
            self.charset = CHARACTER_SETS[charset]
        else:
            self.charset = charset
        self.seed_generator = SeedGenerator()
        self.char_gen = Random()

    def reset(self, seed):
        super().reset(seed)
        self.seed_generator.reset(seed)
        self.char_gen.seed(next(self.seed_generator))
        return self

    def __next__(self):
        return ''.join(self.char_gen.choices(self.charset, k=self.length))

    def spawn(self):
        clone = CharString(length=self.length, charset=self.charset)
        clone._set_random_state_from(self)
        return clone

    def _set_random_state_from(self, other):
        self.seed_generator._set_random_state_from(other.seed_generator)
        self.char_gen.setstate(other.char_gen.getstate())
class DigitString(CharString):
    """
    Generator which produces random strings consisting solely of digits.
    """

    def __init__(self, *, length=None):
        """
        Parameters
        ----------
        length: integer
            Length of the character strings produced by this generator.
        """
        super().__init__(length=length, charset="0123456789")

    def spawn(self):
        clone = DigitString(length=self.length)
        clone._set_random_state_from(self)
        return clone
class HashDigest(PrimitiveGenerator):
    """
    Generator which produces a sequence of hex strings representing hash digest values.
    """

    def __init__(self, *, length=None, as_bytes=False, uppercase=True):
        """
        Parameters
        ----------
        length: integer
            Length of the character strings produced by this generator.
        as_bytes: bool
            If True, return `length` random bytes. If False, return a string of `length`
            characters with a hexadecimal representation of `length/2` random bytes.
            Note that in the second case `length` must be an even number.
        uppercase: bool
            If True (the default), return hex string using uppercase letters, otherwise lowercase.
            This only has an effect if `as_bytes=False`.

        Raises
        ------
        ValueError
            If `as_bytes=False` and `length` is odd.
        """
        super().__init__()
        self.length = length
        # Validate before deriving the number of random bytes.
        if not as_bytes and (length % 2) != 0:
            raise ValueError(
                f"Length must be an even number if as_bytes=False because it "
                f"represents length = 2 * num_random_bytes. Got: length={length})")
        # Bugfix: use integer division. In Python 3 `length / 2` yields a float,
        # and np.random.RandomState.bytes() requires an integer argument.
        self._internal_length = length if as_bytes else length // 2
        self.as_bytes = as_bytes
        self.uppercase = uppercase
        self.randgen = np.random.RandomState()
        # Pre-select the post-processing steps once so __next__ stays branch-free.
        self._maybe_convert_to_hex = identity if self.as_bytes else bytes.hex
        self._maybe_convert_to_uppercase = identity if (self.as_bytes or not uppercase) else str.upper

    def reset(self, seed):
        super().reset(seed)
        self.randgen.seed(seed)
        return self

    def __next__(self):
        val = self.randgen.bytes(self._internal_length)
        return self._maybe_convert_to_uppercase(self._maybe_convert_to_hex(val))

    def spawn(self):
        new_obj = HashDigest(length=self.length, as_bytes=self.as_bytes, uppercase=self.uppercase)
        new_obj._set_random_state_from(self)
        return new_obj

    def _set_random_state_from(self, other):
        self.randgen.set_state(other.randgen.get_state())
class Sequential(PrimitiveGenerator):
    """
    Generator which produces a sequence of strings of the form:

        "PREFIX001"
        "PREFIX002"
        "PREFIX003"
        ...

    Both the prefix and the number of digits can be modified by the user.

    Example:
        >>> s = Sequential(prefix="Foobar_", digits=4)
        >>> next(s)
        Foobar_0001
        >>> next(s)
        Foobar_0002
        >>> next(s)
        Foobar_0003
    """

    def __init__(self, *, prefix, digits):
        """
        Parameters
        ----------
        prefix: string
            Prefix to be appended to generated elements.
        digits: integer
            Number of digits to use for the sequential numbering.
            Any numbers with fewer digits will be zero-padded;
            numbers with more digits are unaffected.
        """
        super().__init__()
        self.prefix = prefix
        self.digits = digits
        # Build the format template once, e.g. digits=4 -> "<prefix>{:04}".
        self.fmt_str = prefix + '{:0' + str(digits) + '}'
        self.cnt = 0

    def reset(self, seed=None):
        """
        Note that this method supports the `seed` argument (for consistency with other generators),
        but its value is ignored - the generator is simply reset to its initial value.
        """
        super().reset(seed)
        self.cnt = 0
        return self

    def __next__(self):
        self.cnt += 1
        return self.fmt_str.format(self.cnt)

    def spawn(self):
        clone = Sequential(prefix=self.prefix, digits=self.digits)
        clone._set_random_state_from(self)
        return clone

    def _set_random_state_from(self, other):
        self.cnt = other.cnt
class NumpyRandomGenerator(TohuBaseGenerator):
    """
    Generator which produces random numbers using one of the methods supported by numpy. [1]

    [1] https://docs.scipy.org/doc/numpy/reference/routines.random.html
    """

    def __init__(self, method, **numpy_args):
        """
        Parameters
        ----------
        method: string
            Name of the numpy function to use (see [1] for details)
        numpy_args:
            Remaining arguments passed to the numpy function (see [1] for details)

        References
        ----------
        [1] https://docs.scipy.org/doc/numpy/reference/routines.random.html
        """
        super().__init__()
        self.method = method
        self.numpy_args = numpy_args
        # Dedicated RandomState keeps us decoupled from numpy's global RNG;
        # the sampling method is looked up once and bound here.
        self.random_state = np.random.RandomState()
        self.randgen = getattr(self.random_state, method)

    def reset(self, seed):
        super().reset(seed)
        self.random_state.seed(seed)
        return self

    def __next__(self):
        return self.randgen(**self.numpy_args)

    def spawn(self):
        clone = NumpyRandomGenerator(method=self.method, **self.numpy_args)
        clone._set_random_state_from(self)
        return clone

    def _set_random_state_from(self, other):
        self.random_state.set_state(other.random_state.get_state())
class FakerGenerator(PrimitiveGenerator):
    """
    Generator which produces random elements using one of the methods supported by faker. [1]

    [1] https://faker.readthedocs.io/
    """

    def __init__(self, method, *, locale=None, **faker_args):
        """
        Parameters
        ----------
        method: string
            Name of the faker provider to use (see [1] for details)
        locale: string
            Locale to use when generating data, e.g. 'en_US' (see [1] for details)
        faker_args:
            Remaining arguments passed to the faker provider (see [1] for details)

        References
        ----------
        [1] https://faker.readthedocs.io/
        """
        super().__init__()
        self.method = method
        self.locale = locale
        self.faker_args = faker_args
        self.fake = Faker(locale=locale)
        self.randgen = getattr(self.fake, method)
        # Seed the instance so we are decoupled from the global random state.
        self.fake.seed_instance(None)

    def reset(self, seed):
        super().reset(seed)
        self.fake.seed_instance(seed)
        return self

    def __next__(self):
        return self.randgen(**self.faker_args)

    def spawn(self):
        clone = FakerGenerator(self.method, locale=self.locale, **self.faker_args)
        clone._set_random_state_from(self)
        return clone

    def _set_random_state_from(self, other):
        self.fake.random.setstate(other.fake.random.getstate())
class IterateOver(PrimitiveGenerator):
    """
    Generator which simply iterates over all items in a given iterable
    """

    def __init__(self, seq):
        if not isinstance(seq, (list, tuple, ItemList, str)):
            raise TypeError(
                f"For the time being 'seq' must be a list, tuple, ItemList or string "
                f"so that we can reproducibly spawn and reset this generator. Got: {seq}")
        super().__init__()
        self.seq = seq
        # Iterating via an explicit index (rather than an iterator object) lets
        # us copy the position over when spawning, for reproducibility.
        self.idx = 0
        self.reset()

    def __repr__(self):
        return f"<IterateOver, list with {len(self.seq)} items>"

    def __iter__(self):
        return self

    def __next__(self):
        # Explicit bounds check instead of catching IndexError: same observable behaviour.
        if self.idx >= len(self.seq):
            raise StopIteration()
        item = self.seq[self.idx]
        self.idx += 1
        return item

    def reset(self, seed=None):
        super().reset(seed)
        self.idx = 0
        return self

    def spawn(self):
        clone = IterateOver(self.seq)
        clone._set_random_state_from(self)
        return clone

    def _set_random_state_from(self, other):
        self.idx = other.idx
class SelectOnePrimitive(PrimitiveGenerator):
"""
Generator which produces a sequence of items taken from a given set of elements.
"""
def __init__(self, values, p=None):
"""
Parameters
----------
values: list
List of options from which to choose elements.
p: list, optional
The probabilities associated with each element in `values`.
If not given the assumes a uniform distribution over all values.
"""
super().__init__()
self.values = list(values) # need to convert to a list so that numpy.random.RandomState() doesn't get confused
self.p = p
self.randgen = | |
import logging
import os
import threading
from galaxy import util
from tool_shed.util import common_util
from tool_shed.util import container_util
from tool_shed.util import readme_util
from tool_shed.utility_containers import utility_container_manager
log = logging.getLogger( __name__ )
class FailedTest( object ):
    """Value object describing a single failed tool test."""
    def __init__( self, id=None, stderr=None, test_id=None, tool_id=None, tool_version=None, traceback=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id,
                              stderr=stderr,
                              test_id=test_id,
                              tool_id=tool_id,
                              tool_version=tool_version,
                              traceback=traceback )
class InvalidRepositoryDependency( object ):
    """Value object describing an invalid repository dependency definition."""
    def __init__( self, id=None, toolshed=None, repository_name=None, repository_owner=None, changeset_revision=None,
                  prior_installation_required=False, only_if_compiling_contained_td=False, error=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id,
                              toolshed=toolshed,
                              repository_name=repository_name,
                              repository_owner=repository_owner,
                              changeset_revision=changeset_revision,
                              prior_installation_required=prior_installation_required,
                              only_if_compiling_contained_td=only_if_compiling_contained_td,
                              error=error )
class InvalidToolDependency( object ):
    """Value object describing an invalid tool dependency definition."""
    def __init__( self, id=None, name=None, version=None, type=None, error=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id, name=name, version=version, type=type, error=error )
class MissingTestComponent( object ):
    """Value object describing missing tool test components."""
    def __init__( self, id=None, missing_components=None, tool_guid=None, tool_id=None, tool_version=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id,
                              missing_components=missing_components,
                              tool_guid=tool_guid,
                              tool_id=tool_id,
                              tool_version=tool_version )
class NotTested( object ):
    """Value object recording why a revision was not tested."""
    def __init__( self, id=None, reason=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id, reason=reason )
class PassedTest( object ):
    """Value object describing a single passed tool test."""
    def __init__( self, id=None, test_id=None, tool_id=None, tool_version=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id, test_id=test_id, tool_id=tool_id, tool_version=tool_version )
class RepositoryInstallationError( object ):
    """Value object describing a repository installation error."""
    def __init__( self, id=None, tool_shed=None, name=None, owner=None, changeset_revision=None, error_message=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id,
                              tool_shed=tool_shed,
                              name=name,
                              owner=owner,
                              changeset_revision=changeset_revision,
                              error_message=error_message )
class RepositorySuccessfulInstallation( object ):
    """Value object describing a successful repository installation."""
    def __init__( self, id=None, tool_shed=None, name=None, owner=None, changeset_revision=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id,
                              tool_shed=tool_shed,
                              name=name,
                              owner=owner,
                              changeset_revision=changeset_revision )
class TestEnvironment( object ):
    """Value object describing the environment in which tool tests were run."""
    def __init__( self, id=None, architecture=None, galaxy_database_version=None, galaxy_revision=None, python_version=None, system=None, time_tested=None,
                  tool_shed_database_version=None, tool_shed_mercurial_version=None, tool_shed_revision=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id,
                              architecture=architecture,
                              galaxy_database_version=galaxy_database_version,
                              galaxy_revision=galaxy_revision,
                              python_version=python_version,
                              system=system,
                              time_tested=time_tested,
                              tool_shed_database_version=tool_shed_database_version,
                              tool_shed_mercurial_version=tool_shed_mercurial_version,
                              tool_shed_revision=tool_shed_revision )
class ToolDependencyInstallationError( object ):
    """Value object describing a tool dependency installation error."""
    def __init__( self, id=None, type=None, name=None, version=None, error_message=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id, type=type, name=name, version=version, error_message=error_message )
class ToolDependencySuccessfulInstallation( object ):
    """Value object describing a successful tool dependency installation."""
    def __init__( self, id=None, type=None, name=None, version=None, installation_directory=None ):
        # Mirror every constructor argument onto a same-named instance attribute.
        self.__dict__.update( id=id,
                              type=type,
                              name=name,
                              version=version,
                              installation_directory=installation_directory )
class ToolShedUtilityContainerManager( utility_container_manager.UtilityContainerManager ):
    def __init__( self, app ):
        """Store a reference to the application object this manager operates on."""
        self.app = app
def build_invalid_repository_dependencies_root_folder( self, folder_id, invalid_repository_dependencies_dict ):
"""Return a folder hierarchy containing invalid repository dependencies."""
label = 'Invalid repository dependencies'
if invalid_repository_dependencies_dict:
invalid_repository_dependency_id = 0
folder_id += 1
invalid_repository_dependencies_root_folder = \
utility_container_manager.Folder( id=folder_id,
key='root',
label='root',
parent=None )
folder_id += 1
invalid_repository_dependencies_folder = \
utility_container_manager.Folder( id=folder_id,
key='invalid_repository_dependencies',
label=label,
parent=invalid_repository_dependencies_root_folder )
invalid_repository_dependencies_root_folder.folders.append( invalid_repository_dependencies_folder )
invalid_repository_dependencies = invalid_repository_dependencies_dict[ 'repository_dependencies' ]
for invalid_repository_dependency in invalid_repository_dependencies:
folder_id += 1
invalid_repository_dependency_id += 1
toolshed, name, owner, changeset_revision, prior_installation_required, only_if_compiling_contained_td, error = \
common_util.parse_repository_dependency_tuple( invalid_repository_dependency, contains_error=True )
key = container_util.generate_repository_dependencies_key_for_repository( toolshed,
name,
owner,
changeset_revision,
prior_installation_required,
only_if_compiling_contained_td )
label = "Repository <b>%s</b> revision <b>%s</b> owned by <b>%s</b>" % ( name, changeset_revision, owner )
folder = utility_container_manager.Folder( id=folder_id,
key=key,
label=label,
parent=invalid_repository_dependencies_folder )
ird = InvalidRepositoryDependency( id=invalid_repository_dependency_id,
toolshed=toolshed,
repository_name=name,
repository_owner=owner,
changeset_revision=changeset_revision,
prior_installation_required=util.asbool( prior_installation_required ),
only_if_compiling_contained_td=util.asbool( only_if_compiling_contained_td ),
error=error )
folder.invalid_repository_dependencies.append( ird )
invalid_repository_dependencies_folder.folders.append( folder )
else:
invalid_repository_dependencies_root_folder = None
return folder_id, invalid_repository_dependencies_root_folder
def build_invalid_tool_dependencies_root_folder( self, folder_id, invalid_tool_dependencies_dict ):
"""Return a folder hierarchy containing invalid tool dependencies."""
# # INvalid tool dependencies are always packages like:
# {"R/2.15.1": {"name": "R", "readme": "some string", "type": "package", "version": "2.15.1" "error" : "some sting" }
label = 'Invalid tool dependencies'
if invalid_tool_dependencies_dict:
invalid_tool_dependency_id = 0
folder_id += 1
invalid_tool_dependencies_root_folder = \
utility_container_manager.Folder( id=folder_id, key='root', label='root', parent=None )
folder_id += 1
invalid_tool_dependencies_folder = \
utility_container_manager.Folder( id=folder_id,
key='invalid_tool_dependencies',
label=label,
parent=invalid_tool_dependencies_root_folder )
invalid_tool_dependencies_root_folder.folders.append( invalid_tool_dependencies_folder )
for td_key, requirements_dict in invalid_tool_dependencies_dict.items():
folder_id += 1
invalid_tool_dependency_id += 1
try:
name = requirements_dict[ 'name' ]
type = requirements_dict[ 'type' ]
version = requirements_dict[ 'version' ]
error = requirements_dict[ 'error' ]
except Exception, e:
name = 'unknown'
type = 'unknown'
version = 'unknown'
error = str( e )
key = self.generate_tool_dependencies_key( name, version, type )
label = "Version <b>%s</b> of the <b>%s</b> <b>%s</b>" % ( version, name, type )
folder = utility_container_manager.Folder( id=folder_id,
key=key,
label=label,
parent=invalid_tool_dependencies_folder )
itd = InvalidToolDependency( id=invalid_tool_dependency_id,
name=name,
version=version,
type=type,
error=error )
folder.invalid_tool_dependencies.append( itd )
invalid_tool_dependencies_folder.folders.append( folder )
else:
invalid_tool_dependencies_root_folder = None
return folder_id, invalid_tool_dependencies_root_folder
def build_repository_containers( self, repository, changeset_revision, repository_dependencies, repository_metadata,
exclude=None ):
"""
Return a dictionary of containers for the received repository's dependencies and
contents for display in the Tool Shed.
"""
if exclude is None:
exclude = []
containers_dict = dict( datatypes=None,
invalid_tools=None,
readme_files=None,
repository_dependencies=None,
tool_dependencies=None,
valid_tools=None,
workflows=None,
valid_data_managers=None
)
if repository_metadata:
metadata = repository_metadata.metadata
lock = threading.Lock()
lock.acquire( True )
try:
folder_id = 0
# Datatypes container.
if metadata:
if 'datatypes' not in exclude and 'datatypes' in metadata:
datatypes = metadata[ 'datatypes' ]
folder_id, datatypes_root_folder = self.build_datatypes_folder( folder_id, datatypes )
containers_dict[ 'datatypes' ] = datatypes_root_folder
# Invalid repository dependencies container.
if metadata:
if 'invalid_repository_dependencies' not in exclude and 'invalid_repository_dependencies' in metadata:
invalid_repository_dependencies = metadata[ 'invalid_repository_dependencies' ]
folder_id, invalid_repository_dependencies_root_folder = \
self.build_invalid_repository_dependencies_root_folder( folder_id,
invalid_repository_dependencies )
containers_dict[ 'invalid_repository_dependencies' ] = invalid_repository_dependencies_root_folder
# Invalid tool dependencies container.
if metadata:
if 'invalid_tool_dependencies' not in exclude and 'invalid_tool_dependencies' in metadata:
invalid_tool_dependencies = metadata[ 'invalid_tool_dependencies' ]
folder_id, invalid_tool_dependencies_root_folder = \
self.build_invalid_tool_dependencies_root_folder( folder_id,
invalid_tool_dependencies )
containers_dict[ 'invalid_tool_dependencies' ] = invalid_tool_dependencies_root_folder
# Invalid tools container.
if metadata:
if 'invalid_tools' not in exclude and 'invalid_tools' in metadata:
invalid_tool_configs = metadata[ 'invalid_tools' ]
folder_id, invalid_tools_root_folder = \
self.build_invalid_tools_folder( folder_id,
invalid_tool_configs,
changeset_revision,
repository=repository,
label='Invalid tools' )
containers_dict[ 'invalid_tools' ] = invalid_tools_root_folder
# Readme files container.
if metadata:
if 'readme_files' not in exclude and 'readme_files' in metadata:
readme_files_dict = readme_util.build_readme_files_dict( self.app, repository, changeset_revision, metadata )
folder_id, readme_files_root_folder = self.build_readme_files_folder( folder_id, readme_files_dict )
containers_dict[ 'readme_files' ] = readme_files_root_folder
if 'repository_dependencies' not in exclude:
# Repository dependencies container.
folder_id, repository_dependencies_root_folder = \
self.build_repository_dependencies_folder( folder_id=folder_id,
repository_dependencies=repository_dependencies,
label='Repository dependencies',
installed=False )
if repository_dependencies_root_folder:
containers_dict[ 'repository_dependencies' ] = repository_dependencies_root_folder
# Tool dependencies container.
if metadata:
if 'tool_dependencies' not in exclude and 'tool_dependencies' in metadata:
tool_dependencies = metadata[ 'tool_dependencies' ]
if 'orphan_tool_dependencies' in metadata:
# The use of the orphan_tool_dependencies category in metadata has been deprecated,
# but we still need to check in case the metadata is out of date.
orphan_tool_dependencies = metadata[ 'orphan_tool_dependencies' ]
tool_dependencies.update( orphan_tool_dependencies )
# Tool dependencies can be categorized as orphans only if the repository contains tools.
if 'tools' not in exclude:
tools = metadata.get( 'tools', [] )
tools.extend( metadata.get( 'invalid_tools', [] ) )
folder_id, tool_dependencies_root_folder = \
self.build_tool_dependencies_folder( folder_id,
tool_dependencies,
missing=False,
new_install=False )
containers_dict[ 'tool_dependencies' ] = tool_dependencies_root_folder
# Valid tools container.
if metadata:
if 'tools' not in exclude and 'tools' in metadata:
valid_tools = metadata[ 'tools' ]
folder_id, valid_tools_root_folder = self.build_tools_folder( folder_id,
valid_tools,
repository,
changeset_revision,
label='Valid tools' )
containers_dict[ 'valid_tools' ] = valid_tools_root_folder
# Tool test results container.
tool_test_results = util.listify( repository_metadata.tool_test_results )
# Only create and populate this folder if there are actual tool test results to display.
if self.can_display_tool_test_results( tool_test_results, exclude=exclude ):
folder_id, tool_test_results_root_folder = \
self.build_tool_test_results_folder( folder_id,
tool_test_results,
label='Tool test results' )
containers_dict[ 'tool_test_results' ] = tool_test_results_root_folder
# Workflows container.
if metadata:
if 'workflows' not in exclude and 'workflows' in metadata:
workflows = metadata[ 'workflows' ]
folder_id, workflows_root_folder = \
self.build_workflows_folder( folder_id=folder_id,
workflows=workflows,
repository_metadata_id=repository_metadata.id,
repository_id=None,
label='Workflows' )
containers_dict[ 'workflows' ] = workflows_root_folder
# Valid Data Managers container
if metadata:
if 'data_manager' not in exclude and 'data_manager' in metadata:
data_managers = metadata['data_manager'].get( 'data_managers', None )
folder_id, data_managers_root_folder = \
self.build_data_managers_folder( folder_id, data_managers, label="Data Managers" )
containers_dict[ 'valid_data_managers' ] = data_managers_root_folder
error_messages = metadata['data_manager'].get( 'error_messages', None )
data_managers = metadata['data_manager'].get( 'invalid_data_managers', None )
folder_id, data_managers_root_folder = \
self.build_invalid_data_managers_folder( folder_id,
data_managers,
error_messages,
label="Invalid Data Managers" )
containers_dict[ 'invalid_data_managers' ] = data_managers_root_folder
except Exception, e:
log.exception( "Exception in build_repository_containers: %s" % str( e ) )
| |
# Hamiltonian Neural Networks | 2019
# <NAME>, <NAME>, <NAME>
import torch, argparse
import numpy as np
import os, sys
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(PARENT_DIR)
from nn_models import MLP, MLP_sgp4
from hnn import HNN
from data import get_dataset, get_sgp4_orbit, coords2state_sgp4
from utils import L2_loss, to_pickle, from_pickle, get_model_parm_nums
from tensorboardX import SummaryWriter # importing tensorboard
import random
import math
# for integrating a vector field parameterized by a NN or HNN
def model_update(t, state, model, device):
    """
    Vector-field callback for an ODE integrator: compute the time derivative
    of `state` using `model.time_derivative`.

    `state` is a flat array reshaped to (bodies, 7); column 0 (mass) is dropped
    before being fed to the model and its derivative is left at zero.
    Returns the derivative flattened back to 1-D.
    """
    state = state.reshape(-1, 7)
    deriv = np.zeros_like(state)
    # Drop the mass column, transpose to coordinate-major order and flatten
    # into a single batch row for the network.
    coords = state[:, 1:].T.flatten()[None, :]
    x = torch.tensor(coords, requires_grad=True, dtype=torch.float32).to(device)
    dx_hat = model.time_derivative(x)
    # Undo the flattening: back to (bodies, coords) layout; mass derivative stays 0.
    deriv[:, 1:] = dx_hat.detach().data.cpu().numpy().reshape(6, 2).T
    return deriv.reshape(-1)
def get_args():
    """Build the command-line parser for this training script and parse sys.argv."""
    parser = argparse.ArgumentParser(description=None)
    add = parser.add_argument
    add('--hidden_dim', default=200, type=int, help='hidden dimension of mlp')
    add('--learn_rate', default=1e-3, type=float, help='learning rate')
    add('--batch_size', default=200, type=int, help='batch_size')
    # Bugfix: this option previously used type=int, which rejects fractional
    # values such as 0.05 even though the default (0.0) and the noise sweep in
    # train() are floats. type=float still accepts integer-looking input.
    add('--input_noise', default=0.0, type=float, help='std of noise added to inputs')
    add('--nonlinearity', default='tanh', type=str, help='neural net nonlinearity')
    add('--total_steps', default=10000, type=int, help='number of gradient steps')
    add('--print_every', default=1, type=int, help='number of gradient steps between prints')
    add('--name', default='2body', type=str, help='only one option right now')
    add('--baseline', dest='baseline', action='store_true', help='run baseline or experiment?')
    add('--verbose', dest='verbose', action='store_true', help='verbose?')
    add('--field_type', default='solenoidal', type=str, help='type of vector field to learn (solenoidal or conservative)')
    add('--seed', default=0, type=int, help='random seed')
    add('--exp_dir', default=THIS_DIR, type=str, help='where to save the trained model')
    add('--gpu_enable', dest='gpu_enable', action='store_true', help='include if gpu is to be used')
    add('--gpu_select', default=0, type=int, help='select which gpu to use')
    add('--satellite_problem', dest='satellite_problem', action='store_true', help='set scenario to be Satellite Problem instead of Two-Body Problem as demonstrated in the paper')
    add('--data_percentage_usage', default=1, type=float, help='percentage of data to use (eg. 1 means all data)')
    add('--save_best_weights', dest='save_best_weights', action='store_true', help='to save weight if result is better than before')
    add('--epoch', default=1, type=int, help='epoch for satellite_problem')
    # NOTE(review): the 'feature' default does not correspond to any declared
    # option above — confirm whether it is still used by callers.
    parser.set_defaults(feature=True)
    return parser.parse_args()
def train(args, save_dir, tb, label, test_split=0.2):
# set random seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# initialise device
device = torch.device('cuda:' + str(args.gpu_select) if args.gpu_enable else 'cpu')
# parameters to test
hidden_dim_list = [200, 300]
learn_rate_list = [1e-4, 1e-3, 1e-2]
#batch_size_list = [300]
input_noise_list = [0, 0.05, 0.1]
layers_list = [3, 4]
# initialise infinity loss first
best_test_loss = float('inf')
trial_no = 0
for hidden_dim in hidden_dim_list:
for learn_rate in learn_rate_list:
for input_noise in input_noise_list:
for layers in layers_list:
trial_no = trial_no + 1
# init model and optimizer
if args.verbose:
print("Training baseline model:" if args.baseline else "Training HNN model:")
print("hidden_dim: " + str(hidden_dim) + ", learn_rate: " + str(learn_rate) + ", input_noise: " + str(input_noise) + ", layers: " + str(layers))
if args.satellite_problem:
output_dim = 2*6 if args.baseline else 2
if layers == 3:
nn_model = MLP_sgp4(2*6, hidden_dim, output_dim, args.nonlinearity).to(device)
else:
nn_model = MLP(2*6, hidden_dim, output_dim, args.nonlinearity).to(device)
model = HNN(2*6, differentiable_model=nn_model,
field_type=args.field_type, baseline=args.baseline, device = device)
else:
output_dim = 2*4 if args.baseline else 2
nn_model = MLP(2*4, hidden_dim, output_dim, args.nonlinearity).to(device)
model = HNN(2*4, differentiable_model=nn_model,
field_type=args.field_type, baseline=args.baseline, device = device)
num_parm = get_model_parm_nums(model)
print('model contains {} parameters'.format(num_parm))
optim = torch.optim.Adam(model.parameters(), learn_rate, weight_decay=0)
# arrange data
data = get_dataset(args.name, args.exp_dir, satellite_problem = args.satellite_problem, data_percentage_usage = args.data_percentage_usage, verbose=True)
if args.satellite_problem:
x = torch.tensor( data[0]['coords'], requires_grad=True, dtype=torch.float32).to(device)
# Each line of 'x' is in the form (qx1, qx2, qy1, qy2, px1, px2, py1, py2) in original 2-body experiment and (qx1, qx2, qy1, qy2, qz1, qz2, px1, px2, py1, py2, pz1, pz2) in the satellite-problem experiment
test_x = torch.tensor( data[0]['test_coords'], requires_grad=True, dtype=torch.float32).to(device)
dxdt = torch.Tensor(data[0]['dcoords']).to(device)
test_dxdt = torch.Tensor(data[0]['test_dcoords']).to(device)
# a list of lengths of every trajectory
lengths = data[1]['lengths']
print('lengths: ', len(lengths))
else:
x = torch.tensor( data['coords'], requires_grad=True, dtype=torch.float32).to(device)
# Each line of 'x' is in the form (qx1, qx2, qy1, qy2, px1, px2, py1, py2) in original 2-body experiment and (qx1, qx2, qy1, qy2, qz1, qz2, px1, px2, py1, py2, pz1, pz2) in the satellite-problem experiment
test_x = torch.tensor( data['test_coords'], requires_grad=True, dtype=torch.float32).to(device)
dxdt = torch.Tensor(data['dcoords']).to(device)
test_dxdt = torch.Tensor(data['test_dcoords']).to(device)
stats = {'train_loss': [], 'test_loss': []}
if not args.satellite_problem:
for step in range(args.total_steps+1):
# train step
# 'torch.randperm(x.shape[0])' randomizes array index (shuffling) and '[:args.batch_size]' slices the first 'batch_size' array index for training
ixs = torch.randperm(x.shape[0])[:args.batch_size]
dxdt_hat = model.time_derivative(x[ixs])
dxdt_hat += input_noise * torch.randn(*x[ixs].shape).to(device) # add noise, maybe
# if args.verbose and step % args.print_every == 0:
# print('\nExample Training Ground Truth: ', dxdt[ixs][0])
# print('\nExample Training Prediction: ', dxdt_hat[0])
print(dxdt_hat.shape)
loss = L2_loss(dxdt[ixs], dxdt_hat)
loss.backward()
grad = torch.cat([p.grad.flatten() for p in model.parameters()]).clone()
optim.step() ; optim.zero_grad()
# run test data
test_ixs = torch.randperm(test_x.shape[0])[:args.batch_size]
test_dxdt_hat = model.time_derivative(test_x[test_ixs])
test_dxdt_hat += input_noise * torch.randn(*test_x[test_ixs].shape).to(device) # add noise, maybe
# if args.verbose and step % args.print_every == 0:
# print('\nExample Testing Ground Truth: ', test_dxdt[test_ixs][0])
# print('\nExample Testing Prediction: ', test_dxdt_hat[0])
test_loss = L2_loss(test_dxdt[test_ixs], test_dxdt_hat)
# logging
stats['train_loss'].append(loss.item())
stats['test_loss'].append(test_loss.item())
tb.add_scalar('Train loss vs Steps', loss.item(), step)
tb.add_scalar('Test loss vs Steps', test_loss.item(), step)
if args.verbose and step % args.print_every == 0:
print("\nstep {}, train_loss {:.4e}, test_loss {:.4e}, grad norm {:.4e}, grad std {:.4e}"
.format(step, loss.item(), test_loss.item(), grad@grad, grad.std()))
if args.save_best_weights and step % args.print_every == 0:
if test_loss.item() < best_test_loss:
path = '{}/{}-orbits-{}-step-{}.tar'.format(save_dir, args.name, label, step)
torch.save(model.state_dict(), path)
best_test_loss = test_loss.item()
train_dxdt_hat = model.time_derivative(x)
train_dist = (dxdt - train_dxdt_hat)**2
test_dxdt_hat = model.time_derivative(test_x)
test_dist = (test_dxdt - test_dxdt_hat)**2
print('\nFinal train loss {:.4e} +/- {:.4e}\nFinal test loss {:.4e} +/- {:.4e}\n'
.format(train_dist.mean().item(), train_dist.std().item()/np.sqrt(train_dist.shape[0]),
test_dist.mean().item(), test_dist.std().item()/np.sqrt(test_dist.shape[0])))
else:
train_lengths_num = math.ceil(len(lengths) - len(lengths)*test_split)
test_lengths_num = math.floor(len(lengths)*test_split)
train_trajectory_start = test_lengths_num
step = 0
for epoch_no in range(args.epoch):
for trajectory_no in range(train_lengths_num):
#trajectory_no = 0
# train step for position
train_trajectory_end = train_trajectory_start + lengths[test_lengths_num + trajectory_no]
trajectory_states = x[train_trajectory_start:train_trajectory_end, :]
print(train_trajectory_start)
print(train_trajectory_end)
# t_points = 2
# t_span = [1, t_points]
# initial_coord = x[train_trajectory_start]
# state = coords2state_sgp4(initial_coord)
# update_fn = lambda t, y0: model_update(t, y0, model, device)
# hnn_orbit, settings = get_sgp4_orbit(state, t_points=t_points, t_span=t_span, update_fn=update_fn)
# hnn_orbit_pos = hnn_orbit[0].T[:, [1, 2, 3]]
# next_true_pos = trajectory_states[:, [0, 2, 4]]
# print('Train Next True Pos: ', next_true_pos[1, :])
# next_predicted_pos = torch.tensor(hnn_orbit_pos).to(device)
# print('Train Next Predicted Pos: ', next_predicted_pos[1, :])
# if args.verbose and step % args.print_every == 0:
# print('\nExample Training Ground Truth: ', next_true_pos)
# print('\nExample Training Prediction: ', next_predicted_pos)
# loss_pos = L2_loss(next_true_pos[1, :], next_predicted_pos[1, :])
# print('Train Loss Pos: ', loss_pos.item())
# train step for position derivatives
dxdt_hat = model.time_derivative(x[train_trajectory_start:train_trajectory_end, :])
dxdt_hat += input_noise * torch.randn(*x[train_trajectory_start:train_trajectory_end, :].shape).to(device) # add noise, maybe
#print(dxdt_hat)
# if args.verbose and step % args.print_every == 0:
# print('\nExample Training Ground Truth: ', dxdt[ixs][0])
# print('\nExample Training Prediction: ', dxdt_hat[0])
loss_dxdt = L2_loss(dxdt[train_trajectory_start:train_trajectory_end, :], dxdt_hat)
#print('Train Loss dxdt: ', loss_dxdt.item())
#loss = loss_pos + loss_dxdt
#print('Train Loss: ', loss.item())
loss = loss_dxdt
loss.backward()
grad = torch.cat([p.grad.flatten() for p in model.parameters()]).clone()
optim.step() ; optim.zero_grad()
# run test data
index = random.randint(0, test_lengths_num-1)
test_trajectory_start = sum(lengths[:index])
test_trajectory_end = test_trajectory_start + lengths[index]
# test step for position
# trajectory_states = x[test_trajectory_start:test_trajectory_end, :]
# t_points = test_trajectory_end - test_trajectory_start
# t_span = [1, t_points]
# initial_coord = x[test_trajectory_start]
# state = coords2state_sgp4(initial_coord)
# update_fn = lambda t, y0: model_update(t, y0, model, device)
# hnn_orbit, settings = get_sgp4_orbit(state, t_points=t_points, t_span=t_span, update_fn=update_fn)
# hnn_orbit_pos = hnn_orbit[0].T[:, [1, 2, 3]]
# next_true_pos = trajectory_states[:, [0, 2, 4]]
# #print('Test Next True Pos: ', next_true_pos.shape)
# next_predicted_pos = torch.tensor(hnn_orbit_pos).to(device)
# #print('Test Next Predicted Pos: ', next_predicted_pos.shape)
# # if args.verbose and step % args.print_every == 0:
# # print('\nExample Training Ground Truth: ', next_true_pos)
# # print('\nExample Training Prediction: ', next_predicted_pos)
# test_loss_pos = L2_loss(next_true_pos, next_predicted_pos)
# #print('Test Loss Pos: ', test_loss_pos.item())
# test step for position derivatives
test_dxdt_hat = model.time_derivative(test_x[test_trajectory_start:test_trajectory_end, :])
test_dxdt_hat += input_noise * torch.randn(*test_x[test_trajectory_start:test_trajectory_end, :].shape).to(device) # add noise, maybe
# if args.verbose and step % args.print_every == 0:
# print('\nExample Testing Ground Truth: ', test_dxdt[test_ixs][0])
# print('\nExample Testing Prediction: ', test_dxdt_hat[0])
test_loss_dxdt = L2_loss(test_dxdt[test_trajectory_start:test_trajectory_end, :], test_dxdt_hat)
#print('Test Loss dxdt: ', test_loss_dxdt.item())
#test_loss = test_loss_pos + test_loss_dxdt
test_loss = test_loss_dxdt
#print('Test Loss: ', test_loss.item())
if math.isnan(test_loss.item()) or math.isnan(loss.item()):
print(trajectory_no)
print('lengths: ', len(lengths))
print(train_lengths_num)
print(test_lengths_num)
print(x.shape)
print(test_x.shape)
print(train_trajectory_start)
print(train_trajectory_end)
sys.exit()
# logging
stats['train_loss'].append(loss.item())
stats['test_loss'].append(test_loss.item())
tb.add_scalar('Train loss vs Steps', loss.item(), step)
tb.add_scalar('Test loss vs Steps', test_loss.item(), step)
if | |
( ) :
return ( platform . uname ( ) [ 0 ] == "Darwin" )
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
def lisp_is_alpine():
    """Return True when running on Alpine Linux.

    Detection is based on the presence of the distro marker file
    /etc/alpine-release.
    """
    return os.path.exists("/etc/alpine-release")
if 92 - 92: I11i . I1Ii111
if 85 - 85: I1ii11iIi11i . I1Ii111
if 78 - 78: ooOoO0o * I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1 / I1Ii111 . Ii1I
if 97 - 97: ooOoO0o / I1Ii111 % i1IIi % I1ii11iIi11i
if 18 - 18: iIii1I11I1II1 % I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if 75 - 75: OoooooooOO * IiII
def lisp_is_x86():
    """Return True when the reported CPU architecture is an x86 variant."""
    arch = platform.machine()
    return arch in ("x86", "i686", "x86_64")
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
def lisp_is_linux():
    """Return True when the host operating system is Linux."""
    system_name = platform.uname()[0]
    return system_name == "Linux"
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
if 10 - 10: IiII / OoO0O00 + OoOoOO00 / i1IIi
if 27 - 27: Ii1I
if 67 - 67: I1IiiI
if 55 - 55: I1ii11iIi11i - iII111i * o0oOOo0O0Ooo + OoOoOO00 * OoOoOO00 * O0
if 91 - 91: I1Ii111 - OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o
if 98 - 98: OoO0O00 . OoO0O00 * oO0o * II111iiii * I1Ii111
def lisp_process_logfile():
    """Handle logfile rotation for this component.

    If the component's logfile still exists there is nothing to do.  If an
    external rotator removed it, redirect sys.stdout to a freshly opened
    append-mode logfile and re-print the startup banner.
    """
    logfile = "./logs/lisp-{}.log".format(lisp_log_id)
    if os.path.exists(logfile):
        return

    sys.stdout.close()
    sys.stdout = open(logfile, "a")

    lisp_print_banner(bold("logfile rotation", False))
    return
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
if 87 - 87: ooOoO0o
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
if 20 - 20: i1IIi * I1Ii111 + II111iiii % o0oOOo0O0Ooo % oO0o
def lisp_i_am(name):
    """Record which LISP component ("itr", "etr", "rtr", "mr", "ms", "ddt",
    or "core") this process is.

    Sets the matching module-level lisp_i_am_* flag, remembers the name as
    the log identifier, and caches the short hostname (truncated at the
    first dot).
    """
    global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
    global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
    global lisp_hostname

    lisp_log_id = name
    if name == "itr":
        lisp_i_am_itr = True
    elif name == "etr":
        lisp_i_am_etr = True
    elif name == "rtr":
        lisp_i_am_rtr = True
    elif name == "mr":
        lisp_i_am_mr = True
    elif name == "ms":
        lisp_i_am_ms = True
    elif name == "ddt":
        lisp_i_am_ddt = True
    elif name == "core":
        lisp_i_am_core = True

    # Cache the short hostname (strip the domain part, if any).
    lisp_hostname = socket.gethostname()
    dot = lisp_hostname.find(".")
    if dot != -1:
        lisp_hostname = lisp_hostname[:dot]
    return
if 11 - 11: iII111i - oO0o + II111iiii - iIii1I11I1II1
if 7 - 7: IiII - I11i / II111iiii * Ii1I . iII111i * iII111i
if 61 - 61: I11i % ooOoO0o - OoO0O00 / Oo0Ooo
if 4 - 4: OoooooooOO - i1IIi % Ii1I - OOooOOo * o0oOOo0O0Ooo
if 85 - 85: OoooooooOO * iIii1I11I1II1 . iII111i / OoooooooOO % I1IiiI % O0
if 36 - 36: Ii1I / II111iiii / IiII / IiII + I1ii11iIi11i
if 95 - 95: IiII
def lprint ( * args ) :
    """Log *args to the component logfile, prefixed with a timestamp and
    the component's log id.

    No-op unless debug logging is enabled.  Rotates the logfile first.
    (Python 2 print-statement syntax.)
    """
    if ( lisp_debug_logging == False ) : return
    if 51 - 51: II111iiii + IiII . i1IIi . I1ii11iIi11i + OoOoOO00 * I1IiiI
    lisp_process_logfile ( )
    # Timestamp like "mm/dd/yy HH:MM:SS.mmm" (drop last 3 microsecond digits).
    OOOO0O00o = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    OOOO0O00o = OOOO0O00o [ : - 3 ]
    print "{}: {}:" . format ( OOOO0O00o , lisp_log_id ) ,
    for OOoOoo0 in args : print OOoOoo0 ,
    print ""
    # stdout may have been closed/redirected by logfile rotation; best-effort.
    try : sys . stdout . flush ( )
    except : pass
    return
if 17 - 17: Ii1I + oO0o . OoO0O00 - Oo0Ooo * i11iIiiIii
if 20 - 20: I1IiiI . OoooooooOO % OOooOOo
if 63 - 63: I1IiiI % iIii1I11I1II1
if 39 - 39: iII111i / II111iiii / I1ii11iIi11i % I1IiiI
if 89 - 89: I1Ii111 + OoooooooOO + I1Ii111 * i1IIi + iIii1I11I1II1 % I11i
if 59 - 59: OOooOOo + i11iIiiIii
if 88 - 88: i11iIiiIii - ooOoO0o
if 67 - 67: OOooOOo . Oo0Ooo + OoOoOO00 - OoooooooOO
def dprint(*args):
    """Data-plane logging: forward *args to lprint() only when data-plane
    logging is enabled."""
    if lisp_data_plane_logging:
        lprint(*args)
    return
if 70 - 70: OOooOOo / II111iiii - iIii1I11I1II1 - iII111i
if 11 - 11: iIii1I11I1II1 . OoooooooOO . II111iiii / i1IIi - I11i
if 30 - 30: OoOoOO00
if 21 - 21: i11iIiiIii / I1Ii111 % OOooOOo * O0 . I11i - iIii1I11I1II1
if 26 - 26: II111iiii * OoOoOO00
if 10 - 10: II111iiii . iII111i
if 32 - 32: Ii1I . IiII . OoooooooOO - OoO0O00 + oO0o
if 88 - 88: iII111i
def debug ( * args ) :
    """Unconditionally print *args bracketed by red '>>>' / '<<<' markers.

    Intended for temporary developer debugging; rotates the logfile first.
    (Python 2 print-statement syntax.)
    """
    lisp_process_logfile ( )
    if 19 - 19: II111iiii * IiII + Ii1I
    # Timestamp like "mm/dd/yy HH:MM:SS.mmm" (drop last 3 microsecond digits).
    OOOO0O00o = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    OOOO0O00o = OOOO0O00o [ : - 3 ]
    if 65 - 65: OOooOOo . I1Ii111 . OoO0O00 . iII111i - OOooOOo
    print red ( ">>>" , False ) ,
    print "{}:" . format ( OOOO0O00o ) ,
    for OOoOoo0 in args : print OOoOoo0 ,
    print red ( "<<<\n" , False )
    # stdout may have been closed/redirected by logfile rotation; best-effort.
    try : sys . stdout . flush ( )
    except : pass
    return
if 19 - 19: i11iIiiIii + iII111i % ooOoO0o
if 14 - 14: OoO0O00 . II111iiii . I11i / Ii1I % I1ii11iIi11i - ooOoO0o
if 67 - 67: I11i - OOooOOo . i1IIi
if 35 - 35: iII111i + ooOoO0o - oO0o . iII111i . IiII
if 87 - 87: OoOoOO00
if 25 - 25: i1IIi . OoO0O00 - OoOoOO00 / OoO0O00 % OoO0O00 * iIii1I11I1II1
if 50 - 50: OoO0O00 . i11iIiiIii - oO0o . oO0o
def lisp_print_banner ( string ) :
    """Log a startup/rotation banner containing the event string, current
    time, software version, and bolded hostname.

    Lazily reads lisp-version.txt into the lisp_version global on first use.
    """
    global lisp_version , lisp_hostname
    if 31 - 31: OOooOOo / Oo0Ooo * i1IIi . OoOoOO00
    if ( lisp_version == "" ) :
        # 'commands' is the Python 2 predecessor of subprocess.getoutput().
        lisp_version = commands . getoutput ( "cat lisp-version.txt" )
    if 57 - 57: OOooOOo + iIii1I11I1II1 % i1IIi % I1IiiI
    OO0oo = bold ( lisp_hostname , False )
    lprint ( "lispers.net LISP {} {}, version {}, hostname {}" . format ( string ,
        datetime . datetime . now ( ) , lisp_version , OO0oo ) )
    return
if 15 - | |
"""
Accepts same args as draw_on, but uses maplotlib
Args:
channel (int | str): category index to visualize, or special key
"""
# If draw doesnt exist use draw_on
import numpy as np
if image is None:
if imgspace:
dims = self.img_dims
else:
dims = self.bounds
shape = tuple(dims) + (4,)
image = np.zeros(shape, dtype=np.float32)
image = self.draw_on(image, channel=channel, imgspace=imgspace,
**kwargs)
import kwplot
kwplot.imshow(image)
def draw_on(self, image=None, channel=None, invert=False, with_alpha=1.0,
            interpolation='linear', vecs=False, kpts=None, imgspace=None):
    """
    Overlays a heatmap channel on top of an image

    Args:
        image (ndarray): image to draw on, if unspecified one is created.
        channel (int | str): category index to visualize, or special key.
            special keys are: class_idx, class_probs, class_energy
        invert (bool): passed through to colorize; inverts the colormap.
        with_alpha (float): opacity of the overlaid colormask (0..1).
        interpolation (str): interpolation used when warping into image space.
        vecs (bool): if True, also draw the center offset vector field.
        kpts (bool | int | List[int] | None): keypoint channel index/indices
            whose offset fields should be drawn; True selects all channels.
        imgspace (bool, default=False): colorize the image after
            warping into the image space.

    TODO:
        - [ ] Find a way to visualize offset, diameter, and class_probs
              either individually or all at the same time

    Example:
        >>> # xdoctest: +REQUIRES(module:kwplot)
        >>> import kwarray
        >>> import kwimage
        >>> image = kwimage.grab_test_image('astro')
        >>> probs = kwimage.gaussian_patch(image.shape[0:2])[None, :]
        >>> probs = probs / probs.max()
        >>> class_probs = kwarray.ArrayAPI.cat([probs, 1 - probs], axis=0)
        >>> self = kwimage.Heatmap(class_probs=class_probs, offset=5 * np.random.randn(2, *probs.shape[1:]))
        >>> toshow = self.draw_on(image, 0, vecs=True, with_alpha=0.85)
        >>> # xdoctest: +REQUIRES(--show)
        >>> import kwplot
        >>> kwplot.autompl()
        >>> kwplot.imshow(toshow)

    Example:
        >>> # xdoctest: +REQUIRES(module:kwplot)
        >>> # xdoctest: +REQUIRES(module:ndsampler)
        >>> import kwimage
        >>> self = kwimage.Heatmap.random(dims=(200, 200), dets='coco', keypoints=True)
        >>> image = kwimage.grab_test_image('astro')
        >>> toshow = self.draw_on(image, 0, vecs=False, with_alpha=0.85)
        >>> # xdoctest: +REQUIRES(--show)
        >>> import kwplot
        >>> kwplot.autompl()
        >>> kwplot.imshow(toshow)

    Example:
        >>> # xdoctest: +REQUIRES(module:kwplot)
        >>> # xdoctest: +REQUIRES(module:ndsampler)
        >>> import kwimage
        >>> self = kwimage.Heatmap.random(dims=(200, 200), dets='coco', keypoints=True)
        >>> kpts = [6]
        >>> self = self.warp(self.tf_data_to_img.params)
        >>> image = kwimage.grab_test_image('astro')
        >>> image = kwimage.ensure_alpha_channel(image)
        >>> toshow = self.draw_on(image, 0, with_alpha=0.85, kpts=kpts)
        >>> # xdoctest: +REQUIRES(--show)
        >>> import kwplot
        >>> kwplot.autompl()
        >>> kwplot.imshow(toshow)

    Example:
        >>> # xdoctest: +REQUIRES(module:kwplot)
        >>> # xdoctest: +REQUIRES(module:ndsampler)
        >>> import kwimage
        >>> mask = np.random.rand(32, 32)
        >>> self = kwimage.Heatmap(
        >>>     class_probs=mask,
        >>>     img_dims=mask.shape[0:2],
        >>>     tf_data_to_img=np.eye(3),
        >>> )
        >>> canvas = self.draw_on()
        >>> # xdoctest: +REQUIRES(--show)
        >>> import kwplot
        >>> kwplot.autompl()
        >>> kwplot.imshow(canvas)
    """
    import kwimage
    if image is None:
        image = np.zeros(self.img_dims)
    # Pick a default channel based on which class data is available.
    if channel is None:
        if 'class_idx' in self.data:
            channel = 'class_idx'
        elif 'class_probs' in self.data:
            channel = 'class_probs'
        elif 'class_energy' in self.data:
            channel = 'class_energy'
        else:
            raise Exception('unsure how to default channel')
    # If the canvas already has image dimensions, colorize in image space.
    if imgspace is None:
        if np.all(image.shape[0:2] == np.array(self.img_dims)):
            imgspace = True
    colormask = self.colorize(channel, invert=invert,
                              with_alpha=with_alpha,
                              interpolation=interpolation,
                              imgspace=imgspace)
    # Remember the input dtype so the composited result can be converted back.
    dtype_fixer = _generic._consistent_dtype_fixer(image)
    image = kwimage.ensure_float01(image)
    layers = []
    vec_colors = kwimage.Color.distinct(2)
    vec_alpha = .5
    if kpts is not None:
        # TODO: make a nicer keypoint offset vector visualization
        if kpts is True:
            if self.data.get('keypoints', None) is not None:
                keypoints = self.data['keypoints']
                # NOTE(review): len(keypoints.shape[1]) takes len() of an int
                # shape entry; presumably range(keypoints.shape[1]) was
                # intended — confirm against the keypoints layout.
                kpts = list(range(len(keypoints.shape[1])))
        if not ub.iterable(kpts):
            kpts = [kpts]
        # Reserve one extra distinct color for the offset field when drawn.
        E = int(bool(vecs))
        vec_colors = kwimage.Color.distinct(len(kpts) + E)
    if vecs:
        if self.data.get('offset', None) is not None:
            #Hack
            # Visualize center offset vectors
            dy, dx = kwarray.ArrayAPI.numpy(self.data['offset'])
            color = vec_colors[0]
            vecmask = kwimage.make_vector_field(
                dx, dy, stride=4, scale=1.0, alpha=with_alpha * vec_alpha,
                color=color)
            # Fade subsequent vector fields so stacked layers stay readable.
            vec_alpha = max(.1, vec_alpha - .1)
            chw = torch.Tensor(vecmask.transpose(2, 0, 1))
            vecalign = self._warp_imgspace(chw, interpolation=interpolation)
            vecalign = vecalign.transpose(1, 2, 0)
            layers.append(vecalign)
    if kpts is not None:
        # TODO: make a nicer keypoint offset vector visualization
        if self.data.get('keypoints', None) is not None:
            keypoints = self.data['keypoints']
            for i, k in enumerate(kpts):
                # color = (np.array(vec_colors[k]) * 255).astype(np.uint8)
                color = vec_colors[i + E]
                dy, dx = kwarray.ArrayAPI.numpy(keypoints[:, k])
                vecmask = kwimage.make_vector_field(dx, dy, stride=8,
                                                    scale=0.5,
                                                    alpha=with_alpha *
                                                    vec_alpha, color=color)
                vec_alpha = max(.1, vec_alpha - .1)
                chw = torch.Tensor(vecmask.transpose(2, 0, 1))
                vecalign = self._warp_imgspace(chw, interpolation=interpolation)
                vecalign = vecalign.transpose(1, 2, 0)
                layers.append(vecalign)
    # Composite: vector fields on top, then the colormask, then the image.
    layers.append(colormask)
    layers.append(image)
    overlaid = kwimage.overlay_alpha_layers(layers)
    overlaid = dtype_fixer(overlaid, copy=False)
    return overlaid
class _HeatmapWarpMixin(object):
"""
mixin method having to do with warping and aligning heatmaps
"""
def _align_other(self, other):
    """
    Warp another Heatmap (sharing this heatmap's underlying image dims)
    into this heatmap's data space so the two can be combined elementwise
    (e.g. geometric mean).

    Args:
        other (Heatmap): the heatmap to align with `self`

    Returns:
        Heatmap: warped version of `other` that aligns with `self`.

    Example:
        >>> # xdoctest: +REQUIRES(module:torch)
        >>> self = Heatmap.random((120, 130), img_dims=(200, 210), classes=2, nblips=10, rng=0)
        >>> other = Heatmap.random((60, 70), img_dims=(200, 210), classes=2, nblips=10, rng=1)
        >>> other2 = self._align_other(other)
        >>> assert self.shape != other.shape
        >>> assert self.shape == other2.shape
    """
    if self is other:
        return other

    # Both heatmaps must describe the same image space.
    assert self.classes == other.classes
    assert np.all(self.img_dims == other.img_dims)

    # Compose: other-data -> image -> self-data.
    img_to_self = np.linalg.inv(self.tf_data_to_img.params)
    other_to_self = np.matmul(img_to_self, other.tf_data_to_img.params)

    target_dims = self.class_probs.shape[1:]
    return other.warp(other_to_self, output_dims=target_dims)
def _align(self, mask, interpolation='linear'):
    """
    Warp a linear combination of heatmap channels into image space and
    clip the result to [0, 1].

    DEPRICATE
    """
    import kwimage
    affine = self.tf_data_to_img.params[0:3]
    dsize = tuple(map(int, self.img_dims[::-1]))
    flags = kwimage.im_cv2._coerce_interpolation(interpolation)
    warped = cv2.warpAffine(mask, affine[0:2], dsize=tuple(dsize), flags=flags)
    return np.clip(warped, 0, 1)
def _warp_imgspace(self, chw, interpolation='linear'):
    """Warp a channel-first tensor from heatmap (data) space to image space.

    Args:
        chw (Tensor): channel-first data to warp.
        interpolation (str): mode passed to kwimage.warp_tensor.

    Returns:
        ndarray: the warped array in image space.
    """
    import kwimage
    if self.tf_data_to_img is None and self.img_dims is None:
        # No transform and no target size: data space IS image space.
        aligned = chw.cpu().numpy()
    else:
        if self.tf_data_to_img is None:
            # If img dims are the same then we dont need a transform we
            # know its identity
            if self.img_dims == self.dims:
                return chw.cpu().numpy()
            # NOTE(review): when tf_data_to_img is None but img_dims != dims,
            # execution falls through and dereferences
            # self.tf_data_to_img.params below -> AttributeError. Confirm
            # whether a scaling transform should be synthesized here instead.
        output_dims = self.img_dims
        mat = torch.Tensor(self.tf_data_to_img.params[0:3])
        outputs = kwimage.warp_tensor(
            chw[None, :], mat, output_dims=output_dims, mode=interpolation
        )
        aligned = outputs[0].cpu().numpy()
    return aligned
def upscale(self, channel=None, interpolation='linear'):
    """
    Warp the heatmap (or one of its channels) up to image dimensions.

    Args:
        channel (int | None): channel to upscale; None upscales all channels.
        interpolation (str): interpolation mode for the warp.

    Example:
        >>> # xdoctest: +REQUIRES(module:torch)
        >>> self = Heatmap.random(rng=0, dims=(32, 32))
        >>> colormask = self.upscale()
    """
    if channel is None:
        chw = torch.Tensor(self.class_probs)
    else:
        # Keep a leading channel axis for the single selected channel.
        chw = torch.Tensor(self.class_probs[channel])[None, :]
    return self._warp_imgspace(chw, interpolation=interpolation)
# @profile
def warp(self, mat=None, input_dims=None, output_dims=None,
interpolation='linear', modify_spatial_coords=True,
int_interpolation='nearest', mat_is_xy=True, version=None):
"""
Warp all spatial maps. If the map contains spatial data, that data is
also warped (ignoring the translation component).
Args:
mat (ArrayLike): transformation matrix
input_dims (tuple): unused, only exists for compatibility
output_dims (tuple): size of the output heatmap
interpolation (str): see `kwimage.warp_tensor`
int_interpolation (str): interpolation used for interger types (should be nearest)
mat_is_xy (bool, default=True): set to false if the matrix
is in yx space instead of xy space
Returns:
Heatmap: this heatmap warped into a new spatial dimension
Ignore:
# Verify swapping rows 0 and 1 and then swapping columns 0 and 1
# Produces a matrix that works with permuted coordinates
# It does.
import sympy
a, b, c, d, e, f, g, h, i, x, y, z = sympy.symbols('a, b, c, d, e, f, g, h, i, x, y, z')
M1 = sympy.Matrix([[a, b, c], [d, e, f], [g, h, i]])
M2 = sympy.Matrix([[e, d, f], [b, a, c], [h, g, i]])
xy = sympy.Matrix([[x], [y], [z]])
yx = sympy.Matrix([[y], [x], [z]])
R1 = M1.multiply(xy)
R2 = M2.multiply(yx)
R3 = sympy.Matrix([[R1[1]], [R1[0]], [R1[2]],])
assert R2 == R3
Example:
>>> # xdoctest: +REQUIRES(module:torch)
>>> from kwimage.structs.heatmap import * # NOQA
>>> self = Heatmap.random(rng=0, keypoints=True)
>>> S = 3.0
>>> mat = np.eye(3) * S
>>> mat[-1, -1] = 1
>>> newself = self.warp(mat, np.array(self.dims) * S).numpy()
>>> assert newself.offset.shape[0] == 2
>>> assert newself.diameter.shape[0] == 2
>>> f1 = newself.offset.max() / self.offset.max()
>>> assert f1 == S
>>> f2 = newself.diameter.max() / self.diameter.max()
>>> assert f2 == S
Example:
>>> import kwimage
>>> # xdoctest: +REQUIRES(module:ndsampler)
>>> self = kwimage.Heatmap.random(dims=(100, 100), dets='coco', keypoints=True)
>>> image = np.zeros(self.img_dims)
>>> # xdoctest: +REQUIRES(module:kwplot)
>>> toshow = self.draw_on(image, 1, vecs=True, with_alpha=0.85)
>>> # xdoctest: +REQUIRES(--show)
>>> import kwplot
>>> kwplot.autompl()
>>> kwplot.figure(fnum=1, doclf=True)
>>> kwplot.imshow(toshow)
"""
import kwimage
if mat is None:
mat = self.tf_data_to_img.params
| |
# <gh_stars>0
import os
import csv
import traceback
import shutil
from pathlib import Path
from os.path import basename
from utils import Config, Editor, Rubric, Process
import difflib
import re
class PythonMarker:
"""
The marker script for Python submissions.
Attributes:
----------
extension
The extension of Python files.
run:
The Python runtime.
editor:
An instance of the editor class.
inputFiles:
The list of input files for the assignment.
outputFiles:
The List of output files for the assignment.
diff:
Whether to perform the diff or not.
workingDir:
The directory where we copy all of the files.
preProcessScript:
The script that needs to be run before the assignment is run.
auxFiles:
The list of auxiliary files.
"""
def __init__(self):
    """Set up the marker with defaults for Python submissions."""
    self.extension = '.py'        # file extension handled by this marker
    self.run = 'python'           # interpreter used to execute submissions
    self.editor = Editor()
    # File collections are lists of paths.  The originals initialized
    # inputFiles/outputFiles as '' which only worked by accident when
    # iterated (iterating '' yields nothing, same as []); use real lists.
    self.inputFiles = []
    self.outputFiles = []
    self.auxFiles = []
    self.diff = False             # whether to diff output against a master
    self.workingDir = ''          # directory where files are copied and run
    self.preProcessScript = ''    # optional script run before each submission
def convertByteString(self, bytes):
    """
    Decodes the given byte string into a regular string.

    Newline pairs ('\\r\\n') are normalized to '\\n' on success.  If the
    argument cannot be decoded (e.g. it is not a bytes object), it is
    returned unchanged — this mirrors the tolerant behaviour callers rely
    on when a subprocess hands back unexpected data.

    Parameters:
    ---------
    bytes:
        The byte string to be decoded.  (Name kept for interface
        compatibility even though it shadows the builtin.)

    Returns:
    -------
    The decoded, newline-normalized string, or the original object if it
    could not be decoded.
    """
    # 'backslashreplace' never raises for undecodable bytes; AttributeError
    # covers non-bytes input (the original bare except swallowed exactly
    # these cases).
    try:
        text = bytes.decode('utf-8', 'backslashreplace')
    except (AttributeError, UnicodeDecodeError):
        return bytes
    return text.replace('\r\n', '\n')
def runFile(self, name):
    """
    Runs the python script.
    This will also capture stdout, stderr, and use any input files as
    stdin.
    Parameters:
    ----------
    name:
        The name of the file to run.
    Returns:
    -------
    The stdout, stderr, and return code of the program.
    """
    runProc = Process()
    runProc.procName = [self.run]
    runProc.procArgs = [name]
    # Check if there is an input file that needs to be used.
    inputFile = ''
    for file in self.inputFiles:
        # NOTE(review): this compares the input file's stem against the full
        # script name (e.g. 'foo' vs 'foo.py'), so it only matches when
        # input files are named like 'foo.py.txt'.  Confirm that this is
        # the intended naming convention (the output-file matching in
        # runSubmission compares stem-to-stem instead).
        fName = os.path.splitext(basename(file))[0]
        if fName == name:
            inputFile = file
            break
    if inputFile:
        with open(inputFile, 'r') as inFile:
            inLines = inFile.read()
            # Feed the input file to the child process as stdin bytes.
            inLines = str.encode(inLines)
            runOut, runErr, runCode = runProc.runPiped(input = inLines)
    else:
        runOut, runErr, runCode = runProc.runPiped()
    # Normalize captured bytes into text for reporting.
    runOut = self.convertByteString(runOut)
    runErr = self.convertByteString(runErr)
    return runCode, runErr, runOut
def performDiff(self, expected, ans):
    """
    Compare the student's output lines against the master output lines.

    Parameters:
    ----------
    expected:
        The master output to compare against.
    ans:
        The student's answer lines.

    Returns:
    -------
    (1, []) when the outputs match exactly; otherwise (0, diff) where diff
    is the difflib line diff (empty when the student produced no output).
    """
    if not ans:
        return 0, []

    diff = list(difflib.Differ().compare(expected, ans))

    # A matching output yields one context line per expected line; any
    # length difference means insertions/deletions occurred.
    if len(diff) != len(expected):
        return 0, diff

    marker = re.compile('(^[+] .*)|^(- ).*|^([?].*)')
    for line in diff:
        if marker.search(line):
            return 0, diff
    return 1, []
def runSubmission(self, submission):
    """
    Runs the student submission.

    Each Python file in the bundle is executed; its return code, stdout,
    stderr, and (optionally) a diff against the master output are appended
    to 'summary.txt'.

    Parameters:
    ----------
    submission:
        The student submission bundle; the last element is the list of
        submitted file entries.

    Returns:
    -------
    The list of files for the editor (the sources plus the summary file).
    """
    summaryFile = 'summary.txt'
    fileList = []
    for entry in submission[-1]:
        if self.extension not in entry.name:
            continue
        fileList.append(entry.name)
        # TODO: Add support for multiple input files.
        runCode, runErr, runOut = self.runFile(entry.name)
        diffResult = []
        diffCode = -1
        # Bugfix: the original compared ints with 'is' (runCode is 0 etc.),
        # which relies on CPython small-int caching and emits SyntaxWarning
        # on Python 3.8+; use '==' throughout.
        if runCode == 0 and self.diff:
            # First load in the output file (matched stem-to-stem,
            # case-insensitively).
            outFile = ''
            for file in self.outputFiles:
                fName = os.path.splitext(basename(file))[0]
                sName = os.path.splitext(basename(entry.name))[0]
                if fName.lower() == sName.lower():
                    outFile = file
                    break
            if outFile:
                with open(outFile, 'r') as oFile:
                    master = oFile.readlines()
                    student = runOut.splitlines(keepends = True)
                    diffCode, diffResult = self.performDiff(master,
                                                            student)
        # Append to the running summary (create it on first use).
        mode = 'a' if os.path.exists(summaryFile) else 'w'
        with open(summaryFile, mode, newline = '\n', encoding = 'utf-8') as sFile:
            sFile.write('#=========================================#\n')
            sFile.write('# Summary for file {}\n'.format(entry.name))
            sFile.write('#=========================================#\n')
            sFile.write('Program return code: {}\n\n'.format(runCode))
            if runCode == 0:
                if self.diff:
                    if diffCode == 1:
                        sFile.write(
                            'Diff results: outputs are identical.\n\n')
                    elif diffCode == -1:
                        sFile.write('Could not perform diff.\n\n')
                    else:
                        if len(diffResult) == 0:
                            sFile.write('Diff results\n')
                            sFile.write('Empty diff. No output received from program.')
                        else:
                            sFile.write('Diff results:\n')
                            sFile.write('Legend:\n')
                            sFile.write('-: expected\n')
                            sFile.write('+: received\n')
                            sFile.write('?: diff results\n\n')
                            sFile.writelines(diffResult)
                            sFile.write('\n')
                else:
                    sFile.write('# Output for {}\n'.format(
                        entry.name))
                    sFile.write('#=============================#\n')
                    sFile.write('stdout:\n{}\n\n'.format(runOut))
                    sFile.write('#=============================#\n')
                    sFile.write('stderr:\n{}\n\n'.format(runErr))
            else:
                sFile.write('# Output for {}\n'.format(entry.name))
                sFile.write('#=============================#\n')
                sFile.write('stdout:\n{}\n\n'.format(runOut))
                sFile.write('#=============================#\n')
                sFile.write('stderr:\n{}\n\n'.format(runErr))
    fileList.append(summaryFile)
    return fileList
def formatForCSV(self, table, rubric):
    """
    Convert the table of marked students into CSV-ready rows.

    Parameters:
    ----------
    table:
        The table of students and their grades.
    rubric:
        The marking rubric whose attribute names form the column headers.

    Returns:
    -------
    (header, grades): the header row and the list of per-student rows.
    """
    header = ['Student'] + list(rubric.attributes) + ['Total', 'Comments']

    grades = []
    for entry in table:
        row = [entry.studentName]
        row.extend(entry.attributes.values())
        row.append(entry.total)
        row.append(entry.comments)
        grades.append(row)
    return header, grades
def writeIncremental(self, table, rubric):
    """
    Write the incremental backup file 'grades_inc.csv'.

    Acts as a checkpoint in case the script crashes (or the grader takes a
    break); it records every student marked so far.

    Note:
    ----
    This assumes students are marked in the order their directories appear
    in the root directory.

    Parameters:
    ----------
    table:
        The table of students and their grades.
    rubric:
        The sample rubric used to shape the CSV columns.
    """
    header, grades = self.formatForCSV(table, rubric)
    path = os.path.join(self.workingDir, 'grades_inc.csv')
    with open(path, 'w+', newline = '\n') as out:
        writer = csv.writer(out)
        writer.writerow(header)
        writer.writerows(grades)
def loadIncremental(self, file, masterRubric):
    """
    Restore the grade table from the incremental backup file.

    Parameters:
    ----------
    file:
        The name of the incremental CSV file.
    masterRubric:
        The master rubric used as the template for each restored entry.

    Returns:
    -------
    (table, count): the restored list of per-student rubrics and how many
    students were restored.
    """
    table = []
    count = 0
    with open(file, 'r') as inFile:
        reader = csv.reader(inFile)
        header = next(reader)
        for row in reader:
            count += 1
            entry = Rubric()
            entry.make(masterRubric)
            # Columns 1..n-3 are rubric attributes; the last two are
            # Total and Comments, column 0 is the student name.
            for col, attr in enumerate(header[1:-2], start = 1):
                entry.attributes[attr] = float(row[col])
            entry.studentName = row[0]
            entry.total = float(row[-2])
            entry.comments = row[-1]
            table.append(entry)
    return table, count
def mark(self, rootDir, rubric):
"""
This is the main function of the Marker.
This will iterate over the directory of each student, read their
submission, compile and run it. It will then capture their output and
diff it. This will then be sent to the editor so the TA can mark the
assignment. It can also restore the list using an incremental file.
Parameters:
----------
rootDir:
The root of the assignemnts.
rubric:
The marking rubric to use.
Returns:
-------
The table containing all of the students, their marks and comments.
"""
table = []
# Check if we have a partial file already.
incPath = os.path.join(self.workingDir, 'grades_inc.csv')
incFile = Path(incPath)
start = 0
if incFile.is_file():
table, start = self.loadIncremental(incPath, rubric)
# Next, check copy over any input and output files to the working
# directory.
count = 0
for entry in os.scandir(rootDir):
if not entry.is_dir():
continue
if start is not 0:
start -= 1
continue
name = entry.name
subPath = os.path.join(entry.path, 'Submission attachment(s)')
submission = [file for file in os.scandir(subPath) if
file.is_file()]
bundle = [submission]
for file in self.inputFiles:
shutil.copy2(file, self.workingDir)
for file in self.outputFiles:
shutil.copy2(file, self.workingDir)
for file in self.auxFiles:
shutil.copy2(file, self.workingDir)
if self.preProcessScript:
shutil.copy2(self.preProcessScript, self.workingDir)
# Now copy the submission over to the working directory.
for file in submission:
shutil.copy2(file.path, self.workingDir)
os.chdir(self.workingDir)
# Check if we have to run anything before.
if self.preProcessScript:
proc = Process()
proc.procName = 'python'
proc.procArgs = [self.preProcessScript]
proc.run()
try:
list = self.runSubmission(bundle)
except Exception as e:
print('Error in entry {}'.format(count))
print('Path: {}'.format(subPath))
print(traceback.format_exc())
self.writeIncremental(table, rubric)
continue
with open('rubric.txt', 'w+') as rubricFile:
i = 0
for item, mark in rubric.attributes.items():
rubricFile.write('{}: {}/{}\n'.format(item, mark,
rubric.maxVals[i]))
i += 1
rubricFile.write('#==============================#\n')
rubricFile.write('# Instructor comments\n')
rubricFile.write('#==============================#\n')
rubricFile.write('')
list.append('rubric.txt')
self.editor.run(list)
# The grader has now entered the grades and comments, so lets
# re-open the file and update the marks.
studentRubric = Rubric()
studentRubric.make(rubric)
studentRubric.studentName = name
with open('rubric.txt', 'r+') as rubricFile:
header = 0
comments = []
for line in rubricFile:
if line.startswith('#'):
header += 1
continue
if header is 3:
comments.append(line)
continue
tokens = line.split(':')
item = tokens[0]
vals = tokens[1].split('/')
studentRubric.attributes[item] = float(vals[0])
comments = ' '.join(comments)
studentRubric.comments = comments
studentRubric.addMarks()
table.append(studentRubric)
self.writeIncremental(table, rubric)
try:
os.remove('rubric.txt')
except:
pass
try:
os.remove('summary.txt')
except:
pass
for file in list:
if file | |
Field("mapmaker_enabled", "boolean", default=False),
Field("mapmaker", default="Google MapMaker"),
Field("mapmakerhybrid_enabled", "boolean", default=False),
Field("mapmakerhybrid", default="Google MapMaker Hybrid"),
Field("earth_enabled", "boolean", default=True),
Field("streetview_enabled", "boolean", default=True),
role_required(), # Single Role
#roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_timestamp())
def gis_google_onvalidation(form):
"""
Warn the user about issues
"""
if not form.vars.apikey:
response.warning = T("Google Layers cannot be displayed if there isn't a valid API Key")
# Enable the overall LayerType if any of the layers are enabled
if "satellite_enabled" in form.vars or \
"maps_enabled" in form.vars or \
"hybrid_enabled" in form.vars or \
"mapmaker_enabled" in form.vars or \
"mapmakerhybrid_enabled" in form.vars or \
"earth_enabled" in form.vars or \
"streetview_enabled" in form.vars:
form.vars.enabled = True
else:
# Disable it
form.vars.enabled = False
configure("gis_layer_google",
onvalidation=gis_google_onvalidation)
# -------------------------------------------------------------------------
# GPX
table = define_table("gis_layer_gpx",
name_field(),
Field("description", label=T("Description")),
Field("enabled", "boolean", default=True, label=T("Available in Viewer?")),
Field("visible", "boolean", default=True,
label=T("On by default? (only applicable to Overlays)")),
Field("track", "upload", autodelete = True,
label = T("GPS Track File"),
requires = IS_UPLOAD_FILENAME(extension="gpx"),
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(request.folder,
"uploads",
"tracks"),
comment = DIV( _class="tooltip",
_title="%s|%s" % (T("GPS Track"),
T("A file in GPX format taken from a GPS."),
#T("Timestamps can be correlated with the timestamps on the photos to locate them on the map.")
))),
Field("waypoints", "boolean", default=True,
label=T("Display Waypoints?")),
Field("tracks", "boolean", default=True,
label=T("Display Tracks?")),
Field("routes", "boolean", default=False,
label=T("Display Routes?")),
marker_id(),
gis_opacity(),
cluster_distance(),
cluster_threshold(),
role_required(), # Single Role
#roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_timestamp())
# -------------------------------------------------------------------------
# KML
table = define_table("gis_layer_kml",
name_field(),
Field("description", label=T("Description")),
Field("enabled", "boolean", default=True, label=T("Available in Viewer?")),
Field("visible", "boolean", default=True,
label=T("On by default? (only applicable to Overlays)")),
Field("url", label=T("Location"),
requires=IS_NOT_EMPTY(),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Location"),
T("The URL to access the service.")))),
Field("title", label=T("Title"), default="name",
comment=T("The attribute within the KML which is used for the title of popups.")),
Field("body", label=T("Body"), default="description",
comment=T("The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)")),
gis_refresh(),
gis_opacity(),
cluster_distance(),
cluster_threshold(),
marker_id(),
role_required(), # Single Role
#roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_timestamp())
# -------------------------------------------------------------------------
# JS
# - raw JavaScript code for advanced users
# @ToDo: Move to a Plugin (more flexible)
table = define_table("gis_layer_js",
name_field(),
Field("description", label=T("Description")),
Field("enabled", "boolean", default=True, label=T("Available in Viewer?")),
Field("code", "text", label=T("Code"),
default="var myNewLayer = new OpenLayers.Layer.XYZ();\nmap.addLayer(myNewLayer);"),
role_required(), # Single Role
#roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_timestamp())
# -------------------------------------------------------------------------
# MGRS
table = define_table("gis_layer_mgrs",
name_field(),
Field("description", label=T("Description")),
Field("enabled", "boolean", default=True, label=T("Available in Viewer?")),
Field("url", label=T("Location"),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Location"),
T("The URL to access the service.")))),
role_required(), # Single Role
#roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_timestamp())
# -------------------------------------------------------------------------
# OpenStreetMap
table = define_table("gis_layer_openstreetmap",
name_field(),
Field("description", label=T("Description")),
Field("enabled", "boolean", default=True, label=T("Available in Viewer?")),
Field("visible", "boolean", default=True,
label=T("On by default? (only applicable to Overlays)")),
Field("url1", label=T("Location"), requires=IS_NOT_EMPTY(),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Location"),
T("The URL to access the service.")))),
Field("url2", label=T("Secondary Server (Optional)")),
Field("url3", label=T("Tertiary Server (Optional)")),
Field("base", "boolean", default=True,
label=T("Base Layer?")),
Field("attribution", label=T("Attribution")),
Field("zoom_levels", "integer",
requires = IS_INT_IN_RANGE(1, 30),
label=T("Zoom Levels"),
default=19),
role_required(), # Single Role
#roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_timestamp())
# -------------------------------------------------------------------------
# TMS
table = define_table("gis_layer_tms",
name_field(),
Field("description", label=T("Description")),
Field("enabled", "boolean", default=True, label=T("Available in Viewer?")),
Field("url", label=T("Location"), requires=IS_NOT_EMPTY(),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Location"),
T("The URL to access the service.")))),
Field("url2", label=T("Secondary Server (Optional)")),
Field("url3", label=T("Tertiary Server (Optional)")),
Field("layername", label=T("Layer Name"),
requires=IS_NOT_EMPTY()),
Field("img_format", label=T("Format")),
Field("attribution", label=T("Attribution")),
Field("zoom_levels", "integer",
requires = IS_INT_IN_RANGE(1, 30),
label=T("Zoom Levels"),
default=19),
projection_id(default=1), # 900913
role_required(), # Single Role
#roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_timestamp())
# -------------------------------------------------------------------------
# WFS
table = define_table("gis_layer_wfs",
name_field(),
Field("description", label=T("Description")),
Field("enabled", "boolean", default=True, label=T("Available in Viewer?")),
Field("visible", "boolean", default=True,
label=T("On by default? (only applicable to Overlays)")),
Field("url", label=T("Location"), requires = IS_NOT_EMPTY(),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Location"),
T("Mandatory. The URL to access the service.")))),
Field("featureType", label=T("Feature Type"),
requires = IS_NOT_EMPTY(),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Feature Type"),
T("Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).")))),
Field("featureNS", label=T("Feature Namespace"),
requires=IS_NULL_OR(IS_URL()),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Feature Namespace"),
T("Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).")))),
Field("title", label=T("Title"), default="name",
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Title"),
T("The attribute which is used for the title of popups.")))),
Field("style_field", label=T("Style Field"),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Style Field"),
T("Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.")))),
Field("style_values", label=T("Style Values"), default="{}",
comment=DIV( _class="stickytip",
_title="%s|%s" % (T("Style Values"),
T("Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}")))),
Field("geometryName", label=T("Geometry Name"), default = "the_geom",
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Geometry Name"),
T("Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.")))),
Field("wfs_schema", label=T("Schema"),
requires=IS_NULL_OR(IS_URL()),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Schema"),
T("Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.")))),
projection_id(default=2), # 4326
Field("version", label=T("Version"), default="1.1.0",
requires=IS_IN_SET(["1.0.0", "1.1.0"], zero=None)),
#gis_refresh(),
gis_opacity(),
cluster_distance(),
cluster_threshold(),
#Field("editable", "boolean", default=False, label=T("Editable?")),
role_required(), # Single Role
#roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_timestamp())
# -------------------------------------------------------------------------
# WMS
table = define_table("gis_layer_wms",
name_field(),
Field("description", label=T("Description")),
Field("enabled", "boolean", default=True, label=T("Available in Viewer?")),
Field("visible", "boolean", default=True,
label=T("On by default? (only applicable to Overlays)")),
Field("url", label=T("Location"), requires = IS_NOT_EMPTY(),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Location"),
T("The URL to access the service.")))),
Field("version", length=32,
label=T("Version"), default="1.1.1",
requires=IS_IN_SET(["1.1.1", "1.3.0"], zero=None)),
Field("base", "boolean", default=False,
label=T("Base Layer?")),
Field("transparent", "boolean", default=True,
label=T("Transparent?")),
gis_opacity(),
Field("map", length=32, label=T("Map"),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Map"),
T("Optional selection of a MapServer map.")))),
Field("layers", label=T("Layers"),
requires=IS_NOT_EMPTY()),
Field("img_format", length=32, label=T("Format"),
requires=IS_NULL_OR(IS_IN_SET(gis_layer_wms_img_formats)),
default="image/png"),
Field("style", length=32, label=T("Style"),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Style"),
T("Optional selection of an alternate style.")))),
Field("bgcolor", length=32, label=T("Background Color"),
requires=IS_NULL_OR(IS_HTML_COLOUR()),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Background Color"),
T("Optional selection of a background color.")))),
Field("tiled", "boolean", label=T("Tiled"), default=False,
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Tiled"),
T("Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.")))),
Field("buffer", "integer", label=T("Buffer"), default=0,
requires=IS_INT_IN_RANGE(0, 10),
comment=DIV( _class="tooltip",
_title="%s|%s" % (T("Buffer"),
T("The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.")))),
#Field("queryable", "boolean", default=False, label=T("Queryable?")),
#Field("legend_url", label=T("legend URL")),
#Field("legend_format", label=T("Legend Format"), requires = IS_NULL_OR(IS_IN_SET(gis_layer_wms_img_formats))),
role_required(), # Single Role
#roles_permitted(), # Multiple Roles (needs implementing in modules/s3gis.py)
*s3_timestamp())
#table.url.requires = [IS_URL, IS_NOT_EMPTY()]
# -------------------------------------------------------------------------
# GIS Cache
# -------------------------------------------------------------------------
# Store downloaded GeoRSS feeds in the DB
# - to allow refresh timer, BBOX queries, unified approach to Markers & Popups
tablename = "gis_cache"
table = define_table(tablename,
Field("title"),
Field("description"),
Field("link"), # Used by GeoRSS
Field("data"),
Field("image"),
Field("lat"),
Field("lon"),
Field("marker"), # Used by KML
Field("source", requires=IS_NULL_OR(IS_URL())),
*(s3_timestamp() + s3_uid()))
# Store downloaded KML feeds on the filesystem
# @ToDo: Migrate to DB instead (using above gis_cache)
tablename = "gis_cache2"
table = define_table(tablename,
Field("name", length=128, notnull=True, unique=True),
Field("file", "upload", autodelete = True,
# upload folder needs to be visible to the download() function as well as the upload
uploadfolder = os.path.join(request.folder,
"uploads",
"gis_cache")),
*s3_timestamp())
# Provide a handle to this load function
# - registers gis_map_tables with the model loader so that the map tables
#   are only actually defined on first access to one of these tablenames
s3mgr.model.loader(gis_map_tables,
                   "gis_feature_class",
                   "gis_layer_feature",
                   "gis_cache",
                   "gis_cache2",
                   "gis_layer_bing",
                   "gis_layer_coordinate",
                   "gis_layer_geojson",
                   "gis_layer_georss",
                   "gis_layer_google",
                   "gis_layer_gpx",
                   "gis_layer_js",
                   "gis_layer_kml",
                   "gis_layer_mgrs",
                   "gis_layer_openstreetmap",
                   "gis_layer_tms",
                   "gis_layer_wfs",
                   "gis_layer_wms",
                   )
# =============================================================================
def gis_feature_query():
    """ Load the gis_feature_query table when needed """
    # Store results of Feature Queries in a temporary table to allow
    # BBOX queries, Clustering, Refresh, Client-side Filtering, etc
    tablename = "gis_feature_query"
    table = define_table(tablename,
                         Field("name", length=128, notnull=True),
                         Field("lat", requires=IS_LAT()),
                         Field("lon", requires=IS_LON()),
                         # Per-feature popup configuration
                         Field("popup_url"),
                         Field("popup_label"),
                         # Optional Marker
                         Field("marker_url"),
                         Field("marker_height", "integer"),
                         Field("marker_width", "integer"),
                         # or Shape/Size/Colour
                         Field("shape",
                               requires=IS_NULL_OR(IS_IN_SET(["circle", "square", "star", "x", "cross", "triangle"]))),
                         Field("size", "integer"),
                         Field("colour", requires=IS_NULL_OR(IS_HTML_COLOUR())),
                         s3_meta_created_by(),
                         s3_meta_owned_by_user(),
                         *s3_timestamp())
# Provide a handle to this load function
# - the table above is only defined on first access to "gis_feature_query"
s3mgr.model.loader(gis_feature_query,
                   "gis_feature_query")
# =============================================================================
# Tasks to be callable async
# =============================================================================
#def download_kml(record_id, filename, user_id=None):
# """
# Download a KML file
# - will normally | |
abstracttree("do\n\nla\ndo") == Collection(
[UnnamedPassage([Line([do])]), UnnamedPassage([Line([la]), Line([do])])]
)
assert abstracttree("do\n\n\nla\ndo") == Collection(
[UnnamedPassage([Line([do])]), UnnamedPassage([Line([la]), Line([do])])]
)
assert abstracttree("do\n\nla\n\ndo") == Collection(
[
UnnamedPassage([Line([do])]),
UnnamedPassage([Line([la])]),
UnnamedPassage([Line([do])]),
]
)
assert abstracttree("do\n\n\nla\n\n\ndo") == Collection(
[
UnnamedPassage([Line([do])]),
UnnamedPassage([Line([la])]),
UnnamedPassage([Line([do])]),
]
)
assert abstracttree("f = do") == Collection(
[NamedPassage(Assignment(Word("f"), []), [Line([do])])]
)
assert abstracttree("f(x) = do") == Collection(
[NamedPassage(Assignment(Word("f"), [Word("x")]), [Line([do])])]
)
assert abstracttree("f(x y) = do") == Collection(
[NamedPassage(Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do])])]
)
assert abstracttree("f(x y) = do la") == Collection(
[NamedPassage(Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do, la])])]
)
assert abstracttree("f(x y) = do\nla") == Collection(
[
NamedPassage(
Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do]), Line([la])]
)
]
)
assert abstracttree("f(x y) =\ndo\nla") == Collection(
[
NamedPassage(
Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do]), Line([la])]
)
]
)
assert abstracttree("f(x y) =\ndo\n\nla") == Collection(
[
NamedPassage(Assignment(Word("f"), [Word("x"), Word("y")]), [Line([do])]),
UnnamedPassage([Line([la])]),
]
)
with pytest.raises(SymbolAllUnderscores):
abstracttree("_ = do")
with pytest.raises(SymbolAllUnderscores):
abstracttree("___ = do")
def test_comments():
    """Comment extraction: ``.comments`` collects, in source order, the text
    of every ``|``-comment and each bare line-ending newline, while leaving
    the parsed Collection/passage structure unchanged.

    NOTE(review): the embedded whitespace inside the triple-quoted inputs is
    significant — some cases deliberately test indented continuation lines.
    """
    # Reference notes used when comparing whole Collections below
    do = Modified(Word("do"), 0, 0, 0, None, None, 1)
    la = Modified(Word("la"), 0, 0, 0, None, None, 1)
    # --- single line, with/without trailing newline and comment ---
    assert abstracttree("""do""").comments == []
    assert (
        abstracttree(
            """do
"""
        ).comments
        == ["\n"]
    )
    assert abstracttree("""do | one""").comments == ["| one"]
    assert (
        abstracttree(
            """do | one
"""
        ).comments
        == ["| one\n"]
    )
    assert abstracttree("""do |one""").comments == ["|one"]
    assert (
        abstracttree(
            """do |one
"""
        ).comments
        == ["|one\n"]
    )
    # --- two lines, unindented and indented continuation ---
    assert (
        abstracttree(
            """do
la"""
        ).comments
        == ["\n"]
    )
    assert (
        abstracttree(
            """do
la
"""
        ).comments
        == ["\n", "\n"]
    )
    assert (
        abstracttree(
            """do
    la"""
        ).comments
        == ["\n"]
    )
    assert (
        abstracttree(
            """do
    la
"""
        ).comments
        == ["\n", "\n"]
    )
    # --- comments on one or both lines ---
    assert (
        abstracttree(
            """do | one
la"""
        ).comments
        == ["| one\n"]
    )
    assert (
        abstracttree(
            """do | one
la
"""
        ).comments
        == ["| one\n", "\n"]
    )
    assert (
        abstracttree(
            """do | one
la | two"""
        ).comments
        == ["| one\n", "| two"]
    )
    assert (
        abstracttree(
            """do | one
la | two
"""
        ).comments
        == ["| one\n", "| two\n"]
    )
    # comments must not alter the parsed structure
    assert (
        abstracttree(
            """do | one
la | two"""
        )
        == Collection([UnnamedPassage([Line([do]), Line([la])])])
    )
    assert (
        abstracttree(
            """do | one
la | two
"""
        )
        == Collection([UnnamedPassage([Line([do]), Line([la])])])
    )
    assert (
        abstracttree(
            """do
la | two"""
        ).comments
        == ["\n", "| two"]
    )
    assert (
        abstracttree(
            """do
la | two
"""
        ).comments
        == ["\n", "| two\n"]
    )
    assert (
        abstracttree(
            """do
    la | two"""
        ).comments
        == ["\n", "| two"]
    )
    assert (
        abstracttree(
            """do
    la | two
"""
        ).comments
        == ["\n", "| two\n"]
    )
    # --- standalone comment lines split passages ---
    assert (
        abstracttree(
            """do
| two
la | three"""
        ).comments
        == ["\n", "| two\n", "| three"]
    )
    assert (
        abstracttree(
            """do
| two
la | three
"""
        ).comments
        == ["\n", "| two\n", "| three\n"]
    )
    assert (
        abstracttree(
            """do
| two
la | three"""
        )
        == Collection([UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])])
    )
    assert (
        abstracttree(
            """do
| two
la | three
"""
        )
        == Collection([UnnamedPassage([Line([do])]), UnnamedPassage([Line([la])])])
    )
    # --- comments attached to named passages (assignments) ---
    assert abstracttree("""f = do | one""").comments == ["| one"]
    assert (
        abstracttree(
            """f = do | one
"""
        ).comments
        == ["| one\n"]
    )
    assert (
        abstracttree(
            """f =
do | two"""
        ).comments
        == ["\n", "| two"]
    )
    assert (
        abstracttree(
            """f =
do | two
"""
        ).comments
        == ["\n", "| two\n"]
    )
    assert (
        abstracttree(
            """f = | one
do | two"""
        ).comments
        == ["| one\n", "| two"]
    )
    assert (
        abstracttree(
            """f = | one
do | two
"""
        ).comments
        == ["| one\n", "| two\n"]
    )
    assert (
        abstracttree(
            """| one
f =
do | three"""
        ).comments
        == ["| one\n", "\n", "| three"]
    )
    assert (
        abstracttree(
            """| one
f =
do | three
"""
        ).comments
        == ["| one\n", "\n", "| three\n"]
    )
    assert (
        abstracttree(
            """| one
f = | two
do | three"""
        ).comments
        == ["| one\n", "| two\n", "| three"]
    )
    assert (
        abstracttree(
            """| one
f = | two
do | three
"""
        ).comments
        == ["| one\n", "| two\n", "| three\n"]
    )
def test_evaluate():
    """Evaluation of unnamed passages into (total_duration, [AbstractNote]).

    Covers: plain sequences, duration dots, rests (underscores), parallel
    lines, octave marks ('), augmentations (+1, >2, @ pass-through),
    duration scaling (:6, :*2) and repetition (* 2).
    """
    # one note: unit duration
    assert evaluate(abstracttree("do").passages[0], Scope({}), 0, 0, (), ()) == (
        1.0,
        [AbstractNote(0.0, 1.0, Word("do"))],
    )
    assert evaluate(abstracttree("do re mi").passages[0], Scope({}), 0, 0, (), ()) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
        ],
    )
    # dots extend a note's duration
    assert evaluate(abstracttree("do....").passages[0], Scope({}), 0, 0, (), ()) == (
        4.0,
        [AbstractNote(0.0, 4.0, Word("do"))],
    )
    assert evaluate(
        abstracttree("do.. re.. mi..").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        6.0,
        [
            AbstractNote(0.0, 2.0, Word("do")),
            AbstractNote(2.0, 4.0, Word("re")),
            AbstractNote(4.0, 6.0, Word("mi")),
        ],
    )
    # underscores are rests: they consume time but emit no notes
    assert evaluate(abstracttree("___").passages[0], Scope({}), 0, 0, (), ()) == (
        3.0,
        [],
    )
    assert evaluate(abstracttree("do _ mi").passages[0], Scope({}), 0, 0, (), ()) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(2.0, 3.0, Word("mi")),
        ],
    )
    assert evaluate(abstracttree("do __ mi").passages[0], Scope({}), 0, 0, (), ()) == (
        4.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(3.0, 4.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("do __ mi _").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        5.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(3.0, 4.0, Word("mi")),
        ],
    )
    # separate lines play in parallel: total duration is the longest line
    assert evaluate(
        abstracttree("do\nre\nmi").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        1.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(0.0, 1.0, Word("re")),
            AbstractNote(0.0, 1.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("do\n_\nre mi").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        2.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(0.0, 1.0, Word("re")),
            AbstractNote(1.0, 2.0, Word("mi")),
        ],
    )
    # octave mark and step augmentation on a single note
    assert evaluate(abstracttree("do'").passages[0], Scope({}), 0, 0, (), ()) == (
        1.0,
        [AbstractNote(0.0, 1.0, Word("do"), octave=1)],
    )
    assert evaluate(abstracttree("do+1").passages[0], Scope({}), 0, 0, (), ()) == (
        1.0,
        [AbstractNote(0.0, 1.0, Word("do"), augmentations=(AugmentStep(1),))],
    )
    # braced groups: modifiers distribute over the whole group
    assert evaluate(
        abstracttree("{do re mi}").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi}'").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do"), octave=1),
            AbstractNote(1.0, 2.0, Word("re"), octave=1),
            AbstractNote(2.0, 3.0, Word("mi"), octave=1),
        ],
    )
    assert evaluate(
        abstracttree("{do @re mi}'").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do"), octave=1),
            AbstractNote(1.0, 2.0, Word("re"), octave=1),
            AbstractNote(2.0, 3.0, Word("mi"), octave=1),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi}+1").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do"), augmentations=(AugmentStep(1),)),
            AbstractNote(1.0, 2.0, Word("re"), augmentations=(AugmentStep(1),)),
            AbstractNote(2.0, 3.0, Word("mi"), augmentations=(AugmentStep(1),)),
        ],
    )
    # '@' shields a note from the group's augmentation (but not octave marks)
    assert evaluate(
        abstracttree("{do @re mi}+1").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(0.0, 1.0, Word("do"), augmentations=(AugmentStep(1),)),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi"), augmentations=(AugmentStep(1),)),
        ],
    )
    assert evaluate(
        abstracttree("{{do @re mi}+1}>2").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        3.0,
        [
            AbstractNote(
                0.0, 1.0, Word("do"), augmentations=(AugmentDegree(2), AugmentStep(1))
            ),
            AbstractNote(1.0, 2.0, Word("re"), augmentations=(AugmentDegree(2),)),
            AbstractNote(
                2.0, 3.0, Word("mi"), augmentations=(AugmentDegree(2), AugmentStep(1))
            ),
        ],
    )
    # duration scaling: absolute (:6) and multiplicative (:*2) agree here
    assert evaluate(
        abstracttree("{do re mi}:6").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        6.0,
        [
            AbstractNote(0.0, 2.0, Word("do")),
            AbstractNote(2.0, 4.0, Word("re")),
            AbstractNote(4.0, 6.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi}:*2").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        6.0,
        [
            AbstractNote(0.0, 2.0, Word("do")),
            AbstractNote(2.0, 4.0, Word("re")),
            AbstractNote(4.0, 6.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi} fa").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        4.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
            AbstractNote(3.0, 4.0, Word("fa")),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi}:6 fa").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        7.0,
        [
            AbstractNote(0.0, 2.0, Word("do")),
            AbstractNote(2.0, 4.0, Word("re")),
            AbstractNote(4.0, 6.0, Word("mi")),
            AbstractNote(6.0, 7.0, Word("fa")),
        ],
    )
    # repetition: '* 2' binds to the preceding note or group
    assert evaluate(abstracttree("do * 2").passages[0], Scope({}), 0, 0, (), ()) == (
        2.0,
        [AbstractNote(0.0, 1.0, Word("do")), AbstractNote(1.0, 2.0, Word("do"))],
    )
    assert evaluate(
        abstracttree("do re mi * 2").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        4.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
            AbstractNote(3.0, 4.0, Word("mi")),
        ],
    )
    assert evaluate(
        abstracttree("{do re mi} * 2").passages[0], Scope({}), 0, 0, (), ()
    ) == (
        6.0,
        [
            AbstractNote(0.0, 1.0, Word("do")),
            AbstractNote(1.0, 2.0, Word("re")),
            AbstractNote(2.0, 3.0, Word("mi")),
            AbstractNote(3.0, 4.0, Word("do")),
            AbstractNote(4.0, 5.0, Word("re")),
            AbstractNote(5.0, 6.0, Word("mi")),
        ],
    )
def test_evaluate_assign():
definition = abstracttree("f(x y) = y x").passages[0]
assert evaluate(
abstracttree("do f(mi re) fa so").passages[0],
Scope({"f": definition}),
0,
0,
(),
(),
) == (
5.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("mi")),
AbstractNote(3.0, 4.0, Word("fa")),
AbstractNote(4.0, 5.0, Word("so")),
],
)
assert evaluate(
abstracttree("do f({mi mi} {re re}) fa so").passages[0],
Scope({"f": definition}),
0,
0,
(),
(),
) == (
7.0,
[
AbstractNote(0.0, 1.0, Word("do")),
AbstractNote(1.0, 2.0, Word("re")),
AbstractNote(2.0, 3.0, Word("re")),
AbstractNote(3.0, 4.0, Word("mi")),
AbstractNote(4.0, 5.0, Word("mi")),
AbstractNote(5.0, 6.0, Word("fa")),
AbstractNote(6.0, 7.0, Word("so")),
],
)
with pytest.raises(MismatchingArguments):
evaluate(
abstracttree("f(mi)").passages[0], Scope({"f": definition}), 0, 0, (), ()
)
with pytest.raises(MismatchingArguments):
evaluate(
abstracttree("f(la la la)").passages[0],
| |
# <gh_stars>1-10  -- stray scraper-metadata tag; commented out so the module parses
import unittest, doctest
from test import test_support
from collections import namedtuple, Counter, Mapping
import pickle, cPickle, copy
from random import randrange
import operator
from collections import Hashable, Iterable, Iterator
from collections import Sized, Container, Callable
from collections import Set, MutableSet
from collections import Mapping, MutableMapping
from collections import Sequence, MutableSequence
# Shared namedtuple type for the pickle/copy round-trip tests below; it must
# live at module level so pickle can locate it by qualified name.
TestNT = namedtuple('TestNT', 'x y z')    # type used for pickle tests
class TestNamedTuple(unittest.TestCase):
    """Tests for the collections.namedtuple factory.

    Covers class creation and name validation, instance construction,
    tuple semantics, degenerate field counts, pickling and copying.
    (Python 2 codebase: relies on cPickle, list-returning range(), and the
    old assert_/failIf aliases used elsewhere in this file.)
    """
    def test_factory(self):
        """Class-level attributes and type/field-name validation."""
        Point = namedtuple('Point', 'x y')
        self.assertEqual(Point.__name__, 'Point')
        self.assertEqual(Point.__doc__, 'Point(x, y)')
        self.assertEqual(Point.__slots__, ())
        self.assertEqual(Point.__module__, __name__)
        self.assertEqual(Point.__getitem__, tuple.__getitem__)
        self.assertEqual(Point._fields, ('x', 'y'))
        # Invalid type names and field names must be rejected up front.
        self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi')      # type has non-alpha char
        self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi')     # type has keyword
        self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi')      # type starts with digit
        self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi')      # field with non-alpha char
        self.assertRaises(ValueError, namedtuple, 'abc', 'abc class')     # field has keyword
        self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi')     # field starts with digit
        self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi')      # field with leading underscore
        self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi')   # duplicate field
        namedtuple('Point0', 'x1 y2')   # Verify that numbers are allowed in names
        namedtuple('_', 'a b c')        # Test leading underscores in a typename
        # Unicode field specs must not leak u'' prefixes into _fields.
        nt = namedtuple('nt', u'the quick brown fox')                     # check unicode input
        self.assert_("u'" not in repr(nt._fields))
        nt = namedtuple('nt', (u'the', u'quick'))                         # check unicode input
        self.assert_("u'" not in repr(nt._fields))
        self.assertRaises(TypeError, Point._make, [11])                   # catch too few args
        self.assertRaises(TypeError, Point._make, [11, 22, 33])           # catch too many args
    def test_instance(self):
        """Construction, repr, _make/_fields/_replace/_asdict helpers."""
        Point = namedtuple('Point', 'x y')
        p = Point(11, 22)
        # All positional/keyword spellings build the same value.
        self.assertEqual(p, Point(x=11, y=22))
        self.assertEqual(p, Point(11, y=22))
        self.assertEqual(p, Point(y=22, x=11))
        self.assertEqual(p, Point(*(11, 22)))
        self.assertEqual(p, Point(**dict(x=11, y=22)))
        self.assertRaises(TypeError, Point, 1)                            # too few args
        self.assertRaises(TypeError, Point, 1, 2, 3)                      # too many args
        self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals()) # wrong keyword argument
        self.assertRaises(TypeError, eval, 'Point(x=1)', locals())        # missing keyword argument
        self.assertEqual(repr(p), 'Point(x=11, y=22)')
        self.assert_('__dict__' not in dir(p))                            # verify instance has no dict
        self.assert_('__weakref__' not in dir(p))
        self.assertEqual(p, Point._make([11, 22]))                        # test _make classmethod
        self.assertEqual(p._fields, ('x', 'y'))                           # test _fields attribute
        self.assertEqual(p._replace(x=1), (1, 22))                        # test _replace method
        self.assertEqual(p._asdict(), dict(x=11, y=22))                   # test _asdict method
        try:
            p._replace(x=1, error=2)
        except ValueError:
            pass
        else:
            # BUG FIX: was self._fail(...) -- TestCase has no _fail method,
            # so reaching this branch raised AttributeError instead of
            # reporting the intended failure. The API is TestCase.fail().
            self.fail('Did not detect an incorrect fieldname')
        # verify that field string can have commas
        Point = namedtuple('Point', 'x, y')
        p = Point(x=11, y=22)
        self.assertEqual(repr(p), 'Point(x=11, y=22)')
        # verify that fieldspec can be a non-string sequence
        Point = namedtuple('Point', ('x', 'y'))
        p = Point(x=11, y=22)
        self.assertEqual(repr(p), 'Point(x=11, y=22)')
    def test_tupleness(self):
        """A namedtuple instance must behave exactly like a tuple."""
        Point = namedtuple('Point', 'x y')
        p = Point(11, 22)
        self.assert_(isinstance(p, tuple))
        self.assertEqual(p, (11, 22))                                     # matches a real tuple
        self.assertEqual(tuple(p), (11, 22))                              # coercable to a real tuple
        self.assertEqual(list(p), [11, 22])                               # coercable to a list
        self.assertEqual(max(p), 22)                                      # iterable
        self.assertEqual(max(*p), 22)                                     # star-able
        x, y = p
        self.assertEqual(p, (x, y))                                       # unpacks like a tuple
        self.assertEqual((p[0], p[1]), (11, 22))                          # indexable like a tuple
        self.assertRaises(IndexError, p.__getitem__, 3)
        # ...plus named attribute access on top of the tuple protocol.
        self.assertEqual(p.x, x)
        self.assertEqual(p.y, y)
        self.assertRaises(AttributeError, eval, 'p.z', locals())
    def test_odd_sizes(self):
        """Zero fields, one field, and thousands of random field names."""
        Zero = namedtuple('Zero', '')
        self.assertEqual(Zero(), ())
        self.assertEqual(Zero._make([]), ())
        self.assertEqual(repr(Zero()), 'Zero()')
        self.assertEqual(Zero()._asdict(), {})
        self.assertEqual(Zero()._fields, ())
        Dot = namedtuple('Dot', 'd')
        self.assertEqual(Dot(1), (1,))
        self.assertEqual(Dot._make([1]), (1,))
        self.assertEqual(Dot(1).d, 1)
        self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
        self.assertEqual(Dot(1)._asdict(), {'d':1})
        self.assertEqual(Dot(1)._replace(d=999), (999,))
        self.assertEqual(Dot(1)._fields, ('d',))
        # Stress the factory with up to 5000 random 10-letter field names
        # (the set() dedupes, so recompute n afterwards).
        n = 5000
        import string, random
        names = list(set(''.join([random.choice(string.ascii_letters)
                                  for j in range(10)]) for i in range(n)))
        n = len(names)
        Big = namedtuple('Big', names)
        b = Big(*range(n))
        self.assertEqual(b, tuple(range(n)))
        self.assertEqual(Big._make(range(n)), tuple(range(n)))
        for pos, name in enumerate(names):
            self.assertEqual(getattr(b, name), pos)
        repr(b)                                 # make sure repr() doesn't blow-up
        d = b._asdict()
        d_expected = dict(zip(names, range(n)))
        self.assertEqual(d, d_expected)
        b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
        b2_expected = range(n)                  # Python 2: range() returns a mutable list
        b2_expected[1] = 999
        b2_expected[-5] = 42
        self.assertEqual(b2, tuple(b2_expected))
        self.assertEqual(b._fields, tuple(names))
    def test_pickle(self):
        """Round-trip through pickle and cPickle at every protocol."""
        p = TestNT(x=10, y=20, z=30)
        for module in pickle, cPickle:
            loads = getattr(module, 'loads')
            dumps = getattr(module, 'dumps')
            for protocol in -1, 0, 1, 2:
                q = loads(dumps(p, protocol))
                self.assertEqual(p, q)
                self.assertEqual(p._fields, q._fields)
    def test_copy(self):
        """copy.copy and copy.deepcopy preserve value and _fields."""
        p = TestNT(x=10, y=20, z=30)
        for copier in copy.copy, copy.deepcopy:
            q = copier(p)
            self.assertEqual(p, q)
            self.assertEqual(p._fields, q._fields)
class ABCTestCase(unittest.TestCase):
    """Shared helper base for the ABC test classes below."""
    def validate_abstract_methods(self, abc, *names):
        """Check that *abc* can only be instantiated (via a subclass) when
        every abstract method listed in *names* has been overridden."""
        stub = lambda s, *args: 0
        full_stubs = dict.fromkeys(names, stub)
        # With every required method stubbed out, instantiation succeeds.
        complete = type('C', (abc,), full_stubs)
        complete()
        # Removing any single required method makes instantiation fail.
        for missing in names:
            partial_stubs = dict(full_stubs)
            del partial_stubs[missing]
            incomplete = type('C', (abc,), partial_stubs)
            self.assertRaises(TypeError, incomplete, missing)
class TestOneTrickPonyABCs(ABCTestCase):
    """Tests for the single-method ABCs (Hashable, Iterable, Iterator,
    Sized, Container, Callable): virtual subclassing of built-ins,
    direct subclassing, and explicit registration.

    NOTE(review): this is Python 2-era test code — it deliberately uses
    the deprecated failIf/failUnless aliases, the ``next`` iterator
    protocol name, and ``__metaclass__``; do not modernize these calls.
    """

    def test_Hashable(self):
        """Mutable built-ins are not Hashable; immutables and types are."""
        # Check some non-hashables
        non_samples = [list(), set(), dict()]
        for x in non_samples:
            self.failIf(isinstance(x, Hashable), repr(x))
            self.failIf(issubclass(type(x), Hashable), repr(type(x)))
        # Check some hashables
        samples = [None,
                   int(), float(), complex(),
                   str(),
                   tuple(), frozenset(),
                   int, list, object, type,
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Hashable), repr(x))
            self.failUnless(issubclass(type(x), Hashable), repr(type(x)))
        # Hashable itself is abstract and must not be instantiable.
        self.assertRaises(TypeError, Hashable)
        # Check direct subclassing
        class H(Hashable):
            def __hash__(self):
                return super(H, self).__hash__()
            __eq__ = Hashable.__eq__ # Silence Py3k warning
        self.assertEqual(hash(H()), 0)
        self.failIf(issubclass(int, H))
        self.validate_abstract_methods(Hashable, '__hash__')

    def test_Iterable(self):
        """Anything defining __iter__ (including generators) is Iterable;
        plain scalars are not."""
        # Check some non-iterables
        non_samples = [None, 42, 3.14, 1j]
        for x in non_samples:
            self.failIf(isinstance(x, Iterable), repr(x))
            self.failIf(issubclass(type(x), Iterable), repr(type(x)))
        # Check some iterables
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Iterable), repr(x))
            self.failUnless(issubclass(type(x), Iterable), repr(type(x)))
        # Check direct subclassing
        class I(Iterable):
            def __iter__(self):
                return super(I, self).__iter__()
        self.assertEqual(list(I()), [])
        self.failIf(issubclass(str, I))
        self.validate_abstract_methods(Iterable, '__iter__')

    def test_Iterator(self):
        """Only objects with both __iter__ and next (Py2 protocol) count
        as Iterator; iterables themselves do not."""
        non_samples = [None, 42, 3.14, 1j, "".encode('ascii'), "", (), [],
                       {}, set()]
        for x in non_samples:
            self.failIf(isinstance(x, Iterator), repr(x))
            self.failIf(issubclass(type(x), Iterator), repr(type(x)))
        samples = [iter(str()),
                   iter(tuple()), iter(list()), iter(dict()),
                   iter(set()), iter(frozenset()),
                   iter(dict().keys()), iter(dict().items()),
                   iter(dict().values()),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Iterator), repr(x))
            self.failUnless(issubclass(type(x), Iterator), repr(type(x)))
        self.validate_abstract_methods(Iterator, 'next')

    def test_Sized(self):
        """Sized requires __len__; generators do not have it."""
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.failIf(isinstance(x, Sized), repr(x))
            self.failIf(issubclass(type(x), Sized), repr(type(x)))
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Sized), repr(x))
            self.failUnless(issubclass(type(x), Sized), repr(type(x)))
        self.validate_abstract_methods(Sized, '__len__')

    def test_Container(self):
        """Container requires __contains__."""
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.failIf(isinstance(x, Container), repr(x))
            self.failIf(issubclass(type(x), Container), repr(type(x)))
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Container), repr(x))
            self.failUnless(issubclass(type(x), Container), repr(type(x)))
        self.validate_abstract_methods(Container, '__contains__')

    def test_Callable(self):
        """Callable covers lambdas, types, built-ins and bound methods."""
        non_samples = [None, 42, 3.14, 1j,
                       "", "".encode('ascii'), (), [], {}, set(),
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.failIf(isinstance(x, Callable), repr(x))
            self.failIf(issubclass(type(x), Callable), repr(type(x)))
        samples = [lambda: None,
                   type, int, object,
                   len,
                   list.append, [].append,
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Callable), repr(x))
            self.failUnless(issubclass(type(x), Callable), repr(type(x)))
        self.validate_abstract_methods(Callable, '__call__')

    def test_direct_subclassing(self):
        """A direct subclass of each ABC is recognised; unrelated types
        (int) are not pulled in."""
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C(B):
                pass
            self.failUnless(issubclass(C, B))
            self.failIf(issubclass(int, C))

    def test_registration(self):
        """register() turns an unrelated class into a virtual subclass."""
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C:
                __metaclass__ = type
                __hash__ = None  # Make sure it isn't hashable by default
            self.failIf(issubclass(C, B), B.__name__)
            B.register(C)
            self.failUnless(issubclass(C, B))
class TestCollectionABCs(ABCTestCase):
# XXX For now, we only test some virtual inheritance properties.
# We should also test the proper behavior of the collection ABCs
# as real base classes or mix-in classes.
def test_Set(self):
    """Both built-in set types must be virtual subclasses of Set."""
    for sample_type in (set, frozenset):
        self.failUnless(isinstance(sample_type(), Set))
        self.failUnless(issubclass(sample_type, Set))
    self.validate_abstract_methods(Set, '__contains__', '__iter__', '__len__')
def test_hash_Set(self):
    """Equal user-defined Sets must hash equally via the Set._hash mixin."""
    class OneTwoThreeSet(Set):
        def __init__(self):
            self.contents = [1, 2, 3]
        def __contains__(self, x):
            return x in self.contents
        def __len__(self):
            return len(self.contents)
        def __iter__(self):
            return iter(self.contents)
        def __hash__(self):
            # Delegate to the Set mixin's membership-based hash helper.
            return self._hash()
    a, b = OneTwoThreeSet(), OneTwoThreeSet()
    # Two sets with identical contents must hash identically.
    self.failUnless(hash(a) == hash(b))
def test_MutableSet(self):
    """set is a MutableSet; frozenset (immutable) must not be."""
    self.failUnless(isinstance(set(), MutableSet))
    self.failUnless(issubclass(set, MutableSet))
    self.failIf(isinstance(frozenset(), MutableSet))
    self.failIf(issubclass(frozenset, MutableSet))
    self.validate_abstract_methods(MutableSet, '__contains__', '__iter__', '__len__',
                                   'add', 'discard')
def test_issue_4920(self):
    """Regression test: MutableSet.pop() on a minimal concrete subclass."""
    # MutableSet.pop() method did not work
    class MySet(collections.MutableSet):
        __slots__=['__s']
        def __init__(self,items=None):
            if items is None:
                items=[]
            self.__s=set(items)
        def __contains__(self,v):
            return v in self.__s
        def __iter__(self):
            return iter(self.__s)
        def __len__(self):
            return len(self.__s)
        def add(self,v):
            # Return True when the element was newly added.
            result=v not in self.__s
            self.__s.add(v)
            return result
        def discard(self,v):
            # Return True when the element was actually present.
            result=v in self.__s
            self.__s.discard(v)
            return result
        def __repr__(self):
            return "MySet(%s)" % repr(list(self))
    s = MySet([5,43,2,1])
    # NOTE(review): relies on CPython's set iteration order for small
    # ints yielding 1 first — confirm this is intentional.
    self.assertEqual(s.pop(), 1)
def test_Mapping(self):
    """dict satisfies the Mapping ABC and its abstract methods."""
    for mapping_type in (dict,):
        self.failUnless(isinstance(mapping_type(), Mapping))
        self.failUnless(issubclass(mapping_type, Mapping))
    self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',
                                   '__getitem__')
def test_MutableMapping(self):
    """dict satisfies MutableMapping, including the mutation methods."""
    for mapping_type in (dict,):
        self.failUnless(isinstance(mapping_type(), MutableMapping))
        self.failUnless(issubclass(mapping_type, MutableMapping))
    self.validate_abstract_methods(MutableMapping, '__contains__', '__iter__', '__len__',
                                   '__getitem__', '__setitem__', '__delitem__')
def test_Sequence(self):
for sample in [tuple, list, str]:
| |
<reponame>clay3899/Freestyle
import pygame as pg
from settings import *
from os import path
vec = pg.math.Vector2
class Player(pg.sprite.Sprite):
    """The player-controlled sprite: handles input, jumping and motion.

    Source: YouTube Videos KidsCanCode provided information needed for
    initial setup of code, though code was majorly altered to tailor to
    project (https://www.youtube.com/watch?v=uWvb3QzA48c).
    """

    def __init__(self, game, img):
        """Set up the player sprite.

        Parameters:
            game: the owning game object (used to reach game.platforms)
            img (str): filename of the player's .png image inside img_dir
        """
        pg.sprite.Sprite.__init__(self)
        self.game = game
        # Load the artwork directly; the original also allocated a throwaway
        # 32x32 Surface that was immediately overwritten (dead code, removed).
        # convert_alpha() preserves per-pixel transparency.
        self.image = pg.image.load(path.join(img_dir, img)).convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH / 2, HEIGHT / 2)
        self.pos = vec(WIDTH / 2, HEIGHT / 2)
        self.vel = vec(0, 0)
        self.acc = vec(0, 0)
        self.health = PLAYER_HEALTH
        # Radius presumably used for circle-based collision — TODO confirm.
        self.radius = 15

    def jump(self):
        """Jump, but only when standing on a platform.

        Nudges the sprite down one pixel to detect a platform directly
        underfoot, then restores the position before applying the impulse.
        """
        self.rect.y += 1
        hits = pg.sprite.spritecollide(self, self.game.platforms, False)
        self.rect.y -= 1
        if hits:
            self.vel.y = -PLAYER_JUMP

    def update(self):
        """Apply input, gravity and friction, then integrate the motion."""
        self.acc = vec(0, PLAYER_GRAV)
        keys = pg.key.get_pressed()
        if keys[pg.K_LEFT]:
            self.acc.x = -PLAYER_ACC
        if keys[pg.K_RIGHT]:
            self.acc.x = PLAYER_ACC
        # Friction opposes the current horizontal velocity.
        self.acc.x += self.vel.x * PLAYER_FRICTION
        # Equations of motion (semi-implicit Euler step).
        self.vel += self.acc
        self.pos += self.vel + 0.5 * self.acc
        self.rect.midbottom = self.pos
        # Clamp the horizontal position so nothing passes the screen edges.
        if self.pos.x > WIDTH:
            self.pos.x = WIDTH
        if self.pos.x < 0:
            self.pos.x = 0
class Platform(pg.sprite.Sprite):
    """A solid rectangular surface the player can stand on.

    Source: YouTube Videos KidsCanCode provided information needed for
    initial setup of code, though code was majorly altered to tailor to
    project (https://www.youtube.com/watch?v=uWvb3QzA48c).
    """

    def __init__(self, x, y, w, h):
        """Create a w-by-h black platform with its top-left corner at (x, y).

        Parameters:
            x (int): horizontal position of the platform's left edge
            y (int): vertical position of the platform's top edge
            w (int): platform width in pixels
            h (int): platform height in pixels
        """
        pg.sprite.Sprite.__init__(self)
        surface = pg.Surface((w, h))
        surface.fill(BLACK)
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.topleft = (x, y)
class Enemy(pg.sprite.Sprite):
    """Template for enemy sprites: health tracking, death handling, and an
    on-sprite health bar."""

    def __init__(self, x, y, img):
        """Set up an enemy sprite.

        Parameters:
            x (int): x coordinate of the enemy on the screen
            y (int): y coordinate of the enemy on the screen
            img (str): filename of the enemy's .png image inside img_dir
        """
        pg.sprite.Sprite.__init__(self)
        self.image = pg.image.load(path.join(img_dir, img)).convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.health = ENEMY_HEALTH

    def update(self):
        """Kill the sprite and play the death sound once health is depleted.

        The original had a duplicated nested ``if self.health <= 0`` check
        with docstrings misplaced inside the branch; collapsed to one check.
        """
        if self.health <= 0:
            self.kill()
            # Build the sound path portably; the original hard-coded
            # backslashes ('game\sounds\...') which only works on Windows
            # and contains invalid escape sequences.
            death_sound = pg.mixer.Sound(path.join('game', 'sounds', 'explode.ogg'))
            pg.mixer.Sound.play(death_sound)

    def draw_health(self):
        """Draw a colour-coded health bar onto the enemy's image.

        Green above 60 health, yellow above 30, red otherwise; the bar is
        only drawn after the enemy has taken damage.
        """
        if self.health > 60:
            col = GREEN
        elif self.health > 30:
            col = YELLOW
        else:
            col = RED
        # Foreground bar width is proportional to remaining health.
        width = int(self.rect.width * self.health / ENEMY_HEALTH)
        width2 = int(self.rect.width)
        self.health_bar = pg.Rect(0, 0, width, 7)
        self.total = pg.Rect(0, 0, width2, 7)
        if self.health < ENEMY_HEALTH:
            pg.draw.rect(self.image, BLACK, self.total)
            pg.draw.rect(self.image, col, self.health_bar)
class Arrow(pg.sprite.Sprite):
    """Projectile fired by the player: launches right with a slight upward
    velocity and falls under gravity until it leaves the screen.

    Source: YouTube Videos KidsCanCode provided information needed for
    initial setup of code, though code was majorly altered to tailor to
    project (https://www.youtube.com/watch?v=uWvb3QzA48c).
    """

    def __init__(self, x, y, img):
        """Spawn an arrow centred at (x, y) using the image file *img*."""
        pg.sprite.Sprite.__init__(self)
        sprite_img = pg.image.load(path.join(img_dir, img)).convert_alpha()
        sprite_img.set_colorkey(BLACK)
        self.image = sprite_img
        self.rect = sprite_img.get_rect()
        self.rect.centerx, self.rect.centery = x, y
        self.pos = vec(x, y)
        self.vel = vec(ARROW_SPEED, -3)
        self.acc = vec(0, 0)

    def update(self):
        """Integrate the arrow's motion; remove it once it is off-screen."""
        # Equations of motion: gravity plus a horizontal term fed back
        # from the current velocity.
        self.acc = vec(0, PLAYER_GRAV)
        self.acc.x += self.vel.x
        self.vel.y += self.acc.y
        self.pos += self.vel + 0.5 * self.acc
        self.rect.x = self.pos.x
        # Sprite is drawn 32 px above the tracked position.
        self.rect.y = self.pos.y - 32
        # Cull arrows that have flown past the play area.
        if self.rect.x > WIDTH + 100 or self.rect.y > HEIGHT + 100:
            self.kill()
class Fireball(pg.sprite.Sprite):
    """Projectile fired by enemies: travels left with a tiny downward
    acceleration."""

    def __init__(self, x, y, img):
        """Spawn a fireball centred at (x, y) using the image file *img*.

        Parameters:
            x (int): x coordinate of the fireball on the screen
            y (int): y coordinate of the fireball on the screen
            img (str): filename of the fireball's .png image inside img_dir
        """
        pg.sprite.Sprite.__init__(self)
        sprite_img = pg.image.load(path.join(img_dir, img)).convert_alpha()
        sprite_img.set_colorkey(BLACK)
        self.image = sprite_img
        self.rect = sprite_img.get_rect()
        self.rect.centerx, self.rect.centery = x, y
        self.pos = vec(x, y)
        self.vel = vec(-FIREBALL_SPEED, 0)
        self.acc = vec(0, 0)

    def update(self):
        """Integrate the fireball's motion each frame."""
        # Equations of motion: small constant downward pull plus a
        # horizontal term fed back from the current velocity.
        self.acc = vec(0, 0.006)
        self.acc.x += self.vel.x
        self.vel.y += self.acc.y
        self.pos += self.vel + 0.5 * self.acc
        self.rect.x = self.pos.x
        # Sprite is drawn 64 px above the tracked position.
        self.rect.y = self.pos.y - 64
class Draw_Text(pg.sprite.Sprite):
def __init__(self, surface, text, size, x, y, color):
"""
Function to draw text on the the start screen
Parameters:
self (self): keyword we can access the attributes and
methods of the class in python
surface: identifies the screen on which to draw text
text (str): Words that are desired to be on the pygame
screen
size (int): Provides the desired text size of word
x (int): x coordinate of the text on the screen (changing
the coordinate moves the text horizontally)
y (int): y coordinate of the text on the screen (changing
the coordinate moves the text vertically)
color (preset color code from pygame): Determines the color
of the text
"""
font = pg.font.Font(pg.font.match_font('cambria'), size)
text_surface = font.render(text, | |
<reponame>alvaroabascar/spaCy
from typing import List, Sequence, Dict, Any, Tuple, Optional
from pathlib import Path
from collections import Counter
import sys
import srsly
from wasabi import Printer, MESSAGES, msg
import typer
from ._util import app, Arg, Opt, show_validation_error, parse_config_overrides
from ._util import import_code, debug_cli
from ..training import Example
from ..training.initialize import get_sourced_components
from ..schemas import ConfigSchemaTraining
from ..pipeline._parser_internals import nonproj
from ..pipeline._parser_internals.nonproj import DELIMITER
from ..language import Language
from ..util import registry, resolve_dot_names
from .. import util
# Minimum number of expected occurrences of NER label in data to train new label
NEW_LABEL_THRESHOLD = 50
# Minimum number of expected occurrences of dependency labels
DEP_LABEL_THRESHOLD = 20
# Minimum number of expected examples to train a new pipeline
BLANK_MODEL_MIN_THRESHOLD = 100
# Below this many examples a warning is emitted (a hard failure is
# reported below BLANK_MODEL_MIN_THRESHOLD).
BLANK_MODEL_THRESHOLD = 2000
# Registered twice: as "debug data" (the canonical subcommand) and as the
# legacy top-level "debug-data" alias, which is hidden and warns on use.
@debug_cli.command(
    "data", context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
)
@app.command(
    "debug-data",
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
    hidden=True,  # hide this from main CLI help but still allow it to work with warning
)
def debug_data_cli(
    # fmt: off
    ctx: typer.Context,  # This is only used to read additional arguments
    config_path: Path = Arg(..., help="Path to config file", exists=True, allow_dash=True),
    code_path: Optional[Path] = Opt(None, "--code-path", "--code", "-c", help="Path to Python file with additional code (registered functions) to be imported"),
    ignore_warnings: bool = Opt(False, "--ignore-warnings", "-IW", help="Ignore warnings, only show stats and errors"),
    verbose: bool = Opt(False, "--verbose", "-V", help="Print additional information and explanations"),
    no_format: bool = Opt(False, "--no-format", "-NF", help="Don't pretty-print the results"),
    # fmt: on
):
    """
    Analyze, debug and validate your training and development data. Outputs
    useful stats, and can help you find problems like invalid entity annotations,
    cyclic dependencies, low data labels and more.

    DOCS: https://spacy.io/api/cli#debug-data
    """
    # Warn when invoked through the deprecated top-level alias.
    if ctx.command.name == "debug-data":
        msg.warn(
            "The debug-data command is now available via the 'debug data' "
            "subcommand (without the hyphen). You can run python -m spacy debug "
            "--help for an overview of the other available debugging commands."
        )
    # Extra CLI args become config overrides; user code is imported first so
    # any registered functions it defines can be resolved.
    overrides = parse_config_overrides(ctx.args)
    import_code(code_path)
    debug_data(
        config_path,
        config_overrides=overrides,
        ignore_warnings=ignore_warnings,
        verbose=verbose,
        no_format=no_format,
        silent=False,
    )
def debug_data(
config_path: Path,
*,
config_overrides: Dict[str, Any] = {},
ignore_warnings: bool = False,
verbose: bool = False,
no_format: bool = True,
silent: bool = True,
):
msg = Printer(
no_print=silent, pretty=not no_format, ignore_warnings=ignore_warnings
)
# Make sure all files and paths exists if they are needed
with show_validation_error(config_path):
cfg = util.load_config(config_path, overrides=config_overrides)
nlp = util.load_model_from_config(cfg)
config = nlp.config.interpolate()
T = registry.resolve(config["training"], schema=ConfigSchemaTraining)
# Use original config here, not resolved version
sourced_components = get_sourced_components(cfg)
frozen_components = T["frozen_components"]
resume_components = [p for p in sourced_components if p not in frozen_components]
pipeline = nlp.pipe_names
factory_names = [nlp.get_pipe_meta(pipe).factory for pipe in nlp.pipe_names]
msg.divider("Data file validation")
# Create the gold corpus to be able to better analyze data
dot_names = [T["train_corpus"], T["dev_corpus"]]
train_corpus, dev_corpus = resolve_dot_names(config, dot_names)
train_dataset = list(train_corpus(nlp))
dev_dataset = list(dev_corpus(nlp))
msg.good("Corpus is loadable")
nlp.initialize(lambda: train_dataset)
msg.good("Pipeline can be initialized with data")
# Create all gold data here to avoid iterating over the train_dataset constantly
gold_train_data = _compile_gold(train_dataset, factory_names, nlp, make_proj=True)
gold_train_unpreprocessed_data = _compile_gold(
train_dataset, factory_names, nlp, make_proj=False
)
gold_dev_data = _compile_gold(dev_dataset, factory_names, nlp, make_proj=True)
train_texts = gold_train_data["texts"]
dev_texts = gold_dev_data["texts"]
frozen_components = T["frozen_components"]
msg.divider("Training stats")
msg.text(f"Language: {nlp.lang}")
msg.text(f"Training pipeline: {', '.join(pipeline)}")
if resume_components:
msg.text(f"Components from other pipelines: {', '.join(resume_components)}")
if frozen_components:
msg.text(f"Frozen components: {', '.join(frozen_components)}")
msg.text(f"{len(train_dataset)} training docs")
msg.text(f"{len(dev_dataset)} evaluation docs")
if not len(gold_dev_data):
msg.fail("No evaluation docs")
overlap = len(train_texts.intersection(dev_texts))
if overlap:
msg.warn(f"{overlap} training examples also in evaluation data")
else:
msg.good("No overlap between training and evaluation data")
# TODO: make this feedback more fine-grained and report on updated
# components vs. blank components
if not resume_components and len(train_dataset) < BLANK_MODEL_THRESHOLD:
text = f"Low number of examples to train a new pipeline ({len(train_dataset)})"
if len(train_dataset) < BLANK_MODEL_MIN_THRESHOLD:
msg.fail(text)
else:
msg.warn(text)
msg.text(
f"It's recommended to use at least {BLANK_MODEL_THRESHOLD} examples "
f"(minimum {BLANK_MODEL_MIN_THRESHOLD})",
show=verbose,
)
msg.divider("Vocab & Vectors")
n_words = gold_train_data["n_words"]
msg.info(
f"{n_words} total word(s) in the data ({len(gold_train_data['words'])} unique)"
)
if gold_train_data["n_misaligned_words"] > 0:
n_misaligned = gold_train_data["n_misaligned_words"]
msg.warn(f"{n_misaligned} misaligned tokens in the training data")
if gold_dev_data["n_misaligned_words"] > 0:
n_misaligned = gold_dev_data["n_misaligned_words"]
msg.warn(f"{n_misaligned} misaligned tokens in the dev data")
most_common_words = gold_train_data["words"].most_common(10)
msg.text(
f"10 most common words: {_format_labels(most_common_words, counts=True)}",
show=verbose,
)
if len(nlp.vocab.vectors):
msg.info(
f"{len(nlp.vocab.vectors)} vectors ({nlp.vocab.vectors.n_keys} "
f"unique keys, {nlp.vocab.vectors_length} dimensions)"
)
n_missing_vectors = sum(gold_train_data["words_missing_vectors"].values())
msg.warn(
"{} words in training data without vectors ({:0.2f}%)".format(
n_missing_vectors, n_missing_vectors / gold_train_data["n_words"]
),
)
msg.text(
"10 most common words without vectors: {}".format(
_format_labels(
gold_train_data["words_missing_vectors"].most_common(10),
counts=True,
)
),
show=verbose,
)
else:
msg.info("No word vectors present in the package")
if "ner" in factory_names:
# Get all unique NER labels present in the data
labels = set(
label for label in gold_train_data["ner"] if label not in ("O", "-", None)
)
label_counts = gold_train_data["ner"]
model_labels = _get_labels_from_model(nlp, "ner")
new_labels = [l for l in labels if l not in model_labels]
existing_labels = [l for l in labels if l in model_labels]
has_low_data_warning = False
has_no_neg_warning = False
has_ws_ents_error = False
has_punct_ents_warning = False
msg.divider("Named Entity Recognition")
msg.info(
f"{len(new_labels)} new label(s), {len(existing_labels)} existing label(s)"
)
missing_values = label_counts["-"]
msg.text(f"{missing_values} missing value(s) (tokens with '-' label)")
for label in new_labels:
if len(label) == 0:
msg.fail("Empty label found in new labels")
if new_labels:
labels_with_counts = [
(label, count)
for label, count in label_counts.most_common()
if label != "-"
]
labels_with_counts = _format_labels(labels_with_counts, counts=True)
msg.text(f"New: {labels_with_counts}", show=verbose)
if existing_labels:
msg.text(f"Existing: {_format_labels(existing_labels)}", show=verbose)
if gold_train_data["ws_ents"]:
msg.fail(f"{gold_train_data['ws_ents']} invalid whitespace entity spans")
has_ws_ents_error = True
if gold_train_data["punct_ents"]:
msg.warn(f"{gold_train_data['punct_ents']} entity span(s) with punctuation")
has_punct_ents_warning = True
for label in new_labels:
if label_counts[label] <= NEW_LABEL_THRESHOLD:
msg.warn(
f"Low number of examples for new label '{label}' ({label_counts[label]})"
)
has_low_data_warning = True
with msg.loading("Analyzing label distribution..."):
neg_docs = _get_examples_without_label(train_dataset, label)
if neg_docs == 0:
msg.warn(f"No examples for texts WITHOUT new label '{label}'")
has_no_neg_warning = True
if not has_low_data_warning:
msg.good("Good amount of examples for all labels")
if not has_no_neg_warning:
msg.good("Examples without occurrences available for all labels")
if not has_ws_ents_error:
msg.good("No entities consisting of or starting/ending with whitespace")
if not has_punct_ents_warning:
msg.good("No entities consisting of or starting/ending with punctuation")
if has_low_data_warning:
msg.text(
f"To train a new entity type, your data should include at "
f"least {NEW_LABEL_THRESHOLD} instances of the new label",
show=verbose,
)
if has_no_neg_warning:
msg.text(
"Training data should always include examples of entities "
"in context, as well as examples without a given entity "
"type.",
show=verbose,
)
if has_ws_ents_error:
msg.text(
"As of spaCy v2.1.0, entity spans consisting of or starting/ending "
"with whitespace characters are considered invalid."
)
if has_punct_ents_warning:
msg.text(
"Entity spans consisting of or starting/ending "
"with punctuation can not be trained with a noise level > 0."
)
if "textcat" in factory_names:
msg.divider("Text Classification")
labels = [label for label in gold_train_data["cats"]]
model_labels = _get_labels_from_model(nlp, "textcat")
new_labels = [l for l in labels if l not in model_labels]
existing_labels = [l for l in labels if l in model_labels]
msg.info(
f"Text Classification: {len(new_labels)} new label(s), "
f"{len(existing_labels)} existing label(s)"
)
if new_labels:
labels_with_counts = _format_labels(
gold_train_data["cats"].most_common(), counts=True
)
msg.text(f"New: {labels_with_counts}", show=verbose)
if existing_labels:
msg.text(f"Existing: {_format_labels(existing_labels)}", show=verbose)
if set(gold_train_data["cats"]) != set(gold_dev_data["cats"]):
msg.fail(
f"The train and dev labels are not the same. "
f"Train labels: {_format_labels(gold_train_data['cats'])}. "
f"Dev labels: {_format_labels(gold_dev_data['cats'])}."
)
if gold_train_data["n_cats_multilabel"] > 0:
msg.info(
"The train data contains instances without "
"mutually-exclusive classes. Use '--textcat-multilabel' "
"when training."
)
if gold_dev_data["n_cats_multilabel"] == 0:
msg.warn(
"Potential train/dev mismatch: the train data contains "
"instances without mutually-exclusive classes while the "
"dev data does not."
)
else:
msg.info(
"The train data contains only instances with "
"mutually-exclusive classes."
)
if gold_dev_data["n_cats_multilabel"] > 0:
msg.fail(
"Train/dev mismatch: the dev data contains instances "
"without mutually-exclusive classes while the train data "
"contains only instances with mutually-exclusive classes."
)
if "tagger" in factory_names:
msg.divider("Part-of-speech Tagging")
labels = [label for label in gold_train_data["tags"]]
# TODO: does this need to be updated?
msg.info(f"{len(labels)} label(s) in data")
labels_with_counts = _format_labels(
gold_train_data["tags"].most_common(), counts=True
)
msg.text(labels_with_counts, show=verbose)
if "parser" in factory_names:
has_low_data_warning = False
msg.divider("Dependency Parsing")
# profile sentence length
msg.info(
f"Found {gold_train_data['n_sents']} sentence(s) with an average "
f"length of {gold_train_data['n_words'] / gold_train_data['n_sents']:.1f} words."
)
# check for documents with multiple sentences
sents_per_doc = gold_train_data["n_sents"] / len(gold_train_data["texts"])
if sents_per_doc < 1.1:
msg.warn(
| |
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019, 2020 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
__all__ = ['VirialCorrelationsPitzerCurl', 'VirialGas']
from fluids.numerics import newton
from chemicals.utils import log
from thermo.heat_capacity import HeatCapacityGas
from .phase import Phase
from chemicals.virial import BVirial_Pitzer_Curl, Z_from_virial_density_form
class VirialCorrelationsPitzerCurl(object):
    """Virial-coefficient model using the Pitzer-Curl correlation for the
    pure-component second virial coefficients B.

    Cross (interaction) second virial coefficients and all third virial
    coefficients C are currently zero placeholders.
    """

    def __init__(self, Tcs, Pcs, omegas):
        """Store critical temperatures [K], critical pressures [Pa] and
        acentric factors; the three sequences must share one length."""
        self.Tcs = Tcs
        self.Pcs = Pcs
        self.omegas = omegas
        self.N = len(Tcs)

    def _zero_vector(self):
        """Length-N list of zeros (placeholder pure-component values)."""
        return [0.0] * self.N

    def _zero_matrix(self):
        """New N x N matrix of zeros (placeholder interaction values)."""
        N = self.N
        return [[0.0] * N for _ in range(N)]

    def C_pures(self, T):
        """Pure-component third virial coefficients (all zero)."""
        return self._zero_vector()

    def dC_dT_pures(self, T):
        """First temperature derivatives of the pure C values (all zero)."""
        return self._zero_vector()

    def d2C_dT2_pures(self, T):
        """Second temperature derivatives of the pure C values (all zero)."""
        return self._zero_vector()

    def C_interactions(self, T):
        """Return the (Ciij, Cijj) interaction matrices, both zero.

        The full return would be (Ciij, Ciji, Cjii), (Cijj, Cjij, Cjji),
        but by symmetry only two distinct matrices exist.
        """
        return self._zero_matrix(), self._zero_matrix()

    def dC_dT_interactions(self, T):
        """First temperature derivatives of the C interaction matrices."""
        return self._zero_matrix(), self._zero_matrix()

    def d2C_dT2_interactions(self, T):
        """Second temperature derivatives of the C interaction matrices."""
        return self._zero_matrix(), self._zero_matrix()

    def B_pures(self, T):
        """Pure-component B values from the Pitzer-Curl correlation."""
        Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
        return [BVirial_Pitzer_Curl(T, Tcs[i], Pcs[i], omegas[i])
                for i in range(self.N)]

    def dB_dT_pures(self, T):
        """First temperature derivatives of the pure B values."""
        Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
        return [BVirial_Pitzer_Curl(T, Tcs[i], Pcs[i], omegas[i], 1)
                for i in range(self.N)]

    def d2B_dT2_pures(self, T):
        """Second temperature derivatives of the pure B values."""
        Tcs, Pcs, omegas = self.Tcs, self.Pcs, self.omegas
        return [BVirial_Pitzer_Curl(T, Tcs[i], Pcs[i], omegas[i], 2)
                for i in range(self.N)]

    def B_interactions(self, T):
        """Cross second virial coefficients (zero placeholders)."""
        return self._zero_matrix()

    def dB_dT_interactions(self, T):
        """Derivatives of the cross B values (zero placeholders)."""
        return self._zero_matrix()

    def d2B_dT2_interactions(self, T):
        """Second derivatives of the cross B values (zero placeholders)."""
        return self._zero_matrix()

    def _assemble_matrix(self, diagonal, interactions):
        """Copy *interactions* and place the pure values on the diagonal."""
        mat = [list(row) for row in interactions]
        for i, d in enumerate(diagonal):
            mat[i][i] = d
        return mat

    def B_matrix(self, T):
        """Full N x N matrix of second virial coefficients at T."""
        return self._assemble_matrix(self.B_pures(T), self.B_interactions(T))

    def dB_dT_matrix(self, T):
        """Temperature derivative of B_matrix."""
        return self._assemble_matrix(self.dB_dT_pures(T),
                                     self.dB_dT_interactions(T))

    def d2B_dT2_matrix(self, T):
        """Second temperature derivative of B_matrix."""
        return self._assemble_matrix(self.d2B_dT2_pures(T),
                                     self.d2B_dT2_interactions(T))
class VirialGas(Phase):
    """Gas phase modelled with the virial equation of state (terms through
    the third virial coefficient C)."""
    phase = 'g'
    # This phase can only ever represent a gas.
    force_phase = 'g'
    is_gas = True
    is_liquid = False
    # Departure properties are computed on an ideal-gas reference basis.
    ideal_gas_basis = True
    pure_references = ('HeatCapacityGases',)
    pure_reference_types = (HeatCapacityGas, )
def __init__(self, model, HeatCapacityGases=None, Hfs=None, Gfs=None,
             T=None, P=None, zs=None):
    """Create a virial-EOS gas phase.

    Parameters:
        model: virial-coefficient correlation object (e.g.
            VirialCorrelationsPitzerCurl) supplying B and C data
        HeatCapacityGases: per-component HeatCapacityGas objects
        Hfs: per-component formation enthalpies (may contain None)
        Gfs: per-component formation Gibbs energies (may contain None)
        T: temperature; P: pressure; zs: mole fractions — when all three
           are given the molar volume is solved immediately
    """
    self.model = model
    self.HeatCapacityGases = HeatCapacityGases
    self.Hfs = Hfs
    self.Gfs = Gfs
    # Formation entropies from S = (H - G)/T at the 298.15 K reference.
    if Hfs is not None and Gfs is not None and None not in Hfs and None not in Gfs:
        self.Sfs = [(Hfi - Gfi)/298.15 for Hfi, Gfi in zip(Hfs, Gfs)]
    else:
        self.Sfs = None
    # Infer the component count from the first sequence provided.
    # NOTE(review): self.N stays unset if every argument is None.
    for i in (zs, HeatCapacityGases, Hfs, Gfs):
        if i is not None:
            self.N = len(i)
            break
    if zs is not None:
        self.zs = zs
    if T is not None:
        self.T = T
    if P is not None:
        self.P = P
    # With a full state specification, solve the virial EOS for volume.
    # self.B()/self.C() are presumably defined later in the class — confirm.
    if T is not None and P is not None and zs is not None:
        Z = Z_from_virial_density_form(T, P, self.B(), self.C())
        self._V = Z*self.R*T/P
def V(self):
    """Return the stored molar volume solved from the virial EOS in
    __init__ (Z*R*T/P)."""
    return self._V
def dP_dT(self):
    r'''Temperature derivative of pressure at constant volume.

    .. math::
        \left(\frac{\partial P}{\partial T}\right)_{V} = \frac{R \left(T
        \left(V \frac{d}{d T} B{\left(T \right)} + \frac{d}{d T} C{\left(T
        \right)}\right) + V^{2} + V B{\left(T \right)} + C{\left(T \right)}
        \right)}{V^{3}}
    '''
    try:
        # Return the cached value once computed.
        return self._dP_dT
    except AttributeError:
        # Narrowed from a bare ``except:`` which swallowed every
        # exception (including KeyboardInterrupt).
        pass
    T, V = self.T, self._V
    self._dP_dT = dP_dT = self.R*(T*(V*self.dB_dT() + self.dC_dT()) + V*(V + self.B()) + self.C())/(V*V*V)
    return dP_dT
def dP_dV(self):
    r'''Volume derivative of pressure at constant temperature.

    .. math::
        \left(\frac{\partial P}{\partial V}\right)_{T} =
        - \frac{R T \left(V^{2} + 2 V B{\left(T \right)} + 3 C{\left(T
        \right)}\right)}{V^{4}}
    '''
    try:
        # Return the cached value once computed.
        return self._dP_dV
    except AttributeError:
        # Narrowed from a bare ``except:`` (cache-miss only).
        pass
    T, V = self.T, self._V
    self._dP_dV = dP_dV = -self.R*T*(V*V + 2.0*V*self.B() + 3.0*self.C())/(V*V*V*V)
    return dP_dV
def d2P_dTdV(self):
    r'''Mixed second derivative of pressure in temperature and volume.

    .. math::
        \left(\frac{\partial^2 P}{\partial V\partial T}\right)_{T} =
        - \frac{R \left(2 T V \frac{d}{d T} B{\left(T \right)} + 3 T
        \frac{d}{d T} C{\left(T \right)} + V^{2} + 2 V B{\left(T \right)}
        + 3 C{\left(T \right)}\right)}{V^{4}}
    '''
    try:
        # Return the cached value once computed.
        return self._d2P_dTdV
    except AttributeError:
        # Narrowed from a bare ``except:`` (cache-miss only).
        pass
    T, V = self.T, self._V
    V2 = V*V
    self._d2P_dTdV = d2P_dTdV = -self.R*(2.0*T*V*self.dB_dT() + 3.0*T*self.dC_dT()
                                         + V2 + 2.0*V*self.B() + 3.0*self.C())/(V2*V2)
    return d2P_dTdV
def d2P_dV2(self):
    r'''Second volume derivative of pressure at constant temperature.

    .. math::
        \left(\frac{\partial^2 P}{\partial V^2}\right)_{T} =
        \frac{2 R T \left(V^{2} + 3 V B{\left(T \right)}
        + 6 C{\left(T \right)}\right)}{V^{5}}
    '''
    try:
        # Return the cached value once computed.
        return self._d2P_dV2
    except AttributeError:
        # Narrowed from a bare ``except:`` (cache-miss only).
        pass
    T, V = self.T, self._V
    V2 = V*V
    self._d2P_dV2 = d2P_dV2 = 2.0*self.R*T*(V2 + 3.0*V*self.B() + 6.0*self.C())/(V2*V2*V)
    return d2P_dV2
def d2P_dT2(self):
r'''
.. math::
\left(\frac{\partial^2 P}{\partial T^2}\right)_{V} =
\frac{R \left(T \left(V \frac{d^{2}}{d T^{2}} B{\left(T \right)}
+ \frac{d^{2}}{d T^{2}} C{\left(T \right)}\right) + 2 V \frac{d}{d T}
B{\left(T \right)} + 2 \frac{d}{d T} C{\left(T \right)}\right)}{V^{3}}
'''
try:
return self._d2P_dT2
except:
pass
T, V = self.T, self._V
V2 = V*V
self._d2P_dT2 = d2P_dT2 = self.R*(T*(V*self.d2B_dT2() + self.d2C_dT2())
+ 2.0*V*self.dB_dT() + 2.0*self.dC_dT())/(V*V*V)
return d2P_dT2
def H_dep(self):
r'''
.. math::
H_{dep} = \frac{R T^{2} \left(2 V \frac{d}{d T} B{\left(T \right)}
+ \frac{d}{d T} C{\left(T \right)}\right)}{2 V^{2}} - R T \left(-1
+ \frac{V^{2} + V B{\left(T \right)} + C{\left(T \right)}}{V^{2}}
\right)
'''
'''
from sympy import *
Z, R, T, V, P = symbols('Z, R, T, V, P')
B, C = symbols('B, C', cls=Function)
base =Eq(P*V/(R*T), 1 + B(T)/V + C(T)/V**2)
P_sln = solve(base, P)[0]
Z = P_sln*V/(R*T)
# Two ways to compute H_dep
Hdep2 = R*T - P_sln*V + integrate(P_sln - T*diff(P_sln, T), (V, oo, V))
Hdep = -R*T*(Z-1) -integrate(diff(Z, T)/V, (V, oo, V))*R*T**2
'''
try:
return self._H_dep
except:
pass
T, V = self.T, self._V
V2 = V*V
RT = self.R*T
self._H_dep = H_dep = RT*(T*(2.0*V*self.dB_dT() + self.dC_dT())/(2.0*V2)
- (-1.0 + (V2 + V*self.B() + self.C())/V2))
return H_dep
def dH_dep_dT(self):
r'''
.. math::
\frac{\partial H_{dep}}{\partial T} = \frac{R \left(2 T^{2} V
\frac{d^{2}}{d T^{2}} B{\left(T \right)} + T^{2} \frac{d^{2}}{d T^{2}}
C{\left(T \right)} + 2 T V \frac{d}{d T} B{\left(T \right)}
- 2 V B{\left(T \right)} - 2 C{\left(T \right)}\right)}{2 V^{2}}
'''
try:
return self._dH_dep_dT
except:
pass
T, V = self.T, self._V
self._dH_dep_dT = dH_dep_dT = (self.R*(2.0*T*T*V*self.d2B_dT2() + T*T*self.d2C_dT2()
+ 2.0*T*V*self.dB_dT() - 2.0*V*self.B() - 2.0*self.C())/(2.0*V*V))
return dH_dep_dT
def S_dep(self):
r'''
.. math::
S_{dep} = \frac{R \left(- T \frac{d}{d T} C{\left(T \right)} + 2 V^{2}
\ln{\left(\frac{V^{2} + V B{\left(T \right)} + C{\left(T \right)}}
{V^{2}} \right)} - 2 V \left(T \frac{d}{d T} B{\left(T \right)}
+ B{\left(T \right)}\right) - C{\left(T \right)}\right)}{2 V^{2}}
'''
'''
dP_dT = diff(P_sln, T)
S_dep = integrate(dP_dT - R/V, (V, oo, V)) + R*log(Z)
'''
try:
return self._S_dep
except:
pass
T, V = self.T, self._V
V2 = V*V
self._S_dep = S_dep = (self.R*(-T*self.dC_dT() + 2*V2*log((V2 + V*self.B() + self.C())/V**2)
- 2*V*(T*self.dB_dT() + self.B()) - self.C())/(2*V2))
return S_dep
def dS_dep_dT(self):
r'''
.. math::
\frac{\partial S_{dep}}{\partial T} = \frac{R \left(2 V^{2} \left(V
\frac{d}{d T} B{\left(T \right)} + \frac{d}{d T} C{\left(T \right)}
\right) - \left(V^{2} + V B{\left(T \right)} + C{\left(T \right)}
\right) \left(T \frac{d^{2}}{d T^{2}} C{\left(T \right)} | |
"{4}::\n {5}:::\n{6}\n".format(
numerum_archiva, # 0
'Rēs interlinguālibus', # 1
'/download link/@eng-Latn', # 2
'link:{0}.no11.tbx[{0}.no11.tbx]'.format(
numerum_archiva
),
'Rēs linguālibus', # 4
'Lingua Anglica (Abecedarium Latinum)', # 5
_pad(self.notitiae.translatio(
'{% _🗣️ 1603_1_99_101_8 🗣️_ %}'), 4), # 6
'/reference URL/@eng-Latn', # 7
_pad(self.notitiae.translatio(
'{% _🗣️ 1603_1_99_101_8_854 🗣️_ %}'), 4) # 8
))
if self.annexis.est_annexum(numerum_archiva + '.no11.tmx'):
# save url, reference url, description
dictionaria_part.append(
"\n==== {0}.no11.tmx\n\n"
"{1}::\n {2}::: {3}\n {7}:::\n{8}\n"
"{4}::\n {5}:::\n{6}\n".format(
numerum_archiva, # 0
'Rēs interlinguālibus', # 1
'/download link/@eng-Latn', # 2
'link:{0}.no11.tmx[{0}.no11.tmx]'.format(
numerum_archiva
),
'Rēs linguālibus', # 4
'Lingua Anglica (Abecedarium Latinum)', # 5
_pad(self.notitiae.translatio(
'{% _🗣️ 1603_1_99_101_9 🗣️_ %}'), 4), # 6
'/reference URL/@eng-Latn', # 7
_pad(self.notitiae.translatio(
'{% _🗣️ 1603_1_99_101_9_854 🗣️_ %}'), 4) # 8
))
total_dictionaria += 1
if self.annexis.est_annexum(numerum_archiva + '.mul-Latn.codex.adoc'):
# save url, reference url, description
codex_part.append(
"\n==== {0}.mul-Latn.codex.adoc\n\n"
"{1}::\n {2}::: {3}\n {7}:::\n{8}\n"
"{4}::\n {5}:::\n{6}\n".format(
numerum_archiva, # 0
'Rēs interlinguālibus', # 1
'/download link/@eng-Latn', # 2
'link:{0}.mul-Latn.codex.adoc[{0}.mul-Latn.codex.adoc]'.format(
numerum_archiva
),
'Rēs linguālibus', # 4
'Lingua Anglica (Abecedarium Latinum)', # 5
_pad(self.notitiae.translatio(
'{% _🗣️ 1603_1_99_101_10 🗣️_ %}'), 4), # 6
'/reference URL/@eng-Latn', # 7
_pad(self.notitiae.translatio(
'{% _🗣️ 1603_1_99_101_10_854 🗣️_ %}'), 4) # 8
))
total_codex += 1
if self.annexis.est_annexum(numerum_archiva + '.mul-Latn.codex.pdf'):
codex_part.append(
"\n==== {0}.mul-Latn.codex.pdf\n\n"
"{1}::\n {2}::: {3}\n {7}:::\n{8}\n"
"{4}::\n {5}:::\n{6}\n".format(
numerum_archiva, # 0
'Rēs interlinguālibus', # 1
'/download link/@eng-Latn', # 2
'link:{0}.mul-Latn.codex.pdf[{0}.mul-Latn.codex.pdf]'.format(
numerum_archiva
),
'Rēs linguālibus', # 4
'Lingua Anglica (Abecedarium Latinum)', # 5
_pad(self.notitiae.translatio(
'{% _🗣️ 1603_1_99_101_11 🗣️_ %}'), 4), # 6
'/reference URL/@eng-Latn', # 7
_pad(self.notitiae.translatio(
'{% _🗣️ 1603_1_99_101_11_854 🗣️_ %}'), 4) # 8
))
total_codex += 1
# Compile final result
resultatum.append('=== Archīa prō dictiōnāriīs: {0}'.format(
total_dictionaria
))
resultatum.append('')
# resultatum.extend(descriptio_tabulae_de_lingua(
# ['Lingua Anglica (Abecedarium Latinum)'] * 1,
# [
# 'TIP: Is recommended to use the files on this section to '
# ' generate derived works.',
# ]))
# resultatum.append('')
resultatum.extend(dictionaria_part)
resultatum.append('')
resultatum.append('=== Archīa prō cōdice: {0}'.format(
total_codex
))
# textum_II = self.notitiae.translatio('{% _🗣️ 1603_1_99_10_3 🗣️_ %}')
# textum_III = self.notitiae.translatio('{% _🗣️ 1603_1_99_10_4 🗣️_ %}')
# resultatum.append('')
# resultatum.extend(descriptio_tabulae_de_lingua(
# ['Lingua Anglica (Abecedarium Latinum)'] * 2,
# # [
# # 'WARNING: Unless you are working with a natural language you '
# # 'understand it\'s letters and symbols, it is strongly '
# # 'advised to use automation to generate derived works. '
# # 'Keep manual human steps at minimum: '
# # 'if something goes wrong at least one or more languages can '
# # 'be used to verify mistakes. '
# # 'It\'s not at all necessary _know all languages_, '
# # 'but working with writing systems you don\'t understand is '
# # 'risky: '
# # 'copy and paste strategy can cause '
# # '_additional_ human errors and is unlikely to get human '
# # 'review as fast as you would need. ',
# # 'TIP: The Asciidoctor (.adoc) is better at copy and pasting! '
# # 'It can be converted to other text formats.',
# # ]))
# [
# textum_II,
# textum_III
# ]))
resultatum.append('')
resultatum.extend(codex_part)
resultatum.append('')
return resultatum
    def codex_capiti(self) -> list:
        """cōdex capitī /book header/@eng-Latn
        Trivia:
        - cōdex, m, s, (Nominative), https://en.wiktionary.org/wiki/codex#Latin
        - capitī, n, s, (Dative), https://en.wiktionary.org/wiki/caput#Latin
        Returns:
            [list]: AsciiDoc source lines for the book header: title,
                    document attributes, optional front-cover image,
                    public-domain note, metadata table and table of contents
        """
        resultatum = []
        # resultatum.append(self._caput())
        # resultatum.append(
        #     '# [`' +
        #     self.m1603_1_1__de_codex['#item+rem+i_qcc+is_zxxx+ix_n1603'] +
        #     '`] ' + self.m1603_1_1__de_codex['#item+rem+i_mul+is_zyyy'])
        # resultatum.append(
        #     '= [`' +
        #     self.m1603_1_1__de_codex['#item+rem+i_qcc+is_zxxx+ix_n1603'] +
        #     '`] ' + self.m1603_1_1__de_codex['#item+rem+i_mul+is_zyyy'])
        # Document title: "= Cōdex [<numerordinatio>]: <multilingual name>"
        resultatum.append("= Cōdex [{0}]: {1}".format(
            self.m1603_1_1__de_codex['#item+rem+i_qcc+is_zxxx+ix_n1603'],
            self.m1603_1_1__de_codex['#item+rem+i_mul+is_zyyy']
        ))
        # AsciiDoc document attributes: book layout, macro-placed TOC,
        # and Latin localization of the generated UI labels.
        resultatum.append(":doctype: book")
        resultatum.append(":title: Cōdex [{0}]: {1}".format(
            self.m1603_1_1__de_codex['#item+rem+i_qcc+is_zxxx+ix_n1603'],
            self.m1603_1_1__de_codex['#item+rem+i_mul+is_zyyy']
        ))
        resultatum.append(":lang: la")
        # resultatum.append(":toc:")
        resultatum.append(":toc: macro")
        resultatum.append(":toclevels: 5")
        # resultatum.append(":pdf-page-size: [8.25in, 11.69in]")  # A5
        # resultatum.append(":orgname: Etica.AI")
        # resultatum.append(":version: 1.2.3")
        # TODO: see the rest from here
        # https://docs.asciidoctor.org/asciidoctor/latest/localization-support/
        resultatum.append(":toc-title: Tabula contentorum")
        resultatum.append(":table-caption: Tabula")
        resultatum.append(":figure-caption: Pictūra")
        resultatum.append(":example-caption: Exemplum")
        # https://en.wiktionary.org/wiki/renovatio
        resultatum.append(":last-update-label: Renovatio")
        # https://en.wiktionary.org/wiki/versio#Latin
        resultatum.append(":version-label: Versiō")
        # @see https://docs.asciidoctor.org/asciidoc/latest/sections/appendix/
        # https://en.wiktionary.org/wiki/appendix#Latin
        resultatum.append(":appendix-caption: Appendix")
        # resultatum.append(":sectnums:")
        # resultatum.append(":partnums:")
        resultatum.append(":source-highlighter: rouge")
        # resultatum.append(":tip-caption: 💡")
        # resultatum.append(":note-caption: ℹ️")
        # resultatum.append(":warning-caption: ⚠️")
        resultatum.append(":warning-caption: Hic sunt dracones")
        # commendandum, verb, verbal-nouns>supine>accusative
        # https://en.wiktionary.org/wiki/commendo#Latin
        resultatum.append(":tip-caption: Commendātum")
        # @see https://github.com/asciidoctor/asciidoctor-pdf/blob/main/docs
        # /theming-guide.adoc#theme-related-document-attributes
        _codex_numerum = numerordinatio_neo_separatum(
            self.m1603_1_1__de_codex['#item+rem+i_qcc+is_zxxx+ix_n1603'])
        _basepath = numerordinatio_neo_separatum(_codex_numerum, '/')
        # _codex_cover = "{0}.mul-Latn.codex.png".format(
        #     _codex_numerum
        # )
        _codex_cover = "{0}.mul-Latn.codex.svg".format(
            _codex_numerum
        )
        # basepath = basepath + '/' + \
        #     numerordinatio_neo_separatum(self.de_codex, '_')
        # Only embed a front cover when the pre-rendered SVG exists on disk.
        if os.path.exists(_basepath + '/' + _codex_cover):
            resultatum.append(":front-cover-image: image:{0}[\"Cōdex [{1}]: {2}\",1050,1600]".format(
                _codex_cover,
                _codex_numerum,
                self.m1603_1_1__de_codex['#item+rem+i_mul+is_zyyy']
            ))
        resultatum.append("\n")
        resultatum.append("\n")
        # Public-domain quotation block shown before the metadata table.
        dominium_publicum = self.codex_dominium_publicum()
        resultatum.extend(dominium_publicum)
        # resultatum.extend((["{nbsp} +"] * 1))
        # resultatum.append("<<<")
        # Collect bibliographic metadata. The 'ix_wikipNNN' key suffixes
        # appear to reuse Wikidata property numbers (e.g. 50 author,
        # 655 translator, 123 publisher, 577 publication date,
        # 2479 SPDX license, 854 reference URL, 859 sponsor)
        # — NOTE(review): inferred from variable names; confirm.
        meta = {}
        meta_langs = [
            '#item+rem+i_qcc+is_zxxx+ix_codexfacto'
        ]
        scrīptor = self.quod_res('0_1603_1_7_2616_50')
        if scrīptor and qhxl(scrīptor, meta_langs) is not None:
            meta['#item+rem+i_qcc+is_zxxx+ix_wikip50'] = \
                qhxl(scrīptor, meta_langs)
        translator = self.quod_res('0_1603_1_7_2616_655')
        if translator and qhxl(translator, meta_langs) is not None:
            meta['#item+rem+i_qcc+is_zxxx+ix_wikip655'] = \
                qhxl(translator, meta_langs)
        dictiōnārium_ēditōrī = self.quod_res('0_1603_1_7_2616_98')
        if dictiōnārium_ēditōrī and \
                qhxl(dictiōnārium_ēditōrī, meta_langs) is not None:
            meta['#item+rem+i_qcc+is_zxxx+ix_wikip98'] = \
                qhxl(dictiōnārium_ēditōrī, meta_langs)
        publisher = self.quod_res('0_1603_1_7_2616_123')
        if publisher and qhxl(publisher, meta_langs) is not None:
            meta['#item+rem+i_qcc+is_zxxx+ix_wikip123'] = \
                qhxl(publisher, meta_langs)
        publication_date = self.quod_res('0_1603_1_7_2616_577')
        if publication_date and qhxl(publication_date, meta_langs) is not None:
            meta['#item+rem+i_qcc+is_zxxx+ix_wikip577'] = \
                qhxl(publication_date, meta_langs)
        # Generation timestamp (local time, second precision).
        meta['#item+rem+i_qcc+is_zxxx+ix_wikip393'] = \
            datetime.datetime.now().replace(microsecond=0).isoformat()
        spdx_licentiam = self.quod_res('0_1603_1_7_2616_2479')
        if spdx_licentiam and qhxl(spdx_licentiam, meta_langs) is not None:
            meta['#item+rem+i_qcc+is_zxxx+ix_wikip2479'] = \
                qhxl(spdx_licentiam, meta_langs)
        reference_url = self.quod_res('0_1603_1_7_2616_854')
        if reference_url and qhxl(reference_url, meta_langs) is not None:
            meta['#item+rem+i_qcc+is_zxxx+ix_wikip854'] = \
                qhxl(reference_url, meta_langs)
        spōnsor = self.quod_res('0_1603_1_7_2616_859')
        if spōnsor and qhxl(spōnsor, meta_langs) is not None:
            meta['#item+rem+i_qcc+is_zxxx+ix_wikip859'] = \
                qhxl(spōnsor, meta_langs)
        # paginae.append("")
        # paginae.append(str(meta))
        # paginae.append("")
        # Render the collected metadata as a table, if any was found.
        if len(meta.keys()) > 0:
            meta_tabulae = self.conceptum_ad_tabula_codicibus(meta)
            resultatum.extend(meta_tabulae)
            resultatum.append("")
            resultatum.append("")
        # Emit the TOC macro for every backend except EPUB3
        # (presumably because EPUB readers supply their own navigation).
        # resultatum.append("ifndef::backend-epub")
        resultatum.append("ifndef::backend-epub3[]")
        # resultatum.append("ifndef::ebook-format-kf8")
        # resultatum.append("ifdef::ebook-format-kf8")
        resultatum.append("<<<")
        resultatum.append("toc::[]")
        resultatum.append("<<<")
        resultatum.append("endif::[]")
        # resultatum.append("endifndef::[]")
        resultatum.append("\n")
        # TODO: potential list of images
        # @see https://github.com/asciidoctor/asciidoctor/issues/2189
        # @see https://github.com/Alwinator/asciidoctor-lists
        return resultatum
def codex_dominium_publicum(self) -> list:
"""cōdex praefātiōnī /book preface/@eng-Latn
Trivia:
- cōdex, m, s, (Nominative), https://en.wiktionary.org/wiki/codex#Latin
- praefātiōnī, f, s, (Dative), https://en.wiktionary.org/wiki/praefatio
Returns:
[list]:
"""
resultatum = []
# resultatum.append("[id=0_999_1603_1]")
# resultatum.append("== [0] /Praefātiō/@lat-Latn \n")
# resultatum.append("== Praefātiō \n")
resultatum.extend((["{nbsp} +"] * 2))
# resultatum.append("[.text-rigth]")
# resultatum.append("[.lead]")
quote_textum = self.notitiae.translatio('{% _🗣️ 1603_1_99_50_1 🗣️_ %}')
resultatum.append("[quote]")
# resultatum.append(
# "/_**Public domain means that each major common issue "
# "only needs to be resolved once**_/@eng-Latn")
resultatum.append(quote_textum)
resultatum.append("")
resultatum.append("'''")
return resultatum
    def codex_praefatio(self) -> list:
        """cōdex praefātiōnī /book preface/@eng-Latn
        Trivia:
        - cōdex, m, s, (Nominative), https://en.wiktionary.org/wiki/codex#Latin
        - praefātiōnī, f, s, (Dative), https://en.wiktionary.org/wiki/praefatio
        Returns:
            [list]: AsciiDoc source lines for the preface section
        """
        paginae = []
        paginae.append("[id=0_999_1603_1]")
        # resultatum.append("== [0] /Praefātiō/@lat-Latn \n")
        paginae.append("== Praefātiō \n")
        # Localized preface template; placeholders are filled with the codex
        # number and its multilingual name.
        textum_2 = self.notitiae.translatio('{% _🗣️ 1603_1_99_10_1 🗣️_ %}')
        codex_praefatio_textum = textum_2.format(  # noqa
            self.m1603_1_1__de_codex['#item+rem+i_qcc+is_zxxx+ix_n1603'],
            self.m1603_1_1__de_codex['#item+rem+i_mul+is_zyyy']
        )
        meta = {}
        meta['#item+rem+i_eng+is_latn'] = codex_praefatio_textum
        # meta_tabulae = self.conceptum_ad_tabula_codicibus(meta)
        # resultatum.extend(meta_tabulae)
        # resultatum.append("\nres_explanationibus\n")
        # Render the preface text via the shared explanation helper.
        paginae.extend(self.res_explanationibus(meta))
        # The large commented-out block below is retained historical code
        # (dictionary-necessities listing and extra metadata table).
        # dictionaria_necessitatibus = []
        # for index, item in enumerate(range(100)):
        #     referentia_textum = self.quod_res(
        #         '0_1603_1_7_1_4_' + str(item))
        #     if referentia_textum and len(referentia_textum) > 0:
        #         dictionaria_necessitatibus.extend(
        #             self._dictionaria_necessitatibus(referentia_textum, index))
        # if len(dictionaria_necessitatibus) > 0:
        #     paginae.append('=== Dictiōnāria necessitātibus')
        #     # paginae.append('')
        #     # paginae.append('----')
        #     # paginae.append(str(referentia))
        #     paginae.extend(dictionaria_necessitatibus)
        #     # paginae.append('----')
        #     paginae.append('')
        # paginae.extend(descriptio_tabulae_de_lingua(
        #     'Lingua Anglica (Abecedarium Latinum)',
        #     codex_praefatio_textum
        #     # ("".join(lineam) + '+' + "\n")
        # ))
        # meta = {}
        # # meta_langs = [
        # #     '#item+rem+i_mul+is_zyyy',
        # #     '#item+rem+i_lat+is_latn'
        # # ]
        # meta_langs = [
        #     '#item+rem+i_qcc+is_zxxx+ix_codexfacto'
        # ]
        # scrīptor = self.quod_res('0_1603_1_7_2616_50')
        # if scrīptor and qhxl(scrīptor, meta_langs) is not None:
        #     meta['#item+rem+i_qcc+is_zxxx+ix_wikip50'] = \
        #         qhxl(scrīptor, meta_langs)
        # publisher = self.quod_res('0_1603_1_7_2616_123')
        # if publisher and qhxl(publisher, meta_langs) is not None:
        #     meta['#item+rem+i_qcc+is_zxxx+ix_wikip123'] = \
        #         qhxl(publisher, meta_langs)
        # publication_date = self.quod_res('0_1603_1_7_2616_577')
        # if publication_date and qhxl(publication_date, meta_langs) is not None:
        #     meta['#item+rem+i_qcc+is_zxxx+ix_wikip577'] = \
        #         qhxl(publication_date, meta_langs)
        # meta['#item+rem+i_qcc+is_zxxx+ix_wikip393'] = \
        #     datetime.datetime.now().replace(microsecond=0).isoformat()
        # spdx_licentiam = self.quod_res('0_1603_1_7_2616_2479')
        # if spdx_licentiam and qhxl(spdx_licentiam, meta_langs) is not None:
        #     meta['#item+rem+i_qcc+is_zxxx+ix_wikip2479'] = \
        #         qhxl(spdx_licentiam, meta_langs)
        # reference_url = self.quod_res('0_1603_1_7_2616_854')
        # if reference_url and qhxl(reference_url, meta_langs) is not None:
        #     meta['#item+rem+i_qcc+is_zxxx+ix_wikip854'] = \
        #         qhxl(reference_url, meta_langs)
        # # paginae.append("")
        # # paginae.append(str(meta))
        # paginae.append("")
        # if len(meta.keys()) > 0:
        #     meta_tabulae = self.conceptum_ad_tabula_codicibus(meta)
        #     paginae.extend(meta_tabulae)
        #     paginae.append("")
        return paginae
        # return resultatum
def codex_corpori(self) -> list:
"""cōdex corporī | |
res = t.update(row, namemapping)
if res:
row[(namemapping.get(t.key) or t.key)] = res
self._after_update(row, namemapping)
    def _before_update(self, row, namemapping):
        """Extension hook invoked before an update; the default does nothing.

        NOTE(review): by analogy with _before_insert, a non-None return
        value presumably short-circuits the update — confirm in the caller.
        """
        return None
    def _after_update(self, row, namemapping):
        """Extension hook invoked after an update; the default does nothing."""
        pass
def ensure(self, row, namemapping={}):
"""Lookup the given member. If that fails, insert it. Return key value.
If the member must be inserted, data is automatically inserted in
all participating tables where (part of) the member is not
already represented.
Key values for different levels may be added to the row. It is
NOT guaranteed that key values for all levels exist in row
afterwards.
Arguments:
- row: the row to lookup or insert. Must contain the lookup
attributes.
- namemapping: an optional namemapping (see module's documentation)
"""
if type(self.root)==SlowlyChangingDimension:
for dim in self.levels.get(1, []):
(keyval, ignored) = self.__ensure_helper(dim, row, namemapping, False)
row[(namemapping.get(dim.key) or dim.key)] = keyval
row[(namemapping.get(self.root.key) or self.root.key)] = \
self.root.ensure(row, namemapping)
return row[(namemapping.get(self.root.key) or self.root.key)]
else:
(key, ignored) = self.__ensure_helper(self.root, row, namemapping,False)
return key
def insert(self, row, namemapping={}):
"""Insert the given member. If that fails, insert it. Return key value.
Data is automatically inserted in all participating tables where
(part of) the member is not already represented. If nothing is
inserted at all, a ValueError is raised.
Key values for different levels may be added to the row. It is
NOT guaranteed that key values for all levels exist in row
afterwards.
Arguments:
- row: the row to lookup or insert. Must contain the lookup
attributes.
- namemapping: an optional namemapping (see module's documentation)
"""
key = self._before_insert(row, namemapping)
if key is not None:
return key
(key, insertdone) = self.__ensure_helper(self.root, row, namemapping,
False)
if not insertdone:
raise ValueError, "Member already present - nothing inserted"
self._after_insert(row, namemapping, key)
return key
    def _before_insert(self, row, namemapping):
        """Extension hook invoked before an insertion; returning a non-None
        key makes insert() return it without performing the insertion."""
        return None
    def _after_insert(self, row, namemapping, newkeyvalue):
        """Extension hook invoked after an insertion with the new key value;
        the default does nothing."""
        pass
    def endload(self):
        """Finalize the load. The default implementation does nothing."""
        pass
    def __ensure_helper(self, dimension, row, namemapping, insertdone):
        """Recursively ensure `dimension` and every table it references.

        Returns (key, insertdone) where insertdone tells whether any insert
        was actually performed somewhere in the recursion.
        """
        # NB: Has side-effects: Key values are set for all dimensions
        key = None
        retry = False
        try:
            key = dimension.lookup(row, namemapping)
        except KeyError:
            retry = True  # it can happen that the keys for the levels above
            # aren't there yet but should be used as lookup
            # attributes in dimension.
            # Below we find them and we should then try a
            # lookup again before we move on to do an insertion
        if key is not None:
            # Found by the first lookup; record the key and stop recursing.
            row[(namemapping.get(dimension.key) or dimension.key)] = key
            return (key, insertdone)
        # Else recursively get keys for refed tables and then insert
        for refed in self.refs.get(dimension, []):
            (key, insertdone) = self.__ensure_helper(refed, row, namemapping,
                                                     insertdone)
            # We don't need to set the key value in the row as this already
            # happened in the recursive step.
            # We set insertdone = True to know later that we actually
            # inserted something
        if retry or self.expectboguskeyvalues:
            # The following is similar to
            #   key = dimension.ensure(row, namemapping)
            # but we set insertdone here.
            key = dimension.lookup(row, namemapping)
            if key is None:
                key = dimension.insert(row, namemapping)
                insertdone = True
        else:
            # We don't need to lookup again since no attributes were
            # missing (no KeyError) and we don't expect bogus values.
            # So we can proceed directly to do an insert.
            key = dimension.insert(row, namemapping)
            insertdone = True
        row[(namemapping.get(dimension.key) or dimension.key)] = key
        return (key, insertdone)
class FactTable(object):
    """A class for accessing a fact table in the DW."""

    def __init__(self, name, keyrefs, measures=(), targetconnection=None):
        """Arguments:
           - name: the name of the fact table in the DW
           - keyrefs: a sequence of attribute names that constitute the
             primary key of the fact tables (i.e., the dimension references)
           - measures: a possibly empty sequence of measure names. Default: ()
           - targetconnection: The ConnectionWrapper to use. If not given,
             the default target connection is used.
        """
        if targetconnection is None:
            targetconnection = pyetlmr.getdefaulttargetconnection()
        self.targetconnection = targetconnection
        self.name = name
        self.keyrefs = keyrefs
        self.measures = measures
        self.all = [k for k in keyrefs] + [m for m in measures]
        pyetlmr._alltables.append(self)

        # Create SQL
        # INSERT INTO name (key1, ..., keyn, meas1, ..., measn)
        # VALUES (%(key1)s, ..., %(keyn)s, %(meas1)s, ..., %(measn)s)
        self.insertsql = "INSERT INTO " + name + "(" + \
            ", ".join(keyrefs) + (measures and ", " or "") + \
            ", ".join(measures) + ") VALUES (" + \
            ", ".join(["%%(%s)s" % (att,) for att in self.all]) + ")"

        # SELECT key1, ..., keyn, meas1, ..., measn FROM name
        # WHERE key1 = %(key1)s AND ... keyn = %(keyn)s
        self.lookupsql = "SELECT " + ",".join(self.all) + " FROM " + name + \
            " WHERE " + " AND ".join(["%s = %%(%s)s" % (k, k)
                                      for k in self.keyrefs])

    def insert(self, row, namemapping={}):
        """Insert a fact into the fact table.

        Arguments:
        - row: a dict at least containing values for the keys and measures.
        - namemapping: an optional namemapping (see module's documentation)
        """
        tmp = self._before_insert(row, namemapping)
        if tmp:
            # A subclass hook (e.g. batching) already handled the row.
            return
        self.targetconnection.execute(self.insertsql, row, namemapping)
        self._after_insert(row, namemapping)

    def _before_insert(self, row, namemapping):
        """Extension hook; a truthy return value skips the direct insert."""
        return None

    def _after_insert(self, row, namemapping):
        """Extension hook invoked after an insert; default does nothing."""
        pass

    def lookup(self, keyvalues, namemapping={}):
        """Lookup a fact from the given key values. Return key and measure vals.

        Arguments:
        - keyvalues: a dict at least containing values for all keys
        - namemapping: an optional namemapping (see module's documentation)
        """
        # BUG FIX: the hook was called as self._before_lookup(self, ...),
        # passing self twice and raising TypeError on every lookup.
        res = self._before_lookup(keyvalues, namemapping)
        if res:
            # BUG FIX: previously "return tmp" (an undefined name).
            return res
        # BUG FIX: previously used self.findsql, which is never defined;
        # the query built in __init__ is self.lookupsql.
        self.targetconnection.execute(self.lookupsql, keyvalues, namemapping)
        res = self.targetconnection.fetchone(self.all)
        self._after_lookup(keyvalues, namemapping, res)
        return res

    def _before_lookup(self, keyvalues, namemapping):
        """Extension hook; a truthy return value is used as the lookup result."""
        return None

    def _after_lookup(self, keyvalues, namemapping, resultrow):
        """Extension hook invoked after a lookup; default does nothing."""
        pass

    def ensure(self, row, compare=False, namemapping={}):
        """Ensure that a fact is present (insert it if it is not already there).

        Returns False if the fact was inserted, True if it already existed.

        Arguments:
        - row: a dict at least containing the attributes of the fact table
        - compare: a flag deciding if measure vales from a fact that was
          looked up are compared to those in the given row. If True and
          differences are found, a ValueError is raised. Default: False
        - namemapping: an optional namemapping (see module's documentation)
        """
        res = self.lookup(row, namemapping)
        if not res:
            self.insert(row, namemapping)
            return False
        elif compare:
            for m in self.measures:
                if m in row and row[m] != res.get(m):
                    # Parenthesized raise is valid on Python 2 and 3 alike.
                    raise ValueError(
                        "The existing fact has different measure values")
        return True

    def endload(self):
        """Finalize the load."""
        pass
class BatchFactTable(FactTable):
    """A class for accessing a fact table in the DW. This class performs
       insertions in batches.
    """

    def __init__(self, name, keyrefs, measures=(), batchsize=10000,
                 targetconnection=None):
        """Arguments:
           - name: the name of the fact table in the DW
           - keyrefs: a sequence of attribute names that constitute the
             primary key of the fact tables (i.e., the dimension references)
           - measures: a possibly empty sequence of measure names. Default: ()
           - batchsize: an int deciding many insert operations should be done
             in one batch. Default: 10000
           - targetconnection: The ConnectionWrapper to use. If not given,
             the default target connection is used.
        """
        FactTable.__init__(self, name, keyrefs, measures, targetconnection)
        self.__batchsize = batchsize
        self.__batch = []

    def _before_insert(self, row, namemapping):
        # Buffer the projected row and flush when the batch is full.
        self.__batch.append(pyetlmr.project(self.all, row, namemapping))
        # >= is defensive: a flush is never missed even if the batch ever
        # grows past the threshold (the original tested == only).
        if len(self.__batch) >= self.__batchsize:
            self.__insertnow()
        return True  # signal that we did something

    def _before_lookup(self, keyvalues, namemapping):
        # Flush pending inserts so the lookup sees every fact added so far.
        self.__insertnow()

    def endload(self):
        """Finalize the load."""
        self.__insertnow()

    def __insertnow(self):
        # Skip the round trip entirely when there is nothing buffered
        # (endload/lookup may be called with an empty batch).
        if self.__batch:
            self.targetconnection.executemany(self.insertsql, self.__batch)
            self.__batch = []
class BulkFactTable(object):
"""Class for addition of facts to a fact table. Reads are not supported. """
def __init__(self, name, keyrefs, measures, bulkloader,
fieldsep='\t', rowsep='\n', nullsubst=None,
tempdest=None, bulksize=500000):
"""Arguments:
- name: the name of the fact table in the DW
- keyrefs: a sequence of attribute names that constitute the
primary key of the fact tables (i.e., the dimension references)
- measures: a possibly empty sequence of measure names. Default: ()
- bulkloader: A method
m(name, attributes, fieldsep, rowsep, nullsubst, tempdest)
that is called to load data from a temporary file into the DW.
The argument attributes is the combination of keyrefs and measures
and show the order in which the attribute values appear in the
| |
0: # likely found
# Sample output format ------------------------------------------
# Package: mysql-server
# Status: install ok installed
# Priority: optional
# Section: database
# Installed-Size: 107
# Maintainer: <NAME> <<EMAIL>>
# Architecture: all
# Source: mysql-5.7
# Version: 5.7.25-0ubuntu0.16.04.2
# Depends: mysql-server-5.7
# ------------------------------------------ --------------------
self.composite_logger.log_debug(" - Return code: 0. The package is likely present on the system.")
composite_found_flag = 0
for line in lines:
if 'Package: ' in line:
if package_name in line:
composite_found_flag = composite_found_flag | 1
else: # should never hit for the way this is invoked, hence telemetry
self.composite_logger.log_debug(" - Did not match name: " + str(package_name) + " (" + str(line) + ")")
self.telemetry_writer.write_event("[Installed check] Name did not match: " + package_name + " (line=" + str(line) + ")(out=" + str(output) + ")", Constants.TelemetryEventLevel.Verbose)
continue
if 'Version: ' in line:
if package_version in line:
composite_found_flag = composite_found_flag | 2
else: # should never hit for the way this is invoked, hence telemetry
self.composite_logger.log_debug(" - Did not match version: " + str(package_version) + " (" + str(line) + ")")
self.telemetry_writer.write_event("[Installed check] Version did not match: " + str(package_version) + " (line=" + str(line) + ")(out=" + str(output) + ")", Constants.TelemetryEventLevel.Verbose)
continue
if 'Status: ' in line:
if 'install ok installed' in line:
composite_found_flag = composite_found_flag | 4
else: # should never hit for the way this is invoked, hence telemetry
self.composite_logger.log_debug(" - Did not match status: " + str(package_name) + " (" + str(line) + ")")
self.telemetry_writer.write_event("[Installed check] Status did not match: 'install ok installed' (line=" + str(line) + ")(out=" + str(output) + ")", Constants.TelemetryEventLevel.Verbose)
continue
if composite_found_flag & 7 == 7: # whenever this becomes true, the exact package version is installed
self.composite_logger.log_debug(" - Package, Version and Status matched. Package is detected as 'Installed'.")
return True
self.composite_logger.log_debug(" - Inapplicable line: " + str(line))
self.composite_logger.log_debug(" - Install status check did NOT find the package installed: (composite_found_flag=" + str(composite_found_flag) + ")")
self.telemetry_writer.write_event("Install status check did NOT find the package installed: (composite_found_flag=" + str(composite_found_flag) + ")(output=" + output + ")", Constants.TelemetryEventLevel.Verbose)
else: # This is not expected to execute. If it does, the details will show up in telemetry. Improve this code with that information.
self.composite_logger.log_debug(" - Unexpected return code from dpkg: " + str(code) + ". Output: " + str(output))
self.telemetry_writer.write_event("Unexpected return code from dpkg: Cmd=" + str(cmd) + ". Code=" + str(code) + ". Output=" + str(output), Constants.TelemetryEventLevel.Verbose)
# SECONDARY METHOD - Fallback
# Sample output format
# Listing... Done
# apt/xenial-updates,now 1.2.29 amd64 [installed]
self.composite_logger.log_debug(" - [2/2] Verifying install status with Apt.")
cmd = self.single_package_find_installed_apt.replace('<PACKAGE-NAME>', package_name)
output = self.invoke_package_manager(cmd)
lines = output.strip().split('\n')
for line in lines:
package_details = line.split(' ')
if len(package_details) < 4:
self.composite_logger.log_debug(" - Inapplicable line: " + str(line))
else:
self.composite_logger.log_debug(" - Applicable line: " + str(line))
discovered_package_name = package_details[0].split('/')[0] # index out of bounds check is deliberately not being done
if discovered_package_name != package_name:
self.composite_logger.log_debug(" - Did not match name: " + discovered_package_name + " (" + package_name + ")")
continue
if package_details[1] != package_version:
self.composite_logger.log_debug(" - Did not match version: " + package_details[1] + " (" + str(package_details[1]) + ")")
continue
if 'installed' not in package_details[3]:
self.composite_logger.log_debug(" - Did not find status: " + str(package_details[3] + " (" + str(package_details[3]) + ")"))
continue
self.composite_logger.log_debug(" - Package version specified was determined to be installed.")
self.telemetry_writer.write_event("[Installed check] Fallback code disagreed with dpkg.", Constants.TelemetryEventLevel.Verbose)
return True
self.composite_logger.log_debug(" - Package version specified was determined to NOT be installed.")
return False
def get_dependent_list(self, package_name):
"""Returns dependent List of the package"""
cmd = self.single_package_dependency_resolution_template.replace('<PACKAGE-NAME>', package_name)
self.composite_logger.log_debug("\nRESOLVING DEPENDENCIES USING COMMAND: " + str(cmd))
output = self.invoke_package_manager(cmd)
packages, package_versions = self.extract_packages_and_versions(output)
if package_name in packages:
packages.remove(package_name)
self.composite_logger.log_debug(str(len(packages)) + " dependent updates were found for package '" + package_name + "'.")
return packages
    def get_product_name(self, package_name):
        """Retrieve product name; on this package manager it is simply the
        package name itself."""
        return package_name
def get_package_size(self, output):
"""Retrieve package size from update output string"""
# Sample line from output:
# Need to get 0 B/433 kB of archives
# or
# Need to get 110 kB of archives.
try:
if "is already the newest version" in output:
return Constants.UNKNOWN_PACKAGE_SIZE
search_txt = r'Need to get[ ](.*?)[ ]B/(.*?)[ ]of'
search = re.compile(search_txt, re.M | re.S)
pkg_list = search.findall(str(output))
if not pkg_list:
search_txt = r'Need to get[ ](.*?)[ ]of'
search = re.compile(search_txt, re.M | re.S)
pkg_list = search.findall(str(output))
if not pkg_list or pkg_list[0] == "":
return Constants.UNKNOWN_PACKAGE_SIZE
return pkg_list[0]
elif pkg_list[0][1] == "":
return Constants.UNKNOWN_PACKAGE_SIZE
return pkg_list[0][1]
except Exception as error:
self.composite_logger.log_debug(" - Could not get package size from output: " + repr(error))
return Constants.UNKNOWN_PACKAGE_SIZE
# endregion
# region auto OS updates
def get_current_auto_os_patch_state(self):
""" Gets the current auto OS update patch state on the machine """
self.composite_logger.log("Fetching the current automatic OS patch state on the machine...")
self.__get_current_auto_os_updates_setting_on_machine()
if int(self.unattended_upgrade_value) == 0:
current_auto_os_patch_state = Constants.AutomaticOSPatchStates.DISABLED
elif int(self.unattended_upgrade_value) == 1:
current_auto_os_patch_state = Constants.AutomaticOSPatchStates.ENABLED
else:
current_auto_os_patch_state = Constants.AutomaticOSPatchStates.UNKNOWN
self.composite_logger.log_debug("Current Auto OS Patch State is [State={0}]".format(str(current_auto_os_patch_state)))
return current_auto_os_patch_state
    def __get_current_auto_os_updates_setting_on_machine(self):
        """ Gets all the update settings related to auto OS updates currently set on the machine """
        try:
            # read the apt periodic configuration file; exact path comes from
            # self.os_patch_configuration_settings_file_path (set elsewhere — not visible here)
            image_default_patch_configuration = self.env_layer.file_system.read_with_retry(self.os_patch_configuration_settings_file_path)
            settings = image_default_patch_configuration.strip().split('\n')
            for setting in settings:
                # the regexes expect lines of the form: <SettingName> "<value>";
                # and capture the quoted value for each tracked setting
                if self.update_package_list in str(setting):
                    self.update_package_list_value = re.search(self.update_package_list + ' *"(.*?)".', str(setting)).group(1)
                if self.unattended_upgrade in str(setting):
                    self.unattended_upgrade_value = re.search(self.unattended_upgrade + ' *"(.*?)".', str(setting)).group(1)

            # a value still equal to "" means the setting never appeared in the file
            if self.update_package_list_value == "":
                self.composite_logger.log_debug("Machine did not have any value set for [Setting={0}]".format(str(self.update_package_list)))

            if self.unattended_upgrade_value == "":
                self.composite_logger.log_debug("Machine did not have any value set for [Setting={0}]".format(str(self.unattended_upgrade)))
        except Exception as error:
            # any failure (file read, regex miss -> AttributeError on .group) surfaces as one wrapped exception
            raise Exception("Error occurred in fetching default auto OS updates from the machine. [Exception={0}]".format(repr(error)))
def disable_auto_os_update(self):
""" Disables auto OS updates on the machine only if they are enabled and logs the default settings the machine comes with """
try:
self.composite_logger.log_debug("Disabling auto OS updates if they are enabled")
self.backup_image_default_patch_configuration_if_not_exists()
self.update_os_patch_configuration_sub_setting(self.update_package_list, "0")
self.update_os_patch_configuration_sub_setting(self.unattended_upgrade, "0")
self.composite_logger.log("Successfully disabled auto OS updates")
except Exception as error:
self.composite_logger.log_error("Could not disable auto OS updates. [Error={0}]".format(repr(error)))
raise
    def backup_image_default_patch_configuration_if_not_exists(self):
        """ Records the default system settings for auto OS updates within patch extension artifacts for future reference.
        We only log the default system settings a VM comes with, any subsequent updates will not be recorded"""
        try:
            image_default_patch_configuration_backup = {}

            # read existing backup since it also contains backup from other update services. We need to preserve any existing data with backup file
            if self.image_default_patch_configuration_backup_exists():
                try:
                    image_default_patch_configuration_backup = json.loads(self.env_layer.file_system.read_with_retry(self.image_default_patch_configuration_backup_path))
                except Exception as error:
                    # unreadable/corrupt backup is not fatal: fall through and re-write it below
                    self.composite_logger.log_error("Unable to read backup for default patch state. Will attempt to re-write. [Exception={0}]".format(repr(error)))

            # verify if existing backup is valid if not, write to backup
            is_backup_valid = self.is_image_default_patch_configuration_backup_valid(image_default_patch_configuration_backup)
            if is_backup_valid:
                self.composite_logger.log_debug("Since extension has a valid backup, no need to log the current settings again. [Default Auto OS update settings={0}] [File path={1}]"
                                                .format(str(image_default_patch_configuration_backup), self.image_default_patch_configuration_backup_path))
            else:
                self.composite_logger.log_debug("Since the backup is invalid, will add a new backup with the current auto OS update settings")
                # snapshot the live machine settings and persist them as the new backup
                self.__get_current_auto_os_updates_setting_on_machine()

                backup_image_default_patch_configuration_json = {
                    self.update_package_list: self.update_package_list_value,
                    self.unattended_upgrade: self.unattended_upgrade_value
                }

                self.composite_logger.log_debug("Logging default system configuration settings for auto OS updates. [Settings={0}] [Log file path={1}]"
                                                .format(str(backup_image_default_patch_configuration_json), self.image_default_patch_configuration_backup_path))
                self.env_layer.file_system.write_with_retry(self.image_default_patch_configuration_backup_path, '{0}'.format(json.dumps(backup_image_default_patch_configuration_json)), mode='w+')
        except Exception as error:
            # failure here is reported to the status handler and re-raised to abort the caller
            error_message = "Exception during fetching and logging default auto update settings on the machine. [Exception={0}]".format(repr(error))
            self.composite_logger.log_error(error_message)
            self.status_handler.add_error_to_status(error_message, Constants.PatchOperationErrorCodes.DEFAULT_ERROR)
            raise
def is_image_default_patch_configuration_backup_valid(self, image_default_patch_configuration_backup):
if self.update_package_list in image_default_patch_configuration_backup and self.unattended_upgrade in image_default_patch_configuration_backup:
self.composite_logger.log_debug("Extension already has a valid backup of the default system configuration settings for auto OS updates.")
return True
else:
self.composite_logger.log_error("Extension does not have a valid backup of the default system configuration settings for auto OS updates.")
return False
def update_os_patch_configuration_sub_setting(self, patch_configuration_sub_setting, value="0", patch_configuration_sub_setting_pattern_to_match=""):
""" Updates (or adds if it doesn't exist) the given patch_configuration_sub_setting with the given value in os_patch_configuration_settings_file """
try:
# note: adding space between the patch_configuration_sub_setting and value since, we will have to do that if we have to add a patch_configuration_sub_setting that did not exist before
self.composite_logger.log("Updating system configuration settings for auto OS updates. [Patch Configuration Sub Setting={0}] [Value={1}]".format(str(patch_configuration_sub_setting), value))
os_patch_configuration_settings = self.env_layer.file_system.read_with_retry(self.os_patch_configuration_settings_file_path)
patch_configuration_sub_setting_to_update = patch_configuration_sub_setting + ' "' + value + '";'
patch_configuration_sub_setting_found_in_file = False
updated_patch_configuration_sub_setting = ""
| |
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
import datetime
import json
from jsonschema import Draft4Validator
import pytz
from django.urls import reverse
from django.contrib.auth import get_user_model
from accelerator.models import (
EntrepreneurProfile,
ExpertProfile,
MemberProfile,
)
from impact.tests.api_test_case import APITestCase
from impact.tests.contexts import UserContext
from impact.tests.factories import (
ExpertCategoryFactory,
IndustryFactory,
ProgramFamilyFactory,
StartupTeamMemberFactory,
)
from impact.tests.test_user_detail_view import (
ENTREPRENEUR_GET_FIELDS,
EXPERT_GET_FIELDS,
EXPERT_WRITE_FIELDS,
MUTABLE_FIELDS,
NON_MEMBER_MUTABLE_FIELDS,
WRITE_ONCE_FIELDS,
)
from impact.tests.utils import (
assert_fields,
assert_fields_not_required,
assert_fields_required,
)
from impact.utils import (
get_profile,
override_updated_at,
)
from impact.v1.helpers.validators import (
format_choices,
INVALID_CHOICE_ERROR,
INVALID_URL_ERROR,
)
from impact.v1.views.base_list_view import (
DEFAULT_MAX_LIMIT,
GREATER_THAN_MAX_LIMIT_ERROR,
KWARG_VALUE_NOT_INTEGER_ERROR,
KWARG_VALUE_IS_NON_POSITIVE_ERROR,
KWARG_VALUE_IS_NEGATIVE_ERROR,
)
from impact.v1.views.user_list_view import (
EMAIL_EXISTS_ERROR,
UNSUPPORTED_KEY_ERROR,
UserListView,
)
# Canonical POST payloads for each profile type exercised by the tests below.
# "<EMAIL>" values appear to be sanitized fixture placeholders — confirm against fixtures.
EXAMPLE_ENTREPRENEUR = {
    "email": "<EMAIL>",
    "is_active": "true",
    "first_name": "Entre",
    "gender": "f",
    "last_name": "Preneur",
    "user_type": EntrepreneurProfile.user_type,
}

EXAMPLE_EXPERT = {
    "company": "Expert, Co.",
    "email": "<EMAIL>",
    "is_active": "true",
    "first_name": "Ex",
    "gender": "f",
    "last_name": "Pert",
    "phone": "123-456-7890",
    "title": "Chief Expert",
    "user_type": ExpertProfile.user_type,
    "speaker_interest": "true",
}

EXAMPLE_MEMBER = {
    "email": "<EMAIL>",
    "is_active": "false",
    "first_name": "Mem",
    "gender": "o",
    "last_name": "Ber",
    "user_type": MemberProfile.user_type,
}

# Fields the user-list POST endpoint requires regardless of user type.
REQUIRED_POST_FIELDS = set([
    "email",
    "first_name",
    "gender",
    "last_name",
    "user_type",
])

# Every field the POST endpoint accepts across all user types.
ALL_POST_FIELDS = set(EXPERT_WRITE_FIELDS +
                      MUTABLE_FIELDS +
                      NON_MEMBER_MUTABLE_FIELDS +
                      WRITE_ONCE_FIELDS)

User = get_user_model()
class TestUserListView(APITestCase):
url = reverse(UserListView.view_name)
    def test_get(self):
        """GET lists users: seeded users appear in results and "count" is the total."""
        user1 = UserContext().user
        user2 = UserContext().user
        with self.login(email=self.basic_user().email):
            user_count = User.objects.count()
            response = self.client.get(self.url)
            results = response.data["results"]
            # NOTE(review): this assertion only holds while the total user count
            # stays at or below 10 (the apparent default page size) — confirm intent
            assert user_count == min(len(results), 10)
            emails = [result["email"] for result in response.data["results"]]
            assert user1.email in emails
            assert user2.email in emails
            assert user_count == response.data["count"]
def test_get_returns_correct_count_attribute(self):
for _ in range(10):
UserContext()
with self.login(email=self.basic_user().email):
user_count = User.objects.count()
response = self.client.get(self.url)
assert user_count == response.data["count"]
def test_get_with_limit_returns_correct_number_of_results(self):
limit = 1
for _ in range(limit * 3):
UserContext()
with self.login(email=self.basic_user().email):
url = self.url + "?limit={}".format(limit)
response = self.client.get(url)
results = response.data["results"]
assert limit == len(results)
    def test_get_correct_pagination_attributes_for_offset_zero(self):
        """First page (implicit offset 0): no "previous" link; "next" carries limit and offset=limit."""
        limit = 3
        current_implicit_offset = 0
        for _ in range(limit * 3):
            UserContext()
        with self.login(email=self.basic_user().email):
            limit_arg = "limit={}".format(limit)
            url = self.url + "?" + limit_arg
            response = self.client.get(url)
            results = response.data["results"]
            assert limit == len(results)
            assert response.data["previous"] is None
            assert limit_arg in response.data["next"]
            next_offset_arg = "offset={}".format(
                current_implicit_offset + limit)
            assert next_offset_arg in response.data["next"]
    def test_get_pagination_attrs_for_offset_between_zero_and_limit(self):
        """Offset below the limit: "previous" exists but carries no offset argument."""
        limit = 4
        current_offset = limit - 2
        for _ in range(limit * 3):
            UserContext()
        with self.login(email=self.basic_user().email):
            limit_arg = "limit={}".format(limit)
            offset_arg = "offset={}".format(current_offset)
            url = self.url + "?" + limit_arg + "&" + offset_arg
            response = self.client.get(url)
            results = response.data["results"]
            assert limit == len(results)
            # previous page would start at a negative offset, so the link omits "offset"
            assert response.data["previous"] is not None
            assert "offset" not in response.data["previous"]
            assert limit_arg in response.data["next"]
            next_offset_arg = "offset={}".format(current_offset + limit)
            assert next_offset_arg in response.data["next"]
    def test_get_pagination_attrs_for_offset_in_the_middle(self):
        """Mid-range offset: both "previous" and "next" carry offset arguments."""
        limit = 4
        current_offset = limit + 2
        for _ in range(current_offset + limit + 2):
            UserContext()
        with self.login(email=self.basic_user().email):
            limit_arg = "limit={}".format(limit)
            offset_arg = "offset={}".format(current_offset)
            url = self.url + "?" + limit_arg + "&" + offset_arg
            response = self.client.get(url)
            results = response.data["results"]
            assert limit == len(results)
            prev_offset_arg = "offset={}".format(current_offset - limit)
            assert prev_offset_arg in response.data["previous"]
            assert limit_arg in response.data["next"]
            next_offset_arg = "offset={}".format(current_offset + limit)
            assert next_offset_arg in response.data["next"]
    def test_get_pagination_attrs_for_offset_between_count_and_limit(self):
        """Offset within the last partial page: short results, no "next" link."""
        limit = 4
        for _ in range(limit * 5):
            UserContext()
        current_offset = User.objects.count() - limit + 2
        with self.login(email=self.basic_user().email):
            limit_arg = "limit={}".format(limit)
            offset_arg = "offset={}".format(current_offset)
            url = self.url + "?" + limit_arg + "&" + offset_arg
            response = self.client.get(url)
            results = response.data["results"]
            assert limit > len(results)
            assert len(results) == response.data["count"] - current_offset
            prev_offset_arg = "offset={}".format(current_offset - limit)
            assert prev_offset_arg in response.data["previous"]
            assert response.data["next"] is None
    def test_get_pagination_attrs_for_offset_equals_number_of_results(self):
        """Offset equal to the total count: empty page, "previous" only."""
        limit = 4
        for _ in range(limit * 5):
            UserContext()
        with self.login(email=self.basic_user().email):
            current_offset = User.objects.count()
            limit_arg = "limit={}".format(limit)
            offset_arg = "offset={}".format(current_offset)
            url = self.url + "?" + limit_arg + "&" + offset_arg
            response = self.client.get(url)
            results = response.data["results"]
            assert len(results) == 0
            prev_offset_arg = "offset={}".format(current_offset - limit)
            assert prev_offset_arg in response.data["previous"]
            assert response.data["next"] is None
    def test_get_pagination_attrs_for_offset_greater_than_num_of_results(self):
        """Offset past the total count: empty page; "previous" is clamped to count - limit."""
        limit = 4
        for _ in range(limit * 5):
            UserContext()
        with self.login(email=self.basic_user().email):
            count = User.objects.count()
            current_offset = count + 1
            limit_arg = "limit={}".format(limit)
            offset_arg = "offset={}".format(current_offset)
            url = self.url + "?" + limit_arg + "&" + offset_arg
            response = self.client.get(url)
            results = response.data["results"]
            assert len(results) == 0
            prev_offset_arg = "offset={}".format(count - limit)
            assert prev_offset_arg in response.data["previous"]
            assert response.data["next"] is None
    def test_get_pagination_attrs_for_limit_greater_than_num_of_results(self):
        """A limit above the total count yields one full page and no links either way."""
        for _ in range(5):
            UserContext()
        with self.login(email=self.basic_user().email):
            count = User.objects.count()
            # precondition: stay below the server-side max so the limit is accepted
            assert count < DEFAULT_MAX_LIMIT
            limit_arg = "limit={}".format(count + 1)
            url = self.url + "?" + limit_arg
            response = self.client.get(url)
            results = response.data["results"]
            assert len(results) == count
            assert response.data["previous"] is None
            assert response.data["next"] is None
def test_get_limit_is_greater_than_max_limit_return_error(self):
with self.login(email=self.basic_user().email):
limit = DEFAULT_MAX_LIMIT + 1
limit_arg = "limit={}".format(limit)
url = self.url + "?" + limit_arg
response = self.client.get(url)
assert response.status_code == 401
assert GREATER_THAN_MAX_LIMIT_ERROR.format(
DEFAULT_MAX_LIMIT) in response.data
def test_get_limit_is_explicitly_null_return_error(self):
with self.login(email=self.basic_user().email):
limit = ''
limit_arg = "limit={}".format(limit)
url = self.url + "?" + limit_arg
response = self.client.get(url)
assert response.status_code == 401
error_msg = KWARG_VALUE_NOT_INTEGER_ERROR.format("limit")
assert error_msg in response.data
def test_get_limit_is_non_integer_return_error(self):
with self.login(email=self.basic_user().email):
limit = '5.5'
limit_arg = "limit={}".format(limit)
url = self.url + "?" + limit_arg
response = self.client.get(url)
assert response.status_code == 401
error_msg = KWARG_VALUE_NOT_INTEGER_ERROR.format("limit")
assert error_msg in response.data
def test_get_limit_is_zero_returns_error(self):
with self.login(email=self.basic_user().email):
limit = '0'
limit_arg = "limit={}".format(limit)
url = self.url + "?" + limit_arg
response = self.client.get(url)
assert response.status_code == 401
error_msg = KWARG_VALUE_IS_NON_POSITIVE_ERROR.format("limit")
assert error_msg in response.data
def test_get_limit_is_negative_returns_error(self):
with self.login(email=self.basic_user().email):
limit = '-1'
limit_arg = "limit={}".format(limit)
url = self.url + "?" + limit_arg
response = self.client.get(url)
assert response.status_code == 401
error_msg = KWARG_VALUE_IS_NON_POSITIVE_ERROR.format("limit")
assert error_msg in response.data
def test_get_offset_is_negative_returns_error(self):
with self.login(email=self.basic_user().email):
offset = '-1'
offset_arg = "offset={}".format(offset)
url = self.url + "?" + offset_arg
response = self.client.get(url)
assert response.status_code == 401
error_msg = KWARG_VALUE_IS_NEGATIVE_ERROR.format("offset")
assert error_msg in response.data
def test_get_offset_is_empty_returns_error(self):
with self.login(email=self.basic_user().email):
offset = ''
offset_arg = "offset={}".format(offset)
url = self.url + "?" + offset_arg
response = self.client.get(url)
assert response.status_code == 401
error_msg = KWARG_VALUE_NOT_INTEGER_ERROR.format("offset")
assert error_msg in response.data
def test_get_offset_is_non_integer_returns_error(self):
with self.login(email=self.basic_user().email):
offset = '5.5'
offset_arg = "offset={}".format(offset)
url = self.url + "?" + offset_arg
response = self.client.get(url)
assert response.status_code == 401
error_msg = KWARG_VALUE_NOT_INTEGER_ERROR.format("offset")
assert error_msg in response.data
def test_get_offset_is_explicit_zero_returns_successfully(self):
with self.login(email=self.basic_user().email):
offset = '0'
offset_arg = "offset={}".format(offset)
url = self.url + "?" + offset_arg
response = self.client.get(url)
assert response.status_code == 200
implicit_zero_response = self.client.get(self.url)
assert response.data == implicit_zero_response.data
def test_get_adjacent_offsets_has_unique_users(self):
limit = 3
for _ in range(limit * 3):
UserContext()
with self.login(email=self.basic_user().email):
limit_arg = "limit={}".format(limit)
url = self.url + "?" + limit_arg
response = self.client.get(url)
first_page_results = response.data["results"]
next_url = response.data["next"]
next_response = self.client.get(next_url)
second_page_results = next_response.data["results"]
first_page_ids = {result["id"] for result in first_page_results}
second_page_ids = {result["id"] for result in second_page_results}
assert not first_page_ids.intersection(second_page_ids)
    def test_options(self):
        """OPTIONS advertises GET result fields and POST field requirements."""
        with self.login(email=self.basic_user().email):
            response = self.client.options(self.url)
            assert response.status_code == 200
            results = response.data["actions"]["GET"]["properties"]["results"]
            get_options = results["item"]["properties"]
            assert_fields(ENTREPRENEUR_GET_FIELDS, get_options)
            assert_fields(EXPERT_GET_FIELDS, get_options)
            post_options = response.data["actions"]["POST"]["properties"]
            assert_fields_required(REQUIRED_POST_FIELDS, post_options)
            assert_fields_not_required(ALL_POST_FIELDS - REQUIRED_POST_FIELDS,
                                       post_options)
def test_options_against_get(self):
with self.login(email=self.basic_user().email):
options_response = self.client.options(self.url)
get_response = self.client.get(self.url)
schema = options_response.data["actions"]["GET"]
validator = Draft4Validator(schema)
assert validator.is_valid(json.loads(get_response.content))
def test_post_entrepreneur(self):
with self.login(email=self.basic_user().email):
response = self.client.post(self.url, EXAMPLE_ENTREPRENEUR)
id = response.data["id"]
user = User.objects.get(id=id)
assert user.email == EXAMPLE_ENTREPRENEUR["email"]
assert EntrepreneurProfile.objects.get(user=user)
def test_post_entrepreneur_with_expert_keys(self):
with self.login(email=self.basic_user().email):
data = _example_expert()
data["user_type"] = EntrepreneurProfile.user_type
response = self.client.post(self.url, data)
error_msg = UNSUPPORTED_KEY_ERROR.format(key="company",
type=data["user_type"])
assert error_msg in response.data
def test_post_expert(self):
with self.login(email=self.basic_user().email):
response = self.client.post(self.url, _example_expert())
id = response.data["id"]
user = User.objects.get(id=id)
assert user.email == EXAMPLE_EXPERT["email"]
assert ExpertProfile.objects.get(user=user)
def test_post_expert_with_bad_category(self):
with self.login(email=self.basic_user().email):
bad_name = "Bad Category"
response = self.client.post(
self.url, _example_expert(expert_category=bad_name))
error_msg = INVALID_CHOICE_ERROR.format(field="expert_category",
value=bad_name,
choices=format_choices([]))
assert error_msg in response.data
def test_post_expert_with_bad_url(self):
with self.login(email=self.basic_user().email):
bad_url = "Bad URL"
response = self.client.post(
self.url, _example_expert(personal_website_url=bad_url))
error_msg = INVALID_URL_ERROR.format(field="personal_website_url",
value=bad_url)
assert error_msg in response.data
def test_post_member(self):
with self.login(email=self.basic_user().email):
response = self.client.post(self.url, EXAMPLE_MEMBER)
id = response.data["id"]
user = User.objects.get(id=id)
assert user.email == EXAMPLE_MEMBER["email"]
assert MemberProfile.objects.get(user=user)
def test_post_member_with_bio(self):
with self.login(email=self.basic_user().email):
data = {"bio": "I have a bio!"}
data.update(EXAMPLE_MEMBER)
response = self.client.post(self.url, data)
error_msg = UNSUPPORTED_KEY_ERROR.format(key="bio",
type=data["user_type"])
assert error_msg in response.data
def test_post_without_required_field(self):
with self.login(email=self.basic_user().email):
response = self.client.post(self.url, {})
assert response.status_code == 403
def test_post_bad_key(self):
with self.login(email=self.basic_user().email):
bad_key = "bad key"
response = self.client.post(self.url, {bad_key: True})
assert response.status_code == 403
assert any([bad_key in error for error in response.data])
def test_post_with_existing_email(self):
user = UserContext().user
data = EXAMPLE_MEMBER.copy()
data["email"] = user.email
with self.login(email=self.basic_user().email):
response = self.client.post(self.url, data)
assert response.status_code == 403
assert EMAIL_EXISTS_ERROR.format(user.email) in response.data
def test_updated_at_before_datetime_filter(self):
updated_none = _user_for_date(None)
week_ago = datetime.datetime.now(pytz.utc) - datetime.timedelta(days=7)
one_day = datetime.timedelta(days=1)
updated_before = _user_for_date(week_ago - | |
enabled,no weather, no occupancy.
# other inputs are
# zone2/sensor - current temperature
# scheduled & manual setpoint changes
send = [
(Events.evtRuntime30,6),
(evtZone2SetPoint14,5),
(evtZone2Disable,3),
(evtZone2SetPoint18,4),
(evtZone2Enable,3),
(evtZone2ManSetPoint14,3),
(evtZone2Disable,3),
(evtZone2ManSetPoint18,2),
(evtZone2Enable,3),
]
# the events that we expect to be logged.
expect = [
"time/runtime",
"zone2/stop",
"zone2/targetset",
"zone2/state",
"zone2/name",
"zone2/stopped",
evtZone2SetPoint14,
"zone2/schedulesetpoint",
("zone2/targetset","val",14.0),
("zone2/state","cmdsource","Schedule"),
"zone2/schedulesetpoint",
evtZone2Disable,
("zone2/targetset","val",5.2),
("zone2/state","cmdsource","Frost"), # as disabled
evtZone2SetPoint18,
"zone2/schedulesetpoint",
("zone2/state","schedulesetpoint",18.0),
"zone2/schedulesetpoint",
evtZone2Enable,
("zone2/targetset","val",18.0),
("zone2/state","cmdsource","Schedule"),
evtZone2ManSetPoint14,
("zone2/targetset","val",14.0),
("zone2/state","cmdsource","Manual"),
evtZone2Disable,
("zone2/targetset","val",5.2),
("zone2/state","cmdsource","Frost"),
evtZone2ManSetPoint18,
("zone2/state","manualsetpoint",18.0),
evtZone2Enable,
("zone2/targetset","val",18.0),
("zone2/state","cmdsource","Manual"),
]
self.checkEvents(send, expect)
    def testZone_2h(self):
        """Froststat (minimum zone temperature) changes interact with zone enable/disable."""
        # test setpoint behaviour when zone enable is changed
        self._log.debug( "testZone_2h" )
        self.loadPrimitive( "PersistZone2", TestHeatingVentilationACConfig2, (zone2FileList,) ) # a tuple or a list is required
        # zone 2 enabled, no weather, no occupancy.
        # other inputs are
        #    zone2/sensor - current temperature
        #    scheduled & manual setpoint changes
        # each entry pairs an event with a count -- presumably scheduler ticks to wait; confirm against checkEvents
        send = [
            (Events.evtRuntime30,6),
            (evtZone2SetPoint14,5),
            (evtZone2FrostStat9,2),
            (evtZone2FrostStat16,3),
            (evtZone2SetPoint18,5),
            (evtZone2Disable,3),
            (evtZone2FrostStat5,3),
            (evtZone2Enable,3),
            ]
        # the events that we expect to be logged.
        expect = [
            "time/runtime",
            "zone2/stop",
            "zone2/targetset",
            "zone2/state",
            "zone2/name",
            "zone2/stopped",
            evtZone2SetPoint14,
            "zone2/schedulesetpoint",
            ("zone2/targetset","val",14.0),
            ("zone2/state","cmdsource","Schedule"),
            "zone2/schedulesetpoint",
            evtZone2FrostStat9,
            ("zone2/state","minzonetemp", 9.0),
            evtZone2FrostStat16,
            # froststat above the schedule setpoint raises the target to the froststat value
            ("zone2/targetset","val",16.0),
            ("zone2/state","minzonetemp", 16.0),
            evtZone2SetPoint18,
            "zone2/schedulesetpoint",
            ("zone2/targetset","val",18.0),
            ("zone2/state","cmdsource","Schedule"),
            "zone2/schedulesetpoint",
            evtZone2Disable,
            ("zone2/targetset","val",16.0),
            ("zone2/state","cmdsource","Frost"), # as disabled
            evtZone2FrostStat5,
            ("zone2/targetset","val",5.0),
            ("zone2/state","minzonetemp", 5.0),
            evtZone2Enable,
            ("zone2/targetset","val",18.0),
            ("zone2/state","cmdsource","Schedule"),
            ]
        self.checkEvents(send, expect)
    def testZone_3_start(self):
        """Startup with no setpoint: the configured minimum temperature is used as the target."""
        # no setpoint given - test minpoint works.
        self._log.debug( "testZone_3_start" )
        self.loadPrimitive( "PersistZone3", TestHeatingVentilationACConfig3, (zone3FileList,) ) # a tuple or a list is required
        # zone 3 enabled, no weather, follow occupancy
        # other inputs are
        #    zone3/sensor - current temperature
        send = [
            (Events.evtRuntime30,6),
            (evtOccupied,2),
            ]
        # the events that we expect to be logged.
        # TODO as setpoint has defaulted to mintemp we see run/state/running twice.
        expect = [
            "time/runtime",
            "zone3/stop",
            "zone3/targetset",
            "zone3/state",
            "zone3/name",
            "zone3/stopped",
            "occupants/home",
            "zone3/state",
            ]
        self.checkEvents(send, expect)
        TestEventLogger.logEvents()
    def testZone_3a(self):
        """Occupancy transition unoccupied -> occupied switches cmdsource Frost -> Schedule."""
        self._log.debug( "testZone_3a" )
        self.loadPrimitive( "PersistZone3", TestHeatingVentilationACConfig3, (zone3FileList,) ) # a tuple or a list is required
        # zone 3 enabled, no weather, follow occupancy
        # This one we start unoccupied and then go occupied
        send = [
            (Events.evtRuntime30,6),
            (evtZone3SetPoint14,5),
            (evtUnOccupied,3),
            (evtZone3Temp15,3),
            (evtZone3SetPoint18,4),
            (evtOccupied,5),
            (evtZone3SetPoint14,7),
            ]
        # the events that we expect to be logged.
        expect = [
            "time/runtime",
            "zone3/stop",
            "zone3/targetset",
            "zone3/state",
            "zone3/name",
            "zone3/stopped",
            evtZone3SetPoint14,
            "zone3/schedulesetpoint",
            ("zone3/targetset","val",14.0),
            ("zone3/state","cmdsource","Schedule"),
            "zone3/schedulesetpoint",
            evtUnOccupied,
            # unoccupied drops the target to the frost level (10.0)
            ("zone3/targetset","val",10.0),
            ("zone3/state","cmdsource","Frost"),
            evtZone3Temp15,
            "zone3/sensor",
            ("zone3/state","status","Idle"),
            evtZone3SetPoint18,
            "zone3/schedulesetpoint",
            ("zone3/state","cmdsource","Frost"),
            "zone3/schedulesetpoint",
            evtOccupied,
            # occupied restores the pending schedule setpoint and starts the zone
            ("zone3/targetset","val",18.0),
            "zone3/run",
            ("zone3/state","cmdsource","Schedule"),
            "zone3/running",
            evtZone3SetPoint14,
            "zone3/schedulesetpoint",
            ("zone3/targetset","val",14.0),
            "zone3/stop",
            ("zone3/state","status","Idle"),
            "zone3/schedulesetpoint",
            "zone3/stopped",
            ]
        self.checkEvents(send, expect)
    def testZone_3b(self):
        """Occupancy transition occupied -> unoccupied switches cmdsource Schedule -> Frost."""
        self._log.debug( "testZone_3b" )
        self.loadPrimitive( "PersistZone3", TestHeatingVentilationACConfig3, (zone3FileList,) ) # a tuple or a list is required
        # zone 3 enabled, no weather, follow occupancy
        # This one we start occupied and then go unoccupied
        send = [
            (Events.evtRuntime30,6),
            (evtZone3SetPoint14,5),
            (evtOccupied,2),
            (evtZone3Temp15,3),
            (evtZone3SetPoint18,7),
            (evtUnOccupied,5),
            (evtZone3SetPoint14,4),
            ]
        # the events that we expect to be logged.
        expect = [
            "time/runtime",
            "zone3/stop",
            "zone3/targetset",
            "zone3/state",
            "zone3/name",
            "zone3/stopped",
            evtZone3SetPoint14,
            "zone3/schedulesetpoint",
            ("zone3/targetset","val",14.0),
            ("zone3/state","cmdsource","Schedule"),
            "zone3/schedulesetpoint",
            evtOccupied,
            ("zone3/state","cmdsource","Schedule"),
            evtZone3Temp15,
            "zone3/sensor",
            ("zone3/state","cmdsource","Schedule"),
            evtZone3SetPoint18,
            "zone3/schedulesetpoint",
            ("zone3/targetset","val",18.0),
            "zone3/run",
            ("zone3/state","cmdsource","Schedule"),
            "zone3/schedulesetpoint",
            "zone3/running",
            evtUnOccupied,
            # unoccupied drops the target to the frost level (10.0) and stops the zone
            ("zone3/targetset","val",10.0),
            "zone3/stop",
            ("zone3/state","cmdsource","Frost"),
            "zone3/stopped",
            evtZone3SetPoint14,
            "zone3/schedulesetpoint",
            ("zone3/state","cmdsource","Frost"),
            "zone3/schedulesetpoint",
            ]
        self.checkEvents(send, expect)
    def testZone_4_start(self):
        """Startup with no setpoint for zone 4: the minimum temperature default applies."""
        # no setpoint given - test minpoint works.
        self._log.debug( "testZone_4_start" )
        self.loadZone4()
        # zone is configured with:
        #   enabled
        #   weather compensation -- NOTE(review): the original comments disagreed
        #     ("weather" vs "no weather compensation"); confirm against loadZone4
        #   matstat (froststat) at 10 degrees
        #   to follow occupancy (the expected "occupants/home" event below implies
        #     occupancy is followed, despite an original "no occupancy" comment)
        # other inputs are
        #    zone4/sensor - current temperature
        send = [
            (Events.evtRuntime30,6),
            (evtOccupied,2),
            ]
        # the events that we expect to be logged.
        # TODO as setpoint has defaulted to mintemp we see run/state/running twice.
        expect = [
            "time/runtime",
            "zone4/stop",
            "zone4/targetset",
            "zone4/state",
            "zone4/name",
            "zone4/stopped",
            "occupants/home",
            "zone4/state",
            ]
        self.checkEvents(send, expect)
        TestEventLogger.logEvents()
    def testZone_4a(self):
        """Setpoint changes drive run/stop for zone 4 (weather-compensated zone)."""
        # zone 4 enabled, weather, no occupancy
        # test set point change
        self._log.debug( "testZone_4a" )
        self.loadZone4()
        send = [
            (Events.evtRuntime30,6),
            (evtZone4SetPoint14,5),
            (evtZone4Temp15,3),
            (evtZone4SetPoint18,7),
            (evtZone4SetPoint14,7),
            ]
        # the events that we expect to be logged.
        expect = [
            "time/runtime",
            "zone4/stop",
            "zone4/targetset",
            "zone4/state",
            "zone4/name",
            "zone4/stopped",
            evtZone4SetPoint14,
            "zone4/schedulesetpoint",
            ("zone4/targetset","val",14.0),
            ("zone4/state","cmdsource","Schedule"),
            "zone4/schedulesetpoint",
            evtZone4Temp15,
            "zone4/sensor",
            ("zone4/state","cmdsource","Schedule"),
            evtZone4SetPoint18,
            "zone4/schedulesetpoint",
            # setpoint above the current temperature (15) starts the zone
            ("zone4/targetset","val",18.0),
            "zone4/run",
            ("zone4/state","cmdsource","Schedule"),
            "zone4/schedulesetpoint",
            "zone4/running",
            evtZone4SetPoint14,
            "zone4/schedulesetpoint",
            # setpoint back below the current temperature stops the zone
            ("zone4/targetset","val",14.0),
            "zone4/stop",
            ("zone4/state","cmdsource","Schedule"),
            "zone4/schedulesetpoint",
            "zone4/stopped",
            ]
        self.checkEvents(send, expect)
    def testZone_4b(self):
        """Weather HoldOff suppresses a zone start on an upward setpoint change until weather clears."""
        # zone 4 enabled, weather, no occupancy
        # test set point change
        # This test sees what happens when Hold Off on up change of set point
        self._log.debug( "testZone_4b" )
        self.loadZone4()
        # first put zone in hold off state
        send = [
            (Events.evtRuntime30,6),
            (Events.evtRuntime10,8),
            (Events.evtMinute1,2),
            (evtZG1Stopped,1),
            (evtZone4SetPoint14,5),
            (evtTemp22,2),
            (Events.evtMinute10,1),
            (evtTemp15,2),
            (Events.evtMinute10,7),
            (evtZone4Temp15,3),
            (evtZone4SetPoint18,4),
            (evtTemp5,2),
            (Events.evtMinute10,12),
            (evtZone4SetPoint14,7),
            ]
        # the events that we expect to be logged.
        # NOTE(review): the #N index comments below drift from the actual list
        # positions; treat them as approximate section markers only.
        expect = [
            #0
            "time/runtime",
            "zone4/stop",
            "zone4/targetset",
            "zone4/state",
            "zone4/name",
            "zone4/stopped",
            #5
            Events.evtRuntime10,
            "weather/0",
            "weather/1",
            "weather/2",
            "weather/3",
            ("zone4/state","cmdsource","Frost"),
            ("zone4/state","cmdsource","Frost"),
            ("zone4/state","cmdsource","Frost"),
            #13
            Events.evtMinute1,
            evtZG1Stop,
            evtZG1Stopped,
            evtZone4SetPoint14,
            "zone4/schedulesetpoint",
            ("zone4/targetset","val",14.0),
            ("zone4/state","cmdsource","Schedule"),
            "zone4/schedulesetpoint",
            # outside temperature 22 then 15 pushes weather/1 into HoldOff
            ("webbrick/5/CT/1","val",22.0),
            "weather/outsideTemp",
            Events.evtMinute10,
            ("webbrick/5/CT/1","val",15.0),
            "weather/outsideTemp",
            Events.evtMinute10,
            "weather/global",
            ("weather/1","state", "HoldOff"),
            ("weather/2","state", "Run"),
            ("weather/3","state", "Run"),
            ("zone4/state","cmdsource","Schedule"),
            ("zone4/state","cmdsource","Schedule"), #necessary, because weather 2 and 3 could potentially have changed and a change cannot easily be detected within zone
            ("zone4/state","cmdsource","Schedule"), #necessary, because weather 2 and 3 could potentially have changed and a change cannot easily be detected within zone
            #33
            evtZone4Temp15,
            "zone4/sensor",
            ("zone4/state","cmdsource","Schedule"),
            #36
            evtZone4SetPoint18,
            "zone4/schedulesetpoint",
            ("zone4/state","cmdsource","Schedule"),
            "zone4/schedulesetpoint",
            #40
            # outside temperature drops to 5: HoldOff clears and the pending start proceeds
            ("webbrick/5/CT/1","val",5.0),
            "weather/outsideTemp",
            #42
            Events.evtMinute10,
            "weather/global",
            ("weather/1","state", "Run"),
            ("weather/2","state", "Run"),
            ("weather/3","state", "Run"),
            ("zone4/targetset","val",18.0),
            "zone4/run",
            ("zone4/state","cmdsource","Schedule"),
            ("zone4/state","cmdsource","Schedule"),
            ("zone4/state","cmdsource","Schedule"),
            "zone4/running",
            #52
            evtZone4SetPoint14,
            "zone4/schedulesetpoint",
            ("zone4/targetset","val",14.0),
            "zone4/stop",
            ("zone4/state","cmdsource","Schedule"),
            "zone4/schedulesetpoint",
            "zone4/stopped",
            ]
        self.checkEvents(send, expect)
    def testZone_4c(self):
        """Zone 4 with weather compensation enabled and no occupancy.

        Puts the zone into weather HoldOff via outside-temperature events,
        then lowers the set point and verifies that only schedule/state
        events are emitted (no run/stop cycle on a downward change).
        """
        # zone 4 enabled, weather, no occupancy
        # test set point change
        # This test sees what happens when Hold Off on down change of set point
        self._log.debug( "testZone_4c" )
        self.loadZone4()
        # first put zone in hold off state
        # each entry is (event, expected-count) consumed by checkEvents
        send = [
            (Events.evtRuntime30,6),
            (Events.evtRuntime10,8),
            (Events.evtMinute1,2),
            (evtZG1Stopped,1),
            (evtZone4SetPoint18,5),
            (evtTemp22,2),
            (Events.evtMinute10,1),
            (evtTemp15,2),
            (Events.evtMinute10,8),
            (evtZone4Temp15,3),
            (evtZone4SetPoint14,5),
            ]
        # the events that we expect to be logged.
        expect = [
            #0
            "time/runtime",
            "zone4/stop",
            "zone4/targetset",
            "zone4/state",
            "zone4/name",
            "zone4/stopped",
            #5
            Events.evtRuntime10,
            "weather/0",
            "weather/1",
            "weather/2",
            "weather/3",
            ("zone4/state","cmdsource","Frost"),
            ("zone4/state","cmdsource","Frost"),
            ("zone4/state","cmdsource","Frost"),
            Events.evtMinute1,
            evtZG1Stop,
            evtZG1Stopped,
            #13
            evtZone4SetPoint18,
            "zone4/schedulesetpoint",
            ("zone4/targetset","val",18.0),
            ("zone4/state","cmdsource","Schedule"),
            "zone4/schedulesetpoint",
            #13
            ("webbrick/5/CT/1","val",22.0),
            "weather/outsideTemp",
            Events.evtMinute10,
            ("webbrick/5/CT/1","val",15.0),
            "weather/outsideTemp",
            #19
            Events.evtMinute10,
            "weather/global",
            ("weather/1","state", "HoldOff"),
            ("weather/2","state", "Run"),
            ("weather/3","state", "Run"),
            ("zone4/state","cmdsource","Schedule"),
            ("zone4/state","cmdsource","Schedule"),
            ("zone4/state","cmdsource","Schedule"),
            #27
            evtZone4Temp15,
            "zone4/sensor",
            ("zone4/state","cmdsource","Schedule"),
            #30
            evtZone4SetPoint14,
            "zone4/schedulesetpoint",
            ("zone4/targetset","val",14.0),
            ("zone4/state","cmdsource","Schedule"),
            "zone4/schedulesetpoint",
            ]
        self.checkEvents(send, expect)
    def testZone_5_start(self):
        """Zone 5 with weather and follow-occupancy enabled, no set point sent.

        With no set point the zone defaults to its minimum temperature; an
        occupancy event should then only produce a zone state change.
        """
        # zone 5 enabled, weather, follow occupancy
        # no setpoint given - test minpoint works.
        self._log.debug( "testZone_5_start" )
        self.loadZone5()
        # other inputs are
        # zone5/sensor - current temperature
        send = [
            (Events.evtRuntime30,6),
            (evtOccupied,2),
            # (evtZone5Temp15,2),
            # (Events.evtMinute1,1),
            # (Events.evtMinute1,1),
            # (Events.evtMinute1,1),
            ]
        # the events that we expect to be logged.
        # TODO as setpoint has defaulted to mintemp we see run/state/running twice.
        expect = [
            "time/runtime",
            "zone5/stop",
            "zone5/targetset",
            "zone5/state",
            "zone5/name",
            "zone5/stopped",
            "occupants/home",
            "zone5/state",
            ]
        self.checkEvents(send, expect)
        TestEventLogger.logEvents()
    def testZone_5a(self):
        """Zone 5 set-point changes including a manual override.

        A schedule set point of 14 keeps the zone stopped; a manual set point
        of 22 (above the sensed 15) starts the zone with cmdsource "Manual";
        a subsequent schedule set point of 14 stops it again.
        """
        # zone 5 enabled, weather, follow occupancy
        # test set point change
        self._log.debug( "testZone_5a" )
        self.loadZone5()
        send = [
            (Events.evtRuntime30,6),
            (evtZone5SetPoint14,5),
            (evtZone5Temp15,3),
            (evtZone5ManualSetPoint22,5),
            (evtZone5SetPoint14,5),
            ]
        # the events that we expect to be logged.
        expect = [
            "time/runtime",
            "zone5/stop",
            "zone5/targetset",
            "zone5/state",
            "zone5/name",
            "zone5/stopped",
            evtZone5SetPoint14,
            "zone5/schedulesetpoint",
            ("zone5/targetset","val",14.0),
            ("zone5/state","cmdsource","Schedule"),
            "zone5/schedulesetpoint",
            evtZone5Temp15,
            "zone5/sensor",
            ("zone5/state","cmdsource","Schedule"),
            evtZone5ManualSetPoint22,
            ("zone5/targetset","val",22.0),
            "zone5/run",
            ("zone5/state","cmdsource","Manual"),
            "zone5/running",
            evtZone5SetPoint14,
            ("zone5/targetset","val",14.0),
            "zone5/stop",
            ("zone5/state","cmdsource","Schedule"),
            "zone5/stopped",
            ]
        self.checkEvents(send, expect)
    def testMultiZone_1(self):
        """Two zones loaded together: startup emits the standard five-event
        set (stop/targetset/state/name/stopped) once per zone."""
        # zone 5 enabled, weather, follow occupancy
        # test set point change
        self._log.debug( "testMultiZone_1" )
        self.loadPrimitive( "PersistZonesAll", TestHeatingVentilationACConfigMultiZone, (zone1FileList, zone2FileList) )
        send = [
            (Events.evtRuntime30,11),
            ]
        # the events that we expect to be logged.
        expect = [
            "time/runtime",
            "zone1/stop",
            "zone1/targetset",
            "zone1/state",
            "zone1/name",
            "zone1/stopped",
            "zone2/stop",
            "zone2/targetset",
            "zone2/state",
            "zone2/name",
            "zone2/stopped",
            ]
        self.checkEvents(send, expect)
def testHeatSourceSolarSingle(self):
# test the heat source logic
self._log.debug( "testHeatSourceSolarSingle" )
self.loadPrimitive( "PersistZonesAll", TestHeatingVentilationACConfigHeatSource4, (heatSource4FileList,) )
# the events we send
send = [
(Events.evtRuntime20,2),
(evtHS4PanelTemp5,2),
(evtHS4HeatExTemp5,3),
(evtHS4PanelTemp25,3),
(evtHS4PanelTemp95,3),
(evtHS4RequestRun,1),
(Events.evtMinute1,6),
(Events.evtMinute1,6),
(evtHS4RequestStop,1),
(Events.evtMinute1,6)
]
# the events that we expect to be logged.
expect = [
Events.evtRuntime20,
("heatsource/4/availability","availability",0),
evtHS4PanelTemp5,
"heatsource/4/panel",
evtHS4HeatExTemp5,
"heatsource/4/heatexbot",
"heatsource/4/heatex",
evtHS4PanelTemp25,
"heatsource/4/panel",
("heatsource/4/availability","availability",1),
evtHS4PanelTemp95,
"heatsource/4/panel",
("heatsource/4/availability","availability",2),
evtHS4RequestRun,
Events.evtMinute1,
evtHS4DoRun,
"webbrick/904/DO/4",
evtHS4Running,
"heatsource/4/state",
"heatsource/4/state",
Events.evtMinute1,
evtHS4DoRun,
"webbrick/904/DO/4",
evtHS4Running,
"heatsource/4/state",
"heatsource/4/state",
evtHS4RequestStop,
Events.evtMinute1,
evtHS4DoStop,
"webbrick/904/DO/4",
evtHS4Stopped,
"heatsource/4/state",
| |
import logging
from typing import Union
from copy import deepcopy
from glob import glob
from numpy import array, ndarray, round
from pandas.errors import ParserError
from tqdm import tqdm
from os.path import join
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('TkAgg')
from . import utilities as utili
from .thundobj import Thunder
from . import map_scan_tools
from . import peak_fitting
from . import peak_finding
from .background import background_removal as bg_remove
# TODO
# make option of passing in many params files - one for each data file
class ThunderBag():
"""
A 'bag' of thunder objects. these are collectively stored with some metadata in this object, so we can do some
cool things like store a thunderfit object for each coordinate and make mapscans etc
"""
def __init__(self, input):
"""
initialise everything first
:param input: a dictionary of parameters to create the bag object with
"""
self.thunder_bag: {} = {}
self.coordinates: {} = {}
self.first: str = ''
self.stats: {} = {}
self.fit_params: {} = {}
self.x_ind: Union[None, int] = None
self.y_ind: Union[None, int] = None
self.e_ind: Union[None, int] = None
self.img_path: Union[None, str] = None
self.map: Union[None, str] = None
self.datapath: Union[None, str] = None
self.x_coord_ind: Union[None, int] = None
self.y_coord_ind: Union[None, int] = None
if isinstance(input, Thunder): # if only pass one but its already a thunder object then just use that
# add all the details in depending on args
self.thunder_bag[0] = Thunder(input)
elif isinstance(input, dict):
self.create_bag(input) # add all the details in depending on args
else:
raise TypeError('Cannot convert input to ThunderBag object')
def create_bag(self, inp):
"""
create a bag object given an inp
:param inp: this is a dictionary with all the necessary data on the bag
:return:
"""
logging.debug('creating bag object')
self.x_ind = inp.get('x_ind', 2)
self.y_ind = inp.get('y_ind', 3)
self.e_ind = inp.get('e_ind', None)
self.img_path = inp.get('imgpath', None)
self.coordinates = inp.get('coords', {})
# if user passes map as True then the file will be treated as a map
# file
self.map = inp.get('map', None)
# note this must be a list of datapaths, even if its just one element
self.datapath = inp.get('datapath', None)
for i, data in tqdm(enumerate(self.datapath)): # iterate through the datapaths for each file to be loaded
if len(self.datapath) > 1:
# if more than one datapath then we name them with i_j prefix.
prefix = f'{i}_'
else:# if not its just j
prefix = ''
if isinstance(data, Thunder):
# then its already loaded so just assign it
self.thunder_bag[i] = data
elif isinstance(data, str):
# then read the data file
if self.map: # then we have a map file with 4 columns and lots of individual runs in it
self.x_coord_ind, self.y_coord_ind = inp.get('x_coord_ind', 0), inp.get('y_coord_ind', 1)
# get the exact filepath to the data
map_path = glob(data)[0]
x_data, y_data, x_coords, y_coords = self.read_map(
map_path, self.x_ind, self.y_ind, self.x_coord_ind, self.y_coord_ind) # read the
# map file into lists of x_data etc
for j in range(
len(x_data)): # iterate through the list of data and coords
# the x and y data for each coordinate set
x_data_, y_data_ = x_data[j], y_data[j]
self.thunder_bag[f'{prefix}{j}'] = Thunder(
inp, x_data=x_data_, y_data=y_data_) # make a thunder obj
# with this data
x_coords_, y_coords_ = x_coords[j], y_coords[j]
# for each i we will have a list of tuples of x and y
# coords
self.coordinates[f'{prefix}{j}'] = (
x_coords_, y_coords_)
elif '*' in data: # then we need to load all the files in the filepath
filematches = glob(data) # this is a list of the matches
for j, file in enumerate(filematches): # for each file
try:
self.thunder_bag[f'{prefix}{j}'] = self.create_thunder(
file, inp) # make a thunder object for
# each file
except ParserError as e:
logging.warning(
f"A Thunder object could not be created for the datafile: {file}, skipping")
else: # its possible the user has passed in a list of thunder objects for us
try:
self.thunder_bag[str(i)] = self.create_thunder(
data, inp)
except ParserError as e:
logging.warning(
f"A Thunder object could not be created for the datafile: {file}, skipping")
else: # we can't load any other way
logging.warning(
f"wrong format in data list detected for {i}th element: {data}. Skipping element")
pass
@staticmethod
def create_thunder(file, inp):
"""
create a thunder object given a path and an inp
:param file: string
:param inp: correct filetype for thunder obj, i.e. a dict or a thunder obj
:return:
"""
logging.debug('creating thunder object')
arguments = deepcopy(inp)
arguments['datapath'] = file
thund_obj = Thunder(arguments)
return thund_obj
@staticmethod
def read_map(file_address, x_ind, y_ind, x_coord_ind, y_coord_ind):
"""
read a map file and return four lists for data and coordinates
:param file_address: what is the path to the file
:param x_ind: which column is x data
:param y_ind: which column is y data
:param x_coord_ind: which column is x coords
:param y_coord_ind: which column is y coords
:return:
"""
logging.debug('reading in mapscan file')
# load the data. note these drop nan rows but
x_data, y_data, _ = utili.load_data(file_address, x_ind, y_ind)
# does that for the whole filepath so will be consistent for data and
# coordinates
x_coords, y_coords, _ = utili.load_data(
file_address, x_coord_ind, y_coord_ind) # load the coordinates
x_data, y_data, x_coords, y_coords = utili.map_unique_coords(
x_data, y_data, x_coords, y_coords) #
return x_data, y_data, x_coords, y_coords
    @staticmethod
    def bag_iterator(bag, func, input_args, sett_args):
        """
        this is a generic method which will apply a func with arguments input_args to a bag one by one, and set the
        outputs to the sett_args attributes for each thunder object
        :param bag: bag should be a dictionary of thunder objects
        :param func: the function to apply
        :param input_args: names of thunder attributes passed as the function's arguments
        :param sett_args: attribute names to store the function's output(s) under
        :return: None; the thunder objects in `bag` are mutated in place
        """
        bagkeys = tqdm(bag.keys())  # progress bar
        bagkeys.set_description(
            f"Operating with: {func.__name__}, to find: {sett_args}")
        for key in bagkeys:
            thund = bag.get(key)  # bag[key] is the thunder object
            # get the input arg attributes from thunder obj
            kwargs_ = [getattr(thund, arg) for arg in input_args]
            # we return _ which we ignore and val which is a list of
            _, val = utili.apply_func((key, kwargs_), func)
            # output values
            for i, arg in enumerate(sett_args):
                try:
                    # set the data as an attribute to the thunder object
                    if len(sett_args) == 1:
                        setattr(thund, arg, val)
                    else:
                        setattr(thund, arg, val[i])
                except KeyError as e:
                    # NOTE(review): val[i] raises KeyError when val is a dict not
                    # keyed by position; in that case the whole dict is stored on
                    # each attribute -- presumably intentional, verify with callers
                    if isinstance(val, dict):
                        setattr(thund, arg, val)
                    else:
                        print(f'Weird KeyError encountered: {e}')
def choose_spectrum(self):
"""
when doing user guided routines e.g. clipping data or background removal, run this to choose which data in the
bag will be the piece its based on
:return:
"""
logging.debug(
'choosing which thunder object will be the user specified data for bg etc')
# then we have to choose which spectrum we want
# changed from list to iter
first = next(iter(self.thunder_bag.keys()))
while True: # keep going until we break
try:
first_thunder = self.thunder_bag[first]
fig, ax = plt.subplots()
ax.plot(
getattr(
first_thunder, 'x_data'), getattr(
first_thunder, 'y_data'))
print(
f"Need a decision on which plot is representitive of data, the following is for index {first}")
plt.show(block=True)
ans = input(
"If you are happy with using this data file, type y, otherwise enter a new index")
if ans == 'y':
break
else:
try:
first = str(ans)
except ValueError:
print("You entered an incorrect answer! Trying again...")
except KeyError:
print('incorrect key, please enter a lower index value')
first = next(iter(self.thunder_bag.keys()))
self.first = first # save the user decision
def clip_data(self, clips=None):
"""
method to clip the data for each thunder object
:param clips: if none wil do a user guided routine, if a list of two elements will use those elements as the clips
:return:
"""
logging.debug('clipping data based on user specified plot')
first_thunder = self.thunder_bag[self.first]
clip_left, clip_right = utili.clip_data(
getattr(
first_thunder, 'x_data'), getattr(
first_thunder, 'y_data'), clips)
for thund in self.thunder_bag.values():
setattr(
thund, 'x_data', getattr(
thund, 'x_data')[
clip_left:clip_right])
setattr(
thund, 'y_data', getattr(
thund, 'y_data')[
clip_left:clip_right])
def cosmic_rays(self):
print(
'cosmic ray removal is not yet implemented. If this is an issue I recommend first smoothing the data elsewhere/ '
'if you can select a range to | |
receiveData_OpenDoor(self):
self.log("[Behaviour OpenDoor] -- Receiving Data")
# TYPICAL RECEIVE CALL #
self.waitForAllMessages() #
# END OF TYPICAL RECEIVE CALL #
# BEGIN OF RECEIVE BODY CALL #
print "[Behaviour OpenDoor] -- Receiving Data\n"
# END OF RECEIVE BODY CALL #
pass
##### Execute behaviour OpenDoor #####
    def executeBehaviour_OpenDoor(self):
        """Run the OpenDoor behaviour loop (transition -> send -> receive) until
        a terminal/error condition fires or the subsystem stops being OK."""
        self.log("[Behaviour OpenDoor] -- Executing OpenDoor Behaviour")
        stopBehaviourIteration=False
        # Execution of a single iteration of a behaviour OpenDoor #
        # NOTE(review): this local is never read -- the loop below updates
        # self._behaviourIterations, presumably initialised elsewhere
        _behaviourIterations=0
        # Starts execution! #
        while True:
            # Sleep is a method from class AuxiliaryFunctions which executes sleep from ROS #
            self.auxiliaryFunctions.sleep()
            # Calculates transition function -- output and internal buffers can only be modified by this function! #
            self.transitionFunction_OpenDoor()
            # Sends data! #
            self.sendData_OpenDoor()
            # Updates index! -- i.e. i:i+1 -- increment number of behaviour iterations and number of subsystem iterations #
            self._behaviourIterations=self._behaviourIterations+1
            # Receives data! #
            self.receiveData_OpenDoor()
            # Check both conditions, i.e. terminal condition and error condition #
            stopBehaviourIteration = self.terminalCondition_OpenDoor() or self.errorCondition_OpenDoor()
            if stopBehaviourIteration or not self.auxiliaryFunctions.isSubsystemOK():
                '''
                Iterate within the while loop until stopBehaviourIteration is set true, i.e. one
                of error and terminal condition is fulfilled and isSubsystemOK is true. Otherwise
                subsystem must have been switched to another state or SIGINT was sent
                '''
                break
        # Stops execution! #
        pass
##### Behaviour TakeCupFromCabinetAvoidingDoor #####
##### Terminal condition #####
    def terminalCondition_TakeCupFromCabinetAvoidingDoor(self): # Int64_currentState #
        """Terminal condition: always True, so the behaviour runs exactly one iteration."""
        self.log("[Behaviour TakeCupFromCabinetAvoidingDoor] -- Checking Terminal Condition")
        return True
        pass
##### Error condition #####
    def errorCondition_TakeCupFromCabinetAvoidingDoor(self): # Int64_currentState #
        """Error condition: always False -- this behaviour never signals an error."""
        self.log("[Behaviour TakeCupFromCabinetAvoidingDoor] -- Checking Error Condition")
        return False
        pass
##### Transition function #####
    def transitionFunction_TakeCupFromCabinetAvoidingDoor(self): # Int64_currentState #
        """Grasp the cup inside the cabinet while avoiding the opened right door.

        Sequence (mirrors the printed step markers below): retreat around the
        opened door back in front of the cabinet, open the fingers, align the
        end-effector with the cup along Y and Z, approach along X, close the
        fingers, lift slightly and withdraw the hand.
        """
        self.log("[Behaviour TakeCupFromCabinetAvoidingDoor] -- Calculating Transition Function")
        # Transition function #
        self.log("TRANSITION FUNCTION - TakeCupFromCabinetAvoidingDoor")
        # Partial transition function - name - tf1_1
        print "takeCupFromCabinetAvoidingDoor"
        self._currentState.data=STATE_TAKE_CUP_FROM_CABINET_AVOIDING_DOOR
        print "move_to_cabinet_right_door_after_openning_the_right_door"
        # move hand back in front of cabinet (avoid the right opened door)
        size_of_door=0.282
        move_tool_by_delta(RIGHT_HAND, -size_of_door, 0, 0, 0.98) # length of the right cabinet door and avoiding the opened right door
        move_tool_by_delta(RIGHT_HAND, 0.05, 0.3, 0, 0)
        move_tool_by_delta(RIGHT_HAND, 0.15, 0.15, 0, -0.98) # hand should be in front of cabinet
        move_in_front_of_cup(False, RIGHT_HAND)
        print "####### take_cup_from_cabinet #######"
        # open fingers
        print "Open fingers"
        how_wide_to_open=OPEN_HAND_FINGERS
        q_gripper=[how_wide_to_open, how_wide_to_open, how_wide_to_open, 0]
        set_grippers(RIGHT_HAND,q_gripper)
        # move along y axis in order to set position of end-effector aligned to the cup (in Y axis)
        print "Move along Y-axis"
        move_tool_by_delta(RIGHT_HAND, 0, 0.01, 0, 0)
        # move a little bit up along Z axis
        print "Move along Z-axis"
        move_tool_by_delta(RIGHT_HAND, 0, 0, 0.08, 0)
        # move along X-axis
        print "Move along X-axis"
        grip_frame=check_object_frame("B", "Gr")
        grip_pose=grip_frame.p
        print "Right grip frame pose: "+str(grip_pose)
        cup_frame=check_object_frame("B", "cup_in_cabinet")
        cup_pose=cup_frame.p
        print "Cup pose: "+str(cup_pose)
        epsilon=0.01
        # approach the cup in thirds of the remaining X distance; the final
        # third is shortened by epsilon to avoid pushing into the cup
        distance=cup_pose[0]-grip_pose[0]
        delta_x=distance/3
        print "Delta_x="+str(delta_x)
        move_tool_by_delta(RIGHT_HAND, delta_x, 0, 0, 0)
        move_tool_by_delta(RIGHT_HAND, delta_x, 0, 0, 0)
        delta_x=delta_x-epsilon
        print "Delta_x="+str(delta_x)
        move_tool_by_delta(RIGHT_HAND, delta_x, 0, 0, 0)
        print "Close fingers"
        how_wide_to_open=CLOSE_HAND_FINGERS
        q_gripper=[how_wide_to_open, how_wide_to_open, how_wide_to_open, 0]
        set_grippers(RIGHT_HAND,q_gripper )
        print "Move along Z axis"
        move_tool_by_delta(RIGHT_HAND, 0, 0, 0.03, 0)
        print "Move hand back along X-axis"
        move_tool_by_delta(RIGHT_HAND, -distance, 0, 0, 0)
        move_tool_by_delta(RIGHT_HAND, -0.1, 0, 0, 0)
        pass
##### Send data to other subsystems #####
    def sendData_TakeCupFromCabinetAvoidingDoor(self):
        """Send diagnostics to other subsystems; this behaviour has no typical outputs."""
        self.log("[Behaviour TakeCupFromCabinetAvoidingDoor] -- Sending Data")
        # DIAGNOSTICS SEND #
        self.sendDataForDiagnostics()
        # END OF DIAGNOSTICS SEND #
        # TYPICAL SEND CALL #
        # END OF TYPICAL SEND CALL #
        # BEGIN OF BODY SEND CALL #
        print "[Behaviour TakeCupFromCabinetAvoidingDoor] -- Sending Data\n"
        # END OF BODY SEND CALL #
        pass
##### Receive data from other subsystems #####
    def receiveData_TakeCupFromCabinetAvoidingDoor(self):
        """Block until all subscribed input messages have arrived."""
        self.log("[Behaviour TakeCupFromCabinetAvoidingDoor] -- Receiving Data")
        # TYPICAL RECEIVE CALL #
        self.waitForAllMessages() #
        # END OF TYPICAL RECEIVE CALL #
        # BEGIN OF RECEIVE BODY CALL #
        print "[Behaviour TakeCupFromCabinetAvoidingDoor] -- Receiving Data\n"
        # END OF RECEIVE BODY CALL #
        pass
##### Execute behaviour TakeCupFromCabinetAvoidingDoor #####
    def executeBehaviour_TakeCupFromCabinetAvoidingDoor(self):
        """Run the TakeCupFromCabinetAvoidingDoor behaviour loop (transition ->
        send -> receive) until a terminal/error condition fires or the
        subsystem stops being OK."""
        self.log("[Behaviour TakeCupFromCabinetAvoidingDoor] -- Executing TakeCupFromCabinetAvoidingDoor Behaviour")
        stopBehaviourIteration=False
        # Execution of a single iteration of a behaviour TakeCupFromCabinetAvoidingDoor #
        # NOTE(review): this local is never read -- the loop below updates
        # self._behaviourIterations, presumably initialised elsewhere
        _behaviourIterations=0
        # Starts execution! #
        while True:
            # Sleep is a method from class AuxiliaryFunctions which executes sleep from ROS #
            self.auxiliaryFunctions.sleep()
            # Calculates transition function -- output and internal buffers can only be modified by this function! #
            self.transitionFunction_TakeCupFromCabinetAvoidingDoor()
            # Sends data! #
            self.sendData_TakeCupFromCabinetAvoidingDoor()
            # Updates index! -- i.e. i:i+1 -- increment number of behaviour iterations and number of subsystem iterations #
            self._behaviourIterations=self._behaviourIterations+1
            # Receives data! #
            self.receiveData_TakeCupFromCabinetAvoidingDoor()
            # Check both conditions, i.e. terminal condition and error condition #
            stopBehaviourIteration = self.terminalCondition_TakeCupFromCabinetAvoidingDoor() or self.errorCondition_TakeCupFromCabinetAvoidingDoor()
            if stopBehaviourIteration or not self.auxiliaryFunctions.isSubsystemOK():
                '''
                Iterate within the while loop until stopBehaviourIteration is set true, i.e. one
                of error and terminal condition is fulfilled and isSubsystemOK is true. Otherwise
                subsystem must have been switched to another state or SIGINT was sent
                '''
                break
        # Stops execution! #
        pass
##### Behaviour MoveInFrontOfThirdTableAvoidingDoor #####
##### Terminal condition #####
    def terminalCondition_MoveInFrontOfThirdTableAvoidingDoor(self): # Int64_currentState #
        """Terminal condition: always True, so the behaviour runs exactly one iteration."""
        self.log("[Behaviour MoveInFrontOfThirdTableAvoidingDoor] -- Checking Terminal Condition")
        return True
        pass
##### Error condition #####
    def errorCondition_MoveInFrontOfThirdTableAvoidingDoor(self): # Int64_currentState #
        """Error condition: always False -- this behaviour never signals an error."""
        self.log("[Behaviour MoveInFrontOfThirdTableAvoidingDoor] -- Checking Error Condition")
        return False
        pass
##### Transition function #####
    def transitionFunction_MoveInFrontOfThirdTableAvoidingDoor(self): # Int64_currentState #
        """Reposition towards the third table: switch the right hand to cartesian
        impedance mode, rotate the tool, turn the torso to -1.56 rad and move
        the left hand back to its initial position."""
        self.log("[Behaviour MoveInFrontOfThirdTableAvoidingDoor] -- Calculating Transition Function")
        # Transition function #
        self.log("TRANSITION FUNCTION - MoveInFrontOfThirdTableAvoidingDoor")
        # Partial transition function - name - tf1_1
        print "moveInFrontOfThirdTableAvoidingDoor"
        self._currentState.data=STATE_MOVE_IN_FRONT_OF_THIRD_TABLE_AVOIDING_DOOR
        switch_to_cartesian_impedance_mode(RIGHT_HAND)
        move_tool_by_delta(RIGHT_HAND,0,0,0,0.78)
        rotate_torso(-1.56) # rotate torso to position -1.56
        move_robot_to_initial_position(False, LEFT_HAND)
        pass
##### Send data to other subsystems #####
    def sendData_MoveInFrontOfThirdTableAvoidingDoor(self):
        """Send diagnostics to other subsystems; this behaviour has no typical outputs."""
        self.log("[Behaviour MoveInFrontOfThirdTableAvoidingDoor] -- Sending Data")
        # DIAGNOSTICS SEND #
        self.sendDataForDiagnostics()
        # END OF DIAGNOSTICS SEND #
        # TYPICAL SEND CALL #
        # END OF TYPICAL SEND CALL #
        # BEGIN OF BODY SEND CALL #
        print "[Behaviour MoveInFrontOfThirdTableAvoidingDoor] -- Sending Data\n"
        # END OF BODY SEND CALL #
        pass
##### Receive data from other subsystems #####
    def receiveData_MoveInFrontOfThirdTableAvoidingDoor(self):
        """Block until all subscribed input messages have arrived."""
        self.log("[Behaviour MoveInFrontOfThirdTableAvoidingDoor] -- Receiving Data")
        # TYPICAL RECEIVE CALL #
        self.waitForAllMessages() #
        # END OF TYPICAL RECEIVE CALL #
        # BEGIN OF RECEIVE BODY CALL #
        print "[Behaviour MoveInFrontOfThirdTableAvoidingDoor] -- Receiving Data\n"
        # END OF RECEIVE BODY CALL #
        pass
##### Execute behaviour MoveInFrontOfThirdTableAvoidingDoor #####
    def executeBehaviour_MoveInFrontOfThirdTableAvoidingDoor(self):
        """Run the MoveInFrontOfThirdTableAvoidingDoor behaviour loop
        (transition -> send -> receive) until a terminal/error condition fires
        or the subsystem stops being OK."""
        self.log("[Behaviour MoveInFrontOfThirdTableAvoidingDoor] -- Executing MoveInFrontOfThirdTableAvoidingDoor Behaviour")
        stopBehaviourIteration=False
        # Execution of a single iteration of a behaviour MoveInFrontOfThirdTableAvoidingDoor #
        # NOTE(review): this local is never read -- the loop below updates
        # self._behaviourIterations, presumably initialised elsewhere
        _behaviourIterations=0
        # Starts execution! #
        while True:
            # Sleep is a method from class AuxiliaryFunctions which executes sleep from ROS #
            self.auxiliaryFunctions.sleep()
            # Calculates transition function -- output and internal buffers can only be modified by this function! #
            self.transitionFunction_MoveInFrontOfThirdTableAvoidingDoor()
            # Sends data! #
            self.sendData_MoveInFrontOfThirdTableAvoidingDoor()
            # Updates index! -- i.e. i:i+1 -- increment number of behaviour iterations and number of subsystem iterations #
            self._behaviourIterations=self._behaviourIterations+1
            # Receives data! #
            self.receiveData_MoveInFrontOfThirdTableAvoidingDoor()
            # Check both conditions, i.e. terminal condition and error condition #
            stopBehaviourIteration = self.terminalCondition_MoveInFrontOfThirdTableAvoidingDoor() or self.errorCondition_MoveInFrontOfThirdTableAvoidingDoor()
            if stopBehaviourIteration or not self.auxiliaryFunctions.isSubsystemOK():
                '''
                Iterate within the while loop until stopBehaviourIteration is set true, i.e. one
                of error and terminal condition is fulfilled and isSubsystemOK is true. Otherwise
                subsystem must have been switched to another state or SIGINT was sent
                '''
                break
        # Stops execution! #
        pass
##### Behaviour PourBallOutOfCup #####
##### Terminal condition #####
    def terminalCondition_PourBallOutOfCup(self): # Int64_currentState #
        """Terminal condition: always True, so the behaviour runs exactly one iteration."""
        self.log("[Behaviour PourBallOutOfCup] -- Checking Terminal Condition")
        return True
        pass
##### Error condition #####
    def errorCondition_PourBallOutOfCup(self): # Int64_currentState #
        """Error condition: always False -- this behaviour never signals an error."""
        self.log("[Behaviour PourBallOutOfCup] -- Checking Error Condition")
        return False
        pass
##### Transition function #####
    def transitionFunction_PourBallOutOfCup(self): # Int64_currentState #
        """Pour the ball out of the held cup over the cup standing on the table.

        Moves the right hand to the 'Tr' frame pose, rotates back, aligns with
        'cup_on_table' along Y then X, then rotates the tool about its axis
        (-0.77 then +0.67 rad) to tip the ball out before returning upright.
        """
        self.log("[Behaviour PourBallOutOfCup] -- Calculating Transition Function")
        # Transition function #
        self.log("TRANSITION FUNCTION - PourBallOutOfCup")
        # Partial transition function - name - tf1_1
        print "pourBallOutOfCup"
        self._currentState.data=STATE_POUR_BALL_OUT_OF_CUP
        # get position of right hand
        switch_to_cartesian_impedance_mode(RIGHT_HAND)
        frame=check_object_frame("B","Tr")
        pose=frame.p
        angle_x=frame.M.GetRPY()[0]
        angle_y=frame.M.GetRPY()[1]
        angle_z=frame.M.GetRPY()[2]
        move_end_effector_to_cartesian_position_and_angle_and_wrench(RIGHT_HAND, angle_x, angle_y, angle_z, pose[0], pose[1], pose[2], IMP_LIST_DEFAULT_STIFFNESS, IMP_TIME_LIST, MAX_WRENCH_DEFAULT, PATH_TOLERANCE_DEFAULT_2, DEFAULT_TIME)
        move_tool_by_delta(RIGHT_HAND,0,0,0,-0.78)
        move_tool_by_delta(RIGHT_HAND,0,0,0,-0.7)
        print "Move in front of cup"
        move_in_front_of_cup(True, RIGHT_HAND)
        move_tool_by_delta(RIGHT_HAND,-0.1,0,0.1,0)
        move_tool_by_delta(RIGHT_HAND,-0.05,0,0.03,0)
        print "Move along Y-axis"
        # align the grip frame with the table cup along Y
        grip_frame=check_object_frame("B", "Gr")
        grip_pose=grip_frame.p
        cup_frame=check_object_frame("B", "cup_on_table")
        cup_pose=cup_frame.p
        distance_y=cup_pose[1]-grip_pose[1]
        delta_y=distance_y
        move_tool_by_delta(RIGHT_HAND, 0, delta_y, 0, 0)
        move_and_rotate_tool_by_delta(RIGHT_HAND,0,0,0,-1.57,0,0)
        # re-read the grip frame after the rotation and close in along X
        grip_frame=check_object_frame("B", "Gr")
        grip_pose=grip_frame.p
        distance_x=cup_pose[0]-grip_pose[0]
        delta_x=distance_x
        move_and_rotate_tool_by_delta(RIGHT_HAND, distance_x-0.1, 0, 0.05, 0,0,0)
        move_and_rotate_tool_by_delta(RIGHT_HAND,0,0,0,-0.77,0,0)
        move_and_rotate_tool_by_delta(RIGHT_HAND,0,0,0,0.67,0,0)
        move_and_rotate_tool_by_delta(RIGHT_HAND, -0.1, 0, 0.05, 0,0,0)
        move_and_rotate_tool_by_delta(RIGHT_HAND,0,0,0,1.57,0,0)
        pass
##### Send data to other subsystems #####
    def sendData_PourBallOutOfCup(self):
        """Send diagnostics to other subsystems; this behaviour has no typical outputs."""
        self.log("[Behaviour PourBallOutOfCup] -- Sending Data")
        # DIAGNOSTICS SEND #
        self.sendDataForDiagnostics()
        # END OF DIAGNOSTICS SEND #
        # TYPICAL SEND CALL #
        # END OF TYPICAL SEND CALL #
        # BEGIN OF BODY SEND CALL #
        print "[Behaviour PourBallOutOfCup] -- Sending Data\n"
        # END OF BODY SEND CALL #
        pass
##### Receive data from other subsystems #####
    def receiveData_PourBallOutOfCup(self):
        """Block until all subscribed input messages have arrived."""
        self.log("[Behaviour PourBallOutOfCup] -- Receiving Data")
        # TYPICAL RECEIVE CALL #
        self.waitForAllMessages() #
        # END OF TYPICAL RECEIVE CALL #
        # BEGIN OF RECEIVE BODY CALL #
        print "[Behaviour PourBallOutOfCup] -- Receiving Data\n"
        # END OF RECEIVE BODY CALL #
        pass
##### Execute behaviour PourBallOutOfCup #####
    def executeBehaviour_PourBallOutOfCup(self):
        """Run the PourBallOutOfCup behaviour loop (transition -> send ->
        receive) until a terminal/error condition fires or the subsystem
        stops being OK."""
        self.log("[Behaviour PourBallOutOfCup] -- Executing PourBallOutOfCup Behaviour")
        stopBehaviourIteration=False
        # Execution of a single iteration of a behaviour PourBallOutOfCup #
        # NOTE(review): this local is never read -- the loop below updates
        # self._behaviourIterations, presumably initialised elsewhere
        _behaviourIterations=0
        # Starts execution! #
        while True:
            # Sleep is a method from class AuxiliaryFunctions which executes sleep from ROS #
            self.auxiliaryFunctions.sleep()
            # Calculates transition function -- output and internal buffers can only be modified by this function! #
            self.transitionFunction_PourBallOutOfCup()
            # Sends data! #
            self.sendData_PourBallOutOfCup()
            # Updates index! -- i.e. i:i+1 -- increment number of behaviour iterations and number of subsystem iterations #
            self._behaviourIterations=self._behaviourIterations+1
            # Receives data! #
            self.receiveData_PourBallOutOfCup()
            # Check both conditions, i.e. terminal condition and error condition #
            stopBehaviourIteration = self.terminalCondition_PourBallOutOfCup() or self.errorCondition_PourBallOutOfCup()
            if stopBehaviourIteration or not self.auxiliaryFunctions.isSubsystemOK():
                '''
                Iterate within the while loop until stopBehaviourIteration is set true, i.e. one
                of error and terminal condition is fulfilled and isSubsystemOK is true. Otherwise
                subsystem must have been switched to another state or SIGINT was sent
                '''
                break
        # Stops execution! #
        pass
##### Behaviour Stop #####
##### Terminal condition #####
    def terminalCondition_Stop(self): # Int64_currentState #
        """Terminal condition: always True, so the behaviour runs exactly one iteration."""
        self.log("[Behaviour Stop] -- Checking Terminal Condition")
        return True
        pass
##### Error condition #####
    def errorCondition_Stop(self): # Int64_currentState #
        """Error condition: always False -- this behaviour never signals an error."""
        self.log("[Behaviour Stop] -- Checking Error Condition")
        return False
        pass
##### Transition function #####
    def transitionFunction_Stop(self): # Int64_currentState #
        """Stop behaviour: terminates the whole process immediately via exit(1)."""
        self.log("[Behaviour Stop] -- Calculating Transition Function")
        # Transition function #
        self.log("TRANSITION FUNCTION - Stop")
        # Partial transition function - name - tf1_1
        # NOTE(review): exit(1) kills the interpreter here, so the behaviour
        # loop's subsequent send/receive steps are presumably never reached
        exit(1)
        pass
##### Send data to other subsystems #####
def sendData_Stop(self):
self.log("[Behaviour Stop] -- Sending Data")
# DIAGNOSTICS SEND #
self.sendDataForDiagnostics()
# END OF DIAGNOSTICS SEND #
# TYPICAL SEND CALL #
# END OF TYPICAL SEND CALL #
# BEGIN OF BODY SEND CALL #
print "[Behaviour Stop] -- Sending Data\n"
# END OF BODY SEND | |
Defaults to 1000.
size : int or tuple of ints, optional
Size to pass for generating samples from the alternative
distribution. Defaults to None.
return_samples : boolean, optional
If True, return the bootstrapped statistic or jackknife
values. Defaults to False.
theta_star : array_like, optional
Bootstrapped statistic values. Can be passed if they have
already been calculated, which will speed this up
considerably.
theta_hat : float, optional
Observed statistic. Can be passed if it has already been
calculated, which will speed this up slightly.
two_sided : boolean, optional
If True, computes a two-sided significance value. If False
(default), only a one-sided value is returned. Support for
two-sided tests is *experimental*. Use with caution!
num_threads : int, optional
Number of threads to use for multicore processing. Defaults to
1, meaning all calculations will be done in a single
thread. Set to -1 to use all available cores.
Returns
-------
asl : float
Achieved significance level, the probability of an outcome at
least as extreme as that actually observed under the null
hypothesis; aka the p-value.
theta_star : ndarray
Array of bootstrapped statistic values. Only returned if
`return_samples` is True.
"""
asl = 0
if theta_hat is None:
theta_hat = stat(x)
if theta_star is None:
theta_star = bootstrap_samples(
dist, stat, B, size=size, num_threads=num_threads
)
if two_sided:
asl = (abs(theta_star) >= abs(theta_hat)).sum()
else:
asl = (theta_star >= theta_hat).sum()
asl /= len(theta_star)
if return_samples:
return asl, theta_star
else:
return asl
def percentile_asl(
    dist,
    stat,
    x,
    theta_0=0,
    B=1000,
    size=None,
    return_samples=False,
    theta_star=None,
    theta_hat=None,
    two_sided=False,
    num_threads=1,
):
    """Achieved Significance Level, percentile method

    Parameters
    ----------
    dist : EmpiricalDistribution
        The empirical distribution.
    stat : function
        The test statistic.
    x : array_like or pandas DataFrame or tuple
        The data, used to calculate the observed value of the
        statistic if `theta_hat` is not passed.
    theta_0 : float, optional
        The mean of the test statistic under the null
        hypothesis. Defaults to 0.
    B : int, optional
        Number of bootstrap samples.
    size : int or tuple of ints, optional
        Size to pass for generating samples from the alternative
        distribution. Defaults to None.
    return_samples : boolean, optional
        If True, return the bootstrapped statistic or jackknife
        values. Defaults to False.
    theta_star : array_like, optional
        Bootstrapped statistic values. Can be passed if they have
        already been calculated, which will speed this up
        considerably.
    theta_hat : float, optional
        Observed statistic. Can be passed if it has already been
        calculated, which will speed this up slightly.
    two_sided : boolean, optional
        If True, computes a two-sided significance value. If False
        (default), only a one-sided value is returned. Support for
        two-sided tests is *experimental*. Use with caution!
    num_threads : int, optional
        Number of threads to use for multicore processing. Defaults to
        1, meaning all calculations will be done in a single
        thread. Set to -1 to use all available cores.

    Returns
    -------
    asl : float
        Achieved significance level, the probability of an outcome at
        least as extreme as that actually observed under the null
        hypothesis; aka the p-value.
    theta_star : ndarray
        Array of bootstrapped statistic values. Only returned if
        `return_samples` is True.

    Notes
    -----
    Under the null hypothesis, the value of the statistic is
    theta_0. Suppose theta_hat > theta_0. Let theta_lo, theta_hi be
    the endpoints of a 100(1-alpha)% confidence interval on theta.
    Suppose alpha is such that theta_lo = theta_0. Then alpha is the
    achieved significance level.

    For the percentile interval, this is simply the fraction of
    bootstrap samples that are "on the other side" of theta_0 from
    theta_hat.
    """
    if theta_hat is None:
        theta_hat = stat(x)
    if theta_hat == theta_0:
        # Degenerate case: the observed statistic sits exactly at the null
        # value, so the significance level is 1. BUGFIX: this early exit used
        # to return a bare float even when `return_samples` was True, which
        # broke callers (e.g. bcanon_asl) that unpack two values. `theta_star`
        # may still be None here if it was neither passed nor computed.
        if return_samples:
            return 1.0, theta_star
        return 1.0
    if theta_star is None:
        theta_star = bootstrap_samples(
            dist, stat, B, size=size, num_threads=num_threads
        )
    # Count bootstrap replicates that fall on the opposite side of theta_0
    # from the observed statistic.
    if theta_hat > theta_0:
        b = (theta_star < theta_0).sum()
    else:
        b = (theta_star > theta_0).sum()
    asl = b / len(theta_star)
    if two_sided:
        # Experimental: symmetric two-sided level as twice the one-sided one.
        asl *= 2
    if return_samples:
        return asl, theta_star
    return asl
def bcanon_asl(
    dist,
    stat,
    x,
    theta_0=0,
    B=1000,
    size=None,
    return_samples=False,
    theta_star=None,
    theta_hat=None,
    jv=None,
    two_sided=False,
    num_threads=1,
):
    """Achieved Significance Level, bcanon method

    Parameters
    ----------
    dist : EmpiricalDistribution
        The empirical distribution.
    stat : function
        The test statistic.
    x : array_like or pandas DataFrame or tuple
        The data, used to evaluate the observed statistic and compute
        jackknife values.
    theta_0 : float, optional
        The mean of the test statistic under the null
        hypothesis. Defaults to 0.
    B : int, optional
        Number of bootstrap samples.
    size : int or tuple of ints, optional
        Size to pass for generating samples from the alternative
        distribution. Defaults to None.
    return_samples : boolean, optional
        If True, return the bootstrapped statistic or jackknife
        values. Defaults to False.
    theta_star : array_like, optional
        Bootstrapped statistic values. Can be passed if they have
        already been calculated, which will speed this up
        considerably.
    theta_hat : float, optional
        Observed statistic. Can be passed if it has already been
        calculated, which will speed this up slightly.
    jv : array_like, optional
        Jackknife values. Can be passed if they have already been
        calculated, which will speed this up considerably.
    two_sided : boolean, optional
        If True, computes a two-sided significance value. If False
        (default), only a one-sided value is returned. Support for
        two-sided tests is *experimental*. Use with caution!
    num_threads : int, optional
        Number of threads to use for multicore processing. Defaults to
        1, meaning all calculations will be done in a single
        thread. Set to -1 to use all available cores.

    Returns
    -------
    asl : float
        Achieved significance level, the probability of an outcome at
        least as extreme as that actually observed under the null
        hypothesis; aka the p-value.
    theta_star : ndarray
        Array of bootstrapped statistic values. Only returned if
        `return_samples` is True.
    jv : ndarray
        Jackknife values. Only returned if `return_samples` is True.
    """
    if theta_hat is None:
        theta_hat = stat(x)
    # Start from the one-sided percentile-method level; this also generates
    # (or reuses) the bootstrap replicates theta_star.
    a0, theta_star = percentile_asl(
        dist,
        stat,
        x,
        theta_0=theta_0,
        B=B,
        size=size,
        return_samples=True,
        theta_star=theta_star,
        theta_hat=theta_hat,
        two_sided=False,
        num_threads=num_threads,
    )
    # Degenerate levels (0 or 1) cannot be mapped through the normal quantile
    # function below (ppf would give +/-inf), so return them unadjusted.
    # Note: jv is returned as None here because it was never computed.
    if a0 == 0 or a0 == 1:
        if return_samples:
            return a0, theta_star, None
        else:
            return a0
    # Bias correction z0_hat: normal quantile of the fraction of bootstrap
    # replicates on the near side of the observed statistic.
    if theta_hat > theta_0:
        zb = (theta_star < theta_hat).sum()
    else:
        zb = (theta_star > theta_hat).sum()
    z0_hat = ss.norm.ppf(zb / len(theta_star))
    if jv is None:
        if dist.is_multi_sample:
            # Multi-sample data: compute jackknife values per sample and pack
            # them into a tuple ((*jv,) converts the list to a tuple).
            jv = [
                jackknife_values(x, stat, num_threads=num_threads, sample=i)
                for i in range(len(x))
            ]
            jv = (*jv,)
        else:
            jv = jackknife_values(x, stat, num_threads=num_threads)
    # Acceleration estimate a_hat from the jackknife values.
    a_hat = _bca_acceleration(jv)
    # Apply the BCa adjustment to the percentile-method level a0: map a0 to
    # the normal scale, correct for bias and acceleration, and map back.
    w0 = ss.norm.ppf(a0)
    t = (w0 - z0_hat) / (1 + a_hat * (w0 - z0_hat)) - z0_hat
    asl = ss.norm.cdf(t)
    if two_sided:
        # To-do: generalize as follows.
        # What we really want is stat with the samples
        # interchanged. In many cases of practically interest, that is
        # simply -stat, as shown below. But that's not always the case
        # and we can be more general by allowing the user to pass a
        # transform mapping stat to interchange stat. Need to think
        # this through more.
        other_asl = bcanon_asl(
            dist,
            lambda z: -stat(z),
            x,
            theta_hat=-theta_hat,
            theta_0=-theta_0,
            theta_star=-theta_star,
            two_sided=False,
            num_threads=num_threads,
        )
        asl += other_asl
    if return_samples:
        return asl, theta_star, jv
    else:
        return asl
def bootstrap_power(
alt_dist,
null_dist,
stat,
asl=bootstrap_asl,
alpha=0.05,
size=None,
P=100,
**kwargs,
):
"""Bootstrap Power
Parameters
----------
alt_dist : EmpiricalDistribution
Distribution under the alternative hypothesis.
null_dist : class
Class corresponding to the null distribution. See Notes.
stat : function
Function that computes the test statistic.
asl : function, optional
Function that computes an achieved significance
level. Defaults to bootstrap_asl.
alpha : float, optional
Desired Type-I error rate. Defaults to 0.05.
size : int or tuple of ints, optional
Size to pass for generating samples from the alternative
distribution. Defaults to None.
P : int, optional
Number of Monte Carlo simulations to run for the purposes of
calculating power. Defaults to 100.
kwargs : optional
Other keyword arguments to pass to `asl`, such as the number
of bootstrap samples to use.
Returns
-------
pwr : float
The fraction of Monte Carlo simulations in which the null
hypothesis was rejected.
Notes
-----
Perhaps the most confusing aspect of this function is that | |
constant for id
if (kern['id_port'] != None):
tcl_user_app.instBlock(
{
'name':'xlconstant',
'inst': 'applicationRegion/id_' + str(kern['num']),
'properties':['CONFIG.CONST_WIDTH {32}',
'CONFIG.CONST_VAL {'+ str(kern['num'])+'}']
}
)
tcl_user_app.makeConnection(
'net',
{
'name':'applicationRegion/id_' + str(kern['num']),
'type':'pin',
'port_name':'dout'
},
{
'name':'applicationRegion/' + instName,
'type':'pin',
'port_name':kern['id_port']
}
)
if kern['const'] != None:
for const in kern['const']:
tcl_user_app.instBlock(
{
'name':'xlconstant',
'inst': 'applicationRegion/' + instName + '_' + const['name'],
'properties':['CONFIG.CONST_WIDTH {' + const['width'] + '}',
' CONFIG.CONST_VAL {'+ const['val'] + '}']
}
)
tcl_user_app.makeConnection(
'net',
{
'name':'applicationRegion/' + instName + '_' + const['name'] ,
'type':'pin',
'port_name':'dout'
},
{
'name':'applicationRegion/' + instName,
'type':'pin',
'port_name':const['name']
}
)
def userApplicationRegionSwitchesInst(tcl_user_app, sim):
    """
    Instantiate the Galapagos router (the block that sits in the application
    region and routes packets either to the network or to another kernel on
    the same FPGA), plus its routing-table BRAM(s) and the input/output AXI
    stream switches. This only instantiates IPs and does not make any
    connections (except to the network table and IP/MAC consts).

    Args:
        tcl_user_app: a tclMe object (which contains references to the FPGA's
            node object and a handle to the output file)
        sim: if 0, a synthesizable axis_switch is generated as the input
            switch; otherwise an HLS arbiter is used (presumably for
            simulation only -- TODO confirm)
    """
    # BRAM which stores the routing table (skipped for the custom GAScore build)
    if tcl_user_app.fpga['comm'] != 'none':
        if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
            tcl_user_app.instBlock(
                {
                    'name':'blk_mem_gen',
                    'inst':'applicationRegion/blk_mem_switch_rom',
                }
            )
    # The next 250 lines of code are a big if-elif-else statement which generate
    # the correct Galapagos router depending on whether the communication type is
    # "tcp", "eth", or "raw"
    if tcl_user_app.fpga['comm'] == 'tcp':
        # 32-bit (IPv4-keyed) router for TCP
        tcl_user_app.instBlock(
            {'vendor':'xilinx.com',
             'lib':'hls',
             'name':'width32router',
             'inst':'applicationRegion/custom_switch_inst',
             'clks':['aclk'],
             'resetns':['aresetn']
            }
        )
        # Properties for routing table BRAM: 256-entry single-port ROM
        # initialized from the generated ip.coe file
        properties = ['CONFIG.Memory_Type {Single_Port_ROM}',
                      'CONFIG.Enable_32bit_Address {true}',
                      'CONFIG.Use_Byte_Write_Enable {false}',
                      'CONFIG.Byte_Size {8}',
                      'CONFIG.Write_Depth_A {256}',
                      'CONFIG.Register_PortA_Output_of_Memory_Primitives {false}',
                      'CONFIG.Use_RSTA_Pin {true}',
                      'CONFIG.Port_A_Write_Rate {0}',
                      'CONFIG.use_bram_block {BRAM_Controller}',
                      'CONFIG.EN_SAFETY_CKT {true}',
                      'CONFIG.Load_Init_File {true}',
                      'CONFIG.Coe_File $top_path/projects/$default_dir/ip.coe'
                      ]
        tcl_user_app.setProperties('applicationRegion/blk_mem_switch_rom', properties)
        # Connect the board's local IP address constant to the router (so it
        # can tell which destinations are on this node)
        tcl_user_app.makeConnection(
            'net',
            {
                'name':'network/ip_constant_block_inst',
                'type':'pin',
                'port_name':'ip'
            },
            {
                'name':'applicationRegion/custom_switch_inst',
                'type':'pin',
                'port_name':'network_addr_V'
            }
        )
        # Connect routing table BRAM to Galapagos router
        tcl_user_app.makeConnection(
            'intf',
            {
                'name':'applicationRegion/custom_switch_inst',
                'type':'intf',
                'port_name':'network_table_V_PORTA'
            },
            {
                'name':'applicationRegion/blk_mem_switch_rom',
                'type':'intf',
                'port_name':'BRAM_PORTA'
            }
        )
    # Refer to comments in the case for TCP (above); here the router is keyed
    # on 48-bit MAC addresses and the ROM is initialized from mac.coe
    elif tcl_user_app.fpga['comm'] == 'eth':
        if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
            tcl_user_app.instBlock(
                {'vendor':'xilinx.com',
                 'lib':'hls',
                 'name':'width48router',
                 'inst':'applicationRegion/custom_switch_inst',
                 'clks':['aclk'],
                 'resetns':['aresetn']
                }
            )
        # NOTE(review): 'Load_init_file' is lowercase here but 'Load_Init_File'
        # in the TCP branch -- Vivado property names are matched by the tools;
        # confirm whether the casing difference is intentional.
        properties = ['CONFIG.Memory_Type {Single_Port_ROM}',
                      'CONFIG.Enable_32bit_Address {true}',
                      'CONFIG.Use_Byte_Write_Enable {false}',
                      'CONFIG.Byte_Size {8}',
                      'CONFIG.Write_Width_A {64}',
                      'CONFIG.Write_Depth_A {256}',
                      'CONFIG.Read_Width_A {64}',
                      'CONFIG.Write_Width_B {64}',
                      'CONFIG.Read_Width_B {64}',
                      'CONFIG.Register_PortA_Output_of_Memory_Primitives {false}',
                      'CONFIG.Use_RSTA_Pin {true}',
                      'CONFIG.Port_A_Write_Rate {0}',
                      'CONFIG.use_bram_block {BRAM_Controller}',
                      'CONFIG.EN_SAFETY_CKT {true}',
                      'CONFIG.Load_init_file {true}',
                      'CONFIG.Coe_File $top_path/projects/$default_dir/mac.coe'
                      ]
        tcl_user_app.setProperties('applicationRegion/blk_mem_switch_rom', properties)
        tcl_user_app.makeConnection(
            'net',
            {
                'name':'network/ip_constant_block_inst',
                'type':'pin',
                'port_name':'mac_big'
            },
            {
                'name':'applicationRegion/custom_switch_inst',
                'type':'pin',
                'port_name':'network_addr_V'
            }
        )
        tcl_user_app.makeConnection(
            'intf',
            {
                'name':'applicationRegion/custom_switch_inst',
                'type':'intf',
                'port_name':'network_table_V_PORTA'
            },
            {
                'name':'applicationRegion/blk_mem_switch_rom',
                'type':'intf',
                'port_name':'BRAM_PORTA'
            }
        )
    elif tcl_user_app.fpga['comm'] == 'raw':
        # configures one memory to hold the IP addresses
        properties = ['CONFIG.Memory_Type {Single_Port_ROM}',
                      'CONFIG.Enable_32bit_Address {true}',
                      'CONFIG.Use_Byte_Write_Enable {false}',
                      'CONFIG.Byte_Size {8}',
                      'CONFIG.Write_Depth_A {256}',
                      'CONFIG.Register_PortA_Output_of_Memory_Primitives {false}',
                      'CONFIG.Use_RSTA_Pin {true}',
                      'CONFIG.Port_A_Write_Rate {0}',
                      'CONFIG.use_bram_block {BRAM_Controller}',
                      'CONFIG.EN_SAFETY_CKT {true}',
                      'CONFIG.Load_Init_File {true}',
                      'CONFIG.Coe_File $top_path/projects/$default_dir/ip.coe'
                      ]
        tcl_user_app.setProperties('applicationRegion/blk_mem_switch_rom', properties)
        # AXI BRAM controller so the IP table is reachable over AXI
        tcl_user_app.instBlock(
            {
                'name':'axi_bram_ctrl',
                'inst':'applicationRegion/ctrl_blk_mem_switch_rom',
                'clks':['s_axi_aclk'],
                'resetns':['s_axi_aresetn']
            }
        )
        tcl_user_app.setProperties('applicationRegion/ctrl_blk_mem_switch_rom', ["CONFIG.SINGLE_PORT_BRAM {1}"])
        # configures another memory to hold the MAC addresses
        tcl_user_app.instBlock(
            {
                'name':'blk_mem_gen',
                'inst':'applicationRegion/blk_mem_switch_rom_mac',
            }
        )
        tcl_user_app.instBlock(
            {
                'name':'axi_bram_ctrl',
                'inst':'applicationRegion/ctrl_blk_mem_switch_rom_mac',
                'clks':['s_axi_aclk'],
                'resetns':['s_axi_aresetn']
            }
        )
        tcl_user_app.setProperties('applicationRegion/ctrl_blk_mem_switch_rom_mac', ["CONFIG.SINGLE_PORT_BRAM {1}", "CONFIG.DATA_WIDTH {64}"])
        properties = ['CONFIG.Memory_Type {Single_Port_ROM}',
                      'CONFIG.Enable_32bit_Address {true}',
                      'CONFIG.Use_Byte_Write_Enable {false}',
                      'CONFIG.Byte_Size {8}',
                      'CONFIG.Write_Width_A {64}',
                      'CONFIG.Write_Depth_A {256}',
                      'CONFIG.Read_Width_A {64}',
                      'CONFIG.Write_Width_B {64}',
                      'CONFIG.Read_Width_B {64}',
                      'CONFIG.Register_PortA_Output_of_Memory_Primitives {false}',
                      'CONFIG.Use_RSTA_Pin {true}',
                      'CONFIG.Port_A_Write_Rate {0}',
                      'CONFIG.use_bram_block {BRAM_Controller}',
                      'CONFIG.EN_SAFETY_CKT {true}',
                      'CONFIG.Load_init_file {true}',
                      'CONFIG.Coe_File $top_path/projects/$default_dir/mac.coe'
                      ]
        tcl_user_app.setProperties('applicationRegion/blk_mem_switch_rom_mac', properties)
        # connect these two memories to the global interconnect; the new
        # master ports are numbered after the existing global s_axi interfaces
        app_interfaces = len(getInterfaces(tcl_user_app.fpga, 's_axi', 'scope', 'global'))
        idx_str = "%02d"%(app_interfaces)
        tcl_user_app.makeConnection(
            'intf',
            {'name':'applicationRegion/axi_interconnect_ctrl',
             'type':'intf',
             'port_name':'M' + idx_str + '_AXI'
            },
            {
                'name':'applicationRegion/ctrl_blk_mem_switch_rom',
                'type':'intf',
                'port_name':'S_AXI'
            }
        )
        idx_str = "%02d"%(app_interfaces + 1)
        tcl_user_app.makeConnection(
            'intf',
            {'name':'applicationRegion/axi_interconnect_ctrl',
             'type':'intf',
             'port_name':'M' + idx_str + '_AXI'
            },
            {
                'name':'applicationRegion/ctrl_blk_mem_switch_rom_mac',
                'type':'intf',
                'port_name':'S_AXI'
            }
        )
        # connect the BRAMs to their controllers
        tcl_user_app.makeConnection(
            'intf',
            {
                'name':'applicationRegion/ctrl_blk_mem_switch_rom',
                'type':'intf',
                'port_name':'BRAM_PORTA'
            },
            {
                'name':'applicationRegion/blk_mem_switch_rom',
                'type':'intf',
                'port_name':'BRAM_PORTA'
            }
        )
        tcl_user_app.makeConnection(
            'intf',
            {
                'name':'applicationRegion/ctrl_blk_mem_switch_rom_mac',
                'type':'intf',
                'port_name':'BRAM_PORTA'
            },
            {
                'name':'applicationRegion/blk_mem_switch_rom_mac',
                'type':'intf',
                'port_name':'BRAM_PORTA'
            }
        )
    elif tcl_user_app.fpga['comm'] == 'none':
        # No off-chip communication: no router or routing table needed
        pass
    else:
        print("Unknown communication type: " + tcl_user_app.fpga['comm'])
        exit(1)
    # Ask how many (global) s_axis connections are in the user app region.
    num_slave_s_axis_global = len(getInterfaces(tcl_user_app.fpga, 's_axis', 'scope' , 'global'))
    if num_slave_s_axis_global == 0:
        ##TO DO: CHANGE TO VIP FOR 0 SLAVES
        print("TO DO: CHANGE TO VIP FOR 0 SLAVES in userApplicationRegionSwitchesInst")
        quit(0)
    else:
        # for simulation purposes use custom arbiter instead of axis_switch
        if (sim == 0):
            # we don't want an input switch IFF 1 slave and mode is raw
            # if it is raw, we need just a single slave interface
            if num_slave_s_axis_global > 1 and tcl_user_app.fpga['comm'] in ['raw', 'none']:
                tcl_user_app.instBlock(
                    {
                        'name':'axis_switch',
                        'inst':'applicationRegion/input_switch',
                        'clks':['aclk'],
                        'resetns':['aresetn'],
                        'properties':['CONFIG.NUM_SI {1}',
                                      'CONFIG.NUM_MI {' + str(num_slave_s_axis_global) + '}',
                                      # BUGFIX: was 'CONFIG.ARG_ON_TLAST', a typo for the
                                      # axis_switch arbitration property ARB_ON_TLAST (PG085);
                                      # the misspelled property would fail/no-op in Vivado.
                                      'CONFIG.ARB_ON_TLAST {1}',
                                      'CONFIG.HAS_TLAST {1}'
                                      ]
                    }
                )
            elif tcl_user_app.fpga['comm'] not in ['raw', 'none']:
                if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
                    # two slave ports: one from the network, one for loopback
                    tcl_user_app.instBlock(
                        {
                            'name':'axis_switch',
                            'inst':'applicationRegion/input_switch',
                            'clks':['aclk'],
                            'resetns':['aresetn'],
                            'properties':['CONFIG.NUM_SI {2}',
                                          'CONFIG.NUM_MI {' + str(num_slave_s_axis_global) + '}',
                                          'CONFIG.HAS_TLAST {1}',
                                          'CONFIG.ARB_ON_TLAST {1}'
                                          ]
                        }
                    )
        else:
            if num_slave_s_axis_global > 1:
                tcl_user_app.instBlock(
                    {
                        'name':'axis_switch',
                        'inst':'applicationRegion/input_switch',
                        'clks':['aclk'],
                        'resetns':['aresetn'],
                        'properties':['CONFIG.NUM_SI {1}',
                                      'CONFIG.NUM_MI {' + str(num_slave_s_axis_global) + '}',
                                      'CONFIG.HAS_TLAST {1}',
                                      'CONFIG.ARB_ON_TLAST {1}'
                                      ]
                    }
                )
            else:
                # single kernel in simulation: HLS arbiter instead of a switch
                tcl_user_app.instBlock(
                    {
                        'name':'arbiter',
                        'lib':'hls',
                        'vendor':'xilinx.com',
                        'inst':'applicationRegion/arbiter',
                        'clks':['ap_clk'],
                        'resetns':['ap_rst_n'],
                    }
                )
        # Build the TDEST routing ranges of the input switch: each global
        # s_axis port routes exactly one kernel number (BASE == HIGH).
        switch_port_index = 0
        properties = ['CONFIG.ARB_ON_MAX_XFERS {0}']
        for kern in tcl_user_app.fpga['kernel']:
            if kern['s_axis'] != None:
                for s_axis in kern['s_axis']:
                    if s_axis['scope'] == 'global':
                        kernel_index_str = "0x{:08x}".format(int(kern['num']))
                        switch_port_index_str = "%02d"%switch_port_index
                        properties.append('CONFIG.M' + switch_port_index_str + '_AXIS_BASETDEST {' + kernel_index_str + '}')
                        properties.append('CONFIG.M' + switch_port_index_str + '_AXIS_HIGHTDEST {' + kernel_index_str + '}')
                        switch_port_index = switch_port_index + 1
        # this condition is prerequisite to have an input_switch
        # NOTE(review): for comm == 'none' with a single slave this condition
        # is still true although no input_switch was instantiated above --
        # verify that setProperties on a missing instance is harmless.
        if num_slave_s_axis_global > 1 or tcl_user_app.fpga['comm'] != 'raw':
            if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
                tcl_user_app.setProperties('applicationRegion/input_switch', properties)
    # Ask how many (global) m_axis connections are in the user app region.
    num_slave_m_axis_global = len(getInterfaces(tcl_user_app.fpga, 'm_axis', 'scope', 'global'))
    if num_slave_m_axis_global == 0:
        # TODO: CHANGE TO VIP FOR 0 SLAVES
        tcl_user_app.instBlock(
            {
                'name':'axis_switch',
                'inst':'applicationRegion/input_switch',
                'clks':['aclk'],
                'resetns':['aresetn'],
                'properties':['CONFIG.NUM_SI {2}',
                              # BUGFIX: num_slave_s_axis_global is an int; concatenating
                              # it directly to a str raised TypeError at runtime.
                              'CONFIG.NUM_MI {' + str(num_slave_s_axis_global) + '}',
                              'CONFIG.ARB_ON_TLAST {1}']
            }
        )
    # instantiate switch only if more than one output
    elif num_slave_m_axis_global > 1:
        tcl_user_app.instBlock(
            {
                'name':'axis_switch',
                'inst':'applicationRegion/output_switch',
                'clks':['aclk'],
                'resetns':['aresetn'],
                'properties':['CONFIG.NUM_SI {' + str(num_slave_s_axis_global) + '}',
                              'CONFIG.NUM_MI {1}',
                              'CONFIG.ARB_ON_TLAST {1}',
                              'CONFIG.M00_AXIS_HIGHTDEST {0xffffffff}']
            }
        )
def userApplicationRegionKernelConnectSwitches(outDir, tcl_user_app, sim):
"""
Now that the kernels, Galapagos router, and memory controllers are instantiated,
it's time to connect them all together.
Args:
tcl_user_app: a tclMe object (which contains references to the FPGA's
node object and a handle to the output file)
sim: I still don't really know what this does, exactly
"""
#iterate through all kernels on FPGA connecting them to the input and output switches and their control and memory interfaces
ctrl_interface_index = 0
mem_interface_index = 0
# Get list of all (global) s_axis. That is, all the kernel input streams
# By the way, the getInterfaces function has the side effect of adding refs
# to the interface's dict represntation which links to its parent kernel
# dict (under the 'kernel_inst' key).
s_axis_array = getInterfaces(tcl_user_app.fpga, 's_axis', 'scope', 'global')
# Now connect the Galapagos router through the input switch into all of
# the s_axis interfaces
if len(s_axis_array) > 1:
if(sim == 1):
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/arbiter',
'type':'intf',
'port_name':'M00_AXIS'
},
{'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'S00_AXIS'
}
)
# For each s_axis connection
for idx, s_axis in enumerate(s_axis_array):
instName = s_axis['kernel_inst']['inst']
idx_str = "%02d"%idx
# Connect it to the correct port on the AXI switch (NOT directly into
# the Galapagos router; there is an AXI stream switch IP between
# the router and the kernel(s) )
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'M' + idx_str + '_AXIS'
},
{
'name': instName,
'type':'intf',
'port_name':s_axis['name']
}
)
# custom_switch_inst only exists without raw
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
if 'custom' not in tcl_user_app.fpga | |
action_args = {}
capacity = parsed_args.capacity
adjustment = parsed_args.adjustment
percentage = parsed_args.percentage
min_size = parsed_args.min_size
max_size = parsed_args.max_size
min_step = parsed_args.min_step
wait = parsed_args.wait
if sum(v is not None for v in (capacity, adjustment, percentage,
min_size, max_size)) == 0:
raise exc.CommandError(_("At least one parameter of 'capacity', "
"'adjustment', 'percentage', 'min_size' "
"and 'max_size' should be specified."))
if sum(v is not None for v in (capacity, adjustment, percentage)) > 1:
raise exc.CommandError(_("Only one of 'capacity', 'adjustment' and"
" 'percentage' can be specified."))
action_args['adjustment_type'] = None
action_args['number'] = None
if capacity is not None:
if capacity < 0:
raise exc.CommandError(_('Cluster capacity must be larger than'
' or equal to zero.'))
action_args['adjustment_type'] = 'EXACT_CAPACITY'
action_args['number'] = capacity
if adjustment is not None:
if adjustment == 0:
raise exc.CommandError(_('Adjustment cannot be zero.'))
action_args['adjustment_type'] = 'CHANGE_IN_CAPACITY'
action_args['number'] = adjustment
if percentage is not None:
if (percentage == 0 or percentage == 0.0):
raise exc.CommandError(_('Percentage cannot be zero.'))
action_args['adjustment_type'] = 'CHANGE_IN_PERCENTAGE'
action_args['number'] = percentage
if min_step is not None and percentage is None:
raise exc.CommandError(_('Min step is only used with '
'percentage.'))
if min_size is not None:
if min_size < 0:
raise exc.CommandError(_('Min size cannot be less than zero.'))
if max_size is not None and max_size >= 0 and min_size > max_size:
raise exc.CommandError(_('Min size cannot be larger than '
'max size.'))
if capacity is not None and min_size > capacity:
raise exc.CommandError(_('Min size cannot be larger than the '
'specified capacity'))
if max_size is not None:
if capacity is not None and max_size > 0 and max_size < capacity:
raise exc.CommandError(_('Max size cannot be less than the '
'specified capacity.'))
# do a normalization
if max_size < 0:
max_size = -1
action_args['min_size'] = min_size
action_args['max_size'] = max_size
action_args['min_step'] = min_step
action_args['strict'] = parsed_args.strict
resp = senlin_client.resize_cluster(parsed_args.cluster, **action_args)
if 'action' in resp:
print('Request accepted by action: %s' % resp['action'])
if wait:
senlin_utils.await_action(senlin_client, resp['action'])
senlin_utils.await_cluster_status(senlin_client,
parsed_args.cluster)
return _show_cluster(senlin_client, parsed_args.cluster)
else:
print('Request error: %s' % resp)
return '', ''
class ScaleInCluster(command.ShowOne):
    """Scale in a cluster by the specified number of nodes."""

    log = logging.getLogger(__name__ + ".ScaleInCluster")

    def get_parser(self, prog_name):
        # Build the argument parser: optional node count, target cluster,
        # and an optional wait-for-completion flag.
        parser = super(ScaleInCluster, self).get_parser(prog_name)
        parser.add_argument(
            '--count', metavar='<count>',
            help=_('Number of nodes to be deleted from the specified cluster'))
        parser.add_argument(
            'cluster', metavar='<cluster>',
            help=_('Name or ID of cluster to operate on'))
        parser.add_argument(
            '--wait', action='store_true',
            help=_('Wait for cluster scale-in to complete'))
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.clustering
        resp = client.scale_in_cluster(parsed_args.cluster,
                                       parsed_args.count)
        # A 409 means the cluster is locked by another action.
        if resp.get('code', None) in [409]:
            raise exc.CommandError(_(
                'Unable to scale in cluster: %s') % resp['error']['message'])
        if 'action' not in resp:
            print('Request error: %s' % resp)
            return '', ''
        print('Request accepted by action: %s' % resp['action'])
        if parsed_args.wait:
            senlin_utils.await_action(client, resp['action'])
            senlin_utils.await_cluster_status(client, parsed_args.cluster)
        return _show_cluster(client, parsed_args.cluster)
class ScaleOutCluster(command.ShowOne):
    """Scale out a cluster by the specified number of nodes."""

    log = logging.getLogger(__name__ + ".ScaleOutCluster")

    def get_parser(self, prog_name):
        # Mirror of ScaleInCluster: count, cluster, and wait flag.
        parser = super(ScaleOutCluster, self).get_parser(prog_name)
        parser.add_argument(
            '--count', metavar='<count>',
            help=_('Number of nodes to be added to the specified cluster'))
        parser.add_argument(
            'cluster', metavar='<cluster>',
            help=_('Name or ID of cluster to operate on'))
        parser.add_argument(
            '--wait', action='store_true',
            help=_('Wait for cluster scale-out to complete'))
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.clustering
        resp = client.scale_out_cluster(parsed_args.cluster,
                                        parsed_args.count)
        # A 409 means the cluster is locked by another action.
        if resp.get('code', None) in [409]:
            raise exc.CommandError(_(
                'Unable to scale out cluster: %s') % resp['error']['message'])
        if 'action' not in resp:
            print('Request error: %s' % resp)
            return '', ''
        print('Request accepted by action: %s' % resp['action'])
        if parsed_args.wait:
            senlin_utils.await_action(client, resp['action'])
            senlin_utils.await_cluster_status(client, parsed_args.cluster)
        return _show_cluster(client, parsed_args.cluster)
class ClusterPolicyAttach(command.Command):
    """Attach policy to cluster."""

    log = logging.getLogger(__name__ + ".ClusterPolicyAttach")

    def get_parser(self, prog_name):
        parser = super(ClusterPolicyAttach, self).get_parser(prog_name)
        parser.add_argument(
            '--enabled', metavar='<boolean>', default=True,
            help=_('Whether the policy should be enabled once attached. '
                   'Default to True'))
        parser.add_argument(
            '--policy', metavar='<policy>', required=True,
            help=_('ID or name of policy to be attached'))
        parser.add_argument(
            'cluster', metavar='<cluster>',
            help=_('Name or ID of cluster to operate on'))
        parser.add_argument(
            '--wait', action='store_true',
            help=_('Wait for cluster policy-attach to complete'))
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.clustering
        # --enabled arrives as a string on the CLI; strict parsing rejects
        # anything that is not a recognized boolean spelling.
        enabled = strutils.bool_from_string(parsed_args.enabled, strict=True)
        resp = client.attach_policy_to_cluster(parsed_args.cluster,
                                               parsed_args.policy,
                                               enabled=enabled)
        if 'action' not in resp:
            print('Request error: %s' % resp)
            return
        print('Request accepted by action: %s' % resp['action'])
        if parsed_args.wait:
            senlin_utils.await_action(client, resp['action'])
class ClusterPolicyDetach(command.Command):
    """Detach policy from cluster."""

    log = logging.getLogger(__name__ + ".ClusterPolicyDetach")

    def get_parser(self, prog_name):
        parser = super(ClusterPolicyDetach, self).get_parser(prog_name)
        parser.add_argument(
            '--policy', metavar='<policy>', required=True,
            help=_('ID or name of policy to be detached'))
        parser.add_argument(
            'cluster', metavar='<cluster>',
            help=_('Name or ID of cluster to operate on'))
        parser.add_argument(
            '--wait', action='store_true',
            help=_('Wait for cluster policy-detach to complete'))
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.clustering
        resp = client.detach_policy_from_cluster(parsed_args.cluster,
                                                 parsed_args.policy)
        if 'action' not in resp:
            print('Request error: %s' % resp)
            return
        print('Request accepted by action: %s' % resp['action'])
        if parsed_args.wait:
            senlin_utils.await_action(client, resp['action'])
class ClusterNodeList(command.Lister):
    """List nodes from cluster."""

    log = logging.getLogger(__name__ + ".ClusterNodeList")

    def get_parser(self, prog_name):
        """Build the argument parser for the node-list command."""
        parser = super(ClusterNodeList, self).get_parser(prog_name)
        parser.add_argument(
            '--filters',
            metavar='<key1=value1;key2=value2...>',
            help=_("Filter parameters to apply on returned nodes. "
                   "This can be specified multiple times, or once with "
                   "parameters separated by a semicolon. The valid filter "
                   "keys are: ['status', 'name']"),
            action='append'
        )
        parser.add_argument(
            '--sort',
            metavar='<key>[:<direction>]',
            help=_("Sorting option which is a string containing a list of "
                   "keys separated by commas. Each key can be optionally "
                   "appended by a sort direction (:asc or :desc)' The valid "
                   "sort keys are:['index', 'name', 'status', 'init_at', "
                   "'created_at', 'updated_at']")
        )
        parser.add_argument(
            '--limit',
            metavar='<limit>',
            help=_('Limit the number of nodes returned')
        )
        parser.add_argument(
            '--marker',
            metavar='<id>',
            help=_('Only return nodes that appear after the given node ID')
        )
        parser.add_argument(
            '--full-id',
            default=False,
            action="store_true",
            help=_('Print full IDs in list')
        )
        parser.add_argument(
            'cluster',
            metavar='<cluster>',
            # BUGFIX: help text read 'Name or ID of cluster to nodes from'
            help=_('Name or ID of cluster to list nodes from')
        )
        return parser

    def take_action(self, parsed_args):
        """Query the clustering service and return (columns, rows)."""
        self.log.debug("take_action(%s)", parsed_args)
        senlin_client = self.app.client_manager.clustering
        queries = {
            'cluster_id': parsed_args.cluster,
            'sort': parsed_args.sort,
            'limit': parsed_args.limit,
            'marker': parsed_args.marker,
        }
        if parsed_args.filters:
            queries.update(senlin_utils.format_parameters(parsed_args.filters))
        nodes = senlin_client.nodes(**queries)
        if not parsed_args.full_id:
            # Abbreviate IDs to 8 characters; physical_id may be None/empty.
            formatters = {
                'id': lambda x: x[:8],
                'physical_id': lambda x: x[:8] if x else ''
            }
        else:
            formatters = {}
        columns = ['id', 'name', 'index', 'status', 'physical_id',
                   'created_at']
        return (
            columns,
            (utils.get_item_properties(n, columns, formatters=formatters)
             for n in nodes)
        )
class ClusterNodeAdd(command.ShowOne):
    """Add specified nodes to cluster."""

    log = logging.getLogger(__name__ + ".ClusterNodeAdd")

    def get_parser(self, prog_name):
        parser = super(ClusterNodeAdd, self).get_parser(prog_name)
        parser.add_argument(
            '--nodes', metavar='<nodes>', required=True,
            help=_('ID or name of nodes to be added; multiple nodes can be'
                   ' separated with ","'))
        parser.add_argument(
            'cluster', metavar='<cluster>',
            help=_('Name or ID of cluster to operate on'))
        parser.add_argument(
            '--wait', action='store_true',
            help=_('Wait for cluster members add to complete'))
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.clustering
        # --nodes is a single comma-separated string on the CLI.
        nodes = parsed_args.nodes.split(',')
        resp = client.add_nodes_to_cluster(parsed_args.cluster, nodes)
        if 'action' not in resp:
            print('Request error: %s' % resp)
            return '', ''
        print('Request accepted by action: %s' % resp['action'])
        if parsed_args.wait:
            senlin_utils.await_action(client, resp['action'])
        return _show_cluster(client, parsed_args.cluster)
class ClusterNodeDel(command.ShowOne):
    """Delete specified nodes from cluster."""

    log = logging.getLogger(__name__ + ".ClusterNodeDel")

    def get_parser(self, prog_name):
        parser = super(ClusterNodeDel, self).get_parser(prog_name)
        parser.add_argument(
            '--nodes', metavar='<nodes>', required=True,
            help=_('Name or ID of nodes to be deleted; multiple nodes can be '
                   'separated with ","'))
        parser.add_argument(
            '-d', '--destroy-after-deletion',
            required=False, default=False,
            help=_('Whether nodes should be destroyed after deleted. '
                   'Default is False.'))
        parser.add_argument(
            'cluster', metavar='<cluster>',
            help=_('Name or ID of cluster to operate on'))
        parser.add_argument(
            '--wait', action='store_true',
            help=_('Wait for cluster members delete to complete'))
        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)", parsed_args)
        client = self.app.client_manager.clustering
        # -d arrives as a string when given on the CLI; strict parsing rejects
        # unrecognized boolean spellings.
        destroy = strutils.bool_from_string(
            parsed_args.destroy_after_deletion, strict=True)
        resp = client.remove_nodes_from_cluster(
            parsed_args.cluster,
            parsed_args.nodes.split(','),
            destroy_after_deletion=destroy)
        if 'action' not in resp:
            print('Request error: %s' % resp)
            return '', ''
        print('Request accepted by action: %s' % resp['action'])
        if parsed_args.wait:
            senlin_utils.await_action(client, resp['action'])
        return _show_cluster(client, parsed_args.cluster)
class ClusterNodeReplace(command.ShowOne):
"""Replace the nodes in a cluster with specified nodes."""
log = logging.getLogger(__name__ + ".ClusterNodeReplace")
def get_parser(self, prog_name):
parser = super(ClusterNodeReplace, self).get_parser(prog_name)
parser.add_argument(
'--nodes',
metavar='<OLD_NODE1=NEW_NODE1>',
required=True,
help=_("OLD_NODE is the name or ID of a node to be replaced, "
"NEW_NODE is the name or ID of a node as replacement. "
"This can be specified multiple times, or once with "
"node-pairs separated by | |
# Repository: lund5000/chirpradio
# -*- coding: utf-8 -*-
###
### Copyright 2009 The Chicago Independent Radio Project
### All Rights Reserved.
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
###
import logging
import re
import time
import unicodedata
from google.appengine.ext import db
from djdb import models
from common.autoretry import AutoRetry
# All search data used by this code is marked with this generation.
_GENERATION = 1
###
### Text Normalization
###
# This mapping is used when normalizing characters when indexing text
# and processing search queries.
_CHARACTER_NORMALIZATIONS = {
u'ø': 'o',
}
# These words are ignored when indexing text and processing search
# queries.
_STOP_WORDS = set(['and', 'in', 'is', 'it', 'my', 'of', 'the', 'to'])
def _is_stop_word(term):
    """Return True if term should be ignored (too short or a stop word)."""
    if len(term) <= 1:
        return True
    return term in _STOP_WORDS
def _is_stop_word_prefix(prefix):
    """Return True if any stop word starts with the given prefix."""
    for stop_word in _STOP_WORDS:
        if stop_word.startswith(prefix):
            return True
    return False
def _scrub_char(c):
    """Normalize a character for use in indexing and searching.

    Among other things, this removes diacritics and strips out punctuation.
    """
    c = c.lower()
    category = unicodedata.category(c)[0]
    if category in ("L", "N"):
        # Letter or number: strip diacritics via NFD decomposition, then
        # apply any explicit normalization override.
        base = unicodedata.normalize("NFD", c)[0]
        return _CHARACTER_NORMALIZATIONS.get(base, base)
    if c == "'":
        # Filter out apostrophes, so "foo's" will become "foos".
        return ""
    # Other types of characters are replaced by whitespace.
    return " "
# This matches interior periods, i.e. "L.A"
_COLLAPSE_INTERIOR_PERIODS_RE = re.compile(r"(\S)\.(\S)")
def scrub(text):
    """Normalize a text string for use in indexing and searching."""
    # Strip out interior periods first (e.g. "L.A" -> "LA"), then
    # normalize character by character.
    collapsed = _COLLAPSE_INTERIOR_PERIODS_RE.sub(r"\1\2", text)
    return "".join(_scrub_char(c) for c in collapsed)
def explode(text):
    """Split a piece of text into a normalized list of terms.

    Stop words are stripped out, along with any other
    un-indexable/searchable content.
    """
    terms = scrub(text).split()
    return [t for t in terms if not _is_stop_word(t)]
def strip_tags(text):
    """Remove all tags from a string.

    A tag is a chunk of text enclosed in square brackets, [like this].
    Empty brackets "[]" are left untouched (the pattern requires at least
    one character inside).
    """
    tag_pattern = re.compile(r"\[[^\]]+\]")
    return tag_pattern.sub("", text)
###
### Indexing
###
class Indexer(object):
    """Builds a searchable index of text associated with datastore entities.

    All SearchMatches objects created by one Indexer share a single parent
    key (the "transaction"), so save() can write them as one entity group.
    """

    def __init__(self, transaction=None):
        """Initialize the indexer.

        Args:
          transaction: Optional db.Key used as the entity-group parent for
            all created SearchMatches; a timestamp-based key is generated
            when omitted.
        """
        # A cache of our pending, to-be-written SearchMatches objects.
        self._matches = {}
        # Additional objects to save at the same time as the
        # SearchMatches.
        self._txn_objects_to_save = []
        if transaction:
            self._transaction = transaction
        else:
            # We use the current time in microseconds as the transaction ID.
            timestamp = int(1000000 * time.time())
            self._transaction = db.Key.from_path("IndexerTransaction",
                                                 timestamp)

    @property
    def transaction(self):
        """Transaction used for all created SearchMatches objects.

        We expose this so that entities being indexed can be created inside
        the same transaction, allowing both the objects and the index data
        to be written into the datastore in an atomic operation.
        """
        return self._transaction

    def _get_matches(self, entity_kind, field, term, key=None):
        """Returns a cached SearchMatches object for a given kind and term.

        When key is given, an existing SearchMatches that already contains
        that key is fetched from the datastore before a new one is created.
        """
        _key = (entity_kind, field, term)
        sm = self._matches.get(_key)
        if sm is None:
            if key:
                q = models.SearchMatches.all()
                q.filter("entity_kind =", entity_kind)
                q.filter("field =", field)
                q.filter("term =", term)
                q.filter("matches =", key)
                sms = q.fetch(1)
                if sms:
                    sm = sms[0]
            if sm is None:
                sm = models.SearchMatches(generation=_GENERATION,
                                          entity_kind=entity_kind,
                                          field=field,
                                          term=term,
                                          parent=self.transaction)
            self._matches[_key] = sm
        return sm

    def add_key(self, key, field, text):
        """Prepare to index content associated with a datastore key.

        Args:
          key: A db.Key instance.
          field: A field identifier string.
          text: A unicode string, the content to be indexed.
        """
        for term in set(explode(text)):
            sm = self._get_matches(key.kind(), field, term)
            sm.matches.append(key)

    def add_artist(self, artist):
        """Prepare to index metadata associated with an Artist instance.

        artist must have the indexer's transaction as its parent key.
        artist is saved when the indexer's save() method is called.
        """
        assert artist.parent_key() == self.transaction
        self.add_key(artist.key(), "name", artist.name)
        self._txn_objects_to_save.append(artist)

    def add_album(self, album):
        """Prepare to index metadata associated with an Album instance.

        album must have the indexer's transaction as its parent key.
        album is saved when the indexer's save() method is called.
        """
        assert album.parent_key() == self.transaction
        self.add_key(album.key(), "title", strip_tags(album.title))
        self.add_key(album.key(), "artist", album.artist_name)
        if album.label is not None:
            self.add_key(album.key(), "label", album.label)
        if album.year is not None:
            self.add_key(album.key(), "year", unicode(album.year))
        self._txn_objects_to_save.append(album)

    def add_track(self, track):
        """Prepare to index metadata associated with a Track instance.

        track must have the indexer's transaction as its parent key.
        track is saved when the indexer's save() method is called.
        """
        assert track.parent_key() == self.transaction
        self.add_key(track.key(), "title", strip_tags(track.title))
        self.add_key(track.key(), "album", strip_tags(track.album.title))
        self.add_key(track.key(), "artist", track.artist_name)
        self._txn_objects_to_save.append(track)

    def remove_key(self, key, field, text):
        """Remove a key from the index entries for every term in text."""
        for term in set(explode(text)):
            sm = self._get_matches(key.kind(), field, term, key)
            sm.matches.remove(key)
            if not sm.matches:
                # Remove empty search index from datastore.
                AutoRetry(db).delete(sm)
                # Remove cached entry.
                _key = (key.kind(), field, term)
                if _key in self._matches:
                    del self._matches[_key]

    def update_key(self, key, field, old_text, text):
        """Update index content associated with a datastore key.

        Args:
          key: A db.Key instance.
          field: A field identifier string.
          old_text: A unicode string, the old text, to be updated with text.
          text: A unicode string, the content to be indexed.
        """
        # Remove old terms.
        for term in set(explode(old_text)):
            sm = self._get_matches(key.kind(), field, term, key)
            if key in sm.matches:
                sm.matches.remove(key)
        # Add new terms.
        if text is not None:
            self.add_key(key, field, text)

    def update_artist(self, artist, fields):
        """Update index metadata associated with an Artist instance.

        Args:
          artist: An Artist instance.
          fields: A dictionary of field/property names to update and new
            values.

        artist must have the indexer's transaction as its parent key.
        artist is saved when the indexer's save() method is called.
        """
        assert artist.parent_key() == self.transaction
        for field, value in fields.iteritems():
            self.update_key(artist.key(), field,
                            unicode(getattr(artist, field)), unicode(value))
            setattr(artist, field, value)
        self._txn_objects_to_save.append(artist)

    def update_album(self, album, fields):
        """Update index metadata associated with an Album instance.

        Args:
          album: An Album instance.
          fields: A dictionary of field/property names to update and new
            values.

        album must have the indexer's transaction as its parent key.
        album is saved when the indexer's save() method is called.
        """
        assert album.parent_key() == self.transaction
        for field, value in fields.iteritems():
            self.update_key(album.key(), field,
                            unicode(getattr(album, field)), unicode(value))
            setattr(album, field, value)
        self._txn_objects_to_save.append(album)

    def update_track(self, track, fields):
        """Update index metadata associated with a Track instance.

        Args:
          track: A Track instance.
          fields: A dictionary of field/properties to update and new values.

        track must have the indexer's transaction as its parent key.
        track is saved when the indexer's save() method is called.
        """
        assert track.parent_key() == self.transaction
        for field, value in fields.iteritems():
            self.update_key(track.key(), field,
                            unicode(getattr(track, field)), unicode(value))
            setattr(track, field, value)
        self._txn_objects_to_save.append(track)

    def save(self, rpc=None):
        """Write all pending index data into the Datastore."""
        self._txn_objects_to_save.extend(self._matches.itervalues())
        # All of the objects in self._txn_objects_to_save are part of
        # the same entity group. This ensures that db.save is an
        # atomic operation --- either all of the objects are
        # successfully saved or none are.
        kwargs = {}
        if rpc is not None:
            kwargs["rpc"] = rpc
        AutoRetry(db).save(self._txn_objects_to_save, **kwargs)
        # Reset pending state.  BUGFIX: the original final statement was the
        # bare expression "self._txn_objects_to_save" (a no-op), which left
        # already-persisted objects queued and re-saved by any later save().
        self._matches = {}
        self._txn_objects_to_save = []
def optimize_index(term):
"""Optimize our index for a specific term.
Locates all SearchMatches objects associated with the given term
and merges them together so that there is only one SearchMatches
per entity kind and field.
Args:
text: A normalized search term.
Returns:
The decrease in the number of SearchMatches objects as a result of
the optimization.
"""
query = models.SearchMatches.all().filter("term =", term)
# First we iterate over all of the SearchMatches associated with
# particular term and segment them by entity kind and field.
segmented = {}
for sm in AutoRetry(query).fetch(999):
# Skip anything outside the current generation.
if sm.generation != _GENERATION:
continue
key = (sm.entity_kind, sm.field)
subset = segmented.get(key)
if not subset:
subset = segmented[key] = []
subset.append(sm)
num_deleted = 0
# Is this term a stop word? In that case we can just delete
# everything that we found. This case occurs when new stop words
# are added to the list.
if _is_stop_word(term):
for subset in segmented.itervalues():
db.delete(subset)
num_deleted += len(subset)
return num_deleted
# Now for any segment that contains more than | |
from operator import mul
import sys
import matplotlib.pyplot as plt
import numpy as np
from holoviews import opts
from scipy.signal.ltisys import dfreqresp
from scipy.spatial import Voronoi
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
import pandas as pd
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual, Text, interactive_output
from ipywidgets import Button, HBox, VBox,Layout,Label
import panel as pn
import seaborn as sns
from kneed import KneeLocator
from PatientGraphPheno import *
from patientKG.config.bedrock_connection import *
#from patientKG import utils_pickle
import patientKG.utils_pickle
from holoviews.operation.datashader import datashade, bundle_graph
import holoviews as hv
from holoviews import opts
from datetime import datetime
import re
import plotly.graph_objects as go
from pivottablejs import pivot_ui
from IPython.display import display, HTML
from sklearn.feature_selection import VarianceThreshold
from sklearn import preprocessing
import urllib, json
sns.set(style="ticks")
hv.extension('bokeh')
defaults = dict(width=1000, height=1000, padding=0.1)
from patientKG.tests.test_graphs import *
from ipywidgets import TwoByTwoLayout
import itertools
import time
from IPython.display import IFrame
import json, io
from patientKG.priorKnowledge.Hb1AC import *
from patientKG.priorKnowledge.Albumin import *
from patientKG.priorKnowledge.FBC import *
from patientKG.priorKnowledge.Creactive import *
from scipy.stats import chi2_contingency
import scipy.stats as stats
def show_SpellHRG_HRG_Table(HRG, Degree, Readmit):
    """Return the rows of the pickled Degree_ReAdmitted_HRG table matching
    one HRG code, an inclusive degree range and an inclusive readmission
    range.

    Parameters
    ----------
    HRG : str - SpellHRG code to select
    Degree : (int, int) - inclusive Sum_Degree bounds
    Readmit : (int, int) - inclusive 'ReAdmitted in DAYS' bounds
    """
    table = patientKG.utils_pickle.read("Degree_ReAdmitted_HRG")
    hrg_mask = table['SpellHRG'] == HRG
    degree_mask = ((table['Sum_Degree'] >= Degree[0])
                   & (table['Sum_Degree'] <= Degree[1]))
    readmit_mask = ((table['ReAdmitted in DAYS'] >= Readmit[0])
                    & (table['ReAdmitted in DAYS'] <= Readmit[1]))
    return table.loc[hrg_mask & degree_mask & readmit_mask]
#This below block is for Jupyter-Notebook
"""stats = interact(PatientGraphVisuExplore.show_SpellHRG_HRG_Table,
HRG=widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_Income_Scatter(HRG, Degree, Readmit):
    """Scatter-plot Sum_Degree against INCOME for one HRG code.

    Parameters
    ----------
    HRG : str - SpellHRG code to select
    Degree : (int, int) - inclusive Sum_Degree bounds
    Readmit : (int, int) - inclusive 'ReAdmitted in DAYS' bounds
    """
    # FIX: use the qualified name (as show_SpellHRG_HRG_Table does); the
    # bare "utils_pickle" relied on a commented-out import and only
    # "import patientKG.utils_pickle" is active at module top.
    Degree_ReAdmitted_HRG = patientKG.utils_pickle.read("Degree_ReAdmitted_HRG")
    data = Degree_ReAdmitted_HRG.loc[
        (Degree_ReAdmitted_HRG['SpellHRG'] == HRG)
        & ((Degree_ReAdmitted_HRG['Sum_Degree'] >= Degree[0])
           & (Degree_ReAdmitted_HRG['Sum_Degree'] <= Degree[1]))
        & ((Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] >= Readmit[0])
           & (Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <= Readmit[1]))]
    plt.scatter(data['Sum_Degree'], data['INCOME'], edgecolors='r')
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_Income_Scatter,
HRG=widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_LOS_Scatter(HRG, Degree, Readmit):
    """Scatter-plot Sum_Degree against Total_LOS for each selected HRG code.

    Parameters
    ----------
    HRG : iterable of str - SpellHRG codes to plot (one scatter per code)
    Degree : (int, int) - inclusive Sum_Degree bounds
    Readmit : (int, int) - inclusive 'ReAdmitted in DAYS' bounds
    """
    # FIX: qualified name; bare "utils_pickle" is not bound by the module's
    # active imports (only "import patientKG.utils_pickle").
    Degree_ReAdmitted_HRG = patientKG.utils_pickle.read("Degree_ReAdmitted_HRG")
    for item in HRG:
        data = Degree_ReAdmitted_HRG.loc[
            (Degree_ReAdmitted_HRG['SpellHRG'] == item)
            & ((Degree_ReAdmitted_HRG['Sum_Degree'] >= Degree[0])
               & (Degree_ReAdmitted_HRG['Sum_Degree'] <= Degree[1]))
            & ((Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] >= Readmit[0])
               & (Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <= Readmit[1]))]
        plt.scatter(data['Sum_Degree'], data['Total_LOS'], edgecolors='r')
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_LOS_Scatter,
HRG=widgets.SelectMultiple(
options=list(Degree_HRG['SpellHRG'].dropna().unique()),
value=['WJ06E'],
#rows=10,
description='HRG',
disabled=False
)
#widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_Pairplot(HRG, Degree, Readmit):
    """Draw a seaborn pairplot of the filtered spells, colored by SpellHRG.

    Parameters
    ----------
    HRG : iterable of str - SpellHRG codes to include
    Degree : (int, int) - inclusive Sum_Degree bounds
    Readmit : (int, int) - inclusive 'ReAdmitted in DAYS' bounds
    """
    df = pd.DataFrame()
    # FIX: qualified name; bare "utils_pickle" is not bound by the module's
    # active imports (only "import patientKG.utils_pickle").
    Degree_ReAdmitted_HRG = patientKG.utils_pickle.read("Degree_ReAdmitted_HRG")
    for item in HRG:
        data = Degree_ReAdmitted_HRG.loc[
            (Degree_ReAdmitted_HRG['SpellHRG'] == item)
            & ((Degree_ReAdmitted_HRG['Sum_Degree'] >= Degree[0])
               & (Degree_ReAdmitted_HRG['Sum_Degree'] <= Degree[1]))
            & ((Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] >= Readmit[0])
               & (Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <= Readmit[1]))]
        df = pd.concat([df, data])
    # Drop identifier/outcome columns so only features are paired.
    sns.pairplot(
        df[df.columns.difference(
            ['ACTIVITY_IDENTIFIER', 'POD_CODE', 'ReAdmitted in DAYS'])],
        hue="SpellHRG")
"""
stats = interact(PatientGraphVisuExplore.plot_SpellHRG_Pairplot,
HRG=widgets.SelectMultiple(
options=list(Degree_HRG['SpellHRG'].dropna().unique()),
value=['WJ06E'],
#rows=10,
description='HRG',
disabled=False
)
#widgets.Dropdown(options=list(Degree_HRG['SpellHRG'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,100],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_HRG_ICD(HRG, ICD, Degree, Readmit, POD):
    """Cluster spells of the selected HRG codes and plot a PCA/Voronoi view.

    Filters the pickled "Degree_ReAdmitted_HRG" table by HRG codes, optional
    primary diagnosis (ICD), POD code, degree range and readmission range;
    standardizes the feature columns, projects them onto two PCA components,
    k-means-clusters them (k=2) into minor/major complexity groups, and
    draws the annotated scatter plot with Voronoi regions.

    Parameters
    ----------
    HRG : iterable of str - SpellHRG codes to include
    ICD : str or None - primary diagnosis filter; None disables it
    Degree : (int, int) - inclusive Sum_Degree bounds
    Readmit : (int, int) - inclusive 'ReAdmitted in DAYS' bounds
    POD : str - POD_CODE to filter on

    Returns
    -------
    pd.DataFrame with identifiers, features and diagnosis columns of the
    plotted spells.
    """
    df = pd.DataFrame()
    # FIX: qualified name; bare "utils_pickle" is not bound by the module's
    # active imports (only "import patientKG.utils_pickle").
    Degree_ReAdmitted_HRG = patientKG.utils_pickle.read("Degree_ReAdmitted_HRG")
    # Collect rows matching every filter, one HRG code at a time.
    for item in HRG:
        if ICD is None:
            data = Degree_ReAdmitted_HRG.loc[
                (Degree_ReAdmitted_HRG['SpellHRG'] == item)
                & (Degree_ReAdmitted_HRG['POD_CODE'] == POD)
                & ((Degree_ReAdmitted_HRG['Sum_Degree'] >= Degree[0])
                   & (Degree_ReAdmitted_HRG['Sum_Degree'] <= Degree[1]))
                & ((Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] >= Readmit[0])
                   & (Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <= Readmit[1]))]
        else:
            data = Degree_ReAdmitted_HRG.loc[
                (Degree_ReAdmitted_HRG['SpellPDiag'] == ICD)
                & (Degree_ReAdmitted_HRG['SpellHRG'] == item)
                & (Degree_ReAdmitted_HRG['POD_CODE'] == POD)
                & ((Degree_ReAdmitted_HRG['Sum_Degree'] >= Degree[0])
                   & (Degree_ReAdmitted_HRG['Sum_Degree'] <= Degree[1]))
                & ((Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] >= Readmit[0])
                   & (Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <= Readmit[1]))]
        df = pd.concat([df, data])
    features = ['Sum_Degree', 'Global_Central', 'Total_LOS', 'INCOME']
    # Standardize the feature block and reduce it to two PCA components.
    x = StandardScaler().fit_transform(df.loc[:, features].values)
    pca = PCA(n_components=2)
    principalComponents = pca.fit_transform(x)
    principalDf = pd.DataFrame(
        data=principalComponents,
        columns=['principal component 1', 'principal component 2'])
    # Voronoi needs at least four points, though clusters not less than 4.
    kmeans = KMeans(n_clusters=2)
    kmeans.fit(principalDf)
    labels = kmeans.predict(principalDf)
    centroids = kmeans.cluster_centers_
    v = np.vstack([centroids, [0, 0]])  # NOTE(review): unused downstream
    vor = Voronoi(principalComponents)
    # Helper (from PatientGraphPheno wildcard import) that closes the
    # infinite Voronoi regions so they can be filled.
    regions, vertices = voronoi_finite_polygons_2d(vor)
    fig = plt.figure(figsize=(10, 10))
    colmap = {1: 'g', 2: 'r', 3: 'b', 4: 'y'}
    marker = {1: 'circle', 2: 'diamond', 3: 'dot', 4: 'triangle'}
    size = {1: 2, 2: 2, 3: 2, 4: 2}
    colors = list(map(lambda lbl: colmap[lbl + 1], labels))
    markers = list(map(lambda lbl: marker[lbl + 1], labels))
    sizes = list(map(lambda lbl: size[lbl + 1], labels))
    df['principal component 1'] = principalComponents[:, 0]
    df['principal component 2'] = principalComponents[:, 1]
    df['color'] = colors
    df['marker'] = markers
    df['sizes'] = sizes
    opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
    # Kept for the (commented-out) holoviews rendering path.
    data = {'x': list(df['principal component 1']),
            'y': list(df['principal component 2']),
            'color': list(df['color']),
            'marker': list(df['marker']),
            'sizes': list(df['sizes'])}
    plt.scatter(df['principal component 1'], df['principal component 2'],
                color=colors, alpha=0.5, edgecolor='k')
    df['labels'] = labels
    # Per-cluster [row count, mean degree]; sorting ranks minor vs major.
    shape_ = {}
    for item in list(df['labels'].unique()):
        shape_.update({item: [(df[df['labels'] == item].shape[0]),
                              df[df['labels'] == item]['Sum_Degree'].mean()]})
        print('Complex Degree:', df[df['labels'] == item]['Sum_Degree'].mean())
    minor_ = sorted(shape_.items(), key=lambda kv: kv[1])[0][0]
    major_ = sorted(shape_.items(), key=lambda kv: kv[1])[1][0]
    # Annotate every point with its spell identifier.
    for label, px, py in zip(df['ACTIVITY_IDENTIFIER'],
                             df['principal component 1'],
                             df['principal component 2']):
        plt.annotate(label, (px, py), textcoords="offset points",
                     xytext=(0, 10), ha='center', size=20)
    # Fill each Voronoi region with its point's cluster color.
    for region, colour in zip(regions, df['color']):
        polygon = vertices[region]
        plt.fill(*zip(*polygon), alpha=0.4, color=colour)
    plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
    plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
    print('Minor Complex Degree:', df[df['labels'] == minor_]['Sum_Degree'].mean())
    print('Major Complex Degree:', df[df['labels'] == major_]['Sum_Degree'].mean())
    return df[(df['POD_CODE'] == POD)][
        ['ACTIVITY_IDENTIFIER', 'age', 'sex', 'SpellHRG'] + features
        + ['ReAdmitted in DAYS', 'POD_CODE', 'SpellPDiag', 'SpellSDiag']]
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_HRG_ICD,
HRG=widgets.SelectMultiple(
options=
init_code,
#list(Degree_HRG['SpellHRG'].dropna().unique()),
value=init_code,
#rows=10,
description='HRG',
disabled=False
),
ICD=widgets.Dropdown(
options=
#init_code,
sorted(list(Degree_HRG['SpellPDiag'].dropna().unique())),value=None
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Readmit=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=30,
step=1,
description='ReAdmitted:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d'))"""
def plot_SpellHRG_ICD(ICD, Degree, Age, POD):
    """Cluster spells of the selected primary diagnoses and plot a
    PCA/Voronoi view.

    Same pipeline as plot_SpellHRG_HRG_ICD but filtered by primary
    diagnosis codes and an age range instead of HRG codes and a
    readmission range.

    Parameters
    ----------
    ICD : iterable of str - SpellPDiag codes to include
    Degree : (int, int) - inclusive Sum_Degree bounds
    Age : (int, int) - inclusive age bounds (age column cast to int)
    POD : str - POD_CODE to filter on

    Returns
    -------
    pd.DataFrame with demographics, features and diagnosis columns of the
    plotted spells.
    """
    df = pd.DataFrame()
    # FIX: qualified name; bare "utils_pickle" is not bound by the module's
    # active imports (only "import patientKG.utils_pickle").
    Degree_ReAdmitted_HRG = patientKG.utils_pickle.read("Degree_ReAdmitted_HRG")
    # Collect rows matching every filter, one diagnosis code at a time.
    for item in ICD:
        data = Degree_ReAdmitted_HRG.loc[
            (Degree_ReAdmitted_HRG['SpellPDiag'] == item)
            & (Degree_ReAdmitted_HRG['POD_CODE'] == POD)
            & ((Degree_ReAdmitted_HRG['Sum_Degree'] >= Degree[0])
               & (Degree_ReAdmitted_HRG['Sum_Degree'] <= Degree[1]))
            & ((Degree_ReAdmitted_HRG['age'].astype(int) >= Age[0])
               & (Degree_ReAdmitted_HRG['age'].astype(int) <= Age[1]))]
        df = pd.concat([df, data])
    features = ['Sum_Degree', 'Global_Central', 'Total_LOS', 'INCOME']
    # Standardize the feature block and reduce it to two PCA components.
    x = StandardScaler().fit_transform(df.loc[:, features].values)
    pca = PCA(n_components=2)
    principalComponents = pca.fit_transform(x)
    principalDf = pd.DataFrame(
        data=principalComponents,
        columns=['principal component 1', 'principal component 2'])
    # Voronoi needs at least four points, though clusters not less than 4.
    kmeans = KMeans(n_clusters=2)
    kmeans.fit(principalDf)
    labels = kmeans.predict(principalDf)
    centroids = kmeans.cluster_centers_
    v = np.vstack([centroids, [0, 0]])  # NOTE(review): unused downstream
    vor = Voronoi(principalComponents)
    # Helper (from PatientGraphPheno wildcard import) that closes the
    # infinite Voronoi regions so they can be filled.
    regions, vertices = voronoi_finite_polygons_2d(vor)
    fig = plt.figure(figsize=(10, 10))
    colmap = {1: 'g', 2: 'r', 3: 'b', 4: 'y'}
    marker = {1: 'circle', 2: 'diamond', 3: 'dot', 4: 'triangle'}
    size = {1: 2, 2: 2, 3: 2, 4: 2}
    colors = list(map(lambda lbl: colmap[lbl + 1], labels))
    markers = list(map(lambda lbl: marker[lbl + 1], labels))
    sizes = list(map(lambda lbl: size[lbl + 1], labels))
    df['principal component 1'] = principalComponents[:, 0]
    df['principal component 2'] = principalComponents[:, 1]
    df['color'] = colors
    df['marker'] = markers
    df['sizes'] = sizes
    opts.defaults(opts.Points(padding=0.1, size=8, line_color='black'))
    # Kept for the (commented-out) holoviews rendering path.
    data = {'x': list(df['principal component 1']),
            'y': list(df['principal component 2']),
            'color': list(df['color']),
            'marker': list(df['marker']),
            'sizes': list(df['sizes'])}
    plt.scatter(df['principal component 1'], df['principal component 2'],
                color=colors, alpha=0.5, edgecolor='k')
    df['labels'] = labels
    # Per-cluster [row count, mean degree]; sorting ranks minor vs major.
    shape_ = {}
    for item in list(df['labels'].unique()):
        shape_.update({item: [(df[df['labels'] == item].shape[0]),
                              df[df['labels'] == item]['Sum_Degree'].mean()]})
        print('Complex Degree:', df[df['labels'] == item]['Sum_Degree'].mean())
    minor_ = sorted(shape_.items(), key=lambda kv: kv[1])[0][0]
    major_ = sorted(shape_.items(), key=lambda kv: kv[1])[1][0]
    # Annotate every point with its spell identifier.
    for label, px, py in zip(df['ACTIVITY_IDENTIFIER'],
                             df['principal component 1'],
                             df['principal component 2']):
        plt.annotate(label, (px, py), textcoords="offset points",
                     xytext=(0, 10), ha='center', size=20)
    # Fill each Voronoi region with its point's cluster color.
    for region, colour in zip(regions, df['color']):
        polygon = vertices[region]
        plt.fill(*zip(*polygon), alpha=0.4, color=colour)
    plt.xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
    plt.ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
    print('Minor Complex Degree:', df[df['labels'] == minor_]['Sum_Degree'].mean())
    print('Major Complex Degree:', df[df['labels'] == major_]['Sum_Degree'].mean())
    return df[(df['POD_CODE'] == POD)][
        ['age', 'sex', 'SpellHRG'] + features
        + ['POD_CODE', 'SpellPDiag', 'SpellSDiag']]
#This block is for Jupyter-Notebook script
"""
codes =list(Degree_ReAdmitted_HRG['SpellHRG'].unique())
cardi=['DZ31Z',
'EC21Z',
'EC22Z',
'EY50Z',
'EY51Z',
'EY01A',
'EY01B',
'EY02A',
'EY02B',
'EY11Z',
'EY12A',
'EY12B',
'EY13Z',
'EY16A',
'EY16B',
'EY17A',
'EY17B']
init_code = list(set(codes).intersection(cardi))
stats = interact(plot_SpellHRG_ICD,
ICD=widgets.SelectMultiple(
options=
#init_code,
list(Degree_HRG['SpellPDiag'].dropna().unique()),
value=['A415'],
#rows=10,
description='ICD',
disabled=False
)
,POD=widgets.Dropdown(options=list(Degree_HRG['POD_CODE'].dropna().unique()))
,Degree=widgets.IntRangeSlider(value=[5,500],
min=0,
max=max(Degree_HRG['Sum_Degree']),
step=1,
description='Degree:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')
,Age=widgets.IntRangeSlider(value=[-1,30],
min=-1,
max=100,
step=1,
description='Age:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d')) """
def plot_SpellHRG_HRG(HRG,Degree,Readmit,POD):
df = pd.DataFrame()
Degree_ReAdmitted_HRG = utils_pickle.read("Degree_ReAdmitted_HRG")
for item in HRG:
#print(item)
data=Degree_ReAdmitted_HRG.loc[(Degree_ReAdmitted_HRG['SpellHRG'] == item)&(Degree_ReAdmitted_HRG['POD_CODE'] == POD)
&(((Degree_ReAdmitted_HRG['Sum_Degree']>=Degree[0])&(Degree_ReAdmitted_HRG['Sum_Degree'] <=Degree[1])))
&(((Degree_ReAdmitted_HRG['ReAdmitted in DAYS']>=Readmit[0])&(Degree_ReAdmitted_HRG['ReAdmitted in DAYS'] <=Readmit[1])))]
df = pd.concat([df,data])
features = ['Sum_Degree','Global_Central', 'Total_LOS', 'INCOME','Turnaround_Degree','DIAG_COUNT']
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
#y = test.loc[:,['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['principal component 1', 'principal component 2'])
#Voronoi at least four points, though clusters not less than 4
kmeans = KMeans(n_clusters=2)
#pair = ['INCOME','Total_LOS']
kmeans.fit(principalDf)
labels = kmeans.predict(principalDf)
centroids = kmeans.cluster_centers_
#print(centroids)
v = np.vstack([centroids,[0,0]])
#print(v)
vor = Voronoi(principalComponents)
# plot
regions, vertices = voronoi_finite_polygons_2d(vor)
fig = plt.figure(figsize=(10, 10))
colmap = {1: | |
import logging
import os
import sys
from collections import Counter
import SimpleITK as sitk
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
from matplotlib.ticker import PercentFormatter
from sklearn.metrics import confusion_matrix
from src.utils.Utils_io import save_plot, ensure_dir
def my_autopct(pct):
    """
    Helper to filter % values of a pie chart, which are smaller than 1%
    :param pct: numeric percentage value
    :return: formatted label, or '' when pct is not above 1
    """
    if pct <= 1:
        return ''
    return '%1.0f%%' % pct
def get_metadata_maybe(sitk_img, key, default='not_found'):
    """Read a metadata value from a SimpleITK image, falling back to default.

    Helper for unicode decode errors: every non-int value is round-tripped
    through utf-8 with backslashreplace because of unicode errors in the
    dataset.
    """
    try:
        value = sitk_img.GetMetaData(key)
    except Exception as exc:
        logging.debug('key not found: {}, {}'.format(key, exc))
        value = default
    if isinstance(value, int):
        return value
    # need to encode/decode all values because of unicode errors in the dataset
    return value.encode('utf8', 'backslashreplace').decode('utf-8').replace('\\udcfc', 'ue')
def show_2D_or_3D(img=None, mask=None, save=False, file_name='reports/figure/temp.png', dpi=200, f_size=(5, 5),
                  interpol='bilinear'):
    """
    Wrapper for 2D/3D or 4D image/mask visualisation.
    Single point for plotting, as this wrapper will delegate the plotting according to the input dim
    Works with image,mask tuples but also with image,None internal it calls show_transparent, plot_3d or plot_4d
    Parameters
    ----------
    img : np.ndarray - with 2 <= dim <= 4
    mask : np.ndarray - with 2 <= dim <= 4
    save : bool - save this figure, please provide a file_name
    file_name : string - full-path-and-file-name-with-suffix
    dpi : int - dpi of the saved figure, will be used by matplotlib
    f_size : tuple of int - define the figure size
    interpol : enumaration - interpolation method for matplotlib e.g.: 'linear', 'bilinear', None
    Returns matplotlib.figure
    -------
    """
    if isinstance(img, sitk.Image):
        img = sitk.GetArrayFromImage(img).astype(np.float32)
    if isinstance(mask, sitk.Image):
        mask = sitk.GetArrayFromImage(mask).astype(np.float32)
    # dont print anything if no images nor masks are given
    if img is None and mask is None:
        logging.error('No image data given')
        return
    # BUGFIX: dim was taken from the mask when img is None, but every shape
    # check below dereferenced img.shape and crashed for mask-only calls.
    # Use whichever array is present for all dim/shape decisions.
    ref = img if img is not None else mask
    dim = ref.ndim
    if dim == 2:
        return show_slice_transparent(img, mask)
    elif dim == 3 and ref.shape[-1] == 1:  # data from the batchgenerator
        return show_slice_transparent(img, mask)
    elif dim == 3:
        return plot_3d_vol(img, mask, save=save, path=file_name, dpi=dpi, fig_size=f_size, interpol=interpol)
    elif dim == 4 and ref.shape[-1] == 1:  # data from the batchgenerator
        return plot_3d_vol(img, mask, save=save, path=file_name, dpi=dpi, fig_size=f_size, interpol=interpol)
    elif dim == 4 and ref.shape[-1] in [3, 4]:  # only mask
        # NOTE(review): this branch forwards img (possibly None) rather than
        # mask, matching the original behavior — confirm intent upstream.
        return plot_3d_vol(img, save=save, path=file_name, dpi=dpi, fig_size=f_size, interpol=interpol)
    elif dim == 4:
        return plot_4d_vol(img, mask)
    else:
        logging.error('Unsupported dim: {}, shape: {}'.format(ref.ndim, ref.shape))
        raise NotImplementedError('Wrong shape Exception in: {}'.format('show_2D_or_3D()'))
def create_eval_plot(df_dice, df_haus=None, df_hd=None, df_vol=None, eval=None):
    """
    Create a violinplot with an integrated bland altmann plot
    Nobs = median
    Expects the following dataframe structure (created in notebooks/Evaluate/Evaluate_create_plots.ipynb):
    Name	Dice LV	Volume LV	Err LV(ml)	Hausdorff LV	Dice RV	Volume RV	Err RV(ml)	Hausdorff RV	Dice MYO	Volume MYO	Err MYO(ml)	Hausdorff MYO
    0	0000-0HQQW4ZN_2007-05-23_ED_msk	0.897436	110.887500	-7.106250	5.744563	0.868490	149.231250	-30.600000	7.211103	0.619342	57.768750	-2.925000	10.000000
    1	0000-0HQQW4ZN_2007-05-23_ES_msk	0.850738	43.443750	4.921875	4.123106	0.830049	82.743750	-3.862500	10.816654	0.695500	51.993750	2.325000	5.830952
    Parameters
    ----------
    df_dice : pd.dataframe - melted dice dataframe
    df_haus : pd.dataframe - melted dataframe with the hausdorff; optional, the 4th panel is skipped if None
    df_hd : pd.dataframe - melted dataframe with the difference (pred-gt) of the volumes (currently unused)
    df_vol : pd.dataframe - melted dataframe with the predicted volume
    eval : pd.dataframe - full dataframe as shown in the fn description
    Returns matplotlib.figure
    -------
    """
    import seaborn as sns
    outliers = False
    # make sure the color schema reflects the RGB schema of show_slice_transparent
    my_pal_1 = {"Dice LV": "dodgerblue", "Dice MYO": "g", "Dice RV": "darkorange"}
    my_pal_2 = {"Err LV(ml)": "dodgerblue", "Err MYO(ml)": "g", "Err RV(ml)": "darkorange"}
    my_pal_3 = {"Volume LV": "dodgerblue", "Volume MYO": "g", "Volume RV": "darkorange"}
    hd_pal = {"Hausdorff LV": "dodgerblue", "Hausdorff MYO": "g", "Hausdorff RV": "darkorange"}
    plt.rcParams.update({'font.size': 20})
    ax4 = None
    if df_haus is not None:
        fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(25, 8), sharey=False)
    else:
        fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(25, 8), sharey=False)
    # panel 1: dice score per label as violin plot, annotated with mean +/- sd
    ax1 = sns.violinplot(x='variable', y='value', data=df_dice, order=["Dice LV", "Dice MYO", "Dice RV"],
                         palette=my_pal_1, showfliers=outliers, ax=ax1)
    mean = df_dice.groupby(['variable'])['value'].mean().round(2)
    sd = df_dice.groupby(['variable'])['value'].std().round(2)
    nobs = ['{}+/-{}'.format(m, s) for m, s in zip(mean, sd)]
    for tick, label in zip(range(len(ax1.get_xticklabels())), ax1.get_xticklabels()):
        _ = ax1.text(tick, mean[tick], nobs[tick], horizontalalignment='center', size='x-small', color='black',
                     weight='semibold')
    plt.setp(ax1, ylim=(0, 1))
    plt.setp(ax1, ylabel=('DICE'))
    plt.setp(ax1, xlabel='')
    ax1.set_xticklabels(['LV', 'MYO', 'RV'])
    # panel 2: bland altmann plot from the volume diff
    ax2 = bland_altman_metric_plot(eval, ax2)
    # panel 3: violin plot for the predicted volume
    ax3 = sns.violinplot(x='variable', y='value', order=["Volume LV", "Volume MYO", "Volume RV"], palette=my_pal_3,
                         showfliers=outliers, data=df_vol, ax=ax3)
    mean = df_vol.groupby(['variable'])['value'].mean().round(2)
    sd = df_vol.groupby(['variable'])['value'].std().round(2)
    nobs = ['{}+/-{}'.format(m, s) for m, s in zip(mean, sd)]
    for tick, label in zip(range(len(ax3.get_xticklabels())), ax3.get_xticklabels()):
        _ = ax3.text(tick, mean[tick], nobs[tick], horizontalalignment='center', size='x-small', color='black',
                     weight='semibold')
    # plt.setp(ax3, ylim=(0,500))
    plt.setp(ax3, ylabel=('Vol size in ml'))
    plt.setp(ax3, xlabel='')
    ax3.set_xticklabels(['LV', 'MYO', 'RV'])
    # panel 4 (optional): hausdorff distance. Previously this block ran even when
    # df_haus was None, which raised a NameError because ax4 was never created.
    if df_haus is not None:
        ax4 = sns.violinplot(x='variable', y='value', order=["Hausdorff LV", "Hausdorff MYO", "Hausdorff RV"],
                             palette=hd_pal,
                             showfliers=outliers, data=df_haus, ax=ax4)
        mean = df_haus.groupby(['variable'])['value'].mean().round(2)
        sd = df_haus.groupby(['variable'])['value'].std().round(2)
        nobs = ['{}+/-{}'.format(m, s) for m, s in zip(mean, sd)]
        for tick, label in zip(range(len(ax4.get_xticklabels())), ax4.get_xticklabels()):
            _ = ax4.text(tick, mean[tick], nobs[tick], horizontalalignment='center', size='x-small', color='black',
                         weight='semibold')
        plt.setp(ax4, ylim=(0, 50))
        plt.setp(ax4, ylabel=('Hausdorff distance'))
        plt.setp(ax4, xlabel='')
        ax4.set_xticklabels(['LV', 'MYO', 'RV'])
    plt.tight_layout()
    return fig
def show_slice_transparent(img=None, mask=None, show=True, f_size=(5, 5), ax=None, interpol='none'):
    """
    Plot one 2D image slice with the mask overlaid semi-transparently (alpha=0.3).
    Parameters
    ----------
    img : np.ndarray or sitk.Image - image with the shape x,y (or x,y,c - only channel 0 is used)
    mask : np.ndarray or sitk.Image - mask with the shape x,y (int labels) or x,y,channel --> one channel per label with bool values
    show : bool - this is necessary for the tf.keras callbacks, true returns the ax, otherwise we return the figure
    f_size : tuple of int - specify the figure size
    ax : matplotlib.axes object - plots into that ax, if given, creates a new one otherwise
    interpol : str - matplotlib interpolation mode used for the mask overlay, e.g. 'none', 'bilinear'
    Returns ax or figure object (None if neither img nor mask is given or shapes are invalid)
    -------
    """
    # If mask has int values (0 - #of_labels) instead of channeled bool values, define the labels of interest
    # not provided as fn-parameter to reduce the complexity
    mask_values = [1, 2, 3]
    # define a threshold if we have a mask from a sigmoid/softmax output-layer which is not binary
    mask_threshold = 0.5
    # accept SimpleITK images and convert them to numpy first
    if isinstance(img, sitk.Image):
        img = sitk.GetArrayFromImage(img).astype(np.float32)
    if isinstance(mask, sitk.Image):
        mask = sitk.GetArrayFromImage(mask).astype(np.float32)
    # dont print anything if no images nor masks are given
    if img is None and mask is None:
        logging.error('No image data given')
        return
    # replace mask with empty slice if none is given
    if mask is None:
        shape = img.shape
        mask = np.zeros((shape[0], shape[1], 3))
    # replace image with empty slice if none is given
    if img is None:
        shape = mask.shape
        img = np.zeros((shape[0], shape[1], 1))
    # check image shape
    if len(img.shape) == 2:
        # image already in 2d shape take it as it is
        x_ = (img).astype(np.float32)
    elif len(img.shape) == 3:
        # take only the first channel, grayscale - ignore the others
        x_ = (img[..., 0]).astype(np.float32)
    else:
        logging.error('invalid dimensions for image: {}'.format(img.shape))
        return
    # check masks shape, handle mask without channel per label
    if len(mask.shape) == 2:  # mask with int as label values
        y_ = transform_to_binary_mask(mask, mask_values=mask_values).astype(np.float32)
    elif len(mask.shape) == 3 and mask.shape[2] == 1:  # handle mask with empty additional channel
        mask = mask[..., 0]
        y_ = transform_to_binary_mask(mask, mask_values=mask_values).astype(np.float32)
    elif len(mask.shape) == 3 and mask.shape[2] == 3:  # handle mask with three channels
        y_ = (mask).astype(np.float32)
    elif len(mask.shape) == 3 and mask.shape[2] == 4:  # handle mask with 4 channels (backround = first channel)
        # ignore background channel for plotting
        y_ = (mask[..., 1:] > mask_threshold).astype(np.float32)
    else:
        logging.error('invalid dimensions for masks: {}'.format(mask.shape))
        return
    if not ax:  # no axis given
        fig = plt.figure(figsize=f_size)
        ax = fig.add_subplot(1, 1, 1, frameon=False)
    else:  # axis given get the current fig
        fig = plt.gcf()
    fig.tight_layout(pad=0)
    ax.axis('off')
    # normalise image, avoid interpolation by matplotlib to have full control
    # epsilon guards against division by zero for constant images
    x_ = (x_ - x_.min()) / (x_.max() - x_.min() + sys.float_info.epsilon)
    # vmax=0.4 deliberately brightens the grayscale image under the overlay
    ax.imshow(x_, 'gray', vmin=0, vmax=0.4)
    ax.imshow(y_, interpolation=interpol, alpha=.3)
    if show:
        return ax
    else:
        return fig
def bland_altman_metric_plot(metric, ax=None):
'''
Plots a Bland Altmann plot for a evaluation dataframe from the eval scripts
:param metric: pd.Dataframe
:return: plt.ax
Parameters
----------
metric :
ax :
Returns
-------
'''
if ax is None:
fig, ax = plt.subplots(figsize=(20, 15))
| |
<filename>Backend/src/__init__.py
import os
import psycopg2
from . import FunctionML
from . import MLPRegressor
from . import Regressors
from flask import Flask, render_template, request
from flask_cors import CORS
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
import pandas as pd
import os
import ast
from flask_mail import Mail
import numpy as np
# Flask application factory section: create the app, load the config and
# wire up the extensions (CORS, bcrypt, SQLAlchemy, mail).
app = Flask(__name__)
CORS(app)
# config class path can be overridden via the APP_SETTINGS env variable
app_settings = os.getenv('APP_SETTINGS',
                         'Backend.src.config.DevelopmentConfig')
app.config.from_object(app_settings)
bcrypt = Bcrypt(app)
db = SQLAlchemy(app)
mail = Mail(app)
from Backend.src.authentication.views import auth_blueprint
app.register_blueprint(auth_blueprint)
# NOTE(review): credentials are hardcoded here ('<PASSWORD>' placeholder);
# they should come from the environment / a secrets store instead.
user_name = 'postgres'
password = "<PASSWORD>"
database_name = 'postgres'
postgres_local_base = 'postgresql://' + user_name + ':' + password + '@localhost/'
# DATABASE_URL env variable wins over the locally assembled connection string
DATABASE_URL=os.getenv('DATABASE_URL', postgres_local_base + database_name)
@app.route('/')
def result():
    """Health-check endpoint: confirms that the backend is reachable."""
    message = "le back est plutot bon"
    return (message)
@app.route('/test2', methods=['GET'])
def config():
    """Smoke-test endpoint: checks the DB connection by dumping every row of
    the test1 table (except the first) as a tab-separated string."""
    conn = psycopg2.connect(DATABASE_URL, sslmode='require')
    cur = conn.cursor()
    cur.execute("select * from test1")
    rows = cur.fetchall()
    # skip the first record, join all remaining rows tab-separated
    hello = ''.join('\t'.join(row) + '\t' for row in rows[1:])
    cur.close()
    conn.close()
    return (hello)
@app.route('/data', methods=['GET'])
def getData():
    """List every folder with its id, name and the names of the tsv files it
    contains (a dummy 'testN' entry is used when a folder has no tsv files).

    Returns the list of dicts stringified, e.g.
    "[{'id': 1, 'name': 'Silico10', 'type': ['...']}, ...]".
    """
    data = []
    conn = psycopg2.connect(DATABASE_URL, sslmode='require')
    cur = conn.cursor()
    cur.execute("select folder_id,name from folder order by name")
    # unpack the row tuples directly instead of parsing str(row) with split(','),
    # which broke for names containing commas
    for folder_id, name in cur.fetchall():
        data.append({"id": int(folder_id), "name": str(name)})
    for i in range(len(data)):
        # query by the real folder_id of the entry; the previous code assumed
        # folder ids are 1..n in name order, which is not guaranteed
        cur.execute("select tsv.name from tsv where tsv.folder_id=%s order by tsv.name",
                    (data[i]["id"],))
        records = cur.fetchall()
        if len(records) != 0:
            data[i]["type"] = [str(record[0]) for record in records]
        else:
            data[i]["type"] = ['test' + str(i)]
    cur.close()
    conn.close()
    return (str(data))
def getDataId(data, id):
    """Return the index of the first entry whose 'id' field equals *id*.

    Falls back to 0 when no entry matches (callers rely on that default).
    """
    for position, entry in enumerate(data):
        if entry['id'] == id:
            return position
    return 0
@app.route('/wildtype', methods=['POST', 'GET'])
# Endpoint returning the steady states (wildtype values) of a dataset.
def wildtype():
    """Look up the folder name for the posted id, read the corresponding
    <folder>_wildtype.tsv and return its first row as a space-separated string.
    Expects a JSON body like {"id": "<folder_id>"}.
    """
    hello = ""
    datas = []
    conn = psycopg2.connect(DATABASE_URL, sslmode='require')
    cur = conn.cursor()
    headers = request.get_json(force=True)
    id = int(headers['id'])
    # NOTE(review): SQL built by string concatenation; id is cast to int above,
    # but a parameterized query would be safer.
    sql2 = "select name from folder where folder_id=" + str(id)
    cur.execute(sql2)
    dossier = cur.fetchall()
    # fetchall returns [('name',)]; slicing str(...) with [3:-4] strips "[(''" and "'',)]"
    dossier = str(dossier)[3:-4]
    df_wildtype = pd.read_csv(
        'Backend/data/' + dossier + '/' + dossier + '_wildtype.tsv', sep='\t')
    datas = (df_wildtype.values)
    # only the first row is returned, one value per gene
    for row in datas[0]:
        hello += ' ' + str(row)
    return (hello)
@app.route('/learning', methods=['POST'])
# Learning-request route. The POST body is a JSON summary of the request, e.g.
# {'name': 'Silico10', 'data': '{"0":"knockout"}', 'learning': 'RandomForest'}
# The function loads the referenced data files and echoes the request back.
def learn():
    """Load every tsv file referenced by the request's 'data' mapping and
    return the request headers stringified (the loaded data is currently
    only read, not returned).
    """
    headers = request.get_json(force=True)
    datas = []
    # parse the mapping once (it was re-parsed on every loop iteration before);
    # the unused local 'x' was also removed
    perturbations = ast.literal_eval(headers['data'])
    for i in range(len(perturbations)):
        files = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            perturbations[str(i)] + '.tsv',
            sep='\t')
        datas.append(files.values)
    return (str(headers))
@app.route('/graph', methods=['POST'])
def graph():
    """Infer a gene-interaction graph for the requested dataset.

    The JSON body provides 'name' (dataset folder) and 'learning' (method).
    Returns the inferred edges stringified as a list of
    {'source': i+1, 'target': j+1, 'type': 'unknown'} dicts.
    """
    headers = request.get_json(force=True)
    if True:  # Create a condition here to choose the method
        df_knockouts = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            'knockouts' + '.tsv',
            sep='\t')
        df_knockdowns = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            'knockdowns' + '.tsv',
            sep='\t')
        df_wildtype = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            'wildtype' + '.tsv',
            sep='\t')
        df_timeseries = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            'timeseries' + '.tsv',
            sep='\t')
    # dispatch on the requested learning method; thresholds (6, 0.02, 0.2)
    # are the method-specific cutoffs used to binarize the coefficient matrix
    if headers['learning'] == 'XGBoost':
        M = Regressors.get_relation_matrix(Regressors.get_coef_matrix_from_XGBoost_coef(df_timeseries, df_wildtype), 6)
    elif headers['learning'] == 'RL':
        # larger networks (>50 genes) get a smaller threshold
        if len(np.transpose(df_timeseries)) > 50:
            M = Regressors.get_relation_matrix(Regressors.get_RL_coef_from_timeseries(df_timeseries), 0.02)
        else:
            M = Regressors.get_relation_matrix(Regressors.get_RL_coef_from_timeseries(df_timeseries), 0.2)
    elif headers['learning'] == 'Random Forest':
        M = Regressors.get_relation_matrix(Regressors.get_coef_matrix_from_RandomForest_coef(df_timeseries, df_wildtype), 6)
    else:
        M = FunctionML.etudedict(df_knockouts, df_wildtype)
    # collect every |M[i][j]| == 1 entry as an edge (1-based node ids)
    retour = []
    for i in range(len(M[0])):
        for j in range(len(M[0])):
            if abs(M[i][j]) == 1:
                retour.append({
                    'source': i + 1,
                    'target': j + 1,
                    'type': 'unknown'
                })
    return (str(retour))
@app.route('/displayData', methods=['POST'])
def display():
    """Return chart-ready series for the requested dataset/measurement type.

    JSON body: {'donnee': <folder name>, 'type': <measurement name>}.
    Produces a stringified list of {"label": "G<n>", "data": [...]} dicts,
    one series per gene. For 'knockouts' the wildtype values are interleaved
    as every second series for comparison.
    """
    headers = request.get_json(force=True)
    dossier = headers['donnee']
    name = headers['type']
    if name == 'wildtype':
        files = pd.read_csv(
            'Backend/data/' + dossier + '/' + dossier + '_wildtype.tsv', sep='\t')
        displayData = []
        data = files.values
        # one series per column (gene)
        length = int(len(files.values[0]))
        for i in range(length):
            displayData.append({"label": "G" + str(i + 1), "data": []})
        for i in range(len(data)):
            for j in range(len(data[i])):
                # NOTE(review): data[0][j] is appended for every i, so each
                # series repeats the first row's value len(data) times --
                # presumably the wildtype file has a single row; confirm,
                # otherwise this should likely be data[i][j].
                displayData[j]["data"].append(data[0][j])
        return (str(displayData))
    elif name != 'knockouts':
        displayData = []
        files = pd.read_csv(
            'Backend/data/' + dossier + '/' + dossier + '_' + name + '.tsv',
            sep='\t')
        data = files.values
        # one series per row here (in contrast to the wildtype branch)
        length = int(len(files.values))
        for i in range(length):
            displayData.append({"label": "G" + str(i + 1), "data": []})
        for i in range(len(data)):
            for j in range(len(data[i])):
                displayData[j]["data"].append(data[i][j])
        return (str(displayData))
    else:
        # knockouts: interleave measurement series (even indices) with the
        # wildtype values (odd indices) so the frontend can compare them
        displayData = []
        data = []
        files2 = pd.read_csv(
            'Backend/data/' + dossier + '/' + dossier + '_wildtype.tsv',
            sep='\t')
        data = (files2.values)
        files = pd.read_csv(
            'Backend/data/' + dossier + '/' + dossier + '_' + name + '.tsv',
            sep='\t')
        data2 = files.values
        length = int(len(files.values))
        for i in range(2 * length):
            displayData.append({"label": "G" + str(i + 1), "data": []})
        for i in range(len(data2)):
            for j in range(len(data2[i])):
                displayData[(j * 2)]["data"].append(data2[i][j])
        for i in range(len(data[0])):
            displayData[(i * 2) + 1]["data"].append(data[0][i])
        return (str(displayData))
@app.route('/displayTimeseries', methods=['POST'])
# Route created to display the timeseries on the chart.
def displayTimeseries():
    """Parse a timeseries tsv file into chart series.

    JSON body: {'donnee': <folder name>, 'type': <measurement name>}.
    Returns a stringified list of {"label": ..., "data": [...]} dicts.
    The parser flattens the file into one tab-separated token stream and
    assigns tokens to series by position, with hardcoded sizes for the
    insilico 10-gene and 100-gene dataset layouts.
    """
    headers = request.get_json(force=True)
    dossier = headers['donnee']
    name = headers['type']
    displayData = []
    text = open(
        'Backend/data/' + dossier + '/' + dossier + '_' + name + '.tsv', 'r+')
    content = text.read()
    text.close()
    datas = str(content)
    newdata = ''
    # dossier has the form "insilico_size***_1", so the -4 char is either
    # 1 <- (size'1'0_1) or 0 <- (size1'0'0_1)
    if int(dossier[-4])==1:
        is_insilico10 = True
    else:
        is_insilico10 = False
    if is_insilico10:
        # 10-gene layout: 10 series + 1 time-axis column
        length = 11
        # replace newlines by tabs to get one flat token stream
        for j in range(0, len(datas)):
            if datas[j] == "\n":
                newdata += '\t'
            else:
                newdata += datas[j]
        records = newdata.split("\t")
        # 243 = 21 (points) * 10 (curves) + 21 (time axis) + 10 (G1-G10 headers) + 2
        for row in range(243):
            if row < length:
                # header token: strip surrounding quote characters
                displayData.append({"label": str(records[row])[1:-1], "data": []})
            elif row > length:
                # distribute data tokens round-robin over the series
                displayData[(row - 1) % length]["data"].append(records[row])
    else:
        # 100-gene layout: 100 series + 1 time-axis column
        length = 101
        for j in range(0, len(datas)):
            if datas[j] == "\n":
                newdata += '\t'
            else:
                newdata += datas[j]
        records = newdata.split("\t")
        # 2223 = 21 (points) * 100 (curves) + 21 (time axis) + 100 (G1-G100 headers) + 2
        for row in range(2223):
            if row < length:
                displayData.append({"label": str(records[row])[1:-1], "data": []})
            elif row > length:
                displayData[(row - 1) % length]["data"].append(records[row])
    return (str(displayData))
@app.route('/score', methods=['POST'])
def score():
    """Score the posted adjacency matrix via FunctionML and return it as text."""
    payload = request.get_json(force=True)
    return (str(FunctionML.score(payload['matrice'])))
@app.route('/model', methods=['POST'])
def getModel():
    """Run the requested learning method on the dataset and return the
    inferred relation matrix as a stringified nested list of 0/1 values.

    JSON body provides 'name' (dataset folder), 'learning' (method) and,
    for the gap/dictionary methods, 'data' (which perturbation file to use).
    """
    headers = request.get_json(force=True)
    if True:  # Create a condition here to choose the method
        df_knockouts = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            'knockouts' + '.tsv',
            sep='\t')
        df_knockdowns = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            'knockdowns' + '.tsv',
            sep='\t')
        df_wildtype = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            'wildtype' + '.tsv',
            sep='\t')
        df_gold = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            'goldstandard' + '.tsv',
            sep='\t')
        df_timeseries = pd.read_csv(
            'Backend/data/' + headers['name'] + '/' + headers['name'] + '_' +
            'timeseries' + '.tsv',
            sep='\t')
    # method dispatch; thresholds (6, 0.02, 0.2) binarize the coefficient matrix
    if headers['learning'] == 'XGBoost':
        M = Regressors.get_relation_matrix(Regressors.get_coef_matrix_from_XGBoost_coef(df_timeseries, df_wildtype), 6)
    elif headers['learning'] == 'RL':
        # larger networks (>50 genes) get a smaller threshold
        if len(np.transpose(df_timeseries)) > 50:
            M = Regressors.get_relation_matrix(Regressors.get_RL_coef_from_timeseries(df_timeseries), 0.02)
        else:
            M = Regressors.get_relation_matrix(Regressors.get_RL_coef_from_timeseries(df_timeseries), 0.2)
    elif headers['learning'] == 'Random Forest':
        M = Regressors.get_relation_matrix(Regressors.get_coef_matrix_from_RandomForest_coef(df_timeseries, df_wildtype), 6)
    elif headers['learning'] == 'MLP Regressor':
        M=MLPRegressor.testcomplet(df_timeseries,df_wildtype)
    elif headers['learning'] == 'Absolute Gap':
        # gap methods work on either the knockout or the knockdown file
        if ('knockouts') in headers['data']:
            M = FunctionML.etudeRelationAbsolue(df_knockouts,df_wildtype)
        else:
            M = FunctionML.etudeRelationAbsolue(df_knockdowns,df_wildtype)
    elif headers['learning'] == 'Relative Gap':
        if ('knockouts') in headers['data']:
            M = FunctionML.etudeRelationRelatif(df_knockouts,df_wildtype)
        else:
            M = FunctionML.etudeRelationRelatif(df_knockdowns,df_wildtype)
    elif headers['learning'] == 'Dictionnary':
        if ('knockouts') in headers['data']:
            M=FunctionML.etudedict(df_knockouts,df_wildtype)
        else:
            M=FunctionML.etudedict(df_knockdowns,df_wildtype)
    else :
        M=FunctionML.testcomplet(df_timeseries,df_wildtype)
    # serialize M manually as "[[0,1,...],[...]]" with absolute int entries
    retour = "["
    for i in range(len(M[0])):
        retour += "["
        for j in range(len(M[i])):
            retour += str((int(abs(M[i][j]))))
            if j != len(M[i]) - 1:
                retour += ','
        retour += "]"
        if i != len(M[0]) - 1:
            retour += ","
    retour += "]"
    # NOTE(review): x is computed but unused; presumably left over from debugging
    x = FunctionML.getGold(df_gold, int(len(df_wildtype.values[0])))
    return (str(retour))
@app.route('/gold', methods=['POST'])
def getGold():
    """Read the goldstandard tsv of the requested dataset and return the
    gold network (via FunctionML.getGold) stringified. The network size is
    inferred from the number of rows (>100 rows -> 100 genes, else 10)."""
    headers = request.get_json(force=True)
    gold_path = ('Backend/data/' + headers['name'] + '/' + headers['name'] +
                 '_' + 'goldstandard' + '.tsv')
    df_gold = pd.read_csv(gold_path, sep='\t')
    length = 100 if len(df_gold) > 100 else 10
    return (str(FunctionML.getGold(df_gold, length)))
@app.route('/prediction', methods=['POST'])
#Route en cas de prediction de knockdown/knockout -> x est un json a 2 parametre de type {pert1: "knockdown G2", pert2 : "knockout G7"}
#La fonction renvoie directement les données récupérées post-traitement (sous forme de 10 valeurs successives)
def predict():
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cur = conn.cursor()
headers = request.get_json(force=True)
if headers['pert1'][5] != 'o':
G1 = 'd' + headers['pert1'][-1]
else:
G1 = 'o' + headers['pert1'][-1]
if headers['pert2'][5] != 'o':
G2 = 'd' + headers['pert2'][-1]
else:
G2 = 'o' + headers['pert2'][-1]
id = int(headers['id'])
sql2 = "select name from folder where | |
<reponame>gmftbyGMFTBY/SimpleReDial-v1
from header import *
from .utils import *
from .util_func import *
class GPT2Dataset(Dataset):
    """Dataset for GPT-2 language modelling.

    train mode: splits each text line into [CLS] ... [SEP] chunks of at most
    args['max_len'] tokens (chunks shorter than args['min_len'] are dropped).
    test mode: loads (context, positive, negatives) triples from the
    test_gray_simcse.pt file and prepares prefix/positive/negative id tensors
    for batch generation (padding side is set to 'left').
    Preprocessed data is cached at self.pp_path and reloaded when present.
    """
    def __init__(self, vocab, path, **args):
        self.args = args
        self.vocab = vocab
        # cache the special-token ids used throughout this class
        self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
        self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
        self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
        self.unk = self.vocab.convert_tokens_to_ids('[UNK]')
        if self.args['mode'] == 'test':
            # for test batch generation
            print(f'[!] set the padding side as the left')
            self.vocab.padding_side = 'left'
        suffix = args['tokenizer'].replace('/', '_')
        self.pp_path = f'{os.path.splitext(path)[0]}_gpt2_{suffix}.pt'
        # reuse the preprocessed cache if one exists
        if os.path.exists(self.pp_path):
            self.data = torch.load(self.pp_path)
            print(f'[!] load preprocessed file from {self.pp_path}')
            return None
        random.seed(args['seed'])
        self.data = []
        if self.args['mode'] == 'train':
            data = read_text_data_line_by_line(path)
            self.data = []
            for text in tqdm(data):
                item = self.vocab.encode(text, add_special_tokens=False)
                # slice long texts into max_len-2 windows (2 slots reserved
                # for [CLS] and [SEP])
                for idx in range(0, len(item), self.args['max_len']-2):
                    ids = item[idx:idx+self.args['max_len']-2]
                    if len(ids) < self.args['min_len']:
                        continue
                    ids = [self.cls] + ids + [self.sep]
                    self.data.append({'ids': ids})
        else:
            path = f'{args["root_dir"]}/data/{args["dataset"]}/test_gray_simcse.pt'
            data = torch.load(path)
            # random sample 100 samples
            data = random.sample(data, 10)
            self.data = []
            for item in tqdm(data):
                context, pos, neg_responses = item['context'], item['pos_response'], item['neg_responses']
                for neg in neg_responses:
                    # prefix: keep the rightmost max_len-1 context tokens
                    item = self.vocab.encode(context, add_special_tokens=False)
                    ids = [self.cls] + item[-(self.args['max_len']-1):]
                    item = self.vocab.encode(context+pos, add_special_tokens=False)
                    pos_ids = [self.cls] + item[:self.args['max_len']-2] + [self.sep]
                    item = self.vocab.encode(context+neg, add_special_tokens=False)
                    neg_ids = [self.cls] + item[:self.args['max_len']-2] + [self.sep]
                    self.data.append({
                        'ids': ids,
                        'pos_ids': pos_ids,
                        'pos_text': context+pos,
                        'neg_ids': neg_ids,
                        'neg_text': context+neg,
                        'text': context,
                    })
    def __len__(self):
        """Number of preprocessed samples."""
        return len(self.data)
    def __getitem__(self, i):
        """train: LongTensor of token ids; test: (ids, pos_ids, neg_ids,
        pos_text, neg_text, text) tuple."""
        bundle = self.data[i]
        if self.args['mode'] == 'train':
            ids = torch.LongTensor(bundle['ids'])
            return ids
        else:
            ids = torch.LongTensor(bundle['ids'])
            pos_ids = torch.LongTensor(bundle['pos_ids'])
            neg_ids = torch.LongTensor(bundle['neg_ids'])
            return ids, pos_ids, neg_ids, bundle['pos_text'], bundle['neg_text'], bundle['text']
    def save(self):
        """Persist the preprocessed samples to the cache path."""
        data = torch.save(self.data, self.pp_path)
        print(f'[!] save preprocessed dataset into {self.pp_path}')
    def collate(self, batch):
        """Pad a batch and move it to the GPU.

        train: right-pads ids and returns {'ids', 'mask'}.
        test: left-pads the prefix ids (for generation), right-pads the
        positive/negative ids, and returns the corresponding tensors plus
        the raw texts.
        """
        if self.args['mode'] == 'train':
            ids = pad_sequence(batch, batch_first=True, padding_value=self.pad)
            mask = generate_mask(ids)
            ids, mask = to_cuda(ids, mask)
            return {
                'ids': ids,
                'mask': mask,
            }
        else:
            ids = [i[0] for i in batch]
            pos_ids = [i[1] for i in batch]
            neg_ids = [i[2] for i in batch]
            pos_text = [i[3] for i in batch]
            neg_text = [i[4] for i in batch]
            text = [i[5] for i in batch]
            # pad from the left side, batch first
            max_length = max([len(i) for i in ids])
            n_ids = []
            for i in ids:
                ids_ = torch.cat([torch.LongTensor([self.pad] * (max_length - len(i))), i])
                n_ids.append(ids_)
            ids = torch.stack(n_ids)
            mask = generate_mask(ids)
            pos_ids = pad_sequence(pos_ids, batch_first=True, padding_value=self.pad)
            pos_ids_mask = generate_mask(pos_ids)
            neg_ids = pad_sequence(neg_ids, batch_first=True, padding_value=self.pad)
            neg_ids_mask = generate_mask(neg_ids)
            ids, mask, pos_ids, pos_ids_mask, neg_ids, neg_ids_mask = to_cuda(ids, mask, pos_ids, pos_ids_mask, neg_ids, neg_ids_mask)
            return {
                'ids': ids,
                'mask': mask,
                'pos_ids': pos_ids,
                'pos_ids_mask': pos_ids_mask,
                'neg_ids': neg_ids,
                'neg_ids_mask': neg_ids_mask,
                'pos_text': pos_text,
                'text': text,
                'neg_text': neg_text,
            }
class GPT2UnlikelyhoodDataset(Dataset):
    """Dataset for GPT-2 unlikelihood training.

    train mode: builds (context, positive response, negative candidates)
    samples from dialogue sessions; each sample pairs a gold continuation
    with an in-session distractor. test mode: loads triples from
    test_gray_simcse.pt and prepares prefix/positive/negative ids together
    with label sequences whose context positions are zeroed.
    Preprocessed data is cached at self.pp_path and reloaded when present.
    """
    def __init__(self, vocab, path, **args):
        self.args = args
        self.vocab = vocab
        # cache the special-token ids used throughout this class
        self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
        self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
        self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
        self.unk = self.vocab.convert_tokens_to_ids('[UNK]')
        random.seed(args['seed'])
        if self.args['mode'] == 'test':
            # for test batch generation
            print(f'[!] set the padding side as the left')
            self.vocab.padding_side = 'left'
        suffix = args['tokenizer'].replace('/', '_')
        self.pp_path = f'{os.path.splitext(path)[0]}_gpt2_unlikelyhood_{suffix}.pt'
        # reuse the preprocessed cache if one exists
        if os.path.exists(self.pp_path):
            self.data = torch.load(self.pp_path)
            print(f'[!] load preprocessed file from {self.pp_path}')
            return None
        self.data = []
        if self.args['mode'] == 'train':
            data = read_text_data_unlikelyhood(path)
            # for debug
            data = random.sample(data, 1000)
            self.data = []
            for utterances in tqdm(data):
                item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
                # accumulate utterances until the context is long enough,
                # then emit a (context, response, candidates) sample.
                # NOTE(review): counter is never reset after a sample is
                # emitted, so every later utterance satisfies the length
                # condition -- confirm whether that is intended.
                ids, cands, counter = [], [], 0
                for utterance in item:
                    if counter + len(utterance) + 2 > self.args['train_min_len'] and len(cands) > 0:
                        ids = list(chain(*ids))
                        self.data.append({
                            'cids': ids,
                            'pos_rids': utterance,
                            'cands': cands,
                        })
                        ids, cands = [], []
                    else:
                        ids.append(utterance)
                        cands.append(utterance)
                        counter += len(utterance)
        else:
            path = f'{args["root_dir"]}/data/{args["dataset"]}/test_gray_simcse.pt'
            data = torch.load(path)
            # random sample 100 samples
            data = random.sample(data, 10)
            self.data = []
            for item in tqdm(data):
                context, pos, neg_responses = item['context'], item['pos_response'], item['neg_responses']
                # prefix: keep the rightmost max_len-1 context tokens
                item = self.vocab.encode(context, add_special_tokens=False)
                ids = [self.cls] + item[-(self.args['max_len']-1):]
                cids, rids = self.vocab.batch_encode_plus([context, pos], add_special_tokens=False)['input_ids']
                self.truncate_pair(cids, rids, self.args['max_len'])
                pos_ids = [self.cls] + cids + rids + [self.sep]
                # label: context positions are zeroed so the loss only covers the response
                pos_label = [0] * (len(cids) + 1) + rids + [self.sep]
                neg_ids_total, neg_ids_label_total, neg_text_total = [], [], []
                for neg in neg_responses:
                    cids, rids = self.vocab.batch_encode_plus([context, neg], add_special_tokens=False)['input_ids']
                    self.truncate_pair(cids, rids, self.args['max_len'])
                    neg_ids = [self.cls] + cids + rids + [self.sep]
                    neg_label = [0] * (len(cids) + 1) + rids + [self.sep]
                    neg_ids_total.append(neg_ids)
                    neg_ids_label_total.append(neg_label)
                    neg_text_total.append(context+neg)
                self.data.append({
                    'ids': ids,
                    'pos_ids': pos_ids,
                    'pos_label': pos_label,
                    'pos_text': context+pos,
                    'neg_ids': neg_ids_total,
                    'neg_label': neg_ids_label_total,
                    'neg_text': neg_text_total,
                    'text': context,
                })
    def __len__(self):
        """Number of preprocessed samples."""
        return len(self.data)
    def truncate_pair(self, ids, rids, max_len):
        """Trim (ids, rids) in place until their joint length fits max_len-2;
        the longer context loses tokens from the front, the response from the back."""
        max_len -= 2
        while True:
            l = len(ids) + len(rids)
            if l <= max_len:
                break
            if len(ids) > len(rids):
                ids.pop(0)
            else:
                rids.pop()
    def __getitem__(self, i):
        """train: (gpt2_ids, bert_label, neg_gpt2_ids, neg_bert_label) lists;
        test: tensors plus raw texts (see collate for the batch layout)."""
        bundle = self.data[i]
        if self.args['mode'] == 'train':
            ids, pos_rids, cands = deepcopy(bundle['cids']), deepcopy(bundle['pos_rids']), deepcopy(bundle['cands'])
            # sample one in-session distractor as the negative response
            cand = random.choice(cands)
            neg_ids = deepcopy(ids)
            # NOTE(review): this calls the module-level truncate_pair (no
            # self.), not the method above -- presumably from util_func;
            # confirm they behave the same.
            truncate_pair(ids, pos_rids, self.args['train_max_len'])
            gpt2_ids = [self.cls] + ids + pos_rids + [self.sep]
            bert_label = [0] * (len(ids) + 1) + pos_rids + [self.sep]
            truncate_pair(neg_ids, cand, self.args['train_max_len'])
            neg_gpt2_ids = [self.cls] + neg_ids + cand + [self.sep]
            neg_bert_label = [0] * (len(neg_ids) + 1) + cand + [self.sep]
            return gpt2_ids, bert_label, neg_gpt2_ids, neg_bert_label
        else:
            ids = torch.LongTensor(bundle['ids'])
            pos_ids = torch.LongTensor(bundle['pos_ids'])
            neg_ids = [torch.LongTensor(i) for i in bundle['neg_ids']]
            pos_label = torch.LongTensor(bundle['pos_label'])
            neg_label = [torch.LongTensor(i) for i in bundle['neg_label']]
            return ids, pos_ids, neg_ids, bundle['pos_text'], bundle['neg_text'], bundle['text'], pos_label, neg_label
    def save(self):
        """Persist the preprocessed samples to the cache path."""
        data = torch.save(self.data, self.pp_path)
        print(f'[!] save preprocessed dataset into {self.pp_path}')
    def collate(self, batch):
        """Pad a batch and move it to the GPU.

        train: right-pads positive and negative sequences with their labels.
        test: left-pads the prefix ids; the negatives are grouped per
        candidate index (assumes 10 negatives per sample) into lists of
        padded tensors.
        """
        if self.args['mode'] == 'train':
            gpt2_ids, bert_label, neg_gpt2_ids, neg_bert_label = [], [], [], []
            for a, b, c, d in batch:
                gpt2_ids.append(torch.LongTensor(a))
                bert_label.append(torch.LongTensor(b))
                neg_gpt2_ids.append(torch.LongTensor(c))
                neg_bert_label.append(torch.LongTensor(d))
            gpt2_ids = pad_sequence(gpt2_ids, batch_first=True, padding_value=self.pad)
            neg_gpt2_ids = pad_sequence(neg_gpt2_ids, batch_first=True, padding_value=self.pad)
            bert_label = pad_sequence(bert_label, batch_first=True, padding_value=self.pad)
            neg_bert_label = pad_sequence(neg_bert_label, batch_first=True, padding_value=self.pad)
            gpt2_mask = generate_mask(gpt2_ids)
            neg_gpt2_mask = generate_mask(neg_gpt2_ids)
            gpt2_ids, gpt2_mask, bert_label = to_cuda(gpt2_ids, gpt2_mask, bert_label)
            neg_gpt2_ids, neg_gpt2_mask, neg_bert_label = to_cuda(neg_gpt2_ids, neg_gpt2_mask, neg_bert_label)
            return {
                'gpt2_ids': gpt2_ids,
                'gpt2_mask': gpt2_mask,
                'bert_label': bert_label,
                'neg_gpt2_ids': neg_gpt2_ids,
                'neg_gpt2_mask': neg_gpt2_mask,
                'neg_bert_label': neg_bert_label,
            }
        else:
            # group the i-th negative of every sample into one padded batch
            neg_ids_, neg_text_, neg_ids_mask_, neg_label_ = [], [], [], []
            for i in range(10):
                neg_ids = [j[2][i] for j in batch]
                neg_text = [j[4][i] for j in batch]
                neg_label = [j[7][i] for j in batch]
                neg_ids = pad_sequence(neg_ids, batch_first=True, padding_value=self.pad)
                neg_label = pad_sequence(neg_label, batch_first=True, padding_value=self.pad)
                neg_ids_mask = generate_mask(neg_ids)
                neg_ids, neg_ids_mask, neg_label = to_cuda(neg_ids, neg_ids_mask, neg_label)
                neg_ids_.append(neg_ids)
                neg_ids_mask_.append(neg_ids_mask)
                neg_label_.append(neg_label)
                neg_text_.append(neg_text)
            ids = [i[0] for i in batch]
            pos_ids = [i[1] for i in batch]
            pos_text = [i[3] for i in batch]
            text = [i[5] for i in batch]
            pos_label = [i[6] for i in batch]
            # pad from the left side, batch first
            max_length = max([len(i) for i in ids])
            n_ids = []
            for i in ids:
                ids_ = torch.cat([torch.LongTensor([self.pad] * (max_length - len(i))), i])
                n_ids.append(ids_)
            ids = torch.stack(n_ids)
            mask = generate_mask(ids)
            pos_ids = pad_sequence(pos_ids, batch_first=True, padding_value=self.pad)
            pos_label = pad_sequence(pos_label, batch_first=True, padding_value=self.pad)
            pos_ids_mask = generate_mask(pos_ids)
            ids, mask, pos_ids, pos_ids_mask, pos_label = to_cuda(ids, mask, pos_ids, pos_ids_mask, pos_label)
            return {
                'ids': ids,
                'mask': mask,
                'pos_ids': pos_ids,
                'pos_label': pos_label,
                'pos_ids_mask': pos_ids_mask,
                'neg_ids': neg_ids_,
                'neg_label': neg_label_,
                'neg_ids_mask': neg_ids_mask_,
                'pos_text': pos_text,
                'text': text,
                'neg_text': neg_text_,
            }
class GPT2WithNegDataset(Dataset):
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
self.unk = self.vocab.convert_tokens_to_ids('[UNK]')
if self.args['mode'] == 'test':
# for test batch generation
print(f'[!] set the padding side as the left')
self.vocab.padding_side = 'left'
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.splitext(path)[0]}_gpt2_with_neg_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
random.seed(args['seed'])
self.data = []
if self.args['mode'] == 'train':
data = read_text_data_line_by_line(path)
self.data = []
for text in tqdm(data):
item = self.vocab.encode(text, add_special_tokens=False)
for idx in range(0, len(item), self.args['max_len']-2):
ids = item[idx:idx+self.args['max_len']-2]
if len(ids) < self.args['min_len']:
continue
ids = [self.cls] + ids + [self.sep]
self.data.append({'ids': ids})
else:
path = f'{args["root_dir"]}/data/{args["dataset"]}/test_gray_simcse.pt'
data = torch.load(path)
# random sample 100 samples
data = random.sample(data, 10)
self.data = []
for item in tqdm(data):
context, pos, neg_responses = item['context'], item['pos_response'], item['neg_responses']
for neg in neg_responses:
# prefix
item = self.vocab.encode(context, add_special_tokens=False)
ids = [self.cls] + item[-(self.args['max_len']-1):]
item = self.vocab.encode(context+pos, add_special_tokens=False)
pos_ids = [self.cls] + item[:self.args['max_len']-2] + [self.sep]
item | |
= resize_image(image, resize_height, resize_width)
image = np.asanyarray(image)
if normalization:
image = image_normalization(image)
# show_image("src resize image",image)
return image
def read_image_batch(image_list):
    '''
    Read a batch of images.
    :param image_list: iterable of image paths/URLs
    :return: (list of loaded images, list of the paths that could be loaded)
    '''
    loaded_images = []
    kept_paths = []
    for image_path in image_list:
        image = read_images_url(image_path)
        # skip unreadable entries but keep the two output lists aligned
        if image is None:
            print("no image:{}".format(image_path))
            continue
        loaded_images.append(image)
        kept_paths.append(image_path)
    return loaded_images, kept_paths
def fast_read_image_roi(filename, orig_rect, ImreadModes=cv2.IMREAD_COLOR, normalization=False, use_rgb=True):
    '''
    Quickly read a region of interest (ROI) from an image file.
    When one of the cv2.IMREAD_REDUCED_* modes is used, OpenCV decodes the
    image at 1/2, 1/4 or 1/8 resolution, so the requested rect must be
    scaled by the same factor before cropping.
    :param filename: image file path
    :param orig_rect: ROI rect [x, y, w, h] in original-image coordinates
    :param ImreadModes: one of the cv2.IMREAD_* flags (IMREAD_UNCHANGED,
        IMREAD_GRAYSCALE, IMREAD_COLOR, IMREAD_ANYDEPTH, IMREAD_ANYCOLOR,
        IMREAD_LOAD_GDAL, IMREAD_REDUCED_GRAYSCALE_2/4/8,
        IMREAD_REDUCED_COLOR_2/4/8, IMREAD_IGNORE_ORIENTATION)
    :param normalization: whether to normalize pixel values
    :param use_rgb: output channel order: RGB if True, otherwise BGR
    :return: the cropped ROI image, or None if the file could not be read
    '''
    scale = 1
    # BUG FIX: the second operand of the first condition used to repeat
    # IMREAD_REDUCED_COLOR_2, so IMREAD_REDUCED_GRAYSCALE_2 never triggered
    # the required 1/2 rect scaling.
    if ImreadModes == cv2.IMREAD_REDUCED_GRAYSCALE_2 or ImreadModes == cv2.IMREAD_REDUCED_COLOR_2:
        scale = 1 / 2
    elif ImreadModes == cv2.IMREAD_REDUCED_GRAYSCALE_4 or ImreadModes == cv2.IMREAD_REDUCED_COLOR_4:
        scale = 1 / 4
    elif ImreadModes == cv2.IMREAD_REDUCED_GRAYSCALE_8 or ImreadModes == cv2.IMREAD_REDUCED_COLOR_8:
        scale = 1 / 8
    rect = np.array(orig_rect) * scale
    rect = rect.astype(int).tolist()
    image = cv2.imread(filename, flags=ImreadModes)
    if image is None:
        print("Warning: no image:{}".format(filename))
        return None
    if len(image.shape) == 2:  # promote grayscale to 3 channels
        print("Warning:gray image", filename)
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    if use_rgb:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
    image = np.asanyarray(image)
    if normalization:
        image = image_normalization(image)
    roi_image = get_rect_image(image, rect)
    return roi_image
def resize_image(image, resize_height=None, resize_width=None):
    '''
    Resize an image, preserving the aspect ratio when only one target size
    is given. Note that cv2.resize takes dsize=(width, height), i.e. the
    reverse order of image.shape.
    :param image: input image
    :param resize_height: target height (optional)
    :param resize_width: target width (optional)
    :return: resized image (the input unchanged if neither size is given)
    '''
    shape = np.shape(image)
    src_height, src_width = shape[0], shape[1]
    # nothing requested -> return the image untouched
    if resize_height is None and resize_width is None:
        return image
    # derive the missing dimension from the aspect ratio
    if resize_height is None:
        resize_height = int(src_height * resize_width / src_width)
    elif resize_width is None:
        resize_width = int(src_width * resize_height / src_height)
    return cv2.resize(image, dsize=(resize_width, resize_height))
def image_boxes_resize_padding(image, input_size, boxes=None, color=(0, 0, 0)):
    """
    Aspect-preserving resize with constant-color padding on the short side,
    so the image content is not distorted. If `boxes` is given it is updated
    IN PLACE to the padded-image coordinate system.

    Example:
        input_size = [300, 300]
        image1, boxes1 = read_image("test.jpg"), np.asarray(src_boxes)
        out = image_boxes_resize_padding(image1, input_size, boxes1)
        boxes1 = image_boxes_resize_padding_inverse(image1.shape, input_size, boxes1)

    :param image: source image
    :param input_size: target size [width, height]
    :param boxes: optional Nx4 np.array of [x1,y1,x2,y2] boxes (modified in place)
    :param color: padding color
    :return: the resized-and-padded image
    """
    src_h, src_w = image.shape[:2]
    # uniform scale so the whole image fits inside input_size
    ratio = min(input_size[0] / src_w, input_size[1] / src_h)
    scaled_w, scaled_h = int(src_w * ratio), int(src_h * ratio)
    pad_w = input_size[0] - scaled_w
    pad_h = input_size[1] - scaled_h
    top = pad_h // 2
    bottom = pad_h - top
    left = pad_w // 2
    right = pad_w - left
    out = cv2.resize(image, (scaled_w, scaled_h))
    out = cv2.copyMakeBorder(out, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    if boxes is not None and len(boxes) > 0:
        # translate boxes into the padded coordinate system, in place
        boxes[:] = boxes[:] * ratio
        boxes[:] = boxes[:] + [left, top, left, top]
    return out
def image_boxes_resize_padding_inverse(image_size, input_size, boxes=None):
    """
    Inverse of image_boxes_resize_padding: map boxes from the padded/resized
    coordinate system back to original-image coordinates.
    `boxes` is modified in place and also returned.
    :param image_size: original image shape (height, width[, channels]),
        e.g. `image.shape` as used by the forward function's example
    :param input_size: the [width, height] the image was padded/resized to
    :param boxes: Nx4 np.array of [x1,y1,x2,y2] boxes (modified in place)
    :return: the boxes in original-image coordinates (None if boxes is None)
    """
    # BUG FIX: image_size is an image *shape*, i.e. (height, width, ...).
    # The old code unpacked it as (width, height) -- wrong order -- and
    # failed outright on 3-element shapes from color images.
    height, width = image_size[:2]
    scale = min([input_size[0] / width, input_size[1] / height])
    new_size = [int(width * scale), int(height * scale)]
    pad_w = input_size[0] - new_size[0]
    pad_h = input_size[1] - new_size[1]
    top, bottom = pad_h // 2, pad_h - (pad_h // 2)
    left, right = pad_w // 2, pad_w - (pad_w // 2)
    if boxes is not None and len(boxes) > 0:
        # undo the padding offset, then the uniform scale, in place
        boxes[:] = boxes[:] - [left, top, left, top]
        boxes[:] = boxes[:] / scale
    return boxes
def resize_image_bboxes(image, resize_height=None, resize_width=None, bboxes=None):
    """
    Resize an image and rescale its bounding boxes accordingly.
    :param image: input image of shape (H, W, C)
    :param resize_height: target height; inferred from width if None
    :param resize_width: target width; inferred from height if None
    :param bboxes: optional np.array of [x1,y1,x2,y2] boxes to rescale
    :return: (resized image, rescaled bboxes); bboxes is None if not given
    """
    height, width, _ = image.shape
    # neither target size given -> nothing to do
    if (resize_height is None) and (resize_width is None):
        return image, bboxes
    if resize_height is None:
        # aspect ratio is preserved, so both axes share the same factor
        scale = [resize_width / width, resize_width / width]
        resize_height = int(height * resize_width / width)
    elif resize_width is None:
        scale = [resize_height / height, resize_height / height]
        resize_width = int(width * resize_height / height)
    else:
        scale = [resize_width / width, resize_height / height]
    # BUG FIX: the old code evaluated `scale * 2 * bboxes` unconditionally,
    # raising a TypeError whenever bboxes was left at its default of None.
    if bboxes is not None:
        # [sx, sy, sx, sy] applied elementwise to [x1, y1, x2, y2]
        bboxes = scale * 2 * bboxes
    image = cv2.resize(image, dsize=(resize_width, resize_height))
    return image, bboxes
def scale_image(image, scale):
    '''
    Scale an image by independent horizontal/vertical factors.
    :param image: input image
    :param scale: (scale_w, scale_h) multiplicative factors
    :return: the scaled image
    '''
    factor_x, factor_y = scale[0], scale[1]
    return cv2.resize(image, dsize=None, fx=factor_x, fy=factor_y)
def get_rect_image(image, rect):
    '''
    Crop a rectangular region from an image, clipped to the image bounds.
    :param image: input image (shape is (h, w[, c]))
    :param rect: [x, y, w, h]
    :return: the cropped sub-image
    '''
    img_h, img_w = image.shape[0], image.shape[1]
    # clip the requested rect to the image area before slicing
    clipped = get_rect_intersection(rect, (0, 0, img_w, img_h))
    x, y, w, h = [int(v) for v in clipped]
    return image[y:y + h, x:x + w]
def get_rects_image(image, rects_list, resize_height=None, resize_width=None):
    '''
    Crop a list of rect regions from an image, optionally resizing each crop.
    :param image: source image
    :param rects_list: list of [x, y, w, h] rects
    :param resize_height: optional target height for each crop
    :param resize_width: optional target width for each crop
    :return: list of cropped (and resized) images
    '''
    return [
        resize_image(get_rect_image(image, rect), resize_height, resize_width)
        for rect in rects_list
    ]
def get_bboxes_image(image, bboxes_list, resize_height=None, resize_width=None):
    '''
    Crop a list of bbox regions from an image, optionally resizing each crop.
    :param image: source image
    :param bboxes_list: list of [x1, y1, x2, y2] boxes
    :param resize_height: optional target height for each crop
    :param resize_width: optional target width for each crop
    :return: list of cropped (and resized) images
    '''
    # convert boxes to rects once, then reuse the rect-based cropper
    return get_rects_image(image, bboxes2rects(bboxes_list), resize_height, resize_width)
def bboxes2rects(bboxes_list):
    '''
    Convert boxes [x1, y1, x2, y2] into rects [x1, y1, w, h].
    :param bboxes_list: iterable of boxes
    :return: list of rects
    '''
    return [[x1, y1, x2 - x1, y2 - y1] for x1, y1, x2, y2 in bboxes_list]
def rects2bboxes(rects_list):
    '''
    Convert rects [x1, y1, w, h] into box tuples (x1, y1, x2, y2).
    :param rects_list: iterable of rects
    :return: list of box tuples
    '''
    return [(x, y, x + w, y + h) for x, y, w, h in rects_list]
def bboxes2center(bboxes_list):
    '''
    Convert boxes [x1, y1, x2, y2] into center form [cx, cy, w, h].
    Equivalent to: center = (boxes[:, [0, 1]] + boxes[:, [2, 3]]) / 2
    :param bboxes_list: iterable of boxes
    :return: list of [cx, cy, w, h] entries
    '''
    return [
        [(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1]
        for x1, y1, x2, y2 in bboxes_list
    ]
def center2bboxes(center_list):
    '''
    Convert center form [cx, cy, w, h] into boxes [x1, y1, x2, y2].
    :param center_list: iterable of [cx, cy, w, h] entries
    :return: list of boxes
    '''
    return [
        [cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2]
        for cx, cy, w, h in center_list
    ]
def center2rects(center_list):
    '''
    Convert center form [cx, cy, w, h] into rects [x, y, w, h].
    :param center_list: iterable of [cx, cy, w, h] entries
    :return: list of rects
    '''
    return [
        [cx - w / 2, cy - h / 2, w, h]
        for cx, cy, w, h in center_list
    ]
def scale_rect(orig_rect, orig_shape, dest_shape):
    '''
    Rescale a rect to follow a resize of its image.
    :param orig_rect: rect [x, y, w, h] in the original image
    :param orig_shape: original image shape [h, w]
    :param dest_shape: resized image shape [h, w]
    :return: rect [x, y, w, h] in the resized image coordinates
    '''
    x, y, w, h = orig_rect
    dest_h, dest_w = dest_shape[0], dest_shape[1]
    orig_h, orig_w = orig_shape[0], orig_shape[1]
    # multiply before dividing to keep the exact float rounding of the
    # original implementation
    return [
        int(x * dest_w / orig_w),
        int(y * dest_h / orig_h),
        int(w * dest_w / orig_w),
        int(h * dest_h / orig_h),
    ]
def get_rect_intersection(rec1, rec2):
    '''
    Compute the intersection of two rects [x, y, w, h] as a rect.
    Width and height are clamped to 0 when the rects do not overlap.
    :param rec1: first rect
    :param rec2: second rect
    :return: (x, y, w, h) of the overlapping region
    '''
    ax1, ay1, ax2, ay2 = rects2bboxes([rec1])[0]
    bx1, by1, bx2, by2 = rects2bboxes([rec2])[0]
    ix1 = max(ax1, bx1)
    iy1 = max(ay1, by1)
    ix2 = min(ax2, bx2)
    iy2 = min(ay2, by2)
    return (ix1, iy1, max(0, ix2 - ix1), max(0, iy2 - iy1))
def get_bbox_intersection(box1, box2):
    '''
    Compute the intersection of two boxes [x1, y1, x2, y2].
    Note: coordinates are NOT clamped, so a non-overlapping pair yields an
    "inverted" box (x2 < x1 or y2 < y1) -- identical to the original code.
    :param box1: first box
    :param box2: second box
    :return: (x1, y1, x2, y2) of the intersection
    '''
    left = max(box1[0], box2[0])
    top = max(box1[1], box2[1])
    right = min(box1[2], box2[2])
    bottom = min(box1[3], box2[3])
    return (left, top, right, bottom)
def draw_image_rects(bgr_image, rect_list, color=(0, 0, 255), thickness=2):
    """Draw [x, y, w, h] rects onto the image IN PLACE and return it."""
    for x, y, w, h in rect_list:
        top_left = (int(x), int(y))
        bottom_right = (int(x + w), int(y + h))
        cv2.rectangle(bgr_image, top_left, bottom_right, color, thickness=thickness)
    return bgr_image
def draw_image_boxes(bgr_image, boxes_list, color=(0, 0, 255), thickness=1):
    """Draw [x1, y1, x2, y2] boxes onto the image IN PLACE and return it.

    Entries beyond the first four values of each box are ignored.
    """
    for box in boxes_list:
        p1 = (int(box[0]), int(box[1]))
        p2 = (int(box[2]), int(box[3]))
        cv2.rectangle(bgr_image, p1, p2, color, thickness=thickness)
    return bgr_image
def show_image_rects(win_name, image, rect_list, color=(0, 0, 255), waitKey=0):
    '''
    Draw rects on a copy of the image and display it.
    :param win_name: window title
    :param image: source image (left unmodified; drawing happens on a copy)
    :param rect_list: [[ x, y, w, h],[ x, y, w, h]]
    :param color: rectangle color
    :param waitKey: cv2 waitKey delay
    :return: the image copy with the rects drawn
    '''
    drawn = draw_image_rects(image.copy(), rect_list, color)
    cv_show_image(win_name, drawn, waitKey=waitKey)
    return drawn
def show_image_boxes(win_name, image, boxes_list, color=(0, 0, 255), waitKey=0):
    '''
    Draw boxes on a copy of the image and display it.
    :param win_name: window title
    :param image: source image (left unmodified; drawing happens on a copy)
    :param boxes_list: [[ x1, y1, x2, y2],[ x1, y1, x2, y2]]
    :param color: rectangle color
    :param waitKey: cv2 waitKey delay
    :return: the image copy with the boxes drawn
    '''
    # CONSISTENCY FIX: draw on a copy so the caller's image is not mutated,
    # matching show_image_rects above; the old code drew in place.
    image = draw_image_boxes(image.copy(), boxes_list, color)
    cv_show_image(win_name, image, waitKey=waitKey)
    return image
def draw_image_bboxes_text(rgb_image, boxes, boxes_name, color, drawType="custom", top=True):
"""
:param boxes_name:
:param bgr_image: bgr image
:param color: BGR color:[B,G,R]
:param boxes: [[x1,y1,x2,y2],[x1,y1,x2,y2]]
:return:
"""
rgb_image = rgb_image.copy()
# color_map=list(matplotlib.colors.cnames.values())
# color_map=list(reversed(color_map))
if isinstance(boxes_name, np.ndarray):
boxes_name = boxes_name.reshape(-1).tolist()
for name, box in zip(boxes_name, boxes):
box = [int(b) for b in box]
# cv2.rectangle(bgr_image, (crop_type[0], crop_type[1]), (crop_type[2], crop_type[3]), (0, 255, 0), | |
string names of the analysis and the values stored are
6x1 np.array[float] vectors containing the 3 internal forces and
3 moments at the first node.
- `F2 (dict)`: This dictionary contains the results of an analysis set. The
keys are the string names of the analysis and the values stored are
6x1 np.array[float] vectors containing the 3 internal forces and
3 moments at the second node.
- `Fmode1 (dict)`: This dictionary contains the results of a modal analysis
set. The keys are the string names of the analysis and the values
stored are 6xN np.array[float]. The columns of the array are the
forces and moments at the first node associated with the
particular mode.*
- `Fmode2 (dict)`: This dictionary contains the results of a modal analysis
set. The keys are the string names of the analysis and the values
stored are 6xN np.array[float]. The columns of the array are the
forces and moments at the second node associated with the
particular mode.*
- `xsect (obj)`: The cross-section object used to determine the beams
stiffnesses.
- `EID (int)`: The element ID of the beam.
- `SBID (int)`: The associated Superbeam ID the beam object belongs to.
- `n1 (obj)`: The first nodal object used by the beam.
- `n2 (obj)`: The second nodal object used by the beam.
- `Fe (12x1 np.array[float])`: The distributed force vector of the element
- `Ke (12x12 np.array[float])`: The stiffness matrix of the beam.
- `Keg (12x12 np.array[float])`: The geometric stiffness matrix of the
beam. Used for beam buckling calculations.
- `Me (12x12 np.array[float])`: The mass matrix of the beam.
- `analysis_names (array[str])`: An array containing all of the string
names being used as keys in either U1,U2,F1,F2,Umode1,Umode2,Fmode1
Fmode2
:Methods:
- `printSummary`: This method prints out characteristic attributes of the
beam finite element.
.. Note:: The force and moments in the Fmode1 and Fmode2 could be completely
fictitious and be left as an artifact to facilitate plotting of warped
cross-sections. DO NOT rely on this information being meaningful.
"""
#TODO: Add some kind of print results methods for both printing to command
# line and saving to a file.
def __init__(self,xsect,EID,SBID):
    """Initializes a blank beam object.

    This method initializes attributes that are shared by all of the
    possible beam elements.

    :Args:

    - `xsect (obj)`: The cross-section object used by the beam.
    - `EID (int)`: The integer identifier of the beam element.
    - `SBID (int)`: The associated superbeam ID.

    :Returns:

    - None
    """
    # Result dictionaries keyed by analysis name: nodal displacements for
    # static analyses and for eigenvalue solutions with multiple modes.
    self.U1, self.U2 = {}, {}
    self.Umode1, self.Umode2 = {}, {}
    # Result dictionaries keyed by analysis name: nodal force and moment
    # resultants, static and modal.
    self.F1, self.F2 = {}, {}
    self.Fmode1, self.Fmode2 = {}, {}
    # The beam's cross-section object and the associated identifiers
    self.xsect = xsect
    self.EID = EID          # element ID
    self.XID = xsect.XID    # cross-section ID, taken from the xsect object
    self.SBID = SBID        # super-element (superbeam) ID
    # Element force vector plus the stiffness, geometric stiffness, mass
    # and transformation matrices, all initialized to zeros.
    self.Fe = np.zeros((12, 1), dtype=float)
    self.Ke = np.zeros((12, 12), dtype=float)
    self.Keg = np.zeros((12, 12), dtype=float)
    self.Me = np.zeros((12, 12), dtype=float)
    self.T = np.zeros((12, 12), dtype=float)
    # Names of analyses that have produced results (a list, despite the
    # original comment describing it as a dictionary)
    self.analysis_names = []
def printSummary(self,decimals=8,**kwargs):
    """Prints out characteristic information about the beam element.

    By default this prints the EID and the node information (full node
    summaries, or just the NIDs when `nodeCoord` is False). The element
    stiffness, geometric stiffness and mass matrices plus the distributed
    force vector can be printed on request; matrix entries are rounded to
    `decimals` places.

    :Args:

    - `nodeCoord (bool)`: Whether full node coordinate summaries should be
        printed (default True).
    - `XID (bool)`: Whether the associated cross-section ID is printed.
    - `SBID (bool)`: Whether the associated superbeam ID is printed.
    - `Ke (bool)`: Whether the element stiffness matrix is printed.
    - `Keg (bool)`: Whether the element geometric stiffness matrix is
        printed.
    - `Me (bool)`: Whether the element mass matrix is printed.
    - `Fe (bool)`: Whether the element distributed force and moment vector
        is printed.

    :Returns:

    - `(str)`: Printed summary of the requested attributes.
    """
    # The element ID is always reported
    print('Element: %d' %(self.EID))
    if kwargs.pop('XID',False):
        print('Cross-section: %d' %(self.XID))
    if kwargs.pop('SBID',False):
        print('Superbeam: %d' %(self.SBID))
    # Either full node summaries or just the two node IDs
    if kwargs.pop('nodeCoord',True):
        self.n1.printSummary()
        self.n2.printSummary()
    else:
        print('NID_1: %d' %(self.n1.NID))
        print('NID_2: %d' %(self.n2.NID))
    # Optional matrix/vector dumps, rounded and tabulated
    dumps = (
        ('Ke', 'The beam element stiffness matrix is:', self.Ke),
        ('Keg', 'The beam element geometric stiffness matrix is:', self.Keg),
        ('Me', 'The beam element mass matrix is:', self.Me),
        ('Fe', 'The beam element force vector is:', self.Fe),
    )
    for flag, label, matrix in dumps:
        if kwargs.pop(flag,False):
            print(label)
            print(tabulate(np.around(matrix,decimals=decimals)))
class EBBeam(Beam):
    """Euler-Bernoulli beam class.

    This class is currently unsupported. Please use the more accurate
    Timoshenko beam class (TBeam) instead.
    """
    def __init__(self,x1,x2,xsect,EID,SBID,nid1=1,nid2=2):
        """Creates a single Euler-Bernoulli beam element.

        The element is capable of being oriented in any desired manner
        within 3D space.

        :Args:

        - `x1 (array[float])`: The x,y,z coordinate of the first node.
        - `x2 (array[float])`: The x,y,z coordinate of the second node.
        - `xsect (obj)`: The cross-section object used by the beam.
        - `EID (int)`: The element ID. Note, the EID is inherited from the
            Superbeam EID if not otherwise specified.
        - `SBID (int)`: The associated superbeam ID.
        - `nid1 (int)`: The first node ID.
        - `nid2 (int)`: The second node ID.

        :Returns:

        - None

        .. Note:: For now these elements can only sustain constant
            distributed loads.
        """
        # BUG FIX: Beam.__init__ requires (xsect, EID, SBID); the original
        # call passed only xsect, raising a TypeError on every construction.
        Beam.__init__(self,xsect,EID,SBID)
        self.type = 'EBbeam'
        self.n1 = Node(x1,nid1)
        self.n2 = Node(x2,nid2)
        # Element length: straight-line distance between the two nodes
        h = np.sqrt((x2[0]-x1[0])**2+(x2[1]-x1[1])**2+(x2[2]-x1[2])**2)
        self.h = h
        self.xsect = xsect
        K = xsect.K
        # Aliases for the cross-section stiffness terms (visual neatness only)
        C33 = K[2,2];C34 = K[2,3];C35 = K[2,4];C36 = K[2,5]
        C44 = K[3,3];C45 = K[3,4];C46 = K[3,5]
        C55 = K[4,4];C56 = K[4,5]
        C66 = K[5,5]
        # 12x12 Euler-Bernoulli element stiffness matrix
        ketmp = np.array([[12.*C44/h**3,12.*C45/h**3,0.,-6.*C44/h**2,-6.*C45/h**2,0.,-12.*C44/h**3,-12.*C45/h**3,0.,-6.*C44/h**2,-6.*C45/h**2,0.],\
            [12.*C45/h**3,12.*C55/h**3,0.,-6.*C45/h**2,-6.*C55/h**2,0.,-12.*C45/h**3,-12.*C55/h**3,0.,-6.*C45/h**2,-6.*C55/h**2,0.],\
            [0.,0.,C33/h,-C34/h,-C35/h,C36/h,0.,0.,-C33/h,C34/h,C35/h,-C36/h],\
            [-6.*C44/h**2,-6.*C45/h**2,-C34/h,4.*C44/h,4.*C45/h,-C46/h,6.*C44/h**2,6.*C45/h**2,C34/h,2.*C44/h,2.*C45/h,C46/h],\
            [-6.*C45/h**2,-6.*C55/h**2,-C35/h,4.*C45/h,4.*C55/h,-C56/h,6.*C45/h**2,6.*C55/h**2,C35/h,2.*C45/h,2.*C55/h,C56/h],\
            [0.,0.,C36/h,-C46/h,-C56/h,C66/h,0.,0.,-C36/h,C46/h,C56/h,-C66/h],\
            [-12.*C44/h**3,-12.*C45/h**3,0.,6.*C44/h**2,6.*C45/h**2,0.,12.*C44/h**3,12.*C45/h**3,0.,6.*C44/h**2,6.*C45/h**2,0.],\
            [-12.*C45/h**3,-12.*C55/h**3,0.,6.*C45/h**2,6.*C55/h**2,0.,12.*C45/h**3,12.*C55/h**3,0.,6.*C45/h**2,6.*C55/h**2,0.],\
            [0.,0.,-C33/h,C34/h,C35/h,-C36/h,0.,0.,C33/h,-C34/h,-C35/h,C36/h],\
            [-6.*C44/h**2,-6.*C45/h**2,C34/h,2.*C44/h,2.*C45/h,C46/h,6.*C44/h**2,6.*C45/h**2,-C34/h,4.*C44/h,4.*C45/h,-C46/h],\
            [-6.*C45/h**2,-6.*C55/h**2,C35/h,2.*C45/h,2.*C55/h,C56/h,6.*C45/h**2,6.*C55/h**2,-C35/h,4.*C45/h,4.*C55/h,-C56/h],\
            [0.,0.,-C36/h,C46/h,C56/h,-C66/h,0.,0.,C36/h,-C46/h,-C56/h,C66/h]])
        self.Ke = ketmp
        # No distributed loads applied by default
        self.Fe = np.zeros((12,1),dtype=float)
        # 12x12 geometric stiffness matrix (used for buckling calculations)
        kgtmp = np.array([[6./(5.*h),0.,0.,-1./10.,0.,0.,-6/(5.*h),0.,0.,-1./10.,0.,0.],\
            [0.,6./(5.*h),0.,0.,-1./10.,0.,0.,-6./(5.*h),0.,0.,-1./10.,0.],\
            [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],\
            [-1./10.,0.,0.,2.*h/15.,0.,0.,1./10.,0.,0.,-h/30.,0.,0.],\
            [0.,-1./10.,0.,0.,2.*h/15.,0.,0.,1./10.,0.,0.,-h/30.,0.],\
            [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],\
            [-6./(5.*h),0.,0.,1./10.,0.,0.,6./(5.*h),0.,0.,1./10.,0.,0.],\
            [0.,-6./(5.*h),0.,0.,1./10.,0.,0.,6./(5.*h),0.,0.,1./10.,0.],\
            [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.],\
            [-1./10.,0.,0.,-h/30.,0.,0.,1./10.,0.,0.,2.*h/15.,0.,0.],\
            [0.,-1./10.,0.,0.,-h/30.,0.,0.,1./10.,0.,0.,2.*h/15.,0.],\
            [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]])
        self.Keg = kgtmp
class TBeam(Beam):
"""Creates a Timoshenko beam finite element object.
The primary beam finite element used by AeroComBAT, this beam element is
similar to the Euler-Bernoulli beam finite element most are familiar with,
with the exception that it has the ability to experience shear deformation
in addition to just bending.
:Attributes:
- `type (str)`:String describing the type of beam element being used.
- `U1 (dict)`: This dictionary contains the results of an analysis set. The
keys are the string names of the analysis and the values stored are
6x1 np.array[float] vectors containing the 3 displacements and
3 rotations at the first node.
- `U2 (dict)`: This dictionary contains the results of an analysis set. The
keys are the string names of the analysis and the values stored are
6x1 np.array[float] vectors containing the 3 displacements and
3 rotations at the second node.
- `Umode1 (dict)`: This dictionary contains the results of a modal analysis
set. The keys are the string names of the analysis and the values
stored are 6xN np.array[float]. The columns of the array are the
displacements and rotations at the first node associated with the
particular mode.
- `Umode2 (dict)`: This dictionary contains the results of a modal analysis
set. The keys are the string names of the analysis and the values
stored are 6xN np.array[float]. The columns of the array are the
displacements and rotations at the second node associated with the
particular mode.
- `F1 | |
return -1
try:
optimizerName=datHyperPara['optimizer']
except Exception as e:
self.pathOfData=None
data_details=self.upDateStatus()
self.updateStatusWithError(data_details,'Training Failed',"Couldn't find hyperparameters optimizerName >> "+ str(e),traceback.format_exc(),self.statusFile)
return -1
try:
learningRate=datHyperPara['learningRate']
except Exception as e:
self.pathOfData=None
data_details=self.upDateStatus()
self.updateStatusWithError(data_details,'Training Failed',"Couldn't find hyperparameters learningRate >> "+ str(e),traceback.format_exc(),self.statusFile)
return -1
try:
testSize=datHyperPara['testSize']
except Exception as e:
testSize=None
return 'done'
def trainModelObjectDict(self,modelObj,idforData,tensorboardLogFolder):
    """Dispatch training of a model object to the appropriate trainer.

    Based on the type of modelObj['Data'] (None, a directory, or a .csv
    file) and on whether a preprocessing script output is present, training
    is delegated to one of the specialized trainers. The status file is
    updated with an error entry whenever training cannot proceed.

    :param modelObj: dict describing the model, its data and preprocessing
    :param idforData: identifier of the data set (unused in this method)
    :param tensorboardLogFolder: folder for TensorBoard logs
    :return: the trained model object dict, or -1 on failure
    """
    try:
        dataObj=modelObj['Data']
        print ('dataObj',dataObj)
    except Exception:
        # 'Data' key missing -- treat as "no data supplied"
        dataObj=None
    modelArch=modelObj['modelObj']['modelArchType']
    try:
        scriptOutputPrepro=modelObj['preprocessing']['scriptOutput']
    except Exception:
        scriptOutputPrepro=None
    datHyperPara=modelObj['modelObj']['hyperparameters']
    if modelArch == 'NNModel':
        print ('came to final model training')
        checkVal=self.verifyHyperparameters(datHyperPara)
        if checkVal != 'done':
            data_details=self.upDateStatus()
            self.updateStatusWithError(data_details,'Training Failed',"Some issue with hyperparameters >> ",'No info',self.statusFile)
            return -1
        print ('Passed Step NN 1',dataObj,scriptOutputPrepro)
        try:
            # NOTE: `is None` / short-circuiting `and` replace the old
            # `== None` / bitwise `&`, which also avoids os.path.isdir(None)
            # blowing up before the condition can be evaluated.
            if (dataObj is None) and (scriptOutputPrepro is not None):
                print ('To complicated 1')
                modelObjTrained=self.trainComplicatedDNNObj(modelObj,tensorboardLogFolder,scriptOutputPrepro)
            elif (dataObj is not None) and os.path.isdir(dataObj) and (scriptOutputPrepro is None):
                print ('Came to Image classifier')
                modelObjTrained=self.trainImageClassifierNN(modelObj,tensorboardLogFolder)
            elif (dataObj is not None) and (pathlib.Path(dataObj).suffix == '.csv') and (scriptOutputPrepro is None):
                print('Simple DNN')
                dataObjPd=pd.read_csv(modelObj['Data'])
                print (dataObjPd.shape)
                modelObjTrained=self.trainSimpleDNNObj(modelObj,tensorboardLogFolder,dataObjPd)
            elif (dataObj is not None) and (pathlib.Path(dataObj).suffix == '.csv') and (scriptOutputPrepro is not None):
                dataObjPd=pd.read_csv(modelObj['Data'])
                print (dataObjPd.shape)
                print('Simple DNN with preprocessing',modelObj,tensorboardLogFolder)
                modelObjTrained=self.trainSimpleDNNObjWithPrepro(modelObj,tensorboardLogFolder,dataObjPd)
            else:
                data_details=self.upDateStatus()
                self.updateStatusWithError(data_details,'Training Failed',"Not supported >> ",'No traceback',self.statusFile)
                return -1
        except Exception as e:
            # BUG FIX: the old bare `except:` discarded the exception and
            # logged 'No traceback', making failures undiagnosable.
            data_details=self.upDateStatus()
            self.updateStatusWithError(data_details,'Training Failed',"Not supported >> "+str(e),traceback.format_exc(),self.statusFile)
            return -1
        return modelObjTrained
    # BUG FIX: for any other architecture the old code fell through to
    # `return modelObjTrained` and raised a NameError; fail explicitly.
    data_details=self.upDateStatus()
    self.updateStatusWithError(data_details,'Training Failed',"Unsupported model architecture >> "+str(modelArch),'No traceback',self.statusFile)
    return -1
def trainSimpleDNNObj(self,modelObj,tensorboardLogFolder,dataObj):
    """Train a simple (tabular) DNN model from a pandas DataFrame.

    The DataFrame must contain a column literally named 'target'; every
    other column is treated as a feature. For classification problems the
    target is label-binarized first. The Keras model stored in
    modelObj['modelObj']['recoModelObj'].model is compiled and fitted, and
    the updated modelObj dict is returned.

    :param modelObj: dict describing the model, its data and hyperparameters
    :param tensorboardLogFolder: folder for TensorBoard logs
    :param dataObj: pandas DataFrame with feature columns plus 'target'
    :return: modelObj with the fitted model and predicted class labels
    """
    dataFolder=modelObj['Data']
    # modelToCompile=modelObj['modelObj']['recoModelObj']
    datHyperPara=modelObj['modelObj']['hyperparameters']
    listOfMetrics=datHyperPara['metrics']
    modelV1=modelObj['modelObj']['recoModelObj'].model
    print(">>>>>>>>>>>>>>SimpleDNN")
    print('pathofdata>>>>>',dataFolder)
    predictedClasses=None  # NOTE(review): unused; predictedClass below is what gets returned
    targetColumnName = 'target'
    df = dataObj
    # independent variables = every column except the target
    indevar=list(df.columns)
    indevar.remove('target')
    targetCol = df[targetColumnName]
    if datHyperPara['problemType']=='classification':
        # binarize class labels for the network output layer
        lb=preprocessing.LabelBinarizer()
        y=lb.fit_transform(targetCol)
        predictedClass = list(targetCol.unique())
    else:
        # regression: use the raw target values
        y=df[targetColumnName]
        predictedClass=None
    ##### Split data into test and validation set for training#################################
    trainDataX,testDataX,trainDataY,testDataY=model_selection.train_test_split(df[indevar],y,
        test_size=datHyperPara['testSize'])
    stepsPerEpochT=int(len(trainDataX)/datHyperPara['batchSize'])
    stepsPerEpochV=int(len(testDataX)/datHyperPara['batchSize'])
    kerasUtilities.updateStatusOfTraining(self.statusFile,'Data split in Train validation part')
    # modelObj = self.generateAndCompileModel(datHyperPara['lossType'],datHyperPara['optimizerName'],datHyperPara['learningRate'],listOfMetrics)
    # if modelObj.__class__.__name__ == 'dict':
    # return
    # model = modelObj.model
    tensor_board = self.startTensorBoard(tensorboardLogFolder)
    try:
        # print ('Came here 1'*5 )
        # compile/fit must run inside the graph/session this model was built in
        model_graph = modelObj['modelObj']['model_graph']
        tf_session = modelObj['modelObj']['tf_session']
        with model_graph.as_default():
            with tf_session.as_default():
                if 'f1' in listOfMetrics:
                    # 'f1' is not a built-in Keras metric: swap the string
                    # for the custom self.f1 callable before compiling
                    listOfMetrics.remove('f1')
                    optiMi=self.setOptimizer(datHyperPara['optimizer'],datHyperPara['learningRate'])
                    modelV1.compile(optimizer=optiMi, loss=datHyperPara['loss'],metrics=listOfMetrics+[self.f1])
                    import tensorflow as tf
                    kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Started')
                    with tf.device(gpuCPUSelect(selDev)):
                        modelV1.fit(x=trainDataX, y=trainDataY, epochs=datHyperPara['epoch'], callbacks=[tensor_board],\
                        validation_data=(testDataX, testDataY), steps_per_epoch=stepsPerEpochT, validation_steps=stepsPerEpochV)
                else:
                    try:
                        optiMi=self.setOptimizer(datHyperPara['optimizer'],datHyperPara['learningRate'])
                        modelV1.compile(optimizer=optiMi, loss=datHyperPara['loss'], metrics=listOfMetrics)
                        import tensorflow as tf
                        kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Started')
                        with tf.device(gpuCPUSelect(selDev)):
                            modelV1.fit(x=trainDataX, y=trainDataY, epochs=datHyperPara['epoch'], callbacks=[tensor_board],\
                            validation_data=(testDataX, testDataY), steps_per_epoch=stepsPerEpochT, validation_steps=stepsPerEpochV)
                    except:
                        # fallback: retry with input reshaped to
                        # (samples, 1, features) -- presumably for models
                        # whose input layer expects 3-D input; TODO confirm
                        trShape=trainDataX.shape
                        teShape=testDataX.shape
                        trainDataX=trainDataX.values.reshape(trShape[0],1,trShape[1])
                        testDataX=testDataX.values.reshape(teShape[0],1,teShape[1])
                        optiMi=self.setOptimizer(datHyperPara['optimizer'],datHyperPara['learningRate'])
                        modelV1.compile(optimizer=optiMi, loss=datHyperPara['loss'], metrics=listOfMetrics)
                        import tensorflow as tf
                        kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Started')
                        with tf.device(gpuCPUSelect(selDev)):
                            modelV1.fit(x=trainDataX, y=trainDataY, epochs=datHyperPara['epoch'], callbacks=[tensor_board],\
                            validation_data=(testDataX, testDataY), steps_per_epoch=stepsPerEpochT, validation_steps=stepsPerEpochV)
        print ('9'*500)
    except Exception as e:
        print ('Came here 2'*5 )
        data_details=self.upDateStatus()
        self.updateStatusWithError(data_details,'Training Failed','Error while fitting data to Keras Model >> '+ str(e),traceback.format_exc(),self.statusFile)
    # NOTE(review): reached even after a failure above, so the status file
    # may record both 'Training Failed' and 'Training Completed' -- confirm
    kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Completed')
    modelObj['modelObj']['recoModelObj'].model=modelV1
    modelObj['modelObj']['predictedClasses']=predictedClass
    modelObj['modelObj']['dataSet']=None
    return modelObj
def trainSimpleDNNObjWithPrepro(self,modelObj,tensorboardLogFolder,dataObj):
    """Train a simple (tabular) DNN after running the user's preprocessing.

    The preprocessing callable stored in modelObj['preprocessing']['codeObj']
    is applied to `dataObj` and must return (features_df, target_df). For
    classification problems the target column is label-binarized before the
    train/validation split, mirroring trainSimpleDNNObj.

    :param modelObj: dict describing the model, data and preprocessing
    :param tensorboardLogFolder: folder for TensorBoard logs
    :param dataObj: raw pandas DataFrame loaded from modelObj['Data']
    :return: modelObj with the fitted model and predicted class labels
    """
    dataFolder=modelObj['Data']
    datHyperPara=modelObj['modelObj']['hyperparameters']
    listOfMetrics=datHyperPara['metrics']
    modelV1=modelObj['modelObj']['recoModelObj'].model
    print(">>>>>>>>>>>>>>SimpleDNN with Prepro")
    print('pathofdata>>>>>',dataFolder)
    print (modelObj['preprocessing']['codeObj'])
    print (dataObj.shape)
    # run the user-supplied preprocessing: yields features and target frames
    scriptCode=modelObj['preprocessing']['codeObj']
    dfX,dfY=scriptCode(dataObj)
    print ('data prepared',dfX.shape)
    targetCol = list(dfY.columns)[0]
    if datHyperPara['problemType']=='classification':
        lb=preprocessing.LabelBinarizer()
        # BUG FIX: the old code binarized the column *name* string
        # (targetCol) instead of the column values, and `targetCol.unique()`
        # raised AttributeError on str.
        y=lb.fit_transform(dfY[targetCol])
        predictedClass = list(dfY[targetCol].unique())
    else:
        y=dfY[targetCol]
        predictedClass=None
    ##### Split data into test and validation set for training#################################
    # BUG FIX: split against the prepared labels `y` (binarized for
    # classification); the old code split on the raw dfY and ignored y,
    # unlike the sibling trainSimpleDNNObj.
    trainDataX,testDataX,trainDataY,testDataY=model_selection.train_test_split(dfX,y,
        test_size=datHyperPara['testSize'])
    stepsPerEpochT=int(len(trainDataX)/datHyperPara['batchSize'])
    stepsPerEpochV=int(len(testDataX)/datHyperPara['batchSize'])
    kerasUtilities.updateStatusOfTraining(self.statusFile,'Data split in Train validation part')
    tensor_board = self.startTensorBoard(tensorboardLogFolder)
    try:
        # compile/fit must run inside the graph/session this model was built in
        model_graph = modelObj['modelObj']['model_graph']
        tf_session = modelObj['modelObj']['tf_session']
        with model_graph.as_default():
            with tf_session.as_default():
                if 'f1' in listOfMetrics:
                    # 'f1' is not a built-in Keras metric: swap the string
                    # for the custom self.f1 callable before compiling
                    listOfMetrics.remove('f1')
                    optiMi=self.setOptimizer(datHyperPara['optimizer'],datHyperPara['learningRate'])
                    modelV1.compile(optimizer=optiMi, loss=datHyperPara['loss'],metrics=listOfMetrics+[self.f1])
                    import tensorflow as tf
                    kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Started')
                    with tf.device(gpuCPUSelect(selDev)):
                        modelV1.fit(x=trainDataX, y=trainDataY, epochs=datHyperPara['epoch'], callbacks=[tensor_board],\
                        validation_data=(testDataX, testDataY), steps_per_epoch=stepsPerEpochT, validation_steps=stepsPerEpochV)
                else:
                    optiMi=self.setOptimizer(datHyperPara['optimizer'],datHyperPara['learningRate'])
                    modelV1.compile(optimizer=optiMi, loss=datHyperPara['loss'], metrics=listOfMetrics)
                    import tensorflow as tf
                    kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Started')
                    with tf.device(gpuCPUSelect(selDev)):
                        modelV1.fit(x=trainDataX, y=trainDataY, epochs=datHyperPara['epoch'], callbacks=[tensor_board],\
                        validation_data=(testDataX, testDataY), steps_per_epoch=stepsPerEpochT, validation_steps=stepsPerEpochV)
        print ('9'*500)
    except Exception as e:
        print ('Came here 2'*5 )
        data_details=self.upDateStatus()
        self.updateStatusWithError(data_details,'Training Failed','Error while fitting data to Keras Model >> '+ str(e),traceback.format_exc(),self.statusFile)
    # NOTE(review): status is marked 'Training Completed' even when the
    # except branch above fired; kept for parity with the sibling trainers.
    kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Completed')
    modelObj['modelObj']['recoModelObj'].model=modelV1
    modelObj['modelObj']['predictedClasses']=predictedClass
    modelObj['modelObj']['dataSet']=None
    return modelObj
def trainImageClassifierNN(self,modelObj,tensorboardLogFolder):
    """Train an image-classification network from a directory of images.

    Expects modelObj['Data'] to point at a folder containing 'train/' and
    'validation/' sub-folders as consumed by the Keras directory iterators
    built in self.kerasDataPrep. The model's own input_shape determines
    the image size the generators produce.

    :param modelObj: dict describing the model, its data and hyperparameters
    :param tensorboardLogFolder: folder for TensorBoard logs
    :return: modelObj with the fitted model and class names, or -1 on failure
    """
    print ('Enter image classifier')
    dataFolder=modelObj['Data']
    # modelToCompile=modelObj['modelObj']['recoModelObj']
    datHyperPara=modelObj['modelObj']['hyperparameters']
    print ('datHyperPara',datHyperPara)
    listOfMetrics=datHyperPara['metrics']
    modelV1=modelObj['modelObj']['recoModelObj'].model
    print ('Classification data folder at',dataFolder)
    try:
        # expected layout: <dataFolder>/train/ and <dataFolder>/validation/
        self.trainFolder=dataFolder+'/'+'train/'
        self.validationFolder=dataFolder+'/'+'validation/'
        kerasUtilities.checkCreatePath(self.trainFolder)
        kerasUtilities.checkCreatePath(self.validationFolder)
    except Exception as e:
        data_details=self.upDateStatus()
        self.updateStatusWithError(data_details,'Training Failed','Unable to find train and validation folder >> '+ str(e),traceback.format_exc(),self.statusFile)
        return -1
    try:
        # image size is dictated by the model's input layer: (N, H, W, C)
        img_height, img_width=modelV1.input_shape[1:3]
    except Exception as e:
        data_details=self.upDateStatus()
        self.updateStatusWithError(data_details,'Training Failed','Model input_shape is invalid >> '+ str(e),traceback.format_exc(),self.statusFile)
        return -1
    try:
        # build train/validation generators and derive steps per epoch
        tGen,vGen,nClass=self.kerasDataPrep(dataFolder,datHyperPara['batchSize'],img_height,img_width)
        stepsPerEpochT=tGen.n/tGen.batch_size
        stepsPerEpochV=vGen.n/vGen.batch_size
    except Exception as e:
        data_details=self.upDateStatus()
        self.updateStatusWithError(data_details,'Training Failed','Error while generating data for Keras >> '+ str(e),traceback.format_exc(),self.statusFile)
        return -1
    tensor_board = self.startTensorBoard(tensorboardLogFolder)
    try:
        print ('Came here 1'*5 )
        # compile/fit must run inside the graph/session this model was built in
        model_graph = modelObj['modelObj']['model_graph']
        tf_session = modelObj['modelObj']['tf_session']
        with model_graph.as_default():
            with tf_session.as_default():
                if 'f1' in listOfMetrics:
                    # 'f1' is not a built-in Keras metric: swap the string
                    # for the custom self.f1 callable before compiling
                    listOfMetrics.remove('f1')
                    optiMi=self.setOptimizer(datHyperPara['optimizer'],datHyperPara['learningRate'])
                    modelV1.compile(optimizer=optiMi, loss=datHyperPara['loss'], metrics=listOfMetrics+[self.f1])
                    import tensorflow as tf
                    kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Started')
                    with tf.device(gpuCPUSelect(selDev)):
                        modelV1.fit_generator(tGen,steps_per_epoch=stepsPerEpochT,validation_steps=stepsPerEpochV,epochs=datHyperPara['epoch'],validation_data=vGen,callbacks=[tensor_board])
                else:
                    optiMi=self.setOptimizer(datHyperPara['optimizer'],datHyperPara['learningRate'])
                    modelV1.compile(optimizer=optiMi, loss=datHyperPara['loss'], metrics=listOfMetrics)
                    import tensorflow as tf
                    kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Started')
                    with tf.device(gpuCPUSelect(selDev)):
                        modelV1.fit_generator(tGen,steps_per_epoch=stepsPerEpochT,validation_steps=stepsPerEpochV,epochs=datHyperPara['epoch'],validation_data=vGen,callbacks=[tensor_board])
    except Exception as e:
        print ('Came here 2'*5 )
        data_details=self.upDateStatus()
        self.updateStatusWithError(data_details,'Training Failed','Error while compiling Model >> '+ str(e),traceback.format_exc(),self.statusFile)
    # NOTE(review): executed even when the except above fired, so the status
    # file may record both 'Training Failed' and 'Training Completed'
    kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Completed')
    predictedClass=list(tGen.class_indices.keys())
    modelObj['modelObj']['recoModelObj'].model=modelV1
    modelObj['modelObj']['predictedClasses']=predictedClass
    modelObj['modelObj']['dataSet']='image'
    print (modelObj)
    return modelObj
def getSKLMOdelObjtoFit(self,modelV1):
from sklearn import ensemble,tree,linear_model
# print (str(modelV1))
if 'RandomForestRegressor' in str(modelV1):
return ensemble.RandomForestRegressor()
elif 'RandomForestClassifier' in str(modelV1):
return ensemble.RandomForestClassifier()
elif 'GradientBoostingClassifier' in str(modelV1):
return ensemble.GradientBoostingClassifier()
elif 'GradientBoostingRegressor' in str(modelV1):
return ensemble.GradientBoostingRegressor()
elif 'ExtraTreesClassifier' in str(modelV1):
return ensemble.ExtraTreesClassifier()
elif 'ExtraTreesRegressor' in str(modelV1):
return ensemble.ExtraTreesRegressor()
elif 'LinearRegression' in str(modelV1):
return linear_model.LinearRegression()
elif 'LogisticRegression' in str(modelV1):
return linear_model.LogisticRegression()
return modelV1
def trainComplicatedDNNObj(self,modelObj,tensorboardLogFolder,scriptOutputPrepro):
# print ('*'*500)
# print ('Came to complicated part',modelObj)
# print (scriptOutputPrepro)
if scriptOutputPrepro=='DATA':
dataObj,tar=modelObj['preprocessing']['codeObj']()
# print (dataObj.shape)
else:
pass
predictedClass=None
# df = dataObj
datHyperPara=modelObj['modelObj']['hyperparameters']
# print (datHyperPara)
if modelObj['modelObj']['modelArchType']=='SKLModel':
modelV1=modelObj['modelObj']['recoModelObj']
if str(type(modelV1))=="<class 'lightgbm.basic.Booster'>":
print('Booster Model started')
import lightgbm as lgb
train_data=lgb.Dataset(dataObj,tar)
newmodel1_ = lgb.train(datHyperPara, train_data,init_model=modelV1)
newmodel1_.params['objective']=datHyperPara['objective']
modelObj['modelObj']['recoModelObj']=newmodel1_
print ('Training Finished!')
else:
modelV1=self.getSKLMOdelObjtoFit(modelV1)
modelV1.fit(df,tar)
modelObj['modelObj']['recoModelObj']=modelV1
else:
listOfMetrics=datHyperPara['metrics']
targetCol = tar
# print(">>>>>>>>>>>>>>SimpleDNN")
modelV1=modelObj['modelObj']['recoModelObj'].model
if datHyperPara['problemType']=='classification':
lb=preprocessing.LabelBinarizer()
y=lb.fit_transform(targetCol)
predictedClass = list(targetCol.unique())
else:
y=tar
predictedClass=None
# print(df.shape,y.shape,datHyperPara['testSize'])
trainDataX,testDataX,trainDataY,testDataY=model_selection.train_test_split(df,tar,test_size=datHyperPara['testSize'])
stepsPerEpochT=int(len(trainDataX)/datHyperPara['batchSize'])
stepsPerEpochV=int(len(testDataX)/datHyperPara['batchSize'])
kerasUtilities.updateStatusOfTraining(self.statusFile,'Data split in Train validation part')
# modelObj = self.generateAndCompileModel(datHyperPara['lossType'],datHyperPara['optimizerName'],datHyperPara['learningRate'],listOfMetrics)
# if modelObj.__class__.__name__ == 'dict':
# return
# model = modelObj.model
tensor_board = self.startTensorBoard(tensorboardLogFolder)
try:
# print ('Came here 1'*5 )
model_graph = modelObj['modelObj']['model_graph']
tf_session = modelObj['modelObj']['tf_session']
with model_graph.as_default():
with tf_session.as_default():
if 'f1' in listOfMetrics:
listOfMetrics.remove('f1')
optiMi=self.setOptimizer(datHyperPara['optimizer'],datHyperPara['learningRate'])
modelV1.compile(optimizer=optiMi, loss=datHyperPara['lossType'],metrics=listOfMetrics+[self.f1])
import tensorflow as tf
kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Started')
with tf.device(gpuCPUSelect(selDev)):
modelV1.fit(x=trainDataX, y=trainDataY, epochs=datHyperPara['epoch'], callbacks=[tensor_board],\
validation_data=(testDataX, testDataY), steps_per_epoch=stepsPerEpochT, validation_steps=stepsPerEpochV)
else:
optiMi=self.setOptimizer(datHyperPara['optimizer'],datHyperPara['learningRate'])
modelV1.compile(optimizer=optiMi, loss=datHyperPara['lossType'], metrics=listOfMetrics)
import tensorflow as tf
kerasUtilities.updateStatusOfTraining(self.statusFile,'Training Started')
with tf.device(gpuCPUSelect(selDev)):
modelV1.fit(x=trainDataX, y=trainDataY, epochs=datHyperPara['epoch'], callbacks=[tensor_board],\
validation_data=(testDataX, testDataY), steps_per_epoch=stepsPerEpochT, validation_steps=stepsPerEpochV)
# print ('9'*500)
modelObj['modelObj']['recoModelObj'].model=modelV1
except Exception as e:
print ('Came here 2'*5 )
data_details=self.upDateStatus()
self.updateStatusWithError(data_details,'Training Failed','Error while fitting data to Keras Model >> '+ str(e),traceback.format_exc(),self.statusFile)
kerasUtilities.updateStatusOfTraining(self.statusFile,'Training 1st part completed')
return modelObj
def restructureModelInforForExportDict(self,tempDict):
# print ('tempDict',tempDict)
listOfModelNames=set([k for j in tempDict for k in tempDict[j]])
toExportDict={}
for objDet in listOfModelNames:
toExportDict[objDet]={'hyperparameters':None,'data':None,
'preProcessingScript':{'scripts':[], 'scriptpurpose':[],'scriptOutput':[],'scriptPath':[]},
'modelObj':None,'pipelineObj':None,'featuresUsed':None,'targetName':None,
'postProcessingScript':{'scripts':[], 'scriptpurpose':[],'scriptOutput':[],'scriptPath':[]},
'taskType': None,'modelPath':None,'predictedClasses':None,'dataSet':None}
for modObjeCom in tempDict:
# print ('>>>>>>>>modObj >>>>>> ',modObjeCom)
if modObjeCom == 'train':
for echMod in toExportDict:
if echMod in tempDict[modObjeCom]:
# print ('>>>>>',echMod)
# print ('8'*100)
# print ('tempDict.keys()',tempDict.keys())
# print (tempDict[modObjeCom][echMod]['scriptPath'])
if tempDict[modObjeCom][echMod]['modelObj']['modelArchType']=='NNModel':
# print (tempDict[modObjeCom][echMod]['modelObj'])
toExportDict[echMod]['modelObj']=tempDict[modObjeCom][echMod]['modelObj']['recoModelObj'].model
toExportDict[echMod]['model_graph']=tempDict[modObjeCom][echMod]['modelObj']['model_graph']
toExportDict[echMod]['tf_session']=tempDict[modObjeCom][echMod]['modelObj']['tf_session']
else:
toExportDict[echMod]['modelObj']=tempDict[modObjeCom][echMod]['modelObj']['recoModelObj']
if 'preprocessing' in tempDict[modObjeCom][echMod]:
toExportDict[echMod]['preProcessingScript']['scripts'].append(tempDict[modObjeCom][echMod]['preprocessing']['codeCont'])
toExportDict[echMod]['preProcessingScript']['scriptpurpose'].append(modObjeCom)
toExportDict[echMod]['preProcessingScript']['scriptOutput'].append(tempDict[modObjeCom][echMod]['preprocessing']['scriptOutput'])
toExportDict[echMod]['preProcessingScript']['scriptPath'].append(tempDict[modObjeCom][echMod]['preprocessing']['scriptPath'])
if 'postprocessing' in tempDict[modObjeCom][echMod]:
toExportDict[echMod]['postProcessingScript']['scripts'].append(tempDict[modObjeCom][echMod]['postprocessing']['codeCont'])
toExportDict[echMod]['postProcessingScript']['scriptpurpose'].append(modObjeCom)
toExportDict[echMod]['postProcessingScript']['scriptOutput'].append(tempDict[modObjeCom][echMod]['postprocessing']['scriptOutput'])
toExportDict[echMod]['postProcessingScript']['scriptPath'].append(tempDict[modObjeCom][echMod]['postprocessing']['scriptPath'])
toExportDict[echMod]['taskType']=modObjeCom
toExportDict[echMod]['featuresUsed']=tempDict[modObjeCom][echMod]['modelObj']['listOFColumns']
toExportDict[echMod]['targetName']=tempDict[modObjeCom][echMod]['modelObj']['targetCol']
toExportDict[echMod]['hyperparameters']=tempDict[modObjeCom][echMod]['modelObj']['hyperparameters']
toExportDict[echMod]['modelPath']=tempDict[modObjeCom][echMod]['modelObj']['modelPath']
toExportDict[echMod]['predictedClasses']=tempDict[modObjeCom][echMod]['modelObj']['predictedClasses']
if 'Data' in tempDict[modObjeCom][echMod]:
print ('Came here to train model')
import pathlib
pathObj=pathlib.Path(tempDict[modObjeCom][echMod]['Data'])
if pathObj.is_dir():
toExportDict[echMod]['dataSet']='image'
else:
toExportDict[echMod]['dataSet']=None
toExportDict[echMod]['data']=tempDict[modObjeCom][echMod]['Data']
if modObjeCom == 'score':
for echMod in toExportDict:
if echMod in tempDict[modObjeCom]:
# print ('>>>>',echMod)
if tempDict[modObjeCom][echMod]['modelObj']['modelArchType']=='NNModel':
# print (tempDict[modObjeCom][echMod]['modelObj'])
toExportDict[echMod]['modelObj']=tempDict[modObjeCom][echMod]['modelObj']['recoModelObj'].model
toExportDict[echMod]['model_graph']=tempDict[modObjeCom][echMod]['modelObj']['model_graph']
toExportDict[echMod]['tf_session']=tempDict[modObjeCom][echMod]['modelObj']['tf_session']
else:
toExportDict[echMod]['modelObj']=tempDict[modObjeCom][echMod]['modelObj']['recoModelObj']
if 'preprocessing' in tempDict[modObjeCom][echMod]:
toExportDict[echMod]['preProcessingScript']['scripts'].append(tempDict[modObjeCom][echMod]['preprocessing']['codeCont'])
toExportDict[echMod]['preProcessingScript']['scriptpurpose'].append(modObjeCom)
toExportDict[echMod]['preProcessingScript']['scriptOutput'].append(tempDict[modObjeCom][echMod]['preprocessing']['scriptOutput'])
toExportDict[echMod]['preProcessingScript']['scriptPath'].append(tempDict[modObjeCom][echMod]['preprocessing']['scriptPath'])
if 'postprocessing' in tempDict[modObjeCom][echMod]:
toExportDict[echMod]['postProcessingScript']['scripts'].append(tempDict[modObjeCom][echMod]['postprocessing']['codeCont'])
toExportDict[echMod]['postProcessingScript']['scriptpurpose'].append(modObjeCom)
toExportDict[echMod]['postProcessingScript']['scriptOutput'].append(tempDict[modObjeCom][echMod]['postprocessing']['scriptOutput'])
toExportDict[echMod]['postProcessingScript']['scriptPath'].append(tempDict[modObjeCom][echMod]['postprocessing']['scriptPath'])
toExportDict[echMod]['taskType']=modObjeCom
toExportDict[echMod]['featuresUsed']=tempDict[modObjeCom][echMod]['modelObj']['listOFColumns']
toExportDict[echMod]['targetName']=tempDict[modObjeCom][echMod]['modelObj']['targetCol']
toExportDict[echMod]['hyperparameters']=tempDict[modObjeCom][echMod]['modelObj']['hyperparameters']
toExportDict[echMod]['modelPath']=tempDict[modObjeCom][echMod]['modelObj']['modelPath']
toExportDict[echMod]['predictedClasses']=tempDict[modObjeCom][echMod]['modelObj']['predictedClasses']
if 'Data' in tempDict[modObjeCom][echMod]:
import pathlib
pathObj=pathlib.Path(tempDict[modObjeCom][echMod]['Data'])
if pathObj.is_dir():
toExportDict[echMod]['dataSet']='image'
else:
toExportDict[echMod]['dataSet']=None
toExportDict[echMod]['data']=tempDict[modObjeCom][echMod]['Data']
for modNa in listOfModelNames:
if ('train' in list(tempDict.keys())) & ('score' in list(tempDict.keys())):
if (modNa in tempDict['train']) & (modNa in tempDict['score']):
toExportDict[modNa]['taskType']='trainAndscore'
print (tempDict['train'][modNa].keys())
print (tempDict['score'][modNa].keys())
print ('p'*100)
if ('scriptOutput' in tempDict['train'][modNa]) & ('scriptOutput' in tempDict['score'][modNa]):
print ('Condition pass 1')
if (tempDict['train'][modNa]['scriptOutput']==tempDict['score'][modNa]['scriptOutput']):
print ('Condition pass 2')
print ('Came here >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',toExportDict[modNa])
if toExportDict[modNa]['preProcessingScript']['scripts'] != None:
print ('Condition pass 3')
toExportDict[modNa]['preProcessingScript']['scriptOutput']=tempDict['train'][modNa]['scriptOutput']
toExportDict[modNa]['preProcessingScript']['scriptpurpose']=['trainAndscore']
# print( '>>>>>>>>',tempDict['score'][modNa]['preprocessing_code'])
toExportDict[modNa]['preProcessingScript']['scripts']=[tempDict['score'][modNa]['preprocessing']['codeCont']]
toExportDict[modNa]['preProcessingScript']['scriptPath']=[tempDict['score'][modNa]['scriptPath']]
elif toExportDict[modNa]['postProcessingScript']['scripts'] != None:
print ('Condition pass 4')
toExportDict[modNa]['postProcessingScript']['scriptOutput']=tempDict['train'][modNa]['scriptOutput']
toExportDict[modNa]['postProcessingScript']['scriptpurpose']=['trainAndscore']
# print( '>>>>>>>>',tempDict['score'][modNa]['preprocessing_code'])
toExportDict[modNa]['postProcessingScript']['scripts']=[tempDict['score'][modNa]['postprocessing']['codeCont']]
toExportDict[modNa]['postProcessingScript']['scriptPath']=[tempDict['score'][modNa]['scriptPath']]
elif ('train' not in list(tempDict.keys())) & ('score' in list(tempDict.keys())):
print ('Condition pass 5')
print ('no train found')
if modNa in tempDict['score']:
toExportDict[modNa]['taskType']='score'
print ('p'*100)
if 'scriptOutput' in tempDict['score'][modNa]:
print ('Came here >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',toExportDict[modNa])
if toExportDict[modNa]['preProcessingScript']['scripts'] != None:
toExportDict[modNa]['preProcessingScript']['scriptOutput']=tempDict['score'][modNa]['scriptOutput']
toExportDict[modNa]['preProcessingScript']['scriptpurpose']=['score']
# print( '>>>>>>>>',tempDict['score'][modNa]['preprocessing_code'])
toExportDict[modNa]['preProcessingScript']['scripts']=[tempDict['score'][modNa]['preprocessing']['codeCont']]
toExportDict[modNa]['preProcessingScript']['scriptPath']=[tempDict['score'][modNa]['scriptPath']]
elif toExportDict[modNa]['postProcessingScript']['scripts'] != None:
toExportDict[modNa]['postProcessingScript']['scriptOutput']=tempDict['score'][modNa]['scriptOutput']
toExportDict[modNa]['postProcessingScript']['scriptpurpose']=['score']
# print( '>>>>>>>>',tempDict['score'][modNa]['preprocessing_code'])
toExportDict[modNa]['postProcessingScript']['scripts']=[tempDict['score'][modNa]['postprocessing']['codeCont']]
toExportDict[modNa]['postProcessingScript']['scriptPath']=[tempDict['score'][modNa]['scriptPath']]
elif ('train' in list(tempDict.keys())) & ('score' not in list(tempDict.keys())):
| |
# nngen/verify/basic.py
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import nngen.util as util
def add(x, y, dtype=None, name=None, par=1,
        x_dtype=None, y_dtype=None):
    """Return the fixed-point sum of *x* and *y*, rescaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    y_point = y_dtype.point if y_dtype is not None else 0
    src_point = max(x_point, y_point)
    dst_point = dtype.point if dtype is not None else src_point
    # align both operands to the output binary point before adding
    total = (x << (dst_point - x_point)) + (y << (dst_point - y_point))
    shift = dst_point - src_point
    return total << shift if shift >= 0 else total >> -shift
def sub(x, y, dtype=None, name=None, par=1,
        x_dtype=None, y_dtype=None):
    """Return the fixed-point difference ``x - y``, rescaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    y_point = y_dtype.point if y_dtype is not None else 0
    src_point = max(x_point, y_point)
    dst_point = dtype.point if dtype is not None else src_point
    # align both operands to the output binary point before subtracting
    diff = (x << (dst_point - x_point)) - (y << (dst_point - y_point))
    shift = dst_point - src_point
    return diff << shift if shift >= 0 else diff >> -shift
def neg(x, dtype=None, name=None, par=1,
        x_dtype=None):
    """Return ``-x``, rescaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    dst_point = dtype.point if dtype is not None else x_point
    value = np.negative(x << (dst_point - x_point))
    shift = dst_point - x_point
    return value << shift if shift >= 0 else value >> -shift
def abs(x, dtype=None, name=None, par=1,
        x_dtype=None):
    """Return ``|x|``, rescaled to *dtype*.

    Deliberately shadows the builtin ``abs`` to mirror the operator API.
    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    dst_point = dtype.point if dtype is not None else x_point
    value = np.abs(x << (dst_point - x_point))
    shift = dst_point - x_point
    return value << shift if shift >= 0 else value >> -shift
def zeros_imm(shape, dtype=None, name=None, par=1):
    """Return an int64 zero array of *shape*, scaled to the point of *dtype*.

    Bug fix: the original read an undefined ``x_point`` when *dtype* was
    None (NameError on every such call); an immediate has no input
    operand, so the source binary point is 0.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    out_point = 0 if dtype is None else dtype.point
    out_shift = out_point
    out_op = ((lambda x: x << out_shift) if out_shift >= 0 else
              (lambda x: x >> -out_shift))
    ret = out_op(np.zeros(shape, dtype=np.int64))
    return ret
def zeros_imm_like(x, dtype=None, name=None, par=1):
    """Return a zero immediate with the same shape as *x*."""
    return zeros_imm(x.shape, dtype, name, par)
def ones_imm(shape, dtype=None, name=None, par=1):
    """Return an int64 array of ones of *shape*, scaled to the point of *dtype*.

    Bug fix: the original read an undefined ``x_point`` when *dtype* was
    None (NameError on every such call); an immediate has no input
    operand, so the source binary point is 0.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    out_point = 0 if dtype is None else dtype.point
    out_shift = out_point
    out_op = ((lambda x: x << out_shift) if out_shift >= 0 else
              (lambda x: x >> -out_shift))
    ret = out_op(np.ones(shape, dtype=np.int64))
    return ret
def ones_imm_like(x, dtype=None, name=None, par=1):
    """Return a ones immediate with the same shape as *x*."""
    return ones_imm(x.shape, dtype, name, par)
def full_imm(shape, fill_value, dtype=None, name=None, par=1):
    """Return an int64 array of *shape* filled with *fill_value*, scaled
    to the point of *dtype*.

    Bug fix: the original read an undefined ``x_point`` when *dtype* was
    None (NameError on every such call); an immediate has no input
    operand, so the source binary point is 0.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    out_point = 0 if dtype is None else dtype.point
    out_shift = out_point
    out_op = ((lambda x: x << out_shift) if out_shift >= 0 else
              (lambda x: x >> -out_shift))
    ret = out_op(np.full(shape, fill_value, dtype=np.int64))
    return ret
def full_imm_like(x, fill_value, dtype=None, name=None, par=1):
    """Return a constant immediate shaped like *x*, filled with *fill_value*."""
    return full_imm(x.shape, fill_value, dtype, name, par)
def equal(x, y, dtype=None, name=None, par=1,
          x_dtype=None, y_dtype=None):
    """Return 1 where ``x == y`` and 0 elsewhere, scaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    y_point = y_dtype.point if y_dtype is not None else 0
    src_point = max(x_point, y_point)
    dst_point = dtype.point if dtype is not None else 0
    # compare the operands after aligning their binary points
    flags = (x << (dst_point - x_point)) == (y << (dst_point - y_point))
    shift = dst_point - src_point
    return flags << shift if shift >= 0 else flags >> -shift
def not_equal(x, y, dtype=None, name=None, par=1,
              x_dtype=None, y_dtype=None):
    """Return 1 where ``x != y`` and 0 elsewhere, scaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    y_point = y_dtype.point if y_dtype is not None else 0
    src_point = max(x_point, y_point)
    dst_point = dtype.point if dtype is not None else 0
    # compare the operands after aligning their binary points
    flags = (x << (dst_point - x_point)) != (y << (dst_point - y_point))
    shift = dst_point - src_point
    return flags << shift if shift >= 0 else flags >> -shift
def less(x, y, dtype=None, name=None, par=1,
         x_dtype=None, y_dtype=None):
    """Return 1 where ``x < y`` and 0 elsewhere, scaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    y_point = y_dtype.point if y_dtype is not None else 0
    src_point = max(x_point, y_point)
    dst_point = dtype.point if dtype is not None else 0
    # compare the operands after aligning their binary points
    flags = (x << (dst_point - x_point)) < (y << (dst_point - y_point))
    shift = dst_point - src_point
    return flags << shift if shift >= 0 else flags >> -shift
def less_equal(x, y, dtype=None, name=None, par=1,
               x_dtype=None, y_dtype=None):
    """Return 1 where ``x <= y`` and 0 elsewhere, scaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    y_point = y_dtype.point if y_dtype is not None else 0
    src_point = max(x_point, y_point)
    dst_point = dtype.point if dtype is not None else 0
    # compare the operands after aligning their binary points
    flags = (x << (dst_point - x_point)) <= (y << (dst_point - y_point))
    shift = dst_point - src_point
    return flags << shift if shift >= 0 else flags >> -shift
def greater(x, y, dtype=None, name=None, par=1,
            x_dtype=None, y_dtype=None):
    """Return 1 where ``x > y`` and 0 elsewhere, scaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    y_point = y_dtype.point if y_dtype is not None else 0
    src_point = max(x_point, y_point)
    dst_point = dtype.point if dtype is not None else 0
    # compare the operands after aligning their binary points
    flags = (x << (dst_point - x_point)) > (y << (dst_point - y_point))
    shift = dst_point - src_point
    return flags << shift if shift >= 0 else flags >> -shift
def greater_equal(x, y, dtype=None, name=None, par=1,
                  x_dtype=None, y_dtype=None):
    """Return 1 where ``x >= y`` and 0 elsewhere, scaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    y_point = y_dtype.point if y_dtype is not None else 0
    src_point = max(x_point, y_point)
    dst_point = dtype.point if dtype is not None else 0
    # compare the operands after aligning their binary points
    flags = (x << (dst_point - x_point)) >= (y << (dst_point - y_point))
    shift = dst_point - src_point
    return flags << shift if shift >= 0 else flags >> -shift
def sign_binary(x, dtype=None, name=None, par=1,
                x_dtype=None):
    """Map strictly positive values to +1 and all others (including 0)
    to -1, rescaled to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    dst_point = dtype.point if dtype is not None else x_point
    scaled = x << (dst_point - x_point)
    zeros = np.zeros_like(scaled, dtype=np.int64)
    ones = np.ones_like(scaled, dtype=np.int64)
    signs = np.where(scaled > zeros, ones, np.negative(ones))
    shift = dst_point - x_point
    return signs << shift if shift >= 0 else signs >> -shift
def sign_ternary(x, dtype=None, name=None, par=1,
                 x_dtype=None):
    """Map positives to +1, zeros to 0 and negatives to -1, rescaled
    to *dtype*.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    dst_point = dtype.point if dtype is not None else x_point
    scaled = x << (dst_point - x_point)
    zeros = np.zeros_like(scaled, dtype=np.int64)
    ones = np.ones_like(scaled, dtype=np.int64)
    signs = np.where(scaled > zeros, ones,
                     np.where(scaled == zeros, zeros, np.negative(ones)))
    shift = dst_point - x_point
    return signs << shift if shift >= 0 else signs >> -shift
def where(condition, x, y, dtype=None, name=None, par=1,
          condition_dtype=None, x_dtype=None, y_dtype=None):
    """Select from *x* where *condition* holds, else from *y*, rescaled
    to *dtype*.

    `name`, `par` and `condition_dtype` are accepted for API
    compatibility and unused here.
    """
    x_point = x_dtype.point if x_dtype is not None else 0
    y_point = y_dtype.point if y_dtype is not None else 0
    src_point = max(x_point, y_point)
    dst_point = dtype.point if dtype is not None else src_point
    # align both branches to the output binary point before selecting
    selected = np.where(condition,
                        x << (dst_point - x_point),
                        y << (dst_point - y_point))
    shift = dst_point - src_point
    return selected << shift if shift >= 0 else selected >> -shift
def add_n(arg, dtype=None, name=None, par=1,
          arg_dtypes=None):
    """Return the fixed-point sum of every operand in *arg*, rescaled
    to *dtype*.

    Parameters
    ----------
    arg : tuple or list
        Operands to sum; must be non-empty.
    arg_dtypes : tuple or list or None
        Per-operand dtypes (same length as *arg*); None means point 0.

    Raises
    ------
    TypeError
        If *arg* or *arg_dtypes* is not a tuple/list.
    ValueError
        If the lengths differ, or *arg* is empty.

    `name` and `par` are accepted for API compatibility and unused here.
    """
    if not isinstance(arg, (tuple, list)):
        raise TypeError('expected tuple or list')
    if arg_dtypes is None:
        arg_dtypes = [None for _ in arg]
    if not isinstance(arg_dtypes, (tuple, list)):
        raise TypeError('expected tuple or list')
    if len(arg) != len(arg_dtypes):
        raise ValueError('length mismatch: %d != %d' % (len(arg), len(arg_dtypes)))
    if not arg:
        raise ValueError('expected at least one operand')
    arg_points = [0 if a_dtype is None else a_dtype.point
                  for a_dtype in arg_dtypes]
    # bug fix: max(arg_points), not max(*arg_points) -- the unpacked form
    # raised TypeError when arg held exactly one operand
    max_arg_point = max(arg_points)
    out_point = max_arg_point if dtype is None else dtype.point
    out_shift = out_point - max_arg_point
    # align every operand to the output binary point before summing
    values = [a << (out_point - a_point)
              for a, a_point in zip(arg, arg_points)]
    out_op = ((lambda x: x << out_shift) if out_shift >= 0 else
              (lambda x: x >> -out_shift))
    ret = values[0]
    for value in values[1:]:
        ret += value
    ret = out_op(ret)
    return ret
def lshift(x, y, dtype=None, name=None, | |
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import abc
from collections import namedtuple
from skbio import TreeNode
import pandas as pd
import numpy
class Dendrogram(TreeNode):
    """ Stores data to be plotted as a dendrogram.

    A `Dendrogram` object represents a tree in addition to the
    key information required to create a tree layout prior to
    visualization. No layouts are specified within this class,
    since this serves as a super class for different tree layouts.

    Parameters
    ----------
    use_lengths: bool
        Specifies if the branch lengths should be included in the
        resulting visualization (default True).

    Attributes
    ----------
    length
    leafcount
    height
    depth

    Notes
    -----
    `length` refers to the branch length of a node to its parent.
    `leafcount` is the number of tips within a subtree. `height` refers
    to the longest path from root to the deepest leaf in that subtree.
    `depth` is the number of nodes found in the longest path.
    """
    def __init__(self, use_lengths=True, **kwargs):
        """ Constructs a Dendrogram object for visualization.

        Parameters
        ----------
        use_lengths: bool
            Specifies if the branch lengths should be included in the
            resulting visualization (default True).

        Notes
        -----
        NOTE(review): ``use_lengths`` is accepted but neither stored nor
        used here; the geometry methods take their own ``use_lengths``
        argument.  Confirm whether this parameter should be persisted.
        """
        super().__init__(**kwargs)
    def _cache_ntips(self):
        """ Counts the number of leaves under each subtree.

        Stores the count on every node as the ``leafcount`` attribute.
        """
        # postorder guarantees children are counted before their parent
        for n in self.postorder():
            if n.is_tip():
                n.leafcount = 1
            else:
                n.leafcount = sum(c.leafcount for c in n.children)
    def update_geometry(self, use_lengths, depth=None):
        """Calculate tree node attributes such as height and depth.

        Recursively sets ``length``, ``depth``, ``height`` and
        ``leafcount`` on this node and every node below it.

        Parameters
        ----------
        use_lengths: bool
            Specify if the branch length should be incorporated into
            the geometry calculations for visualization.
        depth: int
            The number of nodes in the longest path from root to leaf.
            This is agnostic to scale and orientation.
        """
        if self.length is None or not use_lengths:
            # no usable branch length: the root (depth is None)
            # contributes 0, every other node a unit length
            if depth is None:
                self.length = 0
            else:
                self.length = 1
        self.depth = (depth or 0) + self.length
        children = self.children
        if children:
            for c in children:
                c.update_geometry(use_lengths, self.depth)
            self.height = max([c.height for c in children]) + self.length
            self.leafcount = sum([c.leafcount for c in children])
        else:
            # a tip is its own single leaf/edge
            self.height = self.length
            self.leafcount = self.edgecount = 1
    def coords(self, height, width):
        """ Returns coordinates of nodes to be rendered in plot.

        Parameters
        ----------
        height : int
            The height of the canvas.
        width : int
            The width of the canvas.

        Returns
        -------
        pd.DataFrame
            index : str
                Name of node.
            x : float
                x-coordinate of node.
            y : float
                y-coordinate of node.
            child(i) : str
                Name of ith child node in that specific node.
            is_tip : str
                Specifies if the node is a tip in the tree.

        Notes
        -----
        NOTE(review): nodes are keyed by ``node.name``, so duplicate or
        missing names silently collapse rows -- confirm names are unique.
        """
        # rescale() is provided by the concrete layout subclass
        self.rescale(width, height)
        result = {}
        for node in self.postorder():
            children = {'child%d' % i: n.name
                        for i, n in enumerate(node.children)}
            coords = {'x': node.x2, 'y': node.y2}
            is_tip = {'is_tip': node.is_tip()}
            result[node.name] = {**coords, **children, **is_tip}
        result = pd.DataFrame(result).T
        # reorder so that x and y are first
        cols = ['x', 'y'] + sorted(list(set(result.columns) - set(['x', 'y'])))
        return result.loc[:, cols]
    @abc.abstractmethod
    def rescale(self, width, height):
        # concrete layouts implement the actual coordinate assignment
        pass
class UnrootedDendrogram(Dendrogram):
    """ Stores data to be plotted as an unrooted dendrogram.

    A `UnrootedDendrogram` object represents a tree in addition to the
    key information required to create a radial tree layout prior to
    visualization.

    Parameters
    ----------
    use_lengths: bool
        Specifies if the branch lengths should be included in the
        resulting visualization (default True).

    Attributes
    ----------
    length
    leafcount
    height
    depth
    """
    def __init__(self, **kwargs):
        """ Constructs a UnrootedDendrogram object for visualization.

        Parameters
        ----------
        use_lengths: bool
            Specifies if the branch lengths should be included in the
            resulting visualization (default True).
        """
        super().__init__(**kwargs)
    @classmethod
    def from_tree(cls, tree, use_lengths=True):
        """ Creates an UnrootedDendrogram object from a skbio tree.

        Parameters
        ----------
        tree : skbio.TreeNode
            Input skbio tree
        use_lengths : bool
            Specifies if branch lengths should be used in the geometry
            calculations (default True).

        Returns
        -------
        UnrootedDendrogram
            The same tree object, re-classed in place (no copy is made).
        """
        # rebrand every node in place rather than copying the tree
        for n in tree.postorder():
            n.__class__ = UnrootedDendrogram
        tree.update_geometry(use_lengths)
        return tree
    def rescale(self, width, height):
        """ Find best scaling factor for fitting the tree in the figure.

        This method will find the best orientation and scaling possible
        to fit the tree within the dimensions specified by width and height.

        Parameters
        ----------
        width : float
            width of the canvas
        height : float
            height of the canvas

        Returns
        -------
        best_scaling : float
            largest scaling factor in which the tree can fit in the canvas.

        Notes
        -----
        NOTE(review): if no candidate iteration improves on the initial
        ``best_scale`` of 0 (e.g. a degenerate layout where all points
        coincide, which would also divide by zero), ``best_args`` is
        unbound at the final call -- confirm inputs always yield a
        positive scale.
        """
        # each leaf gets an equal angular wedge of the full circle
        angle = (2 * numpy.pi) / self.leafcount
        # this loop is a horrible brute force hack
        # there are better (but complex) ways to find
        # the best rotation of the tree to fit the display.
        best_scale = 0
        for i in range(60):
            # try 60 evenly spaced starting directions over half a turn
            direction = i / 60.0 * numpy.pi
            # TODO:
            # This function has a little bit of recursion. This will
            # need to be refactored to remove the recursion.
            points = self.update_coordinates(1.0, 0, 0, direction, angle)
            xs, ys = zip(*points)
            # double check that the tree fits within the margins
            scale = min(float(width) / (max(xs) - min(xs)),
                        float(height) / (max(ys) - min(ys)))
            # TODO: This margin seems a bit arbitrary.
            # will need to investigate.
            scale *= 0.95  # extra margin for labels
            if scale > best_scale:
                best_scale = scale
                # recentre the bounding box on the canvas midpoint
                mid_x = width / 2 - ((max(xs) + min(xs)) / 2) * scale
                mid_y = height / 2 - ((max(ys) + min(ys)) / 2) * scale
                best_args = (scale, mid_x, mid_y, direction, angle)
        # replay the winning layout so node coordinates are final
        self.update_coordinates(*best_args)
        return best_scale
    def update_coordinates(self, s, x1, y1, a, da):
        """ Update x, y coordinates of tree nodes in canvas.

        `update_coordinates` will recursively update the
        plotting parameters for all of the nodes within the tree.
        This can be applied when the tree becomes modified (i.e. pruning
        or collapsing) and the resulting coordinates need to be modified
        to reflect the changes to the tree structure.

        Parameters
        ----------
        s : float
            scaling
        x1 : float
            x midpoint
        y1 : float
            y midpoint
        a : float
            angle (radians)
        da : float
            angle resolution (radians)

        Returns
        -------
        points : list of tuple
            2D coordinates of all of the nodes.

        Notes
        -----
        This function has a little bit of recursion. This will
        need to be refactored to remove the recursion.
        """
        # Constant angle algorithm. Should add maximum daylight step.
        x2 = x1 + self.length * s * numpy.sin(a)
        y2 = y1 + self.length * s * numpy.cos(a)
        (self.x1, self.y1, self.x2, self.y2, self.angle) = (x1, y1, x2, y2, a)
        # TODO: Add functionality that allows for collapsing of nodes
        # start at the left edge of this subtree's angular wedge
        a = a - self.leafcount * da / 2
        if self.is_tip():
            points = [(x2, y2)]
        else:
            points = []
            # TODO:
            # This function has a little bit of recursion. This will
            # need to be refactored to remove the recursion.
            for child in self.children:
                # calculate the arc that covers the subtree.
                ca = child.leafcount * da
                points += child.update_coordinates(s, x2, y2, a + ca / 2, da)
                a += ca
        return points
# (x scale, y scale, tree height) bundle threaded through the rooted-layout
# coordinate passes below.
Dimensions = namedtuple('Dimensions', ['x', 'y', 'height'])
class RootedDendrogram(Dendrogram):
""" Stores data to be plotted as an rooted dendrogram.
A `RootedDendrogram` object is represents a tree in addition to the
key information required to create a radial tree layout prior to
visualization.
Parameters
----------
use_lengths: bool
Specifies if the branch lengths should be included in the
resulting visualization (default True).
Attributes
----------
length
leafcount
height
depth
"""
    def width_required(self):
        """ Returns the number of canvas width units this subtree needs.

        One unit is allotted per leaf, so this is the subtree leaf count.
        """
        return self.leafcount
    @abc.abstractmethod
    def xcoords(self, scale, x1):
        """ Computes the x coordinates for this node (layout-specific). """
        pass
    @abc.abstractmethod
    def ycoords(self, scale, y1):
        """ Computes the y coordinates for this node (layout-specific). """
        pass
    def rescale(self, width, height):
        """ Update x, y coordinates of tree nodes in canvas.

        Parameters
        ----------
        width : float
            Width of the canvas.
        height : float
            Height of the canvas.

        Returns
        -------
        float
            The x scaling factor that was applied to the tree.
        """
        # one height unit of the tree maps to width/height canvas pixels;
        # one leaf maps to height/leafcount pixels vertically
        xscale = width / self.height
        yscale = height / self.width_required()
        scale = Dimensions(xscale, yscale, self.height)
        # y coords done postorder, x preorder, y first.
        # so it has to be done in 2 passes.
        self.update_y_coordinates(scale)
        self.update_x_coordinates(scale)
        return xscale
def update_y_coordinates(self, scale, y1=None):
"""The second pass through the tree. Y coordinates only
depend on the shape of the tree and yscale.
Parameters
----------
scale : Dimensions
Scaled dimensions of the tree
x1 : int
X-coordinate of parent
"""
if y1 is None:
y1 = self.width_required() * scale.y
child_y = y1
| |
4 7 4 7 7 7]
[2 4 2 6 4 5 6 7]
[3 7 6 3 7 7 6 7]
[4 4 4 7 4 7 7 7]
[5 7 5 7 7 5 7 7]
[6 7 6 6 7 7 6 7]
[7 7 7 7 7 7 7 7]
TESTS::
sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
sage: H = HasseDiagram({0:[2,3],1:[2,3]})
sage: H.join_matrix()
Traceback (most recent call last):
...
ValueError: Not a join-semilattice: no top element.
sage: H = HasseDiagram({0:[2,3],1:[2,3],2:[4],3:[4]})
sage: H.join_matrix()
Traceback (most recent call last):
...
ValueError: No join for x=...
sage: L = LatticePoset({0:[1,2,3],1:[4],2:[4],3:[4]})
sage: P = L.dual()
sage: P.join(2,3)
0
"""
n = self.cardinality()
join = [[0 for x in range(n)] for x in range(n)]
le = copy(self.lequal_matrix())
for i in range(n): le[i,i] = 1
if not all([le[x,n-1]==1 for x in range(n)]):
raise ValueError("Not a join-semilattice: no top element.")
uc = [sorted([n-1-y[1] for y in self.outgoing_edges([x])]) for
x in reversed(range(n))]
for x in range(n): # x=x_k
join[x][x] = x
for y in range(x):
T = []
for z in uc[x]:
T.append(join[y][z]) # T = {x_i \vee z : z>-x_k}
q = T[0]
for z in T:
if z>q: q = z
for z in T:
if not le[n-1-q,n-1-z]:
raise ValueError("No join for x=%s y=%s"%(x,y))
join[x][y] = q
join[y][x] = q
return matrix(ZZ,[[n-1-join[n-1-x][n-1-y] for y in range(n)] for x in range(n)])
    def join_matrix(self):
        r"""
        Return the matrix of joins of ``self``. The ``(x,y)``-entry
        of this matrix is the join of ``x`` and ``y`` in ``self``.

        This algorithm is modelled after the algorithm of Freese-Jezek-Nation
        (p217). It can also be found on page 140 of [Gec81]_.

        .. note::

            Once the matrix has been computed, it is cached in the
            attribute ``_join`` (presumably a lazy attribute defined
            elsewhere in this class -- this accessor merely returns it).
            Delete that attribute if you want to recompute the matrix.

        EXAMPLES::

            sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
            sage: H = HasseDiagram({0:[1,3,2],1:[4],2:[4,5,6],3:[6],4:[7],5:[7],6:[7],7:[]})
            sage: H.join_matrix()
            [0 1 2 3 4 5 6 7]
            [1 1 4 7 4 7 7 7]
            [2 4 2 6 4 5 6 7]
            [3 7 6 3 7 7 6 7]
            [4 4 4 7 4 7 7 7]
            [5 7 5 7 7 5 7 7]
            [6 7 6 6 7 7 6 7]
            [7 7 7 7 7 7 7 7]

        TESTS::

            sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
            sage: H = HasseDiagram({0:[2,3],1:[2,3]})
            sage: H.join_matrix()
            Traceback (most recent call last):
            ...
            ValueError: Not a join-semilattice: no top element.
            sage: H = HasseDiagram({0:[2,3],1:[2,3],2:[4],3:[4]})
            sage: H.join_matrix()
            Traceback (most recent call last):
            ...
            ValueError: No join for x=...
        """
        return self._join
def is_join_semilattice(self):
r"""
Returns ``True`` if ``self`` has a join operation, and
``False`` otherwise.
EXAMPLES::
sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
sage: H = HasseDiagram({0:[1,3,2],1:[4],2:[4,5,6],3:[6],4:[7],5:[7],6:[7],7:[]})
sage: H.is_join_semilattice()
True
sage: H = HasseDiagram({0:[2,3],1:[2,3]})
sage: H.is_join_semilattice()
False
sage: H = HasseDiagram({0:[2,3],1:[2,3],2:[4],3:[4]})
sage: H.is_join_semilattice()
False
"""
try:
self.join_matrix()
except ValueError:
return False
else:
return True
def is_distributive_lattice(self): # still a dumb algorithm...
r"""
Returns ``True`` if ``self`` is the Hasse diagram of a
distributive lattice, and ``False`` otherwise.
EXAMPLES::
sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
sage: H = HasseDiagram({0:[1,3,2],1:[4],2:[4,5,6],3:[6],4:[7],5:[7],6:[7],7:[]})
sage: H.is_distributive_lattice()
False
sage: H = HasseDiagram({0:[1,2],1:[3],2:[3]})
sage: H.is_distributive_lattice()
True
sage: H = HasseDiagram({0:[1,2,3],1:[4],2:[4],3:[4]})
sage: H.is_distributive_lattice()
False
"""
try:
jn = self.join_matrix()
mt = self.meet_matrix()
except ValueError:
return False
n = jn.ncols()
for x in range(n):
for y in range(n):
for z in range(n):
if mt[x][jn[y][z]]!=jn[mt[x][y]][mt[x][z]]: return False
return True
def is_complemented_lattice(self):
r"""
Returns ``True`` if ``self`` is the Hasse diagram of a
complemented lattice, and ``False`` otherwise.
EXAMPLES::
sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
sage: H = HasseDiagram({0:[1,2,3],1:[4],2:[4],3:[4]})
sage: H.is_complemented_lattice()
True
sage: H = HasseDiagram({0:[1,2],1:[3],2:[3],3:[4]})
sage: H.is_complemented_lattice()
False
"""
try:
jn = self.join_matrix()
mt = self.meet_matrix()
except ValueError:
return False
n = self.cardinality()
c = [-1 for x in range(n)]
for x in range(n):
for y in range(x,n):
if jn[x][y]==n-1 and mt[x][y]==0:
c[x]=y
c[y]=x
return all([c[x]!=-1 for x in range(n)])
def complements(self):
r"""
Returns a list ``l`` such that ``l[i]`` is a complement of
``i`` in ``self``.
A complement of ``x`` is an element ``y`` such that the meet
of ``x`` and ``y`` is the bottom element of ``self`` and the
join of ``x`` and ``y`` is the top element of ``self``.
EXAMPLES::
sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
sage: H = HasseDiagram({0:[1,2,3],1:[4],2:[4],3:[4]})
sage: H.complements()
[4, 3, 3, 2, 0]
sage: H = HasseDiagram({0:[1,2],1:[3],2:[3],3:[4]})
sage: H.complements()
[4, None, None, None, 0]
"""
jn = self.join_matrix()
mt = self.meet_matrix()
n = self.cardinality()
c = [None for x in range(n)]
for x in range(n):
for y in range(x,n):
if jn[x][y]==n-1 and mt[x][y]==0:
c[x]=y
c[y]=x
return c
def antichains_iterator(self):
r"""
Return an iterator over the antichains of the poset.
.. note::
The algorithm is based on Freese-Jezek-Nation p. 226.
It does a depth first search through the set of all
antichains organized in a prefix tree.
EXAMPLES::
sage: P = posets.PentagonPoset()
sage: H = P._hasse_diagram
sage: H.antichains_iterator()
<generator object antichains_iterator at ...>
sage: list(H.antichains_iterator())
[[], [4], [3], [2], [1], [1, 3], [1, 2], [0]]
sage: from sage.combinat.posets.hasse_diagram import HasseDiagram
sage: H = HasseDiagram({0:[1,2],1:[4],2:[3],3:[4]})
sage: list(H.antichains_iterator())
[[], [4], [3], [2], [1], [1, 3], [1, 2], [0]]
sage: H = HasseDiagram({0:[],1:[],2:[]})
sage: list(H.antichains_iterator())
[[], [2], [1], [1, 2], [0], [0, 2], [0, 1], [0, 1, 2]]
sage: H = HasseDiagram({0:[1],1:[2],2:[3],3:[4]})
sage: list(H.antichains_iterator())
[[], [4], [3], [2], [1], [0]]
TESTS::
sage: H = Poset()._hasse_diagram
sage: list(H.antichains_iterator())
[[]]
"""
# Complexity note:
# antichains_queues never grows longer than self.cardinality().
# Indeed, if a appears before b in antichains_queues, then
# the largest element of a is strictly smaller than that of b.
antichains_queues = [([], range(self.cardinality()-1,-1,-1))]
leq = self.lequal_matrix()
while antichains_queues:
(antichain, queue) = antichains_queues.pop()
# Invariant:
# - the elements of antichain are independent
# - the elements of queue are independent from those of antichain
yield antichain
while queue:
x = queue.pop()
new_antichain = antichain + [x]
new_queue = [t for t in queue if not (leq[t,x] or leq[x,t])]
antichains_queues.append((new_antichain, new_queue))
def are_incomparable(self, i, j):
"""
Returns whether ``i`` and ``j`` are incomparable in the poset
INPUT:
- ``i``, ``j`` -- vertices of this Hasse diagram
EXAMPLES::
sage: P = posets.PentagonPoset()
sage: H = P._hasse_diagram
sage: H.are_incomparable(1,2)
True
sage: [ (i,j) for i in H.vertices() for j in H.vertices() if H.are_incomparable(i,j)]
[(1, 2), (1, 3), (2, 1), (3, 1)]
"""
mat = self._leq_matrix
return not mat[i,j] and not mat[j,i]
def are_comparable(self, i, j):
"""
Returns whether ``i`` and ``j`` are comparable in the poset
INPUT:
- ``i``, ``j`` -- vertices of this Hasse diagram
EXAMPLES::
sage: P = posets.PentagonPoset()
sage: H = P._hasse_diagram
sage: H.are_comparable(1,2)
False
sage: [ (i,j) for i in H.vertices() for j in H.vertices() if H.are_comparable(i,j)]
[(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1), (1, 4), (2, 0), (2, 2), (2, 3), (2, 4), (3, 0), (3, 2), (3, 3), (3, 4), (4, 0), (4, 1), (4, 2), (4, 3), (4, 4)]
"""
mat = self._leq_matrix
return bool(mat[i,j]) or bool(mat[j,i])
def antichains(self, element_class = list):
"""
Returns all antichains of ``self``, organized as a
prefix tree
INPUT:
- ``element_class`` -- (default:list) an iterable type
EXAMPLES::
sage: P = posets.PentagonPoset()
sage: H = P._hasse_diagram
sage: A = H.antichains()
sage: list(A)
[[], [0], [1], [1, 2], [1, 3], [2], [3], [4]]
sage: A.cardinality()
8
sage: [1,3] in A
True
sage: [1,4] in A
False
TESTS::
sage: TestSuite(A).run(skip = "_test_pickling")
.. note:: It's actually the pickling of the cached method
:meth:`coxeter_transformation` that fails ...
TESTS::
sage: A = Poset()._hasse_diagram.antichains()
sage: list(A)
[[]]
sage: TestSuite(A).run()
"""
from sage.combinat.subsets_pairwise import PairwiseCompatibleSubsets
return PairwiseCompatibleSubsets(self.vertices(),
self.are_incomparable,
element_class = element_class)
def chains(self, element_class = list):
"""
Returns all chains of ``self``, organized as a prefix tree
INPUT:
- ``element_class`` -- (default:list) an iterable type
EXAMPLES::
sage: P = posets.PentagonPoset()
sage: H = P._hasse_diagram
sage: A = H.chains()
sage: list(A)
[[], [0], [0, 1], [0, 1, 4], | |
(33.9225, -116.850555556),
"KBNO": (43.5919444444, -118.955555556),
"KBNW": (42.0494444444, -93.8475),
"KBOI": (43.5641666667, -116.222777778),
"KBOS": (42.3641666667, -71.005),
"KBOW": (27.9433333333, -81.7833333333),
"KBPG": (32.2125, -101.521666667),
"KBPI": (42.585, -110.111111111),
"KBPK": (36.3688888889, -92.4705555556),
"KBPP": (46.1869444444, -103.428055556),
"KBPT": (29.9508333333, -94.0205555556),
"KBQK": (31.2588888889, -81.4663888889),
"KBQR": (42.9222222222, -78.6122222222),
"KBRD": (46.4022222222, -94.1355555556),
"KBRL": (40.7830555556, -91.1255555556),
"KBRO": (25.9066666667, -97.4258333333),
"KBRY": (37.8144444444, -85.4997222222),
"KBST": (44.4094444444, -69.0119444444),
"KBTF": (40.8694444444, -111.927222222),
"KBTL": (42.3063888889, -85.25),
"KBTM": (45.9547222222, -112.4975),
"KBTN": (45.815, -97.7427777778),
"KBTP": (40.7769444444, -79.9497222222),
"KBTR": (30.5330555556, -91.1494444444),
"KBTV": (44.4716666667, -73.1530555556),
"KBTY": (36.8611111111, -116.786944444),
"KBUF": (42.9402777778, -78.7319444444),
"KBUR": (34.2005555556, -118.358611111),
"KBUU": (42.6908333333, -88.3047222222),
"KBUY": (36.0486111111, -79.475),
"KBVI": (40.7725, -80.3913888889),
"KBVN": (41.7286111111, -98.0558333333),
"KBVO": (36.7641666667, -96.0111111111),
"KBVS": (48.4708333333, -122.420833333),
"KBVU": (35.9475, -114.861111111),
"KBVX": (35.7261111111, -91.6475),
"KBVY": (42.5841666667, -70.9165),
"KBWC": (32.9930555556, -115.516944444),
"KBWD": (31.7936111111, -98.9563888889),
"KBWG": (36.9644444444, -86.4197222222),
"KBWI": (39.1752777778, -76.6683333333),
"KBWP": (46.2441666667, -96.6072222222),
"KBXK": (33.4205555556, -112.686111111),
"KBXM": (43.8922222222, -69.9388888889),
"KBYH": (35.9641666667, -89.9433333333),
"KBYI": (42.5425, -113.771666667),
"KBYS": (35.2802777778, -116.63),
"KBYY": (28.9733333333, -95.8633333333),
"KBZN": (45.7775, -111.151944444),
"KCAD": (44.2753111111, -85.418925),
"KCAE": (33.9386111111, -81.1194444444),
"KCAG": (40.4952777778, -107.521666667),
"KCAK": (40.9161111111, -81.4422222222),
"KCAO": (36.4463888889, -103.149722222),
"KCAR": (46.8713888889, -68.0177777778),
"KCAV": (42.7419444444, -93.7588888889),
"KCBE": (39.6155555556, -78.7608333333),
"KCBF": (41.26, -95.7586111111),
"KCBG": (45.5575, -93.2641666667),
"KCBK": (39.4275, -101.046666667),
"KCBM": (33.6438888889, -88.4436111111),
"KCCA": (35.5977777778, -92.4516666667),
"KCCB": (34.1116666667, -117.6875),
"KCCO": (33.3116666667, -84.7697222222),
"KCCR": (37.9897222222, -122.056944444),
"KCCY": (43.0725, -92.6108333333),
"KCDA": (44.5691666667, -72.0180555556),
"KCDC": (37.7008333333, -113.098611111),
"KCDD": (48.2666666667, -92.4836111111),
"KCDH": (33.6227777778, -92.7633333333),
"KCDI": (39.975, -81.5775),
"KCDK": (29.1341666667, -83.0505555556),
"KCDN": (34.2836111111, -80.565),
"KCDR": (42.8375, -103.095277778),
"KCDS": (34.4336111111, -100.288055556),
"KCDW": (40.8752777778, -74.2813888889),
"KCEA": (37.6486111111, -97.2505555556),
"KCEC": (41.7802777778, -124.236666667),
"KCEF": (42.1980555556, -72.5341666667),
"KCEU": (34.6719166667, -82.8868055556),
"KCEV": (39.6983333333, -85.1311111111),
"KCEW": (30.7786111111, -86.5219444444),
"KCEY": (36.6644444444, -88.3727777778),
"KCEZ": (37.3030555556, -108.628055556),
"KCFD": (30.7155555556, -96.3311111111),
"KCFE": (45.1591666667, -93.8433333333),
"KCFS": (43.4586111111, -83.4452777778),
"KCFT": (32.9569444444, -109.211111111),
"KCFV": (37.0941666667, -95.5719444444),
"KCGC": (28.8672222222, -82.5713888889),
"KCGE": (38.5394444444, -76.0302777778),
"KCGF": (41.565, -81.4863888889),
"KCGI": (37.2252777778, -89.5708333333),
"KCGS": (38.9805833333, -76.9223055556),
"KCGZ": (32.955, -111.766944444),
"KCHA": (35.0352777778, -85.2036111111),
"KCHD": (33.2691666667, -111.811111111),
"KCHK": (35.0972222222, -97.9677777778),
"KCHN": (27.5149055556, -81.8804638889),
"KCHO": (38.1386388889, -78.4528611111),
"KCHQ": (36.8422222222, -89.3597222222),
"KCHS": (32.8986111111, -80.0402777778),
"KCHU": (43.5963888889, -91.5038888889),
"KCIC": (39.7952777778, -121.858333333),
"KCID": (41.8846944444, -91.7108055556),
"KCII": (47.8283333333, -112.168333333),
"KCIN": (42.0461111111, -94.7888888889),
"KCIU": (46.2508333333, -84.4725),
"KCJR": (38.5255555556, -77.8597222222),
"KCKA": (36.7380555556, -98.1261111111),
"KCKB": (39.2966666667, -80.2280555556),
"KCKC": (47.8383333333, -90.3830555556),
"KCKF": (31.9888888889, -83.7738888889),
"KCKN": (47.8408333333, -96.6216666667),
"KCKV": (36.6219444444, -87.415),
"KCLE": (41.4116666667, -81.8497222222),
"KCLI": (44.6133333333, -88.7308333333),
"KCLK": (35.5382722222, -98.9327777778),
"KCLL": (30.5883333333, -96.3636111111),
"KCLM": (48.1202777778, -123.499722222),
"KCLR": (33.1313888889, -115.521388889),
"KCLT": (35.2138888889, -80.9430555556),
"KCLW": (27.9766666667, -82.7586111111),
"KCMA": (34.2138888889, -119.094444444),
"KCMH": (39.9977777778, -82.8916666667),
"KCMI": (40.0391666667, -88.2780555556),
"KCMR": (35.3054722222, -112.194388889),
"KCMX": (47.1683333333, -88.4891666667),
"KCMY": (43.9583333333, -90.7377777778),
"KCNH": (43.3705555556, -72.3686111111),
"KCNK": (39.5491666667, -97.6522222222),
"KCNM": (32.3372222222, -104.263055556),
"KCNO": (33.9747222222, -117.636666667),
"KCNU": (37.6688888889, -95.485),
"KCNW": (31.6377777778, -97.0738888889),
"KCNY": (38.755, -109.754722222),
"KCOD": (44.5202777778, -109.023888889),
"KCOE": (47.7744444444, -116.819444444),
"KCOF": (28.2347222222, -80.61),
"KCOI": (28.3416666667, -80.6855555556),
"KCOM": (31.8411111111, -99.4033333333),
"KCON": (43.2027777778, -71.5022222222),
"KCOQ": (46.7011111111, -92.5036111111),
"KCOS": (38.8055555556, -104.7),
"KCOT": (28.4580555556, -99.22),
"KCOU": (38.8180555556, -92.2194444444),
"KCPC": (34.2727777778, -78.715),
"KCPK": (36.6655555556, -76.3205555556),
"KCPM": (33.89, -118.243611111),
"KCPR": (42.9077777778, -106.464166667),
"KCPS": (38.5708333333, -90.1561111111),
"KCPT": (32.3538888889, -97.4338888889),
"KCPU": (38.1461111111, -120.648055556),
"KCQB": (35.7238888889, -96.8202777778),
"KCQF": (30.4605555556, -87.8769444444),
"KCQW": (34.7127777778, -79.9569444444),
"KCQX": (41.6883333333, -69.9894444444),
"KCRE": (33.8116666667, -78.7238888889),
"KCRG": (30.3363888889, -81.5144444444),
"KCRO": (36.1027777778, -119.594722222),
"KCRP": (27.7702777778, -97.5011111111),
"KCRQ": (33.1283333333, -117.28),
"KCRS": (32.0280555556, -96.4005555556),
"KCRT": (33.1783333333, -91.8802777778),
"KCRW": (38.3758333333, -81.5930555556),
"KCRX": (34.915, -88.6036111111),
"KCRZ": (40.9941666667, -94.755),
"KCSB": (40.3066666667, -100.162222222),
"KCSG": (32.5163888889, -84.9388888889),
"KCSM": (35.3397222222, -99.2005555556),
"KCSQ": (41.0213888889, -94.3633333333),
"KCTB": (48.6083333333, -112.376111111),
"KCTJ": (33.6316666667, -85.1522222222),
"KCTK": (40.5691666667, -90.0747222222),
"KCTY": (29.6355555556, -83.1047222222),
"KCTZ": (34.975, -78.3655555556),
"KCUB": (33.9705555556, -80.9952777778),
"KCUH": (35.95, -96.7733333333),
"KCVG": (39.0461111111, -84.6619444444),
"KCVH": (36.8933333333, -121.410277778),
"KCVK": (36.265, -91.5627777778),
"KCVN": (34.4252777778, -103.079166667),
"KCVO": (44.4972222222, -123.289444444),
"KCVS": (34.3825, -103.321944444),
"KCVX": (45.3047222222, -85.2752777778),
"KCWA": (44.7775, -89.6666666667),
"KCWF": (30.2105555556, -93.1430555556),
"KCWI": (41.8311111111, -90.3291666667),
"KCWS": (35.0808333333, -92.425),
"KCXL": (32.6694444444, -115.513055556),
"KCXO": (30.3516666667, -95.4144444444),
"KCXP": (39.1922222222, -119.734444444),
"KCXY": (40.2172222222, -76.8513888889),
"KCYS": (41.1555555556, -104.811666667),
"KCYW": (39.3872222222, -97.1572222222),
"KCZG": (42.0786111111, -76.0963888889),
"KCZK": (45.6769444444, -121.878888889),
"KCZL": (34.4555555556, -84.9391666667),
"KDAA": (38.715, -77.1811111111),
"KDAB": (29.1847222222, -81.0605555556),
"KDAF": (44.0333333333, -90.085),
"KDAG": (34.8536111111, -116.786666667),
"KDAL": (32.8469444444, -96.8516666667),
"KDAN": (36.5727777778, -79.3361111111),
"KDAW": (43.2841666667, -70.9291666667),
"KDAY": (39.9022222222, -84.2191666667),
"KDBN": (32.5644444444, -82.9852777778),
"KDBQ": (42.4019444444, -90.7094444444),
"KDCA": (38.8519444444, -77.0375),
"KDCU": (34.6541666667, -86.9452777778),
"KDCY": (38.7005555556, -87.1297222222),
"KDDC": (37.7630555556, -99.9655555556),
"KDDH": (42.8911111111, -73.2461111111),
"KDEC": (39.8344444444, -88.8655555556),
"KDED": (29.0669444444, -81.2838888889),
"KDEH": (43.2755555556, -91.7394444444),
"KDEN": (39.8583333333, -104.666944444),
"KDEQ": (34.0469444444, -94.3994444444),
"KDET": (42.4091666667, -83.0097222222),
"KDEW": (47.9669444444, -117.428611111),
"KDFI": (41.3375, -84.4288888889),
"KDFW": (32.8963888889, -97.0375),
"KDGL": (31.3425, -109.506388889),
"KDGW": (42.7972222222, -105.385833333),
"KDHN": (31.3211111111, -85.4494444444),
"KDHT": (36.0225, -102.547222222),
"KDIJ": (43.7425, -111.0975),
"KDIK": (46.7972222222, -102.801944444),
"KDKB": (39.0705555556, -88.5336111111),
"KDKK": (42.4933333333, -79.2719444444),
"KDKR": (31.3069444444, -95.4038888889),
"KDKX": (35.9638888889, -83.8736111111),
"KDLC": (34.4491666667, -79.3686111111),
"KDLF": (29.3594444444, -100.777777778),
"KDLH": (46.8419444444, -92.1936111111),
"KDLL": (43.5219444444, -89.7708333333),
"KDLN": (45.2552777778, -112.5525),
"KDLO": (35.7455555556, -119.236388889),
"KDLS": (45.6186111111, -121.167222222),
"KDMA": (32.1663888889, -110.883055556),
"KDMN": (32.2622222222, -107.720555556),
"KDMO": (38.7075, -93.1758333333),
"KDMW": (39.6083333333, -77.0077777778),
"KDNL": (33.4663888889, -82.0394444444),
"KDNN": (34.7230555556, -84.8702777778),
"KDNS": (41.9866666667, -95.3805555556),
"KDNV": (40.1991666667, -87.5955555556),
"KDOV": (39.13, -75.4663888889),
"KDPA": (41.9077777778, -88.2486111111),
"KDPG": (40.1994444444, -112.9375),
"KDPL": (35.0, -77.9816666667),
"KDQH": (31.4766666667, -82.8605555556),
"KDRA": (36.6194444444, -116.032777778),
"KDRI": (30.8316666667, -93.3397222222),
"KDRO": (37.1513888889, -107.753611111),
"KDRT": (29.3727777778, -100.925833333),
"KDSM": (41.5338888889, -93.6625),
"KDSV": (42.5708333333, -77.7130555556),
"KDTA": (39.3805555556, -112.507777778),
"KDTL": (46.8252777778, -95.8847222222),
"KDTN": (32.5402777778, -93.745),
"KDTO": (33.2008333333, -97.1980555556),
"KDTS": (30.4, -86.4713888889),
"KDTW": (42.2122222222, -83.3533333333),
"KDUA": (33.9422222222, -96.3944444444),
"KDUC": (34.4708333333, -97.96),
"KDUG": (31.4688888889, -109.603611111),
"KDUH": (41.7358333333, -83.6558333333),
"KDUJ": (41.1783333333, -78.8986111111),
"KDUX": (35.8572222222, -102.013055556),
"KDVK": (37.5777777778, -84.7697222222),
"KDVL": (48.1144444444, -98.9083333333),
"KDVN": (41.6102777778, -90.5883333333),
"KDVO": (38.1436111111, -122.556111111),
"KDVT": (33.6883333333, -112.0825),
"KDWH": (30.0619444444, -95.5527777778),
"KDWU": (38.5544444444, -82.7380555556),
"KDXR": (41.3716666667, -73.4822222222),
"KDXX": (44.9863888889, -96.1777777778),
"KDXZ": (37.0363888889, -113.510277778),
"KDYA": (32.4638888889, -87.9541666667),
"KDYB": (33.0633333333, -80.2794444444),
"KDYL": (40.3330555556, -75.1219444444),
"KDYR": (35.9980555556, -89.4066666667),
"KDYS": (32.4205555556, -99.8544444444),
"KDZJ": (34.8544444444, -83.9972222222),
"KEAR": (40.7269444444, -99.0066666667),
"KEAT": (47.3980555556, -120.205833333),
"KEAU": (44.8658333333, -91.4841666667),
"KEBG": (26.4416666667, -98.1222222222),
"KEBS": (42.4363888889, -93.8691666667),
"KECG": (36.2605555556, -76.1744444444),
"KEDC": (30.3966666667, -97.5733333333),
"KEDE": (36.0277777778, -76.5672222222),
"KEDJ": (40.3722222222, -83.8188888889),
"KEDN": (31.2997222222, -85.8997222222),
"KEDU": (38.5313888889, -121.786388889),
"KEDW": (34.9052777778, -117.883611111),
"KEED": (34.7663888889, -114.623333333),
"KEEN": (42.8983333333, -72.2708333333),
"KEET": (33.1777777778, -86.7830555556),
"KEFD": (29.6072222222, -95.1586111111),
"KEFK": (44.8888888889, -72.2291666667),
"KEFT": (42.615, -89.5902777778),
"KEGE": (39.6425, -106.917777778),
"KEGI": (30.6502777778, -86.5227777778),
"KEGP": (28.7, -100.479444444),
"KEGT": (37.3236111111, -97.3883333333),
"KEGV": (45.9322222222, -89.2683333333),
"KEHA": (37.0008333333, -101.88),
"KEHO": (35.2558333333, -81.6008333333),
"KEHR": (37.8077777778, -87.6855555556),
"KEIK": (40.0102777778, -105.048055556),
"KEKA": (40.8033333333, -124.112777778),
"KEKM": (41.7194444444, -86.0033333333),
"KEKN": (38.8894444444, -79.8569444444),
"KEKO": (40.825, -115.791666667),
"KEKQ": (36.8552777778, -84.8561111111),
"KEKX": (37.6861111111, -85.925),
"KEKY": (33.3125, -86.9263888889),
"KELD": (33.2208333333, -92.8130555556),
"KELM": (42.16, -76.8916666667),
"KELN": (47.0330555556, -120.530555556),
"KELO": (47.8244444444, -91.8308333333),
"KELP": (31.8066666667, -106.377777778),
"KELY": (39.2997222222, -114.841944444),
"KELZ": (42.1094444444, -77.99),
"KEMM": (41.8241666667, -110.556944444),
"KEMP": (38.3305555556, -96.1899444444),
"KEMT": (34.0861111111, -118.034722222),
"KEND": (36.3397222222, -97.9161111111),
"KENV": (40.7186111111, -114.030833333),
"KENW": (42.5955555556, -87.9277777778),
"KEOK": (40.46, -91.4286111111),
"KEOP": (39.1669444444, -82.9280555556),
"KEOS": (36.8108333333, -94.3916666667),
"KEPH": (47.3080555556, -119.516944444),
"KEPM": (44.91, -67.0127777778),
"KEQA": (37.7741666667, -96.8175),
"KEQY": (35.0188888889, -80.6202777778),
"KERI": (42.0819444444, -80.1761111111),
"KERV": (29.9766666667, -99.0855555556),
"KERY": (46.311, -85.4571666667),
"KESC": (45.7227777778, -87.0936111111),
"KESF": (31.3947222222, -92.2955555556),
"KESN": (38.8041666667, -76.0688888889),
"KESW": (47.2541666667, -121.185555556),
"KETB": (43.4222222222, -88.1277777778),
"KEUF": (31.9513888889, -85.1288888889),
"KEUG": (44.1230555556, -123.218611111),
"KEUL": (43.6419444444, -116.635833333),
"KEVB": (29.0555555556, -80.9488888889),
"KEVU": (40.3533333333, -94.9166666667),
"KEVV": (38.0383333333, -87.5308333333),
"KEVY": (39.5202777778, -75.7205555556),
"KEWB": (41.6761111111, -70.9569444444),
"KEWK": (38.0583333333, -97.2744444444),
"KEWN": (35.0727777778, -77.0427777778),
"KEWR": (40.6922222222, -74.1686111111),
"KEXX": (35.7811111111, -80.3038888889),
"KEYE": (39.8308333333, -86.2944444444),
"KEYF": (34.6016666667, -78.5791666667),
"KEYQ": (29.9352777778, -95.6397222222),
"KEYW": (24.5561111111, -81.7594444444),
"KEZM": (32.2163888889, -83.1286111111),
"KEZS": (44.7872222222, -88.56),
"KFAF": (37.1325, -76.6086111111),
"KFAR": (46.9205555556, -96.8158333333),
"KFAT": (36.7761111111, -119.718055556),
"KFAY": (34.9911111111, -78.8802777778),
"KFBG": (35.1319444444, -78.9363888889),
"KFCA": (48.3105555556, -114.256111111),
"KFCH": (36.7322222222, -119.820277778),
"KFCI": (37.4066666667, -77.525),
"KFCM": (44.8272222222, -93.4572222222),
"KFCS": (38.6783333333, -104.756388889),
"KFCT": (46.6694444444, -120.458333333),
"KFCY": (34.9419444444, -90.775),
"KFDK": (39.4175, -77.3744444444),
"KFDR": (34.3522222222, -98.9838888889),
"KFDW": (34.3155555556, -81.1088888889),
"KFDY": (41.0119444444, -83.6686111111),
"KFET": (41.4491666667, -96.5202777778),
"KFFA": (36.0183333333, -75.6713888889),
"KFFC": (33.3572222222, -84.5719444444),
"KFFL": (41.0558333333, -91.9808333333),
"KFFM": (46.2844444444, -96.1566666667),
"KFFO": (39.8261111111, -84.0483333333),
"KFFT": (38.1819444444, -84.9061111111),
"KFFZ": (33.4608333333, -111.728333333),
"KFGX": (38.5416666667, -83.7433333333),
"KFHB": (30.6119444444, -81.4611111111),
"KFHR": (48.5219444444, -123.024444444),
"KFHU": (31.5883333333, -110.344166667),
"KFIG": (41.0491666667, -78.4152777778),
"KFIT": (42.5541666667, -71.7588888889),
"KFKA": (43.6761111111, -92.1794444444),
"KFKL": (41.3777777778, -79.8602777778),
"KFKN": (36.6980555556, -76.9038888889),
"KFKS": (44.6252777778, -86.2008333333),
"KFLD": (43.7711111111, -88.4883333333),
"KFLG": (35.1402777778, -111.669166667),
"KFLL": (26.0725, -80.1525),
"KFLO": (34.1852777778, -79.7238888889),
"KFLP": (36.2908333333, -92.5902777778),
"KFLV": (39.3683333333, -94.9144444444),
"KFLX": (39.4991666667, -118.748888889),
"KFLY": (38.9458333333, -104.57),
"KFME": (39.0852777778, -76.7591666667),
"KFMH": (41.6583333333, -70.5213888889),
"KFMM": (40.3338888889, -103.804166667),
"KFMN": (36.7411111111, -108.229722222),
"KFMY": (26.5863888889, -81.8630555556),
"KFMZ": (40.5861111111, -97.5730555556),
"KFNB": (40.0786111111, -95.5919444444),
"KFNL": (40.4519444444, -105.011388889),
"KFNT": (42.9655555556, -83.7436111111),
"KFOD": (42.5513888889, -94.1925),
"KFOE": (38.9508333333, -95.6636111111),
"KFOK": (40.8436111111, -72.6316666667),
"KFOM": (38.9580555556, -112.363055556),
"KFOT": (40.5538888889, -124.132777778),
"KFPK": (42.5744444444, -84.8113888889),
"KFPR": (27.495, -80.3683333333),
"KFRG": (40.7288888889, -73.4133333333),
"KFRH": (38.5061111111, -86.6369444444),
"KFRI": (39.055, -96.7644444444),
| |
<reponame>Tim232/Python-Things<filename>Projects/DeepLearningProject/image_classification/cnn_for_cifar_10/ensemble_cnn_model_cifar_10_v02.py
import tensorflow as tf
class Model:
def __init__(self, sess, name):
self.sess = sess
self.name = name
self._build_net()
    def _build_net(self):
        """Construct this model's TF1 graph inside ``tf.variable_scope(self.name)``.

        Pipeline (spatial sizes per the inline comments): factorized-3x3
        stem convolutions ('VALID' padding, so each 1x3/3x1 pair shrinks
        the map by 2) -> three inception blocks (26x26 -> 4x4) -> two 3x3
        'SAME' convolutions -> two 1000-unit fully connected layers ->
        10-way logits.

        Side effects: defines the placeholders ``self.X`` (flattened
        32x32x1 images), ``self.Y`` (one-hot, 10 classes) and
        ``self.training``, and the ops ``self.logits``, ``self.cost``,
        ``self.optimizer`` and ``self.accuracy`` used by ``train`` /
        ``predict`` / ``get_accuracy``.

        NOTE(review): the input is 1024 = 32*32*1, i.e. single-channel,
        although CIFAR-10 images are normally RGB -- presumably the data
        is grayscaled upstream; confirm against the training script.
        """
        with tf.variable_scope(self.name):
            with tf.name_scope('input_layer') as scope:
                # NOTE(review): tf.Variable is trainable by default, so the
                # dropout rate is exposed to the optimizer -- confirm this is
                # intended (a constant or placeholder is the usual choice).
                self.dropout_rate = tf.Variable(tf.constant(value=0.5), name='dropout_rate')
                self.training = tf.placeholder(tf.bool, name='training')
                self.X = tf.placeholder(tf.float32, [None, 1024], name='x_data')
                X_img = tf.reshape(self.X, shape=[-1, 32, 32, 1])
                self.Y = tf.placeholder(tf.float32, [None, 10], name='y_data')
            with tf.name_scope('stem_layer') as scope:
                # Stem: three factorized 3x3 convolutions, each split into a
                # 1x3 followed by a 3x1 kernel; every conv is followed by
                # batch norm (self.BN) and an ELU activation.
                self.W1_sub = tf.get_variable(name='W1_sub', shape=[1, 3, 1, 20], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.L1_sub = tf.nn.conv2d(input=X_img, filter=self.W1_sub, strides=[1, 1, 1, 1], padding='VALID') # 32x32 -> 32x30
                self.L1_sub = self.BN(input=self.L1_sub, training=self.training, name='L1_sub_BN')
                self.L1_sub = tf.nn.elu(self.L1_sub, 'R1_sub')
                self.W2_sub = tf.get_variable(name='W2_sub', shape=[3, 1, 20, 20], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.L2_sub = tf.nn.conv2d(input=self.L1_sub, filter=self.W2_sub, strides=[1, 1, 1, 1], padding='VALID') # 32x30 -> 30x30
                self.L2_sub = self.BN(input=self.L2_sub, training=self.training, name='L2_sub_BN')
                self.L2_sub = tf.nn.elu(self.L2_sub, 'R2_sub')
                self.W3_sub = tf.get_variable(name='W3_sub', shape=[1, 3, 20, 40], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.L3_sub = tf.nn.conv2d(input=self.L2_sub, filter=self.W3_sub, strides=[1, 1, 1, 1], padding='VALID') # 30x30 -> 30x28
                self.L3_sub = self.BN(input=self.L3_sub, training=self.training, name='L3_sub_BN')
                self.L3_sub = tf.nn.elu(self.L3_sub, 'R3_sub')
                self.W4_sub = tf.get_variable(name='W4_sub', shape=[3, 1, 40, 40], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.L4_sub = tf.nn.conv2d(input=self.L3_sub, filter=self.W4_sub, strides=[1, 1, 1, 1], padding='VALID') # 30x28 -> 28x28
                self.L4_sub = self.BN(input=self.L4_sub, training=self.training, name='L4_sub_BN')
                self.L4_sub = tf.nn.elu(self.L4_sub, 'R4_sub')
                self.W5_sub = tf.get_variable(name='W5_sub', shape=[1, 3, 40, 80], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.L5_sub = tf.nn.conv2d(input=self.L4_sub, filter=self.W5_sub, strides=[1, 1, 1, 1], padding='VALID') # 28x28 -> 28x26
                self.L5_sub = self.BN(input=self.L5_sub, training=self.training, name='L5_sub_BN')
                self.L5_sub = tf.nn.elu(self.L5_sub, 'R5_sub')
                self.W6_sub = tf.get_variable(name='W6_sub', shape=[3, 1, 80, 80], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.L6_sub = tf.nn.conv2d(input=self.L5_sub, filter=self.W6_sub, strides=[1, 1, 1, 1], padding='VALID') # 28x26 -> 26x26
                self.L6_sub = self.BN(input=self.L6_sub, training=self.training, name='L6_sub_BN')
                self.L6_sub = tf.nn.elu(self.L6_sub, 'R6_sub')
                # self.L1 = tf.nn.max_pool(value=self.L3_sub, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # 26x26 -> 13x13
            # Three inception blocks; the A/B/C variants are defined
            # elsewhere in this class (their bodies are not shown here).
            with tf.name_scope('inception_layer1') as scope:
                self.Inception_L1 = self.inception_A(self.L6_sub, 3, 120, name='inception_layer1') # 26x26 -> 13x13
            with tf.name_scope('inception_layer2') as scope:
                self.Inception_L2 = self.inception_B(self.Inception_L1, 3, 160, name='inception_layer2') # 13x13 -> 7x7
            with tf.name_scope('inception_layer3') as scope:
                self.Inception_L3 = self.inception_C(self.Inception_L2, 3, 200, name='inception_layer3') # 7x7 -> 4x4
                self.Inception_L3 = tf.layers.dropout(inputs=self.Inception_L3, rate=self.dropout_rate, training=self.training)
                # self.Inception_L3 = tf.reshape(self.Inception_L3, shape=[-1, 4 * 4 * 200])
            with tf.name_scope('conv_layer1') as scope:
                self.W4 = tf.get_variable(name='W4', shape=[3, 3, 200, 240], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.L4 = tf.nn.conv2d(input=self.Inception_L3, filter=self.W4, strides=[1, 1, 1, 1], padding='SAME')
                self.L4 = self.BN(input=self.L4, training=self.training, name='conv1_BN')
                self.L4 = tf.nn.elu(self.L4, 'R4')
                # self.L4 = tf.reshape(self.L4, shape=[-1, 4 * 4 * 240])
                self.L4 = tf.layers.dropout(inputs=self.L4, rate=self.dropout_rate, training=self.training)
            with tf.name_scope('conv_layer2') as scope:
                self.W5 = tf.get_variable(name='W5', shape=[3, 3, 240, 300], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.L5 = tf.nn.conv2d(input=self.L4, filter=self.W5, strides=[1, 1, 1, 1], padding='SAME')
                self.L5 = self.BN(input=self.L5, training=self.training, name='conv2_BN')
                self.L5 = tf.nn.elu(self.L5, 'R5')
                # Flatten 4x4x300 feature maps for the fully connected head.
                self.L5 = tf.reshape(self.L5, shape=[-1, 4 * 4 * 300])
                self.L5 = tf.layers.dropout(inputs=self.L5, rate=self.dropout_rate, training=self.training)
            with tf.name_scope('fc_layer1') as scope:
                # NOTE(review): the name= kwarg here is attached to the
                # tf.constant initial value, not the Variable -- confirm
                # the bias variables get the intended graph names.
                self.W_fc1 = tf.get_variable(name='W_fc1', shape=[4 * 4 * 300, 1000], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.b_fc1 = tf.Variable(tf.constant(value=0.001, shape=[1000], name='b_fc1'))
                self.L_fc1 = tf.matmul(self.L5, self.W_fc1) + self.b_fc1
                self.L_fc1 = self.BN(input=self.L_fc1, training=self.training, name='fc1_BN')
                self.L_fc1 = tf.nn.elu(self.L_fc1, 'R_fc1')
                self.L_fc1 = tf.layers.dropout(inputs=self.L_fc1, rate=self.dropout_rate, training=self.training)
            with tf.name_scope('fc_layer2') as scope:
                self.W_fc2 = tf.get_variable(name='W_fc2', shape=[1000, 1000], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
                self.b_fc2 = tf.Variable(tf.constant(value=0.001, shape=[1000], name='b_fc2'))
                self.L_fc2 = tf.matmul(self.L_fc1, self.W_fc2) + self.b_fc2
                self.L_fc2 = self.BN(input=self.L_fc2, training=self.training, name='fc2_BN')
                self.L_fc2 = tf.nn.elu(self.L_fc2, 'R_fc2')
                self.L_fc2 = tf.layers.dropout(inputs=self.L_fc2, rate=self.dropout_rate, training=self.training)
            # Output projection to 10 class logits.
            self.W_out = tf.get_variable(name='W_out', shape=[1000, 10], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            self.b_out = tf.Variable(tf.constant(value=0.001, shape=[10], name='b_out'))
            self.logits = tf.matmul(self.L_fc2, self.W_out) + self.b_out
            # Softmax cross-entropy plus an L2 penalty on the output-layer
            # weights only (hidden layers are not regularized).
            self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y)) + \
                        (0.01 / (2 * tf.to_float(tf.shape(self.Y)[0]))) * tf.reduce_sum(tf.square(self.W_out))
            self.optimizer = tf.train.AdamOptimizer(learning_rate=0.005).minimize(self.cost)
            # NOTE(review): tf.arg_max is deprecated in favor of tf.argmax.
            self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(self.logits, 1), tf.arg_max(self.Y, 1)), dtype=tf.float32))
def predict(self, x_test):
return self.sess.run(self.logits, feed_dict={self.X: x_test, self.training: False})
def get_accuracy(self, x_test, y_test):
return self.sess.run(self.accuracy, feed_dict={self.X: x_test, self.Y: y_test, self.training: False})
def train(self, x_data, y_data):
return self.sess.run([self.cost, self.optimizer], feed_dict={self.X: x_data, self.Y: y_data, self.training: True})
def parametric_relu(self, _x, name):
alphas = tf.get_variable(name, _x.get_shape()[-1], initializer=tf.constant_initializer(0.01), dtype=tf.float32)
pos = tf.nn.relu(_x)
neg = alphas * (_x - abs(_x)) * 0.5
return pos + neg
def BN(self, input, training, name):
# if training is True:
# bn = tf.contrib.layers.batch_norm(input, decay, scale=scale, is_training=True, updates_collections=None, scope=name)
# else:
# bn = tf.contrib.layers.batch_norm(input, decay, scale=scale, is_training=True, updates_collections=None, scope=name)
return tf.contrib.layers.batch_norm(input, decay=0.9, scale=False, is_training=training, updates_collections=None, scope=name, zero_debias_moving_mean=True)
    def inception_A(self, x, n, output, name):
        """Inception-style block, variant A: four parallel branches concatenated
        on the channel axis, each downsampling H and W by 2 (stride-2 somewhere
        in the branch).

        Args:
            x: input feature map (NHWC)
            n: spatial kernel size for the nxn convolutions
            output: target total channel count; each branch emits output/4
            name: variable scope for this block's weights
        """
        OPL = int(output/4)  # channels per branch ("output per leg")
        B, H, W, C = x.get_shape()  # only C is used below
        with tf.variable_scope(name):
            # Branch 1: plain 1x1 conv, stride 2.
            # 1x1
            W1x1 = tf.get_variable(name='W1x1', shape=[1, 1, C, OPL], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L1x1 = tf.nn.conv2d(name='L1x1', input=x, filter=W1x1, strides=[1, 2, 2, 1], padding='SAME')
            L1x1 = self.BN(input=L1x1, training=self.training, name='inceptionA_L1x1_BN')
            L1x1 = tf.nn.elu(L1x1 , 'inceptionA_L1x1_R')
            # Branch 2: 5x5-equivalent factored as 1x1 bottleneck then two nxn convs
            # (stride 2 on the last). NOTE(review): the bottleneck width 15 is
            # hard-coded rather than derived from OPL — confirm this is intended.
            # 5x5 -> 1x1, 3x3, 3x3
            W5x5_sub1 = tf.get_variable(name='W5x5_sub1', shape=[1, 1, C, 15], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L5x5_sub1 = tf.nn.conv2d(name='L5x5_sub1', input=x, filter=W5x5_sub1, strides=[1, 1, 1, 1], padding='SAME')
            L5x5_sub1 = self.BN(input=L5x5_sub1, training=self.training, name='inceptionA_L5x5_sub1_BN')
            L5x5_sub1 = tf.nn.elu(L5x5_sub1, 'inceptionA_L5x5_sub1_R')
            W5x5_sub2 = tf.get_variable(name='W5x5_sub2', shape=[n, n, 15, OPL], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L5x5_sub2 = tf.nn.conv2d(name='L5x5_sub2', input=L5x5_sub1, filter=W5x5_sub2, strides=[1, 1, 1, 1], padding='SAME')
            L5x5_sub2 = self.BN(input=L5x5_sub2, training=self.training, name='inceptionA_L5x5_sub2_BN')
            L5x5_sub2 = tf.nn.elu(L5x5_sub2, 'inceptionA_L5x5_sub2_R')
            W5x5_sub3 = tf.get_variable(name='W5x5_sub3', shape=[n, n, OPL, OPL], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L5x5_sub3 = tf.nn.conv2d(name='L5x5_sub3', input=L5x5_sub2, filter=W5x5_sub3, strides=[1, 2, 2, 1], padding='SAME')
            L5x5_sub3 = self.BN(input=L5x5_sub3, training=self.training, name='inceptionA_L5x5_sub3_BN')
            L5x5_sub3 = tf.nn.elu(L5x5_sub3, 'inceptionA_L5x5_sub3_R')
            # Branch 3: 3x3-equivalent factored as 1x1 bottleneck then one nxn
            # conv with stride 2.
            # 3x3 -> 1x1, 3x3
            W3x3_sub1 = tf.get_variable(name='W3x3_sub1', shape=[1, 1, C, 15], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L3x3_sub1 = tf.nn.conv2d(name='L3x3_sub1', input=x, filter=W3x3_sub1, strides=[1, 1, 1, 1], padding='SAME')
            L3x3_sub1 = self.BN(input=L3x3_sub1, training=self.training, name='inceptionA_L3x3_sub1_BN')
            L3x3_sub1 = tf.nn.elu(L3x3_sub1, 'inceptionA_L3x3_sub1_R')
            W3x3_sub2 = tf.get_variable(name='W3x3_sub2', shape=[n, n, 15, OPL], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L3x3_sub2 = tf.nn.conv2d(name='L3x3_sub2', input=L3x3_sub1, filter=W3x3_sub2, strides=[1, 2, 2, 1], padding='SAME')
            L3x3_sub2 = self.BN(input=L3x3_sub2, training=self.training, name='inceptionA_L3x3_sub2_BN')
            L3x3_sub2 = tf.nn.elu(L3x3_sub2, 'inceptionA_L3x3_sub2_R')
            # Branch 4: stride-1 average pool followed by stride-2 1x1 conv.
            # avg pooling -> avg pooling, 1x1
            L_pool = tf.nn.avg_pool(name='L_pool', value=x, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME')
            W_pool_sub1 = tf.get_variable(name='W_pool_sub1', shape=[1, 1, C, OPL], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L_pool_sub1 = tf.nn.conv2d(name='L_pool_sub1', input=L_pool, filter=W_pool_sub1, strides=[1, 2, 2, 1], padding='SAME')
            L_pool_sub1 = self.BN(input=L_pool_sub1, training=self.training, name='inceptionA_L_pool_sub1_BN')
            L_pool_sub1 = tf.nn.elu(L_pool_sub1, 'inceptionA_L_pool_sub1_R')
            tot_layers = tf.concat([L1x1, L5x5_sub3, L3x3_sub2, L_pool_sub1], axis=3) # Concat in the 4th dim to stack
            return tot_layers
    def inception_B(self, x, n, output, name):
        """Inception-style block, variant B: uses asymmetric (1xn / nx1) factored
        convolutions. Four parallel branches are concatenated on the channel
        axis; each downsamples H and W by 2 overall.

        Args:
            x: input feature map (NHWC)
            n: kernel size for the factored 1xn / nx1 convolutions
            output: nominal total channel count. NOTE(review): OPL = output/4 is
                computed but never used — branch widths are hard-coded
                (60 + 40 + 40 + 20 = 160 output channels); confirm intended.
            name: variable scope for this block's weights
        """
        OPL = int(output/4)
        B, H, W, C = x.get_shape()  # only C is used below
        with tf.variable_scope(name):
            # Branch 1: plain 1x1 conv, stride 2.
            # 1x1
            W1x1 = tf.get_variable(name='W1x1', shape=[1, 1, C, 60], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L1x1 = tf.nn.conv2d(name='L1x1', input=x, filter=W1x1, strides=[1, 2, 2, 1], padding='SAME')
            L1x1 = self.BN(input=L1x1, training=self.training, name='inceptionB_L1x1_BN')
            L1x1 = tf.nn.elu(L1x1, 'inceptionB_L1x1_R')
            # Branch 2: 5x5-equivalent factored into 1x1 then two (1xn, nx1)
            # pairs; width stride 2 on sub4, height stride 2 on sub5.
            # 5x5 -> 1x1, 1x3, 3x1, 1x3, 3x1
            W5x5_sub1 = tf.get_variable(name='W5x5_sub1', shape=[1, 1, C, 20], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L5x5_sub1 = tf.nn.conv2d(name='L5x5_sub1', input=x, filter=W5x5_sub1, strides=[1, 1, 1, 1], padding='SAME')
            L5x5_sub1 = self.BN(input=L5x5_sub1, training=self.training, name='inceptionB_L5x5_sub1_BN')
            L5x5_sub1 = tf.nn.elu(L5x5_sub1, 'inceptionB_L5x5_sub1_R')
            W5x5_sub2 = tf.get_variable(name='W5x5_sub2', shape=[1, n, 20, 20], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L5x5_sub2 = tf.nn.conv2d(name='L5x5_sub2', input=L5x5_sub1, filter=W5x5_sub2, strides=[1, 1, 1, 1], padding='SAME')
            L5x5_sub2 = self.BN(input=L5x5_sub2, training=self.training, name='inceptionB_L5x5_sub2_BN')
            L5x5_sub2 = tf.nn.elu(L5x5_sub2, 'inceptionB_L5x5_sub2_R')
            W5x5_sub3 = tf.get_variable(name='W5x5_sub3', shape=[n, 1, 20, 30], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L5x5_sub3 = tf.nn.conv2d(name='L5x5_sub3', input=L5x5_sub2, filter=W5x5_sub3, strides=[1, 1, 1, 1], padding='SAME')
            L5x5_sub3 = self.BN(input=L5x5_sub3, training=self.training, name='inceptionB_L5x5_sub3_BN')
            L5x5_sub3 = tf.nn.elu(L5x5_sub3, 'inceptionB_L5x5_sub3_R')
            W5x5_sub4 = tf.get_variable(name='W5x5_sub4', shape=[1, n, 30, 30], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L5x5_sub4 = tf.nn.conv2d(name='L5x5_sub4', input=L5x5_sub3, filter=W5x5_sub4, strides=[1, 1, 2, 1], padding='SAME')
            L5x5_sub4 = self.BN(input=L5x5_sub4, training=self.training, name='inceptionB_L5x5_sub4_BN')
            L5x5_sub4 = tf.nn.elu(L5x5_sub4, 'inceptionB_L5x5_sub4_R')
            W5x5_sub5 = tf.get_variable(name='W5x5_sub5', shape=[n, 1, 30, 40], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L5x5_sub5 = tf.nn.conv2d(name='L5x5_sub5', input=L5x5_sub4, filter=W5x5_sub5, strides=[1, 2, 1, 1], padding='SAME')
            L5x5_sub5 = self.BN(input=L5x5_sub5, training=self.training, name='inceptionB_L5x5_sub5_BN')
            L5x5_sub5 = tf.nn.elu(L5x5_sub5, 'inceptionB_L5x5_sub5_R')
            # Branch 3: 3x3-equivalent factored into 1x1, 1xn (width stride 2),
            # nx1 (height stride 2).
            # 3x3 -> 1x1, 1x3, 3x1
            W3x3_sub1 = tf.get_variable(name='W3x3_sub1', shape=[1, 1, C, 20], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L3x3_sub1 = tf.nn.conv2d(name='L3x3_sub1', input=x, filter=W3x3_sub1, strides=[1, 1, 1, 1], padding='SAME')
            L3x3_sub1 = self.BN(input=L3x3_sub1, training=self.training, name='inceptionB_L3x3_sub1_BN')
            L3x3_sub1 = tf.nn.elu(L3x3_sub1, 'inceptionB_L3x3_sub1_R')
            W3x3_sub2 = tf.get_variable(name='W3x3_sub2', shape=[1, n, 20, 30], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L3x3_sub2 = tf.nn.conv2d(name='L3x3_sub2', input=L3x3_sub1, filter=W3x3_sub2, strides=[1, 1, 2, 1], padding='SAME')
            L3x3_sub2 = self.BN(input=L3x3_sub2, training=self.training, name='inceptionB_L3x3_sub2_BN')
            L3x3_sub2 = tf.nn.elu(L3x3_sub2, 'inceptionB_L3x3_sub2_R')
            W3x3_sub3 = tf.get_variable(name='W3x3_sub3', shape=[n, 1, 30, 40], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L3x3_sub3 = tf.nn.conv2d(name='L3x3_sub3', input=L3x3_sub2, filter=W3x3_sub3, strides=[1, 2, 1, 1], padding='SAME')
            L3x3_sub3 = self.BN(input=L3x3_sub3, training=self.training, name='inceptionB_L3x3_sub3_BN')
            L3x3_sub3 = tf.nn.elu(L3x3_sub3, 'inceptionB_L3x3_sub3_R')
            # Branch 4: stride-1 average pool then stride-2 1x1 conv.
            # NOTE(review): the original comment said "max pooling" but the code
            # uses tf.nn.avg_pool — confirm which was intended.
            L_pool = tf.nn.avg_pool(name='L_pool', value=x, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME')
            W_pool_sub1 = tf.get_variable(name='W_pool_sub1', shape=[1, 1, C, 20], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
            L_pool_sub1 = tf.nn.conv2d(name='L_pool_sub1', input=L_pool, filter=W_pool_sub1, strides=[1, 2, 2, 1], padding='SAME')
            L_pool_sub1 = self.BN(input=L_pool_sub1, training=self.training, name='inceptionB_L_pool_sub1_BN')
            L_pool_sub1 = tf.nn.elu(L_pool_sub1, 'inceptionB_L_pool_sub1_R')
            tot_layers = tf.concat([L1x1, L5x5_sub5, L3x3_sub3, L_pool_sub1], axis=3) # Concat in the 4th dim to stack
            return tot_layers
def inception_C(self, x, n, output, name):
OPL = int(output/4)
B, H, W, C = x.get_shape()
with tf.variable_scope(name):
# 1x1
W1x1 = tf.get_variable(name='W1x1', shape=[1, 1, C, 20], dtype=tf.float32, initializer=tf.contrib.layers.variance_scaling_initializer())
L1x1 = tf.nn.conv2d(name='L1x1', input=x, filter=W1x1, strides=[1, 2, 2, 1], padding='SAME')
L1x1 = self.BN(input=L1x1, training=self.training, name='inceptionC_L1x1_BN')
L1x1 = | |
= value
    # NOTE(review): auto-generated accessor boilerplate. Each field is stored in
    # a name-mangled private attribute; every setter calls
    # self._property_changed(<field name>) before assigning — presumably so the
    # enclosing class (defined outside this chunk) can track dirty fields for
    # serialization. Confirm against the class definition before hand-editing.
    @property
    def region_name(self) -> str:
        """Name of the region for which FCI is calculated — one of Developed
        Markets, Emerging Markets, Euro Area, Global."""
        return self.__region_name
    @region_name.setter
    def region_name(self, value: str):
        self._property_changed('region_name')
        self.__region_name = value
    @property
    def description(self) -> str:
        """Description of asset."""
        return self.__description
    @description.setter
    def description(self, value: str):
        self._property_changed('description')
        self.__description = value
    @property
    def asset_classifications_is_country_primary(self) -> bool:
        """Whether or not it is the primary exchange asset for the exchange country."""
        return self.__asset_classifications_is_country_primary
    @asset_classifications_is_country_primary.setter
    def asset_classifications_is_country_primary(self, value: bool):
        self._property_changed('asset_classifications_is_country_primary')
        self.__asset_classifications_is_country_primary = value
    @property
    def value_revised(self) -> str:
        """Revised value."""
        return self.__value_revised
    @value_revised.setter
    def value_revised(self, value: str):
        self._property_changed('value_revised')
        self.__value_revised = value
    @property
    def adjusted_trade_price(self) -> float:
        """Last trade price or value adjusted for corporate actions."""
        return self.__adjusted_trade_price
    @adjusted_trade_price.setter
    def adjusted_trade_price(self, value: float):
        self._property_changed('adjusted_trade_price')
        self.__adjusted_trade_price = value
    @property
    def forecast_time(self) -> datetime.datetime:
        """Time of forecasted electricity loads (in UTC)."""
        return self.__forecast_time
    @forecast_time.setter
    def forecast_time(self, value: datetime.datetime):
        self._property_changed('forecast_time')
        self.__forecast_time = value
    @property
    def is_adr(self) -> bool:
        """Is ADR or not."""
        return self.__is_adr
    @is_adr.setter
    def is_adr(self, value: bool):
        self._property_changed('is_adr')
        self.__is_adr = value
    @property
    def factor(self) -> str:
        """For Axioma, one of: Exchange Rate Sensitivity, Growth, Leverage, Medium-Term
            Momentum, Short-Term Momentum, Size, Value, Volatility. For Prime,
            one of: Long Concentration, Short Concentration, Long Crowdedness,
            Short Crowdedness, Crowdedness momentum, Short Conviction."""
        return self.__factor
    @factor.setter
    def factor(self, value: str):
        self._property_changed('factor')
        self.__factor = value
    @property
    def days_on_loan(self) -> float:
        """The number of days this loan as been on our books."""
        return self.__days_on_loan
    @days_on_loan.setter
    def days_on_loan(self, value: float):
        self._property_changed('days_on_loan')
        self.__days_on_loan = value
    @property
    def long_conviction_small(self) -> float:
        """The count of long ideas with small conviction."""
        return self.__long_conviction_small
    @long_conviction_small.setter
    def long_conviction_small(self, value: float):
        self._property_changed('long_conviction_small')
        self.__long_conviction_small = value
    @property
    def service_id(self) -> str:
        """Service ID."""
        return self.__service_id
    @service_id.setter
    def service_id(self, value: str):
        self._property_changed('service_id')
        self.__service_id = value
    @property
    def gsfeer(self) -> float:
        """Goldman Sachs Fundamental Equilibrium Exchange Rate."""
        return self.__gsfeer
    @gsfeer.setter
    def gsfeer(self, value: float):
        self._property_changed('gsfeer')
        self.__gsfeer = value
    @property
    def wam(self) -> float:
        """Weighted average maturity, average of effective maturities of all securities
            held in portfolio, weighted."""
        return self.__wam
    @wam.setter
    def wam(self, value: float):
        self._property_changed('wam')
        self.__wam = value
    @property
    def wal(self) -> float:
        """Weighted average life, measures sensitivity to changes in liquidity."""
        return self.__wal
    @wal.setter
    def wal(self, value: float):
        self._property_changed('wal')
        self.__wal = value
    @property
    def backtest_id(self) -> str:
        """Marquee unique backtest identifier."""
        return self.__backtest_id
    @backtest_id.setter
    def backtest_id(self, value: str):
        self._property_changed('backtest_id')
        self.__backtest_id = value
    @property
    def leg_two_index_location(self) -> str:
        """Location of leg."""
        return self.__leg_two_index_location
    @leg_two_index_location.setter
    def leg_two_index_location(self, value: str):
        self._property_changed('leg_two_index_location')
        self.__leg_two_index_location = value
    @property
    def g_score(self) -> float:
        """Score for governance metrics."""
        return self.__g_score
    @g_score.setter
    def g_score(self, value: float):
        self._property_changed('g_score')
        self.__g_score = value
    @property
    def corporate_spread_contribution(self) -> float:
        """Contribution of corporate spread component to FCI."""
        return self.__corporate_spread_contribution
    @corporate_spread_contribution.setter
    def corporate_spread_contribution(self, value: float):
        self._property_changed('corporate_spread_contribution')
        self.__corporate_spread_contribution = value
    @property
    def market_value(self) -> float:
        """Marketable value of a given position, generally the market price for a given
            date."""
        return self.__market_value
    @market_value.setter
    def market_value(self, value: float):
        self._property_changed('market_value')
        self.__market_value = value
    @property
    def notional_currency1(self) -> str:
        """An indication of the type of currency of the notional or principal amount."""
        return self.__notional_currency1
    @notional_currency1.setter
    def notional_currency1(self, value: str):
        self._property_changed('notional_currency1')
        self.__notional_currency1 = value
    @property
    def net_debt_to_ebitda(self) -> float:
        """Net Debt to EBITDA."""
        return self.__net_debt_to_ebitda
    @net_debt_to_ebitda.setter
    def net_debt_to_ebitda(self, value: float):
        self._property_changed('net_debt_to_ebitda')
        self.__net_debt_to_ebitda = value
    @property
    def notional_currency2(self) -> str:
        """Same as Notional Currency 1."""
        return self.__notional_currency2
    @notional_currency2.setter
    def notional_currency2(self, value: str):
        self._property_changed('notional_currency2')
        self.__notional_currency2 = value
    @property
    def multiple_score(self) -> float:
        """Multiple percentile relative to Americas coverage universe (a higher score means
            more expensive)."""
        return self.__multiple_score
    @multiple_score.setter
    def multiple_score(self, value: float):
        self._property_changed('multiple_score')
        self.__multiple_score = value
    @property
    def beta_adjusted_exposure(self) -> float:
        """Beta adjusted exposure."""
        return self.__beta_adjusted_exposure
    @beta_adjusted_exposure.setter
    def beta_adjusted_exposure(self, value: float):
        self._property_changed('beta_adjusted_exposure')
        self.__beta_adjusted_exposure = value
    @property
    def paid(self) -> float:
        """Number of trades paid."""
        return self.__paid
    @paid.setter
    def paid(self, value: float):
        self._property_changed('paid')
        self.__paid = value
    @property
    def short(self) -> float:
        """Short exposure."""
        return self.__short
    @short.setter
    def short(self, value: float):
        self._property_changed('short')
        self.__short = value
    @property
    def bos_in_ticks_description(self) -> str:
        """Description of the Stock's Bid-Offer Spread in Ticks on the particular date."""
        return self.__bos_in_ticks_description
    @bos_in_ticks_description.setter
    def bos_in_ticks_description(self, value: str):
        self._property_changed('bos_in_ticks_description')
        self.__bos_in_ticks_description = value
    @property
    def time(self) -> datetime.datetime:
        """ISO 8601 formatted date and time."""
        return self.__time
    @time.setter
    def time(self, value: datetime.datetime):
        self._property_changed('time')
        self.__time = value
    @property
    def implied_correlation(self) -> float:
        """Correlation of an asset implied by observations of market prices."""
        return self.__implied_correlation
    @implied_correlation.setter
    def implied_correlation(self, value: float):
        self._property_changed('implied_correlation')
        self.__implied_correlation = value
    @property
    def normalized_performance(self) -> float:
        """Performance that is normalized to 1."""
        return self.__normalized_performance
    @normalized_performance.setter
    def normalized_performance(self, value: float):
        self._property_changed('normalized_performance')
        self.__normalized_performance = value
    @property
    def taxonomy(self) -> str:
        """An indication of the product taxonomy."""
        return self.__taxonomy
    @taxonomy.setter
    def taxonomy(self, value: str):
        self._property_changed('taxonomy')
        self.__taxonomy = value
    @property
    def swaption_vol(self) -> float:
        """Historical implied normal volatility for a liquid point on swaption vol surface."""
        return self.__swaption_vol
    @swaption_vol.setter
    def swaption_vol(self, value: float):
        self._property_changed('swaption_vol')
        self.__swaption_vol = value
    @property
    def source_origin(self) -> str:
        """Source origin."""
        return self.__source_origin
    @source_origin.setter
    def source_origin(self, value: str):
        self._property_changed('source_origin')
        self.__source_origin = value
    @property
    def measures(self) -> Tuple[str, ...]:
        """Fields that are nullable."""
        return self.__measures
    @measures.setter
    def measures(self, value: Tuple[str, ...]):
        self._property_changed('measures')
        self.__measures = value
    @property
    def total_quantity(self) -> float:
        """Rounded total quantity."""
        return self.__total_quantity
    @total_quantity.setter
    def total_quantity(self, value: float):
        self._property_changed('total_quantity')
        self.__total_quantity = value
    @property
    def internal_user(self) -> bool:
        """Whether user is internal or not."""
        return self.__internal_user
    @internal_user.setter
    def internal_user(self, value: bool):
        self._property_changed('internal_user')
        self.__internal_user = value
    @property
    def created_time(self) -> datetime.datetime:
        """Time created. ISO 8601 formatted string"""
        return self.__created_time
    @created_time.setter
    def created_time(self, value: datetime.datetime):
        self._property_changed('created_time')
        self.__created_time = value
    @property
    def identifier(self) -> str:
        """Filter by any identifier of an asset like ticker, bloomberg id etc."""
        return self.__identifier
    @identifier.setter
    def identifier(self, value: str):
        self._property_changed('identifier')
        self.__identifier = value
    @property
    def price_unit(self) -> str:
        """Unit of reported price."""
        return self.__price_unit
    @price_unit.setter
    def price_unit(self, value: str):
        self._property_changed('price_unit')
        self.__price_unit = value
    @property
    def redemption_option(self) -> str:
        """Indicates the calculation convention for callable instruments."""
        return self.__redemption_option
    @redemption_option.setter
    def redemption_option(self, value: str):
        self._property_changed('redemption_option')
        self.__redemption_option = value
    @property
    def notional_unit2(self) -> str:
        """Unit of reported notional price."""
        return self.__notional_unit2
    @notional_unit2.setter
    def notional_unit2(self, value: str):
        self._property_changed('notional_unit2')
        self.__notional_unit2 = value
    @property
    def unadjusted_low(self) -> float:
        """Unadjusted low level of an asset based on official exchange fixing or
            calculation agent marked level."""
        return self.__unadjusted_low
    @unadjusted_low.setter
    def unadjusted_low(self, value: float):
        self._property_changed('unadjusted_low')
        self.__unadjusted_low = value
    @property
    def notional_unit1(self) -> str:
        """Unit of reported notional price."""
        return self.__notional_unit1
    @notional_unit1.setter
    def notional_unit1(self, value: str):
        self._property_changed('notional_unit1')
        self.__notional_unit1 = value
    @property
    def sedol(self) -> str:
        """SEDOL - Stock Exchange Daily Official List (subject to licensing)."""
        return self.__sedol
    @sedol.setter
    def sedol(self, value: str):
        self._property_changed('sedol')
        self.__sedol = value
    @property
    def rounding_cost_pnl(self) -> float:
        """Rounding Cost Profit and Loss."""
        return self.__rounding_cost_pnl
    @rounding_cost_pnl.setter
    def rounding_cost_pnl(self, value: float):
        self._property_changed('rounding_cost_pnl')
        self.__rounding_cost_pnl = value
    @property
    def sustain_global(self) -> bool:
        """True if the stock is on the SUSTAIN (Global) 50 list as of the corresponding
            date. False if the stock is removed from the SUSTAIN (Global) 50 list
            on the corresponding date."""
        return self.__sustain_global
    @sustain_global.setter
    def sustain_global(self, value: bool):
        self._property_changed('sustain_global')
        self.__sustain_global = value
    @property
    def portfolio_id(self) -> str:
        """Marquee unique identifier for a portfolio."""
        return self.__portfolio_id
    @portfolio_id.setter
    def portfolio_id(self, value: str):
        self._property_changed('portfolio_id')
        self.__portfolio_id = value
    @property
    def ending_date(self) -> str:
        """End date of the period the valuation refers to."""
        return self.__ending_date
    @ending_date.setter
    def ending_date(self, value: str):
        self._property_changed('ending_date')
        self.__ending_date = value
    @property
    def cap_floor_atm_fwd_rate(self) -> float:
        """Cap Floor ATM forward rate."""
        return self.__cap_floor_atm_fwd_rate
    @cap_floor_atm_fwd_rate.setter
    def cap_floor_atm_fwd_rate(self, value: float):
        self._property_changed('cap_floor_atm_fwd_rate')
        self.__cap_floor_atm_fwd_rate = value
    @property
    def es_percentile(self) -> float:
        """Sector relative percentile based on E&S score."""
        return self.__es_percentile
    @es_percentile.setter
    def es_percentile(self, value: float):
        self._property_changed('es_percentile')
        self.__es_percentile = value
@property
def ann_return3_year(self) -> float:
"""Total return representing past performance, used for GS Money Market onshore
| |
import qiskit
import qtm.progress_bar
import qtm.constant
import qtm.qfim
import qtm.noise
import qtm.optimizer
import qtm.fubini_study
import numpy as np
import types, typing
def measure(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Measure ``qubits`` of ``qc`` and return the frequency of the all-zero outcome.

    Args:
        - qc (QuantumCircuit): Measured circuit
        - qubits (np.ndarray): List of measured qubits
        - cbits (list, optional): Classical bits; defaults to the same indices
          as ``qubits``.

    Returns:
        - float: Frequency of the "00..0" classical outcome
    """
    # Fixed: the default used to be the mutable literal [] (shared across
    # calls); None is the conventional sentinel. Passing [] still behaves
    # exactly as before.
    if cbits is None or len(cbits) == 0:
        cbits = list(qubits)
    n = len(qubits)
    for qubit, cbit in zip(qubits, cbits):
        qc.measure(qubit, cbit)
    if qtm.constant.noise_prob > 0:
        noise_model = qtm.noise.generate_noise_model(
            n, qtm.constant.noise_prob)
        results = qiskit.execute(qc, backend=qtm.constant.backend,
                                 noise_model=noise_model,
                                 shots=qtm.constant.num_shots).result()
        # Raw counts
        counts = results.get_counts()
        # Mitigating noise based on https://qiskit.org/textbook/ch-quantum-hardware/measurement-error-mitigation.html
        meas_filter = qtm.noise.generate_measurement_filter(
            n, noise_model=noise_model)
        # Mitigated counts
        counts = meas_filter.apply(counts.copy())
    else:
        counts = qiskit.execute(
            qc, backend=qtm.constant.backend,
            shots=qtm.constant.num_shots).result().get_counts()
    # .get(..., 0): the all-zero bitstring may be absent from the counts dict.
    return counts.get("0" * n, 0) / qtm.constant.num_shots
def x_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Append X-basis measurements (H then Z-measure) on the given qubits.

    Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
        cbits (list, optional): classical bits. Defaults to the same indices
            as ``qubits``.

    Returns:
        qiskit.QuantumCircuit: circuit with measurement gates added
    """
    # Fixed: mutable default argument [] replaced with the None sentinel
    # (behavior unchanged; passing [] still triggers the default).
    if cbits is None or len(cbits) == 0:
        cbits = list(qubits)
    for qubit, cbit in zip(qubits, cbits):
        qc.h(qubit)  # rotate X basis onto Z basis before measuring
        qc.measure(qubit, cbit)
    return qc
def y_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Append Y-basis measurements (S-dagger, H, then Z-measure) on the given qubits.

    Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
        cbits (list, optional): classical bits. Defaults to the same indices
            as ``qubits``.

    Returns:
        qiskit.QuantumCircuit: circuit with measurement gates added
    """
    # Fixed: mutable default argument [] replaced with the None sentinel
    # (behavior unchanged; passing [] still triggers the default).
    if cbits is None or len(cbits) == 0:
        cbits = list(qubits)
    for qubit, cbit in zip(qubits, cbits):
        qc.sdg(qubit)  # S† then H rotates the Y basis onto the Z basis
        qc.h(qubit)
        qc.measure(qubit, cbit)
    return qc
def z_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Append computational-basis (Z) measurements on the given qubits.

    Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
        cbits (list, optional): classical bits. Defaults to the same indices
            as ``qubits``.

    Returns:
        qiskit.QuantumCircuit: circuit with measurement gates added
    """
    # Fixed: mutable default argument [] replaced with the None sentinel
    # (behavior unchanged; passing [] still triggers the default).
    if cbits is None or len(cbits) == 0:
        cbits = list(qubits)
    for qubit, cbit in zip(qubits, cbits):
        qc.measure(qubit, cbit)
    return qc
def get_u_hat(thetas: np.ndarray, create_circuit_func: types.FunctionType, num_qubits: int,
              **kwargs):
    """Return the statevector produced by the inverse of the reconstructed circuit.

    Args:
        - thetas (np.ndarray): Parameters
        - create_circuit_func (types.FunctionType): circuit-building function
        - num_qubits (int): number of qubits
        - **kwargs: forwarded to ``create_circuit_func``

    Returns:
        - qiskit.quantum_info.Statevector: state after applying the inverted circuit
    """
    qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
    # Simplified: the original branched on `if not kwargs`, but calling with an
    # empty **kwargs is identical to calling without it, so one call suffices.
    qc = create_circuit_func(qc, thetas, **kwargs).inverse()
    return qiskit.quantum_info.Statevector.from_instruction(qc)
def get_cry_index(create_circuit_func: types.FunctionType, thetas: np.ndarray, num_qubits, **kwargs):
    """Return a list where the i-th entry is 1 iff thetas[i] parameterizes a CRY gate.

    CRY parameters need the four-term parameter-shift rule; all other gates use
    the two-term rule (see ``grad_loss``).

    Args:
        - create_circuit_func (types.FunctionType): The circuit-building function
        - thetas (np.ndarray): Parameters
        - num_qubits: number of qubits in the circuit

    Returns:
        - list: 0/1 flags, one per gate parameter
    """
    qc = qiskit.QuantumCircuit(num_qubits)
    qc = create_circuit_func(qc, thetas, **kwargs)
    layers = qtm.fubini_study.split_into_layers(qc)
    index_list = [1 if gate[0] == 'cry' else 0
                  for layer in layers
                  for gate in layer[1]]
    # Fixed: the original compared len(index_list) with len(thetas) and then
    # returned the same list on both paths — the check was a no-op and was removed.
    return index_list
def grad_loss(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
              thetas: np.ndarray, **kwargs):
    """Return the gradient of the loss function via the parameter-shift rule.

    L = 1 - |<psi~|psi>|^2 = 1 - P_0
    => nabla_L = - nabla_P_0 = - r (P_0(+s) - P_0(-s))

    Args:
        - qc (QuantumCircuit): The quantum circuit whose gradient is computed
        - create_circuit_func (types.FunctionType): The circuit-building function
        - thetas (np.ndarray): Parameters
        - **kwargs: additional parameters for different create_circuit_func()

    Returns:
        - np.ndarray: the gradient vector (same length as thetas)
    """
    # 1 marks CRY parameters (four-term PSR), 0 everything else (two-term PSR).
    index_list = get_cry_index(create_circuit_func, thetas,
                               num_qubits=qc.num_qubits, **kwargs)
    # Fixed: local renamed from `grad_loss`, which shadowed this function's
    # own name inside its body.
    gradients = np.zeros(len(thetas))
    for i in range(0, len(thetas)):
        if index_list[i] == 0:
            # Two-term parameter-shift rule (equation (13)).
            thetas1, thetas2 = thetas.copy(), thetas.copy()
            thetas1[i] += qtm.constant.two_term_psr['s']
            thetas2[i] -= qtm.constant.two_term_psr['s']
            qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
            qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
            gradients[i] = -qtm.constant.two_term_psr['r'] * (
                qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
                qtm.base.measure(qc2, list(range(qc2.num_qubits))))
        if index_list[i] == 1:
            # Four-term parameter-shift rule for controlled rotations (equation (14)).
            thetas1, thetas2 = thetas.copy(), thetas.copy()
            thetas3, thetas4 = thetas.copy(), thetas.copy()
            thetas1[i] += qtm.constant.four_term_psr['alpha']
            thetas2[i] -= qtm.constant.four_term_psr['alpha']
            thetas3[i] += qtm.constant.four_term_psr['beta']
            thetas4[i] -= qtm.constant.four_term_psr['beta']
            qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
            qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
            qc3 = create_circuit_func(qc.copy(), thetas3, **kwargs)
            qc4 = create_circuit_func(qc.copy(), thetas4, **kwargs)
            gradients[i] = - (qtm.constant.four_term_psr['d_plus'] * (
                qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
                qtm.base.measure(qc2, list(range(qc2.num_qubits)))) - qtm.constant.four_term_psr['d_minus'] * (
                qtm.base.measure(qc3, list(range(qc3.num_qubits))) -
                qtm.base.measure(qc4, list(range(qc4.num_qubits)))))
    return gradients
def grad_psi(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
             thetas: np.ndarray, r: float, s: float, **kwargs):
    """Return the derivative of psi w.r.t. each parameter via the parameter-shift rule.

    Args:
        - qc (qiskit.QuantumCircuit): circuit
        - create_circuit_func (types.FunctionType): circuit-building function
        - thetas (np.ndarray): parameters
        - r (float): parameter-shift-rule scale factor
        - s (float): parameter-shift-rule shift

    Returns:
        - np.ndarray: one shifted statevector (as a column) per parameter
    """
    derivatives = []
    for j in range(len(thetas)):
        shifted = thetas.copy()
        shifted[j] += s
        shifted_qc = create_circuit_func(qc.copy(), shifted, **kwargs)
        state = qiskit.quantum_info.Statevector.from_instruction(shifted_qc).data
        # Store each statevector as a column vector, scaled by r.
        derivatives.append(r * np.expand_dims(state, 1))
    return np.array(derivatives)
def fit_state_tomography(u: qiskit.QuantumCircuit,
                         create_vdagger_func: types.FunctionType,
                         thetas: np.ndarray,
                         num_steps: int,
                         loss_func: types.FunctionType,
                         optimizer: types.FunctionType,
                         verbose: int = 0,
                         is_return_all_thetas: bool = False,
                         **kwargs):
    """Return the new thetas that fit with the circuit from create_vdagger_func function

    Args:
        - u (QuantumCircuit): fitting circuit
        - create_vdagger_func (types.FunctionType): added circuit function
        - thetas (np.ndarray): parameters
        - num_steps (Int): number of iterations
        - loss_func (types.FunctionType): loss function
        - optimizer (types.FunctionType): optimizer function; dispatch below is
          by the function's __name__
        - verbose (Int): the seeing level of the fitting process (0: nothing, 1: progress bar, 2: one line per step)
        - **kwargs: additional parameters for create_circuit_func()

    Returns:
        - thetas (np.ndarray): the optimized parameters (or every step's thetas
          when is_return_all_thetas is True)
        - loss_values (np.ndarray): the list of loss_value
    """
    thetass = []      # thetas snapshot per step
    loss_values = []  # loss per step
    if verbose == 1:
        bar = qtm.progress_bar.ProgressBar(max_value=num_steps, disable=False)
    for i in range(0, num_steps):
        # NOTE(review): local name shadows the module-level grad_loss function
        # for the rest of this iteration.
        grad_loss = qtm.base.grad_loss(u, create_vdagger_func, thetas, **kwargs)
        optimizer_name = optimizer.__name__
        if optimizer_name == 'sgd':
            thetas = qtm.optimizer.sgd(thetas, grad_loss)
        elif optimizer_name == 'adam':
            # m, v (first/second moment accumulators) are created on the first
            # step only — assumes the optimizer does not change across steps.
            if i == 0:
                m, v = list(np.zeros(thetas.shape[0])), list(
                    np.zeros(thetas.shape[0]))
            thetas = qtm.optimizer.adam(thetas, m, v, i, grad_loss)
        elif optimizer_name in ['qng_fubini_study', 'qng_qfim', 'qng_adam']:
            # NOTE(review): r is set from two_term_psr['s'] here, while the
            # companion fit_state_preparation uses r=1/2 — confirm which is intended.
            grad_psi1 = grad_psi(u,
                                 create_vdagger_func,
                                 thetas,
                                 r=qtm.constant.two_term_psr['s'],
                                 s=np.pi,
                                 **kwargs)
            u_copy = create_vdagger_func(u.copy(), thetas, **kwargs)
            psi = qiskit.quantum_info.Statevector.from_instruction(u_copy).data
            psi = np.expand_dims(psi, 1)  # column vector
            if optimizer_name == 'qng_fubini_study':
                G = qtm.fubini_study.qng(
                    u.copy(), thetas, create_vdagger_func, **kwargs)
                thetas = qtm.optimizer.qng_fubini_study(thetas, G, grad_loss)
            if optimizer_name == 'qng_qfim':
                thetas = qtm.optimizer.qng_qfim(
                    thetas, psi, grad_psi1, grad_loss)
            if optimizer_name == 'qng_adam':
                if i == 0:
                    m, v = list(np.zeros(thetas.shape[0])), list(
                        np.zeros(thetas.shape[0]))
                thetas = qtm.optimizer.qng_adam(
                    thetas, m, v, i, psi, grad_psi1, grad_loss)
        else:
            # Unknown name: treat the optimizer as a plain callable (thetas, grad).
            thetas = optimizer(thetas, grad_loss)
        # Evaluate the loss with the freshly updated parameters.
        u_copy = create_vdagger_func(u.copy(), thetas, **kwargs)
        loss = loss_func(
            qtm.base.measure(u_copy, list(range(u_copy.num_qubits))))
        loss_values.append(loss)
        thetass.append(thetas.copy())
        if verbose == 1:
            bar.update(1)
        if verbose == 2 and i % 10 == 0:
            print("Step " + str(i) + ": " + str(loss))
    if verbose == 1:
        bar.close()
    if is_return_all_thetas:
        return thetass, loss_values
    else:
        return thetas, loss_values
def fit_state_preparation(create_u_func: types.FunctionType,
vdagger: qiskit.QuantumCircuit,
thetas: np.ndarray,
num_steps: int,
loss_func: types.FunctionType,
optimizer: types.FunctionType,
verbose: int = 0,
is_return_all_thetas: bool = False,
**kwargs):
"""Return the new thetas that fit with the circuit from create_u_func function
Args:
- create_u_func (types.FunctionType): added circuit function
- vdagger (QuantumCircuit): fitting circuit
- thetas (np.ndarray): parameters
- num_steps (Int): number of iterations
- loss_func (types.FunctionType): loss function
- optimizer (types.FunctionType): otimizer function
- verbose (Int): the seeing level of the fitting process (0: nothing, 1: progress bar, 2: one line per step)
- **kwargs: additional parameters for create_circuit_func()
Returns:
- thetas (np.ndarray): the optimized parameters
- loss_values (np.ndarray): the list of loss_value
"""
if verbose == 1:
bar = qtm.progress_bar.ProgressBar(max_value=num_steps, disable=False)
thetass = []
loss_values = []
def create_circuit_func(vdagger: qiskit.QuantumCircuit, thetas: np.ndarray, **kwargs):
return create_u_func(qiskit.QuantumCircuit(vdagger.num_qubits, vdagger.num_qubits), thetas, **kwargs).combine(vdagger)
for i in range(0, num_steps):
grad_loss = qtm.base.grad_loss(vdagger, create_circuit_func, thetas, **kwargs)
optimizer_name = optimizer.__name__
if optimizer_name == 'sgd':
thetas = qtm.optimizer.sgd(thetas, grad_loss)
elif optimizer_name == 'adam':
if i == 0:
m, v1 = list(np.zeros(thetas.shape[0])), list(
np.zeros(thetas.shape[0]))
thetas = qtm.optimizer.adam(thetas, m, v1, i, grad_loss)
elif optimizer_name in ['qng_fubini_study', 'qng_qfim', 'qng_adam']:
grad_psi1 = grad_psi(vdagger,
create_circuit_func,
thetas,
r=1 / 2,
s=np.pi,
**kwargs)
v_copy = create_circuit_func(vdagger.copy(), thetas, **kwargs)
psi = qiskit.quantum_info.Statevector.from_instruction(
v_copy).data
psi = np.expand_dims(psi, 1)
if optimizer_name == 'qng_fubini_study':
G = qtm.fubini_study.qng(
vdagger.copy(), thetas, create_circuit_func, **kwargs)
thetas = qtm.optimizer.qng_fubini_study(thetas, G, grad_loss)
if optimizer_name == 'qng_qfim':
thetas = qtm.optimizer.qng_qfim(
thetas, psi, grad_psi1, grad_loss)
if optimizer_name == 'qng_adam':
if i == 0:
m, v1 = list(np.zeros(thetas.shape[0])), list(
np.zeros(thetas.shape[0]))
thetas = qtm.optimizer.qng_adam(
thetas, m, v1, i, psi, grad_psi1, grad_loss)
else:
thetas = optimizer(thetas, grad_loss)
v_copy = create_circuit_func(vdagger.copy(), thetas, **kwargs)
loss = loss_func(
qtm.base.measure(v_copy, list(range(v_copy.num_qubits))))
loss_values.append(loss)
thetass.append(thetas.copy())
if verbose == 1:
bar.update(1)
if verbose == 2 and i % 10 == 0:
print("Step " + str(i) + ": " | |
:])
elif survey_type.lower() in ["dipole-dipole", "dipole-pole"]:
srcClass = dc.sources.Dipole([rxClass], P[ii, :], P[ii + 1, :])
source_list.append(srcClass)
return source_list
def xy_2_lineID(dc_survey):
    """
    Read DC survey class and append line ID.

    Assumes that the locations are listed in the order
    they were collected. May need to generalize for random
    point locations, but will be more expensive.

    Input:
    :param dc_survey: DC survey object exposing ``nSrc`` and
        ``source_list`` (each source has A/B electrode locations)
    Output:
    :return lineID: vector of integers, one line index per source
    """
    # Compute unit vector between two points
    nstn = dc_survey.nSrc
    # Pre-allocate space
    lineID = np.zeros(nstn)
    linenum = 0
    indx = 0
    for ii in range(nstn):
        if ii == 0:
            # First station: seed the line start (xy0) and the running
            # line mid-point (xym) from the A/B electrode mid-point.
            A = dc_survey.source_list[ii].location[0]
            B = dc_survey.source_list[ii].location[1]
            xout = np.mean([A[0:2], B[0:2]], axis=0)
            xy0 = A[:2]
            xym = xout
            # Deal with replicate pole location (nudge so the direction
            # vectors below are well defined)
            if np.all(xy0 == xym):
                xym[0] = xym[0] + 1e-3
            continue
        A = dc_survey.source_list[ii].location[0]
        B = dc_survey.source_list[ii].location[1]
        xin = np.mean([A[0:2], B[0:2]], axis=0)
        # Direction vectors relating the new station to the previous
        # station, the running mid-point, and the line start.
        vec1, r1 = r_unit(xout, xin)  # Compute vector between neighbours
        vec2, r2 = r_unit(xym, xin)  # Compute vector between current stn and mid-point
        vec3, r3 = r_unit(xy0, xin)  # Compute vector between current stn and start line
        vec4, r4 = r_unit(xym, xy0)  # Compute vector between mid-point and start line
        # Compute dot product (|cos| of the angle between directions)
        ang1 = np.abs(vec1.dot(vec2))
        ang2 = np.abs(vec3.dot(vec4))
        # If either angle exceeds 45 degrees (and no pair of points
        # coincides), the next point starts a new line
        if ((ang1 < np.cos(np.pi / 4.0)) | (ang2 < np.cos(np.pi / 4.0))) & (
            np.all(np.r_[r1, r2, r3, r4] > 0)
        ):
            # Re-initiate start and mid-point location
            xy0 = A[:2]
            xym = xin
            # Deal with replicate pole location
            if np.all(xy0 == xym):
                xym[0] = xym[0] + 1e-3
            linenum += 1
            indx = ii
        else:
            # Same line: drag the running mid-point toward the new station
            xym = np.mean([xy0, xin], axis=0)
        lineID[ii] = linenum
        xout = xin
    return lineID
def r_unit(p1, p2):
    """Return the unit vector pointing from ``p1`` to ``p2`` and its length.

    ``p1`` and ``p2`` are coordinate sequences of equal length. When the
    points coincide (zero separation) a zero vector is returned instead
    of dividing by zero.
    """
    assert len(p1) == len(p2), "locs must be the same shape."
    delta = [b - a for a, b in zip(p1, p2)]
    length = np.linalg.norm(np.asarray(delta))
    if length == 0:
        unit = np.zeros(len(p1))
    else:
        unit = delta / length
    return unit, length
def gettopoCC(mesh, actind, option="top"):
    """
    Get topography from active indices of mesh.

    :param mesh: ``discretize`` tensor or tree mesh (2D or 3D)
    :param numpy.ndarray actind: boolean active-cell indicator
    :param str option: "top" returns the top face of the highest active
        cell in each vertical column; "center" returns its cell center
    :return: a reduced-dimension mesh (tensor) or cell-center locations
        (tree), together with the topography elevation per column
    """
    if mesh._meshType == "TENSOR":
        if mesh.dim == 3:
            mesh2D = discretize.TensorMesh([mesh.hx, mesh.hy], mesh.x0[:2])
            zc = mesh.cell_centers[:, 2]
            # Reshape to (horizontal columns, vertical cells) so each row
            # is one vertical column of the mesh.
            ACTIND = actind.reshape((mesh.vnC[0] * mesh.vnC[1], mesh.vnC[2]), order="F")
            ZC = zc.reshape((mesh.vnC[0] * mesh.vnC[1], mesh.vnC[2]), order="F")
            topoCC = np.zeros(ZC.shape[0])
            for i in range(ZC.shape[0]):
                ind = np.argmax(ZC[i, :][ACTIND[i, :]])
                if option == "top":
                    # Half a cell height above the highest active center.
                    dz = mesh.hz[ACTIND[i, :]][ind] * 0.5
                elif option == "center":
                    dz = 0.0
                else:
                    raise Exception()
                topoCC[i] = ZC[i, :][ACTIND[i, :]].max() + dz
            return mesh2D, topoCC
        elif mesh.dim == 2:
            mesh1D = discretize.TensorMesh([mesh.hx], [mesh.x0[0]])
            yc = mesh.cell_centers[:, 1]
            ACTIND = actind.reshape((mesh.vnC[0], mesh.vnC[1]), order="F")
            YC = yc.reshape((mesh.vnC[0], mesh.vnC[1]), order="F")
            topoCC = np.zeros(YC.shape[0])
            for i in range(YC.shape[0]):
                ind = np.argmax(YC[i, :][ACTIND[i, :]])
                if option == "top":
                    dy = mesh.hy[ACTIND[i, :]][ind] * 0.5
                elif option == "center":
                    dy = 0.0
                else:
                    raise Exception()
                topoCC[i] = YC[i, :][ACTIND[i, :]].max() + dy
            return mesh1D, topoCC
    elif mesh._meshType == "TREE":
        inds = mesh.get_boundary_cells(actind, direction="zu")[0]
        if option == "top":
            dz = mesh.h_gridded[inds, -1] * 0.5
        elif option == "center":
            dz = 0.0
        else:
            # BUG FIX: previously an unknown option fell through and hit a
            # NameError on ``dz``; fail explicitly like the tensor branches.
            raise Exception()
        return mesh.cell_centers[inds, :-1], mesh.cell_centers[inds, -1] + dz
def drapeTopotoLoc(mesh, pts, actind=None, option="top", topo=None):
    """
    Drape location right below (cell center) the topography
    """
    dim = mesh.dim
    if dim == 2:
        # Accept (n, 1) / (n, 2) arrays but only the x coordinate is used.
        if pts.ndim == 2 and pts.shape[1] in [1, 2]:
            pts = pts[:, 0]
        if pts.ndim > 1:
            raise ValueError("pts should be 1d array")
    elif dim == 3:
        if pts.shape[1] not in [2, 3]:
            raise ValueError("shape of pts should be (x, 3) or (x, 2)")
        # just grab the xy locations in the first two columns
        pts = pts[:, :2]
    else:
        raise NotImplementedError()
    if actind is None:
        actind = surface2ind_topo(mesh, topo)
    mesh_type = mesh._meshType
    if mesh_type == "TENSOR":
        reduced_mesh, elev = gettopoCC(mesh, actind, option=option)
        nearest = closestPoints(reduced_mesh, pts)
        out = np.c_[pts, elev[nearest]]
    elif mesh_type == "TREE":
        locs, elev = gettopoCC(mesh, actind, option=option)
        if dim == 3:
            nearest = closestPointsGrid(locs, pts)
            out = np.c_[locs[nearest, :], elev[nearest]]
        else:
            nearest = closestPointsGrid(locs, pts, dim=1)
            out = np.c_[locs[nearest], elev[nearest]]
    else:
        raise NotImplementedError()
    return out
def genTopography(mesh, zmin, zmax, seed=None, its=100, anisotropy=None):
    """Generate a random topography model over the horizontal extent of ``mesh``.

    Returns the random surface values (bounded by ``zmin``/``zmax``)
    together with the lower-dimensional mesh they live on.
    """
    if mesh.dim == 3:
        horizontal_mesh = discretize.TensorMesh(
            [mesh.hx, mesh.hy], x0=[mesh.x0[0], mesh.x0[1]]
        )
        surface = model_builder.randomModel(
            mesh.vnC[:2], bounds=[zmin, zmax], its=its, seed=seed, anisotropy=anisotropy
        )
        return surface, horizontal_mesh
    if mesh.dim == 2:
        horizontal_mesh = discretize.TensorMesh([mesh.hx], x0=[mesh.x0[0]])
        surface = model_builder.randomModel(
            mesh.vnC[:1], bounds=[zmin, zmax], its=its, seed=seed, anisotropy=anisotropy
        )
        return surface, horizontal_mesh
    raise Exception("Only works for 2D and 3D models")
def closestPointsGrid(grid, pts, dim=2):
    """Move a list of points to the closest points on a grid.

    :param numpy.ndarray grid: candidate locations (1D values when
        ``dim == 1``, otherwise an (n, dim) array)
    :param numpy.ndarray pts: Points to move
    :param int dim: spatial dimension of the grid
    :rtype: numpy.ndarray
    :return: nodeInds
    """
    if dim == 1:
        # Brute-force nearest-value search along a single axis.
        indices = [np.abs(p - grid).argmin() for p in pts.tolist()]
        return np.asarray(indices, dtype=int)
    # KD-tree nearest-neighbour query for multi-dimensional grids.
    _, indices = cKDTree(grid).query(pts)
    return indices
def gen_3d_survey_from_2d_lines(
    survey_type,
    a,
    b,
    n_spacing,
    n_lines=5,
    line_length=200.0,
    line_spacing=20.0,
    x0=0,
    y0=0,
    z0=0,
    src_offset_y=0.0,
    dim=3,
    is_IO=True,
):
    """
    Generate 3D DC survey using gen_DCIPsurvey function.

    Input:
    :param str survey_type: 'dipole-dipole' | 'pole-dipole' |
        'dipole-pole' | 'pole-pole' | 'gradient'
    :param int a: pole seperation
    :param int b: dipole separation
    :param int n_spacing: number of rx dipoles per tx
    :param int n_lines: number of parallel 2D lines
    :param float line_length: length of each line in x
    :param float line_spacing: spacing between lines in y
    :param float src_offset_y: y-shift applied to the A/B electrodes
    Output:
    :return SimPEG.dc.SurveyDC.Survey survey_3d: 3D DC survey object
        (returned together with its ``dc.IO`` helper as ``IO_3d, survey_3d``)
    """
    # y coordinate of each parallel survey line
    ylocs = np.arange(n_lines) * line_spacing + y0
    survey_lists_2d = []
    srcList = []
    line_inds = []
    for i, y in enumerate(ylocs):
        # Generate DC survey object
        xmin, xmax = x0, x0 + line_length
        ymin, ymax = y, y
        zmin, zmax = 0, 0
        IO_2d = dc.IO()
        endl = np.array([[xmin, ymin, zmin], [xmax, ymax, zmax]])
        survey_2d = gen_DCIPsurvey(endl, survey_type, a, b, n_spacing, dim=3,)
        srcList.append(survey_2d.source_list)
        # Build the per-line 2D survey (x-z plane) through the IO helper
        survey_2d = IO_2d.from_abmn_locations_to_survey(
            survey_2d.locations_a[:, [0, 2]],
            survey_2d.locations_b[:, [0, 2]],
            survey_2d.locations_m[:, [0, 2]],
            survey_2d.locations_n[:, [0, 2]],
            survey_type,
            dimension=2,
        )
        survey_lists_2d.append(survey_2d)
        # Tag every datum of this line with the line index
        line_inds.append(np.ones(survey_2d.nD, dtype=int) * i)
    line_inds = np.hstack(line_inds)
    # Flatten the per-line source lists into a single 3D survey
    srcList = sum(srcList, [])
    survey_3d = dc.Survey(srcList)
    IO_3d = dc.IO()
    # Optionally offset the current electrodes off the receiver line
    survey_3d.locations_a[:, 1] += src_offset_y
    survey_3d.locations_b[:, 1] += src_offset_y
    survey_3d = IO_3d.from_abmn_locations_to_survey(
        survey_3d.locations_a,
        survey_3d.locations_b,
        survey_3d.locations_m,
        survey_3d.locations_n,
        survey_type,
        dimension=3,
        line_inds=line_inds,
    )
    return IO_3d, survey_3d
############
# Deprecated
############
def plot_pseudoSection(
    data,
    ax=None,
    survey_type="dipole-dipole",
    data_type="appConductivity",
    space_type="half-space",
    clim=None,
    scale="linear",
    sameratio=True,
    pcolorOpts={},
    data_location=False,
    dobs=None,
    dim=2,
):
    """Deprecated alias for :func:`plot_pseudosection`.

    Warns with ``FutureWarning`` and forwards the arguments, translating
    the old keyword spellings (``pcolorOpts``/``data_location``) to the
    new ones.
    """
    warnings.warn(
        "The plot_pseudoSection method has been deprecated. Please use "
        "plot_pseudosection instead. This will be removed in version"
        " 0.16.0 of SimPEG",
        FutureWarning,
    )
    forwarded = dict(
        ax=ax,
        survey_type=survey_type,
        data_type=data_type,
        space_type=space_type,
        clim=clim,
        scale=scale,
        pcolor_opts=pcolorOpts,
        data_locations=data_location,
        dobs=dobs,
    )
    return plot_pseudosection(data, **forwarded)
def apparent_resistivity(
    data_object,
    survey_type=None,
    space_type="half space",
    dobs=None,
    eps=1e-10,
    **kwargs,
):
    """Deprecated alias for :func:`apparent_resistivity_from_voltage`.

    Warns with ``DeprecationWarning`` and forwards to the replacement,
    using ``data_object.dobs`` when no explicit ``dobs`` is supplied.
    """
    warnings.warn(
        "The apparent_resistivity method has been deprecated. Please use "
        "apparent_resistivity_from_voltage instead. This will be removed in version"
        " 0.16.0 of SimPEG",
        DeprecationWarning,
    )
    if survey_type is not None:
        # The argument is accepted but ignored; warn so callers clean up.
        warnings.warn(
            "Keyword argument 'survey_type' is no longer necessary. "
            "Survey may now have a mix of pole and dipole sources and receivers. "
            "This will be removed in version 0.16.0 of SimPEG",
            FutureWarning,
        )
    voltages = data_object.dobs if dobs is None else dobs
    return apparent_resistivity_from_voltage(
        data_object.survey, voltages, space_type=space_type, eps=eps, **kwargs
    )
# Deprecated alias kept for backward compatibility: calling
# ``source_receiver_midpoints`` warns and dispatches to
# ``pseudo_locations`` (removal planned for SimPEG 0.16.0).
source_receiver_midpoints = deprecate_method(
    pseudo_locations, "source_receiver_midpoints", "0.16.0"
)
def plot_layer(rho, mesh, **kwargs):
    """Deprecated alias for :func:`plot_1d_layer_model`.

    Warns with ``DeprecationWarning`` and forwards the layer thicknesses
    and resistivities to the replacement function.
    """
    warnings.warn(
        "The plot_layer method has been deprecated. Please use "
        "plot_1d_layer_model instead. This will be removed in version"
        " 0.17.0 of SimPEG",
        DeprecationWarning,
    )
    thicknesses = mesh.hx
    return plot_1d_layer_model(thicknesses, rho, z0=mesh.origin[0], **kwargs)
def convertObs_DC3D_to_2D(survey, lineID, flag="local"):
    """Deprecated alias for :func:`convert_survey_3d_to_2d_lines`.

    NOTE(review): ``flag`` is accepted for backward compatibility but is
    not forwarded to the replacement function.
    """
    warnings.warn(
        "The convertObs_DC3D_to_2D method has been deprecated. Please use "
        "convert_3d_survey_to_2d. This will be removed in version"
        " 0.16.0 of SimPEG",
        FutureWarning,
    )
    return convert_survey_3d_to_2d_lines(survey, lineID)
def getSrc_locs(survey):
    """Deprecated alias for :meth:`Survey.source_locations`."""
    warnings.warn(
        "The getSrc_locs method has been deprecated. Source "
        "locations are now computed as a method of the survey "
        "class. Please use Survey.source_locations(). This method "
        " will be removed in version 0.17.0 of SimPEG",
        DeprecationWarning,
    )
    return survey.source_locations()
def writeUBC_DCobs(
fileName,
data,
dim,
format_type,
survey_type="dipole-dipole",
ip_type=0,
comment_lines="",
):
"""
Write UBC GIF DCIP 2D or 3D observation file
Input:
:param str fileName: including path where the file is written out
:param SimPEG.Data data: DC data object
:param int dim: either 2 | | |
0], dtype=np.float32
)
output[name]["colors"][s]["g"] = np.array(
colors[:, 1], dtype=np.float32
)
output[name]["colors"][s]["b"] = np.array(
colors[:, 2], dtype=np.float32
)
else:
colors = np.array([cmaps[s](x) for x in data[mapping["c"]][s]])
colors = np.round(colors * 255.0)
output[name]["colors"][s]["r"] = np.array(
colors[:, 0], dtype=np.float32
)
output[name]["colors"][s]["g"] = np.array(
colors[:, 1], dtype=np.float32
)
output[name]["colors"][s]["b"] = np.array(
colors[:, 2], dtype=np.float32
)
for name, data in self.trees_data.items():
mapping = self.trees[name]["mapping"]
point_helper = self.trees[name]["point_helper"]
output[name] = {}
output[name]["meta"] = self.trees[name]
output[name]["type"] = "tree"
if point_helper is not None and point_helper in self.scatters_data:
scatter = self.scatters_data[point_helper]
scatter_mapping = self.scatters[point_helper]["mapping"]
x_t = []
y_t = []
z_t = []
for i in range(len(data[mapping["from"]])):
x_t.append(scatter[scatter_mapping["x"]][data[mapping["from"]][i]])
x_t.append(scatter[scatter_mapping["x"]][data[mapping["to"]][i]])
y_t.append(scatter[scatter_mapping["y"]][data[mapping["from"]][i]])
y_t.append(scatter[scatter_mapping["y"]][data[mapping["to"]][i]])
z_t.append(scatter[scatter_mapping["z"]][data[mapping["from"]][i]])
z_t.append(scatter[scatter_mapping["z"]][data[mapping["to"]][i]])
output[name]["x"] = np.array(
[s * (x - minimum) / diff for x in x_t], dtype=np.float32
)
output[name]["y"] = np.array(
[s * (y - minimum) / diff for y in y_t], dtype=np.float32
)
output[name]["z"] = np.array(
[s * (z - minimum) / diff for z in z_t], dtype=np.float32
)
else:
output[name]["x"] = np.array(
[s * (x - minimum) / diff for x in data[mapping["x"]]],
dtype=np.float32,
)
output[name]["y"] = np.array(
[s * (y - minimum) / diff for y in data[mapping["y"]]],
dtype=np.float32,
)
output[name]["z"] = np.array(
[s * (z - minimum) / diff for z in data[mapping["z"]]],
dtype=np.float32,
)
if mapping["c"] in data:
colormap = self.trees[name]["colormap"]
cmap = None
if isinstance(colormap, str):
cmap = plt.cm.get_cmap(colormap)
else:
cmap = colormap
colors = np.array([cmap(x) for x in data[mapping["c"]]])
colors = np.round(colors * 255.0)
output[name]["r"] = np.array(colors[:, 0], dtype=np.float32)
output[name]["g"] = np.array(colors[:, 1], dtype=np.float32)
output[name]["b"] = np.array(colors[:, 2], dtype=np.float32)
return output
    def create_data(self) -> str:
        """Returns a JavaScript string defining a JavaScript object containing the data.

        Coordinates are normalised to ``[0, scale]`` using the global
        min/max and rounded to 3 decimals; colour values are produced by
        mapping the "c" series through the configured colormaps.

        Returns:
            :obj:`str`: JavaScript code defining an object containing the data
        """
        s = self.scale
        mini, maxi = self.get_min_max()
        diff = maxi - mini
        output = "const data = {\n"
        # Create the data for the scatters
        # TODO: If it's not interactive, labels shouldn't be exported.
        for name, data in self.scatters_data.items():
            mapping = self.scatters[name]["mapping"]
            colormaps = self.scatters[name]["colormap"]
            # Resolve string colormap names to matplotlib colormap objects.
            cmaps = [None] * len(colormaps)
            for i, colormap in enumerate(colormaps):
                if isinstance(colormap, str):
                    cmaps[i] = plt.cm.get_cmap(colormap)
                else:
                    cmaps[i] = colormap
            output += name + ": {\n"
            x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping["x"]]]
            output += "x: [" + ",".join(map(str, x_norm)) + "],\n"
            y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping["y"]]]
            output += "y: [" + ",".join(map(str, y_norm)) + "],\n"
            z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping["z"]]]
            output += "z: [" + ",".join(map(str, z_norm)) + "],\n"
            if mapping["labels"] in data:
                # NOTE(review): the comprehension variable ``s`` shadows the
                # scale only inside the comprehension scope (harmless).
                fmt_labels = ["'{0}'".format(s) for s in data[mapping["labels"]]]
                output += "labels: [" + ",".join(fmt_labels) + "],\n"
            if mapping["s"] in data:
                output += "s: ["
                for series in range(len(data[mapping["s"]])):
                    output += (
                        "["
                        + ",".join(map(str, np.round(data[mapping["s"]][series], 3)))
                        + "],\n"
                    )
                output += "],\n"
            output += "colors: [\n"
            # One colour block per data series.
            for series in range(len(data[mapping["c"]])):
                output += "{\n"
                if mapping["cs"] in data:
                    colors = np.array(
                        [cmaps[series](x) for x in data[mapping["c"]][series]]
                    )
                    # Desaturate each colour by its "cs" value (via HSL).
                    for i, c in enumerate(colors):
                        hsl = np.array(colour.rgb2hsl(c[:3]))
                        hsl[1] = hsl[1] - hsl[1] * data[mapping["cs"]][series][i]
                        colors[i] = np.append(np.array(colour.hsl2rgb(hsl)), 1.0)
                    colors = np.round(colors * 255.0)
                    output += (
                        "r: [" + ",".join(map(str, map(int, colors[:, 0]))) + "],\n"
                    )
                    output += (
                        "g: [" + ",".join(map(str, map(int, colors[:, 1]))) + "],\n"
                    )
                    output += (
                        "b: [" + ",".join(map(str, map(int, colors[:, 2]))) + "],\n"
                    )
                elif mapping["c"] in data:
                    colors = np.array(
                        [cmaps[series](x) for x in data[mapping["c"]][series]]
                    )
                    colors = np.round(colors * 255.0)
                    output += (
                        "r: [" + ",".join(map(str, map(int, colors[:, 0]))) + "],\n"
                    )
                    output += (
                        "g: [" + ",".join(map(str, map(int, colors[:, 1]))) + "],\n"
                    )
                    output += (
                        "b: [" + ",".join(map(str, map(int, colors[:, 2]))) + "],\n"
                    )
                output += "},\n"
            output += "]"
            output += "},\n"
        # Emit the tree (edge) data sets.
        for name, data in self.trees_data.items():
            mapping = self.trees[name]["mapping"]
            point_helper = self.trees[name]["point_helper"]
            output += name + ": {\n"
            if point_helper is not None and point_helper in self.scatters_data:
                # Edges reference points of a scatter layer: emit each edge
                # as the pair of its endpoint coordinates.
                scatter = self.scatters_data[point_helper]
                scatter_mapping = self.scatters[point_helper]["mapping"]
                x_t = []
                y_t = []
                z_t = []
                for i in range(len(data[mapping["from"]])):
                    x_t.append(scatter[scatter_mapping["x"]][data[mapping["from"]][i]])
                    x_t.append(scatter[scatter_mapping["x"]][data[mapping["to"]][i]])
                    y_t.append(scatter[scatter_mapping["y"]][data[mapping["from"]][i]])
                    y_t.append(scatter[scatter_mapping["y"]][data[mapping["to"]][i]])
                    z_t.append(scatter[scatter_mapping["z"]][data[mapping["from"]][i]])
                    z_t.append(scatter[scatter_mapping["z"]][data[mapping["to"]][i]])
                x_norm = [round(s * (x - mini) / diff, 3) for x in x_t]
                output += f"x: [" + ",".join(map(str, x_norm)) + "],\n"
                y_norm = [round(s * (y - mini) / diff, 3) for y in y_t]
                output += "y: [" + ",".join(map(str, y_norm)) + "],\n"
                z_norm = [round(s * (z - mini) / diff, 3) for z in z_t]
                output += "z: [" + ",".join(map(str, z_norm)) + "],\n"
            else:
                x_norm = [round(s * (x - mini) / diff, 3) for x in data[mapping["x"]]]
                output += "x: [" + ",".join(map(str, x_norm)) + "],\n"
                y_norm = [round(s * (y - mini) / diff, 3) for y in data[mapping["y"]]]
                output += "y: [" + ",".join(map(str, y_norm)) + "],\n"
                z_norm = [round(s * (z - mini) / diff, 3) for z in data[mapping["z"]]]
                output += "z: [" + ",".join(map(str, z_norm)) + "],\n"
            if mapping["c"] in data:
                colormap = self.trees[name]["colormap"]
                cmap = None
                if isinstance(colormap, str):
                    cmap = plt.cm.get_cmap(colormap)
                else:
                    cmap = colormap
                colors = np.array([cmap(x) for x in data[mapping["c"]]])
                colors = np.round(colors * 255.0)
                output += "r: [" + ",".join(map(str, colors[:, 0])) + "],\n"
                output += "g: [" + ",".join(map(str, colors[:, 1])) + "],\n"
                output += "b: [" + ",".join(map(str, colors[:, 2])) + "],\n"
            output += "},\n"
        output += "};\n"
        return output
@staticmethod
def quickplot(
title: str = "",
clear_color: str = "#111111",
coords: bool = False,
coords_color: str = "#888888",
coords_box: bool = False,
coords_ticks: bool = False,
coords_grid: bool = False,
coords_tick_count: int = 10,
coords_tick_length: float = 2.0,
coords_offset: float = 5.0,
x_title: str = "",
y_title: str = "",
show_legend: bool = True,
legend_title: str = "Legend",
legend_orientation: str = "vertical",
legend_number_format: str = "{:.2f}",
view: str = "front",
scale: float = 750.0,
alpha_blending=False,
anti_aliasing=True,
style: Dict[str, Dict[str, Any]] = {},
impress: str = None,
x: List = [],
y: List = [],
z: List = None,
labels: List = None,
c: List = [],
scatter_name: str = "Data",
shader: str = "smoothCircle",
has_legend: bool = True,
point_scale: float = 1.0,
max_point_size: float = 50.0,
colormap: Union[str, Colormap, List[str], List[Colormap]] = "rainbow",
series_title: Union[str, List[str]] = None,
f: List = None,
t: List = None,
file_name: str = "index",
path: str = "./",
template: str = "default",
notebook_height: int = 500,
) -> None:
faerun = Faerun(
title,
clear_color,
coords,
coords_color,
coords_box,
coords_ticks,
coords_grid,
coords_tick_count,
coords_tick_length,
coords_offset,
x_title,
y_title,
show_legend,
legend_title,
legend_orientation,
legend_number_format,
view,
scale,
alpha_blending,
anti_aliasing,
style,
impress,
)
data = {
"x": x,
"y": y,
}
if z:
data["z"] = z
if labels:
data["labels"] = labels
if isinstance(c, Iterable) and not isinstance(c[0], str) and not series_title:
series_title = [f"Series {str(i)}" for i in range(len(c))]
if c:
data["c"] = c
faerun.add_scatter(
scatter_name,
data,
shader=shader,
has_legend=has_legend,
point_scale=point_scale,
max_point_size=max_point_size,
colormap=colormap,
series_title=series_title,
)
if f and t and len(f) == len(t):
faerun.add_tree(
scatter_name + "_tree", {"from": f, "to": t}, point_helper=scatter_name
)
faerun.plot(file_name, path, template, notebook_height)
@staticmethod
def make_list(obj: Any, make_list_list: bool = False) -> List:
""" If an object isn't a list, it is added to one and returned,
otherwise, the list is returned.
Arguments:
obj (:obj:`Any`): A Python object
Keyword Arguments:
make_list_list (:obj:`bool`): Whether to make a list a list of a list
Returns:
:obj:`List`: The object wrapped in a list (or the original list)
"""
# Check whether any object is a list, this is important for eg:
# [None, []]
any_list = False
only_none = True
if type(obj) is list:
for o in obj:
if o:
only_none = False
if type(o) is list:
any_list = True
if make_list_list and type(obj) is list and not any_list and not only_none:
return [obj]
elif type(obj) is list:
return obj
else:
return [obj]
@staticmethod
def expand_list(
l: List, length: int, with_value: Any = None, with_none: | |
"""
An implementation of the confluent hypergeometric function.
"""
from __future__ import division
import numpy as np
from numpy import pi
from numpy.lib.scimath import sqrt
from scipy.special import gamma, rgamma, jv, gammaln, poch
import warnings
from hyp1f1_decimal import hyp1f1 as hyp1f1_decimal
# Convergence tolerance used by the series and recurrence evaluators below.
tol = 1.0e-15
# |z| threshold above which the asymptotic expansion is used instead of
# the Taylor series.
BIGZ = 340
# Coefficients of the polynomial g appearing in the hyperasymptotic
# expansion of Paris (2013).
PARIS_G = np.vstack((np.array([0,0,0,0,0,0,0,0, -1, 2/3]),
                     np.array([0,0,0,0,0,0, -90, 270, -225, 46])/15,
                     np.array([0,0,0,0,-756, 5040, -11760, 11340, -3969,
                               230])/70,
                     np.array([0,0, -3240, 37800, -170100, 370440, -397530,
                               183330, -17781, -3626])/350,
                     np.array([-1069200, 19245600, -141134400, 541870560,
                               -1160830440, 1353607200, -743046480,
                               88280280, 43924815, -4032746])/231000))
def new_hyp1f1(a, b, z):
    """
    An implementation of the confluent hypergeometric function based on _[pop].

    References
    ----------
    ..[gst] <NAME>,
        *Numerical Methods for Special Functions*,
        SIAM, 2007.
    ..[dlmf] Nist, DLMF
    ..[pop] <NAME>,
        *Numerical Methods for the Computation of the Confluent
        and Gauss Hypergeometric Functions*,
        http://arxiv.org/abs/1407.7786.
    """
    if b <= 0 and b == int(b):
        # Poles are located at b = 0, -1, -2, ...
        return np.nan + 1J*np.nan
    # Exactly one of a, b negative: apply Kummer's relation (3.19) in
    # _[pop], 1F1(a, b, z) = e^z 1F1(b - a, b, -z).
    use_kummer = (a < 0) != (b < 0)
    if use_kummer:
        a, z = b - a, -z
    if a >= 0 and b >= 0:
        result = hyp1f1_IA(a, b, z) if z >= 0 else hyp1f1_IB(a, b, z)
    elif a < 0 and b < 0:
        result = hyp1f1_III(a, b, z)
    else:
        raise Exception("Shouldn't be able to get here!")
    if use_kummer:
        result *= np.exp(-z)
    return result
def hyp1f1_IA(a, b, z):
    """Compute hyp1f1 in the case where a, b >= 0 and z >= 0."""
    # For small |z| the Taylor series converges quickly; for large |z|
    # switch to the asymptotic expansion.
    if np.abs(z) < BIGZ:
        return taylor_series(a, b, z)
    return asymptotic_series(a, b, z)
def hyp1f1_IB(a, b, z):
    """Compute hyp1f1 in the case where a, b >= 0 and z < 0."""
    def _direct(bb):
        # Direct evaluation at shifted b, choosing series by |z|.
        if np.abs(z) >= BIGZ:
            return asymptotic_series(a, bb, z)
        return taylor_series(a, bb, z)

    if a > b + 2:
        # Start at b + N where direct evaluation is stable, then recurse
        # back down in b (recurrence (3.14) of _[pop]).
        N = int(a - b)
        w0 = _direct(b + N + 1)
        w1 = _direct(b + N)
        return b_backward_recurrence(a, b + N, z, w0, w1, N)
    return _direct(b)
def hyp1f1_III(a, b, z):
    """Compute hyp1f1 in the case where a, b < 0."""
    if a == int(a):
        # Negative-integer a: the series terminates, so hyp1f1 is a
        # polynomial of degree -a in z.
        degree = int(-a)
        term = 1
        total = 1
        for k in range(degree):
            term *= z*(a + k)/((k + 1)*(b + k))
            total += term
        return total
    # Otherwise use the (++) recurrence to reduce to case IA or IB.
    N = int(max(-a, -b) + 1)
    if z >= 0:
        w0 = hyp1f1_IA(a + N + 1, b + N + 1, z)
        w1 = hyp1f1_IA(a + N, b + N, z)
    else:
        w0 = hyp1f1_IB(a + N + 1, b + N + 1, z)
        w1 = hyp1f1_IB(a + N, b + N, z)
    return ab_backward_recurrence(a + N, b + N, z, w0, w1, N)
def a_forward_recurrence(a, b, z, w0, w1, N):
    """Use the recurrence relation (DLMF 13.3.1)

    (b-a)*1F1(a-1,b,z) + (2a-b+z)1F1(a,b,z) - a*1F1(a+1,b,z) = 0

    to compute 1F1(a+n,b,z) given w0 = 1F1(a-1,b,z) and w1 = 1F1(a,b,z).

    WARNING: 1F1 is the dominant solution of this recurrence relation
    *if* Re(z) > 0. In other words, don't use it if Re(z) is small or
    negative. Also, note that when Re(z) <= 0, 1F1 is *not* the
    minimal solution, so you can't use Miller/Olver's algorithm to
    recover stability. See [1] Chapter 4 for information on
    dominant/minimal solutions, and in particular Example 4.9 for
    1F1.
    """
    # BUG FIX: the original used Python 2's ``xrange``, which raises
    # NameError on Python 3 (the sibling recurrences already use range).
    for i in range(N):
        tmp = w1
        w1 = ((b - a)*w0 + (2*a - b + z)*w1)/a
        w0 = tmp
        a += 1
    return w1
def b_backward_recurrence(a, b, z, w0, w1, N):
    """Use recurrence relation (3.14) from _[pop] to compute hyp1f1(a, b -
    N, z) given w0 = hyp1f1(a, b + 1, z) and w1 = hyp1f1(a, b, z).

    The minimal solution is gamma(b - a)*hyp1f1(a, b, z)/gamma(b), so
    it's safe to naively use the recurrence relation.
    """
    prev, cur = w0, w1
    for _ in range(N):
        nxt = -((z*(b - a))*prev + b*(1 - b - z)*cur) / (b*(b - 1))
        prev, cur = cur, nxt
        b -= 1
    return cur
def b_forward_recurrence(a, b, z, w0, N, tol, maxiter=500):
    """Use the recurrence relation (3.14) from _[pop] to compute hyp1f1(a,
    b + N, z) given w0 = hyp1f1(a, b, z).

    The minimal solution is gamma(b - a)*hyp1f1(a, b, z)/gamma(b),
    so we use Olver's algorithm. Here we follow the notation from the
    DLMF 3.6.
    """
    # TODO: use the log of gamma to prevent blowup
    # Rescale so that we follow the minimal solution of the recurrence.
    w0 *= gamma(b - a)*rgamma(b)
    # p and e are Olver's auxiliary sequences (DLMF 3.6 notation).
    p, e = [0, 1], [w0]
    curmin, n = 1e100, 1
    # Forward substitution
    while True:
        an, bn, cn = 1, -(1 - b - n - z)/z, (b - a + n - 1)/z
        p.append((bn*p[-1] - cn*p[-2])/an)
        e.append(cn*e[-1]/an)
        # Truncation-error estimate for stopping the forward pass.
        testmin = abs(e[-1]/(p[-2]*p[-1]))
        if n <= N:
            if testmin < curmin:
                curmin = testmin
        else:
            # Past the target index: stop once the estimate is negligible
            # relative to the smallest one seen, or maxiter is exhausted.
            if testmin <= tol*curmin or n - N > maxiter:
                break
        n += 1
    # Back substitution
    wn = 0
    for i in range(n, N, -1):
        wn = (p[i-1]*wn + e[i-1])/p[i]
    # Undo the minimal-solution scaling at the target index b + N.
    return rgamma(b + N - a)*gamma(b + N)*wn
def ab_backward_recurrence(a, b, z, w0, w1, N):
    """Use recurrence relation (3.14) from _[pop] to compute hyp1f1(a - N,
    b - N, z) given w0 = hyp1f1(a + 1, b + 1, z) and w1 = hyp1f1(a, b, z).

    The minimal solution is hyp1f1(a, b, z)/gamma(b), so it's safe to
    naively use the recurrence relation.
    """
    prev, cur = w0, w1
    for _ in range(N):
        nxt = (a*z*prev + b*(b - z - 1)*cur) / (b*(b - 1))
        prev, cur = cur, nxt
        a -= 1
        b -= 1
    return cur
def ab_forward_recurrence(a, b, z, w0, N, tol, maxiter=500):
    """Use the recurrence relation (3.14) from _[pop] to compute hyp1f1(a
    + N, b + N, z) given w0 = hyp1f1(a, b, z).

    The minimal solution is hyp1f1(a, b, z)/gamma(b), so we use
    Olver's algorithm. Here we follow the notation from the DLMF 3.6.
    """
    # Rescale so that we follow the minimal solution of the recurrence.
    w0 *= rgamma(b)
    # p and e are Olver's auxiliary sequences (DLMF 3.6 notation).
    p, e = [0, 1], [w0]
    curmin, n = 1e100, 1
    # Forward substitution
    while True:
        an, bn, cn = 1, -(b - z - 1 + n)/((a + n)*z), -1/((a + n)*z)
        p.append((bn*p[-1] - cn*p[-2])/an)
        e.append(cn*e[-1]/an)
        # Truncation-error estimate for stopping the forward pass.
        testmin = abs(e[-1]/(p[-2]*p[-1]))
        if n <= N:
            if testmin < curmin:
                curmin = testmin
        else:
            # Past the target index: stop once the estimate is negligible
            # relative to the smallest one seen, or maxiter is exhausted.
            if testmin <= tol*curmin or n - N > maxiter:
                break
        n += 1
    # Back substitution
    wn = 0
    for i in range(n, N, -1):
        wn = (p[i-1]*wn + e[i-1])/p[i]
    # Undo the minimal-solution scaling at the target index b + N.
    return gamma(b + N)*wn
def taylor_series(a, b, z, maxiters=500, tol=tol):
    """
    Compute hyp1f1 by evaluating the Taylor series directly.

    Successive terms obey A_{i+1} = A_i (a+i) z / ((b+i)(i+1)); the sum
    stops once two consecutive terms are negligible relative to the
    partial sums.
    """
    prev_term = 1
    prev_sum = prev_term
    i = 0
    while i <= maxiters:
        term = prev_term*(a + i)*z / ((b + i)*(i + 1))
        total = prev_sum + term
        converged = (
            total != 0
            and prev_sum != 0
            and np.abs(term/total) < tol
            and np.abs(prev_term/prev_sum) < tol
        )
        if converged:
            break
        prev_sum = total
        prev_term = term
        i += 1
    # if i > maxiters:
    #     warnings.warn("Number of evaluations exceeded maxiters on "
    #                   "a = {}, b = {}, z = {}.".format(a, b, z))
    return total
def taylor_series_frac(a, b, z, maxiters=500, tol=tol):
    """Compute hyp1f1 using the continued fraction implementation of the
    Taylor series (Muller 2001, Method 1.C).
    """
    Aim2 = 1
    Aim1 = 1 + z*a/b
    # Robustness: ensure Ai is defined even if maxiters < 2 (the loop
    # body would otherwise never run and the return raised NameError).
    Ai = Aim1
    # BUG FIX: the original used Python 2's ``xrange``, which raises
    # NameError on Python 3; ``range`` is the drop-in replacement.
    for i in range(2, maxiters + 1):
        r = (a + i - 1)/(i*(b + i - 1))
        Ai = Aim1 + (Aim1 - Aim2)*r*z
        # NOTE(review): this tests |Ai/Aim1| < tol; presumably
        # |Ai/Aim1 - 1| < tol was intended — confirm against Muller (2001).
        if Aim1 != 0 and np.abs(Ai/Aim1) < tol:
            break
        Aim2 = Aim1
        Aim1 = Ai
    return Ai
def taylor_series_recur(a, b, z, maxiters=500, | |
    def set_default_selinux_context(self, path, changed):
        # Reset ``path`` to its default SELinux context; no-op when the
        # selinux bindings are unavailable or SELinux is disabled.
        # Returns the (possibly updated) ``changed`` flag.
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed
        context = self.selinux_default_context(path)
        return self.set_context_if_different(path, context, False)
    def set_context_if_different(self, path, context, changed, diff=None):
        # Apply the SELinux context ``context`` to ``path`` when it differs
        # from the file's current context. Returns True if a change was
        # made (or would be, in check mode); otherwise returns ``changed``.
        if not HAVE_SELINUX or not self.selinux_enabled():
            return changed
        cur_context = self.selinux_context(path)
        new_context = list(cur_context)
        # Iterate over the current context instead of the
        # argument context, which may have selevel.
        (is_special_se, sp_context) = self.is_special_selinux_path(path)
        if is_special_se:
            # "Special" paths get their own fixed context; presumably
            # filesystems with their own labeling — see is_special_selinux_path.
            new_context = sp_context
        else:
            for i in range(len(cur_context)):
                if len(context) > i:
                    if context[i] is not None and context[i] != cur_context[i]:
                        new_context[i] = context[i]
                    elif context[i] is None:
                        # None means "keep whatever is currently set".
                        new_context[i] = cur_context[i]
        if cur_context != new_context:
            if diff is not None:
                # Record before/after contexts for --diff output.
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['secontext'] = cur_context
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['secontext'] = new_context
            try:
                if self.check_mode:
                    # Report "would change" without touching the file.
                    return True
                rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
            except OSError as e:
                self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
                               new_context=new_context, cur_context=cur_context, input_was=context)
            if rc != 0:
                self.fail_json(path=path, msg='set selinux context failed')
            changed = True
        return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
    """Ensure the permission bits of ``path`` equal ``mode``; return updated changed flag.

    ``mode`` may be an int, an octal string (e.g. '0644') or a symbolic chmod
    expression (e.g. 'u+rwX'); ``None`` is a no-op.  When ``diff`` is given it
    is populated with before/after modes as '0NNN' strings.  In check mode the
    change is reported but not applied.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    # NOTE(review): lstat runs before the mode-is-None early return, so a
    # missing path raises even when no mode change was requested — confirm
    # this is intended.
    path_stat = os.lstat(b_path)
    if mode is None:
        return changed
    if not isinstance(mode, int):
        try:
            # octal string form, e.g. '0644'
            mode = int(mode, 8)
        except Exception:
            try:
                # symbolic form, e.g. 'u+rwX' (needs the current stat for +/-/X)
                mode = self._symbolic_mode_to_octal(path_stat, mode)
            except Exception as e:
                path = to_text(b_path)
                self.fail_json(path=path,
                               msg="mode must be in octal or symbolic form",
                               details=to_native(e))
            if mode != stat.S_IMODE(mode):
                # prevent mode from having extra info or being invalid long number
                path = to_text(b_path)
                self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
    prev_mode = stat.S_IMODE(path_stat.st_mode)
    if prev_mode != mode:
        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            diff['before']['mode'] = '0%03o' % prev_mode
            if 'after' not in diff:
                diff['after'] = {}
            diff['after']['mode'] = '0%03o' % mode
        if self.check_mode:
            return True
        # FIXME: comparison against string above will cause this to be executed
        # every time
        try:
            if hasattr(os, 'lchmod'):
                # lchmod never follows symlinks (available on BSD/macOS)
                os.lchmod(b_path, mode)
            else:
                if not os.path.islink(b_path):
                    os.chmod(b_path, mode)
                else:
                    # Attempt to set the perms of the symlink but be
                    # careful not to change the perms of the underlying
                    # file while trying
                    underlying_stat = os.stat(b_path)
                    os.chmod(b_path, mode)
                    new_underlying_stat = os.stat(b_path)
                    if underlying_stat.st_mode != new_underlying_stat.st_mode:
                        os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
        except OSError as e:
            if os.path.islink(b_path) and e.errno == errno.EPERM:  # Can't set mode on symbolic links
                pass
            elif e.errno in (errno.ENOENT, errno.ELOOP):  # Can't set mode on broken symbolic links
                pass
            else:
                raise
        except Exception as e:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chmod failed', details=to_native(e),
                           exception=traceback.format_exc())
        # re-stat to report what actually happened (chmod may be a no-op on
        # some symlink setups above)
        path_stat = os.lstat(b_path)
        new_mode = stat.S_IMODE(path_stat.st_mode)
        if new_mode != prev_mode:
            changed = True
    return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
    """Ensure the ext-filesystem attribute flags of ``path`` equal ``attributes``.

    ``attributes`` is a flag string as accepted by chattr(1) (e.g. 'i', 'aA');
    ``None`` is a no-op.  Returns the updated changed flag; populates ``diff``
    with before/after flags when given.  In check mode the command is not run.
    A failed chattr aborts the module via fail_json.
    """
    if attributes is None:
        return changed
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    existing = self.get_file_attributes(b_path)
    if existing.get('attr_flags', '') != attributes:
        attrcmd = self.get_bin_path('chattr')
        if attrcmd:
            # '=' sets the flag list exactly (replacing current flags)
            # NOTE(review): b_path is bytes inside an otherwise-str argv;
            # confirm run_command handles mixed types.
            attrcmd = [attrcmd, '=%s' % attributes, b_path]
            changed = True
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['attributes'] = existing.get('attr_flags')
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['attributes'] = attributes
            if not self.check_mode:
                try:
                    rc, out, err = self.run_command(attrcmd)
                    # chattr may exit 0 but still print to stderr; treat that
                    # as a failure too
                    if rc != 0 or err:
                        raise Exception("Error while setting attributes: %s" % (out + err))
                except Exception as e:
                    path = to_text(b_path)
                    self.fail_json(path=path, msg='chattr failed', details=to_native(e),
                                   exception=traceback.format_exc())
    return changed
def get_file_attributes(self, path):
    """Return the ext-filesystem attributes of ``path`` via ``lsattr -vd``.

    Best-effort: returns a dict with keys 'attr_flags' (e.g. 'ai'),
    'version' and 'attributes' (human-readable, via format_attributes),
    or an empty dict when lsattr is unavailable or its output cannot be
    parsed.
    """
    output = {}
    # second arg False: lsattr is optional, don't fail if it's missing
    attrcmd = self.get_bin_path('lsattr', False)
    if attrcmd:
        attrcmd = [attrcmd, '-vd', path]
        try:
            rc, out, err = self.run_command(attrcmd)
            if rc == 0:
                # with -v, the first two fields are <version> <flags>
                res = out.split(' ')[0:2]
                output['attr_flags'] = res[1].replace('-', '').strip()
                output['version'] = res[0].strip()
                output['attributes'] = format_attributes(output['attr_flags'])
        except Exception:
            # Fix: was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt.  Parsing/command errors stay best-effort.
            pass
    return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
    """
    This enables symbolic chmod string parsing as stated in the chmod man-page
    This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"

    :param path_stat: os.stat result of the target; the basis for relative
        operators ('+'/'-') and the conditional execute bit 'X'
    :param symbolic_mode: comma-separated symbolic clauses
    :returns: the resulting numeric mode
    :raises ValueError: on illegal user or permission characters
    """
    # start from the target's current permission bits
    new_mode = stat.S_IMODE(path_stat.st_mode)
    # Now parse all symbolic modes
    for mode in symbolic_mode.split(','):
        # Per single mode. This always contains a '+', '-' or '='
        # Split it on that
        permlist = MODE_OPERATOR_RE.split(mode)
        # And find all the operators
        opers = MODE_OPERATOR_RE.findall(mode)
        # The user(s) where it's all about is the first element in the
        # 'permlist' list. Take that and remove it from the list.
        # An empty user or 'a' means 'all'.
        users = permlist.pop(0)
        use_umask = (users == '')
        if users == 'a' or users == '':
            users = 'ugo'
        # Check if there are illegal characters in the user list
        # They can end up in 'users' because they are not split
        # (USERS_RE is a module-level pattern defined elsewhere; a match
        # presumably flags characters outside the allowed user set)
        if USERS_RE.match(users):
            raise ValueError("bad symbolic permission for mode: %s" % mode)
        # Now we have two list of equal length, one contains the requested
        # permissions and one with the corresponding operators.
        for idx, perms in enumerate(permlist):
            # Check if there are illegal characters in the permissions
            if PERMS_RE.match(perms):
                raise ValueError("bad symbolic permission for mode: %s" % mode)
            # apply this clause for each user class, folding into new_mode
            for user in users:
                mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
                new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
    return new_mode
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
    """Fold ``mode_to_apply`` into ``current_mode`` for one chmod operator.

    ``user`` is a single ownership class character ('u', 'g' or 'o');
    callers guarantee this.  '+' adds the requested bits, '-' removes
    them, and '=' replaces the whole class (its rwx bits plus its special
    bit: setuid/setgid/sticky respectively) with the requested bits.
    """
    if operator == '+':
        return current_mode | mode_to_apply
    if operator == '-':
        # drop exactly the requested bits (equivalent to subtracting the
        # intersection, as the original chmod semantics require)
        return current_mode & ~mode_to_apply
    if operator == '=':
        # full bit mask covered by each ownership class
        class_mask = {
            'u': stat.S_IRWXU | stat.S_ISUID,
            'g': stat.S_IRWXG | stat.S_ISGID,
            'o': stat.S_IRWXO | stat.S_ISVTX,
        }[user]
        # clear this class's bits out of current_mode, then set the new ones
        inverse_mask = class_mask ^ PERM_BITS
        return (current_mode & inverse_mask) | mode_to_apply
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & | |
the value is the data itself.
For more information, see `Data Types <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes>`__ in the *Amazon DynamoDB Developer Guide* .
- **S** *(string) --*
An attribute of type String. For example:
``"S": "Hello"``
- **N** *(string) --*
An attribute of type Number. For example:
``"N": "123.45"``
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
- **B** *(bytes) --*
An attribute of type Binary. For example:
``"B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"``
- **SS** *(list) --*
An attribute of type String Set. For example:
``"SS": ["Giraffe", "Hippo" ,"Zebra"]``
- *(string) --*
- **NS** *(list) --*
An attribute of type Number Set. For example:
``"NS": ["42.2", "-19", "7.5", "3.14"]``
Numbers are sent across the network to DynamoDB as strings, to maximize compatibility across languages and libraries. However, DynamoDB treats them as number type attributes for mathematical operations.
- *(string) --*
- **BS** *(list) --*
An attribute of type Binary Set. For example:
``"BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="]``
- *(bytes) --*
- **M** *(dict) --*
An attribute of type Map. For example:
``"M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}}``
- *(string) --*
- *(dict) --*
Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see `Data Types <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes>`__ in the *Amazon DynamoDB Developer Guide* .
- **L** *(list) --*
An attribute of type List. For example:
``"L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}]``
- *(dict) --*
Represents the data for an attribute.
Each attribute value is described as a name-value pair. The name is the data type, and the value is the data itself.
For more information, see `Data Types <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes>`__ in the *Amazon DynamoDB Developer Guide* .
- **NULL** *(boolean) --*
An attribute of type Null. For example:
``"NULL": true``
- **BOOL** *(boolean) --*
An attribute of type Boolean. For example:
``"BOOL": true``
- **SizeEstimateRangeGB** *(list) --*
An estimate of item collection size, in gigabytes. This value is a two-element array containing a lower bound and an upper bound for the estimate. The estimate includes the size of all the items in the table, plus the size of all attributes projected into all of the local secondary indexes on that table. Use this estimate to measure whether a local secondary index is approaching its size limit.
The estimate is subject to change over time; therefore, do not rely on the precision or accuracy of the estimate.
- *(float) --*
- **ConsumedCapacity** *(list) --*
The capacity units consumed by the entire ``BatchWriteItem`` operation.
Each element consists of:
* ``TableName`` - The table that consumed the provisioned throughput.
* ``CapacityUnits`` - The total number of capacity units consumed.
- *(dict) --*
The capacity units consumed by an operation. The data returned includes the total provisioned throughput consumed, along with statistics for the table and any indexes involved in the operation. ``ConsumedCapacity`` is only returned if the request asked for it. For more information, see `Provisioned Throughput <https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html>`__ in the *Amazon DynamoDB Developer Guide* .
- **TableName** *(string) --*
The name of the table that was affected by the operation.
- **CapacityUnits** *(float) --*
The total number of capacity units consumed by the operation.
- **ReadCapacityUnits** *(float) --*
The total number of read capacity units consumed by the operation.
- **WriteCapacityUnits** *(float) --*
The total number of write capacity units consumed by the operation.
- **Table** *(dict) --*
The amount of throughput consumed on the table affected by the operation.
- **ReadCapacityUnits** *(float) --*
The total number of read capacity units consumed on a table or an index.
- **WriteCapacityUnits** *(float) --*
The total number of write capacity units consumed on a table or an index.
- **CapacityUnits** *(float) --*
The total number of capacity units consumed on a table or an index.
- **LocalSecondaryIndexes** *(dict) --*
The amount of throughput consumed on each local index affected by the operation.
- *(string) --*
- *(dict) --*
Represents the amount of provisioned throughput capacity consumed on a table or an index.
- **ReadCapacityUnits** *(float) --*
The total number of read capacity units consumed on a table or an index.
- **WriteCapacityUnits** *(float) --*
The total number of write capacity units consumed on a table or an index.
- **CapacityUnits** *(float) --*
The total number of capacity units consumed on a table or an index.
- **GlobalSecondaryIndexes** *(dict) --*
The amount of throughput consumed on each global index affected by the operation.
- *(string) --*
- *(dict) --*
Represents the amount of provisioned throughput capacity consumed on a table or an index.
- **ReadCapacityUnits** *(float) --*
The total number of read capacity units consumed on a table or an index.
- **WriteCapacityUnits** *(float) --*
The total number of write capacity units consumed on a table or an index.
- **CapacityUnits** *(float) --*
The total number of capacity units consumed on a table or an index.
:type RequestItems: dict
:param RequestItems: **[REQUIRED]**
A map of one or more table names and, for each table, a list of operations to be performed (``DeleteRequest`` or ``PutRequest`` ). Each element in the map consists of the following:
* ``DeleteRequest`` - Perform a ``DeleteItem`` operation on the specified item. The item to be deleted is identified by a ``Key`` subelement:
* ``Key`` - A map of primary key attribute values that uniquely identify the item. Each entry in this map consists of an attribute name and an attribute value. For each primary key, you must provide *all* of the key attributes. For example, with a simple primary key, you only need to provide a value for the partition key. For a composite primary key, you must provide values for *both* the partition key and the sort key.
* ``PutRequest`` - Perform a ``PutItem`` operation on the specified item. The item to be put is identified by an ``Item`` subelement:
* ``Item`` - A map of attributes and their values. Each entry in this map consists of an attribute name and an attribute value. Attribute values must not be null; string and binary type attributes must have lengths greater than zero; and set type attributes must not be empty. Requests that contain empty values will be rejected with a ``ValidationException`` exception. If you specify any attributes that are part of an index key, then the data types for those attributes must match those of the schema in the table\'s attribute definition.
- *(string) --*
- *(list) --*
- *(dict) --*
Represents an operation to perform - either ``DeleteItem`` or ``PutItem`` . You can only request one of these operations, not both, in a single ``WriteRequest`` . If you do need to perform both of these operations, you will need to provide two separate ``WriteRequest`` objects.
- **PutRequest** *(dict) --*
A request to perform a ``PutItem`` operation.
- **Item** *(dict) --* **[REQUIRED]**
A map of attribute name to attribute values, representing the primary key of an item to be processed by ``PutItem`` . All of the table\'s primary key attributes must be specified, and their data types must match those of the table\'s key schema. If any attributes are present in the item which are part of an index key schema for the table, their types must match the index key schema.
- *(string) --*
- *(dict) --*
Represents the data for an | |
with stats is not supported (yet)")
if is_scalar(mapt):
return
if list in mapt.keys():
other_keys = [k for k in mapt if k != list]
for e in other_keys:
if e in mapt[list]:
tomerge = mapt.pop(e)
if mode_inst:
for typ in tomerge:
if not type(typ) == type:
continue
if not typ in mapt[list][e]:
mapt[list][e][typ] = tomerge[typ]
# Note: don't update [list]["_stats" (or other modes' key)], we keep the original stats
# that is, what's actually been inspected on the list, originally
# (and we can't really update those stats as scalar stats aren't relevant
# to a list context
elif typ == mode_inst.key:
mode_inst.merge(mapt[list][e][mode_inst.key],tomerge[mode_inst.key])
pass
else:
mode_inst.merge(mapt[list][e][typ][mode_inst.key],tomerge[typ][mode_inst.key])
pass
elif mode == "mapping":
for typ in tomerge:
if typ is str and splitstr in mapt[list][e]:
# precedence splitstr > str, we keep splitstr and ignore str
continue
if not typ in mapt[list][e]:
# that field exist in the [list] but with a different type
# just merge the typ
mapt[list][e].update(tomerge)
# precedence splitstr > str
if splitstr is typ:
mapt[list][e].pop(str,None)
mapt[list][e].update(tomerge)
else:
# assuming what's in [list] is enough, we just popped the value
# from mapt, that's enough
pass
# explore further
merge_scalar_list(mapt[list],mode)
elif type(mapt) == dict:
for k in mapt:
merge_scalar_list(mapt[k],mode)
elif type(mapt) == list:
for e in mapt:
merge_scalar_list(e,mode)
def inspect_docs(docs, mode="type", clean=True, merge=False, logger=logging,
                 pre_mapping=False, limit=None, sample=None, metadata=True):
    """Inspect docs and return a summary of their structure:
    - mode:
      + "type": explore documents and report strict data structure
      + "mapping": same as type but also perform tests on data to guess the best
        mapping (eg. check if a string is splittable, etc...). Implies merge=True
      + "stats": explore documents and compute basic stats (count,min,max,sum)
      + "deepstats": same as stats but record values and also compute mean,stdev,median
        (memory intensive...)
      (mode can also be a list of modes, eg. ["type","mapping"]. There's little
      overhead computing multiple types as most time is spent on actually getting the data)
    - clean: don't delete recorded values or temporary results
    - merge: merge scalar into list when both exist (eg. {"val":..} and [{"val":...}])
    - limit: can limit the inspection to the x first docs (None = no limit, inspects all)
    - sample: in combination with limit, randomly extract a sample of 'limit' docs
      (so not necessarily the x first ones defined by limit). If random.random()
      is greater than sample, doc is inspected, otherwise it's skipped
    - metadata: compute metadata on the result
    """
    modes = [mode] if isinstance(mode, str) else mode
    _map = {m: {} for m in modes}
    cnt = 0
    errors = set()
    t0 = time.time()
    innert0 = time.time()
    if sample is not None:
        assert limit, "Parameter 'sample' requires 'limit' to be defined"
        assert sample != 1, "Sample value 1 not allowed (no documents would be inspected)"
    if limit:
        limit = int(limit)
        logger.debug("Limiting inspection to the %s first documents" % limit)
    for doc in docs:
        if sample is not None and random.random() <= sample:
            continue
        for m in modes:
            try:
                inspect(doc, mapt=_map[m], mode=m)
            except Exception as e:
                # Bug fixes: log through the caller-supplied logger (was the
                # root 'logging' module, ignoring the 'logger' parameter) and
                # dump the actual failing document (was pformat("dpc"), a typo
                # that logged the literal string "dpc").
                logger.exception("Can't inspect document (_id: %s) because: %s\ndoc: %s" % (doc.get("_id"), e, pformat(doc)))
                errors.add(str(e))
        cnt += 1
        if cnt % 10000 == 0:
            logger.info("%d documents processed [%s]" % (cnt, timesofar(innert0)))
            innert0 = time.time()
        if limit and cnt >= limit:
            # Bug fix: was 'cnt > limit', which inspected limit+1 documents.
            logger.debug("done")
            break
    logger.info("Done [%s]" % timesofar(t0))
    logger.info("Post-processing")
    for m in modes:
        mode_inst = get_mode_layer(m)
        if mode_inst:
            mode_inst.post(_map[m], m, clean)
    # "mapping" mode implies merging scalars into lists
    merge = "mapping" in modes or merge
    if merge and "mapping" in _map:
        # Robustness fix: guard on the key's presence — merge=True combined
        # with a non-"mapping" mode used to raise KeyError here.
        merge_scalar_list(_map["mapping"], "mapping")
    if "mapping" in modes and pre_mapping is False:
        # directly generate ES mapping
        import biothings.utils.es as es
        try:
            _map["mapping"] = es.generate_es_mapping(_map["mapping"])
            if metadata:
                # compute some extra metadata
                _map = compute_metadata(_map, "mapping")
        except es.MappingError as e:
            prem = {"pre-mapping": _map["mapping"], "errors": e.args[1]}
            _map["mapping"] = prem
    elif errors:
        _map["errors"] = errors
    return _map
def compute_metadata(mapt, mode):
    """Attach a ``__metadata__`` section to an inspect result.

    For "mapping" mode, record the total number of flattened fields — useful
    to decide whether ES6's default 1000-field limit must be raised.  Any
    other mode returns ``mapt`` untouched.
    """
    if mode != "mapping":
        return mapt
    flat_fields = flatten_doc(mapt["mapping"])
    mapt["__metadata__"] = {"total_fields": len(flat_fields)}
    return mapt
def typify_inspect_doc(dmap):
    """
    dmap is an inspect doc which was converted to be stored in a database,
    namely actual python types were stringified to be storable. This function
    does the opposite and restores python types within the inspect doc.
    """
    def typify(val):
        # Robustness fix: the original tested 'type(val) != type' and then
        # called val.startswith(), which raised AttributeError for any
        # non-string scalar (ints, floats, ...). Only strings can carry the
        # "__type__:" marker, so check for str explicitly.
        if isinstance(val, str) and val.startswith("__type__:"):
            # NOTE(review): eval() of a stored string — acceptable only
            # because inspect docs come from our own trusted database.
            return eval(val.replace("__type__:", ""))
        return val
    return dict_walk(dmap, typify)
def stringify_inspect_doc(dmap):
    """Return ``dmap`` with python ``type`` values replaced by
    '__type__:<name>' marker strings (dot-free, so the result is storable
    in mongo); any other value is converted with str()."""
    def stringify(val):
        # using the type's bare __name__ prevents dots in the field
        # (not storable in mongo)
        return "__type__:%s" % val.__name__ if type(val) is type else str(val)
    return dict_walk(dmap, stringify)
if __name__ == "__main__":
d1 = {"id" : "124",'lofd': [{"val":34.3},{"ul":"bla"}],"d":{"start":134,"end":5543}}
d2 = {"id" : "5",'lofd': {"oula":"mak","val":34},"d":{"start":134,"end":5543}}
d3 = {"id" : "890",'lofd': [{"val":34}],"d":{"start":134,"end":5543}}
# merge either ways in the same
m12 = inspect_docs([d1,d2])["type"]
m21 = inspect_docs([d2,d1])["type"]
#if undordered list, then:
assert m21 == m12, "\nm21=%s\n!=\nm12=%s" % (pformat(m21),pformat(m12))
# val can be an int and a float
m1 = inspect_docs([{"val":34},{"val":1.2}])["type"]
# set: types can be in any order
assert set(m1["val"]) == {int,float}
# even if val is in a list
m2 = inspect_docs([{"val":34},[{"val":1.2}]])["type"]
# list and val not merged
assert set(m2.keys()) == {'val',list}
# another example with a mix a dict and list (see "p")
od1 = {"id" : "124","d":[{"p":123},{"p":456}]}
od2 = {"id" : "124","d":[{"p":123},{"p":[456,789]}]}
m12 = inspect_docs([od1,od2],mode="type")["type"]
m21 = inspect_docs([od2,od1],mode="type")["type"]
assert m12 == m21
# "p" is a integer or a list of integer
assert m12["d"][list]["p"].keys() == {list,int}
# stats
m = {}
inspect(d1,mapt=m,mode="stats")
# some simple check
assert set(m["id"].keys()) == {str}
assert m["id"][str]["_stats"]["_count"] == 1
assert m["id"][str]["_stats"]["_max"] == 3
assert m["id"][str]["_stats"]["_min"] == 3
assert m["lofd"].keys() == {list}
# list's stats
assert m["lofd"][list]["_stats"]["_count"] == 1
assert m["lofd"][list]["_stats"]["_max"] == 2
assert m["lofd"][list]["_stats"]["_min"] == 2
# one list's elem stats
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 1
assert m["lofd"][list]["val"][float]["_stats"]["_max"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_min"] == 34.3
# again
inspect(d1,mapt=m,mode="stats")
assert m["id"][str]["_stats"]["_count"] == 2
assert m["id"][str]["_stats"]["_max"] == 3
assert m["id"][str]["_stats"]["_min"] == 3
assert m["lofd"][list]["_stats"]["_count"] == 2
assert m["lofd"][list]["_stats"]["_max"] == 2
assert m["lofd"][list]["_stats"]["_min"] == 2
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 2
assert m["lofd"][list]["val"][float]["_stats"]["_max"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_min"] == 34.3
# mix with d2
inspect(d2,mapt=m,mode="stats")
assert m["id"][str]["_stats"]["_count"] == 3
assert m["id"][str]["_stats"]["_max"] == 3
assert m["id"][str]["_stats"]["_min"] == 1 # new min
assert m["lofd"][list]["_stats"]["_count"] == 2 # not incremented as in d2 it's not a list
assert m["lofd"][list]["_stats"]["_max"] == 2
assert m["lofd"][list]["_stats"]["_min"] == 2
# now float & int
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 2
assert m["lofd"][list]["val"][float]["_stats"]["_max"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_min"] == 34.3
# val{int} wasn't merged
assert m["lofd"]["val"][int]["_stats"]["_count"] == 1
assert m["lofd"]["val"][int]["_stats"]["_max"] == 34
assert m["lofd"]["val"][int]["_stats"]["_min"] == 34
# d2 again
inspect(d2,mapt=m,mode="stats")
assert m["id"][str]["_stats"]["_count"] == 4
assert m["id"][str]["_stats"]["_max"] == 3
assert m["id"][str]["_stats"]["_min"] == 1
assert m["lofd"][list]["_stats"]["_count"] == 2
assert m["lofd"][list]["_stats"]["_max"] == 2
assert m["lofd"][list]["_stats"]["_min"] == 2
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 2
assert m["lofd"][list]["val"][float]["_stats"]["_max"] == 34.3
assert m["lofd"][list]["val"][float]["_stats"]["_min"] == 34.3
assert m["lofd"]["val"][int]["_stats"]["_count"] == 2
assert m["lofd"]["val"][int]["_stats"]["_max"] == 34
assert m["lofd"]["val"][int]["_stats"]["_min"] == 34
# all counts should be 10
m = inspect_docs([d1] * 10,mode="stats")["stats"]
assert m["d"]["end"][int]["_stats"]["_count"] == 10
assert m["d"]["start"][int]["_stats"]["_count"] == 10
assert m["id"][str]["_stats"]["_count"] == 10
assert m["lofd"][list]["_stats"]["_count"] == 10
assert m["lofd"][list]["ul"][str]["_stats"]["_count"] == 10
assert m["lofd"][list]["val"][float]["_stats"]["_count"] == 10
#### test merge_stats
nd1 = {"id" : "124",'lofd': [{"val":34.3},{"ul":"bla"}]}
nd2 = {"id" : "5678",'lofd': {"val":50.2}}
m = {}
inspect(nd1,mapt=m,mode="deepstats")
inspect(nd2,mapt=m,mode="deepstats")
assert set(m["lofd"].keys()) == {list,'val','_stats'}, "%s" % m["lofd"].keys()
assert m["lofd"][list]["val"][float]["_stats"] == {'__vals': [34.3], '_count': 1, '_max': 34.3, '_min': 34.3}, \
m["lofd"][list]["val"][float]["_stats"]
# merge stats into the left param
DeepStatsMode().merge(m["lofd"][list]["val"][float]["_stats"],m["lofd"]["val"][float]["_stats"])
assert m["lofd"][list]["val"][float]["_stats"] == {'__vals': [34.3, 50.2], '_count': 2, '_max': 50.2, '_min': 34.3}
# mapping mode (splittable strings)
# "bla" is splitable in one case, not in the other
# "oula" is splitable, "arf" is not
sd1 = {"_id" : "124",'vals': [{"oula":"this is great"},{"bla":"I am splitable","arf":"ENS355432"}]}
sd2 = {"_id" : "5678",'vals': {"bla":"rs45653","void":654}}
sd3 = {"_id" : "124",'vals': [{"bla":"thisisanid"}]}
m = {}
inspect(sd3,mapt=m,mode="mapping")
# bla not splitable here
assert m["vals"][list]["bla"][str] == {}
inspect(sd1,mapt=m,mode="mapping")
# now it is
assert m["vals"][list]["bla"][splitstr] == {}
inspect(sd2,mapt=m,mode="mapping")
# not splitable in sd2
assert m["vals"]["bla"][str] == {}
# mapping with type of type
sd1 = {"_id" : "123","homologene" : {"id":"bla","gene" : [[123,456],[789,102]]}}
m = inspect_docs([sd1],mode="mapping")["mapping"]
assert m == {'homologene': {'properties': {'gene': | |
"manual_scaling")
@manual_scaling.setter
def manual_scaling(self, value: Optional[pulumi.Input['StandardAppVersionManualScalingArgs']]) -> None:
    pulumi.set(self, "manual_scaling", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/us-central1/connectors/c1.
    """
    # NOTE(review): this docstring looks copy-pasted from vpc_access_connector;
    # confirm what the 'name' arg actually holds against the App Engine Admin API.
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]) -> None:
    pulumi.set(self, "name", value)
@property
@pulumi.getter(name="noopOnDestroy")
def noop_on_destroy(self) -> Optional[pulumi.Input[bool]]:
    """
    If set to `true`, the application version will not be deleted.
    """
    return pulumi.get(self, "noop_on_destroy")

@noop_on_destroy.setter
def noop_on_destroy(self, value: Optional[pulumi.Input[bool]]) -> None:
    pulumi.set(self, "noop_on_destroy", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
    """
    The ID of the project in which the resource belongs.
    If it is not provided, the provider project is used.
    """
    return pulumi.get(self, "project")

@project.setter
def project(self, value: Optional[pulumi.Input[str]]) -> None:
    pulumi.set(self, "project", value)
@property
@pulumi.getter
def runtime(self) -> Optional[pulumi.Input[str]]:
    """
    Desired runtime. Example python27.
    """
    return pulumi.get(self, "runtime")

@runtime.setter
def runtime(self, value: Optional[pulumi.Input[str]]) -> None:
    pulumi.set(self, "runtime", value)
@property
@pulumi.getter(name="runtimeApiVersion")
def runtime_api_version(self) -> Optional[pulumi.Input[str]]:
    """
    The version of the API in the given runtime environment.
    Please see the app.yaml reference for valid values at `https://cloud.google.com/appengine/docs/standard/<language>/config/appref`\
    Substitute `<language>` with `python`, `java`, `php`, `ruby`, `go` or `nodejs`.
    """
    return pulumi.get(self, "runtime_api_version")

@runtime_api_version.setter
def runtime_api_version(self, value: Optional[pulumi.Input[str]]) -> None:
    pulumi.set(self, "runtime_api_version", value)
@property
@pulumi.getter
def service(self) -> Optional[pulumi.Input[str]]:
    """
    AppEngine service resource
    """
    return pulumi.get(self, "service")

@service.setter
def service(self, value: Optional[pulumi.Input[str]]) -> None:
    pulumi.set(self, "service", value)
@property
@pulumi.getter
def threadsafe(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether multiple requests can be dispatched to this version at once.
    """
    return pulumi.get(self, "threadsafe")

@threadsafe.setter
def threadsafe(self, value: Optional[pulumi.Input[bool]]) -> None:
    pulumi.set(self, "threadsafe", value)
@property
@pulumi.getter(name="versionId")
def version_id(self) -> Optional[pulumi.Input[str]]:
    """
    Relative name of the version within the service. For example, `v1`. Version names can contain only lowercase letters, numbers, or hyphens. Reserved names: "default", "latest", and any name with the prefix "ah-".
    """
    return pulumi.get(self, "version_id")

@version_id.setter
def version_id(self, value: Optional[pulumi.Input[str]]) -> None:
    pulumi.set(self, "version_id", value)
@property
@pulumi.getter(name="vpcAccessConnector")
def vpc_access_connector(self) -> Optional[pulumi.Input['StandardAppVersionVpcAccessConnectorArgs']]:
    """
    Enables VPC connectivity for standard apps.
    Structure is documented below.
    """
    return pulumi.get(self, "vpc_access_connector")

@vpc_access_connector.setter
def vpc_access_connector(self, value: Optional[pulumi.Input['StandardAppVersionVpcAccessConnectorArgs']]) -> None:
    pulumi.set(self, "vpc_access_connector", value)
class StandardAppVersion(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automatic_scaling: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionAutomaticScalingArgs']]] = None,
basic_scaling: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionBasicScalingArgs']]] = None,
delete_service_on_destroy: Optional[pulumi.Input[bool]] = None,
deployment: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionDeploymentArgs']]] = None,
entrypoint: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionEntrypointArgs']]] = None,
env_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
handlers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionHandlerArgs']]]]] = None,
inbound_services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_class: Optional[pulumi.Input[str]] = None,
libraries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionLibraryArgs']]]]] = None,
manual_scaling: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionManualScalingArgs']]] = None,
noop_on_destroy: Optional[pulumi.Input[bool]] = None,
project: Optional[pulumi.Input[str]] = None,
runtime: Optional[pulumi.Input[str]] = None,
runtime_api_version: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
threadsafe: Optional[pulumi.Input[bool]] = None,
version_id: Optional[pulumi.Input[str]] = None,
vpc_access_connector: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionVpcAccessConnectorArgs']]] = None,
__props__=None):
"""
Standard App Version resource to create a new version of standard GAE Application.
Learn about the differences between the standard environment and the flexible environment
at https://cloud.google.com/appengine/docs/the-appengine-environments.
Currently supporting Zip and File Containers.
To get more information about StandardAppVersion, see:
* [API documentation](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions)
* How-to Guides
* [Official Documentation](https://cloud.google.com/appengine/docs/standard)
## Example Usage
### App Engine Standard App Version
```python
import pulumi
import pulumi_gcp as gcp
bucket = gcp.storage.Bucket("bucket", location="US")
object = gcp.storage.BucketObject("object",
bucket=bucket.name,
source=pulumi.FileAsset("./test-fixtures/appengine/hello-world.zip"))
myapp_v1 = gcp.appengine.StandardAppVersion("myappV1",
version_id="v1",
service="myapp",
runtime="nodejs10",
entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
shell="node ./app.js",
),
deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda bucketName, objectName: f"https://storage.googleapis.com/{bucket_name}/{object_name}"),
),
),
env_variables={
"port": "8080",
},
automatic_scaling=gcp.appengine.StandardAppVersionAutomaticScalingArgs(
max_concurrent_requests=10,
min_idle_instances=1,
max_idle_instances=3,
min_pending_latency="1s",
max_pending_latency="5s",
standard_scheduler_settings=gcp.appengine.StandardAppVersionAutomaticScalingStandardSchedulerSettingsArgs(
target_cpu_utilization=0.5,
target_throughput_utilization=0.75,
min_instances=2,
max_instances=10,
),
),
delete_service_on_destroy=True)
myapp_v2 = gcp.appengine.StandardAppVersion("myappV2",
version_id="v2",
service="myapp",
runtime="nodejs10",
entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
shell="node ./app.js",
),
deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda bucketName, objectName: f"https://storage.googleapis.com/{bucket_name}/{object_name}"),
),
),
env_variables={
"port": "8080",
},
basic_scaling=gcp.appengine.StandardAppVersionBasicScalingArgs(
max_instances=5,
),
noop_on_destroy=True)
```
## Import
StandardAppVersion can be imported using any of these accepted formats
```sh
$ pulumi import gcp:appengine/standardAppVersion:StandardAppVersion default apps/{{project}}/services/{{service}}/versions/{{version_id}}
```
```sh
$ pulumi import gcp:appengine/standardAppVersion:StandardAppVersion default {{project}}/{{service}}/{{version_id}}
```
```sh
$ pulumi import gcp:appengine/standardAppVersion:StandardAppVersion default {{service}}/{{version_id}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['StandardAppVersionAutomaticScalingArgs']] automatic_scaling: Automatic scaling is based on request rate, response latencies, and other application metrics.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['StandardAppVersionBasicScalingArgs']] basic_scaling: Basic scaling creates instances when your application receives requests. Each instance will be shut down when the application becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity.
Structure is documented below.
:param pulumi.Input[bool] delete_service_on_destroy: If set to `true`, the service will be deleted if it is the last version.
:param pulumi.Input[pulumi.InputType['StandardAppVersionDeploymentArgs']] deployment: Code and application artifacts that make up this version.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['StandardAppVersionEntrypointArgs']] entrypoint: The entrypoint for the application.
Structure is documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] env_variables: Environment variables available to the application.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionHandlerArgs']]]] handlers: An ordered list of URL-matching patterns that should be applied to incoming requests.
The first matching URL handles the request and other request handlers are not attempted.
Structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input[str]]] inbound_services: A list of the types of messages that this application is able to receive.
Each value may be one of `INBOUND_SERVICE_MAIL`, `INBOUND_SERVICE_MAIL_BOUNCE`, `INBOUND_SERVICE_XMPP_ERROR`, `INBOUND_SERVICE_XMPP_MESSAGE`, `INBOUND_SERVICE_XMPP_SUBSCRIBE`, `INBOUND_SERVICE_XMPP_PRESENCE`, `INBOUND_SERVICE_CHANNEL_PRESENCE`, and `INBOUND_SERVICE_WARMUP`.
:param pulumi.Input[str] instance_class: Instance class that is used to run this version. Valid values are
AutomaticScaling: F1, F2, F4, F4_1G
BasicScaling or ManualScaling: B1, B2, B4, B4_1G, B8
Defaults to F1 for AutomaticScaling and B2 for ManualScaling and BasicScaling. If no scaling is specified, AutomaticScaling is chosen.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionLibraryArgs']]]] libraries: Configuration for third-party Python runtime libraries that are required by the application.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['StandardAppVersionManualScalingArgs']] manual_scaling: A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time.
Structure is documented below.
:param pulumi.Input[bool] noop_on_destroy: If set to `true`, the application version will not be deleted.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] runtime: Desired runtime. Example python27.
:param pulumi.Input[str] runtime_api_version: The version of the API in the given runtime environment.
Please see the app.yaml reference for valid values at `https://cloud.google.com/appengine/docs/standard/<language>/config/appref`\
Substitute `<language>` with `python`, `java`, `php`, `ruby`, `go` or `nodejs`.
:param pulumi.Input[str] service: AppEngine service resource
:param pulumi.Input[bool] threadsafe: Whether multiple requests can be dispatched to this version at once.
:param pulumi.Input[str] version_id: Relative name of the version within the service. For example, `v1`. Version names can contain only lowercase letters, numbers, or hyphens. Reserved names,"default", "latest", and any name with the prefix "ah-".
:param pulumi.Input[pulumi.InputType['StandardAppVersionVpcAccessConnectorArgs']] vpc_access_connector: Enables VPC connectivity for standard apps.
Structure is documented below.
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: StandardAppVersionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Standard App Version resource to create a new version of standard GAE Application.
        Learn about the differences between the standard environment and the flexible environment
        at https://cloud.google.com/appengine/docs/the-appengine-environments.
        Currently supporting Zip and File Containers.

        To get more information about StandardAppVersion, see:

        * [API documentation](https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/appengine/docs/standard)

        ## Example Usage
        ### App Engine Standard App Version

        ```python
        import pulumi
        import pulumi_gcp as gcp

        bucket = gcp.storage.Bucket("bucket", location="US")
        object = gcp.storage.BucketObject("object",
            bucket=bucket.name,
            source=pulumi.FileAsset("./test-fixtures/appengine/hello-world.zip"))
        myapp_v1 = gcp.appengine.StandardAppVersion("myappV1",
            version_id="v1",
            service="myapp",
            runtime="nodejs10",
            entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
                shell="node ./app.js",
            ),
            deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
                zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
                    source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda args: f"https://storage.googleapis.com/{args[0]}/{args[1]}"),
                ),
            ),
            env_variables={
                "port": "8080",
            },
            automatic_scaling=gcp.appengine.StandardAppVersionAutomaticScalingArgs(
                max_concurrent_requests=10,
                min_idle_instances=1,
                max_idle_instances=3,
                min_pending_latency="1s",
                max_pending_latency="5s",
                standard_scheduler_settings=gcp.appengine.StandardAppVersionAutomaticScalingStandardSchedulerSettingsArgs(
                    target_cpu_utilization=0.5,
                    target_throughput_utilization=0.75,
                    min_instances=2,
                    max_instances=10,
                ),
            ),
            delete_service_on_destroy=True)
        myapp_v2 = gcp.appengine.StandardAppVersion("myappV2",
            version_id="v2",
            service="myapp",
            runtime="nodejs10",
            entrypoint=gcp.appengine.StandardAppVersionEntrypointArgs(
                shell="node ./app.js",
            ),
            deployment=gcp.appengine.StandardAppVersionDeploymentArgs(
                zip=gcp.appengine.StandardAppVersionDeploymentZipArgs(
                    source_url=pulumi.Output.all(bucket.name, object.name).apply(lambda args: f"https://storage.googleapis.com/{args[0]}/{args[1]}"),
                ),
            ),
            env_variables={
                "port": "8080",
            },
            basic_scaling=gcp.appengine.StandardAppVersionBasicScalingArgs(
                max_instances=5,
            ),
            noop_on_destroy=True)
        ```

        ## Import

        StandardAppVersion can be imported using any of these accepted formats

        ```sh
         $ pulumi import gcp:appengine/standardAppVersion:StandardAppVersion default apps/{{project}}/services/{{service}}/versions/{{version_id}}
        ```

        ```sh
         $ pulumi import gcp:appengine/standardAppVersion:StandardAppVersion default {{project}}/{{service}}/{{version_id}}
        ```

        ```sh
         $ pulumi import gcp:appengine/standardAppVersion:StandardAppVersion default {{service}}/{{version_id}}
        ```

        :param str resource_name: The name of the resource.
        :param StandardAppVersionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StandardAppVersionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automatic_scaling: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionAutomaticScalingArgs']]] = None,
basic_scaling: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionBasicScalingArgs']]] = None,
delete_service_on_destroy: Optional[pulumi.Input[bool]] = None,
deployment: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionDeploymentArgs']]] = None,
entrypoint: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionEntrypointArgs']]] = | |
mc #Hope this helps with resource leaks!
if e_errors.is_ok(status['status'][0]):
#use_type = "%s=%s" % (status['drive_id'],
# use_type)
use_type = status['drive_id']
#Regardless of an error or not, we found the
# mover we were looking for. Give up.
break
### Without an explicit collection here, even the "del mc" above
### does not work until python runs the garbage collector.
### If python takes to long we run out of FDs, so force python
### to reclain those resources.
import gc
gc.collect()
##################################################
##This doesn't seem to work.
#if int(drive.mount) == 1:
# use_status = "mounted"
#elif int(drive.mount) == 0:
# use_status = EMPTY
#else:
# use_status = "unknown"
if drive.volser:
use_status = "mounted"
else:
use_status = EMPTY
drive_list.append({"name" : drive.drive_name,
"state" : use_state,
"status" : use_status, #Filler for AML2.
"volume" : drive.volser,
"type" : use_type,
})
ticket['drive_list'] = drive_list
return (e_errors.OK, 0, None)
    def listVolumes(self, ticket):
        # Serve a "list_volumes" request: ask the AML2 robot for every
        # volser it knows and stream the (potentially large) list back to
        # the caller over a TCP callback connection.  The UDP reply only
        # acknowledges the request; the data goes over TCP.
        if not hostaddr.allow(ticket['callback_addr']):
            # Callback host is not in the allowed-hosts list; drop silently.
            return

        #We modify it if not Okay.
        ticket['status'] = (e_errors.OK, 0, None, "", "")

        # Query the robot for the full volume list.
        stat, volumes = aml2.list_volser()
        if stat != 0:
            ticket['status'] = aml2.convert_status(stat)
        else:
            ticket['status'] = (e_errors.OK, 0, None, "", "")

        # Convert the robot's volume records into plain dictionaries.
        volume_list = []
        for volume in volumes:
            use_media_type = aml2.media_names.get(volume.media_type, "UNKNOWN")
            volume_list.append({'volume' : volume.volser,
                                'type' : use_media_type,
                                'state' : volume.attrib,
                                'location' : "",  # AML2 reports no location here
                                })

        # Acknowledge over UDP first, then push the list over TCP.
        reply = copy.copy(ticket)
        self.reply_to_caller(reply)
        ticket['no_reply'] = 1 #Tell WorkDone() not to send the ticket again.

        reply = copy.copy(ticket)
        reply['volume_list'] = volume_list

        # Resolve the caller's address family (IPv4/IPv6) and connect back.
        address_family = socket.getaddrinfo(ticket['callback_addr'][0], None)[0][0]
        sock = socket.socket(address_family, socket.SOCK_STREAM)
        try:
            sock.connect(ticket['callback_addr'])
            r = callback.write_tcp_obj(sock, reply)
            sock.close()
            if r:
                # write_tcp_obj() signalled a failure; record a NET_ERROR.
                message = "Error calling write_tcp_obj. Callback addr. %s" \
                          % (ticket['callback_addr'],)
                Trace.log(e_errors.ERROR, message)
                E=6
                ticket['status'] = (e_errors.NET_ERROR, E, message)
        except:
            # Best-effort callback: log the failure and report NET_ERROR.
            Trace.handle_error()
            Trace.log(e_errors.ERROR,
                      "Callback address %s" % (ticket['callback_addr'],))
            E=6
            ticket['status'] = (e_errors.NET_ERROR, E, sys.exc_info()[2])

        return ticket['status']
def listVolumes2(self, ticket):
ticket['work'] = "list_volumes" #Use old method for AML2.
ticket['function'] = "listVolume"
return self.listVolumes(ticket)
def listSlots(self, ticket):
# A bug in aci_getcellinfo() requires forking in list_slots().
# If we are the parent, just return and keep on going. This isn't
# the best solution because we loose keeping the reply in the
# udp_server in case the client didn't receive the reply. If
# a better solution is needed, then look at timed_command() in the
# STK implementation.
#
# The bug is that for every call to aci_getcellinfo() three file
# descriptors (that are sockets) are leaked.
#
# By using self.fork() instead of os.fork() we get automatic process
# tracking and termination (if needed).
#
#pid = self.fork()
#if pid != 0: # parent
# return
# ... else this is the child.
### All this extra forking code to work around aci_getcellinfo()
### is not needed now that list_slots() uses DoWork() to call
### listSlots. An implicit fork() is called in DoWork() for us.
stat, slots = aml2.list_slots()
if stat != 0:
ticket['status'] = aml2.convert_status(stat)
else:
ticket['status'] = (e_errors.OK, 0, "")
slot_list = []
for slot_info in slots:
#location = slot_info[0]
for i in range(len(slot_info[1])):
media_type = slot_info[1][i].eMediaType
use_media_type = aml2.media_names.get(media_type,
media_type)
slot_dict = {"location" : slot_info[0],
"media_type" : use_media_type,
}
try:
slot_dict['total'] = slot_info[1][i].ulCount
except IndexError:
slot_dict['total'] = 0
try:
slot_dict['free'] = slot_info[2][i].ulCount
except IndexError:
slot_dict['free'] = 0
try:
slot_dict['used'] = slot_info[3][i].ulCount
except IndexError:
slot_dict['used'] = 0
try:
slot_dict['disabled'] = slot_info[4][i].ulCount
except IndexError:
slot_dict['disabled'] = 0
slot_list.append(slot_dict)
ticket['slot_list'] = slot_list
return (e_errors.OK, 0, None)
#sys.exit(0) #Remember we are the child here.
    def listClean(self, ticket):
        # Serve a "list_clean" request: report the cleaning tapes in the
        # robot, with usage information obtained from the volume clerk.
        # Like listVolumes(), the reply is acknowledged over UDP and the
        # list itself is sent over a TCP callback connection.
        stat, volumes_list = aml2.list_volser()
        if stat != 0:
            ticket['status'] = aml2.convert_status(stat)
        else:
            ticket['status'] = (e_errors.OK, 0, "")

        # Client used to ask the volume clerk about each cleaning volume.
        vcc = volume_clerk_client.VolumeClerkClient(self.csc,
                                                    logc = self.logc,
                                                    alarmc = self.alarmc,
                                                    rcv_timeout=5,
                                                    rcv_tries=12)

        clean_list = []
        for volume_instance in volumes_list:
            volume = volume_instance.volser
            use_media_type = aml2.media_names.get(volume_instance.media_type,
                                                  "UNKNOWN")
            if volume[0:2] != "CL":
                #############################################
                #Assuming cleaning tapes begin with CL is an unfortunate
                # part of this implimentation.
                #############################################
                continue

            vol_info = vcc.inquire_vol(volume, timeout = 5, retry = 12)
            if e_errors.is_ok(vol_info):
                # Only the remaining-bytes figure from the volume clerk is
                # available; the other fields are fillers for this robot.
                location = "N/A"
                max_usage = "N/A"
                current_usage = "N/A"
                remaining_usage = vol_info['remaining_bytes']
                status = "N/A"
                #media_type = vol_info['media_type']
            else:
                # Volume clerk lookup failed; report usage as unknown.
                location = "N/A"
                max_usage = "N/A"
                current_usage = "N/A"
                remaining_usage = "Unknown"
                status = "N/A"
                #media_type = "Unknown"

            clean_list.append({"volume" : volume,
                               "location" : location,
                               "max_usage" : max_usage,
                               "current_usage" : current_usage,
                               "remaining_usage" : remaining_usage,
                               "status" : status,
                               "type" : use_media_type,
                               })

        ticket['status'] = (e_errors.OK, 0, None)

        # Acknowledge over UDP first; the clean list goes back over TCP.
        reply = copy.copy(ticket)
        self.reply_to_caller(reply)
        ticket['no_reply'] = 1 #Tell WorkDone() not to send the ticket again.

        reply = copy.copy(ticket)
        reply['clean_list'] = clean_list

        address_family = socket.getaddrinfo(ticket['callback_addr'][0], None)[0][0]
        sock = socket.socket(address_family, socket.SOCK_STREAM)
        try:
            sock.connect(ticket['callback_addr'])
            r = callback.write_tcp_obj(sock, reply)
            sock.close()
            if r:
                Trace.log(e_errors.ERROR,
                          "Error calling write_tcp_obj. Callback addr. %s"
                          % (ticket['callback_addr'],))
        except:
            # Best-effort callback: log and fall through; the returned
            # status stays OK.
            Trace.handle_error()
            Trace.log(e_errors.ERROR,
                      "Callback address %s" % (ticket['callback_addr'],))

        return (e_errors.OK, 0, None)
#########################################################################
# These functions are internal functions specific to AML2 media changer.
#########################################################################
    def robotStatus(self, arm):
        # Query the status of the given robot arm, going through the
        # generic retry wrapper to ride out transient errors.
        return self.retry_function(aml2.robotStatus, arm)
"""
def robotHome(self, arm):
return self.retry_function(aml2.robotHome,arm)
def robotStart(self, arm):
return self.retry_function(aml2.robotStart, arm)
"""
#########################################################################
#
# STK robot loader server
#
#########################################################################
class STK_MediaLoader(MediaLoaderMethods):
    def __init__(self, medch, max_work=7, csc=None):
        # medch: this media changer's configuration name; max_work: maximum
        # number of simultaneous robot operations; csc: configuration
        # server client.
        MediaLoaderMethods.__init__(self,medch,max_work,csc)
        # ACSLS server contact information from the configuration.
        self.acls_host = self.mc_config.get('acls_host', 'UNKNOWN')
        self.acls_uname = self.mc_config.get('acls_uname', 'UNKNOWN')
        # Per-drive-type cleaning settings; default maps type -> [60, 1]
        # (presumably a time and a count -- confirm against cleaning code).
        self.driveCleanTime = self.mc_config.get('DriveCleanTime',
                                                 {'9840':[60,1],'9940':[60,1]})
        self.acssa_version = self.mc_config.get('acssa_version', 'UNKNOWN')
        # "prepare" work is implemented as an unload for the STK robot.
        self.prepare = self.unload
        self.DEBUG = 0
        print "STK MediaLoader initialized"
# retry function call
    def retry_function(self,function,*args):
        # Call function(*args), retrying on non-OK status up to getNretry()
        # times, and return the last (status, code, message)-style tuple.
        # For failed mounts a "desperation" dismount is attempted between
        # retries, since a failed mount may leave the drive loaded.
        count = self.getNretry()
        sts=("",0,"")
        # retry every error
        while count > 0 and sts[0] != e_errors.OK:
            try:
                sts=apply(function,args)
                if sts[1] == 6: # no record for display_drive
                    break
                if sts[1] != 0:
                    if self.logdetail:
                        Trace.log(e_errors.ERROR, 'retry_function: function %s %s sts[1] %s sts[2] %s count %s'%(repr(function),args,sts[1],sts[2],count))
                    if function==self.mount:
                        if sts[1] == e_errors.MC_VOLNOTHOME:
                            # Volume is mounted in another drive.
                            # break loop here
                            break
                        # Try to clear the drive before the next attempt.
                        time.sleep(60)
                        fixsts=apply(self.dismount,args)
                        Trace.log(e_errors.INFO, 'Tried %s %s status=%s %s Desperation dismount status %s %s'%(repr(function),args,sts[1],sts[2],fixsts[1],fixsts[2]))
                    time.sleep(60)
                    count = count - 1
                else:
                    break
            except:
                # Unexpected exception: give up and report it as the status.
                exc,val,tb = Trace.handle_error()
                return str(exc),0,""
        return sts
# execute a stk cmd_proc command, but don't wait forever for it to complete
#mostly stolen from Demo/tkinter/guido/ShellWindow.py - spawn function
def timed_command(self,cmd,min_response_length=0,timeout=60):
message = ""
blanks=0
nread=0
now=timeofday.tod()
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
command = "(echo %s;echo logoff)|/export/home/ACSSS/bin/cmd_proc 2>&1" % (cmd,)
if self.acssa_version == '8.4':
command = "(echo %s)|/export/home/ACSSS/bin/cmd_proc 2>&1" % (cmd,)
Trace.trace(e_errors.INFO, "Sending %s"%(command,))
cmd_lookfor = "ACSSA> %s" % (cmd,)
# can not use dispatching work fork because we are already child.
# need to kill explictly and children can't kill
(dum,mark) = self.delta_t(0)
pid = os.fork()
if pid == 0:
# Child
for i in 0, 1, 2:
try:
os.close(i)
except os.error:
pass
if os.dup(p2cread) <> 0:
print 'ERROR: timed_command pc2cread bad read dup'
Trace.log(e_errors.ERROR, 'timed_command pc2cread bad read dup')
if os.dup(c2pwrite) <> 1:
print 'ERROR: timed_command c2pwrite bad write dup'
Trace.log(e_errors.ERROR, 'timed_command c2pwrite bad write dup')
if os.dup(c2pwrite) <> 2:
print 'ERROR: timed_command c2pwrite bad error dup'
Trace.log(e_errors.ERROR, 'timed_command c2pwrite bad error dup')
MAXFD = 100 # Max number of file descriptors (os.getdtablesize()???)
for i in range(3, MAXFD):
try:
os.close(i)
except:
pass
try:
#I know this is hard-coded and inflexible. That is what I want so as to
#prevent any possible security problem.
os.execv('/usr/bin/rsh',[self.acls_host,'-l',self.acls_uname,command])
finally:
exc, msg, tb = sys.exc_info()
Trace.log(e_errors.ERROR, "timed_command execv failed: %s %s %s"% (exc, msg, traceback.format_tb(tb)))
os._exit(1)
os.close(p2cread)
os.close(c2pwrite)
os.close(p2cwrite)
#wait for child to complete, or kill it
start = time.time()
if self.DEBUG:
print timeofday.tod(),cmd
Trace.trace(e_errors.INFO,"%s" %(cmd,))
active=0
(p,r) = (0,0)
try:
while active<timeout:
p,r = os.waitpid(pid,os.WNOHANG)
if p!=0:
#When we detect that the process has exited, leave
# the loop. This allows us to avoid the ETIMEDOUT
# and re-raised select errors.
break
#We need to start reading this now for really long responses.
# Otherwise, the buffer fills up with the child waiting
# for the parent to read something from the full buffer.
# And the parent waits for the child to finish.
wait_duration = max(timeout - active, 0)
try:
r, w, x = select.select([c2pread], [], [], wait_duration)
except (select.error, OSError, IOError), msg:
Trace.log(79, "select error in timed_command(): %s" % \
(str(msg),))
if msg.args[0] in [errno.EINTR]:
r, w, x = [], [], []
#The process was interupted by a signal; we need
# to keep it going.
active = time.time() - start
continue
else:
#We want to jump to the error handling code.
raise sys.exc_info()[0], sys.exc_info()[1], \
sys.exc_info()[2]
#If nothing was received, we want to wait again instead of
# falling into the os.read(). If the robot side hangs
# without closing the pipe we can timeout in select(), but
# not read().
if c2pread not in r:
active = time.time() - start
time.sleep(1)
continue
raw_msg = os.read(c2pread, 2000)
if raw_msg:
if self.DEBUG:
print raw_msg,
message = message + raw_msg
#Need to reset the timeout period.
start = time.time()
active = 0
else:
if raw_msg == '':
blanks | |
to update
parentChildren = None
if hasattr(value, '_children'):
parentChildren = value._children
# check if this is a valid parent
tmp = "Cannot change to that parent, "
if value is None:
# an object can be an orphan
pass
elif value is self:
# an object cannot be its own parent
raise TypeError(tmp+"because that is the object itself!")
elif isinstance(value, Wibject):
# some wibject parents can hold wobjects
if isinstance(self, Wobject):
if hasattr(value, '_wobjects'):
parentChildren = value._wobjects
else:
tmp2 = "a wobject can only have a wibject-parent "
raise TypeError(tmp+tmp2+"if it can hold wobjects!")
elif isinstance(value, Wobject):
# a wobject can only hold wobjects
if isinstance(self, Wibject):
raise TypeError(tmp+"a wibject cant have a wobject-parent!")
else:
raise TypeError(tmp+"it is not a wibject or wobject or None!")
# remove from parents childrens list
if hasattr(self._parent, '_children'):
while self in self._parent._children:
self._parent._children.remove(self)
if hasattr(self._parent, '_wobjects'):
while self in self._parent._wobjects:
self._parent._wobjects.remove(self)
# Should we destroy GL objects (because we are removed
# from an OpenGL context)?
figure1 = self.GetFigure()
figure2 = None
if hasattr(value, 'GetFigure'):
figure2 = value.GetFigure()
if figure1 and (figure1 is not figure2):
self.DestroyGl()
# set and add to new parent
self._parent = value
if parentChildren is not None:
parentChildren.append(self)
return locals()
@property
def children(self):
""" Get a shallow copy of the list of children.
"""
return [child for child in self._children]
def GetFigure(self):
""" GetFigure()
Get the figure that this object is part of.
The figure represents the OpenGL context.
Returns None if it has no figure.
"""
# init
iter = 0
object = self
# search
while hasattr(object,'parent'):
iter +=1
if object.parent is None:
break
if iter > 100:
break
object = object.parent
# check
if object.parent is None and hasattr(object, '_SwapBuffers'):
return object
else:
return None
def Draw(self, fast=False):
""" Draw(fast=False)
For wibjects: calls Draw() on the figure that contains this object.
For wobjects: calls Draw() on the axes that contains this object.
"""
if self._isbeingdrawn:
return False
else:
fig = self.GetFigure()
if fig:
fig.Draw()
return True
    def FindObjects(self, spec):
        """ FindObjects(pattern)
        Find the objects in this objects' children, and its childrens
        children, etc, that correspond to the given pattern.
        The pattern can be a class or tuple of classes, an attribute name
        (as a string) that the objects should have, or a callable that
        returns True or False given an object. For example
        'lambda x: ininstance(x, cls)' will do the same as giving a class.
        If 'self' is a wibject and has a _wobject property (like the Axes
        wibject) this method also performs the search in the list of wobjects.
        """
        # Parse input: normalize the spec into a callback predicate.
        if hasattr(spec, 'func_name'):
            # A plain (Python 2) function: use it as the predicate directly.
            callback = spec
        elif isinstance(spec, (type, tuple)):
            # A class or tuple of classes: match by isinstance.
            callback = lambda x: isinstance(x, spec)
        elif isinstance(spec, basestring):
            # An attribute name: match objects that have that attribute.
            callback = lambda x: hasattr(x, spec)
        elif hasattr(spec, '__call__'):
            callback = spec # other callable
        else:
            raise ValueError('Invalid argument for FindObjects')
        # Init list with result
        result = []
        # Try all children recursively.  Note that the recursion passes
        # the normalized callback, not the raw spec.
        for child in self._children:
            if callback(child):
                result.append(child)
            result.extend( child.FindObjects(callback) )
        if hasattr(self, '_wobjects'):
            # Containers like Axes also hold wobjects; search those too.
            for child in self._wobjects:
                if callback(child):
                    result.append(child)
                result.extend( child.FindObjects(callback) )
        # Done
        return result
def GetWeakref(self):
""" GetWeakref()
Get a weak reference to this object.
Call the weakref to obtain the real reference (or None if it's dead).
"""
return weakref.ref( self )
class Wibject(BaseObject):
    """ Wibject(parent)
    A Wibject (widget object) is a 2D object drawn in
    screen coordinates. A Figure is a widget and so are an Axes and a
    PushButton. Wibjects have a position property to set their location
    and size. They also have a background color and multiple event properties.
    This class may also be used as a container object for other wibjects.
    An instance of this class has no visual appearance. The Box class
    implements drawing a rectangle with an edge.
    """

    def __init__(self, parent):
        BaseObject.__init__(self, parent)

        # the position of the widget within its parent
        self._position = Position( 10,10,50,50, self)

        # colors and edge
        self._bgcolor = (0.8,0.8,0.8)

        # event for position
        self._eventPosition = events.EventPosition(self)

    @property
    def eventPosition(self):
        """ Fired when the position (or size) of this wibject changes.
        """
        return self._eventPosition

    @misc.PropWithDraw
    def position():
        """ Get/Set the position of this wibject. Setting can be done
        by supplying either a 2-element tuple or list to only change
        the location, or a 4-element tuple or list to change location
        and size.
        See the docs of the vv.base.Position class for more information.
        """
        # NOTE: misc.PropWithDraw turns the fget/fset functions returned
        # in this locals() dict into a property (presumably triggering a
        # redraw on set); that is why position() takes no self argument.
        def fget(self):
            return self._position
        def fset(self, value):
            self._position.Set(value)
        return locals()

    @misc.PropWithDraw
    def bgcolor():
        """ Get/Set the background color of the wibject.
        """
        def fget(self):
            return self._bgcolor
        def fset(self, value):
            # getColor normalizes the various accepted color formats.
            self._bgcolor = misc.getColor(value, 'setting bgcolor')
        return locals()

    def _Transform(self):
        """ _Transform()
        Apply a translation such that the wibject is
        drawn in the correct place.
        """
        # skip if we are on top
        if not self.parent:
            return
        # get posision in screen coordinates
        pos = self.position
        # apply
        gl.glTranslatef(pos.left, pos.top,0.0)

    def OnDrawShape(self, clr):
        # Implementation of the OnDrawShape method.
        # Draws this wibject's rectangle in the given color (used for
        # shape/picking rendering).
        gl.glColor(clr[0], clr[1], clr[2], 1.0)
        w,h = self.position.size
        gl.glBegin(gl.GL_POLYGON)
        gl.glVertex2f(0,0)
        gl.glVertex2f(0,h)
        gl.glVertex2f(w,h)
        gl.glVertex2f(w,0)
        gl.glEnd()
class Wobject(BaseObject):
""" Wobject(parent)
A Wobject (world object) is a visual element that
is drawn in 3D world coordinates (in the scene). Wobjects can be
children of other wobjects or of an Axes object (which is the
wibject that represents the scene).
To each wobject, several transformations can be applied,
which are also applied to its children. This way complex models can
be build. For example, in a robot arm the fingers would be children
of the hand, so that when the hand moves or rotates, the fingers move
along automatically. The fingers can then also be moved without affecting
the hand or other fingers.
The transformations are represented by Transform_* objects in
the list named "transformations". The transformations are applied
in the order as they appear in the list.
"""
    def __init__(self, parent):
        BaseObject.__init__(self, parent)

        # the transformations applied to the object (applied in list order)
        self._transformations = []
    @property
    def transformations(self):
        """ Get the list of transformations of this wobject. These
        can be Transform_Translate, Transform_Scale, or Transform_Rotate
        instances.
        Note: the actual list is returned (not a copy), so it can be
        modified in place to add or remove transformations.
        """
        return self._transformations
def GetAxes(self):
""" GetAxes()
Get the axes in which this wobject resides.
Note that this is not necesarily an Axes instance (like the line
objects in the Legend wibject).
"""
par = self.parent
if par is None:
return None
while not isinstance(par, Wibject):
par = par.parent
if par is None:
return None
return par
def Draw(self, fast=False):
""" Draw(fast=False)
Calls Draw on the axes that contains this object.
"""
if self._isbeingdrawn:
return False
else:
axes = self.GetAxes()
if axes:
axes.Draw()
return True
def _GetLimits(self, *args):
""" _GetLimits(self, x1, x2, y1, y2, z1, z2)
Get the limits in world coordinates between which the object
exists. This is used by the Axes class to set the camera correctly.
If None is returned, the limits are undefined.
Inheriting Wobject classes should overload this method. However, they
can use this method to take all transformations into account by giving
the cornerpoints of the untransformed object.
Returns a 3 element tuple of vv.Range instances: xlim, ylim, zlim.
"""
# Examine args
if not args:
minx, maxx, miny, maxy, minz, maxz = [], [], [], [], [], []
| |
file name')
else:
comp = ccache.get_compound(cid)
comp.molfile = mol
all_comp_data[cid] = comp
met_to_comp_dict[met] = cid
print( met+ ' was inserted as mol file. it mapped to the kegg id' + cid)
else:
warnings.warn("unexpected metabolite name:" + met)
comp = None
# we got a hit so it's a bigg metabolite, lets get either the kegg met or the structure
else:
all_references_readable = json.loads(all_references_json)
if 'KEGG Compound' in all_references_readable: # we matched it to kegg met and will use component contribution's database for this guy
kegg_reference = all_references_readable['KEGG Compound']
kegg_id = kegg_reference[0]['id']
met_to_comp_dict[met] = kegg_id
try:
comp = ccache.get_compound(kegg_id)
# s_mol = urllib.urlopen('http://rest.kegg.jp/get/cpd:%s/mol' % kegg_id).read()
# comp.molfile = s_mol
all_comp_data[kegg_id] = comp
print( met+ ' mapped to the kegg id' + kegg_id)
except:
print(met + ' had kegg id in the bigg database but wasn\'t in component contribution database')
#comp = get_info_by_mapping(ccache, all_references_readable, met)
all_comp_data[kegg_id] = None
else: # bigg met with no kegg met!!!
# get compound info... currently only checks if there is chebi info.
# comp = get_info_by_mapping(ccache, all_references_readable, met)
# all_comp_data[met] = comp
# non_kegg.append(met)
# check if iAF1260 has it
#file_name = '../../validation/AF1260supp/all_mol_files/' + met[0:-2] + '.mol'
# if os.path.isfile(file_name):
# # read mol file
# test_file = open(file_name, 'r')
# mol = test_file.read()
# inchiS = consistent_to_inchi(mol, 'mol')
# cid, Name = check_if_already_exists_inchi(inchiS)
# if cid == None:
# comp = Compound.from_inchi_with_keggID('MOL', met, inchiS)
# comp.molfile = mol
# # save the references
# met_to_comp_dict[met] = met
# all_comp_data[met] = comp
# non_kegg.append(met)
# print(
# met + ' was inserted as mol file. it had no id mapping to the internal reference. will use input file name')
# else:
# comp = ccache.get_compound(cid)
# comp.molfile = mol
# all_comp_data[cid] = comp
# met_to_comp_dict[met] = cid
# print(met + ' was inserted as mol file. it mapped to the kegg id' + cid)
print(met + ' had no kegg id in the bigg database. it could potentially be found using other databases, but ignoring for now')
comp = None
# # now begin to separate the S matrix according to category
# S_kegg = S[[input_metabolites.index(b) for b in to_kegg], :]
# S_bigg_non_kegg = S[[input_metabolites.index(b) for b in bigg_non_kegg],:]
# S_non_bigg = S[[input_metabolites.index(b) for b in non_bigg],:]
# new cids list
output_ids = [met_to_comp_dict.get(met, met) for met in input_metabolites]
return {'met_to_comp_dict':met_to_comp_dict,
'non_kegg':non_kegg,
'all_comp_data':all_comp_data,
'output_ids':output_ids}
def get_info_by_mapping(ccache, all_references_readable, met):
    """Retrieve cached compound information for a metabolite via the
    reference-mapping tables and report the outcome on stdout.

    :param ccache: compound cacher passed through to get_compound_info
    :param all_references_readable: reference mapping table passed through
        to get_compound_info
    :param met: metabolite id (used here only for the log messages)
    :return: the compound object, or None when no mapping / cached info exists
    """
    comp = get_compound_info(ccache, all_references_readable)
    if comp is None:
        print(met + ' had no id mapping to the internal reference OR it mapped, but in has no cached comp info')
    else:
        # bug fix: the original message was missing the separating space after `met`
        print(met + ' had compound information found from mapping databases')
    return comp
def only_decompose(cc, reactions):
    """
    Decompose reactions into reactant-space and group-space components
    (eq. 10 in the component-contribution paper).

    Arguments:
        cc - component-contribution object exposing _decompose_reaction,
             Nc and params['G'] (the attributes used below)
        reactions - iterable of KeggReaction objects
    Returns:
        (X, G) numpy matrices whose j-th columns hold the decomposition of
        reactions[j]; reactions that cannot be decomposed contribute
        all-zero columns.
    """
    x_cols = []
    g_cols = []
    for rxn in reactions:
        try:
            x_vec, g_vec = cc._decompose_reaction(rxn)
        except inchi2gv.GroupDecompositionError:
            # fall back to zero vectors so column counts stay aligned
            x_vec = numpy.zeros((cc.Nc, 1))
            g_vec = numpy.zeros((cc.params['G'].shape[1], 1))
        x_cols.append(list(x_vec.flat))
        g_cols.append(list(g_vec.flat))
    return numpy.matrix(x_cols).T, numpy.matrix(g_cols).T
def add_thermo_comp_info(self, cc):
    """Attach dG0 and its covariance to self from component contribution.

    Builds one KeggReaction per column of self.S (compound id -> nonzero
    stoichiometric coefficient), then stores cc.get_dG0_r_multi results on
    self.dG0 / self.cov_dG0.
    """
    # check that all CIDs in the reaction are already cached by CC
    Nc, Nr = self.S.shape
    reactions = []
    for col in xrange(Nr):
        if col % 50 == 0:
            print('creating reactions to decompose: ' + str(col))
        coeffs = {}
        for row in xrange(Nc):
            value = self.S[row, col]
            if value != 0:
                coeffs[self.cids[row]] = value
        reactions.append(KeggReaction(coeffs))
    self.dG0, self.cov_dG0 = cc.get_dG0_r_multi(reactions, self.comp_data)
def addHydrogens(input_mol):
    """Add explicit hydrogens to a mol-format structure via OpenBabel and
    return the structure converted to PDB text.

    Prints the atom count before and after hydrogen addition.
    """
    import openbabel
    converter = openbabel.OBConversion()
    converter.SetInAndOutFormats("mol", "pdb")
    molecule = openbabel.OBMol()
    converter.ReadString(molecule, input_mol)
    print(molecule.NumAtoms())
    molecule.AddHydrogens()
    print(molecule.NumAtoms())
    return converter.WriteString(molecule)
def calc_concentration(calculate_conc, input_metabolites, comp_data=None, ccache=None):
    '''
    Estimate metabolite concentrations for the thermodynamic model.

    input is the metabolites in the stoichiometric matrix of the thermo model.
    These are already converted to keggIds.

    :param calculate_conc: when True, estimate each compound's concentration
        from its atomic charge count and non-polar surface area (NPSA);
        when False every compound gets the default concentration
    :param input_metabolites: metabolite ids (rows of the S matrix)
    :param comp_data: optional precomputed compound data (the dict returned
        by process_input_mets); computed here when None
    :param ccache: optional KeggCompoundCacher instance; created when None
    :return: tuple (conc_M, concentration, NPSA, countcharge) where conc_M is
        a len(input_metabolites) x 1 matrix of concentrations and the other
        three are dicts keyed by compound id
    '''
    import math
    from component_contribution.molecule import Molecule
    from component_contribution.compound_cacher import KeggCompoundCacher
    from component_contribution.CfB_functions import process_input_mets
    default_C = 0.001
    conc_M = numpy.matrix(numpy.full((len(input_metabolites), 1), default_C))
    if ccache is None:
        ccache = KeggCompoundCacher()
    if comp_data is None:
        comp_data = process_input_mets(input_metabolites, ccache)
    comp_data_mapper = comp_data['all_comp_data']
    # calc charge and NPSA, and [] for all comps
    countcharge = {}
    NPSA = {}
    concentration = {}
    for compound_id, comp in comp_data_mapper.items():
        print(compound_id)
        # no cached structural information: fall back to the default
        if comp is None:
            countcharge[compound_id] = None
            NPSA[compound_id] = None
            concentration[compound_id] = default_C
            continue
        # water or H+: putting H+ as "1" because its log(1)=0 and it will not affect reaction energy
        if compound_id in ('C00001', 'C00080'):
            countcharge[compound_id] = None
            NPSA[compound_id] = None
            concentration[compound_id] = 1
            continue
        # for oxygen and hydrogen use "saturation concentrations for these species in water at 1 atm and 298.15K" feist et al 2007.
        # values taken from http://www.molecularhydrogenfoundation.org/concentration-and-solubility-of-h2/
        # bug fix: the original used `compound_id in ('C00007')`, which is a
        # substring test against the *string* 'C00007' (parentheses without a
        # comma do not make a tuple), so ids like 'C000' or '7' also matched.
        # oxygen
        if compound_id == 'C00007':
            countcharge[compound_id] = None
            NPSA[compound_id] = None
            concentration[compound_id] = 0.000055
            continue
        # molecular hydrogen
        if compound_id == 'C00282':
            countcharge[compound_id] = None
            NPSA[compound_id] = None
            concentration[compound_id] = 0.000034
            continue
        if calculate_conc:
            # calculate charge: needs the pH-7 protonation state
            if comp.smiles_pH7:
                mol = Molecule.FromSmiles(comp.smiles_pH7)
            else:
                # no structure available at pH 7 -> cannot estimate, use default
                countcharge[compound_id] = None
                NPSA[compound_id] = None
                concentration[compound_id] = default_C
                continue
            charge_list = mol.GetAtomCharges()
            charge_count = sum(x != 0 for x in charge_list)
            countcharge[compound_id] = charge_count
            # calc NPSA (non-polar surface area) ##
            NPSA1 = getNonPolarArea(comp.inchi, pH=7)
            NPSA[compound_id] = NPSA1
            # concentration estimated from charge count and NPSA; the /1000
            # converts to molar.  NOTE(review): coefficients 1.0425 / 0.0272
            # look like fitted regression constants -- confirm provenance.
            conc = math.exp(charge_count * 1.0425 - NPSA1 * 0.0272) / 1000
            concentration[compound_id] = conc
        else:
            countcharge[compound_id] = None
            NPSA[compound_id] = None
            concentration[compound_id] = default_C
    # 1. compounds can have multiple input ids associated.
    # 2. some input ids have no compounds associated leave them with default concentrations
    for i, met in enumerate(input_metabolites):
        comp_id = comp_data['met_to_comp_dict'].get(met, None)
        if comp_id is None:
            continue
        conc_M[i] = concentration[comp_id]
    return conc_M, concentration, NPSA, countcharge
def remove_duplicate(input_list):
    """Return the elements of *input_list* with duplicates removed,
    preserving first-occurrence order.

    Uses list membership (not a set) so unhashable elements work, at the
    cost of O(n^2) behaviour for large inputs.

    :param input_list: iterable of items
    :return: list of unique items in first-occurrence order
    """
    repeat = []
    uniq = []
    for x in input_list:
        if x not in uniq:
            uniq.append(x)
        else:
            repeat.append(x)
    # bug fix: the original built `uniq` but fell off the end returning None
    return uniq
def get_compound_info(ccahce,info):
'''
this function retrieves the structural information for the references included in the bigg database. this function
should no longer be necessary once a proper internal database is generated.
:param info: this is a | |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Generates Python proto modules and grpc stubs for Beam protos.
"""
import contextlib
import glob
import inspect
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import time
from collections import defaultdict
from importlib import import_module
import pkg_resources
LOG = logging.getLogger()
LOG.setLevel(logging.INFO)
LICENSE_HEADER = """
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
NO_PROMISES_NOTICE = """
\"\"\"
For internal use only; no backwards-compatibility guarantees.
Automatically generated when running setup.py sdist or build[_py].
\"\"\"
"""
def clean_path(path):
  """Return *path* as an absolute path with symlinks resolved."""
  absolute = os.path.abspath(path)
  return os.path.realpath(absolute)
# These paths are relative to the project root
BEAM_PROTO_PATHS = [
    os.path.join('model', 'pipeline', 'src', 'main', 'proto'),
    os.path.join('model', 'job-management', 'src', 'main', 'proto'),
    os.path.join('model', 'fn-execution', 'src', 'main', 'proto'),
    os.path.join('model', 'interactive', 'src', 'main', 'proto'),
]
# Directory containing this file.
PYTHON_SDK_ROOT = os.path.dirname(clean_path(__file__))
# Repository root: two directory levels above the SDK root.
PROJECT_ROOT = clean_path(os.path.join(PYTHON_SDK_ROOT, '..', '..'))
# Destination for the generated pb2 modules and urn stub side-car files.
PYTHON_OUTPUT_PATH = os.path.join(
    PYTHON_SDK_ROOT, 'apache_beam', 'portability', 'api')
# Non-proto resource files copied alongside the generated modules.
MODEL_RESOURCES = [
    os.path.normpath((
        'model/fn-execution/src/main/resources/org/'
        'apache/beam/model/fnexecution/v1/standard_coders.yaml')),
]
class PythonPath(object):
  """Context manager that temporarily adds one entry to ``sys.path``.

  The full ``sys.path`` list is snapshotted on entry and restored verbatim
  on exit.  A falsy *path* turns the whole manager into a no-op.
  """
  def __init__(self, path: str, front: bool = False):
    self._path = path
    self._front = front
  def __enter__(self):
    if not self._path:
      return
    # Snapshot before mutating so __exit__ can restore it wholesale.
    self._sys_path = list(sys.path)
    position = 0 if self._front else len(sys.path)
    sys.path.insert(position, self._path)
  def __exit__(self, exc_type, exc_val, exc_tb):
    if not self._path:
      return
    sys.path = self._sys_path
def generate_urn_files(out_dir, api_path):
  """
  Create python files with statically defined URN constants.

  Creates a <proto>_pb2_urn.py file for each <proto>_pb2.py file that contains
  an enum type.

  This works by importing each api.<proto>_pb2 module created by `protoc`,
  inspecting the module's contents, and generating a new side-car urn module.
  This is executed at build time rather than dynamically on import to ensure
  that it is compatible with static type checkers like mypy.
  """
  import google.protobuf.pyext._message as pyext_message
  from google.protobuf import message
  class Context(object):
    # Accumulates generated source lines plus the imports and "empty type"
    # aliases those lines depend on; nested Contexts handle nested indents.
    INDENT = ' '
    CAP_SPLIT = re.compile('([A-Z][^A-Z]*|^[a-z]+)')
    def __init__(self, indent=0):
      self.lines = []        # generated source lines (each ends with '\n')
      self.imports = set()   # fully-qualified module names referenced
      self.empty_types = set()  # 'EMPTY_X = X()' alias statements
      self._indent = indent
    @contextlib.contextmanager
    def indent(self):
      # Temporarily deepen the indent level for a nested definition body.
      self._indent += 1
      yield
      self._indent -= 1
    def prepend(self, s):
      # Insert a line (at the current indent) at the top of the buffer;
      # empty string inserts a blank line.
      if s:
        self.lines.insert(0, (self.INDENT * self._indent) + s + '\n')
      else:
        self.lines.insert(0, '\n')
    def line(self, s):
      # Append a line at the current indent; empty string appends a blank.
      if s:
        self.lines.append((self.INDENT * self._indent) + s + '\n')
      else:
        self.lines.append('\n')
    def import_type(self, typ):
      # Return the local name under which `typ` is usable, recording the
      # module that must be imported for it (builtins need no import).
      modname = typ.__module__
      if modname in ('__builtin__', 'builtin'):
        return typ.__name__
      else:
        self.imports.add(modname)
        _, modname = modname.rsplit('.', 1)
        return modname + '.' + typ.__name__
    @staticmethod
    def is_message_type(obj):
      return isinstance(obj, type) and \
          issubclass(obj, message.Message)
    @staticmethod
    def is_enum_type(obj):
      # protobuf enum wrappers expose no public base class; match by name.
      return type(obj).__name__ == 'EnumTypeWrapper'
    def python_repr(self, obj):
      # Render an arbitrary proto field value as Python source text.
      if isinstance(obj, message.Message):
        return self.message_repr(obj)
      elif isinstance(
          obj,
          (
              list,
              pyext_message.RepeatedCompositeContainer,  # pylint: disable=c-extension-no-member
              pyext_message.RepeatedScalarContainer)):  # pylint: disable=c-extension-no-member
        return '[%s]' % ', '.join(self.python_repr(x) for x in obj)
      else:
        return repr(obj)
    def empty_type(self, typ):
      # Share a single EMPTY_<NAME> constant for argument-less message
      # instances instead of constructing one inline at each use site.
      name = (
          'EMPTY_' +
          '_'.join(x.upper() for x in self.CAP_SPLIT.findall(typ.__name__)))
      self.empty_types.add('%s = %s()' % (name, self.import_type(typ)))
      return name
    def message_repr(self, msg):
      # Render a message as a constructor call with its set fields only.
      parts = []
      for field, value in msg.ListFields():
        parts.append('%s=%s' % (field.name, self.python_repr(value)))
      if parts:
        return '%s(%s)' % (self.import_type(type(msg)), ', '.join(parts))
      else:
        return self.empty_type(type(msg))
    def write_enum(self, enum_name, enum, indent):
      # Emit one PropertiesFromEnumValue assignment per enum value that
      # carries beam extension data; fully-empty enums produce no class.
      ctx = Context(indent=indent)
      with ctx.indent():
        for v in enum.DESCRIPTOR.values:
          extensions = v.GetOptions().Extensions
          prop = (
              extensions[beam_runner_api_pb2.beam_urn],
              extensions[beam_runner_api_pb2.beam_constant],
              extensions[metrics_pb2.monitoring_info_spec],
              extensions[metrics_pb2.label_props],
          )
          reprs = [self.python_repr(x) for x in prop]
          if all(x == "''" or x.startswith('EMPTY_') for x in reprs):
            continue
          ctx.line(
              '%s = PropertiesFromEnumValue(%s)' %
              (v.name, ', '.join(self.python_repr(x) for x in prop)))
      if ctx.lines:
        ctx.prepend('class %s(object):' % enum_name)
        ctx.prepend('')
        ctx.line('')
      return ctx.lines
    def write_message(self, message_name, message, indent=0):
      # Recurse through nested message/enum members; only non-empty bodies
      # get wrapped in a class definition.
      ctx = Context(indent=indent)
      with ctx.indent():
        for obj_name, obj in inspect.getmembers(message):
          if self.is_message_type(obj):
            ctx.lines += self.write_message(obj_name, obj, ctx._indent)
          elif self.is_enum_type(obj):
            ctx.lines += self.write_enum(obj_name, obj, ctx._indent)
      if ctx.lines:
        ctx.prepend('class %s(object):' % message_name)
        ctx.prepend('')
      return ctx.lines
  pb2_files = list(glob.glob(os.path.join(out_dir, '*_pb2.py')))
  # The generated modules must be importable as the package "api.*", so the
  # parent of api_path is put at the front of sys.path while importing.
  with PythonPath(os.path.dirname(api_path), front=True):
    beam_runner_api_pb2 = import_module(
        'api.org.apache.beam.model.pipeline.v1.beam_runner_api_pb2')
    metrics_pb2 = import_module(
        'api.org.apache.beam.model.pipeline.v1.metrics_pb2')
    for pb2_file in pb2_files:
      modname = os.path.splitext(pb2_file)[0]
      out_file = modname + '_urns.py'
      api_start_idx = modname.index(os.path.sep + 'api' + os.path.sep)
      import_path = modname[api_start_idx + 1:].replace(os.path.sep, '.')
      mod = import_module(import_path)
      ctx = Context()
      for obj_name, obj in inspect.getmembers(mod):
        if ctx.is_message_type(obj):
          ctx.lines += ctx.write_message(obj_name, obj)
      if ctx.lines:
        # Prepend in reverse-sorted order so the final file order is sorted:
        # empty-type aliases first, then the imports they rely on.
        for line in reversed(sorted(ctx.empty_types)):
          ctx.prepend(line)
        for modname in reversed(sorted(ctx.imports)):
          pkg, target = modname.rsplit('.', 1)
          rel_import = build_relative_import(api_path, pkg, out_file)
          ctx.prepend('from %s import %s' % (rel_import, target))
        rel_import = build_relative_import(
            os.path.dirname(api_path), 'utils', out_file)
        ctx.prepend('from %s import PropertiesFromEnumValue' % rel_import)
        LOG.info("Writing urn stubs: %s" % out_file)
        with open(out_file, 'w') as f:
          f.writelines(ctx.lines)
def _find_protoc_gen_mypy():
  """Return the full path of the ``protoc-gen-mypy`` plugin executable.

  The directory of the running interpreter (the venv's bin dir) is searched
  first, then every entry of ``PATH``.  Raises RuntimeError when the plugin
  cannot be located.

  NOTE: this shouldn't be necessary if the virtualenv's environment
  is passed to tasks below it, since protoc will search the PATH itself.
  """
  exe_name = 'protoc-gen-mypy'
  if platform.system() == 'Windows':
    exe_name += ".exe"
  path_var = os.environ.get('PATH')
  candidates = path_var.split(os.pathsep) if path_var else []
  # should typically be installed into the venv's bin dir
  candidates.insert(0, os.path.dirname(sys.executable))
  for directory in candidates:
    candidate = os.path.join(directory, exe_name)
    if os.path.exists(candidate):
      LOG.info('Found protoc_gen_mypy at %s' % candidate)
      return candidate
  raise RuntimeError(
      "Could not find %s in %s" % (exe_name, ', '.join(candidates)))
def find_by_ext(root_dir, ext):
  """Recursively walk *root_dir*, yielding the canonical (clean_path'd)
  location of every file whose name ends with *ext*."""
  for dirpath, _, filenames in os.walk(root_dir):
    for name in (n for n in filenames if n.endswith(ext)):
      yield clean_path(os.path.join(dirpath, name))
def ensure_grpcio_exists():
  """Ensure ``grpc_tools`` is importable, installing it on demand.

  Returns the install directory when an on-demand install was performed,
  otherwise None (the package was already importable).  On Windows no
  automatic install is attempted; a RuntimeError asks the user to install
  grpcio-tools manually instead.
  """
  try:
    from grpc_tools import protoc  # pylint: disable=unused-import
  except ImportError:
    if platform.system() == 'Windows':
      # For Windows, grpcio-tools has to be installed manually.
      raise RuntimeError(
          'Cannot generate protos for Windows since grpcio-tools package is '
          'not installed. Please install this package manually '
          'using \'pip install grpcio-tools\'.')
    return _install_grpcio_tools()
def _install_grpcio_tools():
  """
  Though wheels are available for grpcio-tools, setup_requires uses
  easy_install which doesn't understand them. This means that it is
  compiled from scratch (which is expensive as it compiles the full
  protoc compiler). Instead, we attempt to install a wheel in a temporary
  directory and add it to the path as needed.
  See https://github.com/pypa/setuptools/issues/377

  Returns the directory the wheel was installed into.
  """
  target_dir = os.path.join(PYTHON_SDK_ROOT, '.eggs', 'grpcio-wheels')
  logging.warning('Installing grpcio-tools into %s', target_dir)
  started_at = time.time()
  pip_command = [
      sys.executable,
      '-m',
      'pip',
      'install',
      '--target',
      target_dir,
      '--upgrade',
      '-r',
      os.path.join(PYTHON_SDK_ROOT, 'build-requirements.txt'),
  ]
  subprocess.check_call(pip_command)
  elapsed = time.time() - started_at
  logging.warning('Installing grpcio-tools took %0.2f seconds.', elapsed)
  return target_dir
def build_relative_import(root_path, import_path, start_file_path):
  """Return a relative import string (e.g. ``..pkg.mod``) that, when used
  inside the file at *start_file_path*, resolves to *import_path*.

  :param root_path: filesystem directory the dotted *import_path* is
      relative to
  :param import_path: dotted module-or-package path to be imported
  :param start_file_path: path of the file that will contain the import
  """
  tail_path = import_path.replace('.', os.path.sep)
  source_path = os.path.join(root_path, tail_path)
  # A module (foo.py on disk) is imported relative to its package dir.
  is_module = os.path.isfile(source_path + '.py')
  if is_module:
    source_path = os.path.dirname(source_path)
  rel_path = os.path.relpath(
      source_path, start=os.path.dirname(start_file_path))
  if rel_path == '.':
    # Same directory: '.' for a package, '.modname' for a module.
    if is_module:
      rel_path += os.path.basename(tail_path)
    return rel_path
  if rel_path.endswith('..'):
    rel_path += os.path.sep
  # In a path that looks like ../../../foo, every double dot
  # after the right most double dot needs to be collapsed to
  # a single dot to look like ././../foo to which we can convert
  # to ....foo for the proper relative import.
  first_half_idx = rel_path.rfind('..' + os.path.sep)
  if first_half_idx == 0:
    return rel_path.replace(os.path.sep, '')
  first_half = rel_path[:first_half_idx].replace('..', '.')
  final_import = first_half.replace(os.path.sep, '') + '..' + \
      rel_path[first_half_idx+3:].replace(os.path.sep, '.')
  if is_module:
    if final_import.count('.') == len(final_import):
      # Pure parent traversal (all dots): append the module name with no
      # extra separating dot.
      return final_import + os.path.basename(tail_path)
    return final_import + '.{}'.format(os.path.basename(tail_path))
  return final_import
def generate_init_files_lite(api_root):
  """Create an empty ``__init__.py`` in every directory under
  ``<api_root>/org`` so the generated protos form importable packages."""
  proto_root = os.path.join(api_root, 'org')
  for current_dir, _, _ in os.walk(proto_root):
    # 'w+' creates (or truncates) the file; an empty file is all we need.
    open(os.path.join(current_dir, '__init__.py'), 'w+').close()
def generate_init_files_full(api_root):
proto_root = os.path.join(api_root, 'org')
api_module_root = os.path.join(api_root, '__init__.py')
modules = defaultdict(list)
for root, _, files in os.walk(proto_root):
init_file = os.path.join(root, '__init__.py')
with open(init_file, 'w+') as f:
f.write(LICENSE_HEADER.lstrip())
for file in files:
if not file.endswith('.py') or file == '__init__.py':
continue
module_name = file.split('.')[0]
f.write('from . import {}\n'.format(module_name))
modules[root].append(module_name)
with | |
<reponame>tguillemLSST/CLMM
"""Tests for dataops.py"""
import numpy as np
from numpy import testing
import clmm
from clmm import GCData
import clmm.dataops as da
import clmm.theory as theo
# Shared tolerances unpacked into the numpy.testing.assert_allclose calls below.
TOLERANCE = {'atol':1.e-7, 'rtol':1.e-7}
def test_compute_cross_shear():
    """Check da._compute_cross_shear for scalars, arrays, and edge cases."""
    # Scalar inputs
    cross = da._compute_cross_shear(0.15, 0.08, 0.52)
    testing.assert_allclose(cross, 0.08886301350787848)
    # Array inputs
    g1 = np.array([0.15, 0.40])
    g2 = np.array([0.08, 0.30])
    angles = np.array([0.52, 1.23])
    testing.assert_allclose(
        da._compute_cross_shear(g1, g2, angles),
        [0.08886301350787848, 0.48498333705834484])
    # Edge case tests
    edge_cases = (
        ((100., 0., 0.), 0.0),
        ((100., 0., np.pi/2), 0.0),
        ((0., 100., 0.), -100.0),
        ((0., 100., np.pi/2), 100.0),
        ((0., 100., np.pi/4.), 0.0),
        ((0., 0., 0.3), 0.),
    )
    for args, expected in edge_cases:
        testing.assert_allclose(da._compute_cross_shear(*args), expected,
                                **TOLERANCE)
def test_compute_tangential_shear():
    """Check da._compute_tangential_shear for scalars, arrays, edge cases."""
    # Scalar inputs
    testing.assert_allclose(
        da._compute_tangential_shear(0.15, 0.08, 0.52),
        -0.14492537676438383)
    # Array inputs
    g1 = np.array([0.15, 0.40])
    g2 = np.array([0.08, 0.30])
    angles = np.array([0.52, 1.23])
    testing.assert_allclose(
        da._compute_tangential_shear(g1, g2, angles),
        [-0.14492537676438383, 0.1216189244145496])
    # test for reasonable values
    for args, expected in (((100., 0., 0.), -100.0),
                           ((0., 100., np.pi/4.), -100.0),
                           ((0., 0., 0.3), 0.)):
        testing.assert_almost_equal(
            da._compute_tangential_shear(*args), expected)
def test_compute_lensing_angles_flatsky():
    """Validate da._compute_lensing_angles_flatsky: input-domain errors,
    the wide-separation warning, and (theta, phi) values at edge cases."""
    ra_l, dec_l = 161., 65.
    ra_s, dec_s = np.array([-355., 355.]), np.array([-85., 85.])
    # Test domains on inputs
    testing.assert_raises(ValueError, da._compute_lensing_angles_flatsky,
                          -365., dec_l, ra_s, dec_s)
    testing.assert_raises(ValueError, da._compute_lensing_angles_flatsky,
                          365., dec_l, ra_s, dec_s)
    testing.assert_raises(ValueError, da._compute_lensing_angles_flatsky,
                          ra_l, 95., ra_s, dec_s)
    testing.assert_raises(ValueError, da._compute_lensing_angles_flatsky,
                          ra_l, -95., ra_s, dec_s)
    testing.assert_raises(ValueError, da._compute_lensing_angles_flatsky,
                          ra_l, dec_l, ra_s-10., dec_s)
    testing.assert_raises(ValueError, da._compute_lensing_angles_flatsky,
                          ra_l, dec_l, ra_s+10., dec_s)
    testing.assert_raises(ValueError, da._compute_lensing_angles_flatsky,
                          ra_l, dec_l, ra_s, dec_s-10.)
    testing.assert_raises(ValueError, da._compute_lensing_angles_flatsky,
                          ra_l, dec_l, ra_s, dec_s+10.)
    # Ensure that we throw a warning with >1 deg separation
    testing.assert_warns(UserWarning, da._compute_lensing_angles_flatsky,
                         ra_l, dec_l, np.array([151.32, 161.34]), np.array([41.49, 51.55]))
    # Test outputs for reasonable values
    ra_l, dec_l = 161.32, 51.49
    ra_s, dec_s = np.array([161.29, 161.34]), np.array([51.45, 51.55])
    thetas, phis = da._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, dec_s)
    testing.assert_allclose(thetas, np.array([0.00077050407583119666, 0.00106951489719733675]),
                            **TOLERANCE,
                            err_msg="Reasonable values with flat sky not matching to precision for theta")
    testing.assert_allclose(phis, np.array([-1.13390499136495481736, 1.77544123918164542530]),
                            **TOLERANCE,
                            err_msg="Reasonable values with flat sky not matching to precision for phi")
    # lens and source at the same ra
    testing.assert_allclose(da._compute_lensing_angles_flatsky(ra_l, dec_l, np.array([161.32, 161.34]), dec_s),
                            [[0.00069813170079771690, 0.00106951489719733675], [-1.57079632679489655800, 1.77544123918164542530]],
                            **TOLERANCE, err_msg="Failure when lens and a source share an RA")
    # lens and source at the same dec
    testing.assert_allclose(da._compute_lensing_angles_flatsky(ra_l, dec_l, ra_s, np.array([51.49, 51.55])),
                            [[0.00032601941539388962, 0.00106951489719733675], [0.00000000000000000000, 1.77544123918164542530]],
                            **TOLERANCE, err_msg="Failure when lens and a source share a DEC")
    # lens and source at the same ra and dec
    # NOTE(review): the three checks below pass TOLERANCE['rtol'] positionally
    # (rtol only, default atol=0) rather than **TOLERANCE -- presumably
    # intentional; confirm before unifying.
    testing.assert_allclose(da._compute_lensing_angles_flatsky(ra_l, dec_l, np.array([ra_l, 161.34]), np.array([dec_l, 51.55])),
                            [[0.00000000000000000000, 0.00106951489719733675], [0.00000000000000000000, 1.77544123918164542530]],
                            TOLERANCE['rtol'], err_msg="Failure when lens and a source share an RA and a DEC")
    # angles over the branch cut between 0 and 360
    testing.assert_allclose(da._compute_lensing_angles_flatsky(0.1, dec_l, np.array([359.9, 359.5]), dec_s),
                            [[0.0022828333888309108, 0.006603944760273219], [-0.31079754672938664, 0.15924369771830643]],
                            TOLERANCE['rtol'], err_msg="Failure when ra_l and ra_s are close but on the opposite sides of the 0 axis")
    # angles over the branch cut between 0 and 360
    testing.assert_allclose(da._compute_lensing_angles_flatsky(-180, dec_l, np.array([180.1, 179.7]), dec_s),
                            [[0.0012916551296819666, 0.003424250083245557], [-2.570568636904587, 0.31079754672944354]],
                            TOLERANCE['rtol'], err_msg="Failure when ra_l and ra_s are the same but one is defined negative")
def test_compute_tangential_and_cross_components(modeling_data):
    """End-to-end test of compute_tangential_and_cross_components: input
    validation, shear values, DeltaSigma values, and the GalaxyCluster
    method wrapper.  `modeling_data` is a fixture (backend setup)."""
    # Input values
    ra_lens, dec_lens, z_lens = 120., 42., 0.5
    ra_source = np.array([120.1, 119.9])
    dec_source = np.array([41.9, 42.2])
    z_source = np.array([1.,2.])
    shear1 = np.array([0.2, 0.4])
    shear2 = np.array([0.3, 0.5])
    # Correct values
    expected_angsep = np.array([0.0021745039090962414, 0.0037238407383072053])
    expected_cross_shear = np.array([0.2780316984090899, 0.6398792901134982])
    expected_tangential_shear = np.array([-0.22956126563459447, -0.02354769805831558])
    # DeltaSigma expected values for clmm.Cosmology(H0=70.0, Omega_dm0=0.275, Omega_b0=0.025)
    expected_cross_DS = np.array([1224.3326297393244, 1899.6061989365176])*0.7*1.0e12*1.0002565513832675
    expected_tangential_DS = np.array([-1010.889584349285, -69.9059242788237])*0.7*1.0e12*1.0002565513832675
    # test inconsistent data
    testing.assert_raises(TypeError, da.compute_tangential_and_cross_components,
                          ra_lens=ra_lens, dec_lens=dec_lens, ra_source=ra_source[0], dec_source=dec_source,
                          shear1=shear1, shear2=shear2)
    testing.assert_raises(TypeError, da.compute_tangential_and_cross_components,
                          ra_lens=ra_lens, dec_lens=dec_lens, ra_source=ra_source[:1], dec_source=dec_source,
                          shear1=shear1, shear2=shear2)
    # test not implemented geometry
    testing.assert_raises(NotImplementedError, da.compute_tangential_and_cross_components,
                          ra_lens=ra_lens, dec_lens=dec_lens, ra_source=ra_source, dec_source=dec_source,
                          shear1=shear1, shear2=shear2, geometry='something crazy')
    # Pass arrays directly into function
    angsep, tshear, xshear = da.compute_tangential_and_cross_components(ra_lens=ra_lens, dec_lens=dec_lens,
                                                                        ra_source=ra_source, dec_source=dec_source,
                                                                        shear1=shear1, shear2=shear2)
    testing.assert_allclose(angsep, expected_angsep, **TOLERANCE,
                            err_msg="Angular Separation not correct when passing lists")
    testing.assert_allclose(tshear, expected_tangential_shear, **TOLERANCE,
                            err_msg="Tangential Shear not correct when passing lists")
    testing.assert_allclose(xshear, expected_cross_shear, **TOLERANCE,
                            err_msg="Cross Shear not correct when passing lists")
    # Pass LISTS into function
    angsep, tshear, xshear = da.compute_tangential_and_cross_components(ra_lens=ra_lens, dec_lens=dec_lens,
                                                                        ra_source=list(ra_source), dec_source=list(dec_source),
                                                                        shear1=list(shear1), shear2=list(shear2))
    testing.assert_allclose(angsep, expected_angsep, **TOLERANCE,
                            err_msg="Angular Separation not correct when passing lists")
    testing.assert_allclose(tshear, expected_tangential_shear, **TOLERANCE,
                            err_msg="Tangential Shear not correct when passing lists")
    testing.assert_allclose(xshear, expected_cross_shear, **TOLERANCE,
                            err_msg="Cross Shear not correct when passing lists")
    # Use the cluster method
    cluster = clmm.GalaxyCluster(unique_id='blah', ra=ra_lens, dec=dec_lens, z=z_lens,
                                 galcat=GCData([ra_source, dec_source, shear1, shear2],
                                               names=('ra', 'dec', 'e1', 'e2')))
    # Test error with bad name/missing column
    testing.assert_raises(TypeError, cluster.compute_tangential_and_cross_components,
                          shape_component1='crazy name')
    # Test output
    angsep3, tshear3, xshear3 = cluster.compute_tangential_and_cross_components()
    testing.assert_allclose(angsep3, expected_angsep, **TOLERANCE,
                            err_msg="Angular Separation not correct when using cluster method")
    testing.assert_allclose(tshear3, expected_tangential_shear, **TOLERANCE,
                            err_msg="Tangential Shear not correct when using cluster method")
    testing.assert_allclose(xshear3, expected_cross_shear, **TOLERANCE,
                            err_msg="Cross Shear not correct when using cluster method")
    # Check behaviour for the deltasigma option.
    cosmo = clmm.Cosmology(H0=70.0, Omega_dm0=0.275, Omega_b0=0.025)
    # test missing info for is_deltasigma=True
    testing.assert_raises(TypeError, da.compute_tangential_and_cross_components,
                          ra_lens=ra_lens, dec_lens=dec_lens, ra_source=ra_source, dec_source=dec_source,
                          shear1=shear1, shear2=shear2, is_deltasigma=True, cosmo=None, z_lens=z_lens, z_source=z_source)
    testing.assert_raises(TypeError, da.compute_tangential_and_cross_components,
                          ra_lens=ra_lens, dec_lens=dec_lens, ra_source=ra_source, dec_source=dec_source,
                          shear1=shear1, shear2=shear2, is_deltasigma=True, cosmo=cosmo, z_lens=None, z_source=z_source)
    testing.assert_raises(TypeError, da.compute_tangential_and_cross_components,
                          ra_lens=ra_lens, dec_lens=dec_lens, ra_source=ra_source, dec_source=dec_source,
                          shear1=shear1, shear2=shear2, is_deltasigma=True, cosmo=cosmo, z_lens=z_lens, z_source=None)
    # check values for DeltaSigma
    angsep_DS, tDS, xDS = da.compute_tangential_and_cross_components(
        ra_lens=ra_lens, dec_lens=dec_lens,
        ra_source=ra_source, dec_source=dec_source,
        shear1=shear1, shear2=shear2, is_deltasigma=True,
        cosmo=cosmo, z_lens=z_lens, z_source=z_source)
    testing.assert_allclose(angsep_DS, expected_angsep, **TOLERANCE,
                            err_msg="Angular Separation not correct")
    testing.assert_allclose(tDS, expected_tangential_DS, **TOLERANCE,
                            err_msg="Tangential Shear not correct")
    testing.assert_allclose(xDS, expected_cross_DS, **TOLERANCE,
                            err_msg="Cross Shear not correct")
    # Tests with the cluster object
    # cluster object missing source redshift, and function call missing cosmology
    cluster = clmm.GalaxyCluster(unique_id='blah', ra=ra_lens, dec=dec_lens, z=z_lens,
                                 galcat=GCData([ra_source, dec_source, shear1, shear2],
                                               names=('ra', 'dec', 'e1', 'e2')))
    testing.assert_raises(TypeError, cluster.compute_tangential_and_cross_components, is_deltasigma=True)
    # cluster object OK but function call missing cosmology
    cluster = clmm.GalaxyCluster(unique_id='blah', ra=ra_lens, dec=dec_lens, z=z_lens,
                                 galcat=GCData([ra_source, dec_source, shear1, shear2, z_source],
                                               names=('ra', 'dec', 'e1', 'e2','z')))
    testing.assert_raises(TypeError, cluster.compute_tangential_and_cross_components, is_deltasigma=True)
    # check values for DeltaSigma
    angsep_DS, tDS, xDS = cluster.compute_tangential_and_cross_components(cosmo=cosmo, is_deltasigma=True)
    testing.assert_allclose(angsep_DS, expected_angsep, **TOLERANCE,
                            err_msg="Angular Separation not correct when using cluster method")
    testing.assert_allclose(tDS, expected_tangential_DS, **TOLERANCE,
                            err_msg="Tangential Shear not correct when using cluster method")
    testing.assert_allclose(xDS, expected_cross_DS, **TOLERANCE,
                            err_msg="Cross Shear not correct when using cluster method")
def _test_profile_table_output(profile, expected_rmin, expected_radius, expected_rmax,
                               expected_p0, expected_p1, expected_nsrc,
                               expected_gal_id=None, p0='p_0', p1='p_1'):
    """Func to make the validation of the table with the expected values.

    Compares each per-bin column of `profile` against the expected arrays
    (allclose for the radius/profile columns, exact equality for n_src and,
    when given, gal_id).
    """
    column_checks = (
        ('radius_min', expected_rmin, "Minimum radius in bin not expected."),
        ('radius', expected_radius, "Mean radius in bin not expected."),
        ('radius_max', expected_rmax, "Maximum radius in bin not expected."),
        (p0, expected_p0, "Tangential shear in bin not expected"),
        (p1, expected_p1, "Cross shear in bin not expected"),
    )
    for column, expected, message in column_checks:
        testing.assert_allclose(profile[column], expected, **TOLERANCE,
                                err_msg=message)
    testing.assert_array_equal(profile['n_src'], expected_nsrc)
    if expected_gal_id is not None:
        testing.assert_array_equal(profile['gal_id'], expected_gal_id)
def test_make_radial_profiles():
# Set up a cluster object and compute cross and tangential shears
ra_lens, dec_lens, z_lens = 120., 42., 0.5
ra_source = np.array([120.1, 119.9, 119.9])
dec_source = np.array([41.9, 42.2, 42.2])
id_source = np.array([1, 2, 3])
shear1 = np.array([0.2, 0.4, 0.4])
shear2 = np.array([0.3, 0.5, 0.5])
z_sources = np.ones(3)
angsep_units, bin_units = 'radians', 'radians'
# Set up radial values
bins_radians = np.array([0.002, 0.003, 0.004])
expected_radius = [0.0021745039090962414, 0.0037238407383072053]
#######################################
### Use without cluster object ########
#######################################
angsep, tshear, xshear = da.compute_tangential_and_cross_components(ra_lens=ra_lens, dec_lens=dec_lens,
ra_source=ra_source, dec_source=dec_source,
shear1=shear1, shear2=shear2)
# Tests passing int as bins arg makes the correct bins
bins = 2
vec_bins = clmm.utils.make_bins(np.min(angsep), np.max(angsep), bins)
testing.assert_array_equal(da.make_radial_profile([tshear, xshear, z_sources], angsep, angsep_units, bin_units, bins=bins)[0],
da.make_radial_profile([tshear, xshear, z_sources], angsep, angsep_units, bin_units, bins=vec_bins)[0])
# Test the outputs of compute_tangential_and_cross_components just to be safe
expected_angsep = np.array([0.0021745039090962414, 0.0037238407383072053, 0.0037238407383072053])
expected_cross_shear = np.array([0.2780316984090899, 0.6398792901134982, 0.6398792901134982])
expected_tan_shear = np.array([-0.22956126563459447, -0.02354769805831558, -0.02354769805831558])
testing.assert_allclose(angsep, expected_angsep, **TOLERANCE,
err_msg="Angular Separation not correct when testing shear profiles")
testing.assert_allclose(tshear, expected_tan_shear, **TOLERANCE,
err_msg="Tangential Shear not correct when testing shear profiles")
testing.assert_allclose(xshear, expected_cross_shear, **TOLERANCE,
err_msg="Cross Shear not correct when testing shear profiles")
# Test default behavior, remember that include_empty_bins=False excludes all bins with N>=1
profile = da.make_radial_profile([tshear, xshear, z_sources], angsep, angsep_units, bin_units,
bins=bins_radians, include_empty_bins=False)
_test_profile_table_output(profile, bins_radians[1], expected_radius[1], bins_radians[2],
expected_tan_shear[1], expected_cross_shear[1], [2])
# Test metadata
testing.assert_array_equal(profile.meta['bin_units'], bin_units)
testing.assert_array_equal(profile.meta['cosmo'], None)
# Test simple unit convesion
profile = da.make_radial_profile([tshear, xshear, z_sources], angsep*180./np.pi, 'degrees', bin_units,
bins=bins_radians, include_empty_bins=False)
_test_profile_table_output(profile, bins_radians[1], expected_radius[1], bins_radians[2],
expected_tan_shear[1], expected_cross_shear[1], [2])
# including empty bins
profile = da.make_radial_profile([tshear, xshear, z_sources], angsep, angsep_units, bin_units,
bins=bins_radians, include_empty_bins=True)
_test_profile_table_output(profile, bins_radians[:-1], expected_radius, bins_radians[1:],
expected_tan_shear[:-1], expected_cross_shear[:-1], [1,2])
# test with return_binnumber
profile, binnumber = da.make_radial_profile([tshear, xshear, z_sources], angsep, angsep_units, bin_units,
bins=bins_radians, include_empty_bins=True, return_binnumber=True)
_test_profile_table_output(profile, bins_radians[:-1], expected_radius, bins_radians[1:],
expected_tan_shear[:-1], expected_cross_shear[:-1], [1,2])
testing.assert_array_equal(binnumber, [1, 2, 2])
###################################
### Test with cluster object ######
###################################
# Test error | |
# <gh_stars>10-100  (repository metadata artifact, commented out so the module parses)
import math
import sys
import numpy as np
import scipy.spatial
from pychemia import Composition, Structure, pcm_log
from pychemia.analysis import ClusterAnalysis, ClusterMatch
from pychemia.code.lennardjones import lj_compact_evaluate
from pychemia.utils.mathematics import unit_vector, length_vectors, unit_vectors, rotate_towards_axis, length_vector
from pychemia.utils.periodic import covalent_radius, atomic_number
from pychemia.utils.serializer import generic_serializer
from pychemia.code.lennardjones import LennardJones
from pychemia.external.symmol import get_point_group
from ._population import Population
from ._distances import FingerPrints, StructureDistances
class LJCluster(Population):
    def __init__(self, name, composition=None, tag='global', target_forces=1E-3, value_tol=1E-2,
                 distance_tolerance=0.1, minimal_density=70.0, refine=True, direct_evaluation=False):
        """Population of Lennard-Jones clusters.

        :param name: name identifying the population
        :param composition: composition of the clusters; when None, random
            additions are disabled (``add_random`` raises)
        :param tag: status label stored on each entry
        :param target_forces: maximum force below which an entry counts as
            evaluated
        :param value_tol: energy tolerance used when screening duplicates
        :param distance_tolerance: fingerprint-distance threshold for
            declaring two clusters duplicates
        :param minimal_density: density target passed to the LJ evaluator
        :param refine: enable the refine / refine_progressive relaxations
        :param direct_evaluation: relax structures as soon as they are added
        """
        if composition is not None:
            self.composition = Composition(composition)
        else:
            self.composition = None
        Population.__init__(self, name=name, tag=tag, direct_evaluation=direct_evaluation,
                            distance_tolerance=distance_tolerance)
        self.tag = tag
        self.target_forces = target_forces
        self.value_tol = value_tol
        self.minimal_density = minimal_density
        # NOTE(review): this boolean attribute shadows the refine() method on
        # instances (population.refine(entry_id) would call a bool) -- confirm
        # how refine() is meant to be invoked.
        self.refine = refine
        self.fingerprinter = FingerPrints(self.pcdb)
        self.distancer = StructureDistances(self.pcdb)
def add_random(self):
"""
Add one random structure to the population
"""
if self.composition is None:
raise ValueError('No composition associated to this population')
comp = self.composition.composition.copy()
structure = Structure.random_cluster(composition=comp)
return self.new_entry(structure), None
def get_duplicates(self, ids, tolerance=None, fast=True):
ret = {}
selection = self.ids_sorted(ids)
values = np.array([self.value(i) for i in selection])
if len(values) == 0:
return ret
diffs = np.ediff1d(values)
for i in range(len(diffs)):
idiff = diffs[i]
if idiff < self.value_tol:
ident1 = selection[i]
ident2 = selection[i + 1]
pcm_log.debug('Testing distances between %s and %s' % (str(ident1), str(ident2)))
distance = self.distance(ident1, ident2)
if distance < self.distance_tolerance:
pcm_log.debug('Distance %7.3f < %7.3f' % (distance, self.distance_tolerance))
ret[ident2] = ident1
if len(ret) > 0:
pcm_log.debug('Number of duplicates %d' % len(ret))
return ret
def cross(self, ids):
if len(ids) != 2:
raise ValueError("Crossing only implemented between two clusters")
entry0 = self.get_entry(ids[0])
entry1 = self.get_entry(ids[1])
pos0 = np.array(entry0['structure']['positions']).reshape((-1, 3))
pos1 = np.array(entry1['structure']['positions']).reshape((-1, 3))
cut = np.random.randint(1, len(pos0))
new_pos0 = np.concatenate((pos0[:cut], pos1[cut:]))
new_pos1 = np.concatenate((pos1[:cut], pos0[cut:]))
new_structure = Structure(positions=new_pos0, symbols=entry0['structure']['symbols'],
periodicity=False)
entry_id = self.new_entry(structure=new_structure)
new_structure = Structure(positions=new_pos1, symbols=entry0['structure']['symbols'],
periodicity=False)
entry_jd = self.new_entry(structure=new_structure)
return entry_id, entry_jd
    def distance(self, entry_id, entry_jd, rcut=50):
        """
        Return a dissimilarity measure between two clusters.

        For each pair of species present in both clusters, the discrete
        radial distribution functions (cached as fingerprints) are compared
        through the cosine of the angle between the two vectors; the final
        value is the mean over species pairs. Results are cached in
        ``self.distancer`` keyed by the sorted id pair.

        :param rcut: unused in this implementation; kept for interface
            compatibility
        :param entry_id: The id of one population entry
        :param entry_jd: The id of another population entry
        :return: (float) The distance between two clusters
        """
        # Canonical, order-independent cache key
        ids_pair = tuple(np.sort([entry_id, entry_jd]))
        distance_entry = self.distancer.get_distance(ids_pair)
        if distance_entry is None:
            fingerprints = {}
            for entry_ijd in [entry_id, entry_jd]:
                if self.fingerprinter.get_fingerprint(entry_ijd) is None:
                    # No cached fingerprint: build one from the discrete radial
                    # distribution function of the stored structure
                    structure = self.get_structure(entry_ijd)
                    analysis = ClusterAnalysis(structure)
                    x, ys = analysis.discrete_radial_distribution_function()
                    fingerprint = {'_id': entry_ijd}
                    for k in ys:
                        atomic_number1 = atomic_number(k[0])
                        atomic_number2 = atomic_number(k[1])
                        # Symmetric species-pair key (A-B and B-A collapse to one)
                        pair = '%06d' % min(atomic_number1 * 1000 + atomic_number2,
                                            atomic_number2 * 1000 + atomic_number1)
                        fingerprint[pair] = list(ys[k])
                    # NOTE(review): this re-check repeats the enclosing condition,
                    # so the update() branch looks unreachable here -- confirm the
                    # fingerprinter's caching semantics
                    if self.fingerprinter.get_fingerprint(entry_ijd) is None:
                        self.fingerprinter.set_fingerprint(fingerprint)
                    else:
                        self.fingerprinter.update(entry_ijd, fingerprint)
                    fingerprints[entry_ijd] = fingerprint
                else:
                    fingerprints[entry_ijd] = self.fingerprinter.get_fingerprint(entry_ijd)
            # One cosine-based dissimilarity per species pair present in both
            dij = []
            for pair in fingerprints[entry_id]:
                if pair in fingerprints[entry_jd] and pair != '_id':
                    vect1 = fingerprints[entry_id][pair]
                    vect2 = fingerprints[entry_jd][pair]
                    # Zero-pad the shorter vector so both have the same length
                    if len(vect1) < len(vect2):
                        tmp = np.zeros(len(vect2))
                        tmp[:len(vect1)] = vect1
                        vect1 = tmp
                    elif len(vect1) > len(vect2):
                        tmp = np.zeros(len(vect1))
                        tmp[:len(vect2)] = vect2
                        vect2 = tmp
                    uvect1 = unit_vector(vect1)
                    uvect2 = unit_vector(vect2)
                    # 0 for identical directions, up to 1 for opposite ones
                    dij.append(0.5 * (1.0 - np.dot(uvect1, uvect2)))
            distance = float(np.mean(dij))
            self.distancer.set_distance(ids_pair, distance)
        else:
            distance = distance_entry['distance']
        return distance
@property
def to_dict(self):
return {'name': self.name,
'tag': self.tag,
'target_forces': self.target_forces,
'value_tol': self.value_tol,
'distance_tolerance': self.distance_tolerance,
'minimal_density': self.minimal_density}
    def get2_duplicates(self, ids, fast=True):
        """Quadratic pairwise duplicate search over the sorted entries.

        Every pair of entries within ``value_tol`` in energy is compared with
        the fingerprint distance; pairs within ``distance_tolerance`` are
        recorded as duplicates. Progress is written to stdout.

        :param fast: when True, ids already flagged as duplicates are skipped
            and each entry maps to a *list* of duplicates
        :return: tuple (dupes_dict, ordered list of duplicate ids)
        """
        dupes_dict = {}
        dupes_list = []
        selection = self.ids_sorted(ids)
        pcm_log.debug('Searching duplicates in %d structures' % len(selection))
        for i in range(len(selection) - 1):
            ncomps = 0
            entry_id = selection[i]
            if fast and entry_id in dupes_list:
                continue
            sys.stdout.write(" %5d of %5d: " % (i, len(selection)))
            value_i = self.value(entry_id)
            for j in range(i + 1, len(selection)):
                entry_jd = selection[j]
                if fast and entry_jd in dupes_list:
                    continue
                value_j = self.value(entry_jd)
                # Only pay for a distance computation when energies are close
                if abs(value_i - value_j) < self.value_tol:
                    ncomps += 1
                    distance = self.distance(entry_id, entry_jd)
                    if distance < self.distance_tolerance:
                        if entry_id in dupes_dict:
                            # NOTE(review): with fast=False the first duplicate is
                            # stored as a scalar, so this append would fail on a
                            # second hit -- confirm the fast=False path is used
                            dupes_dict[entry_id].append(entry_jd)
                        else:
                            if fast:
                                dupes_dict[entry_id] = [entry_jd]
                            else:
                                dupes_dict[entry_id] = entry_jd
                        dupes_list.append(entry_jd)
            sys.stdout.write(' comparisons: %d\n' % ncomps)
        return dupes_dict, [x for x in selection if x in dupes_list]
def is_evaluated(self, entry_id):
entry = self.get_entry(entry_id)
if entry is not None and 'properties' not in entry:
raise ValueError('Anomalous condition for %s' % entry_id)
if entry is not None and entry['properties'] is not None:
properties = entry['properties']
if 'forces' not in properties:
forces = None
elif properties['forces'] is None:
forces = None
else:
forces = np.max(np.apply_along_axis(np.linalg.norm, 1, np.array(properties['forces']).reshape((-1, 3))))
else:
forces = None
if forces is not None and forces < self.target_forces:
return True
else:
return False
    def from_dict(self, population_dict):
        """Build a new LJCluster from a settings dictionary (see ``to_dict``).

        Note: ``self`` is not used; this behaves like an alternate
        constructor returning a fresh population object.

        :param population_dict: dict with the keys produced by ``to_dict``
        :return: a new LJCluster instance
        """
        return LJCluster(name=population_dict['name'],
                         tag=population_dict['tag'],
                         target_forces=population_dict['target_forces'],
                         value_tol=population_dict['value_tol'],
                         distance_tolerance=population_dict['distance_tolerance'],
                         minimal_density=population_dict['minimal_density'])
    def move(self, entry_id, entry_jd, factor=0.2, in_place=False):
        """Move the cluster of ``entry_id`` a fraction of the way towards the
        matched geometry of ``entry_jd``.

        The atoms of both clusters are first matched with ClusterMatch, then
        the source positions are rotated towards the destination by
        ``reduc * factor``, retrying with a smaller fraction until the
        Lennard-Jones energy of the intermediate is negative. Finally the
        geometry is rescaled to avoid overlapping atoms.

        :param in_place: when True overwrite the entry (and unset its
            properties); otherwise create a new inactive entry
        :return: the id of the entry holding the moved structure
        """
        st_orig = self.get_structure(entry_id)
        st_dest = self.get_structure(entry_jd)
        cm = ClusterMatch(st_orig, st_dest)
        cm.match()
        # pos_orig = np.array(entry_orig['structure']['positions']).reshape((-1, 3))
        # pos_dest = np.array(entry_dest['structure']['positions']).reshape((-1, 3))
        pos_orig = cm.structure1.positions
        pos_dest = cm.structure2.positions
        # Move to a position with negative energy
        reduc = 1
        new_positions = np.array(pos_orig)
        while True:
            # NOTE(review): rotation_move is not among the visible imports --
            # presumably defined elsewhere in this module; confirm
            new_positions = rotation_move(pos_orig, pos_dest, fraction=reduc * factor)
            new_structure = Structure(positions=new_positions, symbols=st_orig.symbols, periodicity=False)
            lj = LennardJones(new_structure)
            if lj.get_energy() < 0.0:
                break
            # Shrink the effective step by 5% of the nominal factor each retry
            reduc -= 0.05
            pcm_log.debug('Effective factor will be reduced to %7.3f, original factor %7.3f' % (reduc * factor, factor))
            if reduc <= 0.0:
                # print 'No movement effective'
                break
        # Avoid condition with atoms too close
        distance_matrix = scipy.spatial.distance_matrix(new_positions, new_positions)
        tmp = np.max(distance_matrix.flatten())
        # print 'Scaling by', tmp
        # Smallest inter-atomic distance (the diagonal is masked by adding tmp)
        minimal_distance = np.min((distance_matrix + tmp * np.eye(len(new_positions))).flatten())
        if minimal_distance < 1E-8:
            pcm_log.debug("Null distance between different atoms, no moving")
            new_positions = pos_orig
        if tmp > 5:
            # print 'Big scaling, better not to move'
            new_positions = pos_orig
        else:
            # Rescale so the closest pair sits at the largest covalent radius
            max_cov = np.max(covalent_radius(st_orig.symbols))
            new_positions *= max_cov / minimal_distance
        new_structure = Structure(positions=new_positions, symbols=st_orig.symbols, periodicity=False)
        # print 'Density of cluster', new_structure.density
        if in_place:
            self.unset_properties(entry_id)
            return self.set_structure(entry_id, new_structure)
        else:
            return self.new_entry(new_structure, active=False)
def evaluate(self, structure, gtol=None):
if gtol is None:
gtol = self.target_forces
positions, forces, energy = lj_compact_evaluate(structure, gtol, self.minimal_density)
structure.set_positions(positions)
structure.relocate_to_cm()
if structure.natom > 2:
structure.align_inertia_momenta()
sorted_indices = structure.sort_sites()
forces = forces[sorted_indices]
pg = get_point_group(structure, executable='symmol')
properties = {'forces': generic_serializer(forces), 'energy': energy, 'point_group': pg}
return structure, properties
def evaluate_entry(self, entry_id):
pcm_log.debug('Evaluating %s target density= %7.3F' % (entry_id, self.minimal_density))
structure = self.get_structure(entry_id)
structure, properties = self.evaluate(structure, gtol=self.target_forces)
self.update_properties(entry_id=entry_id, new_properties=properties)
return self.set_structure(entry_id, structure)
    def refine(self, entry_id, gtol=None):
        """Re-relax the structure of an entry and store the refined result.

        NOTE(review): ``__init__`` assigns the boolean flag ``self.refine``,
        which shadows this method on instances, so
        ``population.refine(entry_id)`` would attempt to call a bool. The
        ``if self.refine:`` test below reads that flag. Confirm how this
        method is actually invoked before relying on it.

        :param gtol: force tolerance forwarded to ``evaluate``
        :return: result of storing the refined structure, or None when
            refinement is disabled
        """
        if self.refine:
            pcm_log.debug('Evaluating %s target density= %7.3F' % (entry_id, self.minimal_density))
            structure = self.get_structure(entry_id)
            structure, properties = self.evaluate(structure, gtol=gtol)
            self.update_properties(entry_id=entry_id, new_properties=properties)
            return self.set_structure(entry_id, structure)
def maxforce(self, entry_id):
return np.max(length_vectors(self.get_forces(entry_id)))
    def refine_progressive(self, entry_id):
        """Relax an entry under progressively tighter force tolerances.

        Starting from the decade just above the current maximum force, the
        tolerance is divided by ten each pass until a relaxation lowers the
        per-atom energy below its initial value (result is stored) or the
        remaining forces exceed the tolerance and the loop gives up.
        """
        # self.refine here is the boolean flag set in __init__ (it shadows the
        # refine() method on instances)
        if self.refine:
            inivalue = self.value(entry_id)
            gtol = 10 ** math.ceil(math.log10(self.maxforce(entry_id)))
            while True:
                pcm_log.debug('Local minimization up to %7.3f ' % gtol)
                gtol /= 10
                pcm_log.debug('Evaluating %s target density= %7.3F' % (entry_id, self.minimal_density))
                structure = self.get_structure(entry_id)
                structure, properties = self.evaluate(structure, gtol=gtol)
                if properties['energy'] / structure.natom < inivalue:
                    # Improvement found: persist the relaxed structure and stop
                    self.update_properties(entry_id=entry_id, new_properties=properties)
                    return self.set_structure(entry_id, structure)
                else:
                    pcm_log.debug('Relaxation raise value %7.3f < %7.3f' % (inivalue, properties['energy'] /
                                                                            structure.natom))
                if self.maxforce(entry_id) > gtol:
                    pcm_log.debug('I cannot relax more...')
                    break
def move_random(self, entry_id, factor=0.2, in_place=False, kind='move'):
entry = self.get_entry(entry_id)
pos = np.array(entry['structure']['positions']).reshape((-1, 3))
# Unit Vectors
uv = unit_vectors(2 * np.random.rand(*pos.shape) - 1)
new_pos = generic_serializer(pos + factor * uv)
structure = Structure(positions=new_pos, symbols=entry['structure']['symbols'], periodicity=False)
if in_place:
self.update_properties(entry_id=entry_id, new_properties={})
return self.set_structure(entry_id, structure)
else:
structure = Structure(positions=new_pos, symbols=entry['structure']['symbols'], periodicity=False)
return self.new_entry(structure, active=False)
def get_structure(self, entry_id):
entry = self.get_entry(entry_id)
if 'structure' not in entry:
raise ValueError('structure is not present on %s' % entry_id)
if entry['structure'] is None:
raise ValueError('structure is None for %s' % entry_id)
return Structure.from_dict(entry['structure'])
def get_forces(self, entry_id):
entry = self.get_entry(entry_id, projection={'properties.forces': 1})
forces = np.array(entry['properties']['forces']).reshape((-1, 3))
return forces
def str_entry(self, entry_id):
structure = self.get_structure(entry_id)
entry = self.get_entry(entry_id, projection={'properties': 1})
msg = 'Cluster: LJ%d Point group: %s Energy: %7.3f Forces: %7.1E'
pg = entry['properties']['point_group']
return msg % (structure.natom, pg, entry['properties']['energy'], self.maxforce(entry_id))
def new_entry(self, structure, active=True):
if active and self.direct_evaluation:
structure, properties = self.evaluate(structure, gtol=self.target_forces)
else:
properties = {}
status = {self.tag: active}
entry = {'structure': structure.to_dict, 'properties': properties, 'status': status}
entry_id = self.set_entry(entry)
pcm_log.debug('Added new entry: %s with tag=%s: %s' % (str(entry_id), self.tag, str(active)))
return entry_id
def recover(self):
data = self.get_population_info()
if data is not None:
self.distance_tolerance = data['distance_tolerance']
self.value_tol = data['value_tol']
self.name = data['name']
self.target_forces = data['target_forces']
self.minimal_density = data['minimal_density']
def value(self, entry_id):
entry = self.get_entry(entry_id)
structure = self.get_structure(entry_id)
if 'properties' not in entry:
pcm_log.debug('This entry has no properties %s' % str(entry['_id']))
return None
# (dataset-viewer boilerplate below, commented out so the module parses)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.